auto import from //depot/cupcake/@135843
diff --git a/libc/bionic/__errno.c b/libc/bionic/__errno.c
new file mode 100644
index 0000000..8f33cce
--- /dev/null
+++ b/libc/bionic/__errno.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <errno.h>
+#include <bionic_tls.h>
+
+volatile int*  __errno( void )
+{
+  return  &((volatile int*)__get_tls())[TLS_SLOT_ERRNO];
+}
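
Bionic's <errno.h> defines the errno macro as *__errno(), so every thread reads and writes its own TLS_SLOT_ERRNO word. A minimal sketch of the observable effect, using only standard POSIX calls (the paths and values are illustrative):

    #include <errno.h>
    #include <fcntl.h>
    #include <pthread.h>
    #include <stdio.h>

    static void* worker(void* arg) {
        (void)arg;
        if (open("/no/such/path", O_RDONLY) == -1)
            printf("worker errno: %d\n", errno);   /* ENOENT, thread-local */
        return NULL;
    }

    int main(void) {
        pthread_t t;
        errno = 0;                          /* the main thread's own slot */
        pthread_create(&t, NULL, worker, NULL);
        pthread_join(t, NULL);
        printf("main errno: %d\n", errno);  /* unaffected by the worker's
                                               failure in its own slot */
        return 0;
    }
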
diff --git a/libc/bionic/__set_errno.c b/libc/bionic/__set_errno.c
new file mode 100644
index 0000000..c72d4f7
--- /dev/null
+++ b/libc/bionic/__set_errno.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+
+
+int __set_errno(int n)
+{
+    errno = n;
+    return -1;
+}
+
+/*
+ * This function is called from the syscall stubs
+ * (and tail-called by the 0-4 argument versions).
+ */
+
+int __set_syscall_errno(int n)
+{
+        /* Some syscalls, mmap() for example, have valid return
+        ** values that are "negative".  Since errno values are never
+        ** greater than 131 on Linux, we treat anything significantly
+        ** out of range as not-an-error.
+        */
+    if(n > -256) {
+        return __set_errno(-n);
+    } else {
+        return n;
+    }
+}
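
A C-level model of the stub convention described above: the kernel returns -errno in the result register, and the stub hands negative results to __set_syscall_errno(). raw_syscall() here is a hypothetical stand-in for the real assembly stub body:

    /* raw_syscall() is hypothetical; real stubs receive the kernel's
     * raw return value in a register and branch from assembly. */
    extern long raw_syscall(long number, ...);
    extern int  __set_syscall_errno(int n);

    int model_close(int fd) {
        int ret = (int)raw_syscall(6 /* __NR_close, illustrative */, fd);
        if (ret < 0) {
            /* A small negative value is -errno; a hugely negative one
             * (an mmap address, say) is a valid result.  The helper
             * above tells the two apart at the -256 boundary. */
            return __set_syscall_errno(ret);
        }
        return ret;
    }
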
diff --git a/libc/bionic/_rand48.c b/libc/bionic/_rand48.c
new file mode 100644
index 0000000..e422781
--- /dev/null
+++ b/libc/bionic/_rand48.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 1993 Martin Birgmeier
+ * All rights reserved.
+ *
+ * You may redistribute unmodified or modified versions of this source
+ * code provided that the above copyright notice and this and the
+ * following conditions are retained.
+ *
+ * This software is provided ``as is'', and comes with no warranties
+ * of any kind. I shall in no event be liable for anything that happens
+ * to anyone/anything when using this software.
+ */
+
+#include <sys/cdefs.h>
+#include "rand48.h"
+
+unsigned short _rand48_seed[3] = {
+	RAND48_SEED_0,
+	RAND48_SEED_1,
+	RAND48_SEED_2
+};
+unsigned short _rand48_mult[3] = {
+	RAND48_MULT_0,
+	RAND48_MULT_1,
+	RAND48_MULT_2
+};
+unsigned short _rand48_add = RAND48_ADD;
+
+void
+_dorand48(unsigned short xseed[3])
+{
+	unsigned long accu;
+	unsigned short temp[2];
+
+	accu = (unsigned long) _rand48_mult[0] * (unsigned long) xseed[0] +
+	 (unsigned long) _rand48_add;
+	temp[0] = (unsigned short) accu;	/* lower 16 bits */
+	accu >>= sizeof(unsigned short) * 8;
+	accu += (unsigned long) _rand48_mult[0] * (unsigned long) xseed[1] +
+	 (unsigned long) _rand48_mult[1] * (unsigned long) xseed[0];
+	temp[1] = (unsigned short) accu;	/* middle 16 bits */
+	accu >>= sizeof(unsigned short) * 8;
+	accu += _rand48_mult[0] * xseed[2] + _rand48_mult[1] * xseed[1] + _rand48_mult[2] * xseed[0];
+	xseed[0] = temp[0];
+	xseed[1] = temp[1];
+	xseed[2] = (unsigned short) accu;
+}
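
_dorand48() is the 48-bit linear congruential step X' = (a*X + c) mod 2^48, spread across three 16-bit words so it needs no 64-bit type. A cross-check sketch in plain 64-bit arithmetic, assuming the standard rand48 constants (a = 0x5DEECE66D, c = 0xB, default seed words 0x1234, 0xABCD, 0x330E):

    #include <stdint.h>
    #include <stdio.h>

    /* One LCG step over the whole 48-bit state at once. */
    static uint64_t step48(uint64_t x) {
        return (0x5DEECE66DULL * x + 0xBULL) & ((1ULL << 48) - 1);
    }

    int main(void) {
        /* The seed words packed high-to-low into one integer. */
        uint64_t x = ((uint64_t)0x1234 << 32) | ((uint64_t)0xabcd << 16) | 0x330e;
        int i;
        for (i = 0; i < 3; i++) {
            x = step48(x);              /* matches three _dorand48() calls */
            printf("%012llx\n", (unsigned long long)x);
        }
        return 0;
    }
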
diff --git a/libc/bionic/arc4random.c b/libc/bionic/arc4random.c
new file mode 100644
index 0000000..d70580a
--- /dev/null
+++ b/libc/bionic/arc4random.c
@@ -0,0 +1,307 @@
+/*      $OpenBSD: arc4random.c,v 1.19 2008/06/04 00:50:23 djm Exp $        */
+
+/*
+ * Copyright (c) 1996, David Mazieres <dm@uun.org>
+ * Copyright (c) 2008, Damien Miller <djm@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Arc4 random number generator for OpenBSD.
+ *
+ * This code is derived from section 17.1 of Applied Cryptography,
+ * second edition, which describes a stream cipher allegedly
+ * compatible with RSA Labs "RC4" cipher (the actual description of
+ * which is a trade secret).  The same algorithm is used as a stream
+ * cipher called "arcfour" in Tatu Ylonen's ssh package.
+ *
+ * Here the stream cipher has been modified always to include the time
+ * when initializing the state.  That makes it impossible to
+ * regenerate the same random sequence twice, so this can't be used
+ * for encryption, but will generate good random numbers.
+ *
+ * RC4 is a registered trademark of RSA Laboratories.
+ */
+
+#include <fcntl.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/time.h>
+#include "thread_private.h"
+
+/* BIONIC-BEGIN */
+/* this lock should protect the global variables in this file */
+static pthread_mutex_t  _arc4_lock = PTHREAD_MUTEX_INITIALIZER;
+#define  _ARC4_LOCK()      pthread_mutex_lock(&_arc4_lock)
+#define  _ARC4_UNLOCK()    pthread_mutex_unlock(&_arc4_lock)
+/* BIONIC-END */
+
+#ifdef __GNUC__
+#define inline __inline
+#else                           /* !__GNUC__ */
+#define inline
+#endif                          /* !__GNUC__ */
+
+struct arc4_stream {
+        u_int8_t i;
+        u_int8_t j;
+        u_int8_t s[256];
+};
+
+static int rs_initialized;
+static struct arc4_stream rs;
+static pid_t arc4_stir_pid;
+static int arc4_count;
+
+static inline u_int8_t arc4_getbyte(void);
+
+static inline void
+arc4_init(void)
+{
+        int     n;
+
+        for (n = 0; n < 256; n++)
+                rs.s[n] = n;
+        rs.i = 0;
+        rs.j = 0;
+}
+
+static inline void
+arc4_addrandom(u_char *dat, int datlen)
+{
+        int     n;
+        u_int8_t si;
+
+        rs.i--;
+        for (n = 0; n < 256; n++) {
+                rs.i = (rs.i + 1);
+                si = rs.s[rs.i];
+                rs.j = (rs.j + si + dat[n % datlen]);
+                rs.s[rs.i] = rs.s[rs.j];
+                rs.s[rs.j] = si;
+        }
+        rs.j = rs.i;
+}
+
+static void
+arc4_stir(void)
+{
+#if 1  /* BIONIC-BEGIN */
+	int     i, fd;
+	union {
+		struct timeval tv;
+		u_int rnd[128 / sizeof(u_int)];
+	}       rdat;
+
+        if (!rs_initialized) {
+                arc4_init();
+                rs_initialized = 1;
+        }
+
+	fd = open("/dev/urandom", O_RDONLY);
+	if (fd != -1) {
+		read(fd, rdat.rnd, sizeof(rdat.rnd));
+		close(fd);
+	}
+        else
+        {
+            /* /dev/urandom is unavailable: fall back to whatever
+             * happened to be on the stack, mixed with a little more
+             * time-based randomness.
+             */
+            gettimeofday(&rdat.tv, NULL);
+        }
+
+        arc4_stir_pid = getpid();
+	arc4_addrandom((void *) &rdat, sizeof(rdat));
+#else  /* BIONIC-END */
+        int     i, mib[2];
+        size_t        len;
+        u_char rnd[128];
+
+        if (!rs_initialized) {
+                arc4_init();
+                rs_initialized = 1;
+        }
+
+        mib[0] = CTL_KERN;
+        mib[1] = KERN_ARND;
+
+        len = sizeof(rnd);
+        sysctl(mib, 2, rnd, &len, NULL, 0);
+
+        arc4_stir_pid = getpid();
+        arc4_addrandom(rnd, sizeof(rnd));
+#endif
+        /*
+         * Discard early keystream, as per recommendations in:
+         * http://www.wisdom.weizmann.ac.il/~itsik/RC4/Papers/Rc4_ksa.ps
+         */
+        for (i = 0; i < 256; i++)
+                (void)arc4_getbyte();
+        arc4_count = 1600000;
+}
+
+static inline u_int8_t
+arc4_getbyte(void)
+{
+        u_int8_t si, sj;
+
+        rs.i = (rs.i + 1);
+        si = rs.s[rs.i];
+        rs.j = (rs.j + si);
+        sj = rs.s[rs.j];
+        rs.s[rs.i] = sj;
+        rs.s[rs.j] = si;
+        return (rs.s[(si + sj) & 0xff]);
+}
+
+u_int8_t
+__arc4_getbyte(void)
+{
+        u_int8_t val;
+
+        _ARC4_LOCK();
+        if (--arc4_count == 0 || !rs_initialized)
+                arc4_stir();
+        val = arc4_getbyte();
+        _ARC4_UNLOCK();
+        return val;
+}
+
+static inline u_int32_t
+arc4_getword(void)
+{
+        u_int32_t val;
+        val = arc4_getbyte() << 24;
+        val |= arc4_getbyte() << 16;
+        val |= arc4_getbyte() << 8;
+        val |= arc4_getbyte();
+        return val;
+}
+
+void
+arc4random_stir(void)
+{
+        _ARC4_LOCK();
+        arc4_stir();
+        _ARC4_UNLOCK();
+}
+
+void
+arc4random_addrandom(u_char *dat, int datlen)
+{
+        _ARC4_LOCK();
+        if (!rs_initialized)
+                arc4_stir();
+        arc4_addrandom(dat, datlen);
+        _ARC4_UNLOCK();
+}
+
+u_int32_t
+arc4random(void)
+{
+        u_int32_t val;
+        _ARC4_LOCK();
+        arc4_count -= 4;
+        if (arc4_count <= 0 || !rs_initialized || arc4_stir_pid != getpid())
+                arc4_stir();
+        val = arc4_getword();
+        _ARC4_UNLOCK();
+        return val;
+}
+
+void
+arc4random_buf(void *_buf, size_t n)
+{
+        u_char *buf = (u_char *)_buf;
+        _ARC4_LOCK();
+        if (!rs_initialized || arc4_stir_pid != getpid())
+                arc4_stir();
+        while (n--) {
+                if (--arc4_count <= 0)
+                        arc4_stir();
+                buf[n] = arc4_getbyte();
+        }
+        _ARC4_UNLOCK();
+}
+
+/*
+ * Calculate a uniformly distributed random number less than upper_bound
+ * avoiding "modulo bias".
+ *
+ * Uniformity is achieved by generating new random numbers until the one
+ * returned is outside the range [0, 2**32 % upper_bound).  This
+ * guarantees the selected random number will be inside
+ * [2**32 % upper_bound, 2**32) which maps back to [0, upper_bound)
+ * after reduction modulo upper_bound.
+ */
+u_int32_t
+arc4random_uniform(u_int32_t upper_bound)
+{
+        u_int32_t r, min;
+
+        if (upper_bound < 2)
+                return 0;
+
+#if (ULONG_MAX > 0xffffffffUL)
+        min = 0x100000000UL % upper_bound;
+#else
+        /* Calculate (2**32 % upper_bound) avoiding 64-bit math */
+        if (upper_bound > 0x80000000)
+                min = 1 + ~upper_bound;                /* 2**32 - upper_bound */
+        else {
+                /* (2**32 - (x * 2)) % x == 2**32 % x when x <= 2**31 */
+                min = ((0xffffffff - (upper_bound * 2)) + 1) % upper_bound;
+        }
+#endif
+
+        /*
+         * This could theoretically loop forever but each retry has
+         * p > 0.5 (worst case, usually far better) of selecting a
+         * number inside the range we need, so it should rarely need
+         * to re-roll.
+         */
+        for (;;) {
+                r = arc4random();
+                if (r >= min)
+                        break;
+        }
+
+        return r % upper_bound;
+}
+
+#if 0
+/*-------- Test code for i386 --------*/
+#include <stdio.h>
+#include <machine/pctr.h>
+int
+main(int argc, char **argv)
+{
+        const int iter = 1000000;
+        int     i;
+        pctrval v;
+
+        v = rdtsc();
+        for (i = 0; i < iter; i++)
+                arc4random();
+        v = rdtsc() - v;
+        v /= iter;
+
+        printf("%qd cycles\n", v);
+}
+#endif
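
A usage sketch for the modulo-bias point made in the arc4random_uniform() comment: reducing arc4random() with % alone over-represents small values whenever the range does not divide 2^32 evenly, while arc4random_uniform() rejects the uneven remainder first. This assumes a platform that declares both functions in <stdlib.h>:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        /* Biased: 2^32 is not a multiple of 6, so some faces of the die
         * come up very slightly more often than others. */
        unsigned biased = (unsigned)(arc4random() % 6) + 1;

        /* Unbiased: re-rolls until the raw draw is outside the uneven
         * remainder, then reduces modulo 6. */
        unsigned fair = (unsigned)arc4random_uniform(6) + 1;

        printf("biased=%u fair=%u\n", biased, fair);   /* each in 1..6 */
        return 0;
    }
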
diff --git a/libc/bionic/atomics_x86.c b/libc/bionic/atomics_x86.c
new file mode 100644
index 0000000..fd60f4f
--- /dev/null
+++ b/libc/bionic/atomics_x86.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <sys/atomics.h>
+
+#define FUTEX_SYSCALL 240
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+int __futex_wait(volatile void *ftx, int val)
+{
+    int ret;
+    asm volatile (
+        "int $0x80;"
+        : "=a" (ret)
+        : "0" (FUTEX_SYSCALL),
+          "b" (ftx),
+          "c" (FUTEX_WAIT),
+          "d" (val),
+          "S" (0)
+    );
+    return ret;
+}
+
+int __futex_wake(volatile void *ftx, int count)
+{
+    int ret;
+    asm volatile (
+        "int $0x80;"
+        : "=a" (ret)
+        : "0" (FUTEX_SYSCALL),
+          "b" (ftx),
+          "c" (FUTEX_WAKE),
+          "d" (count)
+    );
+    return ret;
+}
+
+int __atomic_cmpxchg(int old, int new, volatile int* addr) {
+    int xchg;
+    asm volatile (
+        "lock;"
+        "cmpxchg %%ecx, (%%edx);"
+        "setne %%al;"
+        : "=a" (xchg)
+        : "a" (old),
+          "c" (new),
+          "d" (addr)
+    );
+    return xchg;
+}
+
+int __atomic_swap(int new, volatile int* addr) {
+    int old;
+    asm volatile (
+        "lock;"
+        "xchg %%ecx, (%%edx);"
+        : "=c" (old)
+        : "c" (new),
+          "d" (addr)
+    );
+    return old;
+}
+
+int __atomic_dec(volatile int* addr) {
+    int old;
+    do {
+        old = *addr;
+    } while (__atomic_cmpxchg(old, old-1, addr));
+    return old;
+}
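
The four primitives above are enough to build a sleeping lock. A deliberately simplified sketch follows (not bionic's real mutex, which tracks a separate contended state to skip needless wake syscalls): __atomic_cmpxchg() returns 0 when the swap happened, and __futex_wait() returns immediately if the word no longer holds the expected value, so no wakeup is lost.

    /* Prototypes as implemented in the file above. */
    extern int __futex_wait(volatile void *ftx, int val);
    extern int __futex_wake(volatile void *ftx, int count);
    extern int __atomic_cmpxchg(int old, int new_val, volatile int *addr);
    extern int __atomic_swap(int new_val, volatile int *addr);

    static volatile int lock_word = 0;      /* 0 = free, 1 = held */

    static void tiny_lock(void) {
        /* Try to swap 0 -> 1; while someone else holds the lock,
         * sleep until the word changes from 1. */
        while (__atomic_cmpxchg(0, 1, &lock_word) != 0)
            __futex_wait(&lock_word, 1);
    }

    static void tiny_unlock(void) {
        __atomic_swap(0, &lock_word);       /* release */
        __futex_wake(&lock_word, 1);        /* wake at most one waiter */
    }
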
diff --git a/libc/bionic/basename.c b/libc/bionic/basename.c
new file mode 100644
index 0000000..8aaae53
--- /dev/null
+++ b/libc/bionic/basename.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <sys/cdefs.h>
+
+#include <errno.h>
+#include <libgen.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/param.h>
+
+char*
+basename(const char*  path)
+{
+    static char*  bname = NULL;
+    int           ret;
+
+    if (bname == NULL) {
+        bname = (char *)malloc(MAXPATHLEN);
+        if (bname == NULL)
+            return(NULL);
+    }
+    ret = basename_r(path, bname, MAXPATHLEN);
+    return (ret < 0) ? NULL : bname;
+}
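
basename() (and dirname() below, which follows the same pattern) returns a pointer into one lazily allocated static buffer, so each call overwrites the previous result and the function is not thread-safe; that is what the _r variants are for. A small caveat sketch, using the const-taking prototype above:

    #include <libgen.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
        char first[64];
        /* Copy the first result before the second call clobbers it. */
        strncpy(first, basename("/a/b/one"), sizeof(first) - 1);
        first[sizeof(first) - 1] = '\0';
        const char* second = basename("/a/b/two");
        printf("%s %s\n", first, second);    /* prints: one two */
        return 0;
    }
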
diff --git a/libc/bionic/basename_r.c b/libc/bionic/basename_r.c
new file mode 100644
index 0000000..e9080f0
--- /dev/null
+++ b/libc/bionic/basename_r.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <libgen.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/param.h>
+
+int
+basename_r(const char* path, char*  buffer, size_t  bufflen)
+{
+    const char *endp, *startp;
+    int         len, result;
+
+    /* Empty or NULL string gets treated as "." */
+    if (path == NULL || *path == '\0') {
+        startp  = ".";
+        len     = 1;
+        goto Exit;
+    }
+
+    /* Strip trailing slashes */
+    endp = path + strlen(path) - 1;
+    while (endp > path && *endp == '/')
+        endp--;
+
+    /* All slashes becomes "/" */
+    if (endp == path && *endp == '/') {
+        startp = "/";
+        len    = 1;
+        goto Exit;
+    }
+
+    /* Find the start of the base */
+    startp = endp;
+    while (startp > path && *(startp - 1) != '/')
+        startp--;
+
+    len = endp - startp + 1;
+
+Exit:
+    result = len;
+    if (buffer == NULL) {
+        return result;
+    }
+    if (len > (int)bufflen-1) {
+        len    = (int)bufflen-1;
+        result = -1;
+        errno  = ERANGE;
+    }
+
+    if (len >= 0) {
+        memcpy( buffer, startp, len );
+        buffer[len] = 0;
+    }
+    return result;
+}
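
A sketch of basename_r()'s buffer contract as implemented above: the return value is the component length, an undersized buffer receives a truncated but NUL-terminated copy along with -1 and ERANGE, and NULL or all-slash inputs collapse to "." and "/" respectively:

    #include <errno.h>
    #include <libgen.h>
    #include <stdio.h>

    int main(void) {
        char small[4];                /* room for three chars plus NUL */
        int  ret = basename_r("/tmp/longname", small, sizeof(small));
        printf("ret=%d errno=%d buf=\"%s\"\n", ret, errno, small);
        /* prints: ret=-1 errno=ERANGE buf="lon" */

        char buf[64];
        int  n = basename_r("///", buf, sizeof(buf));
        printf("\"///\" -> \"%s\" (%d)\n", buf, n);   /* "/" (1) */
        n = basename_r(NULL, buf, sizeof(buf));
        printf("NULL  -> \"%s\" (%d)\n", buf, n);     /* "." (1) */
        return 0;
    }
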
diff --git a/libc/bionic/dirname.c b/libc/bionic/dirname.c
new file mode 100644
index 0000000..8a1db90
--- /dev/null
+++ b/libc/bionic/dirname.c
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <sys/cdefs.h>
+
+#include <errno.h>
+#include <libgen.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/param.h>
+
+char*
+dirname(const char*  path)
+{
+    static char*  bname = NULL;
+    int           ret;
+
+    if (bname == NULL) {
+        bname = (char *)malloc(MAXPATHLEN);
+        if (bname == NULL)
+            return(NULL);
+    }
+
+    ret = dirname_r(path, bname, MAXPATHLEN);
+    return (ret < 0) ? NULL : bname;
+}
diff --git a/libc/bionic/dirname_r.c b/libc/bionic/dirname_r.c
new file mode 100644
index 0000000..df2e9bf
--- /dev/null
+++ b/libc/bionic/dirname_r.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <libgen.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/param.h>
+
+int
+dirname_r(const char*  path, char*  buffer, size_t  bufflen)
+{
+    const char *endp;
+    int         result, len;
+
+    /* Empty or NULL string gets treated as "." */
+    if (path == NULL || *path == '\0') {
+        path = ".";
+        len  = 1;
+        goto Exit;
+    }
+
+    /* Strip trailing slashes */
+    endp = path + strlen(path) - 1;
+    while (endp > path && *endp == '/')
+        endp--;
+
+    /* Find the start of the dir */
+    while (endp > path && *endp != '/')
+        endp--;
+
+    /* Either the dir is "/" or there are no slashes */
+    if (endp == path) {
+        path = (*endp == '/') ? "/" : ".";
+        len  = 1;
+        goto Exit;
+    }
+
+    do {
+        endp--;
+    } while (endp > path && *endp == '/');
+
+    len = endp - path + 1;
+
+Exit:
+    result = len;
+    if (len+1 > MAXPATHLEN) {
+        errno = ENAMETOOLONG;
+        return -1;
+    }
+    if (buffer == NULL)
+        return result;
+
+    if (len > (int)bufflen-1) {
+        len    = (int)bufflen-1;
+        result = -1;
+        errno  = ERANGE;
+    }
+
+    if (len >= 0) {
+        memcpy( buffer, path, len );
+        buffer[len] = 0;
+    }
+    return result;
+}
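
dirname_r() walks endp back across the final component and its separating slashes, so its edge cases differ from basename_r()'s. A sketch of the expected results for a few inputs, per the code above:

    #include <libgen.h>
    #include <stdio.h>
    #include <sys/param.h>

    int main(void) {
        static const char* const cases[] =
            { "/usr/lib", "/usr/", "usr", "/", "" };
        char     buf[MAXPATHLEN];
        unsigned i;
        for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
            int n = dirname_r(cases[i], buf, sizeof(buf));
            printf("\"%s\" -> \"%s\" (%d)\n", cases[i], buf, n);
        }
        /* Expected: "/usr" (4), "/" (1), "." (1), "/" (1), "." (1) */
        return 0;
    }
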
diff --git a/libc/bionic/dlmalloc.c b/libc/bionic/dlmalloc.c
new file mode 100644
index 0000000..78f20c0
--- /dev/null
+++ b/libc/bionic/dlmalloc.c
@@ -0,0 +1,5377 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+  This is a version (aka dlmalloc) of malloc/free/realloc written by
+  Doug Lea and released to the public domain, as explained at
+  http://creativecommons.org/licenses/publicdomain.  Send questions,
+  comments, complaints, performance data, etc to dl@cs.oswego.edu
+
+* Version 2.8.3 Thu Sep 22 11:16:15 2005  Doug Lea  (dl at gee)
+
+   Note: There may be an updated version of this malloc obtainable at
+           ftp://gee.cs.oswego.edu/pub/misc/malloc.c
+         Check before installing!
+
+* Quickstart
+
+  This library is all in one file to simplify the most common usage:
+  ftp it, compile it (-O3), and link it into another program. All of
+  the compile-time options default to reasonable values for use on
+  most platforms.  You might later want to step through various
+  compile-time and dynamic tuning options.
+
+  For convenience, an include file for code using this malloc is at:
+     ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h
+  You don't really need this .h file unless you call functions not
+  defined in your system include files.  The .h file contains only the
+  excerpts from this file needed for using this malloc on ANSI C/C++
+  systems, so long as you haven't changed compile-time options about
+  naming and tuning parameters.  If you do, then you can create your
+  own malloc.h that does include all settings by cutting at the point
+  indicated below. Note that you may already by default be using a C
+  library containing a malloc that is based on some version of this
+  malloc (for example in linux). You might still want to use the one
+  in this file to customize settings or to avoid overheads associated
+  with library versions.
+
+* Vital statistics:
+
+  Supported pointer/size_t representation:       4 or 8 bytes
+       size_t MUST be an unsigned type of the same width as
+       pointers. (If you are using an ancient system that declares
+       size_t as a signed type, or need it to be a different width
+       than pointers, you can use a previous release of this malloc
+       (e.g. 2.7.2) supporting these.)
+
+  Alignment:                                     8 bytes (default)
+       This suffices for nearly all current machines and C compilers.
+       However, you can define MALLOC_ALIGNMENT to be wider than this
+       if necessary (up to 128 bytes), at the expense of using more space.
+
+  Minimum overhead per allocated chunk:   4 or  8 bytes (if 4byte sizes)
+                                          8 or 16 bytes (if 8byte sizes)
+       Each malloced chunk has a hidden word of overhead holding size
+       and status information, and additional cross-check word
+       if FOOTERS is defined.
+
+  Minimum allocated size: 4-byte ptrs:  16 bytes    (including overhead)
+                          8-byte ptrs:  32 bytes    (including overhead)
+
+       Even a request for zero bytes (i.e., malloc(0)) returns a
+       pointer to something of the minimum allocatable size.
+       The maximum overhead wastage (i.e., number of extra bytes
+       allocated than were requested in malloc) is less than or equal
+       to the minimum size, except for requests >= mmap_threshold that
+       are serviced via mmap(), where the worst case wastage is about
+       32 bytes plus the remainder from a system page (the minimal
+       mmap unit); typically 4096 or 8192 bytes.
+
+  Security: static-safe; optionally more or less
+       The "security" of malloc refers to the ability of malicious
+       code to accentuate the effects of errors (for example, freeing
+       space that is not currently malloc'ed or overwriting past the
+       ends of chunks) in code that calls malloc.  This malloc
+       guarantees not to modify any memory locations below the base of
+       heap, i.e., static variables, even in the presence of usage
+       errors.  The routines additionally detect most improper frees
+       and reallocs.  All this holds as long as the static bookkeeping
+       for malloc itself is not corrupted by some other means.  This
+       is only one aspect of security -- these checks do not, and
+       cannot, detect all possible programming errors.
+
+       If FOOTERS is defined nonzero, then each allocated chunk
+       carries an additional check word to verify that it was malloced
+       from its space.  These check words are the same within each
+       execution of a program using malloc, but differ across
+       executions, so externally crafted fake chunks cannot be
+       freed. This improves security by rejecting frees/reallocs that
+       could corrupt heap memory, in addition to the checks preventing
+       writes to statics that are always on.  This may further improve
+       security at the expense of time and space overhead.  (Note that
+       FOOTERS may also be worth using with MSPACES.)
+
+       By default detected errors cause the program to abort (calling
+       "abort()"). You can override this to instead proceed past
+       errors by defining PROCEED_ON_ERROR.  In this case, a bad free
+       has no effect, and a malloc that encounters a bad address
+       caused by user overwrites will ignore the bad address by
+       dropping pointers and indices to all known memory. This may
+       be appropriate for programs that should continue if at all
+       possible in the face of programming errors, although they may
+       run out of memory because dropped memory is never reclaimed.
+
+       If you don't like either of these options, you can define
+       CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
+       else. And if you are sure that your program using malloc has
+       no errors or vulnerabilities, you can define INSECURE to 1,
+       which might (or might not) provide a small performance improvement.
+
+  Thread-safety: NOT thread-safe unless USE_LOCKS defined
+       When USE_LOCKS is defined, each public call to malloc, free,
+       etc is surrounded with either a pthread mutex or a win32
+       spinlock (depending on WIN32). This is not especially fast, and
+       can be a major bottleneck.  It is designed only to provide
+       minimal protection in concurrent environments, and to provide a
+       basis for extensions.  If you are using malloc in a concurrent
+       program, consider instead using ptmalloc, which is derived from
+       a version of this malloc. (See http://www.malloc.de).
+
+  System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
+       This malloc can use unix sbrk or any emulation (invoked using
+       the CALL_MORECORE macro) and/or mmap/munmap or any emulation
+       (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
+       memory.  On most unix systems, it tends to work best if both
+       MORECORE and MMAP are enabled.  On Win32, it uses emulations
+       based on VirtualAlloc. It also uses common C library functions
+       like memset.
+
+  Compliance: I believe it is compliant with the Single Unix Specification
+       (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
+       others as well.
+
+* Overview of algorithms
+
+  This is not the fastest, most space-conserving, most portable, or
+  most tunable malloc ever written. However it is among the fastest
+  while also being among the most space-conserving, portable and
+  tunable.  Consistent balance across these factors results in a good
+  general-purpose allocator for malloc-intensive programs.
+
+  In most ways, this malloc is a best-fit allocator. Generally, it
+  chooses the best-fitting existing chunk for a request, with ties
+  broken in approximately least-recently-used order. (This strategy
+  normally maintains low fragmentation.) However, for requests less
+  than 256 bytes, it deviates from best-fit when there is not an
+  exactly fitting available chunk by preferring to use space adjacent
+  to that used for the previous small request, as well as by breaking
+  ties in approximately most-recently-used order. (These enhance
+  locality of series of small allocations.)  And for very large requests
+  (>= 256Kb by default), it relies on system memory mapping
+  facilities, if supported.  (This helps avoid carrying around and
+  possibly fragmenting memory used only for large chunks.)
+
+  All operations (except malloc_stats and mallinfo) have execution
+  times that are bounded by a constant factor of the number of bits in
+  a size_t, not counting any clearing in calloc or copying in realloc,
+  or actions surrounding MORECORE and MMAP that have times
+  proportional to the number of non-contiguous regions returned by
+  system allocation routines, which is often just 1.
+
+  The implementation is not very modular and seriously overuses
+  macros. Perhaps someday all C compilers will do as good a job
+  inlining modular code as can now be done by brute-force expansion,
+  but now, enough of them seem not to.
+
+  Some compilers issue a lot of warnings about code that is
+  dead/unreachable only on some platforms, and also about intentional
+  uses of negation on unsigned types. All known cases of each can be
+  ignored.
+
+  For a longer but out of date high-level description, see
+     http://gee.cs.oswego.edu/dl/html/malloc.html
+
+* MSPACES
+  If MSPACES is defined, then in addition to malloc, free, etc.,
+  this file also defines mspace_malloc, mspace_free, etc. These
+  are versions of malloc routines that take an "mspace" argument
+  obtained using create_mspace, to control all internal bookkeeping.
+  If ONLY_MSPACES is defined, only these versions are compiled.
+  So if you would like to use this allocator for only some allocations,
+  and your system malloc for others, you can compile with
+  ONLY_MSPACES and then do something like...
+    static mspace mymspace = create_mspace(0,0); // for example
+    #define mymalloc(bytes)  mspace_malloc(mymspace, bytes)
+
+  (Note: If you only need one instance of an mspace, you can instead
+  use "USE_DL_PREFIX" to relabel the global malloc.)
+
+  You can similarly create thread-local allocators by storing
+  mspaces as thread-locals. For example:
+    static __thread mspace tlms = 0;
+    void*  tlmalloc(size_t bytes) {
+      if (tlms == 0) tlms = create_mspace(0, 0);
+      return mspace_malloc(tlms, bytes);
+    }
+    void  tlfree(void* mem) { mspace_free(tlms, mem); }
+
+  Unless FOOTERS is defined, each mspace is completely independent.
+  You cannot allocate from one and free to another (although
+  conformance is only weakly checked, so usage errors are not always
+  caught). If FOOTERS is defined, then each chunk carries around a tag
+  indicating its originating mspace, and frees are directed to their
+  originating spaces.
+
+ -------------------------  Compile-time options ---------------------------
+
+Be careful in setting #define values for numerical constants of type
+size_t. On some systems, literal values are not automatically extended
+to size_t precision unless they are explicitly cast.
+
+WIN32                    default: defined if _WIN32 defined
+  Defining WIN32 sets up defaults for MS environment and compilers.
+  Otherwise defaults are for unix.
+
+MALLOC_ALIGNMENT         default: (size_t)8
+  Controls the minimum alignment for malloc'ed chunks.  It must be a
+  power of two and at least 8, even on machines for which smaller
+  alignments would suffice. It may be defined as larger than this
+  though. Note however that code and data structures are optimized for
+  the case of 8-byte alignment.
+
+MSPACES                  default: 0 (false)
+  If true, compile in support for independent allocation spaces.
+  This is only supported if HAVE_MMAP is true.
+
+ONLY_MSPACES             default: 0 (false)
+  If true, only compile in mspace versions, not regular versions.
+
+USE_LOCKS                default: 0 (false)
+  Causes each call to each public routine to be surrounded with
+  pthread or WIN32 mutex lock/unlock. (If set true, this can be
+  overridden on a per-mspace basis for mspace versions.)
+
+FOOTERS                  default: 0
+  If true, provide extra checking and dispatching by placing
+  information in the footers of allocated chunks. This adds
+  space and time overhead.
+
+INSECURE                 default: 0
+  If true, omit checks for usage errors and heap space overwrites.
+
+USE_DL_PREFIX            default: NOT defined
+  Causes compiler to prefix all public routines with the string 'dl'.
+  This can be useful when you only want to use this malloc in one part
+  of a program, using your regular system malloc elsewhere.
+
+ABORT                    default: defined as abort()
+  Defines how to abort on failed checks.  On most systems, a failed
+  check cannot die with an "assert" or even print an informative
+  message, because the underlying print routines in turn call malloc,
+  which will fail again.  Generally, the best policy is to simply call
+  abort(). It's not very useful to do more than this because many
+  errors due to overwriting will show up as address faults (null, odd
+  addresses etc) rather than malloc-triggered checks, so will also
+  abort.  Also, most compilers know that abort() does not return, so
+  can better optimize code conditionally calling it.
+
+PROCEED_ON_ERROR           default: defined as 0 (false)
+  Controls whether detected bad addresses cause them to be bypassed
+  rather than aborting. If set, detected bad arguments to free and
+  realloc are ignored. And all bookkeeping information is zeroed out
+  upon a detected overwrite of freed heap space, thus losing the
+  ability to ever return it from malloc again, but enabling the
+  application to proceed. If PROCEED_ON_ERROR is defined, the
+  static variable malloc_corruption_error_count is compiled in
+  and can be examined to see if errors have occurred. This option
+  generates slower code than the default abort policy.
+
+DEBUG                    default: NOT defined
+  The DEBUG setting is mainly intended for people trying to modify
+  this code or diagnose problems when porting to new platforms.
+  However, it may also be able to better isolate user errors than just
+  using runtime checks.  The assertions in the check routines spell
+  out in more detail the assumptions and invariants underlying the
+  algorithms.  The checking is fairly extensive, and will slow down
+  execution noticeably. Calling malloc_stats or mallinfo with DEBUG
+  set will attempt to check every non-mmapped allocated and free chunk
+  in the course of computing the summaries.
+
+ABORT_ON_ASSERT_FAILURE   default: defined as 1 (true)
+  Debugging assertion failures can be nearly impossible if your
+  version of the assert macro causes malloc to be called, which will
+  lead to a cascade of further failures, blowing the runtime stack.
+  ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
+  which will usually make debugging easier.
+
+MALLOC_FAILURE_ACTION     default: sets errno to ENOMEM, or no-op on win32
+  The action to take before "return 0" when malloc fails to be able to
+  return memory because there is none available.
+
+HAVE_MORECORE             default: 1 (true) unless win32 or ONLY_MSPACES
+  True if this system supports sbrk or an emulation of it.
+
+MORECORE                  default: sbrk
+  The name of the sbrk-style system routine to call to obtain more
+  memory.  See below for guidance on writing custom MORECORE
+  functions. The type of the argument to sbrk/MORECORE varies across
+  systems.  It cannot be size_t, because it supports negative
+  arguments, so it is normally the signed type of the same width as
+  size_t (sometimes declared as "intptr_t").  It doesn't much matter
+  though. Internally, we only call it with arguments less than half
+  the max value of a size_t, which should work across all reasonable
+  possibilities, although sometimes generating compiler warnings.  See
+  near the end of this file for guidelines for creating a custom
+  version of MORECORE.
+
+MORECORE_CONTIGUOUS       default: 1 (true)
+  If true, take advantage of fact that consecutive calls to MORECORE
+  with positive arguments always return contiguous increasing
+  addresses.  This is true of unix sbrk. It does not hurt too much to
+  set it true anyway, since malloc copes with non-contiguities.
+  Setting it false when definitely non-contiguous saves time
+  and possibly wasted space it would take to discover this though.
+
+MORECORE_CANNOT_TRIM      default: NOT defined
+  True if MORECORE cannot release space back to the system when given
+  negative arguments. This is generally necessary only if you are
+  using a hand-crafted MORECORE function that cannot handle negative
+  arguments.
+
+HAVE_MMAP                 default: 1 (true)
+  True if this system supports mmap or an emulation of it.  If so, and
+  HAVE_MORECORE is not true, MMAP is used for all system
+  allocation. If set and HAVE_MORECORE is true as well, MMAP is
+  primarily used to directly allocate very large blocks. It is also
+  used as a backup strategy in cases where MORECORE fails to provide
+  space from system. Note: A single call to MUNMAP is assumed to be
+  able to unmap memory that may have been allocated using multiple calls
+  to MMAP, so long as they are adjacent.
+
+HAVE_MREMAP               default: 1 on linux, else 0
+  If true realloc() uses mremap() to re-allocate large blocks and
+  extend or shrink allocation spaces.
+
+MMAP_CLEARS               default: 1 on unix
+  True if mmap clears memory so calloc doesn't need to. This is true
+  for standard unix mmap using /dev/zero.
+
+USE_BUILTIN_FFS            default: 0 (i.e., not used)
+  Causes malloc to use the builtin ffs() function to compute indices.
+  Some compilers may recognize and intrinsify ffs to be faster than the
+  supplied C version. Also, the case of x86 using gcc is special-cased
+  to an asm instruction, so is already as fast as it can be, and so
+  this setting has no effect. (On most x86s, the asm version is only
+  slightly faster than the C version.)
+
+malloc_getpagesize         default: derive from system includes, or 4096.
+  The system page size. To the extent possible, this malloc manages
+  memory from the system in page-size units.  This may be (and
+  usually is) a function rather than a constant. This is ignored
+  if WIN32, where page size is determined using getSystemInfo during
+  initialization.
+
+USE_DEV_RANDOM             default: 0 (i.e., not used)
+  Causes malloc to use /dev/random to initialize secure magic seed for
+  stamping footers. Otherwise, the current time is used.
+
+NO_MALLINFO                default: 0
+  If defined, don't compile "mallinfo". This can be a simple way
+  of dealing with mismatches between system declarations and
+  those in this file.
+
+MALLINFO_FIELD_TYPE        default: size_t
+  The type of the fields in the mallinfo struct. This was originally
+  defined as "int" in SVID etc, but is more usefully defined as
+  size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
+
+REALLOC_ZERO_BYTES_FREES    default: not defined
+  This should be set if a call to realloc with zero bytes should 
+  be the same as a call to free. Some people think it should. Otherwise, 
+  since this malloc returns a unique pointer for malloc(0), so does 
+  realloc(p, 0).
+
+LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
+LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H,  LACKS_ERRNO_H
+LACKS_STDLIB_H                default: NOT defined unless on WIN32
+  Define these if your system does not have these header files.
+  You might need to manually insert some of the declarations they provide.
+
+DEFAULT_GRANULARITY        default: page size if MORECORE_CONTIGUOUS,
+                                system_info.dwAllocationGranularity in WIN32,
+                                otherwise 64K.
+      Also settable using mallopt(M_GRANULARITY, x)
+  The unit for allocating and deallocating memory from the system.  On
+  most systems with contiguous MORECORE, there is no reason to
+  make this more than a page. However, systems with MMAP tend to
+  either require or encourage larger granularities.  You can increase
+  this value to prevent system allocation functions from being called so
+  often, especially if they are slow.  The value must be at least one
+  page and must be a power of two.  Setting to 0 causes initialization
+  to either page size or win32 region size.  (Note: In previous
+  versions of malloc, the equivalent of this option was called
+  "TOP_PAD")
+
+DEFAULT_TRIM_THRESHOLD    default: 2MB
+      Also settable using mallopt(M_TRIM_THRESHOLD, x)
+  The maximum amount of unused top-most memory to keep before
+  releasing via malloc_trim in free().  Automatic trimming is mainly
+  useful in long-lived programs using contiguous MORECORE.  Because
+  trimming via sbrk can be slow on some systems, and can sometimes be
+  wasteful (in cases where programs immediately afterward allocate
+  more large chunks) the value should be high enough so that your
+  overall system performance would improve by releasing this much
+  memory.  As a rough guide, you might set to a value close to the
+  average size of a process (program) running on your system.
+  Releasing this much memory would allow such a process to run in
+  memory.  Generally, it is worth tuning trim thresholds when a
+  program undergoes phases where several large chunks are allocated
+  and released in ways that can reuse each other's storage, perhaps
+  mixed with phases where there are no such chunks at all. The trim
+  value must be greater than page size to have any useful effect.  To
+  disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
+  some people use of mallocing a huge space and then freeing it at
+  program startup, in an attempt to reserve system memory, doesn't
+  have the intended effect under automatic trimming, since that memory
+  will immediately be returned to the system.
+
+DEFAULT_MMAP_THRESHOLD       default: 256K
+      Also settable using mallopt(M_MMAP_THRESHOLD, x)
+  The request size threshold for using MMAP to directly service a
+  request. Requests of at least this size that cannot be allocated
+  using already-existing space will be serviced via mmap.  (If enough
+  normal freed space already exists it is used instead.)  Using mmap
+  segregates relatively large chunks of memory so that they can be
+  individually obtained and released from the host system. A request
+  serviced through mmap is never reused by any other request (at least
+  not directly; the system may just so happen to remap successive
+  requests to the same locations).  Segregating space in this way has
+  the benefits that: Mmapped space can always be individually released
+  back to the system, which helps keep the system level memory demands
+  of a long-lived program low.  Also, mapped memory doesn't become
+  `locked' between other chunks, as can happen with normally allocated
+  chunks, which means that even trimming via malloc_trim would not
+  release them.  However, it has the disadvantage that the space
+  cannot be reclaimed, consolidated, and then used to service later
+  requests, as happens with normal chunks.  The advantages of mmap
+  nearly always outweigh disadvantages for "large" chunks, but the
+  value of "large" may vary across systems.  The default is an
+  empirically derived value that works well in most systems. You can
+  disable mmap by setting to MAX_SIZE_T.
+
+*/
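
A configuration sketch for the options documented above, assuming a POSIX toolchain (the flag names are the documented ones; the command line and program are only illustrative). Compiling with -DUSE_DL_PREFIX lets this allocator coexist with the system malloc:

    cc -O3 -DUSE_DL_PREFIX -DUSE_LOCKS=1 -c dlmalloc.c

    #include <stddef.h>
    extern void* dlmalloc(size_t);
    extern void* dlrealloc(void*, size_t);
    extern void  dlfree(void*);

    int main(void) {
        void* p = dlmalloc(100);    /* from this allocator...           */
        p = dlrealloc(p, 200);
        dlfree(p);                  /* ...while plain malloc/free still
                                       go to the system C library       */
        return 0;
    }
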
+
+#ifndef WIN32
+#ifdef _WIN32
+#define WIN32 1
+#endif  /* _WIN32 */
+#endif  /* WIN32 */
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#define HAVE_MMAP 1
+#define HAVE_MORECORE 0
+#define LACKS_UNISTD_H
+#define LACKS_SYS_PARAM_H
+#define LACKS_SYS_MMAN_H
+#define LACKS_STRING_H
+#define LACKS_STRINGS_H
+#define LACKS_SYS_TYPES_H
+#define LACKS_ERRNO_H
+#define MALLOC_FAILURE_ACTION
+#define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */
+#endif  /* WIN32 */
+
+#if defined(DARWIN) || defined(_DARWIN)
+/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
+#ifndef HAVE_MORECORE
+#define HAVE_MORECORE 0
+#define HAVE_MMAP 1
+#endif  /* HAVE_MORECORE */
+#endif  /* DARWIN */
+
+#ifndef LACKS_SYS_TYPES_H
+#include <sys/types.h>  /* For size_t */
+#endif  /* LACKS_SYS_TYPES_H */
+
+/* The maximum possible size_t value has all bits set */
+#define MAX_SIZE_T           (~(size_t)0)
+
+#ifndef ONLY_MSPACES
+#define ONLY_MSPACES 0
+#endif  /* ONLY_MSPACES */
+#ifndef MSPACES
+#if ONLY_MSPACES
+#define MSPACES 1
+#else   /* ONLY_MSPACES */
+#define MSPACES 0
+#endif  /* ONLY_MSPACES */
+#endif  /* MSPACES */
+#ifndef MALLOC_ALIGNMENT
+#define MALLOC_ALIGNMENT ((size_t)8U)
+#endif  /* MALLOC_ALIGNMENT */
+#ifndef FOOTERS
+#define FOOTERS 0
+#endif  /* FOOTERS */
+#ifndef USE_MAX_ALLOWED_FOOTPRINT
+#define USE_MAX_ALLOWED_FOOTPRINT 0
+#endif
+#ifndef ABORT
+#define ABORT  abort()
+#endif  /* ABORT */
+#ifndef ABORT_ON_ASSERT_FAILURE
+#define ABORT_ON_ASSERT_FAILURE 1
+#endif  /* ABORT_ON_ASSERT_FAILURE */
+#ifndef PROCEED_ON_ERROR
+#define PROCEED_ON_ERROR 0
+#endif  /* PROCEED_ON_ERROR */
+#ifndef USE_LOCKS
+#define USE_LOCKS 0
+#endif  /* USE_LOCKS */
+#ifndef INSECURE
+#define INSECURE 0
+#endif  /* INSECURE */
+#ifndef HAVE_MMAP
+#define HAVE_MMAP 1
+#endif  /* HAVE_MMAP */
+#ifndef MMAP_CLEARS
+#define MMAP_CLEARS 1
+#endif  /* MMAP_CLEARS */
+#ifndef HAVE_MREMAP
+#ifdef linux
+#define HAVE_MREMAP 1
+#else   /* linux */
+#define HAVE_MREMAP 0
+#endif  /* linux */
+#endif  /* HAVE_MREMAP */
+#ifndef MALLOC_FAILURE_ACTION
+#define MALLOC_FAILURE_ACTION  errno = ENOMEM;
+#endif  /* MALLOC_FAILURE_ACTION */
+#ifndef HAVE_MORECORE
+#if ONLY_MSPACES
+#define HAVE_MORECORE 0
+#else   /* ONLY_MSPACES */
+#define HAVE_MORECORE 1
+#endif  /* ONLY_MSPACES */
+#endif  /* HAVE_MORECORE */
+#if !HAVE_MORECORE
+#define MORECORE_CONTIGUOUS 0
+#else   /* !HAVE_MORECORE */
+#ifndef MORECORE
+#define MORECORE sbrk
+#endif  /* MORECORE */
+#ifndef MORECORE_CONTIGUOUS
+#define MORECORE_CONTIGUOUS 1
+#endif  /* MORECORE_CONTIGUOUS */
+#endif  /* HAVE_MORECORE */
+#ifndef DEFAULT_GRANULARITY
+#if MORECORE_CONTIGUOUS
+#define DEFAULT_GRANULARITY (0)  /* 0 means to compute in init_mparams */
+#else   /* MORECORE_CONTIGUOUS */
+#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
+#endif  /* MORECORE_CONTIGUOUS */
+#endif  /* DEFAULT_GRANULARITY */
+#ifndef DEFAULT_TRIM_THRESHOLD
+#ifndef MORECORE_CANNOT_TRIM
+#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
+#else   /* MORECORE_CANNOT_TRIM */
+#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
+#endif  /* MORECORE_CANNOT_TRIM */
+#endif  /* DEFAULT_TRIM_THRESHOLD */
+#ifndef DEFAULT_MMAP_THRESHOLD
+#if HAVE_MMAP
+#define DEFAULT_MMAP_THRESHOLD ((size_t)64U * (size_t)1024U)
+#else   /* HAVE_MMAP */
+#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
+#endif  /* HAVE_MMAP */
+#endif  /* DEFAULT_MMAP_THRESHOLD */
+#ifndef USE_BUILTIN_FFS
+#define USE_BUILTIN_FFS 0
+#endif  /* USE_BUILTIN_FFS */
+#ifndef USE_DEV_RANDOM
+#define USE_DEV_RANDOM 0
+#endif  /* USE_DEV_RANDOM */
+#ifndef NO_MALLINFO
+#define NO_MALLINFO 0
+#endif  /* NO_MALLINFO */
+#ifndef MALLINFO_FIELD_TYPE
+#define MALLINFO_FIELD_TYPE size_t
+#endif  /* MALLINFO_FIELD_TYPE */
+
+/*
+  mallopt tuning options.  SVID/XPG defines four standard parameter
+  numbers for mallopt, normally defined in malloc.h.  None of these
+  are used in this malloc, so setting them has no effect. But this
+  malloc does support the following options.
+*/
+
+#define M_TRIM_THRESHOLD     (-1)
+#define M_GRANULARITY        (-2)
+#define M_MMAP_THRESHOLD     (-3)
+
+/* ------------------------ Mallinfo declarations ------------------------ */
+
+#if !NO_MALLINFO
+/*
+  This version of malloc supports the standard SVID/XPG mallinfo
+  routine that returns a struct containing usage properties and
+  statistics. It should work on any system that has a
+  /usr/include/malloc.h defining struct mallinfo.  The main
+  declaration needed is the mallinfo struct that is returned (by-copy)
+  by mallinfo().  The mallinfo struct contains a bunch of fields that
+  are not even meaningful in this version of malloc.  These fields
+  are instead filled by mallinfo() with other numbers that might be of
+  interest.
+
+  HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
+  /usr/include/malloc.h file that includes a declaration of struct
+  mallinfo.  If so, it is included; else a compliant version is
+  declared below.  These must be precisely the same for mallinfo() to
+  work.  The original SVID version of this struct, defined on most
+  systems with mallinfo, declares all fields as ints. But some others
+  define them as unsigned long. If your system defines the fields using a
+  type of different width than listed here, you MUST #include your
+  system version and #define HAVE_USR_INCLUDE_MALLOC_H.
+*/
+
+/* #define HAVE_USR_INCLUDE_MALLOC_H */
+
+#if !ANDROID
+#ifdef HAVE_USR_INCLUDE_MALLOC_H
+#include "/usr/include/malloc.h"
+#else /* HAVE_USR_INCLUDE_MALLOC_H */
+
+struct mallinfo {
+  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
+  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
+  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
+  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
+  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
+  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
+  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
+  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
+  MALLINFO_FIELD_TYPE fordblks; /* total free space */
+  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
+};
+
+#endif /* HAVE_USR_INCLUDE_MALLOC_H */
+#endif /* ANDROID */
+#endif /* NO_MALLINFO */
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#if !ONLY_MSPACES
+
+/* ------------------- Declarations of public routines ------------------- */
+
+/* The five primary entry points keep their dl prefixes only when both
+   USE_DL_PREFIX and MALLOC_LEAK_CHECK are defined */
+#if !defined(USE_DL_PREFIX) || !defined(MALLOC_LEAK_CHECK)
+#define dlcalloc               calloc
+#define dlfree                 free
+#define dlmalloc               malloc
+#define dlmemalign             memalign
+#define dlrealloc              realloc
+#endif
+
+#ifndef USE_DL_PREFIX
+#define dlvalloc               valloc
+#define dlpvalloc              pvalloc
+#define dlmallinfo             mallinfo
+#define dlmallopt              mallopt
+#define dlmalloc_trim          malloc_trim
+#define dlmalloc_walk_free_pages \
+                               malloc_walk_free_pages
+#define dlmalloc_walk_heap \
+                               malloc_walk_heap
+#define dlmalloc_stats         malloc_stats
+#define dlmalloc_usable_size   malloc_usable_size
+#define dlmalloc_footprint     malloc_footprint
+#define dlmalloc_max_allowed_footprint \
+                               malloc_max_allowed_footprint
+#define dlmalloc_set_max_allowed_footprint \
+                               malloc_set_max_allowed_footprint
+#define dlmalloc_max_footprint malloc_max_footprint
+#define dlindependent_calloc   independent_calloc
+#define dlindependent_comalloc independent_comalloc
+#endif /* USE_DL_PREFIX */
+
+
+/*
+  malloc(size_t n)
+  Returns a pointer to a newly allocated chunk of at least n bytes, or
+  null if no space is available, in which case errno is set to ENOMEM
+  on ANSI C systems.
+
+  If n is zero, malloc returns a minimum-sized chunk. (The minimum
+  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
+  systems.)  Note that size_t is an unsigned type, so calls with
+  arguments that would be negative if signed are interpreted as
+  requests for huge amounts of space, which will often fail. The
+  maximum supported value of n differs across systems, but is in all
+  cases less than the maximum representable value of a size_t.
+*/
+void* dlmalloc(size_t);
+
+/*
+  free(void* p)
+  Releases the chunk of memory pointed to by p, that had been previously
+  allocated using malloc or a related routine such as realloc.
+  It has no effect if p is null. If p was not malloced or already
+  freed, free(p) will by default cause the current program to abort.
+*/
+void  dlfree(void*);
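+
+/*
+  A minimal usage sketch of the malloc/free pair described above
+  (not from the original commentary): check for failure, use the
+  chunk, release it.
+
+    #include <errno.h>
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    void example(void) {
+      char* buf = malloc(1024);   // at least 1024 usable bytes
+      if (buf == NULL) {          // on failure errno is ENOMEM
+        perror("malloc");
+        return;
+      }
+      buf[0] = 'x';
+      free(buf);                  // free(NULL) would be a no-op
+    }
+*/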
+
+/*
+  calloc(size_t n_elements, size_t element_size);
+  Returns a pointer to n_elements * element_size bytes, with all locations
+  set to zero.
+*/
+void* dlcalloc(size_t, size_t);
+
+/*
+  realloc(void* p, size_t n)
+  Returns a pointer to a chunk of size n that contains the same data
+  as does chunk p up to the minimum of (n, p's size) bytes, or null
+  if no space is available.
+
+  The returned pointer may or may not be the same as p. The algorithm
+  prefers extending p in most cases when possible, otherwise it
+  employs the equivalent of a malloc-copy-free sequence.
+
+  If p is null, realloc is equivalent to malloc.
+
+  If space is not available, realloc returns null, errno is set (if on
+  ANSI) and p is NOT freed.
+
+  If n is for fewer bytes than already held by p, the newly unused
+  space is lopped off and freed if possible.  realloc with a size
+  argument of zero (re)allocates a minimum-sized chunk.
+
+  The old unix realloc convention of allowing the last-free'd chunk
+  to be used as an argument to realloc is not supported.
+*/
+
+void* dlrealloc(void*, size_t);
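+
+/*
+  Because a failed realloc returns null but does NOT free p, the usual
+  safe pattern stores the result in a temporary rather than overwriting
+  p directly.  A sketch, with p and new_size assumed in scope:
+
+    void* tmp = realloc(p, new_size);
+    if (tmp == NULL) {
+      // allocation failed; p is still valid and must still be freed
+    } else {
+      p = tmp;       // success: adopt the (possibly moved) chunk
+    }
+*/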
+
+/*
+  memalign(size_t alignment, size_t n);
+  Returns a pointer to a newly allocated chunk of n bytes, aligned
+  in accord with the alignment argument.
+
+  The alignment argument should be a power of two. If the argument is
+  not a power of two, the nearest greater power is used.
+  8-byte alignment is guaranteed by normal malloc calls, so don't
+  bother calling memalign with an argument of 8 or less.
+
+  Overreliance on memalign is a sure way to fragment space.
+*/
+void* dlmemalign(size_t, size_t);
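+
+/*
+  For illustration: a 4096-byte block on a 4096-byte boundary, e.g.
+  for a page-aligned I/O buffer.
+
+    void* page = memalign(4096, 4096);  // alignment: a power of two
+    if (page != NULL) {
+      // ((size_t)page % 4096) == 0 holds here
+      free(page);
+    }
+*/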
+
+/*
+  valloc(size_t n);
+  Equivalent to memalign(pagesize, n), where pagesize is the page
+  size of the system. If the pagesize is unknown, 4096 is used.
+*/
+void* dlvalloc(size_t);
+
+/*
+  mallopt(int parameter_number, int parameter_value)
+  Sets tunable parameters. The format is to provide a
+  (parameter-number, parameter-value) pair.  mallopt then sets the
+  corresponding parameter to the argument value if it can (i.e., so
+  long as the value is meaningful), and returns 1 if successful else
+  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
+  normally defined in malloc.h.  None of these are used in this malloc,
+  so setting them has no effect. But this malloc also supports other
+  options in mallopt. See below for details.  Briefly, supported
+  parameters are as follows (listed defaults are for "typical"
+  configurations).
+
+  Symbol            param #  default    allowed param values
+  M_TRIM_THRESHOLD     -1   2*1024*1024   any   (MAX_SIZE_T disables)
+  M_GRANULARITY        -2     page size   any power of 2 >= page size
+  M_MMAP_THRESHOLD     -3       64*1024   any   (or 0 if no MMAP support)
+*/
+int dlmallopt(int, int);
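+
+/*
+  For illustration: disable trimming and raise the mmap threshold.
+  The value argument is an int that the allocator converts to size_t,
+  so -1 becomes MAX_SIZE_T.
+
+    int ok;
+    ok = mallopt(M_TRIM_THRESHOLD, -1);        // -1 -> MAX_SIZE_T: never trim
+    ok = mallopt(M_MMAP_THRESHOLD, 256*1024);  // mmap requests >= 256KB
+    // each call returns 1 on success, 0 if the value was rejected
+*/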
+
+/*
+  malloc_footprint();
+  Returns the number of bytes obtained from the system.  The total
+  number of bytes allocated by malloc, realloc etc., is less than this
+  value. Unlike mallinfo, this function returns only a precomputed
+  result, so can be called frequently to monitor memory consumption.
+  Even if locks are otherwise defined, this function does not use them,
+  so results might not be up to date.
+*/
+size_t dlmalloc_footprint(void);
+
+#if USE_MAX_ALLOWED_FOOTPRINT
+/*
+  malloc_max_allowed_footprint();
+  Returns the number of bytes that the heap is allowed to obtain
+  from the system.  malloc_footprint() should always return a
+  size less than or equal to max_allowed_footprint, unless the
+  max_allowed_footprint was set to a value smaller than the
+  footprint at the time.
+*/
+size_t dlmalloc_max_allowed_footprint(void);
+
+/*
+  malloc_set_max_allowed_footprint(size_t bytes);
+  Set the maximum number of bytes that the heap is allowed to
+  obtain from the system.  The size will be rounded up to a whole
+  page, and the rounded number will be returned from future calls
+  to malloc_max_allowed_footprint().  If the new max_allowed_footprint
+  is larger than the current footprint, the heap will never grow
+  larger than max_allowed_footprint.  If the new max_allowed_footprint
+  is smaller than the current footprint, the heap will not grow
+  further.
+
+  TODO: try to force the heap to give up memory in the shrink case,
+        and update this comment once that happens.
+*/
+void dlmalloc_set_max_allowed_footprint(size_t bytes);
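+
+/*
+  A sketch of capping heap growth: limit the heap to 16MB; the call
+  itself rounds the cap up to a whole page.
+
+    malloc_set_max_allowed_footprint(16 * 1024 * 1024);
+    size_t cap = malloc_max_allowed_footprint();  // the rounded cap
+    // From here on malloc_footprint() should stay <= cap, unless the
+    // footprint already exceeded the cap when it was set.
+*/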
+#endif /* USE_MAX_ALLOWED_FOOTPRINT */
+
+/*
+  malloc_max_footprint();
+  Returns the maximum number of bytes obtained from the system. This
+  value will be greater than current footprint if deallocated space
+  has been reclaimed by the system. The peak number of bytes allocated
+  by malloc, realloc etc., is less than this value. Unlike mallinfo,
+  this function returns only a precomputed result, so can be called
+  frequently to monitor memory consumption.  Even if locks are
+  otherwise defined, this function does not use them, so results might
+  not be up to date.
+*/
+size_t dlmalloc_max_footprint(void);
+
+#if !NO_MALLINFO
+/*
+  mallinfo()
+  Returns (by copy) a struct containing various summary statistics:
+
+  arena:     current total non-mmapped bytes allocated from system
+  ordblks:   the number of free chunks
+  smblks:    always zero.
+  hblks:     current number of mmapped regions
+  hblkhd:    total bytes held in mmapped regions
+  usmblks:   the maximum total allocated space. This will be greater
+                than current total if trimming has occurred.
+  fsmblks:   always zero
+  uordblks:  current total allocated space (normal or mmapped)
+  fordblks:  total free space
+  keepcost:  the maximum number of bytes that could ideally be released
+               back to system via malloc_trim. ("ideally" means that
+               it ignores page restrictions etc.)
+
+  Because these fields are ints, but internal bookkeeping may
+  be kept as longs, the reported values may wrap around zero and
+  thus be inaccurate.
+*/
+struct mallinfo dlmallinfo(void);
+#endif /* NO_MALLINFO */
+
+/*
+  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
+
+  independent_calloc is similar to calloc, but instead of returning a
+  single cleared space, it returns an array of pointers to n_elements
+  independent elements that can hold contents of size elem_size, each
+  of which starts out cleared, and can be independently freed,
+  realloc'ed etc. The elements are guaranteed to be adjacently
+  allocated (this is not guaranteed to occur with multiple callocs or
+  mallocs), which may also improve cache locality in some
+  applications.
+
+  The "chunks" argument is optional (i.e., may be null, which is
+  probably the most typical usage). If it is null, the returned array
+  is itself dynamically allocated and should also be freed when it is
+  no longer needed. Otherwise, the chunks array must be of at least
+  n_elements in length. It is filled in with the pointers to the
+  chunks.
+
+  In either case, independent_calloc returns this pointer array, or
+  null if the allocation failed.  If n_elements is zero and "chunks"
+  is null, it returns a chunk representing an array with zero elements
+  (which should be freed if not wanted).
+
+  Each element must be individually freed when it is no longer
+  needed. If you'd like to instead be able to free all at once, you
+  should instead use regular calloc and assign pointers into this
+  space to represent elements.  (In this case though, you cannot
+  independently free elements.)
+
+  independent_calloc simplifies and speeds up implementations of many
+  kinds of pools.  It may also be useful when constructing large data
+  structures that initially have a fixed number of fixed-sized nodes,
+  but the number is not known at compile time, and some of the nodes
+  may later need to be freed. For example:
+
+  struct Node { int item; struct Node* next; };
+
+  struct Node* build_list() {
+    struct Node** pool;
+    int n = read_number_of_nodes_needed();
+    if (n <= 0) return 0;
+    pool = (struct Node**)independent_calloc(n, sizeof(struct Node), 0);
+    if (pool == 0) die();
+    // organize into a linked list...
+    struct Node* first = pool[0];
+    for (int i = 0; i < n-1; ++i)
+      pool[i]->next = pool[i+1];
+    free(pool);     // Can now free the array (or not, if it is needed later)
+    return first;
+  }
+*/
+void** dlindependent_calloc(size_t, size_t, void**);
+
+/*
+  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
+
+  independent_comalloc allocates, all at once, a set of n_elements
+  chunks with sizes indicated in the "sizes" array.    It returns
+  an array of pointers to these elements, each of which can be
+  independently freed, realloc'ed etc. The elements are guaranteed to
+  be adjacently allocated (this is not guaranteed to occur with
+  multiple callocs or mallocs), which may also improve cache locality
+  in some applications.
+
+  The "chunks" argument is optional (i.e., may be null). If it is null
+  the returned array is itself dynamically allocated and should also
+  be freed when it is no longer needed. Otherwise, the chunks array
+  must be of at least n_elements in length. It is filled in with the
+  pointers to the chunks.
+
+  In either case, independent_comalloc returns this pointer array, or
+  null if the allocation failed.  If n_elements is zero and chunks is
+  null, it returns a chunk representing an array with zero elements
+  (which should be freed if not wanted).
+
+  Each element must be individually freed when it is no longer
+  needed. If you'd like to instead be able to free all at once, you
+  should instead use a single regular malloc, and assign pointers at
+  particular offsets in the aggregate space. (In this case though, you
+  cannot independently free elements.)
+
+  independent_comalloc differs from independent_calloc in that each
+  element may have a different size, and also that it does not
+  automatically clear elements.
+
+  independent_comalloc can be used to speed up allocation in cases
+  where several structs or objects must always be allocated at the
+  same time.  For example:
+
+  struct Head { ... };
+  struct Foot { ... };
+
+  void send_message(char* msg) {
+    int msglen = strlen(msg);
+    size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
+    void* chunks[3];
+    if (independent_comalloc(3, sizes, chunks) == 0)
+      die();
+    struct Head* head = (struct Head*)(chunks[0]);
+    char*        body = (char*)(chunks[1]);
+    struct Foot* foot = (struct Foot*)(chunks[2]);
+    // ...
+  }
+
+  In general though, independent_comalloc is worth using only for
+  larger values of n_elements. For small values, you probably won't
+  detect enough difference from series of malloc calls to bother.
+
+  Overuse of independent_comalloc can increase overall memory usage,
+  since it cannot reuse existing noncontiguous small chunks that
+  might be available for some of the elements.
+*/
+void** dlindependent_comalloc(size_t, size_t*, void**);
+
+
+/*
+  pvalloc(size_t n);
+  Equivalent to valloc(minimum-page-that-holds(n)), that is,
+  round up n to nearest pagesize.
+ */
+void*  dlpvalloc(size_t);
+
+/*
+  malloc_trim(size_t pad);
+
+  If possible, gives memory back to the system (via negative arguments
+  to sbrk) if there is unused memory at the `high' end of the malloc
+  pool or in unused MMAP segments. You can call this after freeing
+  large blocks of memory to potentially reduce the system-level memory
+  requirements of a program. However, it cannot guarantee to reduce
+  memory. Under some allocation patterns, some large free blocks of
+  memory will be locked between two used chunks, so they cannot be
+  given back to the system.
+
+  The `pad' argument to malloc_trim represents the amount of free
+  trailing space to leave untrimmed. If this argument is zero, only
+  the minimum amount of memory to maintain internal data structures
+  will be left. Non-zero arguments can be supplied to maintain enough
+  trailing space to service future expected allocations without having
+  to re-obtain memory from the system.
+
+  Malloc_trim returns 1 if it actually released any memory, else 0.
+*/
+int  dlmalloc_trim(size_t);
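+
+/*
+  Typical use, sketched: after freeing a large working set, return
+  memory to the system while keeping 64KB of slack for upcoming
+  allocations.
+
+    free(big_buffer);             // big_buffer: some large prior allocation
+    if (malloc_trim(64 * 1024)) {
+      // some memory was actually released back to the system
+    }
+*/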
+
+/*
+  malloc_walk_free_pages(handler, harg)
+
+  Calls the provided handler on each free region in the heap.  The
+  memory between start and end is guaranteed not to contain any
+  important data, so the handler is free to alter the contents
+  in any way.  This can be used to advise the OS that large free
+  regions may be swapped out.
+
+  The value in harg will be passed to each call of the handler.
+ */
+void dlmalloc_walk_free_pages(void(*)(void*, void*, void*), void*);
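+
+/*
+  A sketch of one possible handler, assuming the three arguments are
+  region start, region end, and harg, and that the region is
+  page-aligned (otherwise round inward to page boundaries first):
+
+    #include <sys/mman.h>
+
+    static void release_region(void* start, void* end, void* harg) {
+      madvise(start, (size_t)((char*)end - (char*)start), MADV_DONTNEED);
+    }
+
+    malloc_walk_free_pages(release_region, NULL);
+*/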
+
+/*
+  malloc_walk_heap(handler, harg)
+
+  Calls the provided handler on each object or free region in the
+  heap.  The handler will receive the chunk pointer and length, the
+  object pointer and length, and the value in harg on each call.
+ */
+void dlmalloc_walk_heap(void(*)(const void*, size_t,
+                                const void*, size_t, void*),
+                        void*);
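+
+/*
+  A sketch of a heap-dumping handler, using the argument order given
+  above (chunk pointer/length, object pointer/length, harg):
+
+    #include <stdio.h>
+
+    static void dump_entry(const void* chunk, size_t chunk_len,
+                           const void* obj, size_t obj_len, void* harg) {
+      fprintf((FILE*)harg, "chunk %p (%zu bytes) object %p (%zu bytes)\n",
+              chunk, chunk_len, obj, obj_len);
+    }
+
+    malloc_walk_heap(dump_entry, stderr);
+*/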
+
+/*
+  malloc_usable_size(void* p);
+
+  Returns the number of bytes you can actually use in
+  an allocated chunk, which may be more than you requested (although
+  often not) due to alignment and minimum size constraints.
+  You can use this many bytes without worrying about
+  overwriting other allocated objects. This is not a particularly great
+  programming practice. malloc_usable_size can be more useful in
+  debugging and assertions, for example:
+
+  p = malloc(n);
+  assert(malloc_usable_size(p) >= 256);
+*/
+size_t dlmalloc_usable_size(void*);
+
+/*
+  malloc_stats();
+  Prints on stderr the amount of space obtained from the system (both
+  via sbrk and mmap), the maximum amount (which may be more than
+  current if malloc_trim and/or munmap got called), and the current
+  number of bytes allocated via malloc (or realloc, etc) but not yet
+  freed. Note that this is the number of bytes allocated, not the
+  number requested. It will be larger than the number requested
+  because of alignment and bookkeeping overhead. Because it includes
+  alignment wastage as being in use, this figure may be greater than
+  zero even when no user-level chunks are allocated.
+
+  The reported current and maximum system memory can be inaccurate if
+  a program makes other calls to system memory allocation functions
+  (normally sbrk) outside of malloc.
+
+  malloc_stats prints only the most commonly interesting statistics.
+  More information can be obtained by calling mallinfo.
+*/
+void  dlmalloc_stats(void);
+
+#endif /* ONLY_MSPACES */
+
+#if MSPACES
+
+/*
+  mspace is an opaque type representing an independent
+  region of space that supports mspace_malloc, etc.
+*/
+typedef void* mspace;
+
+/*
+  create_mspace creates and returns a new independent space with the
+  given initial capacity, or, if 0, the default granularity size.  It
+  returns null if there is no system memory available to create the
+  space.  If argument locked is non-zero, the space uses a separate
+  lock to control access. The capacity of the space will grow
+  dynamically as needed to service mspace_malloc requests.  You can
+  control the sizes of incremental increases of this space by
+  compiling with a different DEFAULT_GRANULARITY or dynamically
+  setting with mallopt(M_GRANULARITY, value).
+*/
+mspace create_mspace(size_t capacity, int locked);
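+
+/*
+  A lifecycle sketch: a private, locked space, torn down in one call
+  instead of freeing chunk by chunk.
+
+    mspace msp = create_mspace(0, 1);          // default capacity, locked
+    if (msp != NULL) {
+      void* p = mspace_malloc(msp, 128);
+      // ... use p ...
+      size_t released = destroy_mspace(msp);   // bytes returned to system
+    }
+*/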
+
+/*
+  destroy_mspace destroys the given space, and attempts to return all
+  of its memory back to the system, returning the total number of
+  bytes freed. After destruction, the results of access to all memory
+  used by the space become undefined.
+*/
+size_t destroy_mspace(mspace msp);
+
+/*
+  create_mspace_with_base uses the memory supplied as the initial base
+  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
+  space is used for bookkeeping, so the capacity must be at least this
+  large. (Otherwise 0 is returned.) When this initial space is
+  exhausted, additional memory will be obtained from the system.
+  Destroying this space will deallocate all additionally allocated
+  space (if possible) but not the initial base.
+*/
+mspace create_mspace_with_base(void* base, size_t capacity, int locked);
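+
+/*
+  For illustration, an mspace built on a caller-supplied arena;
+  sizeof(arena) comfortably exceeds the bookkeeping overhead of less
+  than 128*sizeof(size_t) bytes.
+
+    static char arena[64 * 1024];
+
+    mspace msp = create_mspace_with_base(arena, sizeof(arena), 0);
+    if (msp != NULL) {
+      void* p = mspace_malloc(msp, 100);
+      // ... use p; destroy_mspace(msp) will not free `arena' itself ...
+    }
+*/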
+
+/*
+  mspace_malloc behaves as malloc, but operates within
+  the given space.
+*/
+void* mspace_malloc(mspace msp, size_t bytes);
+
+/*
+  mspace_free behaves as free, but operates within
+  the given space.
+
+  If compiled with FOOTERS==1, mspace_free is not actually needed.
+  free may be called instead of mspace_free because freed chunks from
+  any space are handled by their originating spaces.
+*/
+void mspace_free(mspace msp, void* mem);
+
+/*
+  mspace_realloc behaves as realloc, but operates within
+  the given space.
+
+  If compiled with FOOTERS==1, mspace_realloc is not actually
+  needed.  realloc may be called instead of mspace_realloc because
+  realloced chunks from any space are handled by their originating
+  spaces.
+*/
+void* mspace_realloc(mspace msp, void* mem, size_t newsize);
+
+/*
+  mspace_calloc behaves as calloc, but operates within
+  the given space.
+*/
+void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
+
+/*
+  mspace_memalign behaves as memalign, but operates within
+  the given space.
+*/
+void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
+
+/*
+  mspace_independent_calloc behaves as independent_calloc, but
+  operates within the given space.
+*/
+void** mspace_independent_calloc(mspace msp, size_t n_elements,
+                                 size_t elem_size, void* chunks[]);
+
+/*
+  mspace_independent_comalloc behaves as independent_comalloc, but
+  operates within the given space.
+*/
+void** mspace_independent_comalloc(mspace msp, size_t n_elements,
+                                   size_t sizes[], void* chunks[]);
+
+/*
+  mspace_footprint() returns the number of bytes obtained from the
+  system for this space.
+*/
+size_t mspace_footprint(mspace msp);
+
+/*
+  mspace_max_footprint() returns the peak number of bytes obtained from the
+  system for this space.
+*/
+size_t mspace_max_footprint(mspace msp);
+
+
+#if !NO_MALLINFO
+/*
+  mspace_mallinfo behaves as mallinfo, but reports properties of
+  the given space.
+*/
+struct mallinfo mspace_mallinfo(mspace msp);
+#endif /* NO_MALLINFO */
+
+/*
+  mspace_malloc_stats behaves as malloc_stats, but reports
+  properties of the given space.
+*/
+void mspace_malloc_stats(mspace msp);
+
+/*
+  mspace_trim behaves as malloc_trim, but
+  operates within the given space.
+*/
+int mspace_trim(mspace msp, size_t pad);
+
+/*
+  An alias for mallopt.
+*/
+int mspace_mallopt(int, int);
+
+#endif /* MSPACES */
+
+#ifdef __cplusplus
+};  /* end of extern "C" */
+#endif /* __cplusplus */
+
+/*
+  ========================================================================
+  To make a fully customizable malloc.h header file, cut everything
+  above this line, put into file malloc.h, edit to suit, and #include it
+  on the next line, as well as in programs that use this malloc.
+  ========================================================================
+*/
+
+/* #include "malloc.h" */
+
+/*------------------------------ internal #includes ---------------------- */
+
+#ifdef WIN32
+#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
+#endif /* WIN32 */
+
+#include <stdio.h>       /* for printing in malloc_stats */
+
+#ifndef LACKS_ERRNO_H
+#include <errno.h>       /* for MALLOC_FAILURE_ACTION */
+#endif /* LACKS_ERRNO_H */
+#if FOOTERS
+#include <time.h>        /* for magic initialization */
+#endif /* FOOTERS */
+#ifndef LACKS_STDLIB_H
+#include <stdlib.h>      /* for abort() */
+#endif /* LACKS_STDLIB_H */
+#ifdef DEBUG
+#if ABORT_ON_ASSERT_FAILURE
+#define assert(x) if(!(x)) ABORT
+#else /* ABORT_ON_ASSERT_FAILURE */
+#include <assert.h>
+#endif /* ABORT_ON_ASSERT_FAILURE */
+#else  /* DEBUG */
+#define assert(x)
+#endif /* DEBUG */
+#ifndef LACKS_STRING_H
+#include <string.h>      /* for memset etc */
+#endif  /* LACKS_STRING_H */
+#if USE_BUILTIN_FFS
+#ifndef LACKS_STRINGS_H
+#include <strings.h>     /* for ffs */
+#endif /* LACKS_STRINGS_H */
+#endif /* USE_BUILTIN_FFS */
+#if HAVE_MMAP
+#ifndef LACKS_SYS_MMAN_H
+#include <sys/mman.h>    /* for mmap */
+#endif /* LACKS_SYS_MMAN_H */
+#ifndef LACKS_FCNTL_H
+#include <fcntl.h>
+#endif /* LACKS_FCNTL_H */
+#endif /* HAVE_MMAP */
+#if HAVE_MORECORE
+#ifndef LACKS_UNISTD_H
+#include <unistd.h>     /* for sbrk */
+#else /* LACKS_UNISTD_H */
+#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
+extern void*     sbrk(ptrdiff_t);
+#endif /* FreeBSD etc */
+#endif /* LACKS_UNISTD_H */
+#endif /* HAVE_MORECORE */
+
+#ifndef WIN32
+#ifndef malloc_getpagesize
+#  ifdef _SC_PAGESIZE         /* some SVR4 systems omit an underscore */
+#    ifndef _SC_PAGE_SIZE
+#      define _SC_PAGE_SIZE _SC_PAGESIZE
+#    endif
+#  endif
+#  ifdef _SC_PAGE_SIZE
+#    define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
+#  else
+#    if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
+       extern size_t getpagesize();
+#      define malloc_getpagesize getpagesize()
+#    else
+#      ifdef WIN32 /* use supplied emulation of getpagesize */
+#        define malloc_getpagesize getpagesize()
+#      else
+#        ifndef LACKS_SYS_PARAM_H
+#          include <sys/param.h>
+#        endif
+#        ifdef EXEC_PAGESIZE
+#          define malloc_getpagesize EXEC_PAGESIZE
+#        else
+#          ifdef NBPG
+#            ifndef CLSIZE
+#              define malloc_getpagesize NBPG
+#            else
+#              define malloc_getpagesize (NBPG * CLSIZE)
+#            endif
+#          else
+#            ifdef NBPC
+#              define malloc_getpagesize NBPC
+#            else
+#              ifdef PAGESIZE
+#                define malloc_getpagesize PAGESIZE
+#              else /* just guess */
+#                define malloc_getpagesize ((size_t)4096U)
+#              endif
+#            endif
+#          endif
+#        endif
+#      endif
+#    endif
+#  endif
+#endif
+#endif
+
+/* ------------------- size_t and alignment properties -------------------- */
+
+/* The byte and bit size of a size_t */
+#define SIZE_T_SIZE         (sizeof(size_t))
+#define SIZE_T_BITSIZE      (sizeof(size_t) << 3)
+
+/* Some constants coerced to size_t */
+/* Annoying but necessary to avoid errors on some platforms */
+#define SIZE_T_ZERO         ((size_t)0)
+#define SIZE_T_ONE          ((size_t)1)
+#define SIZE_T_TWO          ((size_t)2)
+#define TWO_SIZE_T_SIZES    (SIZE_T_SIZE<<1)
+#define FOUR_SIZE_T_SIZES   (SIZE_T_SIZE<<2)
+#define SIX_SIZE_T_SIZES    (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
+#define HALF_MAX_SIZE_T     (MAX_SIZE_T / 2U)
+
+/* The bit mask value corresponding to MALLOC_ALIGNMENT */
+#define CHUNK_ALIGN_MASK    (MALLOC_ALIGNMENT - SIZE_T_ONE)
+
+/* True if address a has acceptable alignment */
+#define is_aligned(A)       (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
+
+/* the number of bytes to offset an address to align it */
+#define align_offset(A)\
+ ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
+  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
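+
+/*
+  Worked example, assuming the default MALLOC_ALIGNMENT of 8 (so
+  CHUNK_ALIGN_MASK == 7): for A == 0x1004, (size_t)A & 7 == 4, giving
+  align_offset(A) == (8 - 4) & 7 == 4; adding 4 bytes yields the
+  aligned address 0x1008.  For an already-aligned A the offset is 0.
+*/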
+
+/* -------------------------- MMAP preliminaries ------------------------- */
+
+/*
+   If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
+   checks to fail so the compiler optimizer can delete code rather than
+   using so many "#if"s.
+*/
+
+
+/* MORECORE and MMAP must return MFAIL on failure */
+#define MFAIL                ((void*)(MAX_SIZE_T))
+#define CMFAIL               ((char*)(MFAIL)) /* defined for convenience */
+
+#if !HAVE_MMAP
+#define IS_MMAPPED_BIT       (SIZE_T_ZERO)
+#define USE_MMAP_BIT         (SIZE_T_ZERO)
+#define CALL_MMAP(s)         MFAIL
+#define CALL_MUNMAP(a, s)    (-1)
+#define DIRECT_MMAP(s)       MFAIL
+
+#else /* HAVE_MMAP */
+#define IS_MMAPPED_BIT       (SIZE_T_ONE)
+#define USE_MMAP_BIT         (SIZE_T_ONE)
+
+#ifndef WIN32
+#define CALL_MUNMAP(a, s)    munmap((a), (s))
+#define MMAP_PROT            (PROT_READ|PROT_WRITE)
+#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
+#define MAP_ANONYMOUS        MAP_ANON
+#endif /* MAP_ANON */
+#ifdef MAP_ANONYMOUS
+#define MMAP_FLAGS           (MAP_PRIVATE|MAP_ANONYMOUS)
+#define CALL_MMAP(s)         mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
+#else /* MAP_ANONYMOUS */
+/*
+   Nearly all versions of mmap support MAP_ANONYMOUS, so the following
+   is unlikely to be needed, but is supplied just in case.
+*/
+#define MMAP_FLAGS           (MAP_PRIVATE)
+static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
+#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
+           (dev_zero_fd = open("/dev/zero", O_RDWR), \
+            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
+            mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
+#endif /* MAP_ANONYMOUS */
+
+#define DIRECT_MMAP(s)       CALL_MMAP(s)
+#else /* WIN32 */
+
+/* Win32 MMAP via VirtualAlloc */
+static void* win32mmap(size_t size) {
+  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
+  return (ptr != 0)? ptr: MFAIL;
+}
+
+/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
+static void* win32direct_mmap(size_t size) {
+  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
+                           PAGE_READWRITE);
+  return (ptr != 0)? ptr: MFAIL;
+}
+
+/* This function supports releasing coalesced segments */
+static int win32munmap(void* ptr, size_t size) {
+  MEMORY_BASIC_INFORMATION minfo;
+  char* cptr = ptr;
+  while (size) {
+    if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
+      return -1;
+    if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
+        minfo.State != MEM_COMMIT || minfo.RegionSize > size)
+      return -1;
+    if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
+      return -1;
+    cptr += minfo.RegionSize;
+    size -= minfo.RegionSize;
+  }
+  return 0;
+}
+
+#define CALL_MMAP(s)         win32mmap(s)
+#define CALL_MUNMAP(a, s)    win32munmap((a), (s))
+#define DIRECT_MMAP(s)       win32direct_mmap(s)
+#endif /* WIN32 */
+#endif /* HAVE_MMAP */
+
+#if HAVE_MMAP && HAVE_MREMAP
+#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
+#else  /* HAVE_MMAP && HAVE_MREMAP */
+#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
+#endif /* HAVE_MMAP && HAVE_MREMAP */
+
+#if HAVE_MORECORE
+#define CALL_MORECORE(S)     MORECORE(S)
+#else  /* HAVE_MORECORE */
+#define CALL_MORECORE(S)     MFAIL
+#endif /* HAVE_MORECORE */
+
+/* mstate bit set if contiguous morecore disabled or failed */
+#define USE_NONCONTIGUOUS_BIT (4U)
+
+/* segment bit set in create_mspace_with_base */
+#define EXTERN_BIT            (8U)
+
+
+/* --------------------------- Lock preliminaries ------------------------ */
+
+#if USE_LOCKS
+
+/*
+  When locks are defined, there are up to two global locks:
+
+  * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
+    MORECORE.  In many cases sys_alloc requires two calls, that should
+    not be interleaved with calls by other threads.  This does not
+    protect against direct calls to MORECORE by other threads not
+    using this lock, so there is still code to cope as best we can
+    with interference.
+
+  * magic_init_mutex ensures that mparams.magic and other
+    unique mparams values are initialized only once.
+*/
+
+#ifndef WIN32
+/* By default use posix locks */
+#include <pthread.h>
+#define MLOCK_T pthread_mutex_t
+#define INITIAL_LOCK(l)      pthread_mutex_init(l, NULL)
+#define ACQUIRE_LOCK(l)      pthread_mutex_lock(l)
+#define RELEASE_LOCK(l)      pthread_mutex_unlock(l)
+
+#if HAVE_MORECORE
+static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif /* HAVE_MORECORE */
+
+static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+#else /* WIN32 */
+/*
+   Because lock-protected regions have bounded times, and there
+   are no recursive lock calls, we can use simple spinlocks.
+*/
+
+#define MLOCK_T long
+static int win32_acquire_lock (MLOCK_T *sl) {
+  for (;;) {
+#ifdef InterlockedCompareExchangePointer
+    if (!InterlockedCompareExchange(sl, 1, 0))
+      return 0;
+#else  /* Use older void* version */
+    if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0))
+      return 0;
+#endif /* InterlockedCompareExchangePointer */
+    Sleep (0);
+  }
+}
+
+static void win32_release_lock (MLOCK_T *sl) {
+  InterlockedExchange (sl, 0);
+}
+
+#define INITIAL_LOCK(l)      *(l)=0
+#define ACQUIRE_LOCK(l)      win32_acquire_lock(l)
+#define RELEASE_LOCK(l)      win32_release_lock(l)
+#if HAVE_MORECORE
+static MLOCK_T morecore_mutex;
+#endif /* HAVE_MORECORE */
+static MLOCK_T magic_init_mutex;
+#endif /* WIN32 */
+
+#define USE_LOCK_BIT               (2U)
+#else  /* USE_LOCKS */
+#define USE_LOCK_BIT               (0U)
+#define INITIAL_LOCK(l)
+#endif /* USE_LOCKS */
+
+#if USE_LOCKS && HAVE_MORECORE
+#define ACQUIRE_MORECORE_LOCK()    ACQUIRE_LOCK(&morecore_mutex);
+#define RELEASE_MORECORE_LOCK()    RELEASE_LOCK(&morecore_mutex);
+#else /* USE_LOCKS && HAVE_MORECORE */
+#define ACQUIRE_MORECORE_LOCK()
+#define RELEASE_MORECORE_LOCK()
+#endif /* USE_LOCKS && HAVE_MORECORE */
+
+#if USE_LOCKS
+#define ACQUIRE_MAGIC_INIT_LOCK()  ACQUIRE_LOCK(&magic_init_mutex);
+#define RELEASE_MAGIC_INIT_LOCK()  RELEASE_LOCK(&magic_init_mutex);
+#else  /* USE_LOCKS */
+#define ACQUIRE_MAGIC_INIT_LOCK()
+#define RELEASE_MAGIC_INIT_LOCK()
+#endif /* USE_LOCKS */
+
+
+/* -----------------------  Chunk representations ------------------------ */
+
+/*
+  (The following includes lightly edited explanations by Colin Plumb.)
+
+  The malloc_chunk declaration below is misleading (but accurate and
+  necessary).  It declares a "view" into memory allowing access to
+  necessary fields at known offsets from a given base.
+
+  Chunks of memory are maintained using a `boundary tag' method as
+  originally described by Knuth.  (See the paper by Paul Wilson
+  ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
+  techniques.)  Sizes of free chunks are stored both in the front of
+  each chunk and at the end.  This makes consolidating fragmented
+  chunks into bigger chunks fast.  The head fields also hold bits
+  representing whether chunks are free or in use.
+
+  Here are some pictures to make it clearer.  They are "exploded" to
+  show that the state of a chunk can be thought of as extending from
+  the high 31 bits of the head field of its header through the
+  prev_foot and PINUSE_BIT bit of the following chunk header.
+
+  A chunk that's in use looks like:
+
+   chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+           | Size of previous chunk (if P = 0)                             |
+           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
+         | Size of this chunk                                         1| +-+
+   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         |                                                               |
+         +-                                                             -+
+         |                                                               |
+         +-                                                             -+
+         |                                                               :
+         +-      size - sizeof(size_t) available payload bytes          -+
+         :                                                               |
+ chunk-> +-                                                             -+
+         |                                                               |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
+       | Size of next chunk (may or may not be in use)               | +-+
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+    And if it's free, it looks like this:
+
+   chunk-> +-                                                             -+
+           | User payload (must be in use, or we would have merged!)       |
+           +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
+         | Size of this chunk                                         0| +-+
+   mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         | Next pointer                                                  |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         | Prev pointer                                                  |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         |                                                               :
+         +-      size - sizeof(struct chunk) unused bytes               -+
+         :                                                               |
+ chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+         | Size of this chunk                                            |
+         +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
+       | Size of next chunk (must be in use, or we would have merged)| +-+
+ mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+       |                                                               :
+       +- User payload                                                -+
+       :                                                               |
+       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+                                                                     |0|
+                                                                     +-+
+  Note that since we always merge adjacent free chunks, the chunks
+  adjacent to a free chunk must be in use.
+
+  Given a pointer to a chunk (which can be derived trivially from the
+  payload pointer) we can, in O(1) time, find out whether the adjacent
+  chunks are free, and if so, unlink them from the lists that they
+  are on and merge them with the current chunk.
+
+  Chunks always begin on even word boundaries, so the mem portion
+  (which is returned to the user) is also on an even word boundary, and
+  thus at least double-word aligned.
+
+  The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
+  chunk size (which is always a multiple of two words), is an in-use
+  bit for the *previous* chunk.  If that bit is *clear*, then the
+  word before the current chunk size contains the previous chunk
+  size, and can be used to find the front of the previous chunk.
+  The very first chunk allocated always has this bit set, preventing
+  access to non-existent (or non-owned) memory. If pinuse is set for
+  any given chunk, then you CANNOT determine the size of the
+  previous chunk, and might even get a memory addressing fault when
+  trying to do so.
+
+  The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
+  the chunk size redundantly records whether the current chunk is
+  inuse. This redundancy enables usage checks within free and realloc,
+  and reduces indirection when freeing and consolidating chunks.
+
+  Each freshly allocated chunk must have both cinuse and pinuse set.
+  That is, each allocated chunk borders either a previously allocated
+  and still in-use chunk, or the base of its memory arena. This is
+  ensured by making all allocations from the `lowest' part of any
+  found chunk.  Further, no free chunk physically borders another one,
+  so each free chunk is known to be preceded and followed by either
+  inuse chunks or the ends of memory.
+
+  Note that the `foot' of the current chunk is actually represented
+  as the prev_foot of the NEXT chunk. This makes it easier to
+  deal with alignments etc but can be very confusing when trying
+  to extend or adapt this code.
+
+  The exceptions to all this are
+
+     1. The special chunk `top' is the top-most available chunk (i.e.,
+        the one bordering the end of available memory). It is treated
+        specially.  Top is never included in any bin, is used only if
+        no other chunk is available, and is released back to the
+        system if it is very large (see M_TRIM_THRESHOLD).  In effect,
+        the top chunk is treated as larger (and thus less well
+        fitting) than any other available chunk.  The top chunk
+        doesn't update its trailing size field since there is no next
+        contiguous chunk that would have to index off it. However,
+        space is still allocated for it (TOP_FOOT_SIZE) to enable
+        separation or merging when space is extended.
+
+     2. Chunks allocated via mmap, which have the lowest-order bit
+        (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
+        PINUSE_BIT in their head fields.  Because they are allocated
+        one-by-one, each must carry its own prev_foot field, which is
+        also used to hold the offset this chunk has within its mmapped
+        region, which is needed to preserve alignment. Each mmapped
+        chunk is trailed by the first two fields of a fake next-chunk
+        for sake of usage checks.
+
+*/
+
+struct malloc_chunk {
+  size_t               prev_foot;  /* Size of previous chunk (if free).  */
+  size_t               head;       /* Size and inuse bits. */
+  struct malloc_chunk* fd;         /* double links -- used only if free. */
+  struct malloc_chunk* bk;
+};
+
+typedef struct malloc_chunk  mchunk;
+typedef struct malloc_chunk* mchunkptr;
+typedef struct malloc_chunk* sbinptr;  /* The type of bins of chunks */
+typedef unsigned int bindex_t;         /* Described below */
+typedef unsigned int binmap_t;         /* Described below */
+typedef unsigned int flag_t;           /* The type of various bit flag sets */
+
+/* ------------------- Chunks sizes and alignments ----------------------- */
+
+#define MCHUNK_SIZE         (sizeof(mchunk))
+
+#if FOOTERS
+#define CHUNK_OVERHEAD      (TWO_SIZE_T_SIZES)
+#else /* FOOTERS */
+#define CHUNK_OVERHEAD      (SIZE_T_SIZE)
+#endif /* FOOTERS */
+
+/* MMapped chunks need a second word of overhead ... */
+#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
+/* ... and additional padding for fake next-chunk at foot */
+#define MMAP_FOOT_PAD       (FOUR_SIZE_T_SIZES)
+
+/* The smallest size we can malloc is an aligned minimal chunk */
+#define MIN_CHUNK_SIZE\
+  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* conversion from malloc headers to user pointers, and back */
+#define chunk2mem(p)        ((void*)((char*)(p)       + TWO_SIZE_T_SIZES))
+#define mem2chunk(mem)      ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
+/* chunk associated with aligned address A */
+#define align_as_chunk(A)   (mchunkptr)((A) + align_offset(chunk2mem(A)))
+
+/* Bounds on request (not chunk) sizes. */
+#define MAX_REQUEST         ((-MIN_CHUNK_SIZE) << 2)
+#define MIN_REQUEST         (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
+
+/* pad request bytes into a usable size */
+#define pad_request(req) \
+   (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
+
+/* pad request, checking for minimum (but not maximum) */
+#define request2size(req) \
+  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
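+
+/*
+  Worked example: on a 32-bit build without FOOTERS, SIZE_T_SIZE == 4,
+  CHUNK_OVERHEAD == 4, CHUNK_ALIGN_MASK == 7 and MIN_CHUNK_SIZE == 16.
+  Then request2size(1) == MIN_CHUNK_SIZE == 16, while
+  request2size(100) == (100 + 4 + 7) & ~7 == 104.
+*/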
+
+
+/* ------------------ Operations on head and foot fields ----------------- */
+
+/*
+  The head field of a chunk is or'ed with PINUSE_BIT when the previous
+  adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is in
+  use. If the chunk was obtained with mmap, the prev_foot field has
+  IS_MMAPPED_BIT set, otherwise holding the offset of the base of the
+  mmapped region to the base of the chunk.
+*/
+
+#define PINUSE_BIT          (SIZE_T_ONE)
+#define CINUSE_BIT          (SIZE_T_TWO)
+#define INUSE_BITS          (PINUSE_BIT|CINUSE_BIT)
+
+/* Head value for fenceposts */
+#define FENCEPOST_HEAD      (INUSE_BITS|SIZE_T_SIZE)
+
+/* extraction of fields from head words */
+#define cinuse(p)           ((p)->head & CINUSE_BIT)
+#define pinuse(p)           ((p)->head & PINUSE_BIT)
+#define chunksize(p)        ((p)->head & ~(INUSE_BITS))
+
+#define clear_pinuse(p)     ((p)->head &= ~PINUSE_BIT)
+#define clear_cinuse(p)     ((p)->head &= ~CINUSE_BIT)
+
+/* Treat space at ptr +/- offset as a chunk */
+#define chunk_plus_offset(p, s)  ((mchunkptr)(((char*)(p)) + (s)))
+#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
+
+/* Ptr to next or previous physical malloc_chunk. */
+#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
+#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
+
+/* extract next chunk's pinuse bit */
+#define next_pinuse(p)  ((next_chunk(p)->head) & PINUSE_BIT)
+
+/* Get/set size at footer */
+#define get_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot)
+#define set_foot(p, s)  (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
+
+/* Set size, pinuse bit, and foot */
+#define set_size_and_pinuse_of_free_chunk(p, s)\
+  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
+
+/* Set size, pinuse bit, foot, and clear next pinuse */
+#define set_free_with_pinuse(p, s, n)\
+  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
+
+#define is_mmapped(p)\
+  (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
+
+/* Get the internal overhead associated with chunk p */
+#define overhead_for(p)\
+ (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
+
+/* Return true if malloced space is not necessarily cleared */
+#if MMAP_CLEARS
+#define calloc_must_clear(p) (!is_mmapped(p))
+#else /* MMAP_CLEARS */
+#define calloc_must_clear(p) (1)
+#endif /* MMAP_CLEARS */
+
+/* ---------------------- Overlaid data structures ----------------------- */
+
+/*
+  When chunks are not in use, they are treated as nodes of either
+  lists or trees.
+
+  "Small"  chunks are stored in circular doubly-linked lists, and look
+  like this:
+
+    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Size of previous chunk                            |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    `head:' |             Size of chunk, in bytes                         |P|
+      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Forward pointer to next chunk in list             |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Back pointer to previous chunk in list            |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Unused space (may be 0 bytes long)                .
+            .                                                               .
+            .                                                               |
+nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    `foot:' |             Size of chunk, in bytes                           |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+  Larger chunks are kept in a form of bitwise digital trees (aka
+  tries) keyed on chunksizes.  Because malloc_tree_chunks are only for
+  free chunks greater than 256 bytes, their size doesn't impose any
+  constraints on user chunk sizes.  Each node looks like:
+
+    chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Size of previous chunk                            |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    `head:' |             Size of chunk, in bytes                         |P|
+      mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Forward pointer to next chunk of same size        |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Back pointer to previous chunk of same size       |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Pointer to left child (child[0])                  |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Pointer to right child (child[1])                 |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Pointer to parent                                 |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             bin index of this chunk                           |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+            |             Unused space                                      .
+            .                                                               |
+nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+    `foot:' |             Size of chunk, in bytes                           |
+            +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+  Each tree holding treenodes is a tree of unique chunk sizes.  Chunks
+  of the same size are arranged in a circularly-linked list, with only
+  the oldest chunk (the next to be used, in our FIFO ordering)
+  actually in the tree.  (Tree members are distinguished by a non-null
+  parent pointer.)  If a chunk with the same size as an existing node
+  is inserted, it is linked off the existing node using pointers that
+  work in the same way as fd/bk pointers of small chunks.
+
+  Each tree contains a power of 2 sized range of chunk sizes (the
+  smallest is 0x100 <= x < 0x180), which is divided in half at each
+  tree level, with the chunks in the smaller half of the range (0x100
+  <= x < 0x140 for the top node) in the left subtree and the larger
+  half (0x140 <= x < 0x180) in the right subtree.  This is, of course,
+  done by inspecting individual bits.
+
+  Using these rules, each node's left subtree contains all smaller
+  sizes than its right subtree.  However, the node at the root of each
+  subtree has no particular ordering relationship to either.  (The
+  dividing line between the subtree sizes is based on trie relation.)
+  If we remove the last chunk of a given size from the interior of the
+  tree, we need to replace it with a leaf node.  The tree ordering
+  rules permit a node to be replaced by any leaf below it.
+
+  The smallest chunk in a tree (a common operation in a best-fit
+  allocator) can be found by walking a path to the leftmost leaf in
+  the tree.  Unlike a usual binary tree, where we follow left child
+  pointers until we reach a null, here we follow the right child
+  pointer any time the left one is null, until we reach a leaf with
+  both child pointers null. The smallest chunk in the tree will be
+  somewhere along that path.
+
+  The worst case number of steps to add, find, or remove a node is
+  bounded by the number of bits differentiating chunks within
+  bins. Under current bin calculations, this ranges from 6 up to 21
+  (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
+  is of course much better.
+*/
+
+struct malloc_tree_chunk {
+  /* The first four fields must be compatible with malloc_chunk */
+  size_t                    prev_foot;
+  size_t                    head;
+  struct malloc_tree_chunk* fd;
+  struct malloc_tree_chunk* bk;
+
+  struct malloc_tree_chunk* child[2];
+  struct malloc_tree_chunk* parent;
+  bindex_t                  index;
+};
+
+typedef struct malloc_tree_chunk  tchunk;
+typedef struct malloc_tree_chunk* tchunkptr;
+typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
+
+/* A little helper macro for trees */
+#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
+
+/* ----------------------------- Segments -------------------------------- */
+
+/*
+  Each malloc space may include non-contiguous segments, held in a
+  list headed by an embedded malloc_segment record representing the
+  top-most space. Segments also include flags holding properties of
+  the space. Large chunks that are directly allocated by mmap are not
+  included in this list. They are instead independently created and
+  destroyed without otherwise keeping track of them.
+
+  Segment management mainly comes into play for spaces allocated by
+  MMAP.  Any call to MMAP might or might not return memory that is
+  adjacent to an existing segment.  MORECORE normally contiguously
+  extends the current space, so this space is almost always adjacent,
+  which is simpler and faster to deal with. (This is why MORECORE is
+  used preferentially to MMAP when both are available -- see
+  sys_alloc.)  When allocating using MMAP, we don't use any of the
+  hinting mechanisms (inconsistently) supported in various
+  implementations of unix mmap, or distinguish reserving from
+  committing memory. Instead, we just ask for space, and exploit
+  contiguity when we get it.  It is probably possible to do
+  better than this on some systems, but no general scheme seems
+  to be significantly better.
+
+  Management entails a simpler variant of the consolidation scheme
+  used for chunks to reduce fragmentation -- new adjacent memory is
+  normally prepended or appended to an existing segment. However,
+  there are limitations compared to chunk consolidation that mostly
+  reflect the fact that segment processing is relatively infrequent
+  (occurring only when getting memory from system) and that we
+  don't expect to have huge numbers of segments:
+
+  * Segments are not indexed, so traversal requires linear scans.  (It
+    would be possible to index these, but is not worth the extra
+    overhead and complexity for most programs on most platforms.)
+  * New segments are only appended to old ones when holding top-most
+    memory; if they cannot be prepended to others, they are held in
+    different segments.
+
+  Except for the top-most segment of an mstate, each segment record
+  is kept at the tail of its segment. Segments are added by pushing
+  segment records onto the list headed by &mstate.seg for the
+  containing mstate.
+
+  Segment flags control allocation/merge/deallocation policies:
+  * If EXTERN_BIT set, then we did not allocate this segment,
+    and so should not try to deallocate or merge with others.
+    (This currently holds only for the initial segment passed
+    into create_mspace_with_base.)
+  * If IS_MMAPPED_BIT set, the segment may be merged with
+    other surrounding mmapped segments and trimmed/de-allocated
+    using munmap.
+  * If neither bit is set, then the segment was obtained using
+    MORECORE so can be merged with surrounding MORECORE'd segments
+    and deallocated/trimmed using MORECORE with negative arguments.
+*/
+
+struct malloc_segment {
+  char*        base;             /* base address */
+  size_t       size;             /* allocated size */
+  struct malloc_segment* next;   /* ptr to next segment */
+  flag_t       sflags;           /* mmap and extern flag */
+};
+
+#define is_mmapped_segment(S)  ((S)->sflags & IS_MMAPPED_BIT)
+#define is_extern_segment(S)   ((S)->sflags & EXTERN_BIT)
+
+typedef struct malloc_segment  msegment;
+typedef struct malloc_segment* msegmentptr;
+
+/* ---------------------------- malloc_state ----------------------------- */
+
+/*
+   A malloc_state holds all of the bookkeeping for a space.
+   The main fields are:
+
+  Top
+    The topmost chunk of the currently active segment. Its size is
+    cached in topsize.  The actual size of topmost space is
+    topsize+TOP_FOOT_SIZE, which includes space reserved for adding
+    fenceposts and segment records if necessary when getting more
+    space from the system.  The size at which to autotrim top is
+    cached from mparams in trim_check, except that it is disabled if
+    an autotrim fails.
+
+  Designated victim (dv)
+    This is the preferred chunk for servicing small requests that
+    don't have exact fits.  It is normally the chunk split off most
+    recently to service another small request.  Its size is cached in
+    dvsize. The link fields of this chunk are not maintained since it
+    is not kept in a bin.
+
+  SmallBins
+    An array of bin headers for free chunks.  These bins hold chunks
+    with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
+    chunks of all the same size, spaced 8 bytes apart.  To simplify
+    use in doubly-linked lists, each bin header acts as a malloc_chunk
+    pointing to the real first node, if it exists (else pointing to
+    itself).  This avoids special-casing for headers.  But to avoid
+    waste, we allocate only the fd/bk pointers of bins, and then use
+    repositioning tricks to treat these as the fields of a chunk.
+
+  TreeBins
+    Treebins are pointers to the roots of trees holding a range of
+    sizes. There are 2 equally spaced treebins for each power of two
+    from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
+    larger.
+
+  Bin maps
+    There is one bit map for small bins ("smallmap") and one for
+    treebins ("treemap).  Each bin sets its bit when non-empty, and
+    clears the bit when empty.  Bit operations are then used to avoid
+    bin-by-bin searching -- nearly all "search" is done without ever
+    looking at bins that won't be selected.  The bit maps
+    conservatively use 32 bits per map word, even on 64-bit systems.
+    For a good description of some of the bit-based techniques used
+    here, see Henry S. Warren Jr's book "Hacker's Delight" (and
+    supplement at http://hackersdelight.org/). Many of these are
+    intended to reduce the branchiness of paths through malloc etc, as
+    well as to reduce the number of memory locations read or written.
+
+  Segments
+    A list of segments headed by an embedded malloc_segment record
+    representing the initial space.
+
+  Address check support
+    The least_addr field is the least address ever obtained from
+    MORECORE or MMAP. Attempted frees and reallocs of any address less
+    than this are trapped (unless INSECURE is defined).
+
+  Magic tag
+    A cross-check field that should always hold the same value as mparams.magic.
+
+  Flags
+    Bits recording whether to use MMAP, locks, or contiguous MORECORE.
+
+  Statistics
+    Each space keeps track of current and maximum system memory
+    obtained via MORECORE or MMAP.
+
+  Locking
+    If USE_LOCKS is defined, the "mutex" lock is acquired and released
+    around every public call using this mspace.
+*/
+
+/* Bin types, widths and sizes */
+#define NSMALLBINS        (32U)
+#define NTREEBINS         (32U)
+#define SMALLBIN_SHIFT    (3U)
+#define SMALLBIN_WIDTH    (SIZE_T_ONE << SMALLBIN_SHIFT)
+#define TREEBIN_SHIFT     (8U)
+#define MIN_LARGE_SIZE    (SIZE_T_ONE << TREEBIN_SHIFT)
+#define MAX_SMALL_SIZE    (MIN_LARGE_SIZE - SIZE_T_ONE)
+#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
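+
+/*
+  For concreteness (illustrative, assuming a 32-bit build where
+  SIZE_T_ONE == 1): SMALLBIN_WIDTH == 8 and MIN_LARGE_SIZE == 256, so
+  the smallbins hold chunks of sizes 16, 24, ..., 248, and any chunk
+  of 256 bytes or more is kept in a treebin instead.
+*/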
+
+struct malloc_state {
+  binmap_t   smallmap;
+  binmap_t   treemap;
+  size_t     dvsize;
+  size_t     topsize;
+  char*      least_addr;
+  mchunkptr  dv;
+  mchunkptr  top;
+  size_t     trim_check;
+  size_t     magic;
+  mchunkptr  smallbins[(NSMALLBINS+1)*2];
+  tbinptr    treebins[NTREEBINS];
+  size_t     footprint;
+#if USE_MAX_ALLOWED_FOOTPRINT
+  size_t     max_allowed_footprint;
+#endif
+  size_t     max_footprint;
+  flag_t     mflags;
+#if USE_LOCKS
+  MLOCK_T    mutex;     /* locate lock among fields that rarely change */
+#endif /* USE_LOCKS */
+  msegment   seg;
+};
+
+typedef struct malloc_state*    mstate;
+
+/* ------------- Global malloc_state and malloc_params ------------------- */
+
+/*
+  malloc_params holds global properties, including those that can be
+  dynamically set using mallopt. There is a single instance, mparams,
+  initialized in init_mparams.
+*/
+
+struct malloc_params {
+  size_t magic;
+  size_t page_size;
+  size_t granularity;
+  size_t mmap_threshold;
+  size_t trim_threshold;
+  flag_t default_mflags;
+};
+
+static struct malloc_params mparams;
+
+/* The global malloc_state used for all non-"mspace" calls */
+static struct malloc_state _gm_
+#if USE_MAX_ALLOWED_FOOTPRINT
+        = { .max_allowed_footprint = MAX_SIZE_T };
+#else
+        ;
+#endif
+
+#define gm                 (&_gm_)
+#define is_global(M)       ((M) == &_gm_)
+#define is_initialized(M)  ((M)->top != 0)
+
+/* -------------------------- system alloc setup ------------------------- */
+
+/* Operations on mflags */
+
+#define use_lock(M)           ((M)->mflags &   USE_LOCK_BIT)
+#define enable_lock(M)        ((M)->mflags |=  USE_LOCK_BIT)
+#define disable_lock(M)       ((M)->mflags &= ~USE_LOCK_BIT)
+
+#define use_mmap(M)           ((M)->mflags &   USE_MMAP_BIT)
+#define enable_mmap(M)        ((M)->mflags |=  USE_MMAP_BIT)
+#define disable_mmap(M)       ((M)->mflags &= ~USE_MMAP_BIT)
+
+#define use_noncontiguous(M)  ((M)->mflags &   USE_NONCONTIGUOUS_BIT)
+#define disable_contiguous(M) ((M)->mflags |=  USE_NONCONTIGUOUS_BIT)
+
+#define set_lock(M,L)\
+ ((M)->mflags = (L)?\
+  ((M)->mflags | USE_LOCK_BIT) :\
+  ((M)->mflags & ~USE_LOCK_BIT))
+
+/* page-align a size */
+#define page_align(S)\
+ (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
+
+/* granularity-align a size */
+#define granularity_align(S)\
+  (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
+
+#define is_page_aligned(S)\
+   (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
+#define is_granularity_aligned(S)\
+   (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
+
+/*  True if segment S holds address A */
+#define segment_holds(S, A)\
+  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
+
+/* Return segment holding given address */
+static msegmentptr segment_holding(mstate m, char* addr) {
+  msegmentptr sp = &m->seg;
+  for (;;) {
+    if (addr >= sp->base && addr < sp->base + sp->size)
+      return sp;
+    if ((sp = sp->next) == 0)
+      return 0;
+  }
+}
+
+/* Return true if segment contains a segment link */
+static int has_segment_link(mstate m, msegmentptr ss) {
+  msegmentptr sp = &m->seg;
+  for (;;) {
+    if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
+      return 1;
+    if ((sp = sp->next) == 0)
+      return 0;
+  }
+}
+
+#ifndef MORECORE_CANNOT_TRIM
+#define should_trim(M,s)  ((s) > (M)->trim_check)
+#else  /* MORECORE_CANNOT_TRIM */
+#define should_trim(M,s)  (0)
+#endif /* MORECORE_CANNOT_TRIM */
+
+/*
+  TOP_FOOT_SIZE is padding at the end of a segment, including space
+  that may be needed to place segment records and fenceposts when new
+  noncontiguous segments are added.
+*/
+#define TOP_FOOT_SIZE\
+  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
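+
+/*
+  Roughly, the three addends above are: alignment slack for the chunk
+  payload, an inuse chunk large enough to hold a malloc_segment
+  record, and one minimal chunk's worth of room for the trailing
+  fenceposts (see add_segment below).
+*/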
+
+
+/* -------------------------------  Hooks -------------------------------- */
+
+/*
+  PREACTION should be defined to return 0 on success, and nonzero on
+  failure. If you are not using locking, you can redefine these to do
+  anything you like.
+*/
+
+#if USE_LOCKS
+
+/* Ensure locks are initialized */
+#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
+
+#define PREACTION(M)  ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
+#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
+#else /* USE_LOCKS */
+
+#ifndef PREACTION
+#define PREACTION(M) (0)
+#endif  /* PREACTION */
+
+#ifndef POSTACTION
+#define POSTACTION(M)
+#endif  /* POSTACTION */
+
+#endif /* USE_LOCKS */
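+
+/*
+  The canonical usage pattern for these hooks (as in internal_mallinfo
+  and internal_malloc_stats below) is:
+
+      if (!PREACTION(M)) {
+        ... operate on M ...
+        POSTACTION(M);
+      }
+*/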
+
+/*
+  CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
+  USAGE_ERROR_ACTION is triggered on detected bad frees and
+  reallocs. The argument p is an address that might have triggered the
+  fault. It is ignored by the two predefined actions, but might be
+  useful in custom actions that try to help diagnose errors.
+*/
+
+#if PROCEED_ON_ERROR
+
+/* A count of the number of corruption errors causing resets */
+int malloc_corruption_error_count;
+
+/* default corruption action */
+static void reset_on_error(mstate m);
+
+#define CORRUPTION_ERROR_ACTION(m)  reset_on_error(m)
+#define USAGE_ERROR_ACTION(m, p)
+
+#else /* PROCEED_ON_ERROR */
+
+#ifndef CORRUPTION_ERROR_ACTION
+#define CORRUPTION_ERROR_ACTION(m) ABORT
+#endif /* CORRUPTION_ERROR_ACTION */
+
+#ifndef USAGE_ERROR_ACTION
+#define USAGE_ERROR_ACTION(m,p) ABORT
+#endif /* USAGE_ERROR_ACTION */
+
+#endif /* PROCEED_ON_ERROR */
+
+/* -------------------------- Debugging setup ---------------------------- */
+
+#if ! DEBUG
+
+#define check_free_chunk(M,P)
+#define check_inuse_chunk(M,P)
+#define check_malloced_chunk(M,P,N)
+#define check_mmapped_chunk(M,P)
+#define check_malloc_state(M)
+#define check_top_chunk(M,P)
+
+#else /* DEBUG */
+#define check_free_chunk(M,P)       do_check_free_chunk(M,P)
+#define check_inuse_chunk(M,P)      do_check_inuse_chunk(M,P)
+#define check_top_chunk(M,P)        do_check_top_chunk(M,P)
+#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
+#define check_mmapped_chunk(M,P)    do_check_mmapped_chunk(M,P)
+#define check_malloc_state(M)       do_check_malloc_state(M)
+
+static void   do_check_any_chunk(mstate m, mchunkptr p);
+static void   do_check_top_chunk(mstate m, mchunkptr p);
+static void   do_check_mmapped_chunk(mstate m, mchunkptr p);
+static void   do_check_inuse_chunk(mstate m, mchunkptr p);
+static void   do_check_free_chunk(mstate m, mchunkptr p);
+static void   do_check_malloced_chunk(mstate m, void* mem, size_t s);
+static void   do_check_tree(mstate m, tchunkptr t);
+static void   do_check_treebin(mstate m, bindex_t i);
+static void   do_check_smallbin(mstate m, bindex_t i);
+static void   do_check_malloc_state(mstate m);
+static int    bin_find(mstate m, mchunkptr x);
+static size_t traverse_and_check(mstate m);
+#endif /* DEBUG */
+
+/* ---------------------------- Indexing Bins ---------------------------- */
+
+#define is_small(s)         (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
+#define small_index(s)      ((s)  >> SMALLBIN_SHIFT)
+#define small_index2size(i) ((i)  << SMALLBIN_SHIFT)
+#define MIN_SMALL_INDEX     (small_index(MIN_CHUNK_SIZE))
+
+/* addressing by index. See above about smallbin repositioning */
+#define smallbin_at(M, i)   ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
+#define treebin_at(M,i)     (&((M)->treebins[i]))
+
+/* assign tree index for size S to variable I */
+#if defined(__GNUC__) && defined(i386)
+#define compute_tree_index(S, I)\
+{\
+  size_t X = S >> TREEBIN_SHIFT;\
+  if (X == 0)\
+    I = 0;\
+  else if (X > 0xFFFF)\
+    I = NTREEBINS-1;\
+  else {\
+    unsigned int K;\
+    __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm"  (X));\
+    I =  (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
+  }\
+}
+#else /* GNUC */
+#define compute_tree_index(S, I)\
+{\
+  size_t X = S >> TREEBIN_SHIFT;\
+  if (X == 0)\
+    I = 0;\
+  else if (X > 0xFFFF)\
+    I = NTREEBINS-1;\
+  else {\
+    unsigned int Y = (unsigned int)X;\
+    unsigned int N = ((Y - 0x100) >> 16) & 8;\
+    unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
+    N += K;\
+    N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
+    K = 14 - N + ((Y <<= K) >> 15);\
+    I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
+  }\
+}
+#endif /* GNUC */
+
+/* Bit representing maximum resolved size in a treebin at i */
+#define bit_for_tree_index(i) \
+   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
+
+/* Shift placing maximum resolved bit in a treebin at i as sign bit */
+#define leftshift_for_tree_index(i) \
+   ((i == NTREEBINS-1)? 0 : \
+    ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
+
+/* The size of the smallest chunk held in bin with index i */
+#define minsize_for_tree_index(i) \
+   ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) |  \
+   (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
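+
+/*
+  Worked example (illustrative): with TREEBIN_SHIFT == 8, a chunk of
+  size S == 768 gives X = 768 >> 8 == 3, so K == 1 (the index of the
+  highest set bit of X) and I == (1 << 1) + ((768 >> 8) & 1) == 3.
+  Consistently, minsize_for_tree_index(3) == (1 << 9) | (1 << 8) ==
+  768 and minsize_for_tree_index(4) == 1024, so treebin 3 holds
+  chunks with sizes in [768, 1024).
+*/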
+
+
+/* ------------------------ Operations on bin maps ----------------------- */
+
+/* bit corresponding to given index */
+#define idx2bit(i)              ((binmap_t)(1) << (i))
+
+/* Mark/Clear bits with given index */
+#define mark_smallmap(M,i)      ((M)->smallmap |=  idx2bit(i))
+#define clear_smallmap(M,i)     ((M)->smallmap &= ~idx2bit(i))
+#define smallmap_is_marked(M,i) ((M)->smallmap &   idx2bit(i))
+
+#define mark_treemap(M,i)       ((M)->treemap  |=  idx2bit(i))
+#define clear_treemap(M,i)      ((M)->treemap  &= ~idx2bit(i))
+#define treemap_is_marked(M,i)  ((M)->treemap  &   idx2bit(i))
+
+/* index corresponding to given bit */
+
+#if defined(__GNUC__) && defined(i386)
+#define compute_bit2idx(X, I)\
+{\
+  unsigned int J;\
+  __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
+  I = (bindex_t)J;\
+}
+
+#else /* GNUC */
+#if  USE_BUILTIN_FFS
+#define compute_bit2idx(X, I) I = ffs(X)-1
+
+#else /* USE_BUILTIN_FFS */
+#define compute_bit2idx(X, I)\
+{\
+  unsigned int Y = X - 1;\
+  unsigned int K = Y >> (16-4) & 16;\
+  unsigned int N = K;        Y >>= K;\
+  N += K = Y >> (8-3) &  8;  Y >>= K;\
+  N += K = Y >> (4-2) &  4;  Y >>= K;\
+  N += K = Y >> (2-1) &  2;  Y >>= K;\
+  N += K = Y >> (1-0) &  1;  Y >>= K;\
+  I = (bindex_t)(N + Y);\
+}
+#endif /* USE_BUILTIN_FFS */
+#endif /* GNUC */
+
+/* isolate the least set bit of a bitmap */
+#define least_bit(x)         ((x) & -(x))
+
+/* mask with all bits to left of least bit of x on */
+#define left_bits(x)         ((x<<1) | -(x<<1))
+
+/* mask with all bits to left of or equal to least bit of x on */
+#define same_or_left_bits(x) ((x) | -(x))
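+
+/*
+  Example (illustrative): if smallmap == 0x14 (bins 2 and 4
+  non-empty), then least_bit(0x14) == 0x04, and compute_bit2idx
+  recovers bin index 2 from it; left_bits(0x04) == ~(binmap_t)0x07
+  turns on every bit above bit 2, so (left_bits(idx2bit(i)) &
+  smallmap) exposes the non-empty bins strictly larger than bin i.
+*/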
+
+
+/* ----------------------- Runtime Check Support ------------------------- */
+
+/*
+  For security, the main invariant is that malloc/free/etc never
+  writes to a static address other than malloc_state, unless static
+  malloc_state itself has been corrupted, which cannot occur via
+  malloc (because of these checks). In essence this means that we
+  believe all pointers, sizes, maps etc held in malloc_state, but
+  check all of those linked or offsetted from other embedded data
+  structures.  These checks are interspersed with main code in a way
+  that tends to minimize their run-time cost.
+
+  When FOOTERS is defined, in addition to range checking, we also
+  verify footer fields of inuse chunks, which can be used to guarantee
+  that the mstate controlling malloc/free is intact.  This is a
+  streamlined version of the approach described by William Robertson
+  et al. in "Run-time Detection of Heap-based Overflows" (LISA '03,
+  http://www.usenix.org/events/lisa03/tech/robertson.html).  The
+  footer of an inuse chunk holds the xor of its mstate and a random
+  seed, which is checked upon calls to free() and realloc().  This is
+  (probabilistically) unguessable from outside the program, but can be
+  computed by any code successfully malloc'ing any chunk, so does not
+  itself provide protection against code that has already broken
+  security through some other means.  Unlike Robertson et al, we
+  always dynamically check addresses of all offset chunks (previous,
+  next, etc). This turns out to be cheaper than relying on hashes.
+*/
+
+#if !INSECURE
+/* Check if address a is at least as high as the lowest address from MORECORE or MMAP */
+#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
+/* Check if address of next chunk n is higher than base chunk p */
+#define ok_next(p, n)    ((char*)(p) < (char*)(n))
+/* Check if p has its cinuse bit on */
+#define ok_cinuse(p)     cinuse(p)
+/* Check if p has its pinuse bit on */
+#define ok_pinuse(p)     pinuse(p)
+
+#else /* !INSECURE */
+#define ok_address(M, a) (1)
+#define ok_next(b, n)    (1)
+#define ok_cinuse(p)     (1)
+#define ok_pinuse(p)     (1)
+#endif /* !INSECURE */
+
+#if (FOOTERS && !INSECURE)
+/* Check if (alleged) mstate m has expected magic field */
+#define ok_magic(M)      ((M)->magic == mparams.magic)
+#else  /* (FOOTERS && !INSECURE) */
+#define ok_magic(M)      (1)
+#endif /* (FOOTERS && !INSECURE) */
+
+
+/* In gcc, use __builtin_expect to minimize impact of checks */
+#if !INSECURE
+#if defined(__GNUC__) && __GNUC__ >= 3
+#define RTCHECK(e)  __builtin_expect(e, 1)
+#else /* GNUC */
+#define RTCHECK(e)  (e)
+#endif /* GNUC */
+#else /* !INSECURE */
+#define RTCHECK(e)  (1)
+#endif /* !INSECURE */
+
+/* macros to set up inuse chunks with or without footers */
+
+#if !FOOTERS
+
+#define mark_inuse_foot(M,p,s)
+
+/* Set cinuse bit and pinuse bit of next chunk */
+#define set_inuse(M,p,s)\
+  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
+  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
+
+/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
+#define set_inuse_and_pinuse(M,p,s)\
+  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
+
+/* Set size, cinuse and pinuse bit of this chunk */
+#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
+  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
+
+#else /* FOOTERS */
+
+/* Set foot of inuse chunk to be xor of mstate and seed */
+#define mark_inuse_foot(M,p,s)\
+  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
+
+#define get_mstate_for(p)\
+  ((mstate)(((mchunkptr)((char*)(p) +\
+    (chunksize(p))))->prev_foot ^ mparams.magic))
+
+#define set_inuse(M,p,s)\
+  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
+  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
+  mark_inuse_foot(M,p,s))
+
+#define set_inuse_and_pinuse(M,p,s)\
+  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
+  mark_inuse_foot(M,p,s))
+
+#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
+  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
+  mark_inuse_foot(M, p, s))
+
+#endif /* !FOOTERS */
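+
+/*
+  Sketch of how the footer is consumed on deallocation (illustrative):
+
+      mstate fm = get_mstate_for(mem2chunk(mem));
+      if (!ok_magic(fm)) {
+        USAGE_ERROR_ACTION(fm, mem2chunk(mem));
+        return;
+      }
+
+  A chunk whose footer has been overwritten fails the magic check
+  instead of directing the free to an arbitrary mstate.
+*/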
+
+/* ---------------------------- setting mparams -------------------------- */
+
+/* Initialize mparams */
+static int init_mparams(void) {
+  if (mparams.page_size == 0) {
+    size_t s;
+
+    mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
+    mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
+#if MORECORE_CONTIGUOUS
+    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
+#else  /* MORECORE_CONTIGUOUS */
+    mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
+#endif /* MORECORE_CONTIGUOUS */
+
+#if (FOOTERS && !INSECURE)
+    {
+#if USE_DEV_RANDOM
+      int fd;
+      unsigned char buf[sizeof(size_t)];
+      /* Try to use /dev/urandom, else fall back on using time */
+      if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
+          read(fd, buf, sizeof(buf)) == sizeof(buf)) {
+        s = *((size_t *) buf);
+        close(fd);
+      }
+      else
+#endif /* USE_DEV_RANDOM */
+        s = (size_t)(time(0) ^ (size_t)0x55555555U);
+
+      s |= (size_t)8U;    /* ensure nonzero */
+      s &= ~(size_t)7U;   /* improve chances of fault for bad values */
+
+    }
+#else /* (FOOTERS && !INSECURE) */
+    s = (size_t)0x58585858U;
+#endif /* (FOOTERS && !INSECURE) */
+    ACQUIRE_MAGIC_INIT_LOCK();
+    if (mparams.magic == 0) {
+      mparams.magic = s;
+      /* Set up lock for main malloc area */
+      INITIAL_LOCK(&gm->mutex);
+      gm->mflags = mparams.default_mflags;
+    }
+    RELEASE_MAGIC_INIT_LOCK();
+
+#ifndef WIN32
+    mparams.page_size = malloc_getpagesize;
+    mparams.granularity = ((DEFAULT_GRANULARITY != 0)?
+                           DEFAULT_GRANULARITY : mparams.page_size);
+#else /* WIN32 */
+    {
+      SYSTEM_INFO system_info;
+      GetSystemInfo(&system_info);
+      mparams.page_size = system_info.dwPageSize;
+      mparams.granularity = system_info.dwAllocationGranularity;
+    }
+#endif /* WIN32 */
+
+    /* Sanity-check configuration:
+       size_t must be unsigned and as wide as pointer type.
+       ints must be at least 4 bytes.
+       alignment must be at least 8.
+       Alignment, min chunk size, and page size must all be powers of 2.
+    */
+    if ((sizeof(size_t) != sizeof(char*)) ||
+        (MAX_SIZE_T < MIN_CHUNK_SIZE)  ||
+        (sizeof(int) < 4)  ||
+        (MALLOC_ALIGNMENT < (size_t)8U) ||
+        ((MALLOC_ALIGNMENT    & (MALLOC_ALIGNMENT-SIZE_T_ONE))    != 0) ||
+        ((MCHUNK_SIZE         & (MCHUNK_SIZE-SIZE_T_ONE))         != 0) ||
+        ((mparams.granularity & (mparams.granularity-SIZE_T_ONE)) != 0) ||
+        ((mparams.page_size   & (mparams.page_size-SIZE_T_ONE))   != 0))
+      ABORT;
+  }
+  return 0;
+}
+
+/* support for mallopt */
+static int change_mparam(int param_number, int value) {
+  size_t val = (size_t)value;
+  init_mparams();
+  switch(param_number) {
+  case M_TRIM_THRESHOLD:
+    mparams.trim_threshold = val;
+    return 1;
+  case M_GRANULARITY:
+    if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
+      mparams.granularity = val;
+      return 1;
+    }
+    else
+      return 0;
+  case M_MMAP_THRESHOLD:
+    mparams.mmap_threshold = val;
+    return 1;
+  default:
+    return 0;
+  }
+}
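+
+/*
+  Example (illustrative): mallopt(M_GRANULARITY, 64 * 1024) relays to
+  change_mparam and succeeds only when that value is at least the
+  system page size and a power of two, per the check above; the trim
+  and mmap thresholds accept any value.
+*/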
+
+#if DEBUG
+/* ------------------------- Debugging Support --------------------------- */
+
+/* Check properties of any chunk, whether free, inuse, mmapped etc  */
+static void do_check_any_chunk(mstate m, mchunkptr p) {
+  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
+  assert(ok_address(m, p));
+}
+
+/* Check properties of top chunk */
+static void do_check_top_chunk(mstate m, mchunkptr p) {
+  msegmentptr sp = segment_holding(m, (char*)p);
+  size_t  sz = chunksize(p);
+  assert(sp != 0);
+  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
+  assert(ok_address(m, p));
+  assert(sz == m->topsize);
+  assert(sz > 0);
+  assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
+  assert(pinuse(p));
+  assert(!next_pinuse(p));
+}
+
+/* Check properties of (inuse) mmapped chunks */
+static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
+  size_t  sz = chunksize(p);
+  size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
+  assert(is_mmapped(p));
+  assert(use_mmap(m));
+  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
+  assert(ok_address(m, p));
+  assert(!is_small(sz));
+  assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
+  assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
+  assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
+}
+
+/* Check properties of inuse chunks */
+static void do_check_inuse_chunk(mstate m, mchunkptr p) {
+  do_check_any_chunk(m, p);
+  assert(cinuse(p));
+  assert(next_pinuse(p));
+  /* If not pinuse and not mmapped, previous chunk has OK offset */
+  assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
+  if (is_mmapped(p))
+    do_check_mmapped_chunk(m, p);
+}
+
+/* Check properties of free chunks */
+static void do_check_free_chunk(mstate m, mchunkptr p) {
+  size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
+  mchunkptr next = chunk_plus_offset(p, sz);
+  do_check_any_chunk(m, p);
+  assert(!cinuse(p));
+  assert(!next_pinuse(p));
+  assert (!is_mmapped(p));
+  if (p != m->dv && p != m->top) {
+    if (sz >= MIN_CHUNK_SIZE) {
+      assert((sz & CHUNK_ALIGN_MASK) == 0);
+      assert(is_aligned(chunk2mem(p)));
+      assert(next->prev_foot == sz);
+      assert(pinuse(p));
+      assert (next == m->top || cinuse(next));
+      assert(p->fd->bk == p);
+      assert(p->bk->fd == p);
+    }
+    else  /* markers are always of size SIZE_T_SIZE */
+      assert(sz == SIZE_T_SIZE);
+  }
+}
+
+/* Check properties of malloced chunks at the point they are malloced */
+static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
+  if (mem != 0) {
+    mchunkptr p = mem2chunk(mem);
+    size_t sz = p->head & ~(PINUSE_BIT|CINUSE_BIT);
+    do_check_inuse_chunk(m, p);
+    assert((sz & CHUNK_ALIGN_MASK) == 0);
+    assert(sz >= MIN_CHUNK_SIZE);
+    assert(sz >= s);
+    /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
+    assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
+  }
+}
+
+/* Check a tree and its subtrees.  */
+static void do_check_tree(mstate m, tchunkptr t) {
+  tchunkptr head = 0;
+  tchunkptr u = t;
+  bindex_t tindex = t->index;
+  size_t tsize = chunksize(t);
+  bindex_t idx;
+  compute_tree_index(tsize, idx);
+  assert(tindex == idx);
+  assert(tsize >= MIN_LARGE_SIZE);
+  assert(tsize >= minsize_for_tree_index(idx));
+  assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
+
+  do { /* traverse through chain of same-sized nodes */
+    do_check_any_chunk(m, ((mchunkptr)u));
+    assert(u->index == tindex);
+    assert(chunksize(u) == tsize);
+    assert(!cinuse(u));
+    assert(!next_pinuse(u));
+    assert(u->fd->bk == u);
+    assert(u->bk->fd == u);
+    if (u->parent == 0) {
+      assert(u->child[0] == 0);
+      assert(u->child[1] == 0);
+    }
+    else {
+      assert(head == 0); /* only one node on chain has parent */
+      head = u;
+      assert(u->parent != u);
+      assert (u->parent->child[0] == u ||
+              u->parent->child[1] == u ||
+              *((tbinptr*)(u->parent)) == u);
+      if (u->child[0] != 0) {
+        assert(u->child[0]->parent == u);
+        assert(u->child[0] != u);
+        do_check_tree(m, u->child[0]);
+      }
+      if (u->child[1] != 0) {
+        assert(u->child[1]->parent == u);
+        assert(u->child[1] != u);
+        do_check_tree(m, u->child[1]);
+      }
+      if (u->child[0] != 0 && u->child[1] != 0) {
+        assert(chunksize(u->child[0]) < chunksize(u->child[1]));
+      }
+    }
+    u = u->fd;
+  } while (u != t);
+  assert(head != 0);
+}
+
+/*  Check all the chunks in a treebin.  */
+static void do_check_treebin(mstate m, bindex_t i) {
+  tbinptr* tb = treebin_at(m, i);
+  tchunkptr t = *tb;
+  int empty = (m->treemap & (1U << i)) == 0;
+  if (t == 0)
+    assert(empty);
+  if (!empty)
+    do_check_tree(m, t);
+}
+
+/*  Check all the chunks in a smallbin.  */
+static void do_check_smallbin(mstate m, bindex_t i) {
+  sbinptr b = smallbin_at(m, i);
+  mchunkptr p = b->bk;
+  unsigned int empty = (m->smallmap & (1U << i)) == 0;
+  if (p == b)
+    assert(empty);
+  if (!empty) {
+    for (; p != b; p = p->bk) {
+      size_t size = chunksize(p);
+      mchunkptr q;
+      /* each chunk claims to be free */
+      do_check_free_chunk(m, p);
+      /* chunk belongs in bin */
+      assert(small_index(size) == i);
+      assert(p->bk == b || chunksize(p->bk) == chunksize(p));
+      /* chunk is followed by an inuse chunk */
+      q = next_chunk(p);
+      if (q->head != FENCEPOST_HEAD)
+        do_check_inuse_chunk(m, q);
+    }
+  }
+}
+
+/* Find x in a bin. Used in other check functions. */
+static int bin_find(mstate m, mchunkptr x) {
+  size_t size = chunksize(x);
+  if (is_small(size)) {
+    bindex_t sidx = small_index(size);
+    sbinptr b = smallbin_at(m, sidx);
+    if (smallmap_is_marked(m, sidx)) {
+      mchunkptr p = b;
+      do {
+        if (p == x)
+          return 1;
+      } while ((p = p->fd) != b);
+    }
+  }
+  else {
+    bindex_t tidx;
+    compute_tree_index(size, tidx);
+    if (treemap_is_marked(m, tidx)) {
+      tchunkptr t = *treebin_at(m, tidx);
+      size_t sizebits = size << leftshift_for_tree_index(tidx);
+      while (t != 0 && chunksize(t) != size) {
+        t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
+        sizebits <<= 1;
+      }
+      if (t != 0) {
+        tchunkptr u = t;
+        do {
+          if (u == (tchunkptr)x)
+            return 1;
+        } while ((u = u->fd) != t);
+      }
+    }
+  }
+  return 0;
+}
+
+/* Traverse each chunk and check it; return total */
+static size_t traverse_and_check(mstate m) {
+  size_t sum = 0;
+  if (is_initialized(m)) {
+    msegmentptr s = &m->seg;
+    sum += m->topsize + TOP_FOOT_SIZE;
+    while (s != 0) {
+      mchunkptr q = align_as_chunk(s->base);
+      mchunkptr lastq = 0;
+      assert(pinuse(q));
+      while (segment_holds(s, q) &&
+             q != m->top && q->head != FENCEPOST_HEAD) {
+        sum += chunksize(q);
+        if (cinuse(q)) {
+          assert(!bin_find(m, q));
+          do_check_inuse_chunk(m, q);
+        }
+        else {
+          assert(q == m->dv || bin_find(m, q));
+        assert(lastq == 0 || cinuse(lastq)); /* no two consecutive free chunks */
+          do_check_free_chunk(m, q);
+        }
+        lastq = q;
+        q = next_chunk(q);
+      }
+      s = s->next;
+    }
+  }
+  return sum;
+}
+
+/* Check all properties of malloc_state. */
+static void do_check_malloc_state(mstate m) {
+  bindex_t i;
+  size_t total;
+  /* check bins */
+  for (i = 0; i < NSMALLBINS; ++i)
+    do_check_smallbin(m, i);
+  for (i = 0; i < NTREEBINS; ++i)
+    do_check_treebin(m, i);
+
+  if (m->dvsize != 0) { /* check dv chunk */
+    do_check_any_chunk(m, m->dv);
+    assert(m->dvsize == chunksize(m->dv));
+    assert(m->dvsize >= MIN_CHUNK_SIZE);
+    assert(bin_find(m, m->dv) == 0);
+  }
+
+  if (m->top != 0) {   /* check top chunk */
+    do_check_top_chunk(m, m->top);
+    assert(m->topsize == chunksize(m->top));
+    assert(m->topsize > 0);
+    assert(bin_find(m, m->top) == 0);
+  }
+
+  total = traverse_and_check(m);
+  assert(total <= m->footprint);
+  assert(m->footprint <= m->max_footprint);
+#if USE_MAX_ALLOWED_FOOTPRINT
+  /* TODO: change these assertions if we allow for shrinking. */
+  assert(m->footprint <= m->max_allowed_footprint);
+  assert(m->max_footprint <= m->max_allowed_footprint);
+#endif
+}
+#endif /* DEBUG */
+
+/* ----------------------------- statistics ------------------------------ */
+
+#if !NO_MALLINFO
+static struct mallinfo internal_mallinfo(mstate m) {
+  struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+  if (!PREACTION(m)) {
+    check_malloc_state(m);
+    if (is_initialized(m)) {
+      size_t nfree = SIZE_T_ONE; /* top always free */
+      size_t mfree = m->topsize + TOP_FOOT_SIZE;
+      size_t sum = mfree;
+      msegmentptr s = &m->seg;
+      while (s != 0) {
+        mchunkptr q = align_as_chunk(s->base);
+        while (segment_holds(s, q) &&
+               q != m->top && q->head != FENCEPOST_HEAD) {
+          size_t sz = chunksize(q);
+          sum += sz;
+          if (!cinuse(q)) {
+            mfree += sz;
+            ++nfree;
+          }
+          q = next_chunk(q);
+        }
+        s = s->next;
+      }
+
+      nm.arena    = sum;
+      nm.ordblks  = nfree;
+      nm.hblkhd   = m->footprint - sum;
+      nm.usmblks  = m->max_footprint;
+      nm.uordblks = m->footprint - mfree;
+      nm.fordblks = mfree;
+      nm.keepcost = m->topsize;
+    }
+
+    POSTACTION(m);
+  }
+  return nm;
+}
+#endif /* !NO_MALLINFO */
+
+static void internal_malloc_stats(mstate m) {
+  if (!PREACTION(m)) {
+    size_t maxfp = 0;
+    size_t fp = 0;
+    size_t used = 0;
+    check_malloc_state(m);
+    if (is_initialized(m)) {
+      msegmentptr s = &m->seg;
+      maxfp = m->max_footprint;
+      fp = m->footprint;
+      used = fp - (m->topsize + TOP_FOOT_SIZE);
+
+      while (s != 0) {
+        mchunkptr q = align_as_chunk(s->base);
+        while (segment_holds(s, q) &&
+               q != m->top && q->head != FENCEPOST_HEAD) {
+          if (!cinuse(q))
+            used -= chunksize(q);
+          q = next_chunk(q);
+        }
+        s = s->next;
+      }
+    }
+
+    fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
+    fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
+    fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));
+
+    POSTACTION(m);
+  }
+}
+
+/* ----------------------- Operations on smallbins ----------------------- */
+
+/*
+  Various forms of linking and unlinking are defined as macros, even
+  the ones for trees, which are very long but have very short typical
+  paths.  This is ugly but reduces reliance on the inlining support of
+  compilers.
+*/
+
+/* Link a free chunk into a smallbin  */
+#define insert_small_chunk(M, P, S) {\
+  bindex_t I  = small_index(S);\
+  mchunkptr B = smallbin_at(M, I);\
+  mchunkptr F = B;\
+  assert(S >= MIN_CHUNK_SIZE);\
+  if (!smallmap_is_marked(M, I))\
+    mark_smallmap(M, I);\
+  else if (RTCHECK(ok_address(M, B->fd)))\
+    F = B->fd;\
+  else {\
+    CORRUPTION_ERROR_ACTION(M);\
+  }\
+  B->fd = P;\
+  F->bk = P;\
+  P->fd = F;\
+  P->bk = B;\
+}
+
+/* Unlink a chunk from a smallbin
+ * Added check: if F->bk != P or B->fd != P, we have doubly linked list
+ * corruption, and abort.
+ */
+#define unlink_small_chunk(M, P, S) {\
+  mchunkptr F = P->fd;\
+  mchunkptr B = P->bk;\
+  bindex_t I = small_index(S);\
+  if (__builtin_expect (F->bk != P || B->fd != P, 0))\
+    CORRUPTION_ERROR_ACTION(M);\
+  assert(P != B);\
+  assert(P != F);\
+  assert(chunksize(P) == small_index2size(I));\
+  if (F == B)\
+    clear_smallmap(M, I);\
+  else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
+                   (B == smallbin_at(M,I) || ok_address(M, B)))) {\
+    F->bk = B;\
+    B->fd = F;\
+  }\
+  else {\
+    CORRUPTION_ERROR_ACTION(M);\
+  }\
+}
+
+/* Unlink the first chunk from a smallbin
+ * Added check: if F->bk != P or B->fd != P, we have doubly linked list
+ * corruption, and abort.
+ */
+#define unlink_first_small_chunk(M, B, P, I) {\
+  mchunkptr F = P->fd;\
+  if (__builtin_expect (F->bk != P || B->fd != P, 0))\
+    CORRUPTION_ERROR_ACTION(M);\
+  assert(P != B);\
+  assert(P != F);\
+  assert(chunksize(P) == small_index2size(I));\
+  if (B == F)\
+    clear_smallmap(M, I);\
+  else if (RTCHECK(ok_address(M, F))) {\
+    B->fd = F;\
+    F->bk = B;\
+  }\
+  else {\
+    CORRUPTION_ERROR_ACTION(M);\
+  }\
+}
+
+/* Replace dv node, binning the old one */
+/* Used only when dvsize known to be small */
+#define replace_dv(M, P, S) {\
+  size_t DVS = M->dvsize;\
+  if (DVS != 0) {\
+    mchunkptr DV = M->dv;\
+    assert(is_small(DVS));\
+    insert_small_chunk(M, DV, DVS);\
+  }\
+  M->dvsize = S;\
+  M->dv = P;\
+}
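+
+/*
+  Usage note (illustrative): replace_dv is invoked when a small free
+  chunk is split to service a request (as in tmalloc_small below):
+  the remainder becomes the new designated victim, and any previous
+  dv is returned to a smallbin.
+*/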
+
+/* ------------------------- Operations on trees ------------------------- */
+
+/* Insert chunk into tree */
+#define insert_large_chunk(M, X, S) {\
+  tbinptr* H;\
+  bindex_t I;\
+  compute_tree_index(S, I);\
+  H = treebin_at(M, I);\
+  X->index = I;\
+  X->child[0] = X->child[1] = 0;\
+  if (!treemap_is_marked(M, I)) {\
+    mark_treemap(M, I);\
+    *H = X;\
+    X->parent = (tchunkptr)H;\
+    X->fd = X->bk = X;\
+  }\
+  else {\
+    tchunkptr T = *H;\
+    size_t K = S << leftshift_for_tree_index(I);\
+    for (;;) {\
+      if (chunksize(T) != S) {\
+        tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
+        K <<= 1;\
+        if (*C != 0)\
+          T = *C;\
+        else if (RTCHECK(ok_address(M, C))) {\
+          *C = X;\
+          X->parent = T;\
+          X->fd = X->bk = X;\
+          break;\
+        }\
+        else {\
+          CORRUPTION_ERROR_ACTION(M);\
+          break;\
+        }\
+      }\
+      else {\
+        tchunkptr F = T->fd;\
+        if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
+          T->fd = F->bk = X;\
+          X->fd = F;\
+          X->bk = T;\
+          X->parent = 0;\
+          break;\
+        }\
+        else {\
+          CORRUPTION_ERROR_ACTION(M);\
+          break;\
+        }\
+      }\
+    }\
+  }\
+}
+
+/*
+  Unlink steps:
+
+  1. If x is a chained node, unlink it from its same-sized fd/bk links
+     and choose its bk node as its replacement.
+  2. If x was the last node of its size, but not a leaf node, it must
+     be replaced with a leaf node (not merely one with an open left or
+     right), to make sure that lefts and rights of descendants
+     correspond properly to bit masks.  We use the rightmost descendent
+     of x.  We could use any other leaf, but this is easy to locate and
+     tends to counteract removal of leftmosts elsewhere, and so keeps
+     paths shorter than minimally guaranteed.  This doesn't loop much
+     because on average a node in a tree is near the bottom.
+  3. If x is the base of a chain (i.e., has parent links) relink
+     x's parent and children to x's replacement (or null if none).
+
+  Added check: if F->bk != X or R->fd != X, we have doubly linked list
+  corruption, and abort.
+*/
+
+#define unlink_large_chunk(M, X) {\
+  tchunkptr XP = X->parent;\
+  tchunkptr R;\
+  if (X->bk != X) {\
+    tchunkptr F = X->fd;\
+    R = X->bk;\
+    if (__builtin_expect (F->bk != X || R->fd != X, 0))\
+      CORRUPTION_ERROR_ACTION(M);\
+    if (RTCHECK(ok_address(M, F))) {\
+      F->bk = R;\
+      R->fd = F;\
+    }\
+    else {\
+      CORRUPTION_ERROR_ACTION(M);\
+    }\
+  }\
+  else {\
+    tchunkptr* RP;\
+    if (((R = *(RP = &(X->child[1]))) != 0) ||\
+        ((R = *(RP = &(X->child[0]))) != 0)) {\
+      tchunkptr* CP;\
+      while ((*(CP = &(R->child[1])) != 0) ||\
+             (*(CP = &(R->child[0])) != 0)) {\
+        R = *(RP = CP);\
+      }\
+      if (RTCHECK(ok_address(M, RP)))\
+        *RP = 0;\
+      else {\
+        CORRUPTION_ERROR_ACTION(M);\
+      }\
+    }\
+  }\
+  if (XP != 0) {\
+    tbinptr* H = treebin_at(M, X->index);\
+    if (X == *H) {\
+      if ((*H = R) == 0) \
+        clear_treemap(M, X->index);\
+    }\
+    else if (RTCHECK(ok_address(M, XP))) {\
+      if (XP->child[0] == X) \
+        XP->child[0] = R;\
+      else \
+        XP->child[1] = R;\
+    }\
+    else\
+      CORRUPTION_ERROR_ACTION(M);\
+    if (R != 0) {\
+      if (RTCHECK(ok_address(M, R))) {\
+        tchunkptr C0, C1;\
+        R->parent = XP;\
+        if ((C0 = X->child[0]) != 0) {\
+          if (RTCHECK(ok_address(M, C0))) {\
+            R->child[0] = C0;\
+            C0->parent = R;\
+          }\
+          else\
+            CORRUPTION_ERROR_ACTION(M);\
+        }\
+        if ((C1 = X->child[1]) != 0) {\
+          if (RTCHECK(ok_address(M, C1))) {\
+            R->child[1] = C1;\
+            C1->parent = R;\
+          }\
+          else\
+            CORRUPTION_ERROR_ACTION(M);\
+        }\
+      }\
+      else\
+        CORRUPTION_ERROR_ACTION(M);\
+    }\
+  }\
+}
+
+/* Relays to large vs small bin operations */
+
+#define insert_chunk(M, P, S)\
+  if (is_small(S)) insert_small_chunk(M, P, S)\
+  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
+
+#define unlink_chunk(M, P, S)\
+  if (is_small(S)) unlink_small_chunk(M, P, S)\
+  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
+
+
+/* Relays to internal calls to malloc/free from realloc, memalign etc */
+
+#if ONLY_MSPACES
+#define internal_malloc(m, b) mspace_malloc(m, b)
+#define internal_free(m, mem) mspace_free(m,mem);
+#else /* ONLY_MSPACES */
+#if MSPACES
+#define internal_malloc(m, b)\
+   (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
+#define internal_free(m, mem)\
+   if (m == gm) dlfree(mem); else mspace_free(m,mem);
+#else /* MSPACES */
+#define internal_malloc(m, b) dlmalloc(b)
+#define internal_free(m, mem) dlfree(mem)
+#endif /* MSPACES */
+#endif /* ONLY_MSPACES */
+
+/* -----------------------  Direct-mmapping chunks ----------------------- */
+
+/*
+  Directly mmapped chunks are set up with an offset to the start of
+  the mmapped region stored in the prev_foot field of the chunk. This
+  allows reconstruction of the required argument to MUNMAP when freed,
+  and also allows adjustment of the returned chunk to meet alignment
+  requirements (especially in memalign).  There is also enough space
+  allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
+  the PINUSE bit so frees can be checked.
+*/
+
+/* Malloc using mmap */
+static void* mmap_alloc(mstate m, size_t nb) {
+  size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
+#if USE_MAX_ALLOWED_FOOTPRINT
+  size_t new_footprint = m->footprint + mmsize;
+  if (new_footprint <= m->footprint ||  /* Check for wrap around 0 */
+      new_footprint > m->max_allowed_footprint)
+    return 0;
+#endif
+  if (mmsize > nb) {     /* Check for wrap around 0 */
+    char* mm = (char*)(DIRECT_MMAP(mmsize));
+    if (mm != CMFAIL) {
+      size_t offset = align_offset(chunk2mem(mm));
+      size_t psize = mmsize - offset - MMAP_FOOT_PAD;
+      mchunkptr p = (mchunkptr)(mm + offset);
+      p->prev_foot = offset | IS_MMAPPED_BIT;
+      (p)->head = (psize|CINUSE_BIT);
+      mark_inuse_foot(m, p, psize);
+      chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
+      chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
+
+      if (mm < m->least_addr)
+        m->least_addr = mm;
+      if ((m->footprint += mmsize) > m->max_footprint)
+        m->max_footprint = m->footprint;
+      assert(is_aligned(chunk2mem(p)));
+      check_mmapped_chunk(m, p);
+      return chunk2mem(p);
+    }
+  }
+  return 0;
+}
+
+/* Realloc using mmap */
+static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb) {
+  size_t oldsize = chunksize(oldp);
+  if (is_small(nb)) /* Can't shrink mmap regions below small size */
+    return 0;
+  /* Keep old chunk if big enough but not too big */
+  if (oldsize >= nb + SIZE_T_SIZE &&
+      (oldsize - nb) <= (mparams.granularity << 1))
+    return oldp;
+  else {
+    size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
+    size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
+    size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
+                                         CHUNK_ALIGN_MASK);
+    char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
+                                  oldmmsize, newmmsize, 1);
+    if (cp != CMFAIL) {
+      mchunkptr newp = (mchunkptr)(cp + offset);
+      size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
+      newp->head = (psize|CINUSE_BIT);
+      mark_inuse_foot(m, newp, psize);
+      chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
+      chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
+
+      if (cp < m->least_addr)
+        m->least_addr = cp;
+      if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
+        m->max_footprint = m->footprint;
+      check_mmapped_chunk(m, newp);
+      return newp;
+    }
+  }
+  return 0;
+}
+
+/* -------------------------- mspace management -------------------------- */
+
+/* Initialize top chunk and its size */
+static void init_top(mstate m, mchunkptr p, size_t psize) {
+  /* Ensure alignment */
+  size_t offset = align_offset(chunk2mem(p));
+  p = (mchunkptr)((char*)p + offset);
+  psize -= offset;
+
+  m->top = p;
+  m->topsize = psize;
+  p->head = psize | PINUSE_BIT;
+  /* set size of fake trailing chunk holding overhead space only once */
+  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
+  m->trim_check = mparams.trim_threshold; /* reset on each update */
+}
+
+/* Initialize bins for a new mstate that is otherwise zeroed out */
+static void init_bins(mstate m) {
+  /* Establish circular links for smallbins */
+  bindex_t i;
+  for (i = 0; i < NSMALLBINS; ++i) {
+    sbinptr bin = smallbin_at(m,i);
+    bin->fd = bin->bk = bin;
+  }
+}
+
+#if PROCEED_ON_ERROR
+
+/* default corruption action */
+static void reset_on_error(mstate m) {
+  int i;
+  ++malloc_corruption_error_count;
+  /* Reinitialize fields to forget about all memory */
+  m->smallmap = m->treemap = 0;
+  m->dvsize = m->topsize = 0;
+  m->seg.base = 0;
+  m->seg.size = 0;
+  m->seg.next = 0;
+  m->top = m->dv = 0;
+  for (i = 0; i < NTREEBINS; ++i)
+    *treebin_at(m, i) = 0;
+  init_bins(m);
+}
+#endif /* PROCEED_ON_ERROR */
+
+/* Allocate a chunk from newbase and consolidate the remainder with
+   the first chunk at oldbase (its successor base). */
+static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
+                           size_t nb) {
+  mchunkptr p = align_as_chunk(newbase);
+  mchunkptr oldfirst = align_as_chunk(oldbase);
+  size_t psize = (char*)oldfirst - (char*)p;
+  mchunkptr q = chunk_plus_offset(p, nb);
+  size_t qsize = psize - nb;
+  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
+
+  assert((char*)oldfirst > (char*)q);
+  assert(pinuse(oldfirst));
+  assert(qsize >= MIN_CHUNK_SIZE);
+
+  /* consolidate remainder with first chunk of old base */
+  if (oldfirst == m->top) {
+    size_t tsize = m->topsize += qsize;
+    m->top = q;
+    q->head = tsize | PINUSE_BIT;
+    check_top_chunk(m, q);
+  }
+  else if (oldfirst == m->dv) {
+    size_t dsize = m->dvsize += qsize;
+    m->dv = q;
+    set_size_and_pinuse_of_free_chunk(q, dsize);
+  }
+  else {
+    if (!cinuse(oldfirst)) {
+      size_t nsize = chunksize(oldfirst);
+      unlink_chunk(m, oldfirst, nsize);
+      oldfirst = chunk_plus_offset(oldfirst, nsize);
+      qsize += nsize;
+    }
+    set_free_with_pinuse(q, qsize, oldfirst);
+    insert_chunk(m, q, qsize);
+    check_free_chunk(m, q);
+  }
+
+  check_malloced_chunk(m, chunk2mem(p), nb);
+  return chunk2mem(p);
+}
+
+
+/* Add a segment to hold a new noncontiguous region */
+static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
+  /* Determine locations and sizes of segment, fenceposts, old top */
+  char* old_top = (char*)m->top;
+  msegmentptr oldsp = segment_holding(m, old_top);
+  char* old_end = oldsp->base + oldsp->size;
+  size_t ssize = pad_request(sizeof(struct malloc_segment));
+  char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
+  size_t offset = align_offset(chunk2mem(rawsp));
+  char* asp = rawsp + offset;
+  char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
+  mchunkptr sp = (mchunkptr)csp;
+  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
+  mchunkptr tnext = chunk_plus_offset(sp, ssize);
+  mchunkptr p = tnext;
+  int nfences = 0;
+
+  /* reset top to new space */
+  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
+
+  /* Set up segment record */
+  assert(is_aligned(ss));
+  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
+  *ss = m->seg; /* Push current record */
+  m->seg.base = tbase;
+  m->seg.size = tsize;
+  m->seg.sflags = mmapped;
+  m->seg.next = ss;
+
+  /* Insert trailing fenceposts */
+  for (;;) {
+    mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
+    p->head = FENCEPOST_HEAD;
+    ++nfences;
+    if ((char*)(&(nextp->head)) < old_end)
+      p = nextp;
+    else
+      break;
+  }
+  assert(nfences >= 2);
+
+  /* Insert the rest of old top into a bin as an ordinary free chunk */
+  if (csp != old_top) {
+    mchunkptr q = (mchunkptr)old_top;
+    size_t psize = csp - old_top;
+    mchunkptr tn = chunk_plus_offset(q, psize);
+    set_free_with_pinuse(q, psize, tn);
+    insert_chunk(m, q, psize);
+  }
+
+  check_top_chunk(m, m->top);
+}
+
+/* -------------------------- System allocation -------------------------- */
+
+/* Get memory from system using MORECORE or MMAP */
+static void* sys_alloc(mstate m, size_t nb) {
+  char* tbase = CMFAIL;
+  size_t tsize = 0;
+  flag_t mmap_flag = 0;
+
+  init_mparams();
+
+  /* Directly map large chunks */
+  if (use_mmap(m) && nb >= mparams.mmap_threshold) {
+    void* mem = mmap_alloc(m, nb);
+    if (mem != 0)
+      return mem;
+  }
+
+#if USE_MAX_ALLOWED_FOOTPRINT
+  /* Make sure the footprint doesn't grow past max_allowed_footprint.
+   * This covers all cases except for where we need to page align, below.
+   */
+  {
+    size_t new_footprint = m->footprint +
+                           granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
+    if (new_footprint <= m->footprint ||  /* Check for wrap around 0 */
+        new_footprint > m->max_allowed_footprint)
+      return 0;
+  }
+#endif
+
+  /*
+    Try getting memory in any of three ways (in most-preferred to
+    least-preferred order):
+    1. A call to MORECORE that can normally contiguously extend memory.
+       (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
+       main space is mmapped or a previous contiguous call failed)
+    2. A call to MMAP new space (disabled if not HAVE_MMAP).
+       Note that under the default settings, if MORECORE is unable to
+       fulfill a request, and HAVE_MMAP is true, then mmap is
+       used as a noncontiguous system allocator. This is a useful backup
+       strategy for systems with holes in address spaces -- in this case
+       sbrk cannot contiguously expand the heap, but mmap may be able to
+       find space.
+    3. A call to MORECORE that cannot usually contiguously extend memory.
+       (disabled if not HAVE_MORECORE)
+  */
+
+  if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
+    char* br = CMFAIL;
+    msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
+    size_t asize = 0;
+    ACQUIRE_MORECORE_LOCK();
+
+    if (ss == 0) {  /* First time through or recovery */
+      char* base = (char*)CALL_MORECORE(0);
+      if (base != CMFAIL) {
+        asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
+        /* Adjust to end on a page boundary */
+        if (!is_page_aligned(base)) {
+          asize += (page_align((size_t)base) - (size_t)base);
+#if USE_MAX_ALLOWED_FOOTPRINT
+          /* If the alignment pushes us over max_allowed_footprint,
+           * poison the upcoming call to MORECORE and continue.
+           */
+          {
+            size_t new_footprint = m->footprint + asize;
+            if (new_footprint <= m->footprint ||  /* Check for wrap around 0 */
+                new_footprint > m->max_allowed_footprint) {
+              asize = HALF_MAX_SIZE_T;
+            }
+          }
+#endif
+        }
+        /* Can't call MORECORE if size is negative when treated as signed */
+        if (asize < HALF_MAX_SIZE_T &&
+            (br = (char*)(CALL_MORECORE(asize))) == base) {
+          tbase = base;
+          tsize = asize;
+        }
+      }
+    }
+    else {
+      /* Subtract out existing available top space from MORECORE request. */
+      asize = granularity_align(nb - m->topsize + TOP_FOOT_SIZE + SIZE_T_ONE);
+      /* Use mem here only if it did contiguously extend old space */
+      if (asize < HALF_MAX_SIZE_T &&
+          (br = (char*)(CALL_MORECORE(asize))) == ss->base+ss->size) {
+        tbase = br;
+        tsize = asize;
+      }
+    }
+
+    if (tbase == CMFAIL) {    /* Cope with partial failure */
+      if (br != CMFAIL) {    /* Try to use/extend the space we did get */
+        if (asize < HALF_MAX_SIZE_T &&
+            asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
+          size_t esize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE - asize);
+          if (esize < HALF_MAX_SIZE_T) {
+            char* end = (char*)CALL_MORECORE(esize);
+            if (end != CMFAIL)
+              asize += esize;
+            else {            /* Can't use; try to release */
+              CALL_MORECORE(-asize);
+              br = CMFAIL;
+            }
+          }
+        }
+      }
+      if (br != CMFAIL) {    /* Use the space we did get */
+        tbase = br;
+        tsize = asize;
+      }
+      else
+        disable_contiguous(m); /* Don't try contiguous path in the future */
+    }
+
+    RELEASE_MORECORE_LOCK();
+  }
+
+  if (HAVE_MMAP && tbase == CMFAIL) {  /* Try MMAP */
+    size_t req = nb + TOP_FOOT_SIZE + SIZE_T_ONE;
+    size_t rsize = granularity_align(req);
+    if (rsize > nb) { /* Fail if wraps around zero */
+      char* mp = (char*)(CALL_MMAP(rsize));
+      if (mp != CMFAIL) {
+        tbase = mp;
+        tsize = rsize;
+        mmap_flag = IS_MMAPPED_BIT;
+      }
+    }
+  }
+
+  if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
+    size_t asize = granularity_align(nb + TOP_FOOT_SIZE + SIZE_T_ONE);
+    if (asize < HALF_MAX_SIZE_T) {
+      char* br = CMFAIL;
+      char* end = CMFAIL;
+      ACQUIRE_MORECORE_LOCK();
+      br = (char*)(CALL_MORECORE(asize));
+      end = (char*)(CALL_MORECORE(0));
+      RELEASE_MORECORE_LOCK();
+      if (br != CMFAIL && end != CMFAIL && br < end) {
+        size_t ssize = end - br;
+        if (ssize > nb + TOP_FOOT_SIZE) {
+          tbase = br;
+          tsize = ssize;
+        }
+      }
+    }
+  }
+
+  if (tbase != CMFAIL) {
+
+    if ((m->footprint += tsize) > m->max_footprint)
+      m->max_footprint = m->footprint;
+
+    if (!is_initialized(m)) { /* first-time initialization */
+      m->seg.base = m->least_addr = tbase;
+      m->seg.size = tsize;
+      m->seg.sflags = mmap_flag;
+      m->magic = mparams.magic;
+      init_bins(m);
+      if (is_global(m)) 
+        init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
+      else {
+        /* Offset top by embedded malloc_state */
+        mchunkptr mn = next_chunk(mem2chunk(m));
+        init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
+      }
+    }
+
+    else {
+      /* Try to merge with an existing segment */
+      msegmentptr sp = &m->seg;
+      while (sp != 0 && tbase != sp->base + sp->size)
+        sp = sp->next;
+      if (sp != 0 &&
+          !is_extern_segment(sp) &&
+          (sp->sflags & IS_MMAPPED_BIT) == mmap_flag &&
+          segment_holds(sp, m->top)) { /* append */
+        sp->size += tsize;
+        init_top(m, m->top, m->topsize + tsize);
+      }
+      else {
+        if (tbase < m->least_addr)
+          m->least_addr = tbase;
+        sp = &m->seg;
+        while (sp != 0 && sp->base != tbase + tsize)
+          sp = sp->next;
+        if (sp != 0 &&
+            !is_extern_segment(sp) &&
+            (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
+          char* oldbase = sp->base;
+          sp->base = tbase;
+          sp->size += tsize;
+          return prepend_alloc(m, tbase, oldbase, nb);
+        }
+        else
+          add_segment(m, tbase, tsize, mmap_flag);
+      }
+    }
+
+    if (nb < m->topsize) { /* Allocate from new or extended top space */
+      size_t rsize = m->topsize -= nb;
+      mchunkptr p = m->top;
+      mchunkptr r = m->top = chunk_plus_offset(p, nb);
+      r->head = rsize | PINUSE_BIT;
+      set_size_and_pinuse_of_inuse_chunk(m, p, nb);
+      check_top_chunk(m, m->top);
+      check_malloced_chunk(m, chunk2mem(p), nb);
+      return chunk2mem(p);
+    }
+  }
+
+  MALLOC_FAILURE_ACTION;
+  return 0;
+}
+
+/* -----------------------  system deallocation -------------------------- */
+
+/* Unmap and unlink any mmapped segments that don't contain used chunks */
+static size_t release_unused_segments(mstate m) {
+  size_t released = 0;
+  msegmentptr pred = &m->seg;
+  msegmentptr sp = pred->next;
+  while (sp != 0) {
+    char* base = sp->base;
+    size_t size = sp->size;
+    msegmentptr next = sp->next;
+    if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
+      mchunkptr p = align_as_chunk(base);
+      size_t psize = chunksize(p);
+      /* Can unmap if first chunk holds entire segment and not pinned */
+      if (!cinuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
+        tchunkptr tp = (tchunkptr)p;
+        assert(segment_holds(sp, (char*)sp));
+        if (p == m->dv) {
+          m->dv = 0;
+          m->dvsize = 0;
+        }
+        else {
+          unlink_large_chunk(m, tp);
+        }
+        if (CALL_MUNMAP(base, size) == 0) {
+          released += size;
+          m->footprint -= size;
+          /* unlink obsoleted record */
+          sp = pred;
+          sp->next = next;
+        }
+        else { /* back out if cannot unmap */
+          insert_large_chunk(m, tp, psize);
+        }
+      }
+    }
+    pred = sp;
+    sp = next;
+  }
+  return released;
+}
+
+static int sys_trim(mstate m, size_t pad) {
+  size_t released = 0;
+  if (pad < MAX_REQUEST && is_initialized(m)) {
+    pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
+
+    if (m->topsize > pad) {
+      /* Shrink top space in granularity-size units, keeping at least one */
+      size_t unit = mparams.granularity;
+      size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
+                      SIZE_T_ONE) * unit;
+      msegmentptr sp = segment_holding(m, (char*)m->top);
+
+      if (!is_extern_segment(sp)) {
+        if (is_mmapped_segment(sp)) {
+          if (HAVE_MMAP &&
+              sp->size >= extra &&
+              !has_segment_link(m, sp)) { /* can't shrink if pinned */
+            size_t newsize = sp->size - extra;
+            /* Prefer mremap, fall back to munmap */
+            if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
+                (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
+              released = extra;
+            }
+          }
+        }
+        else if (HAVE_MORECORE) {
+          if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
+            extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
+          ACQUIRE_MORECORE_LOCK();
+          {
+            /* Make sure end of memory is where we last set it. */
+            char* old_br = (char*)(CALL_MORECORE(0));
+            if (old_br == sp->base + sp->size) {
+              char* rel_br = (char*)(CALL_MORECORE(-extra));
+              char* new_br = (char*)(CALL_MORECORE(0));
+              if (rel_br != CMFAIL && new_br < old_br)
+                released = old_br - new_br;
+            }
+          }
+          RELEASE_MORECORE_LOCK();
+        }
+      }
+
+      if (released != 0) {
+        sp->size -= released;
+        m->footprint -= released;
+        init_top(m, m->top, m->topsize - released);
+        check_top_chunk(m, m->top);
+      }
+    }
+
+    /* Unmap any unused mmapped segments */
+    if (HAVE_MMAP) 
+      released += release_unused_segments(m);
+
+    /* On failure, disable autotrim to avoid repeated failed future calls */
+    if (released == 0)
+      m->trim_check = MAX_SIZE_T;
+  }
+
+  return (released != 0)? 1 : 0;
+}
+
+/* ---------------------------- malloc support --------------------------- */
+
+/* allocate a large request from the best fitting chunk in a treebin */
+static void* tmalloc_large(mstate m, size_t nb) {
+  tchunkptr v = 0;
+  size_t rsize = -nb; /* Unsigned negation */
+  tchunkptr t;
+  bindex_t idx;
+  compute_tree_index(nb, idx);
+
+  if ((t = *treebin_at(m, idx)) != 0) {
+    /* Traverse tree for this bin looking for node with size == nb */
+    size_t sizebits = nb << leftshift_for_tree_index(idx);
+    tchunkptr rst = 0;  /* The deepest untaken right subtree */
+    for (;;) {
+      tchunkptr rt;
+      size_t trem = chunksize(t) - nb;
+      if (trem < rsize) {
+        v = t;
+        if ((rsize = trem) == 0)
+          break;
+      }
+      rt = t->child[1];
+      t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
+      if (rt != 0 && rt != t)
+        rst = rt;
+      if (t == 0) {
+        t = rst; /* set t to least subtree holding sizes > nb */
+        break;
+      }
+      sizebits <<= 1;
+    }
+  }
+
+  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
+    binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
+    if (leftbits != 0) {
+      bindex_t i;
+      binmap_t leastbit = least_bit(leftbits);
+      compute_bit2idx(leastbit, i);
+      t = *treebin_at(m, i);
+    }
+  }
+
+  while (t != 0) { /* find smallest of tree or subtree */
+    size_t trem = chunksize(t) - nb;
+    if (trem < rsize) {
+      rsize = trem;
+      v = t;
+    }
+    t = leftmost_child(t);
+  }
+
+  /*  If dv is a better fit, return 0 so malloc will use it */
+  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
+    if (RTCHECK(ok_address(m, v))) { /* split */
+      mchunkptr r = chunk_plus_offset(v, nb);
+      assert(chunksize(v) == rsize + nb);
+      if (RTCHECK(ok_next(v, r))) {
+        unlink_large_chunk(m, v);
+        if (rsize < MIN_CHUNK_SIZE)
+          set_inuse_and_pinuse(m, v, (rsize + nb));
+        else {
+          set_size_and_pinuse_of_inuse_chunk(m, v, nb);
+          set_size_and_pinuse_of_free_chunk(r, rsize);
+          insert_chunk(m, r, rsize);
+        }
+        return chunk2mem(v);
+      }
+    }
+    CORRUPTION_ERROR_ACTION(m);
+  }
+  return 0;
+}
+
+/* allocate a small request from the best fitting chunk in a treebin */
+static void* tmalloc_small(mstate m, size_t nb) {
+  tchunkptr t, v;
+  size_t rsize;
+  bindex_t i;
+  binmap_t leastbit = least_bit(m->treemap);
+  compute_bit2idx(leastbit, i);
+
+  v = t = *treebin_at(m, i);
+  rsize = chunksize(t) - nb;
+
+  while ((t = leftmost_child(t)) != 0) {
+    size_t trem = chunksize(t) - nb;
+    if (trem < rsize) {
+      rsize = trem;
+      v = t;
+    }
+  }
+
+  if (RTCHECK(ok_address(m, v))) {
+    mchunkptr r = chunk_plus_offset(v, nb);
+    assert(chunksize(v) == rsize + nb);
+    if (RTCHECK(ok_next(v, r))) {
+      unlink_large_chunk(m, v);
+      if (rsize < MIN_CHUNK_SIZE)
+        set_inuse_and_pinuse(m, v, (rsize + nb));
+      else {
+        set_size_and_pinuse_of_inuse_chunk(m, v, nb);
+        set_size_and_pinuse_of_free_chunk(r, rsize);
+        replace_dv(m, r, rsize);
+      }
+      return chunk2mem(v);
+    }
+  }
+
+  CORRUPTION_ERROR_ACTION(m);
+  return 0;
+}
+
+/* --------------------------- realloc support --------------------------- */
+
+static void* internal_realloc(mstate m, void* oldmem, size_t bytes) {
+  if (bytes >= MAX_REQUEST) {
+    MALLOC_FAILURE_ACTION;
+    return 0;
+  }
+  if (!PREACTION(m)) {
+    mchunkptr oldp = mem2chunk(oldmem);
+    size_t oldsize = chunksize(oldp);
+    mchunkptr next = chunk_plus_offset(oldp, oldsize);
+    mchunkptr newp = 0;
+    void* extra = 0;
+
+    /* Try to either shrink or extend into top. Else malloc-copy-free */
+
+    if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
+                ok_next(oldp, next) && ok_pinuse(next))) {
+      size_t nb = request2size(bytes);
+      if (is_mmapped(oldp))
+        newp = mmap_resize(m, oldp, nb);
+      else if (oldsize >= nb) { /* already big enough */
+        size_t rsize = oldsize - nb;
+        newp = oldp;
+        if (rsize >= MIN_CHUNK_SIZE) {
+          mchunkptr remainder = chunk_plus_offset(newp, nb);
+          set_inuse(m, newp, nb);
+          set_inuse(m, remainder, rsize);
+          extra = chunk2mem(remainder);
+        }
+      }
+      else if (next == m->top && oldsize + m->topsize > nb) {
+        /* Expand into top */
+        size_t newsize = oldsize + m->topsize;
+        size_t newtopsize = newsize - nb;
+        mchunkptr newtop = chunk_plus_offset(oldp, nb);
+        set_inuse(m, oldp, nb);
+        newtop->head = newtopsize |PINUSE_BIT;
+        m->top = newtop;
+        m->topsize = newtopsize;
+        newp = oldp;
+      }
+    }
+    else {
+      USAGE_ERROR_ACTION(m, oldmem);
+      POSTACTION(m);
+      return 0;
+    }
+
+    POSTACTION(m);
+
+    if (newp != 0) {
+      if (extra != 0) {
+        internal_free(m, extra);
+      }
+      check_inuse_chunk(m, newp);
+      return chunk2mem(newp);
+    }
+    else {
+      void* newmem = internal_malloc(m, bytes);
+      if (newmem != 0) {
+        size_t oc = oldsize - overhead_for(oldp);
+        memcpy(newmem, oldmem, (oc < bytes)? oc : bytes);
+        internal_free(m, oldmem);
+      }
+      return newmem;
+    }
+  }
+  return 0;
+}
+
+/* --------------------------- memalign support -------------------------- */
+
+static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
+  if (alignment <= MALLOC_ALIGNMENT)    /* Can just use malloc */
+    return internal_malloc(m, bytes);
+  if (alignment <  MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
+    alignment = MIN_CHUNK_SIZE;
+  if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
+    size_t a = MALLOC_ALIGNMENT << 1;
+    while (a < alignment) a <<= 1;
+    alignment = a;
+  }
+  
+  if (bytes >= MAX_REQUEST - alignment) {
+    if (m != 0)  { /* Test isn't needed but avoids compiler warning */
+      MALLOC_FAILURE_ACTION;
+    }
+  }
+  else {
+    size_t nb = request2size(bytes);
+    size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
+    char* mem = (char*)internal_malloc(m, req);
+    if (mem != 0) {
+      void* leader = 0;
+      void* trailer = 0;
+      mchunkptr p = mem2chunk(mem);
+
+      if (PREACTION(m)) return 0;
+      if ((((size_t)(mem)) % alignment) != 0) { /* misaligned */
+        /*
+          Find an aligned spot inside chunk.  Since we need to give
+          back leading space in a chunk of at least MIN_CHUNK_SIZE, if
+          the first calculation places us at a spot with less than
+          MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
+          We've allocated enough total room so that this is always
+          possible.
+        */
+        char* br = (char*)mem2chunk((size_t)(((size_t)(mem +
+                                                       alignment -
+                                                       SIZE_T_ONE)) &
+                                             -alignment));
+        char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
+          br : br+alignment;
+        mchunkptr newp = (mchunkptr)pos;
+        size_t leadsize = pos - (char*)(p);
+        size_t newsize = chunksize(p) - leadsize;
+
+        if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
+          newp->prev_foot = p->prev_foot + leadsize;
+          newp->head = (newsize|CINUSE_BIT);
+        }
+        else { /* Otherwise, give back leader, use the rest */
+          set_inuse(m, newp, newsize);
+          set_inuse(m, p, leadsize);
+          leader = chunk2mem(p);
+        }
+        p = newp;
+      }
+
+      /* Give back spare room at the end */
+      if (!is_mmapped(p)) {
+        size_t size = chunksize(p);
+        if (size > nb + MIN_CHUNK_SIZE) {
+          size_t remainder_size = size - nb;
+          mchunkptr remainder = chunk_plus_offset(p, nb);
+          set_inuse(m, p, nb);
+          set_inuse(m, remainder, remainder_size);
+          trailer = chunk2mem(remainder);
+        }
+      }
+
+      assert (chunksize(p) >= nb);
+      assert((((size_t)(chunk2mem(p))) % alignment) == 0);
+      check_inuse_chunk(m, p);
+      POSTACTION(m);
+      if (leader != 0) {
+        internal_free(m, leader);
+      }
+      if (trailer != 0) {
+        internal_free(m, trailer);
+      }
+      return chunk2mem(p);
+    }
+  }
+  return 0;
+}
+
+/* ------------------------ comalloc/coalloc support --------------------- */
+
+static void** ialloc(mstate m,
+                     size_t n_elements,
+                     size_t* sizes,
+                     int opts,
+                     void* chunks[]) {
+  /*
+    This provides common support for independent_X routines, handling
+    all of the combinations that can result.
+
+    The opts arg has:
+    bit 0 set if all elements are same size (using sizes[0])
+    bit 1 set if elements should be zeroed
+  */
+
+  size_t    element_size;   /* chunksize of each element, if all same */
+  size_t    contents_size;  /* total size of elements */
+  size_t    array_size;     /* request size of pointer array */
+  void*     mem;            /* malloced aggregate space */
+  mchunkptr p;              /* corresponding chunk */
+  size_t    remainder_size; /* remaining bytes while splitting */
+  void**    marray;         /* either "chunks" or malloced ptr array */
+  mchunkptr array_chunk;    /* chunk for malloced ptr array */
+  flag_t    was_enabled;    /* to disable mmap */
+  size_t    size;
+  size_t    i;
+
+  /* compute array length, if needed */
+  if (chunks != 0) {
+    if (n_elements == 0)
+      return chunks; /* nothing to do */
+    marray = chunks;
+    array_size = 0;
+  }
+  else {
+    /* if empty req, must still return chunk representing empty array */
+    if (n_elements == 0)
+      return (void**)internal_malloc(m, 0);
+    marray = 0;
+    array_size = request2size(n_elements * (sizeof(void*)));
+  }
+
+  /* compute total element size */
+  if (opts & 0x1) { /* all-same-size */
+    element_size = request2size(*sizes);
+    contents_size = n_elements * element_size;
+  }
+  else { /* add up all the sizes */
+    element_size = 0;
+    contents_size = 0;
+    for (i = 0; i != n_elements; ++i)
+      contents_size += request2size(sizes[i]);
+  }
+
+  size = contents_size + array_size;
+
+  /*
+     Allocate the aggregate chunk.  First disable direct-mmapping so
+     malloc won't use it, since we would not be able to later
+     free/realloc space internal to a segregated mmap region.
+  */
+  was_enabled = use_mmap(m);
+  disable_mmap(m);
+  mem = internal_malloc(m, size - CHUNK_OVERHEAD);
+  if (was_enabled)
+    enable_mmap(m);
+  if (mem == 0)
+    return 0;
+
+  if (PREACTION(m)) return 0;
+  p = mem2chunk(mem);
+  remainder_size = chunksize(p);
+
+  assert(!is_mmapped(p));
+
+  if (opts & 0x2) {       /* optionally clear the elements */
+    memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
+  }
+
+  /* If not provided, allocate the pointer array as final part of chunk */
+  if (marray == 0) {
+    size_t  array_chunk_size;
+    array_chunk = chunk_plus_offset(p, contents_size);
+    array_chunk_size = remainder_size - contents_size;
+    marray = (void**) (chunk2mem(array_chunk));
+    set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
+    remainder_size = contents_size;
+  }
+
+  /* split out elements */
+  for (i = 0; ; ++i) {
+    marray[i] = chunk2mem(p);
+    if (i != n_elements-1) {
+      if (element_size != 0)
+        size = element_size;
+      else
+        size = request2size(sizes[i]);
+      remainder_size -= size;
+      set_size_and_pinuse_of_inuse_chunk(m, p, size);
+      p = chunk_plus_offset(p, size);
+    }
+    else { /* the final element absorbs any overallocation slop */
+      set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
+      break;
+    }
+  }
+
+#if DEBUG
+  if (marray != chunks) {
+    /* final element must have exactly exhausted chunk */
+    if (element_size != 0) {
+      assert(remainder_size == element_size);
+    }
+    else {
+      assert(remainder_size == request2size(sizes[i]));
+    }
+    check_inuse_chunk(m, mem2chunk(marray));
+  }
+  for (i = 0; i != n_elements; ++i)
+    check_inuse_chunk(m, mem2chunk(marray[i]));
+
+#endif /* DEBUG */
+
+  POSTACTION(m);
+  return marray;
+}
+
+
+/* -------------------------- public routines ---------------------------- */
+
+#if !ONLY_MSPACES
+
+void* dlmalloc(size_t bytes) {
+  /*
+     Basic algorithm:
+     If a small request (< 256 bytes minus per-chunk overhead):
+       1. If one exists, use a remainderless chunk in associated smallbin.
+          (Remainderless means that there are too few excess bytes to
+          represent as a chunk.)
+       2. If it is big enough, use the dv chunk, which is normally the
+          chunk adjacent to the one used for the most recent small request.
+       3. If one exists, split the smallest available chunk in a bin,
+          saving remainder in dv.
+       4. If it is big enough, use the top chunk.
+       5. If available, get memory from system and use it
+     Otherwise, for a large request:
+       1. Find the smallest available binned chunk that fits, and use it
+          if it is better fitting than dv chunk, splitting if necessary.
+       2. If better fitting than any binned chunk, use the dv chunk.
+       3. If it is big enough, use the top chunk.
+       4. If request size >= mmap threshold, try to directly mmap this chunk.
+       5. If available, get memory from system and use it
+
+     The ugly goto's here ensure that postaction occurs along all paths.
+  */
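+
+  /*
+     Worked example (a sketch, assuming a typical 32-bit build without
+     FOOTERS, so CHUNK_OVERHEAD is one 4-byte size_t and MALLOC_ALIGNMENT
+     is 8): bytes == 20 pads to nb == (20 + 4 + 7) & ~7 == 24, so
+     small_index(24) == 24 >> 3 == 3 and the request is served from
+     smallbin 3, which holds exact-size 24-byte chunks.
+  */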
+
+  if (!PREACTION(gm)) {
+    void* mem;
+    size_t nb;
+    if (bytes <= MAX_SMALL_REQUEST) {
+      bindex_t idx;
+      binmap_t smallbits;
+      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
+      idx = small_index(nb);
+      smallbits = gm->smallmap >> idx;
+
+      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
+        mchunkptr b, p;
+        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
+        b = smallbin_at(gm, idx);
+        p = b->fd;
+        assert(chunksize(p) == small_index2size(idx));
+        unlink_first_small_chunk(gm, b, p, idx);
+        set_inuse_and_pinuse(gm, p, small_index2size(idx));
+        mem = chunk2mem(p);
+        check_malloced_chunk(gm, mem, nb);
+        goto postaction;
+      }
+
+      else if (nb > gm->dvsize) {
+        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
+          mchunkptr b, p, r;
+          size_t rsize;
+          bindex_t i;
+          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
+          binmap_t leastbit = least_bit(leftbits);
+          compute_bit2idx(leastbit, i);
+          b = smallbin_at(gm, i);
+          p = b->fd;
+          assert(chunksize(p) == small_index2size(i));
+          unlink_first_small_chunk(gm, b, p, i);
+          rsize = small_index2size(i) - nb;
+          /* Fit here cannot be remainderless if 4-byte sizes */
+          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
+            set_inuse_and_pinuse(gm, p, small_index2size(i));
+          else {
+            set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
+            r = chunk_plus_offset(p, nb);
+            set_size_and_pinuse_of_free_chunk(r, rsize);
+            replace_dv(gm, r, rsize);
+          }
+          mem = chunk2mem(p);
+          check_malloced_chunk(gm, mem, nb);
+          goto postaction;
+        }
+
+        else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
+          check_malloced_chunk(gm, mem, nb);
+          goto postaction;
+        }
+      }
+    }
+    else if (bytes >= MAX_REQUEST)
+      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
+    else {
+      nb = pad_request(bytes);
+      if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
+        check_malloced_chunk(gm, mem, nb);
+        goto postaction;
+      }
+    }
+
+    if (nb <= gm->dvsize) {
+      size_t rsize = gm->dvsize - nb;
+      mchunkptr p = gm->dv;
+      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
+        mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
+        gm->dvsize = rsize;
+        set_size_and_pinuse_of_free_chunk(r, rsize);
+        set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
+      }
+      else { /* exhaust dv */
+        size_t dvs = gm->dvsize;
+        gm->dvsize = 0;
+        gm->dv = 0;
+        set_inuse_and_pinuse(gm, p, dvs);
+      }
+      mem = chunk2mem(p);
+      check_malloced_chunk(gm, mem, nb);
+      goto postaction;
+    }
+
+    else if (nb < gm->topsize) { /* Split top */
+      size_t rsize = gm->topsize -= nb;
+      mchunkptr p = gm->top;
+      mchunkptr r = gm->top = chunk_plus_offset(p, nb);
+      r->head = rsize | PINUSE_BIT;
+      set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
+      mem = chunk2mem(p);
+      check_top_chunk(gm, gm->top);
+      check_malloced_chunk(gm, mem, nb);
+      goto postaction;
+    }
+
+    mem = sys_alloc(gm, nb);
+
+  postaction:
+    POSTACTION(gm);
+    return mem;
+  }
+
+  return 0;
+}
+
+void dlfree(void* mem) {
+  /*
+     Consolidate freed chunks with preceding or succeeding bordering
+     free chunks, if they exist, and then place in a bin.  Intermixed
+     with special cases for top, dv, mmapped chunks, and usage errors.
+  */
+
+  if (mem != 0) {
+    mchunkptr p  = mem2chunk(mem);
+#if FOOTERS
+    mstate fm = get_mstate_for(p);
+    if (!ok_magic(fm)) {
+      USAGE_ERROR_ACTION(fm, p);
+      return;
+    }
+#else /* FOOTERS */
+#define fm gm
+#endif /* FOOTERS */
+    if (!PREACTION(fm)) {
+      check_inuse_chunk(fm, p);
+      if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
+        size_t psize = chunksize(p);
+        mchunkptr next = chunk_plus_offset(p, psize);
+        if (!pinuse(p)) {
+          size_t prevsize = p->prev_foot;
+          if ((prevsize & IS_MMAPPED_BIT) != 0) {
+            prevsize &= ~IS_MMAPPED_BIT;
+            psize += prevsize + MMAP_FOOT_PAD;
+            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
+              fm->footprint -= psize;
+            goto postaction;
+          }
+          else {
+            mchunkptr prev = chunk_minus_offset(p, prevsize);
+            psize += prevsize;
+            p = prev;
+            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
+              if (p != fm->dv) {
+                unlink_chunk(fm, p, prevsize);
+              }
+              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
+                fm->dvsize = psize;
+                set_free_with_pinuse(p, psize, next);
+                goto postaction;
+              }
+            }
+            else
+              goto erroraction;
+          }
+        }
+
+        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
+          if (!cinuse(next)) {  /* consolidate forward */
+            if (next == fm->top) {
+              size_t tsize = fm->topsize += psize;
+              fm->top = p;
+              p->head = tsize | PINUSE_BIT;
+              if (p == fm->dv) {
+                fm->dv = 0;
+                fm->dvsize = 0;
+              }
+              if (should_trim(fm, tsize))
+                sys_trim(fm, 0);
+              goto postaction;
+            }
+            else if (next == fm->dv) {
+              size_t dsize = fm->dvsize += psize;
+              fm->dv = p;
+              set_size_and_pinuse_of_free_chunk(p, dsize);
+              goto postaction;
+            }
+            else {
+              size_t nsize = chunksize(next);
+              psize += nsize;
+              unlink_chunk(fm, next, nsize);
+              set_size_and_pinuse_of_free_chunk(p, psize);
+              if (p == fm->dv) {
+                fm->dvsize = psize;
+                goto postaction;
+              }
+            }
+          }
+          else
+            set_free_with_pinuse(p, psize, next);
+          insert_chunk(fm, p, psize);
+          check_free_chunk(fm, p);
+          goto postaction;
+        }
+      }
+    erroraction:
+      USAGE_ERROR_ACTION(fm, p);
+    postaction:
+      POSTACTION(fm);
+    }
+  }
+#if !FOOTERS
+#undef fm
+#endif /* FOOTERS */
+}
+
+void* dlcalloc(size_t n_elements, size_t elem_size) {
+  void *mem;
+  if (n_elements && MAX_SIZE_T / n_elements < elem_size) {
+    /* Fail on overflow */
+    MALLOC_FAILURE_ACTION;
+    return NULL;
+  }
+  elem_size *= n_elements;
+  mem = dlmalloc(elem_size);
+  if (mem && calloc_must_clear(mem2chunk(mem)))
+    memset(mem, 0, elem_size);
+  return mem;
+}
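+
+/*
+  Example (an illustrative sketch): the overflow check above rejects
+  element counts whose product wraps around size_t.  On a 32-bit system,
+
+      void* p = dlcalloc((size_t)1 << 17, (size_t)1 << 16);
+
+  asks for 2^33 bytes; rather than wrapping to a small allocation, the
+  check trips, MALLOC_FAILURE_ACTION runs, and NULL is returned.
+*/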
+
+void* dlrealloc(void* oldmem, size_t bytes) {
+  if (oldmem == 0)
+    return dlmalloc(bytes);
+#ifdef REALLOC_ZERO_BYTES_FREES
+  if (bytes == 0) {
+    dlfree(oldmem);
+    return 0;
+  }
+#endif /* REALLOC_ZERO_BYTES_FREES */
+  else {
+#if ! FOOTERS
+    mstate m = gm;
+#else /* FOOTERS */
+    mstate m = get_mstate_for(mem2chunk(oldmem));
+    if (!ok_magic(m)) {
+      USAGE_ERROR_ACTION(m, oldmem);
+      return 0;
+    }
+#endif /* FOOTERS */
+    return internal_realloc(m, oldmem, bytes);
+  }
+}
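+
+/*
+  Typical use (a sketch only): grow a buffer while keeping the old
+  pointer on failure, since a failed realloc does not free its argument.
+
+      char* buf = (char*)dlmalloc(64);
+      char* bigger = (char*)dlrealloc(buf, 256);
+      if (bigger != 0)
+        buf = bigger;   /* old contents preserved up to 64 bytes */
+      else
+        dlfree(buf);    /* grow failed; buf was left untouched */
+*/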
+
+void* dlmemalign(size_t alignment, size_t bytes) {
+  return internal_memalign(gm, alignment, bytes);
+}
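+
+/*
+  Example (a sketch): requesting a page-aligned buffer.  The alignment
+  should be a power of two; other values are rounded up to the next
+  power of two, and values <= MALLOC_ALIGNMENT fall back to malloc.
+
+      void* iobuf = dlmemalign(4096, 8192);
+      /* iobuf is 4096-byte aligned, or NULL on failure */
+      dlfree(iobuf);
+*/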
+
+void** dlindependent_calloc(size_t n_elements, size_t elem_size,
+                                 void* chunks[]) {
+  size_t sz = elem_size; /* serves as 1-element array */
+  return ialloc(gm, n_elements, &sz, 3, chunks);
+}
+
+void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
+                                   void* chunks[]) {
+  return ialloc(gm, n_elements, sizes, 0, chunks);
+}
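+
+/*
+  Example (a sketch; struct header_t is a hypothetical stand-in): carve
+  one chunk into three differently sized pieces that sit adjacently in
+  memory but can each be freed independently.
+
+      size_t sizes[3] = { sizeof(struct header_t), 64, 256 };
+      void*  ptrs[3];
+      if (dlindependent_comalloc(3, sizes, ptrs) != 0) {
+        struct header_t* h = (struct header_t*)ptrs[0];
+        /* ... use h, ptrs[1] and ptrs[2] ... */
+        dlfree(ptrs[0]);
+        dlfree(ptrs[1]);
+        dlfree(ptrs[2]);
+      }
+*/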
+
+void* dlvalloc(size_t bytes) {
+  size_t pagesz;
+  init_mparams();
+  pagesz = mparams.page_size;
+  return dlmemalign(pagesz, bytes);
+}
+
+void* dlpvalloc(size_t bytes) {
+  size_t pagesz;
+  init_mparams();
+  pagesz = mparams.page_size;
+  return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
+}
+
+int dlmalloc_trim(size_t pad) {
+  int result = 0;
+  if (!PREACTION(gm)) {
+    result = sys_trim(gm, pad);
+    POSTACTION(gm);
+  }
+  return result;
+}
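+
+/*
+  Example (a sketch; big_buffer is hypothetical): after releasing a
+  large working set, return unused top-of-heap memory to the system
+  while keeping 1MB of slack for upcoming allocations.
+
+      dlfree(big_buffer);
+      int released = dlmalloc_trim(1024 * 1024);  /* 1 if memory freed */
+*/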
+
+size_t dlmalloc_footprint(void) {
+  return gm->footprint;
+}
+
+#if USE_MAX_ALLOWED_FOOTPRINT
+size_t dlmalloc_max_allowed_footprint(void) {
+  return gm->max_allowed_footprint;
+}
+
+void dlmalloc_set_max_allowed_footprint(size_t bytes) {
+  if (bytes > gm->footprint) {
+    /* Increase the size in multiples of the granularity,
+     * which is the smallest unit we request from the system.
+     */
+    gm->max_allowed_footprint = gm->footprint +
+                                granularity_align(bytes - gm->footprint);
+  }
+  else {
+    //TODO: allow for reducing the max footprint
+    gm->max_allowed_footprint = gm->footprint;
+  }
+}
+#endif
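+
+/*
+  Example (a sketch; requires USE_MAX_ALLOWED_FOOTPRINT): cap the heap
+  at roughly 16MB.  Once set, the heap will not grow past the
+  granularity-rounded limit (see the documentation in dlmalloc.h).
+
+      dlmalloc_set_max_allowed_footprint(16 * 1024 * 1024);
+      size_t limit = dlmalloc_max_allowed_footprint();
+*/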
+
+size_t dlmalloc_max_footprint(void) {
+  return gm->max_footprint;
+}
+
+#if !NO_MALLINFO
+struct mallinfo dlmallinfo(void) {
+  return internal_mallinfo(gm);
+}
+#endif /* NO_MALLINFO */
+
+void dlmalloc_stats() {
+  internal_malloc_stats(gm);
+}
+
+size_t dlmalloc_usable_size(void* mem) {
+  if (mem != 0) {
+    mchunkptr p = mem2chunk(mem);
+    if (cinuse(p))
+      return chunksize(p) - overhead_for(p);
+  }
+  return 0;
+}
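+
+/*
+  Example (a sketch): the usable size can exceed the requested size
+  because requests are padded up to chunk granularity.
+
+      void* p = dlmalloc(100);
+      size_t n = dlmalloc_usable_size(p);  /* n >= 100 whenever p != 0 */
+*/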
+
+int dlmallopt(int param_number, int value) {
+  return change_mparam(param_number, value);
+}
+
+#endif /* !ONLY_MSPACES */
+
+/* ----------------------------- user mspaces ---------------------------- */
+
+#if MSPACES
+
+static mstate init_user_mstate(char* tbase, size_t tsize) {
+  size_t msize = pad_request(sizeof(struct malloc_state));
+  mchunkptr mn;
+  mchunkptr msp = align_as_chunk(tbase);
+  mstate m = (mstate)(chunk2mem(msp));
+  memset(m, 0, msize);
+  INITIAL_LOCK(&m->mutex);
+  msp->head = (msize|PINUSE_BIT|CINUSE_BIT);
+  m->seg.base = m->least_addr = tbase;
+  m->seg.size = m->footprint = m->max_footprint = tsize;
+#if USE_MAX_ALLOWED_FOOTPRINT
+  m->max_allowed_footprint = MAX_SIZE_T;
+#endif
+  m->magic = mparams.magic;
+  m->mflags = mparams.default_mflags;
+  disable_contiguous(m);
+  init_bins(m);
+  mn = next_chunk(mem2chunk(m));
+  init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
+  check_top_chunk(m, m->top);
+  return m;
+}
+
+mspace create_mspace(size_t capacity, int locked) {
+  mstate m = 0;
+  size_t msize = pad_request(sizeof(struct malloc_state));
+  init_mparams(); /* Ensure pagesize etc initialized */
+
+  if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
+    size_t rs = ((capacity == 0)? mparams.granularity :
+                 (capacity + TOP_FOOT_SIZE + msize));
+    size_t tsize = granularity_align(rs);
+    char* tbase = (char*)(CALL_MMAP(tsize));
+    if (tbase != CMFAIL) {
+      m = init_user_mstate(tbase, tsize);
+      m->seg.sflags = IS_MMAPPED_BIT;
+      set_lock(m, locked);
+    }
+  }
+  return (mspace)m;
+}
+
+mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
+  mstate m = 0;
+  size_t msize = pad_request(sizeof(struct malloc_state));
+  init_mparams(); /* Ensure pagesize etc initialized */
+
+  if (capacity > msize + TOP_FOOT_SIZE &&
+      capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
+    m = init_user_mstate((char*)base, capacity);
+    m->seg.sflags = EXTERN_BIT;
+    set_lock(m, locked);
+  }
+  return (mspace)m;
+}
+
+size_t destroy_mspace(mspace msp) {
+  size_t freed = 0;
+  mstate ms = (mstate)msp;
+  if (ok_magic(ms)) {
+    msegmentptr sp = &ms->seg;
+    while (sp != 0) {
+      char* base = sp->base;
+      size_t size = sp->size;
+      flag_t flag = sp->sflags;
+      sp = sp->next;
+      if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
+          CALL_MUNMAP(base, size) == 0)
+        freed += size;
+    }
+  }
+  else {
+    USAGE_ERROR_ACTION(ms,ms);
+  }
+  return freed;
+}
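+
+/*
+  Example (a sketch): typical lifecycle of a private mspace.  A capacity
+  of 0 requests the default granularity; the space then grows on demand.
+
+      mspace msp = create_mspace(0, 0 /* no locking */);
+      void* p = mspace_malloc(msp, 128);
+      mspace_free(msp, p);
+      size_t freed = destroy_mspace(msp);  /* unmaps all its segments */
+*/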
+
+/*
+  mspace versions of routines are near-clones of the global
+  versions. This is not so nice but better than the alternatives.
+*/
+
+
+void* mspace_malloc(mspace msp, size_t bytes) {
+  mstate ms = (mstate)msp;
+  if (!ok_magic(ms)) {
+    USAGE_ERROR_ACTION(ms,ms);
+    return 0;
+  }
+  if (!PREACTION(ms)) {
+    void* mem;
+    size_t nb;
+    if (bytes <= MAX_SMALL_REQUEST) {
+      bindex_t idx;
+      binmap_t smallbits;
+      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
+      idx = small_index(nb);
+      smallbits = ms->smallmap >> idx;
+
+      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
+        mchunkptr b, p;
+        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
+        b = smallbin_at(ms, idx);
+        p = b->fd;
+        assert(chunksize(p) == small_index2size(idx));
+        unlink_first_small_chunk(ms, b, p, idx);
+        set_inuse_and_pinuse(ms, p, small_index2size(idx));
+        mem = chunk2mem(p);
+        check_malloced_chunk(ms, mem, nb);
+        goto postaction;
+      }
+
+      else if (nb > ms->dvsize) {
+        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
+          mchunkptr b, p, r;
+          size_t rsize;
+          bindex_t i;
+          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
+          binmap_t leastbit = least_bit(leftbits);
+          compute_bit2idx(leastbit, i);
+          b = smallbin_at(ms, i);
+          p = b->fd;
+          assert(chunksize(p) == small_index2size(i));
+          unlink_first_small_chunk(ms, b, p, i);
+          rsize = small_index2size(i) - nb;
+          /* Fit here cannot be remainderless if 4-byte sizes */
+          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
+            set_inuse_and_pinuse(ms, p, small_index2size(i));
+          else {
+            set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
+            r = chunk_plus_offset(p, nb);
+            set_size_and_pinuse_of_free_chunk(r, rsize);
+            replace_dv(ms, r, rsize);
+          }
+          mem = chunk2mem(p);
+          check_malloced_chunk(ms, mem, nb);
+          goto postaction;
+        }
+
+        else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
+          check_malloced_chunk(ms, mem, nb);
+          goto postaction;
+        }
+      }
+    }
+    else if (bytes >= MAX_REQUEST)
+      nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
+    else {
+      nb = pad_request(bytes);
+      if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
+        check_malloced_chunk(ms, mem, nb);
+        goto postaction;
+      }
+    }
+
+    if (nb <= ms->dvsize) {
+      size_t rsize = ms->dvsize - nb;
+      mchunkptr p = ms->dv;
+      if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
+        mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
+        ms->dvsize = rsize;
+        set_size_and_pinuse_of_free_chunk(r, rsize);
+        set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
+      }
+      else { /* exhaust dv */
+        size_t dvs = ms->dvsize;
+        ms->dvsize = 0;
+        ms->dv = 0;
+        set_inuse_and_pinuse(ms, p, dvs);
+      }
+      mem = chunk2mem(p);
+      check_malloced_chunk(ms, mem, nb);
+      goto postaction;
+    }
+
+    else if (nb < ms->topsize) { /* Split top */
+      size_t rsize = ms->topsize -= nb;
+      mchunkptr p = ms->top;
+      mchunkptr r = ms->top = chunk_plus_offset(p, nb);
+      r->head = rsize | PINUSE_BIT;
+      set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
+      mem = chunk2mem(p);
+      check_top_chunk(ms, ms->top);
+      check_malloced_chunk(ms, mem, nb);
+      goto postaction;
+    }
+
+    mem = sys_alloc(ms, nb);
+
+  postaction:
+    POSTACTION(ms);
+    return mem;
+  }
+
+  return 0;
+}
+
+void mspace_free(mspace msp, void* mem) {
+  if (mem != 0) {
+    mchunkptr p  = mem2chunk(mem);
+#if FOOTERS
+    mstate fm = get_mstate_for(p);
+#else /* FOOTERS */
+    mstate fm = (mstate)msp;
+#endif /* FOOTERS */
+    if (!ok_magic(fm)) {
+      USAGE_ERROR_ACTION(fm, p);
+      return;
+    }
+    if (!PREACTION(fm)) {
+      check_inuse_chunk(fm, p);
+      if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
+        size_t psize = chunksize(p);
+        mchunkptr next = chunk_plus_offset(p, psize);
+        if (!pinuse(p)) {
+          size_t prevsize = p->prev_foot;
+          if ((prevsize & IS_MMAPPED_BIT) != 0) {
+            prevsize &= ~IS_MMAPPED_BIT;
+            psize += prevsize + MMAP_FOOT_PAD;
+            if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
+              fm->footprint -= psize;
+            goto postaction;
+          }
+          else {
+            mchunkptr prev = chunk_minus_offset(p, prevsize);
+            psize += prevsize;
+            p = prev;
+            if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
+              if (p != fm->dv) {
+                unlink_chunk(fm, p, prevsize);
+              }
+              else if ((next->head & INUSE_BITS) == INUSE_BITS) {
+                fm->dvsize = psize;
+                set_free_with_pinuse(p, psize, next);
+                goto postaction;
+              }
+            }
+            else
+              goto erroraction;
+          }
+        }
+
+        if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
+          if (!cinuse(next)) {  /* consolidate forward */
+            if (next == fm->top) {
+              size_t tsize = fm->topsize += psize;
+              fm->top = p;
+              p->head = tsize | PINUSE_BIT;
+              if (p == fm->dv) {
+                fm->dv = 0;
+                fm->dvsize = 0;
+              }
+              if (should_trim(fm, tsize))
+                sys_trim(fm, 0);
+              goto postaction;
+            }
+            else if (next == fm->dv) {
+              size_t dsize = fm->dvsize += psize;
+              fm->dv = p;
+              set_size_and_pinuse_of_free_chunk(p, dsize);
+              goto postaction;
+            }
+            else {
+              size_t nsize = chunksize(next);
+              psize += nsize;
+              unlink_chunk(fm, next, nsize);
+              set_size_and_pinuse_of_free_chunk(p, psize);
+              if (p == fm->dv) {
+                fm->dvsize = psize;
+                goto postaction;
+              }
+            }
+          }
+          else
+            set_free_with_pinuse(p, psize, next);
+          insert_chunk(fm, p, psize);
+          check_free_chunk(fm, p);
+          goto postaction;
+        }
+      }
+    erroraction:
+      USAGE_ERROR_ACTION(fm, p);
+    postaction:
+      POSTACTION(fm);
+    }
+  }
+}
+
+void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
+  void *mem;
+  mstate ms = (mstate)msp;
+  if (!ok_magic(ms)) {
+    USAGE_ERROR_ACTION(ms,ms);
+    return 0;
+  }
+  if (n_elements && MAX_SIZE_T / n_elements < elem_size) {
+    /* Fail on overflow */
+    MALLOC_FAILURE_ACTION;
+    return NULL;
+  }
+  elem_size *= n_elements;
+  mem = internal_malloc(ms, elem_size);
+  if (mem && calloc_must_clear(mem2chunk(mem)))
+    memset(mem, 0, elem_size);
+  return mem;
+}
+
+void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
+  if (oldmem == 0)
+    return mspace_malloc(msp, bytes);
+#ifdef REALLOC_ZERO_BYTES_FREES
+  if (bytes == 0) {
+    mspace_free(msp, oldmem);
+    return 0;
+  }
+#endif /* REALLOC_ZERO_BYTES_FREES */
+  else {
+#if FOOTERS
+    mchunkptr p  = mem2chunk(oldmem);
+    mstate ms = get_mstate_for(p);
+#else /* FOOTERS */
+    mstate ms = (mstate)msp;
+#endif /* FOOTERS */
+    if (!ok_magic(ms)) {
+      USAGE_ERROR_ACTION(ms,ms);
+      return 0;
+    }
+    return internal_realloc(ms, oldmem, bytes);
+  }
+}
+
+void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
+  mstate ms = (mstate)msp;
+  if (!ok_magic(ms)) {
+    USAGE_ERROR_ACTION(ms,ms);
+    return 0;
+  }
+  return internal_memalign(ms, alignment, bytes);
+}
+
+void** mspace_independent_calloc(mspace msp, size_t n_elements,
+                                 size_t elem_size, void* chunks[]) {
+  size_t sz = elem_size; /* serves as 1-element array */
+  mstate ms = (mstate)msp;
+  if (!ok_magic(ms)) {
+    USAGE_ERROR_ACTION(ms,ms);
+    return 0;
+  }
+  return ialloc(ms, n_elements, &sz, 3, chunks);
+}
+
+void** mspace_independent_comalloc(mspace msp, size_t n_elements,
+                                   size_t sizes[], void* chunks[]) {
+  mstate ms = (mstate)msp;
+  if (!ok_magic(ms)) {
+    USAGE_ERROR_ACTION(ms,ms);
+    return 0;
+  }
+  return ialloc(ms, n_elements, sizes, 0, chunks);
+}
+
+int mspace_trim(mspace msp, size_t pad) {
+  int result = 0;
+  mstate ms = (mstate)msp;
+  if (ok_magic(ms)) {
+    if (!PREACTION(ms)) {
+      result = sys_trim(ms, pad);
+      POSTACTION(ms);
+    }
+  }
+  else {
+    USAGE_ERROR_ACTION(ms,ms);
+  }
+  return result;
+}
+
+void mspace_malloc_stats(mspace msp) {
+  mstate ms = (mstate)msp;
+  if (ok_magic(ms)) {
+    internal_malloc_stats(ms);
+  }
+  else {
+    USAGE_ERROR_ACTION(ms,ms);
+  }
+}
+
+size_t mspace_footprint(mspace msp) {
+  size_t result;
+  mstate ms = (mstate)msp;
+  if (ok_magic(ms)) {
+    result = ms->footprint;
+  }
+  else {
+    USAGE_ERROR_ACTION(ms,ms);
+  }
+  return result;
+}
+
+#if USE_MAX_ALLOWED_FOOTPRINT
+size_t mspace_max_allowed_footprint(mspace msp) {
+  size_t result;
+  mstate ms = (mstate)msp;
+  if (ok_magic(ms)) {
+    result = ms->max_allowed_footprint;
+  }
+  else {
+    USAGE_ERROR_ACTION(ms,ms);
+  }
+  return result;
+}
+
+void mspace_set_max_allowed_footprint(mspace msp, size_t bytes) {
+  mstate ms = (mstate)msp;
+  if (ok_magic(ms)) {
+    if (bytes > ms->footprint) {
+      /* Increase the size in multiples of the granularity,
+       * which is the smallest unit we request from the system.
+       */
+      ms->max_allowed_footprint = ms->footprint +
+                                  granularity_align(bytes - ms->footprint);
+    }
+    else {
+      //TODO: allow for reducing the max footprint
+      ms->max_allowed_footprint = ms->footprint;
+    }
+  }
+  else {
+    USAGE_ERROR_ACTION(ms,ms);
+  }
+}
+#endif
+
+size_t mspace_max_footprint(mspace msp) {
+  size_t result;
+  mstate ms = (mstate)msp;
+  if (ok_magic(ms)) {
+    result = ms->max_footprint;
+  }
+  else {
+    USAGE_ERROR_ACTION(ms,ms);
+  }
+  return result;
+}
+
+
+#if !NO_MALLINFO
+struct mallinfo mspace_mallinfo(mspace msp) {
+  mstate ms = (mstate)msp;
+  if (!ok_magic(ms)) {
+    USAGE_ERROR_ACTION(ms,ms);
+  }
+  return internal_mallinfo(ms);
+}
+#endif /* NO_MALLINFO */
+
+int mspace_mallopt(int param_number, int value) {
+  return change_mparam(param_number, value);
+}
+
+#endif /* MSPACES */
+
+#if MSPACES && ONLY_MSPACES
+void mspace_walk_free_pages(mspace msp,
+    void(*handler)(void *start, void *end, void *arg), void *harg)
+{
+  mstate m = (mstate)msp;
+  if (!ok_magic(m)) {
+    USAGE_ERROR_ACTION(m,m);
+    return;
+  }
+#else
+void dlmalloc_walk_free_pages(void(*handler)(void *start, void *end, void *arg),
+    void *harg)
+{
+  mstate m = (mstate)gm;
+#endif
+  if (!PREACTION(m)) {
+    if (is_initialized(m)) {
+      msegmentptr s = &m->seg;
+      while (s != 0) {
+        mchunkptr p = align_as_chunk(s->base);
+        while (segment_holds(s, p) &&
+               p != m->top && p->head != FENCEPOST_HEAD) {
+          size_t chunklen = chunksize(p);
+          if (!cinuse(p)) {
+            void *start;
+            if (is_small(chunklen)) {
+              start = (void *)(p + 1);
+            }
+            else {
+              start = (void *)((tchunkptr)p + 1);
+            }
+            handler(start, next_chunk(p), harg);
+          }
+          p = next_chunk(p);
+        }
+        if (p == m->top) {
+          handler((void *)(p + 1), next_chunk(p), harg);
+        }
+        s = s->next;
+      }
+    }
+    POSTACTION(m);
+  }
+}
+
+
+#if MSPACES && ONLY_MSPACES
+void mspace_walk_heap(mspace msp,
+                      void(*handler)(const void *chunkptr, size_t chunklen,
+                                     const void *userptr, size_t userlen,
+                                     void *arg),
+                      void *harg)
+{
+  msegmentptr s;
+  mstate m = (mstate)msp;
+  if (!ok_magic(m)) {
+    USAGE_ERROR_ACTION(m,m);
+    return;
+  }
+#else
+void dlmalloc_walk_heap(void(*handler)(const void *chunkptr, size_t chunklen,
+                                       const void *userptr, size_t userlen,
+                                       void *arg),
+                        void *harg)
+{
+  msegmentptr s;
+  mstate m = (mstate)gm;
+#endif
+
+  s = &m->seg;
+  while (s != 0) {
+    mchunkptr p = align_as_chunk(s->base);
+    while (segment_holds(s, p) &&
+           p != m->top && p->head != FENCEPOST_HEAD) {
+      void *chunkptr, *userptr;
+      size_t chunklen, userlen;
+      chunkptr = p;
+      chunklen = chunksize(p);
+      if (cinuse(p)) {
+        userptr = chunk2mem(p);
+        userlen = chunklen - overhead_for(p);
+      }
+      else {
+        userptr = NULL;
+        userlen = 0;
+      }
+      handler(chunkptr, chunklen, userptr, userlen, harg);
+      p = next_chunk(p);
+    }
+    if (p == m->top) {
+      /* The top chunk is just a big free chunk for our purposes.
+       */
+      handler(m->top, m->topsize, NULL, 0, harg);
+    }
+    s = s->next;
+  }
+}
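+
+/*
+  Example (a sketch): a walker callback that totals in-use user bytes.
+
+      static void count_inuse(const void* chunkptr, size_t chunklen,
+                              const void* userptr, size_t userlen,
+                              void* arg)
+      {
+        if (userptr != NULL)
+          *(size_t*)arg += userlen;
+      }
+
+      size_t used = 0;
+      dlmalloc_walk_heap(count_inuse, &used);
+*/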
+
+/* -------------------- Alternative MORECORE functions ------------------- */
+
+/*
+  Guidelines for creating a custom version of MORECORE:
+
+  * For best performance, MORECORE should allocate in multiples of pagesize.
+  * MORECORE may allocate more memory than requested. (Or even less,
+      but this will usually result in a malloc failure.)
+  * MORECORE must not allocate memory when given argument zero, but
+      instead return one past the end address of memory from previous
+      nonzero call.
+  * For best performance, consecutive calls to MORECORE with positive
+      arguments should return increasing addresses, indicating that
+      space has been contiguously extended.
+  * Even though consecutive calls to MORECORE need not return contiguous
+      addresses, it must be OK for malloc'ed chunks to span multiple
+      regions in those cases where they do happen to be contiguous.
+  * MORECORE need not handle negative arguments -- it may instead
+      just return MFAIL when given negative arguments.
+      Negative arguments are always multiples of pagesize. MORECORE
+      must not misinterpret negative args as large positive unsigned
+      args. You can suppress all such calls from even occurring by defining
+      MORECORE_CANNOT_TRIM.
+
+  As an example alternative MORECORE, here is a custom allocator
+  kindly contributed for pre-OSX macOS.  It uses virtually but not
+  necessarily physically contiguous non-paged memory (locked in,
+  present and won't get swapped out).  You can use it by uncommenting
+  this section, adding some #includes, and setting up the appropriate
+  defines above:
+
+      #define MORECORE osMoreCore
+
+  There is also a shutdown routine that should somehow be called for
+  cleanup upon program exit.
+
+  #define MAX_POOL_ENTRIES 100
+  #define MINIMUM_MORECORE_SIZE  (64 * 1024U)
+  static int next_os_pool;
+  void *our_os_pools[MAX_POOL_ENTRIES];
+
+  void *osMoreCore(int size)
+  {
+    void *ptr = 0;
+    static void *sbrk_top = 0;
+
+    if (size > 0)
+    {
+      if (size < MINIMUM_MORECORE_SIZE)
+         size = MINIMUM_MORECORE_SIZE;
+      if (CurrentExecutionLevel() == kTaskLevel)
+         ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
+      if (ptr == 0)
+      {
+        return (void *) MFAIL;
+      }
+      // save ptrs so they can be freed during cleanup
+      our_os_pools[next_os_pool] = ptr;
+      next_os_pool++;
+      ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
+      sbrk_top = (char *) ptr + size;
+      return ptr;
+    }
+    else if (size < 0)
+    {
+      // we don't currently support shrink behavior
+      return (void *) MFAIL;
+    }
+    else
+    {
+      return sbrk_top;
+    }
+  }
+
+  // cleanup any allocated memory pools
+  // called as last thing before shutting down driver
+
+  void osCleanupMem(void)
+  {
+    void **ptr;
+
+    for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
+      if (*ptr)
+      {
+         PoolDeallocate(*ptr);
+         *ptr = 0;
+      }
+  }
+
+*/
+
+
+/* -----------------------------------------------------------------------
+History:
+    V2.8.3 Thu Sep 22 11:16:32 2005  Doug Lea  (dl at gee)
+      * Add max_footprint functions
+      * Ensure all appropriate literals are size_t
+      * Fix conditional compilation problem for some #define settings
+      * Avoid concatenating segments with the one provided
+        in create_mspace_with_base
+      * Rename some variables to avoid compiler shadowing warnings
+      * Use explicit lock initialization.
+      * Better handling of sbrk interference.
+      * Simplify and fix segment insertion, trimming and mspace_destroy
+      * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
+      * Thanks especially to Dennis Flanagan for help on these.
+
+    V2.8.2 Sun Jun 12 16:01:10 2005  Doug Lea  (dl at gee)
+      * Fix memalign brace error.
+
+    V2.8.1 Wed Jun  8 16:11:46 2005  Doug Lea  (dl at gee)
+      * Fix improper #endif nesting in C++
+      * Add explicit casts needed for C++
+
+    V2.8.0 Mon May 30 14:09:02 2005  Doug Lea  (dl at gee)
+      * Use trees for large bins
+      * Support mspaces
+      * Use segments to unify sbrk-based and mmap-based system allocation,
+        removing need for emulation on most platforms without sbrk.
+      * Default safety checks
+      * Optional footer checks. Thanks to William Robertson for the idea.
+      * Internal code refactoring
+      * Incorporate suggestions and platform-specific changes.
+        Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
+        Aaron Bachmann,  Emery Berger, and others.
+      * Speed up non-fastbin processing enough to remove fastbins.
+      * Remove useless cfree() to avoid conflicts with other apps.
+      * Remove internal memcpy, memset. Compilers handle builtins better.
+      * Remove some options that no one ever used and rename others.
+
+    V2.7.2 Sat Aug 17 09:07:30 2002  Doug Lea  (dl at gee)
+      * Fix malloc_state bitmap array misdeclaration
+
+    V2.7.1 Thu Jul 25 10:58:03 2002  Doug Lea  (dl at gee)
+      * Allow tuning of FIRST_SORTED_BIN_SIZE
+      * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
+      * Better detection and support for non-contiguousness of MORECORE.
+        Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
+      * Bypass most of malloc if no frees. Thanks To Emery Berger.
+      * Fix freeing of old top non-contiguous chunk in sysmalloc.
+      * Raised default trim and map thresholds to 256K.
+      * Fix mmap-related #defines. Thanks to Lubos Lunak.
+      * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
+      * Branch-free bin calculation
+      * Default trim and mmap thresholds now 256K.
+
+    V2.7.0 Sun Mar 11 14:14:06 2001  Doug Lea  (dl at gee)
+      * Introduce independent_comalloc and independent_calloc.
+        Thanks to Michael Pachos for motivation and help.
+      * Make optional .h file available
+      * Allow > 2GB requests on 32bit systems.
+      * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
+        Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
+        and Anonymous.
+      * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
+        helping test this.)
+      * memalign: check alignment arg
+      * realloc: don't try to shift chunks backwards, since this
+        leads to  more fragmentation in some programs and doesn't
+        seem to help in any others.
+      * Collect all cases in malloc requiring system memory into sysmalloc
+      * Use mmap as backup to sbrk
+      * Place all internal state in malloc_state
+      * Introduce fastbins (although similar to 2.5.1)
+      * Many minor tunings and cosmetic improvements
+      * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
+      * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
+        Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
+      * Include errno.h to support default failure action.
+
+    V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
+      * return null for negative arguments
+      * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
+         * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
+          (e.g. WIN32 platforms)
+         * Cleanup header file inclusion for WIN32 platforms
+         * Cleanup code to avoid Microsoft Visual C++ compiler complaints
+         * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
+           memory allocation routines
+         * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
+         * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
+           usage of 'assert' in non-WIN32 code
+         * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
+           avoid infinite loop
+      * Always call 'fREe()' rather than 'free()'
+
+    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
+      * Fixed ordering problem with boundary-stamping
+
+    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
+      * Added pvalloc, as recommended by H.J. Liu
+      * Added 64bit pointer support mainly from Wolfram Gloger
+      * Added anonymously donated WIN32 sbrk emulation
+      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
+      * malloc_extend_top: fix mask error that caused wastage after
+        foreign sbrks
+      * Add linux mremap support code from HJ Liu
+
+    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
+      * Integrated most documentation with the code.
+      * Add support for mmap, with help from
+        Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
+      * Use last_remainder in more cases.
+      * Pack bins using idea from  colin@nyx10.cs.du.edu
+      * Use ordered bins instead of best-fit threshold
+      * Eliminate block-local decls to simplify tracing and debugging.
+      * Support another case of realloc via move into top
+      * Fix error occurring when initial sbrk_base not word-aligned.
+      * Rely on page size for units instead of SBRK_UNIT to
+        avoid surprises about sbrk alignment conventions.
+      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
+        (raymond@es.ele.tue.nl) for the suggestion.
+      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
+      * More precautions for cases where other routines call sbrk,
+        courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
+      * Added macros etc., allowing use in linux libc from
+        H.J. Lu (hjl@gnu.ai.mit.edu)
+      * Inverted this history list
+
+    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
+      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
+      * Removed all preallocation code since under current scheme
+        the work required to undo bad preallocations exceeds
+        the work saved in good cases for most test programs.
+      * No longer use return list or unconsolidated bins since
+        no scheme using them consistently outperforms those that don't
+        given above changes.
+      * Use best fit for very large chunks to prevent some worst-cases.
+      * Added some support for debugging
+
+    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
+      * Removed footers when chunks are in use. Thanks to
+        Paul Wilson (wilson@cs.texas.edu) for the suggestion.
+
+    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
+      * Added malloc_trim, with help from Wolfram Gloger
+        (wmglo@Dent.MED.Uni-Muenchen.DE).
+
+    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)
+
+    V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
+      * realloc: try to expand in both directions
+      * malloc: swap order of clean-bin strategy;
+      * realloc: only conditionally expand backwards
+      * Try not to scavenge used bins
+      * Use bin counts as a guide to preallocation
+      * Occasionally bin return list chunks in first scan
+      * Add a few optimizations from colin@nyx10.cs.du.edu
+
+    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
+      * faster bin computation & slightly different binning
+      * merged all consolidations to one part of malloc proper
+         (eliminating old malloc_find_space & malloc_clean_bin)
+      * Scan 2 returns chunks (not just 1)
+      * Propagate failure in realloc if malloc returns 0
+      * Add stuff to allow compilation on non-ANSI compilers
+          from kpv@research.att.com
+
+    V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
+      * removed potential for odd address access in prev_chunk
+      * removed dependency on getpagesize.h
+      * misc cosmetics and a bit more internal documentation
+      * anticosmetics: mangled names in macros to evade debugger strangeness
+      * tested on sparc, hp-700, dec-mips, rs6000
+          with gcc & native cc (hp, dec only) allowing
+          Detlefs & Zorn comparison study (in SIGPLAN Notices.)
+
+    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
+      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
+         structure of old version,  but most details differ.)
+ 
+*/
diff --git a/libc/bionic/dlmalloc.h b/libc/bionic/dlmalloc.h
new file mode 100644
index 0000000..75b5e1f
--- /dev/null
+++ b/libc/bionic/dlmalloc.h
@@ -0,0 +1,640 @@
+/*
+  Default header file for malloc-2.8.x, written by Doug Lea
+  and released to the public domain, as explained at
+  http://creativecommons.org/licenses/publicdomain. 
+ 
+  last update: Mon Aug 15 08:55:52 2005  Doug Lea  (dl at gee)
+
+  This header is for ANSI C/C++ only.  You can set any of
+  the following #defines before including:
+
+  * If USE_DL_PREFIX is defined, it is assumed that malloc.c 
+    was also compiled with this option, so all routines
+    have names starting with "dl".
+
+  * If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this
+    file will be #included AFTER <malloc.h>. This is needed only if
+    your system defines a struct mallinfo that is incompatible with the
+    standard one declared here.  Otherwise, you can include this file
+    INSTEAD of your system <malloc.h>.  At least on ANSI, all
+    declarations should be compatible with system versions
+
+  * If MSPACES is defined, declarations for mspace versions are included.
+*/
+
+#ifndef MALLOC_280_H
+#define MALLOC_280_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stddef.h>   /* for size_t */
+
+#if !ONLY_MSPACES
+
+/* Check an additional macro for the five primary functions */
+#if !defined(USE_DL_PREFIX) || !defined(MALLOC_LEAK_CHECK) 
+#define dlcalloc               calloc
+#define dlfree                 free
+#define dlmalloc               malloc
+#define dlmemalign             memalign
+#define dlrealloc              realloc
+#endif
+
+#ifndef USE_DL_PREFIX
+#define dlvalloc               valloc
+#define dlpvalloc              pvalloc
+#define dlmallinfo             mallinfo
+#define dlmallopt              mallopt
+#define dlmalloc_trim          malloc_trim
+#define dlmalloc_walk_free_pages \
+                               malloc_walk_free_pages
+#define dlmalloc_walk_heap \
+                               malloc_walk_heap
+#define dlmalloc_stats         malloc_stats
+#define dlmalloc_usable_size   malloc_usable_size
+#define dlmalloc_footprint     malloc_footprint
+#define dlmalloc_max_allowed_footprint \
+                               malloc_max_allowed_footprint
+#define dlmalloc_set_max_allowed_footprint \
+                               malloc_set_max_allowed_footprint
+#define dlmalloc_max_footprint malloc_max_footprint
+#define dlindependent_calloc   independent_calloc
+#define dlindependent_comalloc independent_comalloc
+#endif /* USE_DL_PREFIX */
+
+
+/*
+  malloc(size_t n)
+  Returns a pointer to a newly allocated chunk of at least n bytes, or
+  null if no space is available, in which case errno is set to ENOMEM
+  on ANSI C systems.
+
+  If n is zero, malloc returns a minimum-sized chunk. (The minimum
+  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
+  systems.)  Note that size_t is an unsigned type, so calls with
+  arguments that would be negative if signed are interpreted as
+  requests for huge amounts of space, which will often fail. The
+  maximum supported value of n differs across systems, but is in all
+  cases less than the maximum representable value of a size_t.
+*/
+void* dlmalloc(size_t);
+
+/*
+  free(void* p)
+  Releases the chunk of memory pointed to by p, that had been previously
+  allocated using malloc or a related routine such as realloc.
+  It has no effect if p is null. If p was not malloced or already
+  freed, free(p) will by default cause the current program to abort.
+*/
+void  dlfree(void*);
+
+/*
+  calloc(size_t n_elements, size_t element_size);
+  Returns a pointer to n_elements * element_size bytes, with all locations
+  set to zero.
+*/
+void* dlcalloc(size_t, size_t);
+
+/*
+  realloc(void* p, size_t n)
+  Returns a pointer to a chunk of size n that contains the same data
+  as does chunk p up to the minimum of (n, p's size) bytes, or null
+  if no space is available.
+
+  The returned pointer may or may not be the same as p. The algorithm
+  prefers extending p in most cases when possible, otherwise it
+  employs the equivalent of a malloc-copy-free sequence.
+
+  If p is null, realloc is equivalent to malloc.
+
+  If space is not available, realloc returns null, errno is set (if on
+  ANSI) and p is NOT freed.
+
+  If n is for fewer bytes than already held by p, the newly unused
+  space is lopped off and freed if possible.  realloc with a size
+  argument of zero (re)allocates a minimum-sized chunk.
+
+  The old unix realloc convention of allowing the last-free'd chunk
+  to be used as an argument to realloc is not supported.
+*/
+
+void* dlrealloc(void*, size_t);
+
+/*
+  memalign(size_t alignment, size_t n);
+  Returns a pointer to a newly allocated chunk of n bytes, aligned
+  in accord with the alignment argument.
+
+  The alignment argument should be a power of two. If the argument is
+  not a power of two, the nearest greater power is used.
+  8-byte alignment is guaranteed by normal malloc calls, so don't
+  bother calling memalign with an argument of 8 or less.
+
+  Overreliance on memalign is a sure way to fragment space.
+*/
+void* dlmemalign(size_t, size_t);
+
+/*
+  valloc(size_t n);
+  Equivalent to memalign(pagesize, n), where pagesize is the page
+  size of the system. If the pagesize is unknown, 4096 is used.
+*/
+void* dlvalloc(size_t);
+
+/*
+  mallopt(int parameter_number, int parameter_value)
+  Sets tunable parameters. The format is to provide a
+  (parameter-number, parameter-value) pair.  mallopt then sets the
+  corresponding parameter to the argument value if it can (i.e., so
+  long as the value is meaningful), and returns 1 if successful else
+  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
+  normally defined in malloc.h.  None of these are used in this malloc,
+  so setting them has no effect. But this malloc also supports other
+  options in mallopt:
+
+  Symbol            param #  default    allowed param values
+  M_TRIM_THRESHOLD     -1   2*1024*1024   any   (-1U disables trimming)
+  M_GRANULARITY        -2     page size   any power of 2 >= page size
+  M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
+*/
+int dlmallopt(int, int);
+
+#define M_TRIM_THRESHOLD     (-1)
+#define M_GRANULARITY        (-2)
+#define M_MMAP_THRESHOLD     (-3)
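+
+/*
+  Usage sketch (illustrative): lower the trim threshold so free memory
+  is returned to the system more eagerly; mallopt returns 1 on success
+  and 0 otherwise:
+
+    if (mallopt(M_TRIM_THRESHOLD, 128*1024) == 0)
+      ;  // value was rejected
+*/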
+
+
+/*
+  malloc_footprint();
+  Returns the number of bytes obtained from the system.  The total
+  number of bytes allocated by malloc, realloc etc., is less than this
+  value. Unlike mallinfo, this function returns only a precomputed
+  result, so can be called frequently to monitor memory consumption.
+  Even if locks are otherwise defined, this function does not use them,
+  so results might not be up to date.
+*/
+size_t dlmalloc_footprint(void);
+
+/*
+  malloc_max_allowed_footprint();
+  Returns the number of bytes that the heap is allowed to obtain
+  from the system.  malloc_footprint() should always return a
+  size less than or equal to max_allowed_footprint, unless the
+  max_allowed_footprint was set to a value smaller than the
+  footprint at the time.
+
+  This function is only available if dlmalloc.c was compiled
+  with USE_MAX_ALLOWED_FOOTPRINT set.
+*/
+size_t dlmalloc_max_allowed_footprint(void);
+
+/*
+  malloc_set_max_allowed_footprint();
+  Set the maximum number of bytes that the heap is allowed to
+  obtain from the system.  The size will be rounded up to a whole
+  page, and the rounded number will be returned from future calls
+  to malloc_max_allowed_footprint().  If the new max_allowed_footprint
+  is larger than the current footprint, the heap will never grow
+  larger than max_allowed_footprint.  If the new max_allowed_footprint
+  is smaller than the current footprint, the heap will not grow
+  further.
+
+  This function is only available if dlmalloc.c was compiled
+  with USE_MAX_ALLOWED_FOOTPRINT set.
+
+  TODO: try to force the heap to give up memory in the shrink case,
+        and update this comment once that happens.
+*/
+void dlmalloc_set_max_allowed_footprint(size_t bytes);
+
+/*
+  malloc_max_footprint();
+  Returns the maximum number of bytes obtained from the system. This
+  value will be greater than current footprint if deallocated space
+  has been reclaimed by the system. The peak number of bytes allocated
+  by malloc, realloc etc., is less than this value. Unlike mallinfo,
+  this function returns only a precomputed result, so can be called
+  frequently to monitor memory consumption.  Even if locks are
+  otherwise defined, this function does not use them, so results might
+  not be up to date.
+*/
+size_t dlmalloc_max_footprint(void);
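+
+/*
+  Monitoring sketch (illustrative): both calls return precomputed
+  values, so they are cheap enough to poll around a large workload
+  (run_workload is hypothetical):
+
+    size_t before = malloc_footprint();
+    run_workload();
+    size_t grew_by = malloc_footprint() - before;
+    size_t peak    = malloc_max_footprint();
+*/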
+
+#if !NO_MALLINFO
+/*
+  mallinfo()
+  Returns (by copy) a struct containing various summary statistics:
+
+  arena:     current total non-mmapped bytes allocated from system
+  ordblks:   the number of free chunks
+  smblks:    always zero.
+  hblks:     current number of mmapped regions
+  hblkhd:    total bytes held in mmapped regions
+  usmblks:   the maximum total allocated space. This will be greater
+                than current total if trimming has occurred.
+  fsmblks:   always zero
+  uordblks:  current total allocated space (normal or mmapped)
+  fordblks:  total free space
+  keepcost:  the maximum number of bytes that could ideally be released
+               back to system via malloc_trim. ("ideally" means that
+               it ignores page restrictions etc.)
+
+  Because these fields are ints, but internal bookkeeping may
+  be kept as longs, the reported values may wrap around zero and
+  thus be inaccurate.
+*/
+#ifndef HAVE_USR_INCLUDE_MALLOC_H
+#ifndef _MALLOC_H_
+#ifndef MALLINFO_FIELD_TYPE
+#define MALLINFO_FIELD_TYPE size_t
+#endif /* MALLINFO_FIELD_TYPE */
+struct mallinfo {
+  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
+  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
+  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
+  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
+  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
+  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
+  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
+  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
+  MALLINFO_FIELD_TYPE fordblks; /* total free space */
+  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
+};
+#endif  /* _MALLOC_H_ */
+#endif  /* HAVE_USR_INCLUDE_MALLOC_H */
+
+struct mallinfo dlmallinfo(void);
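+
+/*
+  Usage sketch (illustrative): snapshot live versus free bytes:
+
+    struct mallinfo mi = mallinfo();
+    size_t in_use    = mi.uordblks;
+    size_t free_left = mi.fordblks;
+*/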
+#endif  /* NO_MALLINFO */
+
+/*
+  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
+
+  independent_calloc is similar to calloc, but instead of returning a
+  single cleared space, it returns an array of pointers to n_elements
+  independent elements that can hold contents of size elem_size, each
+  of which starts out cleared, and can be independently freed,
+  realloc'ed etc. The elements are guaranteed to be adjacently
+  allocated (this is not guaranteed to occur with multiple callocs or
+  mallocs), which may also improve cache locality in some
+  applications.
+
+  The "chunks" argument is optional (i.e., may be null, which is
+  probably the most typical usage). If it is null, the returned array
+  is itself dynamically allocated and should also be freed when it is
+  no longer needed. Otherwise, the chunks array must be of at least
+  n_elements in length. It is filled in with the pointers to the
+  chunks.
+
+  In either case, independent_calloc returns this pointer array, or
+  null if the allocation failed.  If n_elements is zero and "chunks"
+  is null, it returns a chunk representing an array with zero elements
+  (which should be freed if not wanted).
+
+  Each element must be individually freed when it is no longer
+  needed. If you'd like to instead be able to free all at once, you
+  should instead use regular calloc and assign pointers into this
+  space to represent elements.  (In this case though, you cannot
+  independently free elements.)
+
+  independent_calloc simplifies and speeds up implementations of many
+  kinds of pools.  It may also be useful when constructing large data
+  structures that initially have a fixed number of fixed-sized nodes,
+  but the number is not known at compile time, and some of the nodes
+  may later need to be freed. For example:
+
+  struct Node { int item; struct Node* next; };
+
+  struct Node* build_list() {
+    struct Node** pool;
+    int i, n = read_number_of_nodes_needed();
+    if (n <= 0) return 0;
+    pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
+    if (pool == 0) die();
+    // organize into a linked list...
+    struct Node* first = pool[0];
+    for (i = 0; i < n-1; ++i)
+      pool[i]->next = pool[i+1];
+    free(pool);     // Can now free the array (or not, if it is needed later)
+    return first;
+  }
+*/
+void** dlindependent_calloc(size_t, size_t, void**);
+
+/*
+  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
+
+  independent_comalloc allocates, all at once, a set of n_elements
+  chunks with sizes indicated in the "sizes" array.    It returns
+  an array of pointers to these elements, each of which can be
+  independently freed, realloc'ed etc. The elements are guaranteed to
+  be adjacently allocated (this is not guaranteed to occur with
+  multiple callocs or mallocs), which may also improve cache locality
+  in some applications.
+
+  The "chunks" argument is optional (i.e., may be null). If it is null
+  the returned array is itself dynamically allocated and should also
+  be freed when it is no longer needed. Otherwise, the chunks array
+  must be of at least n_elements in length. It is filled in with the
+  pointers to the chunks.
+
+  In either case, independent_comalloc returns this pointer array, or
+  null if the allocation failed.  If n_elements is zero and chunks is
+  null, it returns a chunk representing an array with zero elements
+  (which should be freed if not wanted).
+
+  Each element must be individually freed when it is no longer
+  needed. If you'd like to instead be able to free all at once, you
+  should instead use a single regular malloc, and assign pointers at
+  particular offsets in the aggregate space. (In this case though, you
+  cannot independently free elements.)
+
+  independent_comalloc differs from independent_calloc in that each
+  element may have a different size, and also that it does not
+  automatically clear elements.
+
+  independent_comalloc can be used to speed up allocation in cases
+  where several structs or objects must always be allocated at the
+  same time.  For example:
+
+  struct Head { ... }
+  struct Foot { ... }
+
+  void send_message(char* msg) {
+    int msglen = strlen(msg);
+    size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
+    void* chunks[3];
+    if (independent_comalloc(3, sizes, chunks) == 0)
+      die();
+    struct Head* head = (struct Head*)(chunks[0]);
+    char*        body = (char*)(chunks[1]);
+    struct Foot* foot = (struct Foot*)(chunks[2]);
+    // ...
+  }
+
+  In general though, independent_comalloc is worth using only for
+  larger values of n_elements. For small values, you probably won't
+  detect enough difference from series of malloc calls to bother.
+
+  Overuse of independent_comalloc can increase overall memory usage,
+  since it cannot reuse existing noncontiguous small chunks that
+  might be available for some of the elements.
+*/
+void** dlindependent_comalloc(size_t, size_t*, void**);
+
+
+/*
+  pvalloc(size_t n);
+  Equivalent to valloc(minimum-page-that-holds(n)), that is,
+  round up n to nearest pagesize.
+ */
+void*  dlpvalloc(size_t);
+
+/*
+  malloc_trim(size_t pad);
+
+  If possible, gives memory back to the system (via negative arguments
+  to sbrk) if there is unused memory at the `high' end of the malloc
+  pool or in unused MMAP segments. You can call this after freeing
+  large blocks of memory to potentially reduce the system-level memory
+  requirements of a program. However, it cannot guarantee to reduce
+  memory. Under some allocation patterns, some large free blocks of
+  memory will be locked between two used chunks, so they cannot be
+  given back to the system.
+
+  The `pad' argument to malloc_trim represents the amount of free
+  trailing space to leave untrimmed. If this argument is zero, only
+  the minimum amount of memory to maintain internal data structures
+  will be left. Non-zero arguments can be supplied to maintain enough
+  trailing space to service future expected allocations without having
+  to re-obtain memory from the system.
+
+  Malloc_trim returns 1 if it actually released any memory, else 0.
+*/
+int  dlmalloc_trim(size_t);
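+
+/*
+  Usage sketch (illustrative): after releasing a large working set,
+  hand unused pages back to the system while keeping 64k of slack for
+  future allocations (big_buffer is hypothetical):
+
+    free(big_buffer);
+    if (malloc_trim(64*1024))
+      ;  // some memory was actually returned
+*/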
+
+/*
+  malloc_walk_free_pages(handler, harg)
+
+  Calls the provided handler on each free region in the heap.  The
+  memory between start and end are guaranteed not to contain any
+  important data, so the handler is free to alter the contents
+  in any way.  This can be used to advise the OS that large free
+  regions may be swapped out.
+
+  The value in harg will be passed to each call of the handler.
+ */
+void dlmalloc_walk_free_pages(void(*handler)(void *start, void *end, void *arg),
+    void *harg);
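+
+/*
+  Handler sketch (illustrative): since [start, end) holds no live data,
+  a handler may, for example, hint the kernel that the pages can be
+  reclaimed (assumes <sys/mman.h> and suitably aligned bounds):
+
+    static void advise_free(void *start, void *end, void *arg) {
+        madvise(start, (char*)end - (char*)start, MADV_DONTNEED);
+    }
+
+    malloc_walk_free_pages(advise_free, NULL);
+*/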
+
+/*
+  malloc_walk_heap(handler, harg)
+
+  Calls the provided handler on each object or free region in the
+  heap.  The handler will receive the chunk pointer and length, the
+  object pointer and length, and the value in harg on each call.
+ */
+void dlmalloc_walk_heap(void(*handler)(const void *chunkptr, size_t chunklen,
+                                       const void *userptr, size_t userlen,
+                                       void *arg),
+                        void *harg);
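+
+/*
+  Handler sketch (illustrative): tally rough per-chunk overhead by
+  comparing each chunk's length with its user-visible length:
+
+    static void tally(const void *chunkptr, size_t chunklen,
+                      const void *userptr, size_t userlen, void *arg) {
+        *(size_t*)arg += chunklen - userlen;
+    }
+
+    size_t overhead = 0;
+    malloc_walk_heap(tally, &overhead);
+*/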
+
+/*
+  malloc_usable_size(void* p);
+
+  Returns the number of bytes you can actually use in
+  an allocated chunk, which may be more than you requested (although
+  often not) due to alignment and minimum size constraints.
+  You can use this many bytes without worrying about
+  overwriting other allocated objects. This is not a particularly great
+  programming practice. malloc_usable_size can be more useful in
+  debugging and assertions, for example:
+
+  p = malloc(n);
+  assert(malloc_usable_size(p) >= 256);
+*/
+size_t dlmalloc_usable_size(void*);
+
+/*
+  malloc_stats();
+  Prints on stderr the amount of space obtained from the system (both
+  via sbrk and mmap), the maximum amount (which may be more than
+  current if malloc_trim and/or munmap got called), and the current
+  number of bytes allocated via malloc (or realloc, etc) but not yet
+  freed. Note that this is the number of bytes allocated, not the
+  number requested. It will be larger than the number requested
+  because of alignment and bookkeeping overhead. Because it includes
+  alignment wastage as being in use, this figure may be greater than
+  zero even when no user-level chunks are allocated.
+
+  The reported current and maximum system memory can be inaccurate if
+  a program makes other calls to system memory allocation functions
+  (normally sbrk) outside of malloc.
+
+  malloc_stats prints only the most commonly interesting statistics.
+  More information can be obtained by calling mallinfo.
+*/
+void  dlmalloc_stats(void);
+
+#endif /* !ONLY_MSPACES */
+
+#if MSPACES
+
+/*
+  mspace is an opaque type representing an independent
+  region of space that supports mspace_malloc, etc.
+*/
+typedef void* mspace;
+
+/*
+  create_mspace creates and returns a new independent space with the
+  given initial capacity, or, if 0, the default granularity size.  It
+  returns null if there is no system memory available to create the
+  space.  If argument locked is non-zero, the space uses a separate
+  lock to control access. The capacity of the space will grow
+  dynamically as needed to service mspace_malloc requests.  You can
+  control the sizes of incremental increases of this space by
+  compiling with a different DEFAULT_GRANULARITY or dynamically
+  setting with mallopt(M_GRANULARITY, value).
+*/
+mspace create_mspace(size_t capacity, int locked);
+
+/*
+  destroy_mspace destroys the given space, and attempts to return all
+  of its memory back to the system, returning the total number of
+  bytes freed. After destruction, the results of access to all memory
+  used by the space become undefined.
+*/
+size_t destroy_mspace(mspace msp);
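+
+/*
+  Lifecycle sketch (illustrative): a private, lock-protected arena:
+
+    mspace msp = create_mspace(0, 1);    // default capacity, locked
+    void* p = mspace_malloc(msp, 128);
+    mspace_free(msp, p);
+    size_t freed = destroy_mspace(msp);  // bytes returned to the system
+*/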
+
+/*
+  create_mspace_with_base uses the memory supplied as the initial base
+  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
+  space is used for bookkeeping, so the capacity must be at least this
+  large. (Otherwise 0 is returned.) When this initial space is
+  exhausted, additional memory will be obtained from the system.
+  Destroying this space will deallocate all additionally allocated
+  space (if possible) but not the initial base.
+*/
+mspace create_mspace_with_base(void* base, size_t capacity, int locked);
+
+/*
+  mspace_malloc behaves as malloc, but operates within
+  the given space.
+*/
+void* mspace_malloc(mspace msp, size_t bytes);
+
+/*
+  mspace_free behaves as free, but operates within
+  the given space.
+
+  If compiled with FOOTERS==1, mspace_free is not actually needed.
+  free may be called instead of mspace_free because freed chunks from
+  any space are handled by their originating spaces.
+*/
+void mspace_free(mspace msp, void* mem);
+
+/*
+  mspace_realloc behaves as realloc, but operates within
+  the given space.
+
+  If compiled with FOOTERS==1, mspace_realloc is not actually
+  needed.  realloc may be called instead of mspace_realloc because
+  realloced chunks from any space are handled by their originating
+  spaces.
+*/
+void* mspace_realloc(mspace msp, void* mem, size_t newsize);
+
+/*
+  mspace_calloc behaves as calloc, but operates within
+  the given space.
+*/
+void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
+
+/*
+  mspace_memalign behaves as memalign, but operates within
+  the given space.
+*/
+void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
+
+/*
+  mspace_independent_calloc behaves as independent_calloc, but
+  operates within the given space.
+*/
+void** mspace_independent_calloc(mspace msp, size_t n_elements,
+                                 size_t elem_size, void* chunks[]);
+
+/*
+  mspace_independent_comalloc behaves as independent_comalloc, but
+  operates within the given space.
+*/
+void** mspace_independent_comalloc(mspace msp, size_t n_elements,
+                                   size_t sizes[], void* chunks[]);
+
+/*
+  mspace_footprint() returns the number of bytes obtained from the
+  system for this space.
+*/
+size_t mspace_footprint(mspace msp);
+
+/*
+  mspace_max_allowed_footprint() returns the number of bytes that
+  this space is allowed to obtain from the system. See
+  malloc_max_allowed_footprint() for a more in-depth description.
+
+  This function is only available if dlmalloc.c was compiled
+  with USE_MAX_ALLOWED_FOOTPRINT set.
+*/
+size_t mspace_max_allowed_footprint(mspace msp);
+
+/*
+  mspace_set_max_allowed_footprint() sets the maximum number of
+  bytes (rounded up to a page) that this space is allowed to
+  obtain from the system.  See malloc_set_max_allowed_footprint()
+  for a more in-depth description.
+
+  This function is only available if dlmalloc.c was compiled
+  with USE_MAX_ALLOWED_FOOTPRINT set.
+*/
+void mspace_set_max_allowed_footprint(mspace msp, size_t bytes);
+
+/*
+  mspace_max_footprint() returns the maximum number of bytes obtained
+  from the system over the lifetime of this space.
+*/
+size_t mspace_max_footprint(mspace msp);
+
+
+#if !NO_MALLINFO
+/*
+  mspace_mallinfo behaves as mallinfo, but reports properties of
+  the given space.
+*/
+struct mallinfo mspace_mallinfo(mspace msp);
+#endif /* NO_MALLINFO */
+
+/*
+  mspace_malloc_stats behaves as malloc_stats, but reports
+  properties of the given space.
+*/
+void mspace_malloc_stats(mspace msp);
+
+/*
+  mspace_trim behaves as malloc_trim, but
+  operates within the given space.
+*/
+int mspace_trim(mspace msp, size_t pad);
+
+/*
+  An alias for mallopt.
+*/
+int mspace_mallopt(int, int);
+
+#endif  /* MSPACES */
+
+#ifdef __cplusplus
+};  /* end of extern "C" */
+#endif
+
+#endif /* MALLOC_280_H */
diff --git a/libc/bionic/drand48.c b/libc/bionic/drand48.c
new file mode 100644
index 0000000..fd48196
--- /dev/null
+++ b/libc/bionic/drand48.c
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 1993 Martin Birgmeier
+ * All rights reserved.
+ *
+ * You may redistribute unmodified or modified versions of this source
+ * code provided that the above copyright notice and this and the
+ * following conditions are retained.
+ *
+ * This software is provided ``as is'', and comes with no warranties
+ * of any kind. I shall in no event be liable for anything that happens
+ * to anyone/anything when using this software.
+ */
+
+#include <sys/cdefs.h>
+
+#include "rand48.h"
+
+extern unsigned short _rand48_seed[3];
+
+double
+drand48(void)
+{
+	return erand48(_rand48_seed);
+}
diff --git a/libc/bionic/eabi.c b/libc/bionic/eabi.c
new file mode 100644
index 0000000..f212d05
--- /dev/null
+++ b/libc/bionic/eabi.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <stddef.h>
+#include <string.h>
+
+void* __dso_handle = 0;
+
+/* Make this a weak symbol to avoid a multiple definition error when linking
+ * with libstdc++-v3.  */
+int __attribute__((weak))
+__aeabi_atexit (void *object, void (*destructor) (void *), void *dso_handle)
+{
+    //return __cxa_atexit(destructor, object, dso_handle);
+    return 0;
+}
+
+
+void __aeabi_memcpy8(void *dest, const void *src, size_t n) {
+    memcpy(dest, src, n);
+}
+
+void __aeabi_memcpy4(void *dest, const void *src, size_t n) {
+    memcpy(dest, src, n);
+}
+
+void __aeabi_memcpy(void *dest, const void *src, size_t n) {
+    memcpy(dest, src, n);
+}
+
+
+void __aeabi_memmove8(void *dest, const void *src, size_t n) {
+    memmove(dest, src, n);
+}
+
+void __aeabi_memmove4(void *dest, const void *src, size_t n) {
+    memmove(dest, src, n);
+}
+
+void __aeabi_memmove(void *dest, const void *src, size_t n) {
+    memmove(dest, src, n);
+}
+
+/*
+ * __aeabi_memset takes its size and value arguments in the reverse order
+ * of ANSI memset. This allows __aeabi_memclr to tail-call __aeabi_memset.
+ */
+ 
+void __aeabi_memset8(void *dest, size_t n, int c) {
+    memset(dest, c, n);
+}
+
+void __aeabi_memset4(void *dest, size_t n, int c) {
+    memset(dest, c, n);
+}
+
+void __aeabi_memset(void *dest, size_t n, int c) {
+    memset(dest, c, n);
+}
+
+
+void __aeabi_memclr8(void *dest, size_t n) {
+    __aeabi_memset8(dest, n, 0);
+}
+
+void __aeabi_memclr4(void *dest, size_t n) {
+    __aeabi_memset4(dest, n, 0);
+}
+
+void __aeabi_memclr(void *dest, size_t n) {
+    __aeabi_memset(dest, n, 0);
+}
diff --git a/libc/bionic/erand48.c b/libc/bionic/erand48.c
new file mode 100644
index 0000000..843ff34
--- /dev/null
+++ b/libc/bionic/erand48.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 1993 Martin Birgmeier
+ * All rights reserved.
+ *
+ * You may redistribute unmodified or modified versions of this source
+ * code provided that the above copyright notice and this and the
+ * following conditions are retained.
+ *
+ * This software is provided ``as is'', and comes with no warranties
+ * of any kind. I shall in no event be liable for anything that happens
+ * to anyone/anything when using this software.
+ */
+
+#include <sys/cdefs.h>
+
+#include "rand48.h"
+
+double
+erand48(unsigned short xseed[3])
+{
+	_dorand48(xseed);
+	return ldexp((double) xseed[0], -48) +
+	       ldexp((double) xseed[1], -32) +
+	       ldexp((double) xseed[2], -16);
+}
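+
+/*
+ * Usage sketch (illustrative, not part of the original file): erand48
+ * returns a double uniformly distributed over [0.0, 1.0) computed from
+ * the caller-supplied 48-bit state, so independent streams only need
+ * separate seed arrays:
+ *
+ *   unsigned short s[3] = { 0x330e, 0xabcd, 0x1234 };  // arbitrary seed
+ *   double r = erand48(s);
+ */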
diff --git a/libc/bionic/fork.c b/libc/bionic/fork.c
new file mode 100644
index 0000000..1c6a4ba
--- /dev/null
+++ b/libc/bionic/fork.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the 
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <unistd.h>
+#include "pthread_internal.h"
+
+extern int  __fork(void);
+
+int  fork(void)
+{
+    int  ret;
+
+    /* Posix mandates that the timers of a fork child process be
+     * disarmed, but not destroyed. To avoid a race condition, we're
+     * going to stop all timers now, and only re-start them in case
+     * of error, or in the parent process
+     */
+    __timer_table_start_stop(1);
+    ret = __fork();
+    if (ret != 0) {  /* not a child process */
+        __timer_table_start_stop(0);
+    }
+    return ret;
+}
diff --git a/libc/bionic/hash.h b/libc/bionic/hash.h
new file mode 100644
index 0000000..3b483f1
--- /dev/null
+++ b/libc/bionic/hash.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 1999 Kungliga Tekniska Högskolan
+ * (Royal Institute of Technology, Stockholm, Sweden). 
+ * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without 
+ * modification, are permitted provided that the following conditions 
+ * are met: 
+ *
+ * 1. Redistributions of source code must retain the above copyright 
+ *    notice, this list of conditions and the following disclaimer. 
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright 
+ *    notice, this list of conditions and the following disclaimer in the 
+ *    documentation and/or other materials provided with the distribution. 
+ *
+ * 3. Neither the name of KTH nor the names of its contributors may be
+ *    used to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY KTH AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL KTH OR ITS CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
+
+/* $Heimdal: hash.h,v 1.1 1999/03/22 19:16:25 joda Exp $
+   $NetBSD: hash.h,v 1.1.1.3 2002/09/12 12:41:42 joda Exp $ */
+
+/* stuff in common between md4, md5, and sha1 */
+
+#ifndef __hash_h__
+#define __hash_h__
+
+#include <stdlib.h>
+#include <string.h>
+
+#ifndef min
+#define min(a,b) (((a)>(b))?(b):(a))
+#endif
+
+/* Vector Crays don't have a good 32-bit type, or more precisely,
+   int32_t as defined by <bind/bitypes.h> isn't 32 bits, and we don't
+   want to depend on being able to redefine this type.  To cope with
+   this we have to clamp the result in some places to [0,2^32); no
+   need to do this on other machines.  Did I say this was a mess?
+   */
+
+#ifdef _CRAY
+#define CRAYFIX(X) ((X) & 0xffffffff)
+#else
+#define CRAYFIX(X) (X)
+#endif
+
+static inline u_int32_t
+cshift (u_int32_t x, unsigned int n)
+{
+    x = CRAYFIX(x);
+    return CRAYFIX((x << n) | (x >> (32 - n)));
+}
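+
+/* Example: cshift is a 32-bit left rotate, e.g.
+   cshift(0x80000000, 1) == 0x00000001. */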
+
+#endif /* __hash_h__ */
diff --git a/libc/bionic/if_indextoname.c b/libc/bionic/if_indextoname.c
new file mode 100644
index 0000000..dc08b28
--- /dev/null
+++ b/libc/bionic/if_indextoname.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <linux/sockios.h>
+#include <net/if.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <errno.h>
+
+/*
+ * Map an interface index into its name.
+ * Returns NULL on error.
+ */
+char*
+if_indextoname(unsigned ifindex, char *ifname)
+{
+    int ctl_sock;
+    struct ifreq ifr;
+    char*  ret = NULL;
+
+    memset(&ifr, 0, sizeof(struct ifreq));
+    ifr.ifr_ifindex = ifindex;
+
+    if ((ctl_sock = socket(AF_INET, SOCK_DGRAM, 0)) >= 0) {
+        if (ioctl(ctl_sock, SIOCGIFNAME, &ifr) >= 0) {
+            ret = strncpy (ifname, ifr.ifr_name, IFNAMSIZ);
+        } else {
+            /* Posix requires ENXIO */
+            if (errno == ENODEV)
+                errno = ENXIO;
+        }
+        close(ctl_sock);
+    }
+    return ret;
+}
diff --git a/libc/bionic/if_nametoindex.c b/libc/bionic/if_nametoindex.c
new file mode 100644
index 0000000..d670e43
--- /dev/null
+++ b/libc/bionic/if_nametoindex.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <unistd.h>
+#include <linux/sockios.h>
+#include <net/if.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+
+/*
+ * Map an interface name into its corresponding index.
+ * Returns 0 on error, as 0 is not a valid index.
+ */
+unsigned int if_nametoindex(const char *ifname)
+{
+    int index;
+    int ctl_sock;
+    struct ifreq ifr;
+
+    memset(&ifr, 0, sizeof(struct ifreq));
+    strncpy(ifr.ifr_name, ifname, IFNAMSIZ);
+    ifr.ifr_name[IFNAMSIZ - 1] = 0;
+
+    index = 0;
+    if ((ctl_sock = socket(AF_INET, SOCK_DGRAM, 0)) >= 0) {
+        if (ioctl(ctl_sock, SIOCGIFINDEX, &ifr) >= 0) {
+            index = ifr.ifr_ifindex;
+        }
+        close(ctl_sock);
+    }
+    return index;
+}
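+
+/*
+ * Usage sketch (illustrative): round-trip a name through its index;
+ * 0 from if_nametoindex and NULL from if_indextoname signal failure:
+ *
+ *   char name[IFNAMSIZ];
+ *   unsigned idx = if_nametoindex("lo");
+ *   if (idx != 0 && if_indextoname(idx, name) != NULL)
+ *       printf("index %u -> %s\n", idx, name);
+ */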
diff --git a/libc/bionic/ioctl.c b/libc/bionic/ioctl.c
new file mode 100644
index 0000000..6dd95d0
--- /dev/null
+++ b/libc/bionic/ioctl.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <stdarg.h>
+
+extern int __ioctl(int, int, void *);
+
+int ioctl(int fd, int request, ...)
+{
+    va_list ap;
+    void * arg;
+
+    va_start(ap, request);
+    arg = va_arg(ap, void *);
+    va_end(ap);
+
+    return __ioctl(fd, request, arg);
+}
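+
+/*
+ * Usage sketch (illustrative): the single variadic argument is read
+ * back as a void* and forwarded unchanged, so any pointer-sized
+ * request argument works:
+ *
+ *   int nread;
+ *   if (ioctl(fd, FIONREAD, &nread) == 0)
+ *       ...  // nread bytes are waiting to be read
+ */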
+
diff --git a/libc/bionic/ldexp.c b/libc/bionic/ldexp.c
new file mode 100644
index 0000000..ec1f3dd
--- /dev/null
+++ b/libc/bionic/ldexp.c
@@ -0,0 +1,122 @@
+/* @(#)s_scalbn.c 5.1 93/09/24 */
+/* @(#)fdlibm.h 5.1 93/09/24 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+#include <sys/cdefs.h>
+
+#include <sys/types.h>
+#include <endian.h>
+#include <math.h>
+
+/* Bit fiddling routines copied from msun/src/math_private.h,v 1.15 */
+
+#if BYTE_ORDER == BIG_ENDIAN
+
+typedef union
+{
+  double value;
+  struct
+  {
+    u_int32_t msw;
+    u_int32_t lsw;
+  } parts;
+} ieee_double_shape_type;
+
+#endif
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+
+typedef union
+{
+  double value;
+  struct
+  {
+    u_int32_t lsw;
+    u_int32_t msw;
+  } parts;
+} ieee_double_shape_type;
+
+#endif
+
+/* Get two 32 bit ints from a double.  */
+
+#define EXTRACT_WORDS(ix0,ix1,d)				\
+do {								\
+  ieee_double_shape_type ew_u;					\
+  ew_u.value = (d);						\
+  (ix0) = ew_u.parts.msw;					\
+  (ix1) = ew_u.parts.lsw;					\
+} while (0)
+
+/* Get the more significant 32 bit int from a double.  */
+
+#define GET_HIGH_WORD(i,d)					\
+do {								\
+  ieee_double_shape_type gh_u;					\
+  gh_u.value = (d);						\
+  (i) = gh_u.parts.msw;						\
+} while (0)
+
+/* Set the more significant 32 bits of a double from an int.  */
+
+#define SET_HIGH_WORD(d,v)					\
+do {								\
+  ieee_double_shape_type sh_u;					\
+  sh_u.value = (d);						\
+  sh_u.parts.msw = (v);						\
+  (d) = sh_u.value;						\
+} while (0)
+
+
+static const double
+two54   =  1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */
+twom54  =  5.55111512312578270212e-17, /* 0x3C900000, 0x00000000 */
+huge   = 1.0e+300,
+tiny   = 1.0e-300;
+
+static double
+_copysign(double x, double y)
+{
+	u_int32_t hx,hy;
+	GET_HIGH_WORD(hx,x);
+	GET_HIGH_WORD(hy,y);
+	SET_HIGH_WORD(x,(hx&0x7fffffff)|(hy&0x80000000));
+	return x;
+}
+
+double
+ldexp(double x, int n)
+{
+	int32_t k,hx,lx;
+	EXTRACT_WORDS(hx,lx,x);
+        k = (hx&0x7ff00000)>>20;		/* extract exponent */
+        if (k==0) {				/* 0 or subnormal x */
+            if ((lx|(hx&0x7fffffff))==0) return x; /* +-0 */
+	    x *= two54;
+	    GET_HIGH_WORD(hx,x);
+	    k = ((hx&0x7ff00000)>>20) - 54;
+            if (n< -50000) return tiny*x; 	/*underflow*/
+	    }
+        if (k==0x7ff) return x+x;		/* NaN or Inf */
+        k = k+n;
+        if (k >  0x7fe) return huge*_copysign(huge,x); /* overflow  */
+        if (k > 0) 				/* normal result */
+	    {SET_HIGH_WORD(x,(hx&0x800fffff)|(k<<20)); return x;}
+        if (k <= -54) {
+            if (n > 50000) 	/* in case integer overflow in n+k */
+		return huge*_copysign(huge,x);	/*overflow*/
+	    else return tiny*_copysign(tiny,x); 	/*underflow*/
+	}
+        k += 54;				/* subnormal result */
+	SET_HIGH_WORD(x,(hx&0x800fffff)|(k<<20));
+        return x*twom54;
+}
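+
+/*
+ * Illustrative values: ldexp scales by powers of two exactly, e.g.
+ * ldexp(1.0, 10) == 1024.0 and ldexp(3.0, -2) == 0.75.
+ */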
diff --git a/libc/bionic/libc_init_common.c b/libc/bionic/libc_init_common.c
new file mode 100644
index 0000000..de4919d
--- /dev/null
+++ b/libc/bionic/libc_init_common.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <elf.h>
+#include <asm/page.h>
+#include "pthread_internal.h"
+#include "atexit.h"
+#include "libc_init_common.h"
+
+#include <bionic_tls.h>
+#include <errno.h>
+
+extern void _init(void);
+extern void _fini(void);
+
+static void call_array(void(**list)())
+{
+    // First element is -1, list is null-terminated
+    while (*++list) {
+        (*list)();
+    }
+}
+
+static void __bionic_do_global_dtors(structors_array_t const * const p)
+{
+    call_array(p->fini_array);
+    //_fini();
+}
+
+extern unsigned __get_sp(void);
+extern pid_t    gettid(void);
+
+char*  __progname;
+char **environ;
+
+/* from asm/page.h */
+unsigned int __page_size = PAGE_SIZE;
+unsigned int __page_shift = PAGE_SHIFT;
+
+
+int __system_properties_init(void);
+
+void __libc_init_common(uintptr_t *elfdata,
+                       void (*onexit)(void),
+                       int (*slingshot)(int, char**, char**),
+                       structors_array_t const * const structors,
+                       void (*pre_ctor_hook)())
+{
+    pthread_internal_t thread;
+    pthread_attr_t thread_attr;
+    void *tls_area[BIONIC_TLS_SLOTS];
+    int argc;
+    char **argv, **envp;
+
+    /* The main thread's stack has been empirically shown to be 84k */
+    unsigned stacktop = (__get_sp() & ~(PAGE_SIZE - 1)) + PAGE_SIZE;
+    unsigned stacksize = 128 * 1024; //84 * 1024;
+    unsigned stackbottom = stacktop - stacksize;
+
+    pthread_attr_init(&thread_attr);
+    pthread_attr_setstack(&thread_attr, (void*)stackbottom, stacksize);
+    _init_thread(&thread, gettid(), &thread_attr, (void*)stackbottom);
+    __init_tls(tls_area, &thread);
+
+    argc = (int) *elfdata++;
+    argv = (char**) elfdata;
+    envp = argv+(argc+1);
+    environ = envp;
+
+    __progname = argv[0] ? argv[0] : "<unknown>";
+
+    errno = 0;
+
+    __system_properties_init();
+
+    if (pre_ctor_hook) pre_ctor_hook();
+
+    // XXX: we should execute the .fini_array upon exit
+
+    // pre-init array.
+    // XXX: I'm not sure what the difference is from the init array.
+    call_array(structors->preinit_array);
+
+    // for compatibility with non-eabi binary, call the .ctors section
+    call_array(structors->ctors_array);
+
+    // call static constructors
+    call_array(structors->init_array);
+
+    exit(slingshot(argc, argv, envp));
+}
diff --git a/libc/bionic/libc_init_common.h b/libc/bionic/libc_init_common.h
new file mode 100644
index 0000000..bbc82e4
--- /dev/null
+++ b/libc/bionic/libc_init_common.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#ifndef LIBC_INIT_COMMON_H
+#define LIBC_INIT_COMMON_H
+
+#include <stdint.h>
+
+typedef struct
+{
+    void (**preinit_array)(void);
+    void (**init_array)(void);
+    void (**fini_array)(void);
+    void (**ctors_array)(void);
+} structors_array_t;
+
+extern __noreturn void __libc_init_common(uintptr_t *elfdata,
+                       void (*onexit)(void),
+                       int (*slingshot)(int, char**, char**),
+                       structors_array_t const * const structors,
+                       void (*pre_ctor_hook)());
+
+#endif
diff --git a/libc/bionic/libc_init_dynamic.c b/libc/bionic/libc_init_dynamic.c
new file mode 100644
index 0000000..8cf24b4
--- /dev/null
+++ b/libc/bionic/libc_init_dynamic.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * libc_init_dynamic.c
+ *
+ * This function takes the raw data block set up by the ELF loader
+ * in the kernel and parses it.  It is invoked by crt0.S which makes
+ * any necessary adjustments and passes calls this function using
+ * the standard C calling convention.
+ *
+ * The arguments are:
+ *  uintptr_t *elfdata	 -- The ELF loader data block; usually from the stack.
+ *                          Basically a pointer to argc.
+ *  void (*onexit)(void) -- Function to install into onexit
+ */
+
+/*
+ * Several Linux ABIs don't pass the onexit pointer, and the ones that
+ * do never use it.  Therefore, unless USE_ONEXIT is defined, we just
+ * ignore the onexit pointer.
+ */
+/* #define USE_ONEXIT */
+
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <elf.h>
+#include "pthread_internal.h"
+#include "atexit.h"
+#include "libc_init_common.h"
+
+extern void malloc_debug_init();
+
+__noreturn void __libc_init(uintptr_t *elfdata,
+                       void (*onexit)(void),
+                       int (*slingshot)(int, char**, char**),
+                       structors_array_t const * const structors)
+{
+    __libc_init_common(elfdata, onexit, slingshot, structors, malloc_debug_init);
+}
diff --git a/libc/bionic/libc_init_static.c b/libc/bionic/libc_init_static.c
new file mode 100644
index 0000000..ec463f7
--- /dev/null
+++ b/libc/bionic/libc_init_static.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * libc_init_static.c
+ *
+ * This function takes the raw data block set up by the ELF loader
+ * in the kernel and parses it.  It is invoked by crt0.S, which makes
+ * any necessary adjustments and then calls this function using
+ * the standard C calling convention.
+ *
+ * The arguments are:
+ *  uintptr_t *elfdata	 -- The ELF loader data block; usually from the stack.
+ *                          Basically a pointer to argc.
+ *  void (*onexit)(void) -- Function to install into onexit
+ */
+
+/*
+ * Several Linux ABIs don't pass the onexit pointer, and the ones that
+ * do never use it.  Therefore, unless USE_ONEXIT is defined, we just
+ * ignore the onexit pointer.
+ */
+/* #define USE_ONEXIT */
+
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <elf.h>
+#include "pthread_internal.h"
+#include "atexit.h"
+#include "libc_init_common.h"
+
+#include <bionic_tls.h>
+#include <errno.h>
+
+__noreturn void __libc_init(uintptr_t *elfdata,
+                       void (*onexit)(void),
+                       int (*slingshot)(int, char**, char**),
+                       structors_array_t const * const structors)
+{
+/* 
+ * To enable malloc checks for statically linked programs, add
+ * "WITH_MALLOC_CHECK_LIBC_A := true" in device/buildspec.mk
+ */
+#ifdef MALLOC_LEAK_CHECK
+    extern void malloc_debug_init();
+    __libc_init_common(elfdata, onexit, slingshot, structors, malloc_debug_init);
+#else
+    __libc_init_common(elfdata, onexit, slingshot, structors, NULL);
+#endif
+}
diff --git a/libc/bionic/logd_write.c b/libc/bionic/logd_write.c
new file mode 100644
index 0000000..7c3608b
--- /dev/null
+++ b/libc/bionic/logd_write.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <time.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/uio.h>
+#include <arpa/inet.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <fcntl.h>
+
+#include <cutils/logger.h>
+#include "logd.h"
+
+#include <pthread.h>
+
+#define LOG_BUF_SIZE	1024
+
+typedef enum {
+    LOG_ID_MAIN = 0,
+    LOG_ID_RADIO,
+    LOG_ID_MAX
+} log_id_t;
+
+static int __write_to_log_init(log_id_t, struct iovec *vec);
+static int (*write_to_log)(log_id_t, struct iovec *vec) = __write_to_log_init;
+static pthread_mutex_t log_init_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static int log_fds[(int)LOG_ID_MAX] = { -1, -1 };
+
+static int __write_to_log_null(log_id_t log_id, struct iovec *vec)
+{
+    return -1;
+}
+
+static int __write_to_log_kernel(log_id_t log_id, struct iovec *vec)
+{
+    ssize_t ret;
+    int log_fd;
+
+    if ((int)log_id >= 0 && (int)log_id < (int)LOG_ID_MAX) {
+        log_fd = log_fds[(int)log_id];
+    } else {
+        return EBADF;
+    }
+
+    do {
+        ret = writev(log_fd, vec, 3);
+    } while (ret < 0 && errno == EINTR);
+
+    return ret;
+}
+
+static int __write_to_log_init(log_id_t log_id, struct iovec *vec)
+{
+    pthread_mutex_lock(&log_init_lock);
+
+    if (write_to_log == __write_to_log_init) {
+        log_fds[LOG_ID_MAIN] = open("/dev/"LOGGER_LOG_MAIN, O_WRONLY);
+        log_fds[LOG_ID_RADIO] = open("/dev/"LOGGER_LOG_RADIO, O_WRONLY);
+
+        write_to_log = __write_to_log_kernel;
+
+        if (log_fds[LOG_ID_MAIN] < 0 || log_fds[LOG_ID_RADIO] < 0) {
+            close(log_fds[LOG_ID_MAIN]);
+            close(log_fds[LOG_ID_RADIO]);
+            log_fds[LOG_ID_MAIN] = -1;
+            log_fds[LOG_ID_RADIO] = -1;
+            write_to_log = __write_to_log_null;
+        }
+    }
+
+    pthread_mutex_unlock(&log_init_lock);
+
+    return write_to_log(log_id, vec);
+}
+
+static int __android_log_write(int prio, const char *tag, const char *msg)
+{
+    struct iovec vec[3];
+    log_id_t log_id = LOG_ID_MAIN;
+
+    if (!tag)
+        tag = "";
+
+    if (!strcmp(tag, "HTC_RIL"))
+        log_id = LOG_ID_RADIO;
+
+    vec[0].iov_base   = (unsigned char *) &prio;
+    vec[0].iov_len    = 1;
+    vec[1].iov_base   = (void *) tag;
+    vec[1].iov_len    = strlen(tag) + 1;
+    vec[2].iov_base   = (void *) msg;
+    vec[2].iov_len    = strlen(msg) + 1;
+
+    return write_to_log(log_id, vec);
+}
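+
+/*
+ * The writev above emits one log record as three back-to-back fields,
+ * with the sizes taken from the iovecs:
+ *
+ *   [ prio : 1 byte ][ tag ... '\0' ][ msg ... '\0' ]
+ */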
+
+
+static int __android_log_vprint(int prio, const char *tag, const char *fmt,
+				va_list ap)
+{
+    char buf[LOG_BUF_SIZE];    
+
+    vsnprintf(buf, LOG_BUF_SIZE, fmt, ap);
+
+    return __android_log_write(prio, tag, buf);
+}
+
+int __libc_android_log_print(int prio, const char *tag, const char *fmt, ...)
+{
+    va_list ap;
+    char buf[LOG_BUF_SIZE];    
+
+    va_start(ap, fmt);
+    vsnprintf(buf, LOG_BUF_SIZE, fmt, ap);
+    va_end(ap);
+
+    return __android_log_write(prio, tag, buf);
+}
+
+int __libc_android_log_assert(const char *cond, const char *tag,
+			      const char *fmt, ...)
+{
+    va_list ap;
+    char buf[LOG_BUF_SIZE];    
+
+    va_start(ap, fmt);
+    vsnprintf(buf, LOG_BUF_SIZE, fmt, ap);
+    va_end(ap);
+
+    __android_log_write(ANDROID_LOG_FATAL, tag, buf);
+
+    exit(1);
+
+    return -1;
+}
diff --git a/libc/bionic/malloc_leak.c b/libc/bionic/malloc_leak.c
new file mode 100644
index 0000000..a0aa2ae
--- /dev/null
+++ b/libc/bionic/malloc_leak.c
@@ -0,0 +1,900 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <errno.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <arpa/inet.h>
+#include <sys/socket.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stddef.h>
+#include <stdarg.h>
+#include <fcntl.h>
+#include <unwind.h>
+
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sys/select.h>
+#include <sys/types.h>
+#include <sys/system_properties.h>
+
+#include "dlmalloc.h"
+#include "logd.h"
+
+// =============================================================================
+// Utilities directly used by Dalvik
+// =============================================================================
+
+#define HASHTABLE_SIZE      1543
+#define BACKTRACE_SIZE      32
+/* flag definitions, currently sharing storage with "size" */
+#define SIZE_FLAG_ZYGOTE_CHILD  (1<<31)
+#define SIZE_FLAG_MASK          (SIZE_FLAG_ZYGOTE_CHILD)
+
+#define MAX_SIZE_T           (~(size_t)0)
+
+/*
+ * In a VM process, this is set to 1 after fork()ing out of zygote.
+ */
+int gMallocLeakZygoteChild = 0;
+
+// =============================================================================
+// Structures
+// =============================================================================
+
+typedef struct HashEntry HashEntry;
+struct HashEntry {
+    size_t slot;
+    HashEntry* prev;
+    HashEntry* next;
+    size_t numEntries;
+    // fields above "size" are NOT sent to the host
+    size_t size;
+    size_t allocations;
+    intptr_t backtrace[0];
+};
+
+typedef struct HashTable HashTable;
+struct HashTable {
+    size_t count;
+    HashEntry* slots[HASHTABLE_SIZE];
+};
+
+static pthread_mutex_t gAllocationsMutex = PTHREAD_MUTEX_INITIALIZER;
+static HashTable gHashTable;
+
+// =============================================================================
+// output functions
+// =============================================================================
+
+static int hash_entry_compare(const void* arg1, const void* arg2)
+{
+    HashEntry* e1 = *(HashEntry**)arg1;
+    HashEntry* e2 = *(HashEntry**)arg2;
+
+    size_t nbAlloc1 = e1->allocations;
+    size_t nbAlloc2 = e2->allocations;
+    size_t size1 = e1->size & ~SIZE_FLAG_MASK;
+    size_t size2 = e2->size & ~SIZE_FLAG_MASK;
+    size_t alloc1 = nbAlloc1 * size1;
+    size_t alloc2 = nbAlloc2 * size2;
+
+    // sort in descending order by:
+    // 1) total size
+    // 2) number of allocations
+    //
+    // This is used for sorting, not determination of equality, so we don't
+    // need to compare the bit flags.
+    int result;
+    if (alloc1 > alloc2) {
+        result = -1;
+    } else if (alloc1 < alloc2) {
+        result = 1;
+    } else {
+        if (nbAlloc1 > nbAlloc2) {
+            result = -1;
+        } else if (nbAlloc1 < nbAlloc2) {
+            result = 1;
+        } else {
+            result = 0;
+        }
+    }
+    return result;
+}
+
+/*
+ * Retrieve native heap information.
+ *
+ * "*info" is set to a buffer we allocate
+ * "*overallSize" is set to the size of the "info" buffer
+ * "*infoSize" is set to the size of a single entry
+ * "*totalMemory" is set to the sum of all allocations we're tracking; does
+ *   not include heap overhead
+ * "*backtraceSize" is set to the maximum number of entries in the back trace
+ */
+void get_malloc_leak_info(uint8_t** info, size_t* overallSize,
+        size_t* infoSize, size_t* totalMemory, size_t* backtraceSize)
+{
+    // don't do anything if we have invalid arguments
+    if (info == NULL || overallSize == NULL || infoSize == NULL ||
+            totalMemory == NULL || backtraceSize == NULL) {
+        return;
+    }
+
+    pthread_mutex_lock(&gAllocationsMutex);
+
+    if (gHashTable.count == 0) {
+        *info = NULL;
+        *overallSize = 0;
+        *infoSize = 0;
+        *totalMemory = 0;
+        *backtraceSize = 0;
+        goto done;
+    }
+    
+    void** list = (void**)dlmalloc(sizeof(void*) * gHashTable.count);
+    if (list == NULL) {  /* out of memory: report no data, don't crash */
+        *info = NULL;
+        *overallSize = 0;
+        *infoSize = 0;
+        *totalMemory = 0;
+        *backtraceSize = 0;
+        goto done;
+    }
+
+    // debug_log("*****\ngHashTable.count = %d\n", gHashTable.count);
+    // debug_log("list = %p\n", list);
+
+    // get the entries into an array to be sorted
+    int index = 0;
+    int i;
+    *totalMemory = 0;
+    for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
+        HashEntry* entry = gHashTable.slots[i];
+        while (entry != NULL) {
+            list[index] = entry;
+            *totalMemory = *totalMemory +
+                ((entry->size & ~SIZE_FLAG_MASK) * entry->allocations);
+            index++;
+            entry = entry->next;
+        }
+    }
+
+    // debug_log("sorted list!\n");
+    // XXX: the protocol doesn't allow variable size for the stack trace (yet)
+    *infoSize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * BACKTRACE_SIZE);
+    *overallSize = *infoSize * gHashTable.count;
+    *backtraceSize = BACKTRACE_SIZE;
+
+    // debug_log("infoSize = 0x%x overall = 0x%x\n", *infoSize, *overallSize);
+    // now get a byte array big enough for this
+    *info = (uint8_t*)dlmalloc(*overallSize);
+
+    // debug_log("info = %p\n", info);
+    if (*info == NULL) {
+        *overallSize = 0;
+        dlfree(list);  /* don't leak the temporary sort array */
+        goto done;
+    }
+
+    // debug_log("sorting list...\n");
+    qsort((void*)list, gHashTable.count, sizeof(void*), hash_entry_compare);
+
+    uint8_t* head = *info;
+    const int count = gHashTable.count;
+    for (i = 0 ; i < count ; i++) {
+        HashEntry* entry = list[i];
+        size_t entrySize = (sizeof(size_t) * 2) + (sizeof(intptr_t) * entry->numEntries);
+        if (entrySize < *infoSize) {
+            /* we're writing less than a full entry, clear out the rest */
+            /* TODO: only clear out the part we're not overwriting? */
+            memset(head, 0, *infoSize);
+        } else {
+            /* make sure the amount we're copying doesn't exceed the limit */
+            entrySize = *infoSize;
+        }
+        memcpy(head, &(entry->size), entrySize);
+        head += *infoSize;
+    }
+
+    dlfree(list);
+
+done:
+    // debug_log("+++++ done!\n");
+    pthread_mutex_unlock(&gAllocationsMutex);
+}
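+
+/* Sketch of how a caller could walk the returned records (illustrative
+ * only; each record is "*infoSize" bytes: size, allocation count, then
+ * the backtrace, as laid out by the memcpy() above):
+ *
+ *   uint8_t* info; size_t overall, entrySize, total, btSize;
+ *   get_malloc_leak_info(&info, &overall, &entrySize, &total, &btSize);
+ *   if (info != NULL) {
+ *       size_t n;
+ *       for (n = 0; n < overall / entrySize; n++) {
+ *           uint8_t*  rec    = info + n * entrySize;
+ *           size_t    size   = ((size_t*)rec)[0];  // includes flag bits
+ *           size_t    allocs = ((size_t*)rec)[1];
+ *           intptr_t* bt     = (intptr_t*)(rec + 2 * sizeof(size_t));
+ *           // ... report size & ~SIZE_FLAG_MASK, allocs, bt[0..btSize) ...
+ *       }
+ *       free_malloc_leak_info(info);
+ *   }
+ */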
+
+void free_malloc_leak_info(uint8_t* info)
+{
+    dlfree(info);
+}
+
+struct mallinfo mallinfo()
+{
+    return dlmallinfo();
+}
+
+void* valloc(size_t bytes) {
+    /* valloc() must return page-aligned memory */
+    return memalign( getpagesize(), bytes );
+}
+
+
+/*
+ * Code guarded by MALLOC_LEAK_CHECK is only needed when malloc check is
+ * enabled. Currently we exclude them in libc.so, and only include them in
+ * libc_debug.so.
+ */
+#ifdef MALLOC_LEAK_CHECK
+#define MALLOC_ALIGNMENT    8
+#define GUARD               0x48151642
+
+#define DEBUG               0
+
+// =============================================================================
+// Structures
+// =============================================================================
+typedef struct AllocationEntry AllocationEntry;
+struct AllocationEntry {
+    HashEntry* entry;
+    uint32_t guard;
+};
+
+// =============================================================================
+// log functions
+// =============================================================================
+
+#define debug_log(format, ...)  \
+    __libc_android_log_print(ANDROID_LOG_DEBUG, "malloc_leak", (format), ##__VA_ARGS__ )
+
+// =============================================================================
+// Hash Table functions
+// =============================================================================
+static uint32_t get_hash(intptr_t* backtrace, size_t numEntries)
+{
+    if (backtrace == NULL) return 0;
+
+    int hash = 0;
+    size_t i;
+    for (i = 0 ; i < numEntries ; i++) {
+        hash = (hash * 33) + (backtrace[i] >> 2);
+    }
+
+    return hash;
+}
+
+static HashEntry* find_entry(HashTable* table, int slot,
+        intptr_t* backtrace, size_t numEntries, size_t size)
+{
+    HashEntry* entry = table->slots[slot];
+    while (entry != NULL) {
+        //debug_log("backtrace: %p, entry: %p entry->backtrace: %p\n",
+        //        backtrace, entry, (entry != NULL) ? entry->backtrace : NULL);
+        /*
+         * See if the entry matches exactly.  We compare the "size" field,
+         * including the flag bits.
+         */
+        if (entry->size == size && entry->numEntries == numEntries &&
+                !memcmp(backtrace, entry->backtrace, numEntries * sizeof(intptr_t))) {
+            return entry;
+        }
+
+        entry = entry->next;
+    }
+
+    return NULL;
+}
+
+static HashEntry* record_backtrace(intptr_t* backtrace, size_t numEntries, size_t size)
+{
+    size_t hash = get_hash(backtrace, numEntries);
+    size_t slot = hash % HASHTABLE_SIZE;
+
+    if (size & SIZE_FLAG_MASK) {
+        debug_log("malloc_debug: allocation %zx exceeds bit width\n", size);
+        abort();
+    }
+
+    if (gMallocLeakZygoteChild)
+        size |= SIZE_FLAG_ZYGOTE_CHILD;
+
+    HashEntry* entry = find_entry(&gHashTable, slot, backtrace, numEntries, size);
+
+    if (entry != NULL) {
+        entry->allocations++;
+    } else {
+        // create a new entry
+        entry = (HashEntry*)dlmalloc(sizeof(HashEntry) + numEntries*sizeof(intptr_t));
+        entry->allocations = 1;
+        entry->slot = slot;
+        entry->prev = NULL;
+        entry->next = gHashTable.slots[slot];
+        entry->numEntries = numEntries;
+        entry->size = size;
+
+        memcpy(entry->backtrace, backtrace, numEntries * sizeof(intptr_t));
+
+        gHashTable.slots[slot] = entry;
+
+        if (entry->next != NULL) {
+            entry->next->prev = entry;
+        }
+
+        // we just added an entry, increase the size of the hashtable
+        gHashTable.count++;
+    }
+
+    return entry;
+}
+
+static int is_valid_entry(HashEntry* entry)
+{
+    if (entry != NULL) {
+        int i;
+        for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
+            HashEntry* e1 = gHashTable.slots[i];
+
+            while (e1 != NULL) {
+                if (e1 == entry) {
+                    return 1;
+                }
+
+                e1 = e1->next;
+            }
+        }
+    }
+
+    return 0;
+}
+
+static void remove_entry(HashEntry* entry)
+{
+    HashEntry* prev = entry->prev;
+    HashEntry* next = entry->next;
+
+    if (prev != NULL) entry->prev->next = next;
+    if (next != NULL) entry->next->prev = prev;
+
+    if (prev == NULL) {
+        // we are the head of the list. set the head to be next
+        gHashTable.slots[entry->slot] = entry->next;
+    }
+
+    // we just removed an entry, decrease the size of the hashtable
+    gHashTable.count--;
+}
+
+
+// =============================================================================
+// stack trace functions
+// =============================================================================
+
+typedef struct
+{
+    size_t count;
+    intptr_t* addrs;
+} stack_crawl_state_t;
+
+
+/* depends how the system includes define this */
+#ifdef HAVE_UNWIND_CONTEXT_STRUCT
+typedef struct _Unwind_Context __unwind_context;
+#else
+typedef _Unwind_Context __unwind_context;
+#endif
+
+static _Unwind_Reason_Code trace_function(__unwind_context *context, void *arg)
+{
+    stack_crawl_state_t* state = (stack_crawl_state_t*)arg;
+    if (state->count) {
+        intptr_t ip = (intptr_t)_Unwind_GetIP(context);
+        if (ip) {
+            state->addrs[0] = ip; 
+            state->addrs++;
+            state->count--;
+            return _URC_NO_REASON;
+        }
+    }
+    /* 
+     * If we run out of space to record the address or 0 has been seen, stop
+     * unwinding the stack.
+     */
+    return _URC_END_OF_STACK;
+}
+
+static inline
+int get_backtrace(intptr_t* addrs, size_t max_entries)
+{
+    stack_crawl_state_t state;
+    state.count = max_entries;
+    state.addrs = (intptr_t*)addrs;
+    _Unwind_Backtrace(trace_function, (void*)&state);
+    return max_entries - state.count;
+}
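+
+/* Illustrative use (this is how leak_malloc() below captures its stack):
+ *
+ *   intptr_t addrs[BACKTRACE_SIZE];
+ *   int depth = get_backtrace(addrs, BACKTRACE_SIZE);
+ */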
+
+// =============================================================================
+// malloc leak function dispatcher
+// =============================================================================
+
+static void* leak_malloc(size_t bytes);
+static void  leak_free(void* mem);
+static void* leak_calloc(size_t n_elements, size_t elem_size);
+static void* leak_realloc(void* oldMem, size_t bytes);
+static void* leak_memalign(size_t alignment, size_t bytes);
+
+static void* fill_malloc(size_t bytes);
+static void  fill_free(void* mem);
+static void* fill_realloc(void* oldMem, size_t bytes);
+static void* fill_memalign(size_t alignment, size_t bytes);
+
+static void* chk_malloc(size_t bytes);
+static void  chk_free(void* mem);
+static void* chk_calloc(size_t n_elements, size_t elem_size);
+static void* chk_realloc(void* oldMem, size_t bytes);
+static void* chk_memalign(size_t alignment, size_t bytes);
+
+typedef struct {
+    void* (*malloc)(size_t bytes);
+    void  (*free)(void* mem);
+    void* (*calloc)(size_t n_elements, size_t elem_size);
+    void* (*realloc)(void* oldMem, size_t bytes);
+    void* (*memalign)(size_t alignment, size_t bytes);
+} MallocDebug;
+
+static const MallocDebug gMallocEngineTable[] __attribute__((aligned(32))) =
+{
+    { dlmalloc,     dlfree,     dlcalloc,       dlrealloc,     dlmemalign },
+    { leak_malloc,  leak_free,  leak_calloc,    leak_realloc,  leak_memalign },
+    { fill_malloc,  fill_free,  dlcalloc,       fill_realloc,  fill_memalign },
+    { chk_malloc,   chk_free,   chk_calloc,     chk_realloc,   chk_memalign }
+};
+
+enum {
+    INDEX_NORMAL = 0,
+    INDEX_LEAK_CHECK,
+    INDEX_MALLOC_FILL,
+    INDEX_MALLOC_CHECK,
+};
+
+static MallocDebug const * gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
+static int gMallocDebugLevel;
+static int gTrapOnError = 1;
+
+void* malloc(size_t bytes) {
+    return gMallocDispatch->malloc(bytes);
+}
+void free(void* mem) {
+    gMallocDispatch->free(mem);
+}
+void* calloc(size_t n_elements, size_t elem_size) {
+    return gMallocDispatch->calloc(n_elements, elem_size);
+}
+void* realloc(void* oldMem, size_t bytes) {
+    return gMallocDispatch->realloc(oldMem, bytes);
+}
+void* memalign(size_t alignment, size_t bytes) {
+    return gMallocDispatch->memalign(alignment, bytes);
+}
+
+// =============================================================================
+// malloc check functions
+// =============================================================================
+
+#define CHK_FILL_FREE           0xef
+#define CHK_SENTINEL_VALUE      0xeb
+#define CHK_SENTINEL_HEAD_SIZE  16
+#define CHK_SENTINEL_TAIL_SIZE  16
+#define CHK_OVERHEAD_SIZE       (   CHK_SENTINEL_HEAD_SIZE +    \
+                                    CHK_SENTINEL_TAIL_SIZE +    \
+                                    sizeof(size_t) )
+
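+/* Resulting layout of a chk_malloc() block (illustrative):
+ *
+ *   | head sentinel (16) | user data | tail sentinel (16) ... | size_t |
+ *                         ^-- pointer returned to the caller
+ *
+ * The requested size is stashed in the last size_t of the usable block
+ * so that chk_free()/chk_realloc() can re-verify both sentinels.
+ */
+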
+static void dump_stack_trace()
+{
+    intptr_t addrs[20];
+    int c = get_backtrace(addrs, 20);
+    char buf[16];
+    char tmp[16*20];
+    int i;
+
+    tmp[0] = 0; // Need to initialize tmp[0] for the first strcat
+    for (i=0 ; i<c; i++) {
+        sprintf(buf, "%2d: %08x\n", i, addrs[i]);
+        strcat(tmp, buf);
+    }
+    __libc_android_log_print(ANDROID_LOG_ERROR, "libc", "call stack:\n%s", tmp);
+}
+
+static int is_valid_malloc_pointer(void* addr)
+{
+    return 1;
+}
+
+static void assert_valid_malloc_pointer(void* mem)
+{
+    if (mem && !is_valid_malloc_pointer(mem)) {
+        pthread_mutex_lock(&gAllocationsMutex);
+        gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
+            __libc_android_log_print(ANDROID_LOG_ERROR, "libc",
+                    "*** MALLOC CHECK: buffer %p, is not a valid "
+                    "malloc pointer (are you mixing up new/delete "
+                    "and malloc/free?)", mem);
+            dump_stack_trace();
+                if (gTrapOnError) {
+                    __builtin_trap();
+                }
+        gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
+        pthread_mutex_unlock(&gAllocationsMutex);
+    }
+}
+
+static void chk_out_of_bounds_check__locked(void* buffer, size_t size)
+{
+    int i;
+    char* buf = (char*)buffer - CHK_SENTINEL_HEAD_SIZE;
+    for (i=0 ; i<CHK_SENTINEL_HEAD_SIZE ; i++) {
+        if (buf[i] != CHK_SENTINEL_VALUE) {
+            gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
+            __libc_android_log_print(ANDROID_LOG_ERROR, "libc",
+                    "*** MALLOC CHECK: buffer %p, size=%lu, "
+                    "corrupted %d bytes before allocation",
+                    buffer, size, CHK_SENTINEL_HEAD_SIZE-i);
+            dump_stack_trace();
+            if (gTrapOnError) {
+                __builtin_trap();
+            }
+            gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
+        }
+    }
+    buf = (char*)buffer + size;
+    for (i=CHK_SENTINEL_TAIL_SIZE-1 ; i>=0 ; i--) {
+        if (buf[i] != CHK_SENTINEL_VALUE) {
+            gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
+            __libc_android_log_print(ANDROID_LOG_ERROR, "libc",
+                    "*** MALLOC CHECK: buffer %p, size=%lu, "
+                    "corrupted %d bytes after allocation",
+                    buffer, size, i+1);
+            dump_stack_trace();
+            if (gTrapOnError) {
+                __builtin_trap();
+            }
+            gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
+        }
+    }
+}
+
+void* chk_malloc(size_t bytes)
+{
+    char* buffer = (char*)dlmalloc(bytes + CHK_OVERHEAD_SIZE);
+    if (buffer) {
+        pthread_mutex_lock(&gAllocationsMutex);
+            memset(buffer, CHK_SENTINEL_VALUE, bytes + CHK_OVERHEAD_SIZE);
+            size_t offset = dlmalloc_usable_size(buffer) - sizeof(size_t);
+            *(size_t *)(buffer + offset) = bytes;
+            buffer += CHK_SENTINEL_HEAD_SIZE;
+        pthread_mutex_unlock(&gAllocationsMutex);
+    }
+    return buffer;
+}
+
+void  chk_free(void* mem)
+{
+    assert_valid_malloc_pointer(mem);
+    if (mem) {
+        pthread_mutex_lock(&gAllocationsMutex);
+            char* buffer = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
+            size_t offset = dlmalloc_usable_size(buffer) - sizeof(size_t);
+            size_t bytes = *(size_t *)(buffer + offset);
+            chk_out_of_bounds_check__locked(mem, bytes);
+        pthread_mutex_unlock(&gAllocationsMutex);
+        memset(buffer, CHK_FILL_FREE, bytes);
+        dlfree(buffer);
+    }
+}
+
+void* chk_calloc(size_t n_elements, size_t elem_size)
+{
+    size_t  size;
+    void*   ptr;
+
+    /* Fail on overflow - just to be safe even though this code runs only
+     * within the debugging C library, not the production one */
+    if (n_elements && MAX_SIZE_T / n_elements < elem_size) {
+        return NULL;
+    }
+    size = n_elements * elem_size;
+    ptr  = chk_malloc(size);
+    if (ptr != NULL) {
+        memset(ptr, 0, size);
+    }
+    return ptr;
+}
+
+void* chk_realloc(void* mem, size_t bytes)
+{
+    assert_valid_malloc_pointer(mem);
+    char* new_buffer = chk_malloc(bytes);
+    if (mem == NULL) {
+        return new_buffer;
+    }
+
+    pthread_mutex_lock(&gAllocationsMutex);
+        char* buffer = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
+        size_t offset = dlmalloc_usable_size(buffer) - sizeof(size_t);
+        size_t old_bytes = *(size_t *)(buffer + offset);
+        chk_out_of_bounds_check__locked(mem, old_bytes);
+    pthread_mutex_unlock(&gAllocationsMutex);
+
+    if (new_buffer) {
+        size_t size = (bytes < old_bytes)?(bytes):(old_bytes);
+        memcpy(new_buffer, mem, size);
+        chk_free(mem);
+    }
+
+    return new_buffer;
+}
+
+void* chk_memalign(size_t alignment, size_t bytes)
+{
+    // XXX: this ignores the alignment request; it's better to fall back
+    // to chk_malloc() than to get the sentinel bookkeeping wrong
+    return chk_malloc(bytes);
+}
+
+// =============================================================================
+// malloc fill functions
+// =============================================================================
+
+void* fill_malloc(size_t bytes)
+{
+    void* buffer = dlmalloc(bytes);
+    if (buffer) {
+        memset(buffer, CHK_SENTINEL_VALUE, bytes);
+    }
+    return buffer;
+}
+
+void  fill_free(void* mem)
+{
+    size_t bytes;
+
+    if (mem == NULL)  /* free(NULL) must be a no-op */
+        return;
+    bytes = dlmalloc_usable_size(mem);
+    memset(mem, CHK_FILL_FREE, bytes);
+    dlfree(mem);
+}
+
+void* fill_realloc(void* mem, size_t bytes)
+{
+    void* buffer = fill_malloc(bytes);
+    if (mem == NULL) {
+        return buffer;
+    }
+    if (buffer) {
+        size_t old_size = dlmalloc_usable_size(mem);
+        size_t size = (bytes < old_size)?(bytes):(old_size);
+        memcpy(buffer, mem, size);
+        fill_free(mem);
+    }
+    return buffer;
+}
+
+void* fill_memalign(size_t alignment, size_t bytes)
+{
+    void* buffer = dlmemalign(alignment, bytes);
+    if (buffer) {
+        memset(buffer, CHK_SENTINEL_VALUE, bytes);
+    }
+    return buffer;
+}
+
+// =============================================================================
+// malloc leak functions
+// =============================================================================
+
+#define MEMALIGN_GUARD  ((void*)0xA1A41520)
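+
+/* For memaligned blocks, leak_memalign() stores the original base pointer
+ * and a guard word just below the pointer it returns (illustrative):
+ *
+ *   | AllocationEntry | padding... | base | MEMALIGN_GUARD | user data |
+ *                                                           ^-- returned
+ *
+ * leak_free() recognizes the guard and frees through the saved base.
+ */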
+
+void* leak_malloc(size_t bytes)
+{
+    // allocate enough space in front of the allocation to store the
+    // pointer to the alloc structure. This makes freeing the structure
+    // really fast!
+
+    // 1. allocate enough memory and include our header
+    // 2. set the base pointer to be right after our header
+
+    void* base = dlmalloc(bytes + sizeof(AllocationEntry));
+    if (base != NULL) {
+        pthread_mutex_lock(&gAllocationsMutex);
+
+            intptr_t backtrace[BACKTRACE_SIZE];
+            size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);
+    
+            AllocationEntry* header = (AllocationEntry*)base;
+            header->entry = record_backtrace(backtrace, numEntries, bytes);
+            header->guard = GUARD;
+    
+            // now increment base to point to after our header.
+            // this should just work since our header is 8 bytes.
+            base = (AllocationEntry*)base + 1;
+
+        pthread_mutex_unlock(&gAllocationsMutex);
+    }
+
+    return base;
+}
+
+void leak_free(void* mem)
+{
+    if (mem != NULL) {
+        pthread_mutex_lock(&gAllocationsMutex);
+
+        // check the guard to make sure it is valid
+        AllocationEntry* header = (AllocationEntry*)mem - 1;
+        
+        if (header->guard != GUARD) {
+            // could be a memaligned block
+            if (((void**)mem)[-1] == MEMALIGN_GUARD) {
+                mem = ((void**)mem)[-2];
+                header = (AllocationEntry*)mem - 1;
+            }
+        }
+        
+        if (header->guard == GUARD || is_valid_entry(header->entry)) {
+            // decrement the allocations
+            HashEntry* entry = header->entry;
+            entry->allocations--;
+            if (entry->allocations == 0) {
+                remove_entry(entry);
+                dlfree(entry);
+            }
+
+            // now free the memory!
+            dlfree(header);
+        } else {
+            debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
+                    header->guard, header->entry);
+        }
+
+        pthread_mutex_unlock(&gAllocationsMutex);
+    }
+}
+
+void* leak_calloc(size_t n_elements, size_t elem_size)
+{
+    size_t  size;
+    void*   ptr;
+
+    /* Fail on overflow - just to be safe even though this code runs only
+     * within the debugging C library, not the production one */
+    if (n_elements && MAX_SIZE_T / n_elements < elem_size) {
+        return NULL;
+    }
+    size = n_elements * elem_size;
+    ptr  = leak_malloc(size);
+    if (ptr != NULL) {
+        memset(ptr, 0, size);
+    }
+    return ptr;
+}
+
+void* leak_realloc(void* oldMem, size_t bytes)
+{
+    if (oldMem == NULL) {
+        return leak_malloc(bytes);
+    }
+    void* newMem = NULL;
+    AllocationEntry* header = (AllocationEntry*)oldMem - 1;
+    if (header && header->guard == GUARD) {
+        size_t oldSize = header->entry->size & ~SIZE_FLAG_MASK;
+        newMem = leak_malloc(bytes);
+        if (newMem != NULL) {
+            size_t copySize = (oldSize <= bytes) ? oldSize : bytes;
+            memcpy(newMem, oldMem, copySize);
+            leak_free(oldMem);
+        }
+    } else {
+        newMem = dlrealloc(oldMem, bytes);
+    }
+    return newMem;
+}
+
+void* leak_memalign(size_t alignment, size_t bytes)
+{
+    // we can just use malloc
+    if (alignment <= MALLOC_ALIGNMENT)
+        return leak_malloc(bytes);
+
+    // if it's not a power of two, round it down to one (callers are
+    // supposed to pass a power of two in the first place)
+    if (alignment & (alignment-1))
+        alignment = 1L << (31 - __builtin_clz(alignment));
+
+    // here, alignment is at least MALLOC_ALIGNMENT<<1 bytes
+    // we will align by at least MALLOC_ALIGNMENT bytes
+    // and at most alignment-MALLOC_ALIGNMENT bytes
+    size_t size = (alignment-MALLOC_ALIGNMENT) + bytes;
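+    // e.g. with alignment == 32 and an 8-byte-aligned base, the aligned
+    // pointer lands 8..24 bytes past base, which always leaves room below
+    // it for the saved base pointer and the guard word (illustrative)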
+    void* base = leak_malloc(size);
+    if (base != NULL) {
+        intptr_t ptr = (intptr_t)base;
+        if ((ptr % alignment) == 0)
+            return base;
+
+        // align the pointer up; note that with C's signed arithmetic
+        // ((-ptr) % alignment) would be negative here, so compute the
+        // gap to the next boundary explicitly
+        ptr += alignment - (ptr % alignment);
+
+        // there is always enough space for the base pointer and the guard
+        ((void**)ptr)[-1] = MEMALIGN_GUARD;
+        ((void**)ptr)[-2] = base;
+
+        return (void*)ptr;
+    }
+    return base;
+}
+#endif /* MALLOC_LEAK_CHECK */
+
+// called from libc_init()
+extern char*  __progname;
+
+void malloc_debug_init()
+{
+    unsigned int level = 0;
+#ifdef MALLOC_LEAK_CHECK
+    // if MALLOC_LEAK_CHECK is enabled, use level=1 by default
+    level = 1;
+#endif
+    char env[PROP_VALUE_MAX];
+    int len = __system_property_get("libc.debug.malloc", env);
+
+    if (len) {
+        level = atoi(env);
+#ifndef MALLOC_LEAK_CHECK
+        /* Alert the user that libc_debug.so needs to be installed as libc.so
+         * when performing malloc checks.
+         */
+        if (level != 0) {
+            __libc_android_log_print(ANDROID_LOG_INFO, "libc",
+                 "Malloc checks need libc_debug.so pushed to the device!\n");
+
+        }
+#endif
+    }
+
+#ifdef MALLOC_LEAK_CHECK
+    gMallocDebugLevel = level;
+    switch (level) {
+    default:
+    case 0:
+        gMallocDispatch = &gMallocEngineTable[INDEX_NORMAL];
+        break;
+    case 1:
+        __libc_android_log_print(ANDROID_LOG_INFO, "libc",
+                "%s using MALLOC_DEBUG = %d (leak checker)\n",
+                __progname, level);
+        gMallocDispatch = &gMallocEngineTable[INDEX_LEAK_CHECK];
+        break;
+    case 5:
+        __libc_android_log_print(ANDROID_LOG_INFO, "libc",
+                "%s using MALLOC_DEBUG = %d (fill)\n", 
+                __progname, level);
+        gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_FILL];
+        break;
+    case 10:
+        __libc_android_log_print(ANDROID_LOG_INFO, "libc",
+                "%s using MALLOC_DEBUG = %d (sentinels, fill)\n", 
+                __progname, level);
+        gMallocDispatch = &gMallocEngineTable[INDEX_MALLOC_CHECK];
+        break;
+    }
+#endif
+}
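+
+/* Illustrative: the debug level is read from the "libc.debug.malloc"
+ * system property above, so (assuming libc_debug.so is installed as
+ * libc.so) something like "adb shell setprop libc.debug.malloc 1"
+ * followed by restarting the target process enables the leak checker.
+ */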
diff --git a/libc/bionic/md5.c b/libc/bionic/md5.c
new file mode 100644
index 0000000..087786f
--- /dev/null
+++ b/libc/bionic/md5.c
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) 1995 - 2001 Kungliga Tekniska Högskolan
+ * (Royal Institute of Technology, Stockholm, Sweden).
+ * All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * 3. Neither the name of the Institute nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+
+__RCSID("$Heimdal: md5.c,v 1.15 2001/01/29 04:33:44 assar Exp $"
+        "$NetBSD: md5.c,v 1.1.1.4 2002/09/12 12:41:42 joda Exp $");
+#endif
+
+#include "md5.h"
+#include "hash.h"
+
+#define A m->counter[0]
+#define B m->counter[1]
+#define C m->counter[2]
+#define D m->counter[3]
+#define X data
+
+void
+MD5_Init (struct md5 *m)
+{
+  m->sz[0] = 0;
+  m->sz[1] = 0;
+  D = 0x10325476;
+  C = 0x98badcfe;
+  B = 0xefcdab89;
+  A = 0x67452301;
+}
+
+#define F(x,y,z) CRAYFIX((x & y) | (~x & z))
+#define G(x,y,z) CRAYFIX((x & z) | (y & ~z))
+#define H(x,y,z) (x ^ y ^ z)
+#define I(x,y,z) CRAYFIX(y ^ (x | ~z))
+
+#define DOIT(a,b,c,d,k,s,i,OP) \
+a = b + cshift(a + OP(b,c,d) + X[k] + (i), s)
+
+#define DO1(a,b,c,d,k,s,i) DOIT(a,b,c,d,k,s,i,F)
+#define DO2(a,b,c,d,k,s,i) DOIT(a,b,c,d,k,s,i,G)
+#define DO3(a,b,c,d,k,s,i) DOIT(a,b,c,d,k,s,i,H)
+#define DO4(a,b,c,d,k,s,i) DOIT(a,b,c,d,k,s,i,I)
+
+static inline void
+calc (struct md5 *m, u_int32_t *data)
+{
+  u_int32_t AA, BB, CC, DD;
+
+  AA = A;
+  BB = B;
+  CC = C;
+  DD = D;
+
+  /* Round 1 */
+
+  DO1(A,B,C,D,0,7,0xd76aa478);
+  DO1(D,A,B,C,1,12,0xe8c7b756);
+  DO1(C,D,A,B,2,17,0x242070db);
+  DO1(B,C,D,A,3,22,0xc1bdceee);
+
+  DO1(A,B,C,D,4,7,0xf57c0faf);
+  DO1(D,A,B,C,5,12,0x4787c62a);
+  DO1(C,D,A,B,6,17,0xa8304613);
+  DO1(B,C,D,A,7,22,0xfd469501);
+
+  DO1(A,B,C,D,8,7,0x698098d8);
+  DO1(D,A,B,C,9,12,0x8b44f7af);
+  DO1(C,D,A,B,10,17,0xffff5bb1);
+  DO1(B,C,D,A,11,22,0x895cd7be);
+
+  DO1(A,B,C,D,12,7,0x6b901122);
+  DO1(D,A,B,C,13,12,0xfd987193);
+  DO1(C,D,A,B,14,17,0xa679438e);
+  DO1(B,C,D,A,15,22,0x49b40821);
+
+  /* Round 2 */
+
+  DO2(A,B,C,D,1,5,0xf61e2562);
+  DO2(D,A,B,C,6,9,0xc040b340);
+  DO2(C,D,A,B,11,14,0x265e5a51);
+  DO2(B,C,D,A,0,20,0xe9b6c7aa);
+
+  DO2(A,B,C,D,5,5,0xd62f105d);
+  DO2(D,A,B,C,10,9,0x2441453);
+  DO2(C,D,A,B,15,14,0xd8a1e681);
+  DO2(B,C,D,A,4,20,0xe7d3fbc8);
+
+  DO2(A,B,C,D,9,5,0x21e1cde6);
+  DO2(D,A,B,C,14,9,0xc33707d6);
+  DO2(C,D,A,B,3,14,0xf4d50d87);
+  DO2(B,C,D,A,8,20,0x455a14ed);
+
+  DO2(A,B,C,D,13,5,0xa9e3e905);
+  DO2(D,A,B,C,2,9,0xfcefa3f8);
+  DO2(C,D,A,B,7,14,0x676f02d9);
+  DO2(B,C,D,A,12,20,0x8d2a4c8a);
+
+  /* Round 3 */
+
+  DO3(A,B,C,D,5,4,0xfffa3942);
+  DO3(D,A,B,C,8,11,0x8771f681);
+  DO3(C,D,A,B,11,16,0x6d9d6122);
+  DO3(B,C,D,A,14,23,0xfde5380c);
+
+  DO3(A,B,C,D,1,4,0xa4beea44);
+  DO3(D,A,B,C,4,11,0x4bdecfa9);
+  DO3(C,D,A,B,7,16,0xf6bb4b60);
+  DO3(B,C,D,A,10,23,0xbebfbc70);
+
+  DO3(A,B,C,D,13,4,0x289b7ec6);
+  DO3(D,A,B,C,0,11,0xeaa127fa);
+  DO3(C,D,A,B,3,16,0xd4ef3085);
+  DO3(B,C,D,A,6,23,0x4881d05);
+
+  DO3(A,B,C,D,9,4,0xd9d4d039);
+  DO3(D,A,B,C,12,11,0xe6db99e5);
+  DO3(C,D,A,B,15,16,0x1fa27cf8);
+  DO3(B,C,D,A,2,23,0xc4ac5665);
+
+  /* Round 4 */
+
+  DO4(A,B,C,D,0,6,0xf4292244);
+  DO4(D,A,B,C,7,10,0x432aff97);
+  DO4(C,D,A,B,14,15,0xab9423a7);
+  DO4(B,C,D,A,5,21,0xfc93a039);
+
+  DO4(A,B,C,D,12,6,0x655b59c3);
+  DO4(D,A,B,C,3,10,0x8f0ccc92);
+  DO4(C,D,A,B,10,15,0xffeff47d);
+  DO4(B,C,D,A,1,21,0x85845dd1);
+
+  DO4(A,B,C,D,8,6,0x6fa87e4f);
+  DO4(D,A,B,C,15,10,0xfe2ce6e0);
+  DO4(C,D,A,B,6,15,0xa3014314);
+  DO4(B,C,D,A,13,21,0x4e0811a1);
+
+  DO4(A,B,C,D,4,6,0xf7537e82);
+  DO4(D,A,B,C,11,10,0xbd3af235);
+  DO4(C,D,A,B,2,15,0x2ad7d2bb);
+  DO4(B,C,D,A,9,21,0xeb86d391);
+
+  A += AA;
+  B += BB;
+  C += CC;
+  D += DD;
+}
+
+/*
+ * From `Performance analysis of MD5' by Joseph D. Touch <touch@isi.edu>
+ */
+
+#if defined(WORDS_BIGENDIAN)
+static inline u_int32_t
+swap_u_int32_t (u_int32_t t)
+{
+  u_int32_t temp1, temp2;
+
+  temp1   = cshift(t, 16);
+  temp2   = temp1 >> 8;
+  temp1  &= 0x00ff00ff;
+  temp2  &= 0x00ff00ff;
+  temp1 <<= 8;
+  return temp1 | temp2;
+}
+#endif
+
+struct x32{
+  unsigned int a:32;
+  unsigned int b:32;
+};
+
+void
+MD5_Update (struct md5 *m, const void *v, size_t len)
+{
+  const unsigned char *p = v;
+  size_t old_sz = m->sz[0];
+  size_t offset;
+
+  m->sz[0] += len * 8;
+  if (m->sz[0] < old_sz)
+      ++m->sz[1];
+  offset = (old_sz / 8)  % 64;
+  while(len > 0){
+    size_t l = min(len, 64 - offset);
+    memcpy(m->save + offset, p, l);
+    offset += l;
+    p += l;
+    len -= l;
+    if(offset == 64){
+#if defined(WORDS_BIGENDIAN)
+      int i;
+      u_int32_t current[16];
+      struct x32 *u = (struct x32*)m->save;
+      for(i = 0; i < 8; i++){
+	current[2*i+0] = swap_u_int32_t(u[i].a);
+	current[2*i+1] = swap_u_int32_t(u[i].b);
+      }
+      calc(m, current);
+#else
+      calc(m, (u_int32_t*)m->save);
+#endif
+      offset = 0;
+    }
+  }
+}
+
+void
+MD5_Final (void *res, struct md5 *m)
+{
+  static unsigned char zeros[72];
+  unsigned offset = (m->sz[0] / 8) % 64;
+  unsigned int dstart = (120 - offset - 1) % 64 + 1;
+
+  *zeros = 0x80;
+  memset (zeros + 1, 0, sizeof(zeros) - 1);
+  zeros[dstart+0] = (m->sz[0] >> 0) & 0xff;
+  zeros[dstart+1] = (m->sz[0] >> 8) & 0xff;
+  zeros[dstart+2] = (m->sz[0] >> 16) & 0xff;
+  zeros[dstart+3] = (m->sz[0] >> 24) & 0xff;
+  zeros[dstart+4] = (m->sz[1] >> 0) & 0xff;
+  zeros[dstart+5] = (m->sz[1] >> 8) & 0xff;
+  zeros[dstart+6] = (m->sz[1] >> 16) & 0xff;
+  zeros[dstart+7] = (m->sz[1] >> 24) & 0xff;
+  MD5_Update (m, zeros, dstart + 8);
+  {
+      int i;
+      unsigned char *r = (unsigned char *)res;
+
+      for (i = 0; i < 4; ++i) {
+	  r[4*i]   = m->counter[i] & 0xFF;
+	  r[4*i+1] = (m->counter[i] >> 8) & 0xFF;
+	  r[4*i+2] = (m->counter[i] >> 16) & 0xFF;
+	  r[4*i+3] = (m->counter[i] >> 24) & 0xFF;
+      }
+  }
+#if 0
+  {
+    int i;
+    u_int32_t *r = (u_int32_t *)res;
+
+    for (i = 0; i < 4; ++i)
+      r[i] = swap_u_int32_t (m->counter[i]);
+  }
+#endif
+}
diff --git a/libc/bionic/md5.h b/libc/bionic/md5.h
new file mode 100644
index 0000000..a381994
--- /dev/null
+++ b/libc/bionic/md5.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 1995 - 2001 Kungliga Tekniska Högskolan
+ * (Royal Institute of Technology, Stockholm, Sweden).
+ * All rights reserved.
+ * 
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 
+ * 3. Neither the name of the Institute nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * 
+ * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* $Heimdal: md5.h,v 1.8 2001/01/29 02:08:57 assar Exp $
+   $NetBSD: md5.h,v 1.1.1.4 2002/09/12 12:41:42 joda Exp $ */
+
+#include <stdlib.h>
+#include <sys/types.h>
+
+struct md5 {
+  unsigned int sz[2];
+  u_int32_t counter[4];
+  unsigned char save[64];
+};
+
+typedef struct md5 MD5_CTX;
+
+void MD5_Init (struct md5 *m);
+void MD5_Update (struct md5 *m, const void *p, size_t len);
+void MD5_Final (void *res, struct md5 *m); /* u_int32_t res[4] */
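+
+/* Illustrative usage (a sketch, not part of this change):
+ *
+ *   struct md5 m;
+ *   unsigned char digest[16];
+ *   MD5_Init(&m);
+ *   MD5_Update(&m, "abc", 3);
+ *   MD5_Final(digest, &m);   // digest now holds the 16-byte MD5 of "abc"
+ */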
diff --git a/libc/bionic/pthread-timers.c b/libc/bionic/pthread-timers.c
new file mode 100644
index 0000000..7b9c99e
--- /dev/null
+++ b/libc/bionic/pthread-timers.c
@@ -0,0 +1,639 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the 
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include "pthread_internal.h"
+#include <linux/time.h>
+#include <string.h>
+#include <errno.h>
+
+/* This file implements the support required to implement SIGEV_THREAD Posix
+ * timers. See the following pages for additional details:
+ *
+ * www.opengroup.org/onlinepubs/000095399/functions/timer_create.html
+ * www.opengroup.org/onlinepubs/000095399/functions/timer_settime.html
+ * www.opengroup.org/onlinepubs/000095399/functions/xsh_chap02_04.html#tag_02_04_01
+ *
+ * The Linux kernel doesn't support these, so we need to implement them in the
+ * C library. We use a very basic scheme where each timer is associated to a
+ * thread that will loop, waiting for timeouts or messages from the program
+ * corresponding to calls to timer_settime() and timer_delete().
+ *
+ * Note also an important thing: Posix mandates that in the case of fork(),
+ * the timers of the child process should be disarmed, but not deleted.
+ * This is implemented by providing a fork() wrapper (see bionic/fork.c) which
+ * stops all timers before the fork, and only re-starts them in case of error
+ * or in the parent process.
+ *
+ * The stop/start is implemented by the __timer_table_start_stop() function
+ * below.
+ */
+
+/* normal (i.e. non-SIGEV_THREAD) timer ids are created directly by the kernel
+ * and are passed as is to/from the caller.
+ *
+ * on the other hand, a SIGEV_THREAD timer ID will have its TIMER_ID_WRAP_BIT
+ * always set to 1. In this implementation, this is always bit 31, which is
+ * guaranteed to never be used by kernel-provided timer ids
+ *
+ * (see code in <kernel>/lib/idr.c, used to manage IDs, to see why)
+ */
+
+#define  TIMER_ID_WRAP_BIT        0x80000000
+#define  TIMER_ID_WRAP(id)        ((timer_t)((id) |  TIMER_ID_WRAP_BIT))
+#define  TIMER_ID_UNWRAP(id)      ((timer_t)((id) & ~TIMER_ID_WRAP_BIT))
+#define  TIMER_ID_IS_WRAPPED(id)  (((id) & TIMER_ID_WRAP_BIT) != 0)
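+
+/* e.g. table slot 3 yields the wrapped id 0x80000003; unwrapping simply
+ * masks the high bit off again (illustrative):
+ *
+ *   timer_t id  = TIMER_ID_WRAP(3);              // 0x80000003
+ *   int     idx = (int)TIMER_ID_UNWRAP(id);      // 3
+ */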
+
+/* this value is used internally to indicate a 'free' or 'zombie' 
+ * thr_timer structure. Here, 'zombie' means that timer_delete()
+ * has been called, but that the corresponding thread hasn't
+ * exited yet.
+ */
+#define  TIMER_ID_NONE            ((timer_t)0xffffffff)
+
+/* True iff a timer id is valid */
+#define  TIMER_ID_IS_VALID(id)    ((id) != TIMER_ID_NONE)
+
+/* the maximum value of overrun counters */
+#define  DELAYTIMER_MAX    0x7fffffff
+
+#define  __likely(x)   __builtin_expect(!!(x),1)
+#define  __unlikely(x) __builtin_expect(!!(x),0)
+
+typedef struct thr_timer          thr_timer_t;
+typedef struct thr_timer_table    thr_timer_table_t;
+
+/* The Posix spec says the function receives an unsigned parameter, but
+ * it's really a 'union sigval' a.k.a. sigval_t */
+typedef void (*thr_timer_func_t)( sigval_t );
+
+struct thr_timer {
+    thr_timer_t*       next;     /* next in free list */
+    timer_t            id;       /* TIMER_ID_NONE iff free or dying */
+    clockid_t          clock;
+    pthread_t          thread;
+    pthread_attr_t     attributes;
+    thr_timer_func_t   callback;
+    sigval_t           value;
+
+    /* the following are used to communicate between
+     * the timer thread and the timer_XXX() functions
+     */
+    pthread_mutex_t           mutex;     /* lock */
+    pthread_cond_t            cond;      /* signal a state change to thread */
+    int volatile              done;      /* set by timer_delete */
+    int volatile              stopped;   /* set by _start_stop() */
+    struct timespec volatile  expires;   /* next expiration time, or 0 */
+    struct timespec volatile  period;    /* reload value, or 0 */
+    int volatile              overruns;  /* current number of overruns */
+};
+
+#define  MAX_THREAD_TIMERS  32
+
+struct thr_timer_table {
+    pthread_mutex_t  lock;
+    thr_timer_t*     free_timer;
+    thr_timer_t      timers[ MAX_THREAD_TIMERS ];
+};
+
+/** GLOBAL TABLE OF THREAD TIMERS
+ **/
+
+static void
+thr_timer_table_init( thr_timer_table_t*  t )
+{
+    int  nn;
+
+    memset(t, 0, sizeof *t);
+    pthread_mutex_init( &t->lock, NULL );
+
+    for (nn = 0; nn < MAX_THREAD_TIMERS; nn++)
+        t->timers[nn].id = TIMER_ID_NONE;
+
+    t->free_timer = &t->timers[0];
+    for (nn = 1; nn < MAX_THREAD_TIMERS; nn++)
+        t->timers[nn-1].next = &t->timers[nn];
+}
+
+
+static thr_timer_t*
+thr_timer_table_alloc( thr_timer_table_t*  t )
+{
+    thr_timer_t*  timer;
+
+    if (t == NULL)
+        return NULL;
+
+    pthread_mutex_lock(&t->lock);
+    timer = t->free_timer;
+    if (timer != NULL) {
+        t->free_timer = timer->next;
+        timer->next   = NULL;
+        timer->id     = TIMER_ID_WRAP((timer - t->timers));
+    }
+    pthread_mutex_unlock(&t->lock);
+    return timer;
+}
+
+
+static void
+thr_timer_table_free( thr_timer_table_t*  t, thr_timer_t*  timer )
+{
+    pthread_mutex_lock( &t->lock );
+    timer->id     = TIMER_ID_NONE;
+    timer->thread = 0;
+    timer->next   = t->free_timer;
+    t->free_timer = timer;
+    pthread_mutex_unlock( &t->lock );
+}
+
+
+static void
+thr_timer_table_start_stop( thr_timer_table_t*  t, int  stop )
+{
+    int  nn;
+
+    pthread_mutex_lock(&t->lock);
+
+    for (nn = 0; nn < MAX_THREAD_TIMERS; nn++) {
+        thr_timer_t*  timer  = &t->timers[nn];
+
+        if (TIMER_ID_IS_VALID(timer->id)) {
+            /* tell the thread to start/stop */
+            pthread_mutex_lock(&timer->mutex);
+            timer->stopped = stop;
+            pthread_cond_signal( &timer->cond );
+            pthread_mutex_unlock(&timer->mutex);
+        }
+    }
+    pthread_mutex_unlock(&t->lock);
+}
+
+
+/* convert a timer_id into the corresponding thr_timer_t* pointer
+ * returns NULL if the id is not wrapped or is invalid/free
+ */
+static thr_timer_t*
+thr_timer_table_from_id( thr_timer_table_t*  t,
+                         timer_t             id,
+                         int                 remove )
+{
+    unsigned      index;
+    thr_timer_t*  timer;
+
+    if (t == NULL || !TIMER_ID_IS_WRAPPED(id))
+        return NULL;
+
+    index = (unsigned) TIMER_ID_UNWRAP(id);
+    if (index >= MAX_THREAD_TIMERS)
+        return NULL;
+
+    pthread_mutex_lock(&t->lock);
+
+    timer = &t->timers[index];
+
+    if (!TIMER_ID_IS_VALID(timer->id)) {
+        timer = NULL;
+    } else {
+        /* if we're removing this timer, clear the id
+         * right now to prevent another thread from
+         * reusing the same id after the unlock */
+        if (remove)
+            timer->id = TIMER_ID_NONE;
+    }
+    pthread_mutex_unlock(&t->lock);
+
+    return timer;
+}
+
+/* the static timer table - we only create it if the process
+ * really wants to use SIGEV_THREAD timers, which should be
+ * pretty infrequent
+ */
+
+static pthread_once_t      __timer_table_once = PTHREAD_ONCE_INIT;
+static thr_timer_table_t*  __timer_table;
+
+static void
+__timer_table_init( void )
+{
+    __timer_table = calloc(1,sizeof(*__timer_table));
+
+    if (__timer_table != NULL)
+        thr_timer_table_init( __timer_table );
+}
+
+static thr_timer_table_t*
+__timer_table_get(void)
+{
+    pthread_once( &__timer_table_once, __timer_table_init );
+    return __timer_table;
+}
+
+/** POSIX THREAD TIMERS CLEANUP ON FORK
+ **
+ ** This should be called from the 'fork()' wrapper to stop/start
+ ** all active thread timers. It is used to implement a Posix
+ ** requirement: the timers of fork child processes must be
+ ** disarmed but not deleted.
+ **/
+void
+__timer_table_start_stop( int  stop )
+{
+    if (__timer_table != NULL) {
+        thr_timer_table_t*  table = __timer_table_get();
+        thr_timer_table_start_stop(table, stop);
+    }
+}
+
+static thr_timer_t*
+thr_timer_from_id( timer_t   id )
+{
+    thr_timer_table_t*  table = __timer_table_get();
+    thr_timer_t*        timer = thr_timer_table_from_id( table, id, 0 );
+
+    return timer;
+}
+
+
+static __inline__ void
+thr_timer_lock( thr_timer_t*  t )
+{
+    pthread_mutex_lock(&t->mutex);
+}
+
+static __inline__ void
+thr_timer_unlock( thr_timer_t*  t )
+{
+    pthread_mutex_unlock(&t->mutex);
+}
+
+/** POSIX TIMERS APIs */
+
+/* first, declare the syscall stubs */
+extern int __timer_create( clockid_t, struct sigevent*, timer_t* );
+extern int __timer_delete( timer_t );
+extern int __timer_gettime( timer_t, struct itimerspec* );
+extern int __timer_settime( timer_t, int, const struct itimerspec*, struct itimerspec* );
+extern int __timer_getoverrun(timer_t);
+
+static void*  timer_thread_start( void* );
+
+/* then the wrappers themselves */
+int
+timer_create( clockid_t  clockid, struct sigevent*  evp, timer_t  *ptimerid)
+{
+    /* if not a SIGEV_THREAD timer, direct creation by the kernel */
+    if (__likely(evp == NULL || evp->sigev_notify != SIGEV_THREAD))
+        return __timer_create( clockid, evp, ptimerid );
+
+    // check arguments
+    if (evp->sigev_notify_function == NULL) {
+        errno = EINVAL;
+        return -1;
+    }
+
+    {
+        struct timespec  dummy;
+
+        /* check that the clock id is supported by the kernel */
+        if (clock_gettime( clockid, &dummy ) < 0 && errno == EINVAL )
+            return -1;
+    }
+
+    /* create a new timer and its thread */
+    {
+        thr_timer_table_t*  table = __timer_table_get();
+        thr_timer_t*        timer = thr_timer_table_alloc( table );
+        struct sigevent     evp0;
+
+        if (timer == NULL) {
+            errno = ENOMEM;
+            return -1;
+        }
+
+        /* copy the thread attributes */
+        if (evp->sigev_notify_attributes == NULL) {
+            pthread_attr_init(&timer->attributes);
+        }
+        else {
+            timer->attributes = ((pthread_attr_t*)evp->sigev_notify_attributes)[0];
+        }
+
+        /* Posix says that the default is PTHREAD_CREATE_DETACHED and
+         * that PTHREAD_CREATE_JOINABLE has undefined behaviour.
+         * So simply always use DETACHED :-)
+         */
+        pthread_attr_setdetachstate(&timer->attributes, PTHREAD_CREATE_DETACHED);
+
+        timer->callback = evp->sigev_notify_function;
+        timer->value    = evp->sigev_value;
+        timer->clock    = clockid;
+
+        pthread_mutex_init( &timer->mutex, NULL );
+        pthread_cond_init( &timer->cond, NULL );
+
+        timer->done           = 0;
+        timer->stopped        = 0;
+        timer->expires.tv_sec = timer->expires.tv_nsec = 0;
+        timer->period.tv_sec  = timer->period.tv_nsec  = 0;
+        timer->overruns       = 0;
+
+        /* create the thread; note that pthread_create() returns 0 on
+         * success or an error number, never -1 */
+        if (pthread_create( &timer->thread, &timer->attributes, timer_thread_start, timer ) != 0) {
+            thr_timer_table_free( __timer_table, timer );
+            errno = ENOMEM;
+            return -1;
+        }
+
+        *ptimerid = timer->id;
+        return 0;
+    }
+}
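+
+/* Illustrative use of the SIGEV_THREAD path implemented above (a sketch,
+ * not part of this change; on_timer is a hypothetical callback):
+ *
+ *   static void on_timer(sigval_t v) { ... }
+ *
+ *   struct sigevent   ev;
+ *   struct itimerspec its = { {1, 0}, {1, 0} };  // 1s period, 1s delay
+ *   timer_t           id;
+ *   memset(&ev, 0, sizeof(ev));
+ *   ev.sigev_notify          = SIGEV_THREAD;
+ *   ev.sigev_notify_function = on_timer;
+ *   if (timer_create(CLOCK_MONOTONIC, &ev, &id) == 0)
+ *       timer_settime(id, 0, &its, NULL);
+ */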
+
+
+int
+timer_delete( timer_t  id )
+{
+    if ( __likely(!TIMER_ID_IS_WRAPPED(id)) )
+        return __timer_delete( id );
+    else
+    {
+        thr_timer_table_t*  table = __timer_table_get();
+        thr_timer_t*        timer = thr_timer_table_from_id(table, id, 1);
+
+        if (timer == NULL) {
+            errno = EINVAL;
+            return -1;
+        }
+
+        /* tell the timer's thread to stop */
+        thr_timer_lock(timer);
+        timer->done = 1;
+        pthread_cond_signal( &timer->cond );
+        thr_timer_unlock(timer);
+
+        /* NOTE: the thread will call __timer_table_free() to free the
+         * timer object. the '1' parameter to thr_timer_table_from_id
+         * above ensured that the object and its timer_id cannot be
+         * reused before that.
+         */
+        return 0;
+    }
+}
+
+/* return the relative time until the next expiration, or 0 if
+ * the timer is disarmed */
+static void
+timer_gettime_internal( thr_timer_t*        timer,
+                        struct itimerspec*  spec)
+{
+    struct timespec  diff;
+
+    diff = timer->expires;
+    if (!timespec_is_zero(&diff)) 
+    {
+        struct timespec  now;
+
+        clock_gettime( timer->clock, &now );
+        timespec_sub(&diff, &now);
+
+        /* in case of overrun, return 0 */
+        if (timespec_cmp0(&diff) < 0) {
+            timespec_zero(&diff);
+        }
+    }
+
+    spec->it_value    = diff;
+    spec->it_interval = timer->period;
+}
+
+
+int
+timer_gettime( timer_t  id, struct itimerspec*  ospec )
+{
+    if (ospec == NULL) {
+        errno = EINVAL;
+        return -1;
+    }
+
+    if ( __likely(!TIMER_ID_IS_WRAPPED(id)) ) {
+        return __timer_gettime( id, ospec );
+    } else {
+        thr_timer_t*  timer = thr_timer_from_id(id);
+
+        if (timer == NULL) {
+            errno = EINVAL;
+            return -1;
+        }
+        thr_timer_lock(timer);
+        timer_gettime_internal( timer, ospec );
+        thr_timer_unlock(timer);
+    }
+    return 0;
+}
+
+
+int
+timer_settime( timer_t                   id,
+               int                       flags,
+               const struct itimerspec*  spec,
+               struct itimerspec*        ospec )
+{
+    if (spec == NULL) {
+        errno = EINVAL;
+        return -1;
+    }
+
+    if ( __likely(!TIMER_ID_IS_WRAPPED(id)) ) {
+        return __timer_settime( id, flags, spec, ospec );
+    } else {
+        thr_timer_t*        timer = thr_timer_from_id(id);
+        struct timespec     expires, now;
+
+        if (timer == NULL) {
+            errno = EINVAL;
+            return -1;
+        }
+        thr_timer_lock(timer);
+
+        /* return current timer value if ospec isn't NULL */
+        if (ospec != NULL) {
+            timer_gettime_internal(timer, ospec );
+        }
+
+        /* compute next expiration time. note that if the
+         * new it_interval is 0, we should disarm the timer
+         */
+        expires = spec->it_value;
+        if (!timespec_is_zero(&expires)) {
+            clock_gettime( timer->clock, &now );
+            if (!(flags & TIMER_ABSTIME)) {
+                timespec_add(&expires, &now);
+            } else {
+                if (timespec_cmp(&expires, &now) < 0)
+                    expires = now;
+            }
+        }
+        timer->expires = expires;
+        timer->period  = spec->it_interval;
+        thr_timer_unlock( timer );
+
+        /* signal the change to the thread */
+        pthread_cond_signal( &timer->cond );
+    }
+    return 0;
+}
+
+
+int
+timer_getoverrun(timer_t  id)
+{
+    if ( __likely(!TIMER_ID_IS_WRAPPED(id)) ) {
+        return __timer_getoverrun( id );
+    } else {
+        thr_timer_t*  timer = thr_timer_from_id(id);
+        int           result;
+
+        if (timer == NULL) {
+            errno = EINVAL;
+            return -1;
+        }
+
+        thr_timer_lock(timer);
+        result = timer->overruns;
+        thr_timer_unlock(timer);
+
+        return result;
+    }
+}
+
+
+static void*
+timer_thread_start( void*  _arg )
+{
+    thr_timer_t*  timer = _arg;
+
+    thr_timer_lock( timer );
+
+    /* we loop until timer->done is set in timer_delete() */
+    while (!timer->done) 
+    {
+        struct timespec   expires = timer->expires;
+        struct timespec   period  = timer->period;
+        struct timespec   now;
+
+        /* if the timer is stopped or disarmed, wait indefinitely
+         * for a state change from timer_settime/_delete/_start_stop
+         */
+        if ( timer->stopped || timespec_is_zero(&expires) )
+        {
+            pthread_cond_wait( &timer->cond, &timer->mutex );
+            continue;
+        }
+
+        /* otherwise, we need to do a timed wait until either a
+         * state change or the timer expiration time, whichever
+         * comes first.
+         */
+        clock_gettime(timer->clock, &now);
+
+        if (timespec_cmp( &expires, &now ) > 0)
+        {
+            /* cool, there was no overrun, so compute the
+             * relative timeout as 'expires - now', then wait
+             */
+            int              ret;
+            struct timespec  diff = expires;
+            timespec_sub( &diff, &now );
+
+            ret = __pthread_cond_timedwait_relative(
+                        &timer->cond, &timer->mutex, &diff);
+
+            /* if we didn't time out, it means that a state change
+             * occurred, so reloop to take care of it.
+             */
+            if (ret != ETIMEDOUT)
+                continue;
+        }
+        else
+        {
+            /* an overrun was detected before we could even wait! */
+            if (!timespec_is_zero( &period ) )
+            {
+                /* for periodic timers, compute total overrun count */
+                do {
+                    timespec_add( &expires, &period );
+                    if (timer->overruns < DELAYTIMER_MAX)
+                        timer->overruns += 1;
+                } while ( timespec_cmp( &expires, &now ) < 0 );
+
+                /* backtrack the last one, because we're going to
+                 * add the same value just a bit later */
+                timespec_sub( &expires, &period );
+            }
+            else
+            {
+                /* for non-periodic timer, things are simple */
+                timer->overruns = 1;
+            }
+        }
+
+        /* if we get here, a timeout was detected.
+         * first reload or disarm the timer as needed
+         */
+        if ( !timespec_is_zero(&period) ) {
+            timespec_add( &expires, &period );
+        } else {
+            timespec_zero( &expires );
+        }
+        timer->expires = expires;
+
+        /* now call the timer callback function. release the
+         * lock to allow the function to modify the timer setting
+         * or call timer_getoverrun().
+         *
+         * NOTE: at this point we trust the callback not to be a
+         *       total moron and pthread_kill() the timer thread
+         */
+        thr_timer_unlock(timer);
+        timer->callback( timer->value );
+        thr_timer_lock(timer);
+
+        /* now clear the overruns counter. it only makes sense
+         * within the callback */
+        timer->overruns = 0;
+    }
+
+    thr_timer_unlock( timer );
+
+    /* free the timer object now. there is no need to call
+     * __timer_table_get() since we're guaranteed that __timer_table
+     * is initialized in this thread
+     */
+    thr_timer_table_free(__timer_table, timer);
+
+    return NULL;
+}
diff --git a/libc/bionic/pthread.c b/libc/bionic/pthread.c
new file mode 100644
index 0000000..ec3c459
--- /dev/null
+++ b/libc/bionic/pthread.c
@@ -0,0 +1,1587 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <sys/types.h>
+#include <unistd.h>
+#include <signal.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/atomics.h>
+#include <bionic_tls.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <time.h>
+#include "pthread_internal.h"
+#include "thread_private.h"
+#include <limits.h>
+#include <memory.h>
+#include <assert.h>
+#include <malloc.h>
+
+extern int  __pthread_clone(int (*fn)(void*), void *child_stack, int flags, void *arg);
+extern void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode);
+extern void _exit_thread(int  retCode);
+extern int  __set_errno(int);
+
+void _thread_created_hook(pid_t thread_id) __attribute__((noinline));
+
+#define PTHREAD_ATTR_FLAG_DETACHED      0x00000001
+#define PTHREAD_ATTR_FLAG_USER_STACK    0x00000002
+
+#define DEFAULT_STACKSIZE (1024 * 1024)
+#define STACKBASE 0x10000000
+
+static uint8_t * gStackBase = (uint8_t *)STACKBASE;
+
+static pthread_mutex_t mmap_lock = PTHREAD_MUTEX_INITIALIZER;
+
+
+static const pthread_attr_t gDefaultPthreadAttr = {
+    .flags = 0,
+    .stack_base = NULL,
+    .stack_size = DEFAULT_STACKSIZE,
+    .guard_size = PAGE_SIZE,
+    .sched_policy = SCHED_NORMAL,
+    .sched_priority = 0
+};
+
+#define  INIT_THREADS  1
+
+static pthread_internal_t*  gThreadList = NULL;
+static pthread_mutex_t gThreadListLock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t gDebuggerNotificationLock = PTHREAD_MUTEX_INITIALIZER;
+
+
+/* we simply malloc/free the internal pthread_internal_t structures. we may
+ * want to use a different allocation scheme in the future, but this one
+ * should be more than enough
+ */
+static pthread_internal_t*
+_pthread_internal_alloc(void)
+{
+    pthread_internal_t*   thread;
+
+    thread = calloc(1, sizeof(*thread));  /* nmemb first, then size */
+    if (thread)
+        thread->intern = 1;
+
+    return thread;
+}
+
+static void
+_pthread_internal_free( pthread_internal_t*  thread )
+{
+    if (thread && thread->intern) {
+        thread->intern = 0;  /* just in case */
+        free (thread);
+    }
+}
+
+
+static void
+_pthread_internal_remove_locked( pthread_internal_t*  thread )
+{
+    if (thread->next != NULL)  /* the last element has no successor */
+        thread->next->pref = thread->pref;
+    thread->pref[0] = thread->next;
+}
+
+static void
+_pthread_internal_remove( pthread_internal_t*  thread )
+{
+    pthread_mutex_lock(&gThreadListLock);
+    _pthread_internal_remove_locked(thread);
+    pthread_mutex_unlock(&gThreadListLock);
+}
+
+static void
+_pthread_internal_add( pthread_internal_t*  thread )
+{
+    pthread_mutex_lock(&gThreadListLock);
+    thread->pref = &gThreadList;
+    thread->next = thread->pref[0];
+    if (thread->next)
+        thread->next->pref = &thread->next;
+    thread->pref[0] = thread;
+    pthread_mutex_unlock(&gThreadListLock);
+}
+
+pthread_internal_t*
+__get_thread(void)
+{
+    void**  tls = (void**)__get_tls();
+
+    return  (pthread_internal_t*) tls[TLS_SLOT_THREAD_ID];
+}
+
+
+void*
+__get_stack_base(int  *p_stack_size)
+{
+    pthread_internal_t*  thread = __get_thread();
+
+    *p_stack_size = thread->attr.stack_size;
+    return thread->attr.stack_base;
+}
+
+
+void  __init_tls(void**  tls, void*  thread)
+{
+    int  nn;
+
+    ((pthread_internal_t*)thread)->tls = tls;
+
+    // slot 0 must point to the tls area, this is required by the implementation
+    // of the x86 Linux kernel thread-local-storage
+    tls[TLS_SLOT_SELF]      = (void*)tls;
+    tls[TLS_SLOT_THREAD_ID] = thread;
+    for (nn = TLS_SLOT_ERRNO; nn < BIONIC_TLS_SLOTS; nn++)
+       tls[nn] = 0;
+
+    __set_tls( (void*)tls );
+}
+
+
+/*
+ * This trampoline is called from the assembly clone() function
+ */
+void __thread_entry(int (*func)(void*), void *arg, void **tls)
+{
+    int retValue;
+    pthread_internal_t * thrInfo;
+
+    // Wait for our creating thread to release us. This lets it have time to
+    // notify gdb about this thread before it starts doing anything.
+    pthread_mutex_t * start_mutex = (pthread_mutex_t *)&tls[TLS_SLOT_SELF];
+    pthread_mutex_lock(start_mutex);
+    pthread_mutex_destroy(start_mutex);
+
+    thrInfo = (pthread_internal_t *) tls[TLS_SLOT_THREAD_ID];
+
+    __init_tls( tls, thrInfo );
+
+    pthread_exit( (void*)func(arg) );
+}
+
+void _init_thread(pthread_internal_t * thread, pid_t kernel_id, pthread_attr_t * attr, void * stack_base)
+{
+    if (attr == NULL) {
+        thread->attr = gDefaultPthreadAttr;
+    } else {
+        thread->attr = *attr;
+    }
+    thread->attr.stack_base = stack_base;
+    thread->kernel_id       = kernel_id;
+
+    // set the scheduling policy/priority of the thread
+    if (thread->attr.sched_policy != SCHED_NORMAL) {
+        struct sched_param param;
+        param.sched_priority = thread->attr.sched_priority;
+        sched_setscheduler(kernel_id, thread->attr.sched_policy, &param);
+    }
+
+    pthread_cond_init(&thread->join_cond, NULL);
+    thread->join_count = 0;
+
+    thread->cleanup_stack = NULL;
+
+    _pthread_internal_add(thread);
+}
+
+
+/* XXX stacks not reclaimed if thread spawn fails */
+/* XXX stacks address spaces should be reused if available again */
+
+static void *mkstack(size_t size, size_t guard_size)
+{
+    void * stack;
+
+    pthread_mutex_lock(&mmap_lock);
+
+    stack = mmap((void *)gStackBase, size,
+                 PROT_READ | PROT_WRITE,
+                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                 -1, 0);
+
+    if(stack == MAP_FAILED) {
+        stack = NULL;
+        goto done;
+    }
+
+    if(mprotect(stack, guard_size, PROT_NONE)){
+        munmap(stack, size);
+        stack = NULL;
+        goto done;
+    }
+
+done:
+    pthread_mutex_unlock(&mmap_lock);
+    return stack;
+}
+
+/*
+ * Create a new thread. The thread's stack is laid out like so:
+ *
+ * +---------------------------+
+ * |     pthread_internal_t    |
+ * +---------------------------+
+ * |                           |
+ * |          TLS area         |
+ * |                           |
+ * +---------------------------+
+ * |                           |
+ * .                           .
+ * .         stack area        .
+ * .                           .
+ * |                           |
+ * +---------------------------+
+ * |         guard page        |
+ * +---------------------------+
+ *
+ *  note that TLS[0] must be a pointer to itself, this is required
+ *  by the thread-local storage implementation of the x86 Linux
+ *  kernel, where the TLS pointer is read by reading fs:[0]
+ */
+int pthread_create(pthread_t *thread_out, pthread_attr_t const * attr,
+                   void *(*start_routine)(void *), void * arg)
+{
+    char*   stack;
+    void**  tls;
+    int tid;
+    pthread_mutex_t * start_mutex;
+    pthread_internal_t * thread;
+    int                  madestack = 0;
+    int     old_errno = errno;
+
+    /* this will inform the rest of the C library that at least one thread
+     * was created. this will force certain functions to acquire/release
+     * locks (e.g. atexit()) to protect shared global structures.
+     *
+     * this works because pthread_create() is not called by the C library
+     * initialization routine that sets up the main thread's data structures.
+     */
+    __isthreaded = 1;
+
+    thread = _pthread_internal_alloc();
+    if (thread == NULL)
+        return ENOMEM;
+
+    if (attr == NULL) {
+        attr = &gDefaultPthreadAttr;
+    }
+
+    // make sure the stack is PAGE_SIZE aligned
+    size_t stackSize = (attr->stack_size +
+                        (PAGE_SIZE-1)) & ~(PAGE_SIZE-1);
+
+    if (!attr->stack_base) {
+        stack = mkstack(stackSize, attr->guard_size);
+        if(stack == NULL) {
+            _pthread_internal_free(thread);
+            return ENOMEM;
+        }
+        madestack = 1;
+    } else {
+        stack = attr->stack_base;
+    }
+
+    // Make room for TLS
+    tls = (void**)(stack + stackSize - BIONIC_TLS_SLOTS*sizeof(void*));
+
+    // Create a mutex for the thread in TLS_SLOT_SELF to wait on once it starts so we can keep
+    // it from doing anything until after we notify the debugger about it
+    start_mutex = (pthread_mutex_t *) &tls[TLS_SLOT_SELF];
+    pthread_mutex_init(start_mutex, NULL);
+    pthread_mutex_lock(start_mutex);
+
+    tls[TLS_SLOT_THREAD_ID] = thread;
+
+    tid = __pthread_clone((int(*)(void*))start_routine, tls,
+                CLONE_FILES | CLONE_FS | CLONE_VM | CLONE_SIGHAND
+                | CLONE_THREAD | CLONE_SYSVSEM | CLONE_DETACHED,
+                arg);
+
+    if(tid < 0) {
+        int  result;
+        if (madestack)
+            munmap(stack, stackSize);
+        _pthread_internal_free(thread);
+        result = errno;
+        errno = old_errno;
+        return result;
+    }
+
+    _init_thread(thread, tid, (pthread_attr_t*)attr, stack);
+
+    if (!madestack)
+        thread->attr.flags |= PTHREAD_ATTR_FLAG_USER_STACK;
+
+    // Notify any debuggers about the new thread
+    pthread_mutex_lock(&gDebuggerNotificationLock);
+    _thread_created_hook(tid);
+    pthread_mutex_unlock(&gDebuggerNotificationLock);
+
+    // Let the thread do its thing
+    pthread_mutex_unlock(start_mutex);
+
+    *thread_out = (pthread_t)thread;
+    return 0;
+}
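+
+/* usage sketch (editor's annotation, not part of the original change):
+ * creating a joinable thread with the default attributes and collecting
+ * its return value. the function names are hypothetical.
+ */
+#if 0
+static void*  worker(void*  arg)
+{
+    return (void*)((int)arg + 1);
+}
+
+static int  spawn_and_join(void)
+{
+    pthread_t  t;
+    void*      result;
+
+    if (pthread_create(&t, NULL, worker, (void*)41) != 0)
+        return -1;
+    if (pthread_join(t, &result) != 0)
+        return -1;
+    return (int)result;  /* 42 */
+}
+#endif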
+
+
+int pthread_attr_init(pthread_attr_t * attr)
+{
+    *attr = gDefaultPthreadAttr;
+    return 0;
+}
+
+int pthread_attr_destroy(pthread_attr_t * attr)
+{
+    memset(attr, 0x42, sizeof(pthread_attr_t));
+    return 0;
+}
+
+int pthread_attr_setdetachstate(pthread_attr_t * attr, int state)
+{
+    if (state == PTHREAD_CREATE_DETACHED) {
+        attr->flags |= PTHREAD_ATTR_FLAG_DETACHED;
+    } else if (state == PTHREAD_CREATE_JOINABLE) {
+        attr->flags &= ~PTHREAD_ATTR_FLAG_DETACHED;
+    } else {
+        return EINVAL;
+    }
+    return 0;
+}
+
+int pthread_attr_getdetachstate(pthread_attr_t const * attr, int * state)
+{
+    *state = (attr->flags & PTHREAD_ATTR_FLAG_DETACHED)
+           ? PTHREAD_CREATE_DETACHED
+           : PTHREAD_CREATE_JOINABLE;
+    return 0;
+}
+
+int pthread_attr_setschedpolicy(pthread_attr_t * attr, int policy)
+{
+    attr->sched_policy = policy;
+    return 0;
+}
+
+int pthread_attr_getschedpolicy(pthread_attr_t const * attr, int * policy)
+{
+    *policy = attr->sched_policy;
+    return 0;
+}
+
+int pthread_attr_setschedparam(pthread_attr_t * attr, struct sched_param const * param)
+{
+    attr->sched_priority = param->sched_priority;
+    return 0;
+}
+
+int pthread_attr_getschedparam(pthread_attr_t const * attr, struct sched_param * param)
+{
+    param->sched_priority = attr->sched_priority;
+    return 0;
+}
+
+int pthread_attr_setstacksize(pthread_attr_t * attr, size_t stack_size)
+{
+    if ((stack_size & (PAGE_SIZE - 1) || stack_size < PTHREAD_STACK_MIN)) {
+        return EINVAL;
+    }
+    attr->stack_size = stack_size;
+    return 0;
+}
+
+int pthread_attr_getstacksize(pthread_attr_t const * attr, size_t * stack_size)
+{
+    *stack_size = attr->stack_size;
+    return 0;
+}
+
+int pthread_attr_setstackaddr(pthread_attr_t * attr, void * stack_addr)
+{
+#if 1
+    // It's not clear if this is setting the top or bottom of the stack, so don't handle it for now.
+    return ENOSYS;
+#else
+    if ((uint32_t)stack_addr & (PAGE_SIZE - 1)) {
+        return EINVAL;
+    }
+    attr->stack_base = stack_addr;
+    return 0;
+#endif
+}
+
+int pthread_attr_getstackaddr(pthread_attr_t const * attr, void ** stack_addr)
+{
+    *stack_addr = attr->stack_base + attr->stack_size;
+    return 0;
+}
+
+int pthread_attr_setstack(pthread_attr_t * attr, void * stack_base, size_t stack_size)
+{
+    if ((stack_size & (PAGE_SIZE - 1) || stack_size < PTHREAD_STACK_MIN)) {
+        return EINVAL;
+    }
+    if ((uint32_t)stack_base & (PAGE_SIZE - 1)) {
+        return EINVAL;
+    }
+    attr->stack_base = stack_base;
+    attr->stack_size = stack_size;
+    return 0;
+}
+
+int pthread_attr_getstack(pthread_attr_t const * attr, void ** stack_base, size_t * stack_size)
+{
+    *stack_base = attr->stack_base;
+    *stack_size = attr->stack_size;
+    return 0;
+}
+
+int pthread_attr_setguardsize(pthread_attr_t * attr, size_t guard_size)
+{
+    if (guard_size & (PAGE_SIZE - 1) || guard_size < PAGE_SIZE) {
+        return EINVAL;
+    }
+
+    attr->guard_size = guard_size;
+    return 0;
+}
+
+int pthread_attr_getguardsize(pthread_attr_t const * attr, size_t * guard_size)
+{
+    *guard_size = attr->guard_size;
+    return 0;
+}
+
+int pthread_getattr_np(pthread_t thid, pthread_attr_t * attr)
+{
+    pthread_internal_t * thread = (pthread_internal_t *)thid;
+    *attr = thread->attr;
+    return 0;
+}
+
+int pthread_attr_setscope(pthread_attr_t *attr, int  scope)
+{
+    if (scope == PTHREAD_SCOPE_SYSTEM)
+        return 0;
+    if (scope == PTHREAD_SCOPE_PROCESS)
+        return ENOTSUP;
+
+    return EINVAL;
+}
+
+int pthread_attr_getscope(pthread_attr_t const *attr)
+{
+    return PTHREAD_SCOPE_SYSTEM;
+}
+
+
+/* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support C++ exceptions
+ *         and thread cancelation
+ */
+
+void __pthread_cleanup_push( __pthread_cleanup_t*      c,
+                             __pthread_cleanup_func_t  routine,
+                             void*                     arg )
+{
+    pthread_internal_t*  thread = __get_thread();
+
+    c->__cleanup_routine  = routine;
+    c->__cleanup_arg      = arg;
+    c->__cleanup_prev     = thread->cleanup_stack;
+    thread->cleanup_stack = c;
+}
+
+void __pthread_cleanup_pop( __pthread_cleanup_t*  c, int  execute )
+{
+    pthread_internal_t*  thread = __get_thread();
+
+    thread->cleanup_stack = c->__cleanup_prev;
+    if (execute)
+        c->__cleanup_routine(c->__cleanup_arg);
+}
+
+/* used by pthread_exit() to clean all TLS keys of the current thread */
+static void pthread_key_clean_all(void);
+
+void pthread_exit(void * retval)
+{
+    pthread_internal_t*  thread     = __get_thread();
+    void*                stack_base = thread->attr.stack_base;
+    int                  stack_size = thread->attr.stack_size;
+    int                  user_stack = (thread->attr.flags & PTHREAD_ATTR_FLAG_USER_STACK) != 0;
+
+    // call the cleanup handlers first
+    while (thread->cleanup_stack) {
+        __pthread_cleanup_t*  c = thread->cleanup_stack;
+        thread->cleanup_stack   = c->__cleanup_prev;
+        c->__cleanup_routine(c->__cleanup_arg);
+    }
+
+    // call the TLS destructors, it is important to do that before removing this
+    // thread from the global list. this will ensure that if someone else deletes
+    // a TLS key, the corresponding value will be set to NULL in this thread's TLS
+    // space (see pthread_key_delete)
+    pthread_key_clean_all();
+
+    // if the thread is detached, destroy the pthread_internal_t
+    // otherwise, keep it in memory and signal any joiners
+    if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
+        _pthread_internal_remove(thread);
+        _pthread_internal_free(thread);
+    } else {
+       /* the join_count field is used to store the number of threads waiting for
+        * the termination of this thread with pthread_join(),
+        *
+        * if it is positive we need to signal the waiters, and we do not touch
+        * the count (it will be decremented by the waiters, the last one will
+        * also remove/free the thread structure
+        *
+        * if it is zero, we set the count value to -1 to indicate that the
+        * thread is in 'zombie' state: it has stopped executing, and its stack
+        * is gone (as well as its TLS area). when another thread calls pthread_join()
+        * on it, it will immediately free the thread and return.
+        */
+        pthread_mutex_lock(&gThreadListLock);
+        thread->return_value = retval;
+        if (thread->join_count > 0) {
+            pthread_cond_broadcast(&thread->join_cond);
+        } else {
+            thread->join_count = -1;  /* zombie thread */
+        }
+        pthread_mutex_unlock(&gThreadListLock);
+    }
+
+    // destroy the thread stack
+    if (user_stack)
+        _exit_thread((int)retval);
+    else
+        _exit_with_stack_teardown(stack_base, stack_size, (int)retval);
+}
+
+int pthread_join(pthread_t thid, void ** ret_val)
+{
+    pthread_internal_t*  thread = (pthread_internal_t*)thid;
+    int                  count;
+
+    // check that the thread still exists and is not detached
+    pthread_mutex_lock(&gThreadListLock);
+
+    for (thread = gThreadList; thread != NULL; thread = thread->next)
+        if (thread == (pthread_internal_t*)thid)
+            break;
+
+    if (!thread) {
+        pthread_mutex_unlock(&gThreadListLock);
+        return ESRCH;
+    }
+
+    if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
+        pthread_mutex_unlock(&gThreadListLock);
+        return EINVAL;
+    }
+
+   /* wait for thread death when needed
+    *
+    * if the 'join_count' is negative, this is a 'zombie' thread that
+    * is already dead and without stack/TLS
+    *
+    * otherwise, we need to increment 'join_count' and wait to be signaled
+    */
+    count = thread->join_count;
+    if (count >= 0) {
+        thread->join_count += 1;
+        pthread_cond_wait( &thread->join_cond, &gThreadListLock );
+        count = --thread->join_count;
+    }
+    if (ret_val)
+        *ret_val = thread->return_value;
+
+    /* remove thread descriptor when we're the last joiner or when the
+     * thread was already a zombie.
+     */
+    if (count <= 0) {
+        _pthread_internal_remove_locked(thread);
+        _pthread_internal_free(thread);
+    }
+    pthread_mutex_unlock(&gThreadListLock);
+    return 0;
+}
+
+int  pthread_detach( pthread_t  thid )
+{
+    pthread_internal_t*  thread;
+    int                  result = 0;
+    int                  flags;
+
+    pthread_mutex_lock(&gThreadListLock);
+    for (thread = gThreadList; thread != NULL; thread = thread->next)
+        if (thread == (pthread_internal_t*)thid)
+            goto FoundIt;
+
+    result = ESRCH;
+    goto Exit;
+
+FoundIt:
+    do {
+        flags = thread->attr.flags;
+
+        if ( flags & PTHREAD_ATTR_FLAG_DETACHED ) {
+            /* thread is not joinable! */
+            result = EINVAL;
+            goto Exit;
+        }
+    }
+    while ( __atomic_cmpxchg( flags, flags | PTHREAD_ATTR_FLAG_DETACHED,
+                              (volatile int*)&thread->attr.flags ) != 0 );
+Exit:
+    pthread_mutex_unlock(&gThreadListLock);
+    return result;
+}
+
+pthread_t pthread_self(void)
+{
+    return (pthread_t)__get_thread();
+}
+
+int pthread_equal(pthread_t one, pthread_t two)
+{
+    return (one == two ? 1 : 0);
+}
+
+int pthread_getschedparam(pthread_t thid, int * policy,
+                          struct sched_param * param)
+{
+    int  old_errno = errno;
+
+    pthread_internal_t * thread = (pthread_internal_t *)thid;
+    int err = sched_getparam(thread->kernel_id, param);
+    if (!err) {
+        *policy = sched_getscheduler(thread->kernel_id);
+    } else {
+        err = errno;
+        errno = old_errno;
+    }
+    return err;
+}
+
+int pthread_setschedparam(pthread_t thid, int policy,
+                          struct sched_param const * param)
+{
+    pthread_internal_t * thread = (pthread_internal_t *)thid;
+    int                  old_errno = errno;
+    int                  ret;
+
+    ret = sched_setscheduler(thread->kernel_id, policy, param);
+    if (ret < 0) {
+        ret = errno;
+        errno = old_errno;
+    }
+    return ret;
+}
+
+
+int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout);
+int __futex_wake(volatile void *ftx, int count);
+
+// mutex lock states
+//
+// 0: unlocked
+// 1: locked, no waiters
+// 2: locked, maybe waiters
+
+/* a mutex is implemented as a 32-bit integer holding the following fields
+ *
+ * bits:     name     description
+ * 31-16     tid      owner thread's kernel id (recursive and errorcheck only)
+ * 15-14     type     mutex type
+ * 13-2      counter  counter of recursive mutexes
+ * 1-0       state    lock state (0, 1 or 2)
+ */
+
+
+#define  MUTEX_OWNER(m)  (((m)->value >> 16) & 0xffff)
+#define  MUTEX_COUNTER(m) (((m)->value >> 2) & 0xfff)
+
+#define  MUTEX_TYPE_MASK       0xc000
+#define  MUTEX_TYPE_NORMAL     0x0000
+#define  MUTEX_TYPE_RECURSIVE  0x4000
+#define  MUTEX_TYPE_ERRORCHECK 0x8000
+
+#define  MUTEX_COUNTER_SHIFT  2
+#define  MUTEX_COUNTER_MASK   0x3ffc
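+
+/* sketch (editor's annotation, not part of the original change): how the
+ * fields above unpack from a recursive mutex that thread 0x1234 has
+ * locked twice without contention. the value is
+ *
+ *     (0x1234 << 16) | MUTEX_TYPE_RECURSIVE | (1 << MUTEX_COUNTER_SHIFT) | 1
+ *
+ * i.e. owner == 0x1234, type == recursive, counter == 1 (one re-lock),
+ * state == 1 (locked, no waiters).
+ */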
+
+
+
+
+int pthread_mutexattr_init(pthread_mutexattr_t *attr)
+{
+    if (attr) {
+        *attr = PTHREAD_MUTEX_DEFAULT;
+        return 0;
+    } else {
+        return EINVAL;
+    }
+}
+
+int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
+{
+    if (attr) {
+        *attr = -1;
+        return 0;
+    } else {
+        return EINVAL;
+    }
+}
+
+int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
+{
+    if (attr && *attr >= PTHREAD_MUTEX_NORMAL &&
+                *attr <= PTHREAD_MUTEX_ERRORCHECK ) {
+        *type = *attr;
+        return 0;
+    }
+    return EINVAL;
+}
+
+int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
+{
+    if (attr && type >= PTHREAD_MUTEX_NORMAL &&
+                type <= PTHREAD_MUTEX_ERRORCHECK ) {
+        *attr = type;
+        return 0;
+    }
+    return EINVAL;
+}
+
+/* process-shared mutexes are not supported at the moment */
+
+int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int  pshared)
+{
+    if (!attr)
+        return EINVAL;
+
+    return (pshared == PTHREAD_PROCESS_PRIVATE) ? 0 : ENOTSUP;
+}
+
+int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared)
+{
+    if (!attr)
+        return EINVAL;
+
+    *pshared = PTHREAD_PROCESS_PRIVATE;
+    return 0;
+}
+
+int pthread_mutex_init(pthread_mutex_t *mutex,
+                       const pthread_mutexattr_t *attr)
+{
+    if ( mutex ) {
+        if (attr == NULL) {
+            mutex->value = MUTEX_TYPE_NORMAL;
+            return 0;
+        }
+        switch ( *attr ) {
+        case PTHREAD_MUTEX_NORMAL:
+            mutex->value = MUTEX_TYPE_NORMAL;
+            return 0;
+
+        case PTHREAD_MUTEX_RECURSIVE:
+            mutex->value = MUTEX_TYPE_RECURSIVE;
+            return 0;
+
+        case PTHREAD_MUTEX_ERRORCHECK:
+            mutex->value = MUTEX_TYPE_ERRORCHECK;
+            return 0;
+        }
+    }
+    return EINVAL;
+}
+
+int pthread_mutex_destroy(pthread_mutex_t *mutex)
+{
+    mutex->value = 0xdead10cc;
+    return 0;
+}
+
+
+/*
+ * Lock a non-recursive mutex.
+ *
+ * As noted above, there are three states:
+ *   0 (unlocked, no contention)
+ *   1 (locked, no contention)
+ *   2 (locked, contention)
+ *
+ * Non-recursive mutexes don't use the thread-id or counter fields, and the
+ * "type" value is zero, so the only bits that will be set are the ones in
+ * the lock state field.
+ */
+static __inline__ void
+_normal_lock(pthread_mutex_t*  mutex)
+{
+    /*
+     * The common case is an unlocked mutex, so we begin by trying to
+     * change the lock's state from 0 to 1.  __atomic_cmpxchg() returns 0
+     * if it made the swap successfully.  If the result is nonzero, this
+     * lock is already held by another thread.
+     */
+    if (__atomic_cmpxchg(0, 1, &mutex->value ) != 0) {
+        /*
+         * We want to go to sleep until the mutex is available, which
+         * requires promoting it to state 2.  We need to swap in the new
+         * state value and then wait until somebody wakes us up.
+         *
+         * __atomic_swap() returns the previous value.  We swap 2 in and
+         * see if we got zero back; if so, we have acquired the lock.  If
+         * not, another thread still holds the lock and we wait again.
+         *
+         * The second argument to the __futex_wait() call is compared
+         * against the current value.  If it doesn't match, __futex_wait()
+         * returns immediately (otherwise, it sleeps for a time specified
+         * by the third argument; 0 means sleep forever).  This ensures
+         * that the mutex is in state 2 when we go to sleep on it, which
+         * guarantees a wake-up call.
+         */
+        while (__atomic_swap(2, &mutex->value ) != 0)
+            __futex_wait(&mutex->value, 2, 0);
+    }
+}
+
+/*
+ * Release a non-recursive mutex.  The caller is responsible for determining
+ * that we are in fact the owner of this lock.
+ */
+static __inline__ void
+_normal_unlock(pthread_mutex_t*  mutex)
+{
+    /*
+     * The mutex value will be 1 or (rarely) 2.  We use an atomic decrement
+     * to release the lock.  __atomic_dec() returns the previous value;
+     * if it wasn't 1 we have to do some additional work.
+     */
+    if (__atomic_dec(&mutex->value) != 1) {
+        /*
+         * Start by releasing the lock.  The decrement changed it from
+         * "contended lock" to "uncontended lock", which means we still
+         * hold it, and anybody who tries to sneak in will push it back
+         * to state 2.
+         *
+         * Once we set it to zero the lock is up for grabs.  We follow
+         * this with a __futex_wake() to ensure that one of the waiting
+         * threads has a chance to grab it.
+         *
+         * This doesn't cause a race with the swap/wait pair in
+         * _normal_lock(), because the __futex_wait() call there will
+         * return immediately if the mutex value isn't 2.
+         */
+        mutex->value = 0;
+
+        /*
+         * Wake up one waiting thread.  We don't know which thread will be
+         * woken or when it'll start executing -- futexes make no guarantees
+         * here.  There may not even be a thread waiting.
+         *
+         * The newly-woken thread will replace the 0 we just set above
+         * with 2, which means that when it eventually releases the mutex
+         * it will also call FUTEX_WAKE.  This results in one extra wake
+         * call whenever a lock is contended, but lets us avoid forgetting
+         * anyone without requiring us to track the number of sleepers.
+         *
+         * It's possible for another thread to sneak in and grab the lock
+         * between the zero assignment above and the wake call below.  If
+         * the new thread is "slow" and holds the lock for a while, we'll
+         * wake up a sleeper, which will swap in a 2 and then go back to
+         * sleep since the lock is still held.  If the new thread is "fast",
+         * running to completion before we call wake, the thread we
+         * eventually wake will find an unlocked mutex and will execute.
+         * Either way we have correct behavior and nobody is orphaned on
+         * the wait queue.
+         */
+        __futex_wake(&mutex->value, 1);
+    }
+}
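+
+/* worked example (editor's annotation, not part of the original change):
+ * the state transitions for a normal mutex under contention:
+ *
+ *   T1 locks:    0 -> 1   (cmpxchg succeeds on the fast path)
+ *   T2 locks:    1 -> 2   (swap returns 1, so T2 sleeps on the futex)
+ *   T1 unlocks:  2 -> 0   (dec returns 2, so T1 wakes one waiter)
+ *   T2 wakes:    0 -> 2   (swap returns 0, so T2 now owns the lock)
+ */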
+
+static pthread_mutex_t  __recursive_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static void
+_recursive_lock(void)
+{
+    _normal_lock( &__recursive_lock);
+}
+
+static void
+_recursive_unlock(void)
+{
+    _normal_unlock( &__recursive_lock );
+}
+
+#define  __likely(cond)    __builtin_expect(!!(cond), 1)
+#define  __unlikely(cond)  __builtin_expect(!!(cond), 0)
+
+int pthread_mutex_lock(pthread_mutex_t *mutex)
+{
+    if (__likely(mutex != NULL))
+    {
+        int  mtype = (mutex->value & MUTEX_TYPE_MASK);
+
+        if ( __likely(mtype == MUTEX_TYPE_NORMAL) ) {
+            _normal_lock(mutex);
+        }
+        else
+        {
+            int  tid = __get_thread()->kernel_id;
+
+            if ( tid == MUTEX_OWNER(mutex) )
+            {
+                int  oldv, counter;
+
+                if (mtype == MUTEX_TYPE_ERRORCHECK) {
+                    /* trying to re-lock a mutex we already acquired */
+                    return EDEADLK;
+                }
+                /*
+                 * We own the mutex, but other threads are able to change
+                 * the contents (e.g. promoting it to "contended"), so we
+                 * need to hold the global lock.
+                 */
+                _recursive_lock();
+                oldv         = mutex->value;
+                counter      = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
+                mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
+                _recursive_unlock();
+            }
+            else
+            {
+                /*
+                 * If the new lock is available immediately, we grab it in
+                 * the "uncontended" state.
+                 */
+                int new_lock_type = 1;
+
+                for (;;) {
+                    int  oldv;
+
+                    _recursive_lock();
+                    oldv = mutex->value;
+                    if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
+                        mutex->value = ((tid << 16) | mtype | new_lock_type);
+                    } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
+                        oldv ^= 3;
+                        mutex->value = oldv;
+                    }
+                    _recursive_unlock();
+
+                    if (oldv == mtype)
+                        break;
+
+                    /*
+                     * The lock was held, possibly contended by others.  From
+                     * now on, if we manage to acquire the lock, we have to
+                     * assume that others are still contending for it so that
+                     * we'll wake them when we unlock it.
+                     */
+                    new_lock_type = 2;
+
+                    __futex_wait( &mutex->value, oldv, 0 );
+                }
+            }
+        }
+        return 0;
+    }
+    return EINVAL;
+}
+
+
+int pthread_mutex_unlock(pthread_mutex_t *mutex)
+{
+    if (__likely(mutex != NULL))
+    {
+        int  mtype = (mutex->value & MUTEX_TYPE_MASK);
+
+        if (__likely(mtype == MUTEX_TYPE_NORMAL)) {
+            _normal_unlock(mutex);
+        }
+        else
+        {
+            int  tid = __get_thread()->kernel_id;
+
+            if ( tid == MUTEX_OWNER(mutex) )
+            {
+                int  oldv;
+
+                _recursive_lock();
+                oldv = mutex->value;
+                if (oldv & MUTEX_COUNTER_MASK) {
+                    mutex->value = oldv - (1 << MUTEX_COUNTER_SHIFT);
+                    oldv = 0;
+                } else {
+                    mutex->value = mtype;
+                }
+                _recursive_unlock();
+
+                if ((oldv & 3) == 2)
+                    __futex_wake( &mutex->value, 1 );
+            }
+            else {
+                /* trying to unlock a lock we do not own */
+                return EPERM;
+            }
+        }
+        return 0;
+    }
+    return EINVAL;
+}
+
+
+int pthread_mutex_trylock(pthread_mutex_t *mutex)
+{
+    if (__likely(mutex != NULL))
+    {
+        int  mtype = (mutex->value & MUTEX_TYPE_MASK);
+
+        if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
+        {
+            if (__atomic_cmpxchg(0, 1, &mutex->value) == 0)
+                return 0;
+
+            return EBUSY;
+        }
+        else
+        {
+            int  tid = __get_thread()->kernel_id;
+            int  oldv;
+
+            if ( tid == MUTEX_OWNER(mutex) )
+            {
+                int  counter;  /* reuses the 'oldv' declared above */
+
+                if (mtype == MUTEX_TYPE_ERRORCHECK) {
+                    /* already locked by ourselves */
+                    return EDEADLK;
+                }
+
+                _recursive_lock();
+                oldv = mutex->value;
+                counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
+                mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
+                _recursive_unlock();
+                return 0;
+            }
+
+            /* try to lock it */
+            _recursive_lock();
+            oldv = mutex->value;
+            if (oldv == mtype)  /* uncontended released lock => state 1 */
+                mutex->value = ((tid << 16) | mtype | 1);
+            _recursive_unlock();
+
+            if (oldv != mtype)
+                return EBUSY;
+
+            return 0;
+        }
+    }
+    return EINVAL;
+}
+
+
+/* XXX *technically* there is a race condition that could allow
+ * XXX a signal to be missed.  If thread A is preempted in _wait()
+ * XXX after unlocking the mutex and before waiting, and if other
+ * XXX threads call signal or broadcast UINT_MAX times (exactly),
+ * XXX before thread A is scheduled again and calls futex_wait(),
+ * XXX then the signal will be lost.
+ */
+
+int pthread_cond_init(pthread_cond_t *cond,
+                      const pthread_condattr_t *attr)
+{
+    cond->value = 0;
+    return 0;
+}
+
+int pthread_cond_destroy(pthread_cond_t *cond)
+{
+    cond->value = 0xdeadc04d;
+    return 0;
+}
+
+int pthread_cond_broadcast(pthread_cond_t *cond)
+{
+    __atomic_dec(&cond->value);
+    __futex_wake(&cond->value, INT_MAX);
+    return 0;
+}
+
+int pthread_cond_signal(pthread_cond_t *cond)
+{
+    __atomic_dec(&cond->value);
+    __futex_wake(&cond->value, 1);
+    return 0;
+}
+
+int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
+{
+    return pthread_cond_timedwait(cond, mutex, NULL);
+}
+
+int __pthread_cond_timedwait_relative(pthread_cond_t *cond,
+                                      pthread_mutex_t * mutex,
+                                      const struct timespec *reltime)
+{
+    int  status;
+    int  oldvalue = cond->value;
+
+    pthread_mutex_unlock(mutex);
+    status = __futex_wait(&cond->value, oldvalue, reltime);
+    pthread_mutex_lock(mutex);
+
+    if (status == (-ETIMEDOUT)) return ETIMEDOUT;
+    return 0;
+}
+
+int __pthread_cond_timedwait(pthread_cond_t *cond,
+                             pthread_mutex_t * mutex,
+                             const struct timespec *abstime,
+                             clockid_t clock)
+{
+    struct timespec ts;
+    struct timespec * tsp;
+
+    if (abstime != NULL) {
+        clock_gettime(clock, &ts);
+        ts.tv_sec = abstime->tv_sec - ts.tv_sec;
+        ts.tv_nsec = abstime->tv_nsec - ts.tv_nsec;
+        if (ts.tv_nsec < 0) {
+            ts.tv_sec--;
+            ts.tv_nsec += 1000000000;
+        }
+        if((ts.tv_nsec < 0) || (ts.tv_sec < 0)) {
+            return ETIMEDOUT;
+        }
+        tsp = &ts;
+    } else {
+        tsp = NULL;
+    }
+
+    return __pthread_cond_timedwait_relative(cond, mutex, tsp);
+}
+
+int pthread_cond_timedwait(pthread_cond_t *cond,
+                           pthread_mutex_t * mutex,
+                           const struct timespec *abstime)
+{
+    return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_REALTIME);
+}
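+
+/* usage sketch (editor's annotation, not part of the original change):
+ * waiting up to 500ms for a predicate with the CLOCK_REALTIME-based
+ * function above. the names are hypothetical.
+ */
+#if 0
+static int  wait_until_ready(pthread_cond_t*  cond, pthread_mutex_t*  mutex,
+                             int*  ready)
+{
+    struct timespec  abstime;
+    int              ret = 0;
+
+    clock_gettime(CLOCK_REALTIME, &abstime);
+    abstime.tv_nsec += 500 * 1000000;
+    if (abstime.tv_nsec >= 1000000000) {
+        abstime.tv_nsec -= 1000000000;
+        abstime.tv_sec  += 1;
+    }
+
+    pthread_mutex_lock(mutex);
+    while (!*ready && ret == 0)
+        ret = pthread_cond_timedwait(cond, mutex, &abstime);
+    pthread_mutex_unlock(mutex);
+
+    return ret;  /* 0, or ETIMEDOUT after 500ms */
+}
+#endif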
+
+
+int pthread_cond_timedwait_monotonic(pthread_cond_t *cond,
+                                     pthread_mutex_t * mutex,
+                                     const struct timespec *abstime)
+{
+    return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
+}
+
+int pthread_cond_timeout_np(pthread_cond_t *cond,
+                            pthread_mutex_t * mutex,
+                            unsigned msecs)
+{
+    int oldvalue;
+    struct timespec ts;
+    int status;
+
+    ts.tv_sec = msecs / 1000;
+    ts.tv_nsec = (msecs % 1000) * 1000000;
+
+    oldvalue = cond->value;
+
+    pthread_mutex_unlock(mutex);
+    status = __futex_wait(&cond->value, oldvalue, &ts);
+    pthread_mutex_lock(mutex);
+
+    if(status == (-ETIMEDOUT)) return ETIMEDOUT;
+
+    return 0;
+}
+
+
+
+/* A technical note regarding our thread-local-storage (TLS) implementation:
+ *
+ * There can be up to TLSMAP_SIZE independent TLS keys in a given process,
+ * though the first TLSMAP_START keys are reserved for Bionic to hold
+ * special thread-specific variables like errno or a pointer to
+ * the current thread's descriptor.
+ *
+ * while stored in the TLS area, these entries cannot be accessed through
+ * pthread_getspecific() / pthread_setspecific() or pthread_key_delete()
+ *
+ * also, some entries in the key table are pre-allocated (see tlsmap_lock)
+ * to greatly simplify and speed up some OpenGL-related operations, though
+ * the initial value will be NULL on all threads.
+ *
+ * you can use pthread_getspecific()/setspecific() on these, and in theory
+ * you could also call pthread_key_delete() as well, though this would
+ * probably break some apps.
+ *
+ * The 'tlsmap_t' type defined below implements a shared global map of
+ * currently created/allocated TLS keys and the destructors associated
+ * with them. You should use tlsmap_lock/unlock to access it to avoid
+ * any race condition.
+ *
+ * the global TLS map simply contains a bitmap of allocated keys, and
+ * an array of destructors.
+ *
+ * each thread has a TLS area that is a simple array of TLSMAP_SIZE void*
+ * pointers. the TLS area of the main thread is stack-allocated in
+ * __libc_init_common, while the TLS area of other threads is placed at
+ * the top of their stack in pthread_create.
+ *
+ * when pthread_key_create() is called, it finds the first free key in the
+ * bitmap, sets it to 1, and saves the destructor along with it
+ *
+ * when pthread_key_delete() is called, it will erase the key's bitmap bit
+ * and its destructor, and will also clear the key data in the TLS area of
+ * all created threads. As mandated by Posix, it is the responsibility of
+ * the caller of pthread_key_delete() to properly reclaim the objects that
+ * were pointed to by these data fields (either before or after the call).
+ *
+ */
+
+/* TLS Map implementation
+ */
+
+#define TLSMAP_START      (TLS_SLOT_MAX_WELL_KNOWN+1)
+#define TLSMAP_SIZE       BIONIC_TLS_SLOTS
+#define TLSMAP_BITS       32
+#define TLSMAP_WORDS      ((TLSMAP_SIZE+TLSMAP_BITS-1)/TLSMAP_BITS)
+#define TLSMAP_WORD(m,k)  (m)->map[(k)/TLSMAP_BITS]
+#define TLSMAP_MASK(k)    (1U << ((k)&(TLSMAP_BITS-1)))
+
+/* this macro is used to quickly check that a key belongs to a reasonable range */
+#define TLSMAP_VALIDATE_KEY(key)  \
+    ((key) >= TLSMAP_START && (key) < TLSMAP_SIZE)
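+
+/* worked example (editor's annotation, not part of the original change):
+ * with TLSMAP_BITS == 32, key 37 lives in map[37/32] == map[1], under the
+ * mask (1U << (37 & 31)) == (1U << 5), so its allocation bit is
+ * map[1] & 0x20.
+ */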
+
+/* the type of tls key destructor functions */
+typedef void (*tls_dtor_t)(void*);
+
+typedef struct {
+    int         init;                  /* see comment in tlsmap_lock() */
+    uint32_t    map[TLSMAP_WORDS];     /* bitmap of allocated keys */
+    tls_dtor_t  dtors[TLSMAP_SIZE];    /* key destructors */
+} tlsmap_t;
+
+static pthread_mutex_t  _tlsmap_lock = PTHREAD_MUTEX_INITIALIZER;
+static tlsmap_t         _tlsmap;
+
+/* lock the global TLS map lock and return a handle to it */
+static __inline__ tlsmap_t* tlsmap_lock(void)
+{
+    tlsmap_t*   m = &_tlsmap;
+
+    pthread_mutex_lock(&_tlsmap_lock);
+    /* we need to initialize the first entry of the 'map' array
+     * with the value TLS_DEFAULT_ALLOC_MAP. doing it statically
+     * when declaring _tlsmap is a bit awkward and is going to
+     * produce warnings, so do it the first time we use the map
+     * instead
+     */
+    if (__unlikely(!m->init)) {
+        TLSMAP_WORD(m,0) = TLS_DEFAULT_ALLOC_MAP;
+        m->init          = 1;
+    }
+    return m;
+}
+
+/* unlock the global TLS map */
+static __inline__ void tlsmap_unlock(tlsmap_t*  m)
+{
+    pthread_mutex_unlock(&_tlsmap_lock);
+    (void)m;  /* a good compiler is a happy compiler */
+}
+
+/* test to see whether a key is allocated */
+static __inline__ int tlsmap_test(tlsmap_t*  m, int  key)
+{
+    return (TLSMAP_WORD(m,key) & TLSMAP_MASK(key)) != 0;
+}
+
+/* set the destructor and bit flag on a newly allocated key */
+static __inline__ void tlsmap_set(tlsmap_t*  m, int  key, tls_dtor_t  dtor)
+{
+    TLSMAP_WORD(m,key) |= TLSMAP_MASK(key);
+    m->dtors[key]       = dtor;
+}
+
+/* clear the destructor and bit flag on an existing key */
+static __inline__ void  tlsmap_clear(tlsmap_t*  m, int  key)
+{
+    TLSMAP_WORD(m,key) &= ~TLSMAP_MASK(key);
+    m->dtors[key]       = NULL;
+}
+
+/* allocate a new TLS key, return -1 if no room left */
+static int tlsmap_alloc(tlsmap_t*  m, tls_dtor_t  dtor)
+{
+    int  key;
+
+    for ( key = TLSMAP_START; key < TLSMAP_SIZE; key++ ) {
+        if ( !tlsmap_test(m, key) ) {
+            tlsmap_set(m, key, dtor);
+            return key;
+        }
+    }
+    return -1;
+}
+
+
+int pthread_key_create(pthread_key_t *key, void (*destructor_function)(void *))
+{
+    uint32_t   err = ENOMEM;
+    tlsmap_t*  map = tlsmap_lock();
+    int        k   = tlsmap_alloc(map, destructor_function);
+
+    if (k >= 0) {
+        *key = k;
+        err  = 0;
+    }
+    tlsmap_unlock(map);
+    return err;
+}
+
+
+/* This deletes a pthread_key_t. note that the standard mandates that this does
+ * not call the destructor of non-NULL key values. Instead, it is the
+ * responsibility of the caller to properly dispose of the corresponding data
+ * and resources, using any means it finds suitable.
+ *
+ * On the other hand, this function will clear the corresponding key data
+ * values in all known threads. this prevents later (invalid) calls to
+ * pthread_getspecific() from receiving invalid/stale values.
+ */
+int pthread_key_delete(pthread_key_t key)
+{
+    uint32_t             err;
+    pthread_internal_t*  thr;
+    tlsmap_t*            map;
+
+    if (!TLSMAP_VALIDATE_KEY(key)) {
+        return EINVAL;
+    }
+
+    map = tlsmap_lock();
+
+    if (!tlsmap_test(map, key)) {
+        err = EINVAL;
+        goto err1;
+    }
+
+    /* clear value in all threads */
+    pthread_mutex_lock(&gThreadListLock);
+    for ( thr = gThreadList; thr != NULL; thr = thr->next ) {
+        /* avoid zombie threads with a negative 'join_count'. these are really
+         * already dead and don't have a TLS area anymore.
+         *
+         * similarly, it is possible to have thr->tls == NULL for threads that
+         * were just recently created through pthread_create() but whose
+         * startup trampoline (__thread_entry) hasn't been run yet by the
+         * scheduler. so check for this too.
+         */
+        if (thr->join_count < 0 || !thr->tls)
+            continue;
+
+        thr->tls[key] = NULL;
+    }
+    tlsmap_clear(map, key);
+
+    pthread_mutex_unlock(&gThreadListLock);
+    err = 0;
+
+err1:
+    tlsmap_unlock(map);
+    return err;
+}
+
+
+int pthread_setspecific(pthread_key_t key, const void *ptr)
+{
+    int        err = EINVAL;
+    tlsmap_t*  map;
+
+    if (TLSMAP_VALIDATE_KEY(key)) {
+        /* check that we're trying to set data for an allocated key */
+        map = tlsmap_lock();
+        if (tlsmap_test(map, key)) {
+            ((uint32_t *)__get_tls())[key] = (uint32_t)ptr;
+            err = 0;
+        }
+        tlsmap_unlock(map);
+    }
+    return err;
+}
+
+void * pthread_getspecific(pthread_key_t key)
+{
+    if (!TLSMAP_VALIDATE_KEY(key)) {
+        return NULL;
+    }
+
+    /* for performance reasons, we do not lock/unlock the global TLS map
+     * to check that the key is properly allocated. if the key was not
+     * allocated, the value read from the TLS should always be NULL
+     * due to pthread_key_delete() clearing the values for all threads.
+     */
+    return (void *)(((unsigned *)__get_tls())[key]);
+}
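+
+/* usage sketch (editor's annotation, not part of the original change):
+ * a per-thread buffer released at thread exit through the key's
+ * destructor. assumes pthread_key_create(&buffer_key, buffer_destroy)
+ * has already run once, e.g. via pthread_once(); the names are
+ * hypothetical.
+ */
+#if 0
+static pthread_key_t  buffer_key;
+
+static void  buffer_destroy(void*  data)
+{
+    free(data);
+}
+
+static char*  get_thread_buffer(void)
+{
+    char*  buf = pthread_getspecific(buffer_key);
+    if (buf == NULL) {
+        buf = malloc(256);
+        pthread_setspecific(buffer_key, buf);
+    }
+    return buf;
+}
+#endif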
+
+/* Posix mandates that this be defined in <limits.h> but we don't have
+ * it just yet.
+ */
+#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
+#  define PTHREAD_DESTRUCTOR_ITERATIONS  4
+#endif
+
+/* this function is called from pthread_exit() to remove all TLS key data
+ * from this thread's TLS area. this must call the destructor of all keys
+ * that have a non-NULL data value (and a non-NULL destructor).
+ *
+ * because destructors can do funky things like deleting/creating other
+ * keys, we need to implement this in a loop
+ */
+static void pthread_key_clean_all(void)
+{
+    tlsmap_t*    map;
+    void**       tls = (void**)__get_tls();
+    int          rounds = PTHREAD_DESTRUCTOR_ITERATIONS;
+
+    map = tlsmap_lock();
+
+    for (rounds = PTHREAD_DESTRUCTOR_ITERATIONS; rounds > 0; rounds--)
+    {
+        int  kk, count = 0;
+
+        for (kk = TLSMAP_START; kk < TLSMAP_SIZE; kk++) {
+            if ( tlsmap_test(map, kk) )
+            {
+                void*       data = tls[kk];
+                tls_dtor_t  dtor = map->dtors[kk];
+
+                if (data != NULL && dtor != NULL)
+                {
+                   /* we need to clear the key data now, this will prevent the
+                    * destructor (or a later one) from seeing the old value if
+                    * it calls pthread_getspecific() for some odd reason
+                    *
+                    * we do not do this if 'dtor == NULL' just in case another
+                    * destructor function might be responsible for manually
+                    * releasing the corresponding data.
+                    */
+                    tls[kk] = NULL;
+
+                   /* because the destructor is free to call pthread_key_create
+                    * and/or pthread_key_delete, we need to temporarily unlock
+                    * the TLS map
+                    */
+                    tlsmap_unlock(map);
+                    (*dtor)(data);
+                    map = tlsmap_lock();
+
+                    count += 1;
+                }
+            }
+        }
+
+        /* if we didn't call any destructor, there is no need to check the
+         * TLS data again
+         */
+        if (count == 0)
+            break;
+    }
+    tlsmap_unlock(map);
+}
+
+// man says this should be in <linux/unistd.h>, but it isn't
+extern int tkill(int tid, int sig);
+
+int pthread_kill(pthread_t tid, int sig)
+{
+    int  ret;
+    int  old_errno = errno;
+    pthread_internal_t * thread = (pthread_internal_t *)tid;
+
+    ret = tkill(thread->kernel_id, sig);
+    if (ret < 0) {
+        ret = errno;
+        errno = old_errno;
+    }
+
+    return ret;
+}
+
+extern int __rt_sigprocmask(int, const sigset_t *, sigset_t *, size_t);
+
+int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
+{
+    return __rt_sigprocmask(how, set, oset, _NSIG / 8);
+}
+
+
+int pthread_getcpuclockid(pthread_t  tid, clockid_t  *clockid)
+{
+    const int            CLOCK_IDTYPE_BITS = 3;
+    pthread_internal_t*  thread = (pthread_internal_t*)tid;
+
+    if (!thread)
+        return ESRCH;
+
+    *clockid = CLOCK_THREAD_CPUTIME_ID | (thread->kernel_id << CLOCK_IDTYPE_BITS);
+    return 0;
+}
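+
+/* worked example (editor's annotation, not part of the original change):
+ * for a thread with kernel_id 42, the clockid above is
+ * CLOCK_THREAD_CPUTIME_ID | (42 << 3), i.e. 3 | 336 == 339.
+ */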
+
+
+/* NOTE: this implementation doesn't support a init function that throws a C++ exception
+ *       or calls fork()
+ */
+int  pthread_once( pthread_once_t*  once_control,  void (*init_routine)(void) )
+{
+    static pthread_mutex_t   once_lock = PTHREAD_MUTEX_INITIALIZER;
+
+    if (*once_control == PTHREAD_ONCE_INIT) {
+        _normal_lock( &once_lock );
+        if (*once_control == PTHREAD_ONCE_INIT) {
+            (*init_routine)();
+            *once_control = ~PTHREAD_ONCE_INIT;
+        }
+        _normal_unlock( &once_lock );
+    }
+    return 0;
+}
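+
+/* usage sketch (editor's annotation, not part of the original change):
+ * running a one-time initializer from any number of threads. the names
+ * are hypothetical.
+ */
+#if 0
+static pthread_once_t  g_once = PTHREAD_ONCE_INIT;
+static int             g_table[256];
+
+static void  init_table(void)
+{
+    int  n;
+    for (n = 0; n < 256; n++)
+        g_table[n] = n * n;
+}
+
+static int  lookup(int  n)
+{
+    pthread_once(&g_once, init_table);  /* runs init_table exactly once */
+    return g_table[n];
+}
+#endif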
diff --git a/libc/bionic/pthread_internal.h b/libc/bionic/pthread_internal.h
new file mode 100644
index 0000000..eb4e80c
--- /dev/null
+++ b/libc/bionic/pthread_internal.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#ifndef _PTHREAD_INTERNAL_H_
+#define _PTHREAD_INTERNAL_H_
+
+#include <pthread.h>
+
+__BEGIN_DECLS
+
+typedef struct pthread_internal_t
+{
+    struct pthread_internal_t*  next;
+    struct pthread_internal_t** pref;
+    pthread_attr_t              attr;
+    pid_t                       kernel_id;
+    pthread_cond_t              join_cond;
+    int                         join_count;
+    void*                       return_value;
+    int                         intern;
+    __pthread_cleanup_t*        cleanup_stack;
+    void**                      tls;         /* thread-local storage area */
+} pthread_internal_t;
+
+extern void _init_thread(pthread_internal_t * thread, pid_t kernel_id, pthread_attr_t * attr, void * stack_base);
+
+/* needed by posix-timers.c */
+
+static __inline__ void timespec_add( struct timespec*  a, const struct timespec*  b )
+{
+    a->tv_sec  += b->tv_sec;
+    a->tv_nsec += b->tv_nsec;
+    if (a->tv_nsec >= 1000000000) {
+        a->tv_nsec -= 1000000000;
+        a->tv_sec  += 1;
+    }
+}
+
+static  __inline__ void timespec_sub( struct timespec*  a, const struct timespec*  b )
+{
+    a->tv_sec  -= b->tv_sec;
+    a->tv_nsec -= b->tv_nsec;
+    if (a->tv_nsec < 0) {
+        a->tv_nsec += 1000000000;
+        a->tv_sec  -= 1;
+    }
+}
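+
+/* worked example (editor's annotation, not part of the original change):
+ * subtracting {1s, 200000000ns} from {3s, 100000000ns} borrows a second:
+ * tv_nsec becomes 100000000 - 200000000 < 0, so the result is
+ * {1s, 900000000ns}, i.e. 1.9 seconds.
+ */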
+
+static  __inline__ void timespec_zero( struct timespec*  a )
+{
+    a->tv_sec = a->tv_nsec = 0;
+}
+
+static  __inline__ int timespec_is_zero( const struct timespec*  a )
+{
+    return (a->tv_sec == 0 && a->tv_nsec == 0);
+}
+
+static  __inline__ int timespec_cmp( const struct timespec*  a, const struct timespec*  b )
+{
+    if (a->tv_sec  < b->tv_sec)  return -1;
+    if (a->tv_sec  > b->tv_sec)  return +1;
+    if (a->tv_nsec < b->tv_nsec) return -1;
+    if (a->tv_nsec > b->tv_nsec) return +1;
+    return 0;
+}
+
+static  __inline__ int timespec_cmp0( const struct timespec*  a )
+{
+    if (a->tv_sec < 0) return -1;
+    if (a->tv_sec > 0) return +1;
+    if (a->tv_nsec < 0) return -1;
+    if (a->tv_nsec > 0) return +1;
+    return 0;
+}
+
+extern int  __pthread_cond_timedwait(pthread_cond_t*, 
+                                     pthread_mutex_t*,
+                                     const struct timespec*, 
+                                     clockid_t);
+
+extern int  __pthread_cond_timedwait_relative(pthread_cond_t*,
+                                              pthread_mutex_t*,
+                                              const struct timespec*);
+
+/* needed by fork.c */
+extern void __timer_table_start_stop(int  stop);
+
+__END_DECLS
+
+#endif /* _PTHREAD_INTERNAL_H_ */
diff --git a/libc/bionic/ptrace.c b/libc/bionic/ptrace.c
new file mode 100644
index 0000000..b1ca00c
--- /dev/null
+++ b/libc/bionic/ptrace.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <sys/types.h>
+#include <sys/ptrace.h>
+
+extern long __ptrace(int request, pid_t pid, void *addr, void *data);
+
+long ptrace(int request, pid_t pid, void * addr, void * data)
+{
+    switch (request) {
+        case PTRACE_PEEKUSR:
+        case PTRACE_PEEKTEXT:
+        case PTRACE_PEEKDATA:
+        {
+            long word;
+            long ret;
+
+            ret = __ptrace(request, pid, addr, &word);
+            if (ret == 0) {
+                return word;
+            } else {
+                // __ptrace will set errno for us
+                return -1;
+            }
+        }
+
+        default:
+            return __ptrace(request, pid, addr, data);
+    }
+}
+
+/*
+ * Hook for gdb to get notified when a thread is created
+ */
+void _thread_created_hook(pid_t thread_id) __attribute__((noinline));
+void _thread_created_hook(pid_t thread_id)
+{
+}
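+
+/* usage sketch (editor's annotation, not part of the original change):
+ * reading one word from a traced child. because a legitimate word value
+ * of -1 is indistinguishable from the error return, errno must be
+ * cleared first and re-checked afterwards.
+ */
+#if 0
+#include <errno.h>
+
+static int  peek_word(pid_t pid, void* addr, long* out)
+{
+    long  word;
+
+    errno = 0;
+    word = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
+    if (word == -1 && errno != 0)
+        return -1;  /* real error: errno was set by the syscall */
+
+    *out = word;
+    return 0;
+}
+#endif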
diff --git a/libc/bionic/pututline.c b/libc/bionic/pututline.c
new file mode 100644
index 0000000..2449068
--- /dev/null
+++ b/libc/bionic/pututline.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <string.h>
+#include <stdio.h>
+#include <utmp.h>
+
+
+void pututline(struct utmp* utmp)
+{
+    FILE* f;
+    struct utmp u;
+    long i;    /* ftell() returns a long */
+
+    if (!(f = fopen(_PATH_UTMP, "w+")))
+        return;
+
+    while (fread(&u, sizeof(struct utmp), 1, f) == 1)
+    {
+        if (!strncmp(utmp->ut_line, u.ut_line, sizeof(u.ut_line) -1))
+        {
+            if ((i = ftell(f)) < 0)
+                goto ret;
+            if (fseek(f, i - sizeof(struct utmp), SEEK_SET) < 0)
+                goto ret;
+            fwrite(utmp, sizeof(struct utmp), 1, f);
+            goto ret;
+        }
+    }
+
+
+    fclose(f);
+
+    if (!(f = fopen(_PATH_UTMP, "w+")))
+        return;
+    fwrite(utmp, sizeof(struct utmp), 1, f);
+
+ret:
+    fclose(f);
+}
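
pututline() scans _PATH_UTMP for a record with the same ut_line, overwrites that
slot in place, and only appends when no line matches. A hedged usage sketch
(only the ut_line field is shown, since it is the key the lookup compares on):

    #include <string.h>
    #include <utmp.h>

    /* Record or refresh the utmp entry for "tty1". */
    void record_login(void)
    {
        struct utmp u;

        memset(&u, 0, sizeof(u));
        strncpy(u.ut_line, "tty1", sizeof(u.ut_line) - 1);
        pututline(&u);   /* replaces an existing "tty1" entry or appends */
    }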
diff --git a/libc/bionic/rand48.h b/libc/bionic/rand48.h
new file mode 100644
index 0000000..0a3d83d
--- /dev/null
+++ b/libc/bionic/rand48.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 1993 Martin Birgmeier
+ * All rights reserved.
+ *
+ * You may redistribute unmodified or modified versions of this source
+ * code provided that the above copyright notice and this and the
+ * following conditions are retained.
+ *
+ * This software is provided ``as is'', and comes with no warranties
+ * of any kind. I shall in no event be liable for anything that happens
+ * to anyone/anything when using this software.
+ *
+ * $FreeBSD: src/lib/libc/gen/rand48.h,v 1.2 2002/02/01 01:32:19 obrien Exp $
+ */
+
+#ifndef _RAND48_H_
+#define _RAND48_H_
+
+#include <math.h>
+#include <stdlib.h>
+
+void		_dorand48(unsigned short[3]);
+
+#define	RAND48_SEED_0	(0x330e)
+#define	RAND48_SEED_1	(0xabcd)
+#define	RAND48_SEED_2	(0x1234)
+#define	RAND48_MULT_0	(0xe66d)
+#define	RAND48_MULT_1	(0xdeec)
+#define	RAND48_MULT_2	(0x0005)
+#define	RAND48_ADD	(0x000b)
+
+#endif /* _RAND48_H_ */
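
The three RAND48_MULT constants are the 48-bit multiplier 0x5DEECE66D split into
16-bit limbs (0x0005, 0xDEEC, 0xE66D), and RAND48_ADD is the increment, so
_dorand48() advances the state as X[n+1] = (0x5DEECE66D * X[n] + 0xB) mod 2^48.
A sketch of that limb-by-limb step, in the style of the BSD code this header
comes from (the real _dorand48() is defined in its own source file):

    #include "rand48.h"

    void _dorand48(unsigned short xseed[3])
    {
        unsigned long accu;
        unsigned short temp[2];

        accu = (unsigned long)RAND48_MULT_0 * (unsigned long)xseed[0]
             + (unsigned long)RAND48_ADD;
        temp[0] = (unsigned short)accu;   /* lower 16 bits */
        accu >>= 16;
        accu += (unsigned long)RAND48_MULT_0 * (unsigned long)xseed[1]
              + (unsigned long)RAND48_MULT_1 * (unsigned long)xseed[0];
        temp[1] = (unsigned short)accu;   /* middle 16 bits */
        accu >>= 16;
        /* unsigned wrap-around here is harmless: only 48 bits matter */
        accu += RAND48_MULT_0 * xseed[2] + RAND48_MULT_1 * xseed[1]
              + RAND48_MULT_2 * xseed[0];
        xseed[0] = temp[0];
        xseed[1] = temp[1];
        xseed[2] = (unsigned short)accu;  /* upper 16 bits */
    }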
diff --git a/libc/bionic/realpath.c b/libc/bionic/realpath.c
new file mode 100644
index 0000000..274a3a0
--- /dev/null
+++ b/libc/bionic/realpath.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 1994
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Jan-Simon Pendry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by the University of
+ *	California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)realpath.c	8.1 (Berkeley) 2/16/94";
+static char rcsid[] =
+"$FreeBSD: /repoman/r/ncvs/src/lib/libc/stdlib/realpath.c,v 1.6.2.1 2003/08/03 23:47:39 nectar Exp $";
+#endif /* LIBC_SCCS and not lint */
+
+#include <sys/param.h>
+#include <sys/stat.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+/*
+ * char *realpath(const char *path, char resolved_path[MAXPATHLEN]);
+ *
+ * Find the real name of path, by removing all ".", ".." and symlink
+ * components.  Returns (resolved) on success, or (NULL) on failure,
+ * in which case the path which caused trouble is left in (resolved).
+ */
+char *
+realpath(path, resolved)
+	const char *path;
+	char *resolved;
+{
+	struct stat sb;
+	int fd, n, rootd, serrno;
+	char *p, *q, wbuf[MAXPATHLEN];
+	int symlinks = 0;
+
+	/* Save the starting point. */
+	if ((fd = open(".", O_RDONLY)) < 0) {
+		(void)strcpy(resolved, ".");
+		return (NULL);
+	}
+
+	/*
+	 * Find the dirname and basename from the path to be resolved.
+	 * Change directory to the dirname component.
+	 * lstat the basename part.
+	 *     if it is a symlink, read in the value and loop.
+	 *     if it is a directory, then change to that directory.
+	 * get the current directory name and append the basename.
+	 */
+	(void)strncpy(resolved, path, MAXPATHLEN - 1);
+	resolved[MAXPATHLEN - 1] = '\0';
+loop:
+	q = strrchr(resolved, '/');
+	if (q != NULL) {
+		p = q + 1;
+		if (q == resolved)
+			q = "/";
+		else {
+			do {
+				--q;
+			} while (q > resolved && *q == '/');
+			q[1] = '\0';
+			q = resolved;
+		}
+		if (chdir(q) < 0)
+			goto err1;
+	} else
+		p = resolved;
+
+	/* Deal with the last component. */
+	if (*p != '\0' && lstat(p, &sb) == 0) {
+		if (S_ISLNK(sb.st_mode)) {
+			if (++symlinks > MAXSYMLINKS) {
+				errno = ELOOP;
+				goto err1;
+			}
+			n = readlink(p, resolved, MAXPATHLEN - 1);
+			if (n < 0)
+				goto err1;
+			resolved[n] = '\0';
+			goto loop;
+		}
+		if (S_ISDIR(sb.st_mode)) {
+			if (chdir(p) < 0)
+				goto err1;
+			p = "";
+		}
+	}
+
+	/*
+	 * Save the last component name and get the full pathname of
+	 * the current directory.
+	 */
+	(void)strcpy(wbuf, p);
+	if (getcwd(resolved, MAXPATHLEN) == 0)
+		goto err1;
+
+	/*
+	 * Join the two strings together, ensuring that the right thing
+	 * happens if the last component is empty, or the dirname is root.
+	 */
+	if (resolved[0] == '/' && resolved[1] == '\0')
+		rootd = 1;
+	else
+		rootd = 0;
+
+	if (*wbuf) {
+		if (strlen(resolved) + strlen(wbuf) + (1-rootd) + 1 >
+		    MAXPATHLEN) {
+			errno = ENAMETOOLONG;
+			goto err1;
+		}
+		if (rootd == 0)
+			(void)strcat(resolved, "/");
+		(void)strcat(resolved, wbuf);
+	}
+
+	/* Go back to where we came from. */
+	if (fchdir(fd) < 0) {
+		serrno = errno;
+		goto err2;
+	}
+
+	/* It's okay if the close fails, what's an fd more or less? */
+	(void)close(fd);
+	return (resolved);
+
+err1:	serrno = errno;
+	(void)fchdir(fd);
+err2:	(void)close(fd);
+	errno = serrno;
+	return (NULL);
+}
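
This is the classic chdir()-based BSD realpath(): resolved must be a
caller-supplied buffer of at least MAXPATHLEN bytes, and on failure it holds the
component that caused the trouble, as the comment above notes. A quick usage
sketch (the input path is only illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/param.h>

    int main(void)
    {
        char resolved[MAXPATHLEN];

        if (realpath("/tmp/../etc", resolved) != NULL)
            printf("resolved: %s\n", resolved);
        else
            perror(resolved);   /* the offending component is left here */
        return 0;
    }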
diff --git a/libc/bionic/rules.jam b/libc/bionic/rules.jam
new file mode 100644
index 0000000..219526b
--- /dev/null
+++ b/libc/bionic/rules.jam
@@ -0,0 +1 @@
+NO_LOCAL_SRC = malloc_leak.c ;
diff --git a/libc/bionic/semaphore.c b/libc/bionic/semaphore.c
new file mode 100644
index 0000000..0c94600
--- /dev/null
+++ b/libc/bionic/semaphore.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <semaphore.h>
+#include <errno.h>
+#include <sys/time.h>
+#include <sys/atomics.h>
+#include <time.h>
+
+int sem_init(sem_t *sem, int pshared, unsigned int value)
+{
+    if (sem == NULL) {
+        errno = EINVAL;
+        return -1;
+    }
+
+    if (pshared != 0) {
+        errno = ENOSYS;
+        return -1;
+    }
+
+    sem->count = value;
+    return 0;
+}
+
+
+int sem_destroy(sem_t *sem)
+{
+    if (sem == NULL) {
+        errno = EINVAL;
+        return -1;
+    }
+    if (sem->count == 0) {
+        errno = EBUSY;
+        return -1;
+    }
+    return 0;
+}
+
+
+sem_t *sem_open(const char *name, int oflag, ...)
+{
+    (void)name;
+    (void)oflag;
+
+    errno = ENOSYS;
+    return SEM_FAILED;
+}
+
+
+int sem_close(sem_t *sem)
+{
+    if (sem == NULL) {
+        errno = EINVAL;
+        return -1;
+    }
+    errno = ENOSYS;
+    return -1;
+}
+
+
+int sem_unlink(const char * name)
+{
+    errno = ENOSYS;
+    return -1;
+}
+
+
+static int
+__atomic_dec_if_positive( volatile unsigned int*  pvalue )
+{
+    unsigned int  old;
+
+    do {
+        old = *pvalue;
+    }
+    while ( old != 0 && __atomic_cmpxchg( (int)old, (int)old-1, (volatile int*)pvalue ) != 0 );
+
+    return old;
+}
+
+int sem_wait(sem_t *sem)
+{
+    if (sem == NULL) {
+        errno = EINVAL;
+        return -1;
+    }
+
+    for (;;) {
+        if (__atomic_dec_if_positive(&sem->count))
+            break;
+
+        __futex_wait(&sem->count, 0, 0);
+    }
+    return 0;
+}
+
+int sem_timedwait(sem_t *sem, const struct timespec *abs_timeout)
+{
+
+    if (sem == NULL) {
+        errno = EINVAL;
+        return -1;
+    }
+
+    /* POSIX says we need to try to decrement the semaphore
+     * before checking the timeout value */
+    if (__atomic_dec_if_positive(&sem->count))
+        return 0;
+
+    /* check it as per Posix */
+    if (abs_timeout == NULL    ||
+        abs_timeout->tv_sec < 0 ||
+        abs_timeout->tv_nsec < 0 ||
+        abs_timeout->tv_nsec >= 1000000000)
+    {
+        errno = EINVAL;
+        return -1;
+    }
+
+    for (;;) {
+        struct timespec ts;
+        int             ret;
+
+        /* Posix mandates CLOCK_REALTIME here */
+        clock_gettime( CLOCK_REALTIME, &ts );
+        ts.tv_sec  = abs_timeout->tv_sec - ts.tv_sec;
+        ts.tv_nsec = abs_timeout->tv_nsec - ts.tv_nsec;
+        if (ts.tv_nsec < 0) {
+            ts.tv_nsec += 1000000000;
+            ts.tv_sec  -= 1;
+        }
+
+        if (ts.tv_sec < 0 || ts.tv_nsec < 0) {
+            errno = ETIMEDOUT;
+            return -1;
+        }
+
+        ret = __futex_wait(&sem->count, 0, &ts);
+
+        /* return in case of timeout or interrupt */
+        if (ret == -ETIMEDOUT || ret == -EINTR) {
+            errno = -ret;
+            return -1;
+        }
+
+        if (__atomic_dec_if_positive(&sem->count))
+            break;
+    }
+    return 0;
+}
+
+int sem_post(sem_t *sem)
+{
+    if (sem == NULL) {
+        errno = EINVAL;
+        return -1;
+    }
+
+    if (__atomic_inc((volatile int*)&sem->count) == 0)
+        __futex_wake(&sem->count, 1);
+
+    return 0;
+}
+
+int  sem_trywait(sem_t *sem)
+{
+    if (sem == NULL) {
+        errno = EINVAL;
+        return -1;
+    }
+
+    if (__atomic_dec_if_positive(&sem->count) > 0) {
+        return 0;
+    } else {
+        errno = EAGAIN;
+        return -1;
+    }
+}
+
+int  sem_getvalue(sem_t *sem, int *sval)
+{
+    if (sem == NULL || sval == NULL) {
+        errno = EINVAL;
+        return -1;
+    }
+
+    *sval = sem->count;
+    return 0;
+}
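
These semaphores are futex-backed and process-private only (pshared must be 0).
A hedged sketch of the usual handoff pattern between two threads:

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    static sem_t ready;

    static void *worker(void *arg)
    {
        sem_wait(&ready);          /* sleeps in __futex_wait until posted */
        puts("worker released");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        sem_init(&ready, 0, 0);    /* pshared != 0 would fail with ENOSYS */
        pthread_create(&t, NULL, worker, NULL);
        sem_post(&ready);          /* wakes one waiter via __futex_wake */
        pthread_join(t, NULL);
        sem_destroy(&ready);
        return 0;
    }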
diff --git a/libc/bionic/sha1.c b/libc/bionic/sha1.c
new file mode 100644
index 0000000..efa95a5
--- /dev/null
+++ b/libc/bionic/sha1.c
@@ -0,0 +1,270 @@
+/*	$NetBSD: sha1.c,v 1.1 2005/12/20 20:29:40 christos Exp $	*/
+/*	$OpenBSD: sha1.c,v 1.9 1997/07/23 21:12:32 kstailey Exp $	*/
+
+/*
+ * SHA-1 in C
+ * By Steve Reid <steve@edmweb.com>
+ * 100% Public Domain
+ *
+ * Test Vectors (from FIPS PUB 180-1)
+ * "abc"
+ *   A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D
+ * "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"
+ *   84983E44 1C3BD26E BAAE4AA1 F95129E5 E54670F1
+ * A million repetitions of "a"
+ *   34AA973C D4C4DAA4 F61EEB2B DBAD2731 6534016F
+ */
+
+#define SHA1HANDSOFF		/* Copies data before messing with it. */
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+#include <assert.h>
+#include <sha1.h>
+#include <string.h>
+
+#if HAVE_NBTOOL_CONFIG_H
+#include "nbtool_config.h"
+#endif
+
+#if !HAVE_SHA1_H
+
+#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
+
+/*
+ * blk0() and blk() perform the initial expand.
+ * I got the idea of expanding during the round function from SSLeay
+ */
+#if BYTE_ORDER == LITTLE_ENDIAN
+# define blk0(i) (block->l[i] = (rol(block->l[i],24)&0xFF00FF00) \
+    |(rol(block->l[i],8)&0x00FF00FF))
+#else
+# define blk0(i) block->l[i]
+#endif
+#define blk(i) (block->l[i&15] = rol(block->l[(i+13)&15]^block->l[(i+8)&15] \
+    ^block->l[(i+2)&15]^block->l[i&15],1))
+
+/*
+ * (R0+R1), R2, R3, R4 are the different operations (rounds) used in SHA1
+ */
+#define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk0(i)+0x5A827999+rol(v,5);w=rol(w,30);
+#define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5);w=rol(w,30);
+#define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30);
+#define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30);
+#define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30);
+
+typedef union {
+    u_char c[64];
+    u_int l[16];
+} CHAR64LONG16;
+
+/* old sparc64 gcc could not compile this */
+#undef SPARC64_GCC_WORKAROUND
+#if defined(__sparc64__) && defined(__GNUC__) && __GNUC__ < 3
+#define SPARC64_GCC_WORKAROUND
+#endif
+
+#ifdef SPARC64_GCC_WORKAROUND
+void do_R01(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *);
+void do_R2(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *);
+void do_R3(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *);
+void do_R4(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *);
+
+#define nR0(v,w,x,y,z,i) R0(*v,*w,*x,*y,*z,i)
+#define nR1(v,w,x,y,z,i) R1(*v,*w,*x,*y,*z,i)
+#define nR2(v,w,x,y,z,i) R2(*v,*w,*x,*y,*z,i)
+#define nR3(v,w,x,y,z,i) R3(*v,*w,*x,*y,*z,i)
+#define nR4(v,w,x,y,z,i) R4(*v,*w,*x,*y,*z,i)
+
+void
+do_R01(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *block)
+{
+    nR0(a,b,c,d,e, 0); nR0(e,a,b,c,d, 1); nR0(d,e,a,b,c, 2); nR0(c,d,e,a,b, 3);
+    nR0(b,c,d,e,a, 4); nR0(a,b,c,d,e, 5); nR0(e,a,b,c,d, 6); nR0(d,e,a,b,c, 7);
+    nR0(c,d,e,a,b, 8); nR0(b,c,d,e,a, 9); nR0(a,b,c,d,e,10); nR0(e,a,b,c,d,11);
+    nR0(d,e,a,b,c,12); nR0(c,d,e,a,b,13); nR0(b,c,d,e,a,14); nR0(a,b,c,d,e,15);
+    nR1(e,a,b,c,d,16); nR1(d,e,a,b,c,17); nR1(c,d,e,a,b,18); nR1(b,c,d,e,a,19);
+}
+
+void
+do_R2(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *block)
+{
+    nR2(a,b,c,d,e,20); nR2(e,a,b,c,d,21); nR2(d,e,a,b,c,22); nR2(c,d,e,a,b,23);
+    nR2(b,c,d,e,a,24); nR2(a,b,c,d,e,25); nR2(e,a,b,c,d,26); nR2(d,e,a,b,c,27);
+    nR2(c,d,e,a,b,28); nR2(b,c,d,e,a,29); nR2(a,b,c,d,e,30); nR2(e,a,b,c,d,31);
+    nR2(d,e,a,b,c,32); nR2(c,d,e,a,b,33); nR2(b,c,d,e,a,34); nR2(a,b,c,d,e,35);
+    nR2(e,a,b,c,d,36); nR2(d,e,a,b,c,37); nR2(c,d,e,a,b,38); nR2(b,c,d,e,a,39);
+}
+
+void
+do_R3(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *block)
+{
+    nR3(a,b,c,d,e,40); nR3(e,a,b,c,d,41); nR3(d,e,a,b,c,42); nR3(c,d,e,a,b,43);
+    nR3(b,c,d,e,a,44); nR3(a,b,c,d,e,45); nR3(e,a,b,c,d,46); nR3(d,e,a,b,c,47);
+    nR3(c,d,e,a,b,48); nR3(b,c,d,e,a,49); nR3(a,b,c,d,e,50); nR3(e,a,b,c,d,51);
+    nR3(d,e,a,b,c,52); nR3(c,d,e,a,b,53); nR3(b,c,d,e,a,54); nR3(a,b,c,d,e,55);
+    nR3(e,a,b,c,d,56); nR3(d,e,a,b,c,57); nR3(c,d,e,a,b,58); nR3(b,c,d,e,a,59);
+}
+
+void
+do_R4(u_int32_t *a, u_int32_t *b, u_int32_t *c, u_int32_t *d, u_int32_t *e, CHAR64LONG16 *block)
+{
+    nR4(a,b,c,d,e,60); nR4(e,a,b,c,d,61); nR4(d,e,a,b,c,62); nR4(c,d,e,a,b,63);
+    nR4(b,c,d,e,a,64); nR4(a,b,c,d,e,65); nR4(e,a,b,c,d,66); nR4(d,e,a,b,c,67);
+    nR4(c,d,e,a,b,68); nR4(b,c,d,e,a,69); nR4(a,b,c,d,e,70); nR4(e,a,b,c,d,71);
+    nR4(d,e,a,b,c,72); nR4(c,d,e,a,b,73); nR4(b,c,d,e,a,74); nR4(a,b,c,d,e,75);
+    nR4(e,a,b,c,d,76); nR4(d,e,a,b,c,77); nR4(c,d,e,a,b,78); nR4(b,c,d,e,a,79);
+}
+#endif
+
+/*
+ * Hash a single 512-bit block. This is the core of the algorithm.
+ */
+void SHA1Transform(state, buffer)
+    u_int32_t state[5];
+    const u_char buffer[64];
+{
+    u_int32_t a, b, c, d, e;
+    CHAR64LONG16 *block;
+
+#ifdef SHA1HANDSOFF
+    CHAR64LONG16 workspace;
+#endif
+
+    assert(buffer != 0);
+    assert(state != 0);
+
+#ifdef SHA1HANDSOFF
+    block = &workspace;
+    (void)memcpy(block, buffer, 64);
+#else
+    block = (CHAR64LONG16 *)(void *)buffer;
+#endif
+
+    /* Copy context->state[] to working vars */
+    a = state[0];
+    b = state[1];
+    c = state[2];
+    d = state[3];
+    e = state[4];
+
+#ifdef SPARC64_GCC_WORKAROUND
+    do_R01(&a, &b, &c, &d, &e, block);
+    do_R2(&a, &b, &c, &d, &e, block);
+    do_R3(&a, &b, &c, &d, &e, block);
+    do_R4(&a, &b, &c, &d, &e, block);
+#else
+    /* 4 rounds of 20 operations each. Loop unrolled. */
+    R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
+    R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
+    R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11);
+    R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15);
+    R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19);
+    R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23);
+    R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27);
+    R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31);
+    R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35);
+    R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39);
+    R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43);
+    R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47);
+    R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51);
+    R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55);
+    R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59);
+    R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63);
+    R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67);
+    R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
+    R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
+    R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);
+#endif
+
+    /* Add the working vars back into context.state[] */
+    state[0] += a;
+    state[1] += b;
+    state[2] += c;
+    state[3] += d;
+    state[4] += e;
+
+    /* Wipe variables */
+    a = b = c = d = e = 0;
+}
+
+
+/*
+ * SHA1Init - Initialize new context
+ */
+void SHA1Init(context)
+    SHA1_CTX *context;
+{
+
+    assert(context != 0);
+
+    /* SHA1 initialization constants */
+    context->state[0] = 0x67452301;
+    context->state[1] = 0xEFCDAB89;
+    context->state[2] = 0x98BADCFE;
+    context->state[3] = 0x10325476;
+    context->state[4] = 0xC3D2E1F0;
+    context->count[0] = context->count[1] = 0;
+}
+
+
+/*
+ * Run your data through this.
+ */
+void SHA1Update(context, data, len)
+    SHA1_CTX *context;
+    const u_char *data;
+    u_int len;
+{
+    u_int i, j;
+
+    assert(context != 0);
+    assert(data != 0);
+
+    j = context->count[0];
+    if ((context->count[0] += len << 3) < j)
+	context->count[1] += (len>>29)+1;
+    j = (j >> 3) & 63;
+    if ((j + len) > 63) {
+	(void)memcpy(&context->buffer[j], data, (i = 64-j));
+	SHA1Transform(context->state, context->buffer);
+	for ( ; i + 63 < len; i += 64)
+	    SHA1Transform(context->state, &data[i]);
+	j = 0;
+    } else {
+	i = 0;
+    }
+    (void)memcpy(&context->buffer[j], &data[i], len - i);
+}
+
+
+/*
+ * Add padding and return the message digest.
+ */
+void SHA1Final(digest, context)
+    u_char digest[20];
+    SHA1_CTX* context;
+{
+    u_int i;
+    u_char finalcount[8];
+
+    assert(digest != 0);
+    assert(context != 0);
+
+    for (i = 0; i < 8; i++) {
+	finalcount[i] = (u_char)((context->count[(i >= 4 ? 0 : 1)]
+	 >> ((3-(i & 3)) * 8) ) & 255);	 /* Endian independent */
+    }
+    SHA1Update(context, (const u_char *)"\200", 1);
+    while ((context->count[0] & 504) != 448)
+	SHA1Update(context, (const u_char *)"\0", 1);
+    SHA1Update(context, finalcount, 8);  /* Should cause a SHA1Transform() */
+
+    if (digest) {
+	for (i = 0; i < 20; i++)
+	    digest[i] = (u_char)
+		((context->state[i>>2] >> ((3-(i & 3)) * 8) ) & 255);
+    }
+}
+
+#endif /* HAVE_SHA1_H */
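
The test vectors quoted at the top of the file make a convenient smoke test for
the SHA1Init()/SHA1Update()/SHA1Final() interface. A minimal sketch that should
print A9993E364706816ABA3E25717850C26C9CD0D89D for "abc":

    #include <sha1.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        SHA1_CTX ctx;
        unsigned char digest[20];
        const char *msg = "abc";
        int i;

        SHA1Init(&ctx);
        SHA1Update(&ctx, (const unsigned char *)msg, strlen(msg));
        SHA1Final(digest, &ctx);

        for (i = 0; i < 20; i++)
            printf("%02X", digest[i]);
        putchar('\n');
        return 0;
    }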
diff --git a/libc/bionic/ssp.c b/libc/bionic/ssp.c
new file mode 100644
index 0000000..20794f4
--- /dev/null
+++ b/libc/bionic/ssp.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <ctype.h>
+#include <limits.h>     /* for PATH_MAX */
+#include "logd.h"
+
+void *__stack_chk_guard = 0;
+
+/* Initialize the canary with a random value from /dev/urandom.
+ * If that fails, use the "terminator canary". */
+static void __attribute__ ((constructor))
+__guard_setup(void)
+{
+    int fd;
+
+    fd = open("/dev/urandom", O_RDONLY);
+    if (fd != -1) {
+        ssize_t len = read(fd, &__stack_chk_guard,
+                           sizeof(__stack_chk_guard));
+        close(fd);
+        if (len == sizeof(__stack_chk_guard))
+            return;
+    }
+
+    /* If that failed, switch to 'terminator canary' */
+    ((unsigned char *)&__stack_chk_guard)[0] = 0;
+    ((unsigned char *)&__stack_chk_guard)[1] = 0;
+    ((unsigned char *)&__stack_chk_guard)[2] = '\n';
+    ((unsigned char *)&__stack_chk_guard)[3] = 255;
+}
+
+/* This is the crash handler.
+ * Does a best effort at logging and calls _exit to terminate
+ * the process immediately (without atexit handlers, etc.) */
+void __stack_chk_fail(void)
+{
+    struct sigaction sa;
+    sigset_t sigmask;
+    static const char message[] = "stack corruption detected: aborted";
+    char path[PATH_MAX];
+    int count;
+
+    /* Immediately block all (but SIGABRT) signal handlers from running code */
+    sigfillset(&sigmask);
+    sigdelset(&sigmask, SIGABRT);
+    sigprocmask(SIG_BLOCK, &sigmask, NULL);
+
+    /* Use /proc/self/exe link to obtain the program name for logging
+     * purposes. If it's not available, we set it to "unknown" */
+    if ((count = readlink("/proc/self/exe", path, sizeof(path) - 1)) == -1) {
+        strlcpy(path, "unknown", sizeof(path));
+    } else {
+        path[count] = '\0';
+    }
+
+    /* Do a best effort at logging. This ends up calling writev(2) */
+    __libc_android_log_print(ANDROID_LOG_FATAL, path, message);
+
+    /* Make sure there is no default action for SIGABRT */
+    bzero(&sa, sizeof(struct sigaction));
+    sigemptyset(&sa.sa_mask);
+    sa.sa_flags = 0;
+    sa.sa_handler = SIG_DFL;
+    sigaction(SIGABRT, &sa, NULL);
+
+    /* Terminate the process and exit immediately */
+    kill(getpid(), SIGABRT);
+
+    _exit(127);
+}
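
__guard_setup() and __stack_chk_fail() are the runtime half of
-fstack-protector; the compiler supplies the other half by planting and checking
the canary in each instrumented frame. A conceptual sketch of what that
instrumentation is equivalent to (an assumption-laden illustration, not literal
compiler output):

    extern void *__stack_chk_guard;
    extern void __stack_chk_fail(void);

    void instrumented(void)
    {
        void *canary = __stack_chk_guard;  /* prologue: copy guard into frame */
        char buf[16];

        /* ... code that could overflow buf and smash the canary ... */
        (void)buf;

        if (canary != __stack_chk_guard)   /* epilogue: recheck before return */
            __stack_chk_fail();            /* logs, raises SIGABRT, never returns */
    }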
diff --git a/libc/bionic/stubs.c b/libc/bionic/stubs.c
new file mode 100644
index 0000000..365f21a
--- /dev/null
+++ b/libc/bionic/stubs.c
@@ -0,0 +1,380 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <grp.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <pwd.h>
+#include <netdb.h>
+#include <mntent.h>
+#include <private/android_filesystem_config.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <ctype.h>
+#include <string.h>
+
+/** Thread-specific state for the stubs functions
+ **/
+
+pthread_once_t   the_once = PTHREAD_ONCE_INIT;
+pthread_key_t    the_key;
+
+typedef struct {
+    struct passwd  passwd;
+    struct group   group;
+    char*          group_members[2];
+    char           app_name_buffer[32];
+    char           group_name_buffer[32];
+} stubs_state_t;
+
+static void
+stubs_state_free( void*  _s )
+{
+    stubs_state_t*  s = _s;
+    free(s);
+}
+
+static stubs_state_t*
+stubs_state_alloc( void )
+{
+    stubs_state_t*  s = calloc(1, sizeof *s);
+
+    if (s != NULL) {
+        s->group.gr_mem = s->group_members;
+    }
+    return s;
+}
+
+static void __stubs_key_init(void)
+{
+    pthread_key_create( &the_key, stubs_state_free );
+}
+
+static stubs_state_t*
+__stubs_state(void)
+{
+    stubs_state_t*  s;
+
+    pthread_once(&the_once, __stubs_key_init);
+    s = pthread_getspecific(the_key);
+    if (s == NULL) {
+        s = stubs_state_alloc();
+        if (s == NULL) {
+            errno = ENOMEM;  /* just in case */
+        } else {
+            if ( pthread_setspecific(the_key, s) != 0 ) {
+                stubs_state_free(s);
+                errno = ENOMEM;
+                s     = NULL;
+            }
+        }
+    }
+    return s;
+}
+
+static struct passwd*
+android_iinfo_to_passwd( struct passwd          *pw,
+                         struct android_id_info *iinfo )
+{
+    pw->pw_name  = (char*)iinfo->name;
+    pw->pw_uid   = iinfo->aid;
+    pw->pw_gid   = iinfo->aid;
+    pw->pw_dir   = "/";
+    pw->pw_shell = "/system/bin/sh";
+    return pw;
+}
+
+static struct group*
+android_iinfo_to_group( struct group *gr,
+                        struct android_id_info *iinfo )
+{
+    gr->gr_name   = (char*) iinfo->name;
+    gr->gr_gid    = iinfo->aid;
+    gr->gr_mem[0] = gr->gr_name;
+    gr->gr_mem[1] = NULL;
+    return gr;
+}
+
+static struct passwd *
+android_id_to_passwd( struct passwd *pw, unsigned id)
+{
+    struct android_id_info *iinfo = android_ids;
+    unsigned n;
+    for (n = 0; n < android_id_count; n++) {
+        if (iinfo[n].aid == id) {
+            return android_iinfo_to_passwd(pw, iinfo + n);
+        }
+    }
+    return NULL;
+}
+
+static struct passwd*
+android_name_to_passwd(struct passwd *pw, const char *name)
+{
+    struct android_id_info *iinfo = android_ids;
+    unsigned n;
+    for (n = 0; n < android_id_count; n++) {
+        if (!strcmp(iinfo[n].name, name)) {
+            return android_iinfo_to_passwd(pw, iinfo + n);
+        }
+    }
+    return NULL;
+}
+
+static struct group*
+android_id_to_group( struct group *gr, unsigned id )
+{
+    struct android_id_info *iinfo = android_ids;
+    unsigned n;
+    for (n = 0; n < android_id_count; n++) {
+        if (iinfo[n].aid == id) {
+            return android_iinfo_to_group(gr, iinfo + n);
+        }
+    }
+    return NULL;
+}
+
+static struct group*
+android_name_to_group( struct group *gr, const char *name )
+{
+    struct android_id_info *iinfo = android_ids;
+    unsigned n;
+    for (n = 0; n < android_id_count; n++) {
+        if (!strcmp(iinfo[n].name, name)) {
+            return android_iinfo_to_group(gr, iinfo + n);
+        }
+    }
+    return NULL;
+}
+
+/* translate a user/group name like app_1234 into the
+ * corresponding user/group id (AID_APP + 1234)
+ * returns 0 and sets errno to ENOENT in case of error
+ */
+static unsigned
+app_id_from_name( const char*  name )
+{
+    unsigned long  id;
+    char*          end;
+
+    if (strncmp(name, "app_", 4) != 0 || !isdigit(name[4]))
+        goto FAIL;
+
+    id = strtoul(name+4, &end, 10);
+    if (id == 0 || *end != '\0')
+        goto FAIL;
+
+    id += AID_APP;
+
+    /* check for overflow and that the value can be
+     * stored in our 32-bit uid_t/gid_t */
+    if (id < AID_APP || (unsigned)id != id)
+        goto FAIL;
+
+    return (unsigned)id;
+
+FAIL:
+    errno = ENOENT;
+    return 0;
+}
+
+/* translate a uid into the corresponding app_<uid>
+ * passwd structure (sets errno to ENOENT on failure)
+ */
+static struct passwd*
+app_id_to_passwd(uid_t  uid, stubs_state_t*  state)
+{
+    struct passwd*  pw = &state->passwd;
+
+    if (uid < AID_APP) {
+        errno = ENOENT;
+        return NULL;
+    }
+
+    snprintf( state->app_name_buffer, sizeof state->app_name_buffer,
+              "app_%u", uid - AID_APP );
+
+    pw->pw_name  = state->app_name_buffer;
+    pw->pw_dir   = "/data";
+    pw->pw_shell = "/system/bin/sh";
+    pw->pw_uid   = uid;
+    pw->pw_gid   = uid;
+
+    return pw;
+}
+
+/* translate a gid into the corresponding app_<gid>
+ * group structure (sets errno to ENOENT on failure)
+ */
+static struct group*
+app_id_to_group(gid_t  gid, stubs_state_t*  state)
+{
+    struct group*  gr = &state->group;
+
+    if (gid < AID_APP) {
+        errno = ENOENT;
+        return NULL;
+    }
+
+    snprintf(state->group_name_buffer, sizeof state->group_name_buffer,
+             "app_%u", gid - AID_APP);
+
+    gr->gr_name   = state->group_name_buffer;
+    gr->gr_gid    = gid;
+    gr->gr_mem[0] = gr->gr_name;
+    gr->gr_mem[1] = NULL;
+
+    return gr;
+}
+
+
+struct passwd*
+getpwuid(uid_t uid)
+{
+    stubs_state_t*  state = __stubs_state();
+    struct passwd*  pw;
+
+    if (state == NULL)
+        return NULL;
+
+    pw = &state->passwd;
+
+    if ( android_id_to_passwd(pw, uid) != NULL )
+        return pw;
+
+    return app_id_to_passwd(uid, state);
+}
+
+struct passwd*
+getpwnam(const char *login)
+{
+    stubs_state_t*  state = __stubs_state();
+
+    if (state == NULL)
+        return NULL;
+
+    if (android_name_to_passwd(&state->passwd, login) != NULL)
+        return &state->passwd;
+
+    return app_id_to_passwd( app_id_from_name(login), state );
+}
+
+int
+getgrouplist (const char *user, gid_t group,
+              gid_t *groups, int *ngroups)
+{
+    if (*ngroups < 1) {
+        *ngroups = 1;
+        return -1;
+    }
+    groups[0] = group;
+    return (*ngroups = 1);
+}
+
+char*
+getlogin(void)
+{
+    struct passwd *pw = getpwuid(getuid());
+
+    if(pw) {
+        return pw->pw_name;
+    } else {
+        return NULL;
+    }
+}
+
+struct group*
+getgrgid(gid_t gid)
+{
+    stubs_state_t*  state = __stubs_state();
+    struct group*   gr;
+
+    if (state == NULL)
+        return NULL;
+
+    gr = android_id_to_group(&state->group, gid);
+    if (gr != NULL)
+        return gr;
+
+    return app_id_to_group(gid, state);
+}
+
+struct group*
+getgrnam(const char *name)
+{
+    stubs_state_t*  state = __stubs_state();
+    unsigned        id;
+
+    if (state == NULL)
+        return NULL;
+
+    if (android_name_to_group(&state->group, name) != 0)
+        return &state->group;
+
+    return app_id_to_group( app_id_from_name(name), state );
+}
+
+
+struct netent* getnetbyname(const char *name)
+{
+    fprintf(stderr, "FIX ME! implement getgrnam() %s:%d\n", __FILE__, __LINE__);
+    return NULL;
+}
+
+void endpwent(void)
+{
+}
+
+struct mntent* getmntent(FILE* f)
+{
+    fprintf(stderr, "FIX ME! implement getmntent() %s:%d\n", __FILE__, __LINE__);
+    return NULL;
+}
+
+char* ttyname(int fd)
+{
+    fprintf(stderr, "FIX ME! implement ttyname() %s:%d\n", __FILE__, __LINE__);
+    return NULL;
+}
+
+struct netent *getnetbyaddr(uint32_t net, int type)
+{
+    fprintf(stderr, "FIX ME! implement %s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__);
+    return NULL;
+}
+
+struct protoent *getprotobyname(const char *name)
+{
+    fprintf(stderr, "FIX ME! implement %s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__);
+    return NULL;
+}
+
+struct protoent *getprotobynumber(int proto)
+{
+    fprintf(stderr, "FIX ME! implement %s() %s:%d\n", __FUNCTION__, __FILE__, __LINE__);
+    return NULL;
+}
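
Android has no /etc/passwd or /etc/group; ids below AID_APP resolve from the
static android_ids table, and everything at or above it maps to a synthetic
app_<n> entry. A hedged sketch (the uid arithmetic assumes AID_APP == 10000,
per android_filesystem_config.h):

    #include <pwd.h>
    #include <stdio.h>

    int main(void)
    {
        struct passwd *pw;

        pw = getpwuid(0);           /* "root", from the android_ids table */
        if (pw != NULL)
            printf("%s\n", pw->pw_name);

        pw = getpwnam("app_42");    /* synthesized: uid 10000 + 42 */
        if (pw != NULL)
            printf("uid %d\n", (int)pw->pw_uid);
        return 0;
    }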
diff --git a/libc/bionic/system_properties.c b/libc/bionic/system_properties.c
new file mode 100644
index 0000000..5e3b9e7
--- /dev/null
+++ b/libc/bionic/system_properties.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stddef.h>
+#include <errno.h>
+#include <string.h>
+
+#include <sys/mman.h>
+
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sys/select.h>
+#include <sys/types.h>
+#include <netinet/in.h>
+
+#define _REALLY_INCLUDE_SYS__SYSTEM_PROPERTIES_H_
+#include <sys/_system_properties.h>
+
+#include <sys/atomics.h>
+
+static const char property_service_name[] = PROP_SERVICE_NAME;
+
+static unsigned dummy_props = 0;
+
+prop_area *__system_property_area__ = (void*) &dummy_props;
+
+int __system_properties_init(void)
+{
+    prop_area *pa;
+    int s, fd;
+    unsigned sz;
+    char *env;
+
+    if(__system_property_area__ != ((void*) &dummy_props)) {
+        return 0;
+    }
+
+    env = getenv("ANDROID_PROPERTY_WORKSPACE");
+    if (!env) {
+        return -1;
+    }
+    fd = atoi(env);
+    env = strchr(env, ',');
+    if (!env) {
+        return -1;
+    }
+    sz = atoi(env + 1);
+    
+    pa = mmap(0, sz, PROT_READ, MAP_SHARED, fd, 0);
+    
+    if(pa == MAP_FAILED) {
+        return -1;
+    }
+
+    if((pa->magic != PROP_AREA_MAGIC) || (pa->version != PROP_AREA_VERSION)) {
+        munmap(pa, sz);
+        return -1;
+    }
+
+    __system_property_area__ = pa;
+    return 0;
+}
+
+const prop_info *__system_property_find_nth(unsigned n)
+{
+    prop_area *pa = __system_property_area__;
+
+    if(n >= pa->count) {
+        return 0;
+    } else {
+        return TOC_TO_INFO(pa, pa->toc[n]);
+    }
+}
+
+const prop_info *__system_property_find(const char *name)
+{
+    prop_area *pa = __system_property_area__;
+    unsigned count = pa->count;
+    unsigned *toc = pa->toc;
+    unsigned len = strlen(name);
+    prop_info *pi;
+
+    while(count--) {
+        unsigned entry = *toc++;
+        if(TOC_NAME_LEN(entry) != len) continue;
+        
+        pi = TOC_TO_INFO(pa, entry);
+        if(memcmp(name, pi->name, len)) continue;
+
+        return pi;
+    }
+
+    return 0;
+}
+
+int __system_property_read(const prop_info *pi, char *name, char *value)
+{
+    unsigned serial, len;
+    
+    for(;;) {
+        serial = pi->serial;
+        while(SERIAL_DIRTY(serial)) {
+            __futex_wait(&pi->serial, serial, 0);
+            serial = pi->serial;
+        }
+        len = SERIAL_VALUE_LEN(serial);
+        memcpy(value, pi->value, len + 1);
+        if(serial == pi->serial) {
+            if(name != 0) {
+                strcpy(name, pi->name);
+            }
+            return len;
+        }
+    }
+}
+
+int __system_property_get(const char *name, char *value)
+{
+    const prop_info *pi = __system_property_find(name);
+
+    if(pi != 0) {
+        return __system_property_read(pi, 0, value);
+    } else {
+        value[0] = 0;
+        return 0;
+    }
+}
+
+int __system_property_wait(const prop_info *pi)
+{
+    unsigned n;
+    if(pi == 0) {
+        prop_area *pa = __system_property_area__;
+        n = pa->serial;
+        do {
+            __futex_wait(&pa->serial, n, 0);
+        } while(n == pa->serial);
+    } else {
+        n = pi->serial;
+        do {
+            __futex_wait(&pi->serial, n, 0);
+        } while(n == pi->serial);
+    }
+    return 0;
+}
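
Readers see the property area as a read-only shared mapping; the serial/futex
dance in __system_property_read() gives them a consistent snapshot without
locks. Typical consumer code goes through __system_property_get(), sketched here
against the public header (PROP_VALUE_MAX bounds every value, so the buffer
below is always large enough):

    #include <stdio.h>
    #include <sys/system_properties.h>

    int main(void)
    {
        char value[PROP_VALUE_MAX];

        if (__system_property_get("ro.build.version.sdk", value) > 0)
            printf("sdk: %s\n", value);
        return 0;
    }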
diff --git a/libc/bionic/thread_atexit.c b/libc/bionic/thread_atexit.c
new file mode 100644
index 0000000..dc4a5a0
--- /dev/null
+++ b/libc/bionic/thread_atexit.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* some simple glue used to make the BSD atexit code happy */
+
+#include <pthread.h>
+#include "pthread_internal.h"
+
+static pthread_mutex_t   gAtExitLock = PTHREAD_MUTEX_INITIALIZER;
+
+void  _thread_atexit_lock( void )
+{
+  pthread_mutex_lock( &gAtExitLock );
+}
+
+void _thread_atexit_unlock( void )
+{
+  pthread_mutex_unlock( &gAtExitLock );
+}
+
diff --git a/libc/bionic/time64.c b/libc/bionic/time64.c
new file mode 100644
index 0000000..1e1f881
--- /dev/null
+++ b/libc/bionic/time64.c
@@ -0,0 +1,793 @@
+/*
+
+Copyright (c) 2007-2008  Michael G Schwern
+
+This software originally derived from Paul Sheer's pivotal_gmtime_r.c.
+
+The MIT License:
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+*/
+
+/* See http://code.google.com/p/y2038 for this code's origin */
+
+/*
+
+Programmers who have available to them 64-bit time values as a 'long
+long' type can use localtime64_r() and gmtime64_r() which correctly
+converts the time even on 32-bit systems. Whether you have 64-bit time
+values will depend on the operating system.
+
+localtime64_r() is a 64-bit equivalent of localtime_r().
+
+gmtime64_r() is a 64-bit equivalent of gmtime_r().
+
+*/
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <errno.h>
+#include "time64.h"
+
+/* BIONIC_BEGIN */
+/* the following are here to avoid exposing time64_config.h and
+ * other types in our public time64.h header
+ */
+#include "time64_config.h"
+
+/* Not everyone has gm/localtime_r(), provide a replacement */
+#ifdef HAS_LOCALTIME_R
+# define LOCALTIME_R(clock, result) localtime_r(clock, result)
+#else
+# define LOCALTIME_R(clock, result) fake_localtime_r(clock, result)
+#endif
+#ifdef HAS_GMTIME_R
+# define GMTIME_R(clock, result) gmtime_r(clock, result)
+#else
+# define GMTIME_R(clock, result) fake_gmtime_r(clock, result)
+#endif
+
+typedef int64_t  Int64;
+typedef time64_t Time64_T;
+typedef int64_t  Year;
+#define  TM      tm
+/* BIONIC_END */
+
+/* Spec says except for strftime() and the _r() functions, these
+   all return static memory.  Stabbings! */
+static struct TM   Static_Return_Date;
+static char        Static_Return_String[35];
+
+static const int days_in_month[2][12] = {
+    {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
+    {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
+};
+
+static const int julian_days_by_month[2][12] = {
+    {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334},
+    {0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335},
+};
+
+static char const wday_name[7][3] = {
+    "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
+};
+
+static char const mon_name[12][3] = {
+    "Jan", "Feb", "Mar", "Apr", "May", "Jun",
+    "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
+};
+
+static const int length_of_year[2] = { 365, 366 };
+
+/* Some numbers relating to the gregorian cycle */
+static const Year     years_in_gregorian_cycle   = 400;
+#define               days_in_gregorian_cycle      ((365 * 400) + 100 - 4 + 1)
+static const Time64_T seconds_in_gregorian_cycle = days_in_gregorian_cycle * 60LL * 60LL * 24LL;
+
+/* Year range we can trust the time functions with */
+#define MAX_SAFE_YEAR 2037
+#define MIN_SAFE_YEAR 1971
+
+/* 28 year Julian calendar cycle */
+#define SOLAR_CYCLE_LENGTH 28
+
+/* Year cycle from MAX_SAFE_YEAR down. */
+static const int safe_years_high[SOLAR_CYCLE_LENGTH] = {
+    2016, 2017, 2018, 2019,
+    2020, 2021, 2022, 2023,
+    2024, 2025, 2026, 2027,
+    2028, 2029, 2030, 2031,
+    2032, 2033, 2034, 2035,
+    2036, 2037, 2010, 2011,
+    2012, 2013, 2014, 2015
+};
+
+/* Year cycle from MIN_SAFE_YEAR up */
+static const int safe_years_low[SOLAR_CYCLE_LENGTH] = {
+    1996, 1997, 1998, 1971,
+    1972, 1973, 1974, 1975,
+    1976, 1977, 1978, 1979,
+    1980, 1981, 1982, 1983,
+    1984, 1985, 1986, 1987,
+    1988, 1989, 1990, 1991,
+    1992, 1993, 1994, 1995,
+};
+
+/* This isn't used, but it's handy to look at */
+static const int dow_year_start[SOLAR_CYCLE_LENGTH] = {
+    5, 0, 1, 2,     /* 0       2016 - 2019 */
+    3, 5, 6, 0,     /* 4  */
+    1, 3, 4, 5,     /* 8       1996 - 1998, 1971*/
+    6, 1, 2, 3,     /* 12      1972 - 1975 */
+    4, 6, 0, 1,     /* 16 */
+    2, 4, 5, 6,     /* 20      2036, 2037, 2010, 2011 */
+    0, 2, 3, 4      /* 24      2012, 2013, 2014, 2015 */
+};
+
+/* Let's assume people are going to be looking for dates in the future.
+   Let's provide some cheats so you can skip ahead.
+   This has a 4x speed boost when near 2008.
+*/
+/* Number of days since epoch on Jan 1st, 2008 GMT */
+#define CHEAT_DAYS  (1199145600 / 24 / 60 / 60)
+#define CHEAT_YEARS 108
+
+#define IS_LEAP(n)      ((!(((n) + 1900) % 400) || (!(((n) + 1900) % 4) && (((n) + 1900) % 100))) != 0)
+#define WRAP(a,b,m)     ((a) = ((a) <  0  ) ? ((b)--, (a) + (m)) : (a))
+
+#ifdef USE_SYSTEM_LOCALTIME
+#    define SHOULD_USE_SYSTEM_LOCALTIME(a)  (       \
+    (a) <= SYSTEM_LOCALTIME_MAX &&              \
+    (a) >= SYSTEM_LOCALTIME_MIN                 \
+)
+#else
+#    define SHOULD_USE_SYSTEM_LOCALTIME(a)      (0)
+#endif
+
+#ifdef USE_SYSTEM_GMTIME
+#    define SHOULD_USE_SYSTEM_GMTIME(a)     (       \
+    (a) <= SYSTEM_GMTIME_MAX    &&              \
+    (a) >= SYSTEM_GMTIME_MIN                    \
+)
+#else
+#    define SHOULD_USE_SYSTEM_GMTIME(a)         (0)
+#endif
+
+/* Variadic macros are a C99 thing, alas */
+#ifdef TIME_64_DEBUG
+#    define TRACE(format) (fprintf(stderr, format))
+#    define TRACE1(format, var1)    (fprintf(stderr, format, var1))
+#    define TRACE2(format, var1, var2)    (fprintf(stderr, format, var1, var2))
+#    define TRACE3(format, var1, var2, var3)    (fprintf(stderr, format, var1, var2, var3))
+#else
+#    define TRACE(format) ((void)0)
+#    define TRACE1(format, var1) ((void)0)
+#    define TRACE2(format, var1, var2) ((void)0)
+#    define TRACE3(format, var1, var2, var3) ((void)0)
+#endif
+
+
+static int is_exception_century(Year year)
+{
+    int is_exception = ((year % 100 == 0) && !(year % 400 == 0));
+    TRACE1("# is_exception_century: %s\n", is_exception ? "yes" : "no");
+
+    return(is_exception);
+}
+
+
+/* timegm() is not in the C or POSIX spec, but it is such a useful
+   extension I would be remiss in leaving it out.  Also I need it
+   for localtime64()
+*/
+Time64_T timegm64(const struct TM *date) {
+    Time64_T days    = 0;
+    Time64_T seconds = 0;
+    Year     year;
+    Year     orig_year = (Year)date->tm_year;
+    int      cycles  = 0;
+
+    if( orig_year > 100 ) {
+        cycles = (orig_year - 100) / 400;
+        orig_year -= cycles * 400;
+        days      += (Time64_T)cycles * days_in_gregorian_cycle;
+    }
+    else if( orig_year < -300 ) {
+        cycles = (orig_year - 100) / 400;
+        orig_year -= cycles * 400;
+        days      += (Time64_T)cycles * days_in_gregorian_cycle;
+    }
+    TRACE3("# timegm/ cycles: %d, days: %lld, orig_year: %lld\n", cycles, days, orig_year);
+
+    if( orig_year > 70 ) {
+        year = 70;
+        while( year < orig_year ) {
+            days += length_of_year[IS_LEAP(year)];
+            year++;
+        }
+    }
+    else if ( orig_year < 70 ) {
+        year = 69;
+        do {
+            days -= length_of_year[IS_LEAP(year)];
+            year--;
+        } while( year >= orig_year );
+    }
+
+
+    days += julian_days_by_month[IS_LEAP(orig_year)][date->tm_mon];
+    days += date->tm_mday - 1;
+
+    seconds = days * 60 * 60 * 24;
+
+    seconds += date->tm_hour * 60 * 60;
+    seconds += date->tm_min * 60;
+    seconds += date->tm_sec;
+
+    return(seconds);
+}
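
As a worked example of what timegm64() computes, a hedged sketch converting a
post-2038 UTC date (tm_year is years since 1900 and tm_mon is 0-based, as for
struct tm):

    #include <stdio.h>
    #include "time64.h"

    int main(void)
    {
        struct tm date = {0};

        date.tm_year = 2100 - 1900;   /* the year 2100 */
        date.tm_mday = 1;             /* Jan 1st, 00:00:00 UTC */

        printf("%lld\n", (long long)timegm64(&date));
        return 0;
    }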
+
+
+static int check_tm(struct TM *tm)
+{
+    /* Don't forget leap seconds */
+    assert(tm->tm_sec >= 0);
+    assert(tm->tm_sec <= 61);
+
+    assert(tm->tm_min >= 0);
+    assert(tm->tm_min <= 59);
+
+    assert(tm->tm_hour >= 0);
+    assert(tm->tm_hour <= 23);
+
+    assert(tm->tm_mday >= 1);
+    assert(tm->tm_mday <= days_in_month[IS_LEAP(tm->tm_year)][tm->tm_mon]);
+
+    assert(tm->tm_mon  >= 0);
+    assert(tm->tm_mon  <= 11);
+
+    assert(tm->tm_wday >= 0);
+    assert(tm->tm_wday <= 6);
+   
+    assert(tm->tm_yday >= 0);
+    assert(tm->tm_yday <= length_of_year[IS_LEAP(tm->tm_year)]);
+
+#ifdef HAS_TM_TM_GMTOFF
+    assert(tm->tm_gmtoff >= -24 * 60 * 60);
+    assert(tm->tm_gmtoff <=  24 * 60 * 60);
+#endif
+
+    return 1;
+}
+
+
+/* The exceptional centuries without leap years cause the cycle to
+   shift by 16
+*/
+static Year cycle_offset(Year year)
+{
+    const Year start_year = 2000;
+    Year year_diff  = year - start_year;
+    Year exceptions;
+
+    if( year > start_year )
+        year_diff--;
+
+    exceptions  = year_diff / 100;
+    exceptions -= year_diff / 400;
+
+    TRACE3("# year: %lld, exceptions: %lld, year_diff: %lld\n",
+          year, exceptions, year_diff);
+
+    return exceptions * 16;
+}
+
+/* For a given year after 2038, pick the latest possible matching
+   year in the 28 year calendar cycle.
+
+   A matching year...
+   1) Starts on the same day of the week.
+   2) Has the same leap year status.
+
+   This is so the calendars match up.
+
+   Also the previous year must match.  When doing Jan 1st you might
+   wind up on Dec 31st the previous year when doing a -UTC time zone.
+
+   Finally, the next year must have the same start day of week.  This
+   is for Dec 31st with a +UTC time zone.
+   It doesn't need the same leap year status since we only care about
+   January 1st.
+*/
+static int safe_year(const Year year)
+{
+    int safe_year = 0;
+    Year year_cycle;
+
+    if( year >= MIN_SAFE_YEAR && year <= MAX_SAFE_YEAR ) {
+        return (int)year;
+    }
+
+    year_cycle = year + cycle_offset(year);
+
+    /* safe_years_low is off from safe_years_high by 8 years */
+    if( year < MIN_SAFE_YEAR )
+        year_cycle -= 8;
+
+    /* Change non-leap xx00 years to an equivalent */
+    if( is_exception_century(year) )
+        year_cycle += 11;
+
+    /* Also xx01 years, since the previous year will be wrong */
+    if( is_exception_century(year - 1) )
+        year_cycle += 17;
+
+    year_cycle %= SOLAR_CYCLE_LENGTH;
+    if( year_cycle < 0 )
+        year_cycle = SOLAR_CYCLE_LENGTH + year_cycle;
+
+    assert( year_cycle >= 0 );
+    assert( year_cycle < SOLAR_CYCLE_LENGTH );
+    if( year < MIN_SAFE_YEAR )
+        safe_year = safe_years_low[year_cycle];
+    else if( year > MAX_SAFE_YEAR )
+        safe_year = safe_years_high[year_cycle];
+    else
+        assert(0);
+
+    TRACE3("# year: %lld, year_cycle: %lld, safe_year: %d\n",
+          year, year_cycle, safe_year);
+
+    assert(safe_year <= MAX_SAFE_YEAR && safe_year >= MIN_SAFE_YEAR);
+
+    return safe_year;
+}
+
+
+void copy_tm_to_TM(const struct tm *src, struct TM *dest) {
+    if( src == NULL ) {
+        memset(dest, 0, sizeof(*dest));
+    }
+    else {
+#       ifdef USE_TM64
+            dest->tm_sec        = src->tm_sec;
+            dest->tm_min        = src->tm_min;
+            dest->tm_hour       = src->tm_hour;
+            dest->tm_mday       = src->tm_mday;
+            dest->tm_mon        = src->tm_mon;
+            dest->tm_year       = (Year)src->tm_year;
+            dest->tm_wday       = src->tm_wday;
+            dest->tm_yday       = src->tm_yday;
+            dest->tm_isdst      = src->tm_isdst;
+
+#           ifdef HAS_TM_TM_GMTOFF
+                dest->tm_gmtoff  = src->tm_gmtoff;
+#           endif
+
+#           ifdef HAS_TM_TM_ZONE
+                dest->tm_zone  = src->tm_zone;
+#           endif
+
+#       else
+            /* They're the same type */
+            memcpy(dest, src, sizeof(*dest));
+#       endif
+    }
+}
+
+
+void copy_TM_to_tm(const struct TM *src, struct tm *dest) {
+    if( src == NULL ) {
+        memset(dest, 0, sizeof(*dest));
+    }
+    else {
+#       ifdef USE_TM64
+            dest->tm_sec        = src->tm_sec;
+            dest->tm_min        = src->tm_min;
+            dest->tm_hour       = src->tm_hour;
+            dest->tm_mday       = src->tm_mday;
+            dest->tm_mon        = src->tm_mon;
+            dest->tm_year       = (int)src->tm_year;
+            dest->tm_wday       = src->tm_wday;
+            dest->tm_yday       = src->tm_yday;
+            dest->tm_isdst      = src->tm_isdst;
+
+#           ifdef HAS_TM_TM_GMTOFF
+                dest->tm_gmtoff  = src->tm_gmtoff;
+#           endif
+
+#           ifdef HAS_TM_TM_ZONE
+                dest->tm_zone  = src->tm_zone;
+#           endif
+
+#       else
+            /* They're the same type */
+            memcpy(dest, src, sizeof(*dest));
+#       endif
+    }
+}
+
+
+/* Simulate localtime_r() to the best of our ability */
+struct tm * fake_localtime_r(const time_t *clock, struct tm *result) {
+    const struct tm *static_result = localtime(clock);
+
+    assert(result != NULL);
+
+    if( static_result == NULL ) {
+        memset(result, 0, sizeof(*result));
+        return NULL;
+    }
+    else {
+        memcpy(result, static_result, sizeof(*result));
+        return result;
+    }
+}
+
+
+
+/* Simulate gmtime_r() to the best of our ability */
+struct tm * fake_gmtime_r(const time_t *clock, struct tm *result) {
+    const struct tm *static_result = gmtime(clock);
+
+    assert(result != NULL);
+
+    if( static_result == NULL ) {
+        memset(result, 0, sizeof(*result));
+        return NULL;
+    }
+    else {
+        memcpy(result, static_result, sizeof(*result));
+        return result;
+    }
+}
+
+
+static Time64_T seconds_between_years(Year left_year, Year right_year) {
+    int increment = (left_year > right_year) ? 1 : -1;
+    Time64_T seconds = 0;
+    int cycles;
+
+    if( left_year > 2400 ) {
+        cycles = (left_year - 2400) / 400;
+        left_year -= cycles * 400;
+        seconds   += cycles * seconds_in_gregorian_cycle;
+    }
+    else if( left_year < 1600 ) {
+        cycles = (left_year - 1600) / 400;
+        left_year += cycles * 400;
+        seconds   += cycles * seconds_in_gregorian_cycle;
+    }
+
+    while( left_year != right_year ) {
+        seconds += length_of_year[IS_LEAP(right_year - 1900)] * 60 * 60 * 24;
+        right_year += increment;
+    }
+
+    return seconds * increment;
+}
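+
+/* Worked example (illustrative): seconds_between_years(2001, 1970) walks
+   over the 31 years 1970..2000 (23 normal, 8 leap) and returns
+   (23*365 + 8*366) * 86400 == 978307200, the Unix time of
+   2001-01-01T00:00:00Z.  The 400-year shortcut above is possible because
+   a Gregorian cycle is always 146097 days == 12622780800 seconds. */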
+
+
+Time64_T mktime64(const struct TM *input_date) {
+    struct tm safe_date;
+    struct TM date;
+    Time64_T  time;
+    Year      year = input_date->tm_year + 1900;
+
+    if( MIN_SAFE_YEAR <= year && year <= MAX_SAFE_YEAR ) {
+        copy_TM_to_tm(input_date, &safe_date);
+        return (Time64_T)mktime(&safe_date);
+    }
+
+    /* Have to make the year safe in date, else it won't fit in safe_date */
+    date = *input_date;
+    date.tm_year = safe_year(year) - 1900;
+    copy_TM_to_tm(&date, &safe_date);
+
+    time = (Time64_T)mktime(&safe_date);
+
+    time += seconds_between_years(year, (Year)(safe_date.tm_year + 1900));
+
+    return time;
+}
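+
+/* Usage sketch (illustrative only; the field values are made up):
+
+       struct TM tm = { 0 };
+       tm.tm_year  = 2100 - 1900;  // a year past the 32-bit time_t range
+       tm.tm_mon   = 0;            // January
+       tm.tm_mday  = 1;
+       tm.tm_isdst = -1;           // let the system determine DST
+       Time64_T t  = mktime64(&tm);
+*/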
+
+
+/* Because I think mktime() is a crappy name */
+Time64_T timelocal64(const struct TM *date) {
+    return mktime64(date);
+}
+
+
+struct TM *gmtime64_r (const Time64_T *in_time, struct TM *p)
+{
+    int v_tm_sec, v_tm_min, v_tm_hour, v_tm_mon, v_tm_wday;
+    Time64_T v_tm_tday;
+    int leap;
+    Time64_T m;
+    Time64_T time = *in_time;
+    Year year = 70;
+    int cycles = 0;
+
+    assert(p != NULL);
+
+    /* Use the system gmtime() if time_t is small enough */
+    if( SHOULD_USE_SYSTEM_GMTIME(*in_time) ) {
+        time_t safe_time = *in_time;
+        struct tm safe_date;
+        GMTIME_R(&safe_time, &safe_date);
+
+        copy_tm_to_TM(&safe_date, p);
+        assert(check_tm(p));
+
+        return p;
+    }
+
+#ifdef HAS_TM_TM_GMTOFF
+    p->tm_gmtoff = 0;
+#endif
+    p->tm_isdst  = 0;
+
+#ifdef HAS_TM_TM_ZONE
+    p->tm_zone   = "UTC";
+#endif
+
+    v_tm_sec =  (int)(time % 60);
+    time /= 60;
+    v_tm_min =  (int)(time % 60);
+    time /= 60;
+    v_tm_hour = (int)(time % 24);
+    time /= 24;
+    v_tm_tday = time;
+
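+    /* C division truncates toward zero, so for negative times the
+       remainders above can be negative.  WRAP() folds a negative value
+       back into range and borrows one from the next larger unit. */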
+    WRAP (v_tm_sec, v_tm_min, 60);
+    WRAP (v_tm_min, v_tm_hour, 60);
+    WRAP (v_tm_hour, v_tm_tday, 24);
+
+    v_tm_wday = (int)((v_tm_tday + 4) % 7);
+    if (v_tm_wday < 0)
+        v_tm_wday += 7;
+    m = v_tm_tday;
+
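+    /* CHEAT_DAYS/CHEAT_YEARS (defined earlier in this file) let the year
+       loop below start near 2008 instead of counting up from 1970,
+       saving iterations for modern times. */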
+    if (m >= CHEAT_DAYS) {
+        year = CHEAT_YEARS;
+        m -= CHEAT_DAYS;
+    }
+
+    if (m >= 0) {
+        /* Gregorian cycles; this is a huge optimization for distant times */
+        cycles = (int)(m / (Time64_T) days_in_gregorian_cycle);
+        if( cycles ) {
+            m -= (cycles * (Time64_T) days_in_gregorian_cycle);
+            year += (cycles * years_in_gregorian_cycle);
+        }
+
+        /* Years */
+        leap = IS_LEAP (year);
+        while (m >= (Time64_T) length_of_year[leap]) {
+            m -= (Time64_T) length_of_year[leap];
+            year++;
+            leap = IS_LEAP (year);
+        }
+
+        /* Months */
+        v_tm_mon = 0;
+        while (m >= (Time64_T) days_in_month[leap][v_tm_mon]) {
+            m -= (Time64_T) days_in_month[leap][v_tm_mon];
+            v_tm_mon++;
+        }
+    } else {
+        year--;
+
+        /* Gregorian cycles */
+        cycles = (int)((m / (Time64_T) days_in_gregorian_cycle) + 1);
+        if( cycles ) {
+            m -= (cycles * (Time64_T) days_in_gregorian_cycle);
+            year += (cycles * years_in_gregorian_cycle);
+        }
+
+        /* Years */
+        leap = IS_LEAP (year);
+        while (m < (Time64_T) -length_of_year[leap]) {
+            m += (Time64_T) length_of_year[leap];
+            year--;
+            leap = IS_LEAP (year);
+        }
+
+        /* Months */
+        v_tm_mon = 11;
+        while (m < (Time64_T) -days_in_month[leap][v_tm_mon]) {
+            m += (Time64_T) days_in_month[leap][v_tm_mon];
+            v_tm_mon--;
+        }
+        m += (Time64_T) days_in_month[leap][v_tm_mon];
+    }
+
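+    /* tm_year may be a plain int (when USE_TM64 is not defined), so the
+       64 bit year can be truncated by this assignment; reading it back
+       detects the overflow. */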
+    p->tm_year = year;
+    if( p->tm_year != year ) {
+#ifdef EOVERFLOW
+        errno = EOVERFLOW;
+#endif
+        return NULL;
+    }
+
+    /* At this point m is less than a year's worth of days, so casting to an int is safe */
+    p->tm_mday = (int) m + 1;
+    p->tm_yday = julian_days_by_month[leap][v_tm_mon] + (int)m;
+    p->tm_sec  = v_tm_sec;
+    p->tm_min  = v_tm_min;
+    p->tm_hour = v_tm_hour;
+    p->tm_mon  = v_tm_mon;
+    p->tm_wday = v_tm_wday;
+
+    assert(check_tm(p));
+
+    return p;
+}
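+
+/* Usage sketch (illustrative): dates past the 32-bit limit work
+   transparently.
+
+       Time64_T t = 2147483648LL;  // one second past INT32_MAX
+       struct TM tm;
+       if( gmtime64_r(&t, &tm) != NULL )
+           printf("%d-%02d-%02d\n",  // prints 2038-01-19
+                  (int)tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
+*/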
+
+
+struct TM *localtime64_r (const Time64_T *time, struct TM *local_tm)
+{
+    time_t safe_time;
+    struct tm safe_date;
+    struct TM gm_tm;
+    Year orig_year;
+    int month_diff;
+
+    assert(local_tm != NULL);
+
+    /* Use the system localtime() if time_t is small enough */
+    if( SHOULD_USE_SYSTEM_LOCALTIME(*time) ) {
+        safe_time = *time;
+
+        TRACE1("Using system localtime for %lld\n", *time);
+
+        LOCALTIME_R(&safe_time, &safe_date);
+
+        copy_tm_to_TM(&safe_date, local_tm);
+        assert(check_tm(local_tm));
+
+        return local_tm;
+    }
+
+    if( gmtime64_r(time, &gm_tm) == NULL ) {
+        TRACE1("gmtime64_r returned null for %lld\n", *time);
+        return NULL;
+    }
+
+    orig_year = gm_tm.tm_year;
+
+    if (gm_tm.tm_year > (2037 - 1900) ||
+        gm_tm.tm_year < (1970 - 1900)
+       )
+    {
+        TRACE1("Mapping tm_year %lld to safe_year\n", (Year)gm_tm.tm_year);
+        gm_tm.tm_year = safe_year((Year)(gm_tm.tm_year + 1900)) - 1900;
+    }
+
+    safe_time = timegm64(&gm_tm);
+    if( LOCALTIME_R(&safe_time, &safe_date) == NULL ) {
+        TRACE1("localtime_r(%d) returned NULL\n", (int)safe_time);
+        return NULL;
+    }
+
+    copy_tm_to_TM(&safe_date, local_tm);
+
+    local_tm->tm_year = orig_year;
+    if( local_tm->tm_year != orig_year ) {
+        TRACE2("tm_year overflow: tm_year %lld, orig_year %lld\n",
+              (Year)local_tm->tm_year, (Year)orig_year);
+
+#ifdef EOVERFLOW
+        errno = EOVERFLOW;
+#endif
+        return NULL;
+    }
+
+
+    month_diff = local_tm->tm_mon - gm_tm.tm_mon;
+
+    /*  When localtime is Dec 31st previous year and
+        gmtime is Jan 1st next year.
+    */
+    if( month_diff == 11 ) {
+        local_tm->tm_year--;
+    }
+
+    /*  When localtime is Jan 1st, next year and
+        gmtime is Dec 31st, previous year.
+    */
+    if( month_diff == -11 ) {
+        local_tm->tm_year++;
+    }
+
+    /* GMT is Jan 1st of year xx01, but localtime is still Dec 31st
+       of a non-leap year xx00.  There is one point in the cycle
+       we can't account for: the safe xx00 year may be a leap
+       year.  So we need to correct for Dec 31st coming out as
+       the 366th day of the year.
+    */
+    if( !IS_LEAP(local_tm->tm_year) && local_tm->tm_yday == 365 )
+        local_tm->tm_yday--;
+
+    assert(check_tm(local_tm));
+
+    return local_tm;
+}
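+
+/* The strategy above, in brief: compute the UTC date, map its year to a
+   "safe" year with the same calendar layout, let the system localtime()
+   apply the time zone and DST rules for that safe year, then restore the
+   original year and patch up the year and day-of-year edge cases. */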
+
+
+int valid_tm_wday( const struct TM* date ) {
+    if( 0 <= date->tm_wday && date->tm_wday <= 6 )
+        return 1;
+    else
+        return 0;
+}
+
+int valid_tm_mon( const struct TM* date ) {
+    if( 0 <= date->tm_mon && date->tm_mon <= 11 )
+        return 1;
+    else
+        return 0;
+}
+
+
+char *asctime64_r( const struct TM* date, char *result ) {
+    /* I figure everything else can be displayed, even hour 25, but if
+       these are out of range we would walk off the ends of the name arrays */
+    if( !valid_tm_wday(date) || !valid_tm_mon(date) )
+        return NULL;
+
+    sprintf(result, "%.3s %.3s%3d %.2d:%.2d:%.2d %d\n",
+        wday_name[date->tm_wday],
+        mon_name[date->tm_mon],
+        date->tm_mday, date->tm_hour,
+        date->tm_min, date->tm_sec,
+        1900 + date->tm_year);
+
+    return result;
+}
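+
+/* Buffer note: the classic asctime() format needs 26 bytes for a 4-digit
+   year, but with 64 bit years the %d above can produce many more digits,
+   so result must be sized accordingly by the caller. */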
+
+
+char *ctime64_r( const Time64_T* time, char* result ) {
+    struct TM date;
+
+    if( localtime64_r( time, &date ) == NULL )
+        return NULL;
+
+    return asctime64_r( &date, result );
+}
+
+
+/* Non-thread safe versions of the above */
+struct TM *localtime64(const Time64_T *time) {
+    return localtime64_r(time, &Static_Return_Date);
+}
+
+struct TM *gmtime64(const Time64_T *time) {
+    return gmtime64_r(time, &Static_Return_Date);
+}
+
+char *asctime64( const struct TM* date ) {
+    return asctime64_r( date, Static_Return_String );
+}
+
+char *ctime64( const Time64_T* time ) {
+    return asctime64(localtime64(time));
+}
diff --git a/libc/bionic/time64_config.h b/libc/bionic/time64_config.h
new file mode 100644
index 0000000..53bcecf
--- /dev/null
+++ b/libc/bionic/time64_config.h
@@ -0,0 +1,75 @@
+/* Debugging
+   TIME_64_DEBUG
+   Define if you want debugging messages
+*/
+/* #define TIME_64_DEBUG */
+
+
+/* INT_64_T
+   A 64 bit integer type used to store times and other values.
+   Must be defined.
+*/
+#define INT_64_T                long long
+
+
+/* USE_TM64
+   Should we use a 64 bit safe replacement for tm?  This will
+   let you go past the year 2 billion, but the struct will be incompatible
+   with tm.  Conversion functions will be provided.
+*/
+/* #define USE_TM64 */
+
+
+/* Availability of system functions.
+
+   HAS_GMTIME_R
+   Define if your system has gmtime_r()
+
+   HAS_LOCALTIME_R
+   Define if your system has localtime_r()
+
+   HAS_TIMEGM
+   Define if your system has timegm(), a GNU extension.
+*/
+#define HAS_GMTIME_R
+#define HAS_LOCALTIME_R
+/*#define HAS_TIMEGM */
+
+
+/* Details of non-standard tm struct elements.
+
+   HAS_TM_TM_GMTOFF
+   True if your tm struct has a "tm_gmtoff" element.
+   A BSD extension.
+
+   HAS_TM_TM_ZONE
+   True if your tm struct has a "tm_zone" element.
+   A BSD extension.
+*/
+#define HAS_TM_TM_GMTOFF
+#define HAS_TM_TM_ZONE
+
+
+/* USE_SYSTEM_LOCALTIME
+   USE_SYSTEM_GMTIME
+   Should we use the system functions if the time is inside their range?
+   Your system localtime() is probably more accurate, but our gmtime() is
+   fast and safe.
+*/
+#define USE_SYSTEM_LOCALTIME
+/* #define USE_SYSTEM_GMTIME */
+
+
+/* SYSTEM_LOCALTIME_MAX
+   SYSTEM_LOCALTIME_MIN
+   SYSTEM_GMTIME_MAX
+   SYSTEM_GMTIME_MIN
+   Maximum and minimum values your system's gmtime() and localtime()
+   can handle.  We will use your system functions if the time falls
+   inside these ranges.
+*/
+#define SYSTEM_LOCALTIME_MAX     2147483647
+#define SYSTEM_LOCALTIME_MIN    -2147483647
+#define SYSTEM_GMTIME_MAX        2147483647
+#define SYSTEM_GMTIME_MIN       -2147483647
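+
+/* 2147483647 == 2**31 - 1 == 03:14:07 UTC, Jan 19th 2038, the limit of
+   a signed 32 bit time_t. */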
+
diff --git a/libc/bionic/utime.c b/libc/bionic/utime.c
new file mode 100644
index 0000000..7239847
--- /dev/null
+++ b/libc/bionic/utime.c
@@ -0,0 +1,55 @@
+/*-
+ * Copyright (c) 1990, 1993
+ *	The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char sccsid[] = "@(#)utime.c	8.1 (Berkeley) 6/4/93";
+#endif /* LIBC_SCCS and not lint */
+#include <sys/cdefs.h>
+//__FBSDID("$FreeBSD: /repoman/r/ncvs/src/lib/libc/gen/utime.c,v 1.3 2007/01/09 00:27:56 imp Exp $");
+
+#include <sys/time.h>
+
+#include <utime.h>
+
+int
+utime(const char *path, const struct utimbuf *times)
+{
+	struct timeval tv[2], *tvp;
+
+	if (times) {
+		tv[0].tv_sec = times->actime;
+		tv[1].tv_sec = times->modtime;
+		tv[0].tv_usec = tv[1].tv_usec = 0;
+		tvp = tv;
+	} else
+		tvp = NULL;
+	return (utimes(path, tvp));
+}
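+
+/* Usage sketch (illustrative; the path is made up):
+
+	struct utimbuf ub;
+	ub.actime  = 0;                    // access time: the epoch
+	ub.modtime = 0;                    // modification time: the epoch
+	utime("/tmp/example", &ub);        // or utime(path, NULL) for "now"
+*/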
diff --git a/libc/bionic/utmp.c b/libc/bionic/utmp.c
new file mode 100644
index 0000000..c3b55da
--- /dev/null
+++ b/libc/bionic/utmp.c
@@ -0,0 +1,93 @@
+/*-
+ * Copyright (c) 2002 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Christos Zoulas.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *        This product includes software developed by the NetBSD
+ *        Foundation, Inc. and its contributors.
+ * 4. Neither the name of The NetBSD Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <sys/cdefs.h>
+
+#include <sys/types.h>
+#include <sys/param.h>
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <utmp.h>
+
+static struct utmp utmp;
+static FILE *ut;
+static char utfile[MAXPATHLEN] = _PATH_UTMP;
+
+void
+setutent(void)
+{
+	if (ut == NULL)
+		return;
+	(void)fseeko(ut, (off_t)0, SEEK_SET);
+}
+
+struct utmp *
+getutent(void)
+{
+	if (ut == NULL) {
+		if ((ut = fopen(utfile, "r")) == NULL)
+			return NULL;
+	}
+	if (fread(&utmp, sizeof(utmp), 1, ut) == 1)
+		return &utmp;
+	return NULL;
+}
+
+void
+endutent(void)
+{
+	if (ut != NULL) {
+		(void)fclose(ut);
+		ut = NULL;
+	}
+}
+
+int
+utmpname(const char *fname)
+{
+	size_t len = strlen(fname);
+
+	/* an empty name would index fname[-1] below */
+	if (len == 0 || len >= sizeof(utfile))
+		return 0;
+
+	/* must not end in x! */
+	if (fname[len - 1] == 'x')
+		return 0;
+
+	(void)strlcpy(utfile, fname, sizeof(utfile));
+	endutent();
+	return 1;
+}
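+
+/* Usage sketch (illustrative; field names are those of your <utmp.h>):
+
+	struct utmp *u;
+	setutent();
+	while ((u = getutent()) != NULL)
+		printf("%.*s\n", (int)sizeof(u->ut_line), u->ut_line);
+	endutent();
+*/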