Merge "Add RTLD_NODELETE flag support"
diff --git a/libc/arch-arm/arm.mk b/libc/arch-arm/arm.mk
index b5ed7f0..cca4ed0 100644
--- a/libc/arch-arm/arm.mk
+++ b/libc/arch-arm/arm.mk
@@ -41,7 +41,6 @@
 
 libc_openbsd_src_files_arm += \
     upstream-openbsd/lib/libc/string/bcopy.c \
-    upstream-openbsd/lib/libc/string/stpcpy.c \
     upstream-openbsd/lib/libc/string/stpncpy.c \
     upstream-openbsd/lib/libc/string/strlcat.c \
     upstream-openbsd/lib/libc/string/strlcpy.c \
diff --git a/libc/arch-arm/cortex-a15/bionic/stpcpy.S b/libc/arch-arm/cortex-a15/bionic/stpcpy.S
new file mode 100644
index 0000000..740523b
--- /dev/null
+++ b/libc/arch-arm/cortex-a15/bionic/stpcpy.S
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define STPCPY
+#include "string_copy.S"
diff --git a/libc/arch-arm/cortex-a15/bionic/strcpy.S b/libc/arch-arm/cortex-a15/bionic/strcpy.S
index 2cfdb19..951face 100644
--- a/libc/arch-arm/cortex-a15/bionic/strcpy.S
+++ b/libc/arch-arm/cortex-a15/bionic/strcpy.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -25,432 +25,6 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
-/*
- * Copyright (c) 2013 ARM Ltd
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the company may not be used to endorse or promote
- *    products derived from this software without specific prior written
- *    permission.
- *
- * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
- * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
 
-#include <private/bionic_asm.h>
-
-    .syntax unified
-
-    .thumb
-    .thumb_func
-
-    .macro m_push
-    push    {r0, r4, r5, lr}
-    .cfi_def_cfa_offset 16
-    .cfi_rel_offset r0, 0
-    .cfi_rel_offset r4, 4
-    .cfi_rel_offset r5, 8
-    .cfi_rel_offset lr, 12
-    .endm // m_push
-
-    .macro m_pop
-    pop     {r0, r4, r5, pc}
-    .endm // m_pop
-
-    .macro m_copy_byte reg, cmd, label
-    ldrb    \reg, [r1], #1
-    strb    \reg, [r0], #1
-    \cmd    \reg, \label
-    .endm // m_copy_byte
-
-ENTRY(strcpy)
-    // For short copies, hard-code checking the first 8 bytes since this
-    // new code doesn't win until after about 8 bytes.
-    m_push
-    m_copy_byte reg=r2, cmd=cbz, label=.Lstrcpy_finish
-    m_copy_byte reg=r3, cmd=cbz, label=.Lstrcpy_finish
-    m_copy_byte reg=r4, cmd=cbz, label=.Lstrcpy_finish
-    m_copy_byte reg=r5, cmd=cbz, label=.Lstrcpy_finish
-    m_copy_byte reg=r2, cmd=cbz, label=.Lstrcpy_finish
-    m_copy_byte reg=r3, cmd=cbz, label=.Lstrcpy_finish
-    m_copy_byte reg=r4, cmd=cbz, label=.Lstrcpy_finish
-    m_copy_byte reg=r5, cmd=cbnz, label=.Lstrcpy_continue
-
-.Lstrcpy_finish:
-    m_pop
-
-.Lstrcpy_continue:
-    pld     [r1, #0]
-    ands    r3, r0, #7
-    beq     .Lstrcpy_check_src_align
-
-    // Align to a double word (64 bits).
-    rsb     r3, r3, #8
-    lsls    ip, r3, #31
-    beq     .Lstrcpy_align_to_32
-
-    ldrb    r2, [r1], #1
-    strb    r2, [r0], #1
-    cbz     r2, .Lstrcpy_complete
-
-.Lstrcpy_align_to_32:
-    bcc     .Lstrcpy_align_to_64
-
-    ldrb    r2, [r1], #1
-    strb    r2, [r0], #1
-    cbz     r2, .Lstrcpy_complete
-    ldrb    r2, [r1], #1
-    strb    r2, [r0], #1
-    cbz     r2, .Lstrcpy_complete
-
-.Lstrcpy_align_to_64:
-    tst     r3, #4
-    beq     .Lstrcpy_check_src_align
-    ldr     r2, [r1], #4
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_first_register
-    str     r2, [r0], #4
-
-.Lstrcpy_check_src_align:
-    // At this point dst is aligned to a double word, check if src
-    // is also aligned to a double word.
-    ands    r3, r1, #7
-    bne     .Lstrcpy_unaligned_copy
-
-    .p2align 2
-.Lstrcpy_mainloop:
-    ldrd    r2, r3, [r1], #8
-
-    pld     [r1, #64]
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_first_register
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_second_register
-
-    strd    r2, r3, [r0], #8
-    b       .Lstrcpy_mainloop
-
-.Lstrcpy_complete:
-    m_pop
-
-.Lstrcpy_zero_in_first_register:
-    lsls    lr, ip, #17
-    bne     .Lstrcpy_copy1byte
-    bcs     .Lstrcpy_copy2bytes
-    lsls    ip, ip, #1
-    bne     .Lstrcpy_copy3bytes
-
-.Lstrcpy_copy4bytes:
-    // Copy 4 bytes to the destiniation.
-    str     r2, [r0]
-    m_pop
-
-.Lstrcpy_copy1byte:
-    strb    r2, [r0]
-    m_pop
-
-.Lstrcpy_copy2bytes:
-    strh    r2, [r0]
-    m_pop
-
-.Lstrcpy_copy3bytes:
-    strh    r2, [r0], #2
-    lsr     r2, #16
-    strb    r2, [r0]
-    m_pop
-
-.Lstrcpy_zero_in_second_register:
-    lsls    lr, ip, #17
-    bne     .Lstrcpy_copy5bytes
-    bcs     .Lstrcpy_copy6bytes
-    lsls    ip, ip, #1
-    bne     .Lstrcpy_copy7bytes
-
-    // Copy 8 bytes to the destination.
-    strd    r2, r3, [r0]
-    m_pop
-
-.Lstrcpy_copy5bytes:
-    str     r2, [r0], #4
-    strb    r3, [r0]
-    m_pop
-
-.Lstrcpy_copy6bytes:
-    str     r2, [r0], #4
-    strh    r3, [r0]
-    m_pop
-
-.Lstrcpy_copy7bytes:
-    str     r2, [r0], #4
-    strh    r3, [r0], #2
-    lsr     r3, #16
-    strb    r3, [r0]
-    m_pop
-
-.Lstrcpy_unaligned_copy:
-    // Dst is aligned to a double word, while src is at an unknown alignment.
-    // There are 7 different versions of the unaligned copy code
-    // to prevent overreading the src. The mainloop of every single version
-    // will store 64 bits per loop. The difference is how much of src can
-    // be read without potentially crossing a page boundary.
-    tbb     [pc, r3]
-.Lstrcpy_unaligned_branchtable:
-    .byte 0
-    .byte ((.Lstrcpy_unalign7 - .Lstrcpy_unaligned_branchtable)/2)
-    .byte ((.Lstrcpy_unalign6 - .Lstrcpy_unaligned_branchtable)/2)
-    .byte ((.Lstrcpy_unalign5 - .Lstrcpy_unaligned_branchtable)/2)
-    .byte ((.Lstrcpy_unalign4 - .Lstrcpy_unaligned_branchtable)/2)
-    .byte ((.Lstrcpy_unalign3 - .Lstrcpy_unaligned_branchtable)/2)
-    .byte ((.Lstrcpy_unalign2 - .Lstrcpy_unaligned_branchtable)/2)
-    .byte ((.Lstrcpy_unalign1 - .Lstrcpy_unaligned_branchtable)/2)
-
-    .p2align 2
-    // Can read 7 bytes before possibly crossing a page.
-.Lstrcpy_unalign7:
-    ldr     r2, [r1], #4
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_first_register
-
-    ldrb    r3, [r1]
-    cbz     r3, .Lstrcpy_unalign7_copy5bytes
-    ldrb    r4, [r1, #1]
-    cbz     r4, .Lstrcpy_unalign7_copy6bytes
-    ldrb    r5, [r1, #2]
-    cbz     r5, .Lstrcpy_unalign7_copy7bytes
-
-    ldr     r3, [r1], #4
-    pld     [r1, #64]
-
-    lsrs    ip, r3, #24
-    strd    r2, r3, [r0], #8
-    beq     .Lstrcpy_unalign_return
-    b       .Lstrcpy_unalign7
-
-.Lstrcpy_unalign7_copy5bytes:
-    str     r2, [r0], #4
-    strb    r3, [r0]
-.Lstrcpy_unalign_return:
-    m_pop
-
-.Lstrcpy_unalign7_copy6bytes:
-    str     r2, [r0], #4
-    strb    r3, [r0], #1
-    strb    r4, [r0], #1
-    m_pop
-
-.Lstrcpy_unalign7_copy7bytes:
-    str     r2, [r0], #4
-    strb    r3, [r0], #1
-    strb    r4, [r0], #1
-    strb    r5, [r0], #1
-    m_pop
-
-    .p2align 2
-    // Can read 6 bytes before possibly crossing a page.
-.Lstrcpy_unalign6:
-    ldr     r2, [r1], #4
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_first_register
-
-    ldrb    r4, [r1]
-    cbz     r4, .Lstrcpy_unalign_copy5bytes
-    ldrb    r5, [r1, #1]
-    cbz     r5, .Lstrcpy_unalign_copy6bytes
-
-    ldr     r3, [r1], #4
-    pld     [r1, #64]
-
-    tst     r3, #0xff0000
-    beq     .Lstrcpy_copy7bytes
-    lsrs    ip, r3, #24
-    strd    r2, r3, [r0], #8
-    beq     .Lstrcpy_unalign_return
-    b       .Lstrcpy_unalign6
-
-    .p2align 2
-    // Can read 5 bytes before possibly crossing a page.
-.Lstrcpy_unalign5:
-    ldr     r2, [r1], #4
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_first_register
-
-    ldrb    r4, [r1]
-    cbz     r4, .Lstrcpy_unalign_copy5bytes
-
-    ldr     r3, [r1], #4
-
-    pld     [r1, #64]
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_second_register
-
-    strd    r2, r3, [r0], #8
-    b       .Lstrcpy_unalign5
-
-.Lstrcpy_unalign_copy5bytes:
-    str     r2, [r0], #4
-    strb    r4, [r0]
-    m_pop
-
-.Lstrcpy_unalign_copy6bytes:
-    str     r2, [r0], #4
-    strb    r4, [r0], #1
-    strb    r5, [r0]
-    m_pop
-
-    .p2align 2
-    // Can read 4 bytes before possibly crossing a page.
-.Lstrcpy_unalign4:
-    ldr     r2, [r1], #4
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_first_register
-
-    ldr     r3, [r1], #4
-    pld     [r1, #64]
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_second_register
-
-    strd    r2, r3, [r0], #8
-    b       .Lstrcpy_unalign4
-
-    .p2align 2
-    // Can read 3 bytes before possibly crossing a page.
-.Lstrcpy_unalign3:
-    ldrb    r2, [r1]
-    cbz     r2, .Lstrcpy_unalign3_copy1byte
-    ldrb    r3, [r1, #1]
-    cbz     r3, .Lstrcpy_unalign3_copy2bytes
-    ldrb    r4, [r1, #2]
-    cbz     r4, .Lstrcpy_unalign3_copy3bytes
-
-    ldr     r2, [r1], #4
-    ldr     r3, [r1], #4
-
-    pld     [r1, #64]
-
-    lsrs    lr, r2, #24
-    beq     .Lstrcpy_copy4bytes
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_second_register
-
-    strd    r2, r3, [r0], #8
-    b       .Lstrcpy_unalign3
-
-.Lstrcpy_unalign3_copy1byte:
-    strb    r2, [r0]
-    m_pop
-
-.Lstrcpy_unalign3_copy2bytes:
-    strb    r2, [r0], #1
-    strb    r3, [r0]
-    m_pop
-
-.Lstrcpy_unalign3_copy3bytes:
-    strb    r2, [r0], #1
-    strb    r3, [r0], #1
-    strb    r4, [r0]
-    m_pop
-
-    .p2align 2
-    // Can read 2 bytes before possibly crossing a page.
-.Lstrcpy_unalign2:
-    ldrb    r2, [r1]
-    cbz     r2, .Lstrcpy_unalign_copy1byte
-    ldrb    r4, [r1, #1]
-    cbz     r4, .Lstrcpy_unalign_copy2bytes
-
-    ldr     r2, [r1], #4
-    ldr     r3, [r1], #4
-    pld     [r1, #64]
-
-    tst     r2, #0xff0000
-    beq     .Lstrcpy_copy3bytes
-    lsrs    ip, r2, #24
-    beq     .Lstrcpy_copy4bytes
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_second_register
-
-    strd    r2, r3, [r0], #8
-    b       .Lstrcpy_unalign2
-
-    .p2align 2
-    // Can read 1 byte before possibly crossing a page.
-.Lstrcpy_unalign1:
-    ldrb    r2, [r1]
-    cbz     r2, .Lstrcpy_unalign_copy1byte
-
-    ldr     r2, [r1], #4
-    ldr     r3, [r1], #4
-
-    pld     [r1, #64]
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_first_register
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_second_register
-
-    strd    r2, r3, [r0], #8
-    b       .Lstrcpy_unalign1
-
-.Lstrcpy_unalign_copy1byte:
-    strb    r2, [r0]
-    m_pop
-
-.Lstrcpy_unalign_copy2bytes:
-    strb    r2, [r0], #1
-    strb    r4, [r0]
-    m_pop
-END(strcpy)
+#define STRCPY
+#include "string_copy.S"
diff --git a/libc/arch-arm/cortex-a15/bionic/string_copy.S b/libc/arch-arm/cortex-a15/bionic/string_copy.S
new file mode 100644
index 0000000..20f0e91
--- /dev/null
+++ b/libc/arch-arm/cortex-a15/bionic/string_copy.S
@@ -0,0 +1,513 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 2013 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ *    products derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(STPCPY) && !defined(STRCPY)
+#error "Either STPCPY or STRCPY must be defined."
+#endif
+
+#include <private/bionic_asm.h>
+
+    .syntax unified
+
+    .thumb
+    .thumb_func
+
+#if defined(STPCPY)
+    .macro m_push
+    push    {r4, r5, lr}
+    .cfi_def_cfa_offset 12
+    .cfi_rel_offset r4, 0
+    .cfi_rel_offset r5, 4
+    .cfi_rel_offset lr, 8
+    .endm // m_push
+#else
+    .macro m_push
+    push    {r0, r4, r5, lr}
+    .cfi_def_cfa_offset 16
+    .cfi_rel_offset r0, 0
+    .cfi_rel_offset r4, 4
+    .cfi_rel_offset r5, 8
+    .cfi_rel_offset lr, 12
+    .endm // m_push
+#endif
+
+#if defined(STPCPY)
+    .macro m_pop
+    pop     {r4, r5, pc}
+    .endm // m_pop
+#else
+    .macro m_pop
+    pop     {r0, r4, r5, pc}
+    .endm // m_pop
+#endif
+
+    .macro m_copy_byte reg, cmd, label
+    ldrb    \reg, [r1], #1
+    strb    \reg, [r0], #1
+    \cmd    \reg, \label
+    .endm // m_copy_byte
+
+#if defined(STPCPY)
+ENTRY(stpcpy)
+#else
+ENTRY(strcpy)
+#endif
+    // For short copies, hard-code checking the first 8 bytes since this
+    // new code doesn't win until after about 8 bytes.
+    m_push
+    m_copy_byte reg=r2, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r3, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r4, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r5, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r2, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r3, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r4, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r5, cmd=cbnz, label=.Lstringcopy_continue
+
+.Lstringcopy_finish:
+#if defined(STPCPY)
+    sub     r0, r0, #1
+#endif
+    m_pop
+
+.Lstringcopy_continue:
+    pld     [r1, #0]
+    ands    r3, r0, #7
+    beq     .Lstringcopy_check_src_align
+
+    // Align to a double word (64 bits).
+    rsb     r3, r3, #8
+    lsls    ip, r3, #31
+    beq     .Lstringcopy_align_to_32
+
+    ldrb    r2, [r1], #1
+    strb    r2, [r0], #1
+    cbz     r2, .Lstringcopy_complete
+
+.Lstringcopy_align_to_32:
+    bcc     .Lstringcopy_align_to_64
+
+    ldrb    r2, [r1], #1
+    strb    r2, [r0], #1
+    cbz     r2, .Lstringcopy_complete
+    ldrb    r2, [r1], #1
+    strb    r2, [r0], #1
+    cbz     r2, .Lstringcopy_complete
+
+.Lstringcopy_align_to_64:
+    tst     r3, #4
+    beq     .Lstringcopy_check_src_align
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+    str     r2, [r0], #4
+
+.Lstringcopy_check_src_align:
+    // At this point dst is aligned to a double word, check if src
+    // is also aligned to a double word.
+    ands    r3, r1, #7
+    bne     .Lstringcopy_unaligned_copy
+
+    .p2align 2
+.Lstringcopy_mainloop:
+    ldrd    r2, r3, [r1], #8
+
+    pld     [r1, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       .Lstringcopy_mainloop
+
+.Lstringcopy_complete:
+#if defined(STPCPY)
+    sub     r0, r0, #1
+#endif
+    m_pop
+
+.Lstringcopy_zero_in_first_register:
+    lsls    lr, ip, #17
+    bne     .Lstringcopy_copy1byte
+    bcs     .Lstringcopy_copy2bytes
+    lsls    ip, ip, #1
+    bne     .Lstringcopy_copy3bytes
+
+.Lstringcopy_copy4bytes:
+    // Copy 4 bytes to the destination.
+#if defined(STPCPY)
+    str     r2, [r0], #3
+#else
+    str     r2, [r0]
+#endif
+    m_pop
+
+.Lstringcopy_copy1byte:
+    strb    r2, [r0]
+    m_pop
+
+.Lstringcopy_copy2bytes:
+#if defined(STPCPY)
+    strh    r2, [r0], #1
+#else
+    strh    r2, [r0]
+#endif
+    m_pop
+
+.Lstringcopy_copy3bytes:
+    strh    r2, [r0], #2
+    lsr     r2, #16
+    strb    r2, [r0]
+    m_pop
+
+.Lstringcopy_zero_in_second_register:
+    lsls    lr, ip, #17
+    bne     .Lstringcopy_copy5bytes
+    bcs     .Lstringcopy_copy6bytes
+    lsls    ip, ip, #1
+    bne     .Lstringcopy_copy7bytes
+
+    // Copy 8 bytes to the destination.
+    strd    r2, r3, [r0]
+#if defined(STPCPY)
+    add     r0, r0, #7
+#endif
+    m_pop
+
+.Lstringcopy_copy5bytes:
+    str     r2, [r0], #4
+    strb    r3, [r0]
+    m_pop
+
+.Lstringcopy_copy6bytes:
+    str     r2, [r0], #4
+#if defined(STPCPY)
+    strh    r3, [r0], #1
+#else
+    strh    r3, [r0]
+#endif
+    m_pop
+
+.Lstringcopy_copy7bytes:
+    str     r2, [r0], #4
+    strh    r3, [r0], #2
+    lsr     r3, #16
+    strb    r3, [r0]
+    m_pop
+
+.Lstringcopy_unaligned_copy:
+    // Dst is aligned to a double word, while src is at an unknown alignment.
+    // There are 7 different versions of the unaligned copy code
+    // to prevent overreading the src. The mainloop of every single version
+    // will store 64 bits per loop. The difference is how much of src can
+    // be read without potentially crossing a page boundary.
+    tbb     [pc, r3]
+.Lstringcopy_unaligned_branchtable:
+    .byte 0
+    .byte ((.Lstringcopy_unalign7 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign6 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign5 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign4 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign3 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign2 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign1 - .Lstringcopy_unaligned_branchtable)/2)
+
+    .p2align 2
+    // Can read 7 bytes before possibly crossing a page.
+.Lstringcopy_unalign7:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    ldrb    r3, [r1]
+    cbz     r3, .Lstringcopy_unalign7_copy5bytes
+    ldrb    r4, [r1, #1]
+    cbz     r4, .Lstringcopy_unalign7_copy6bytes
+    ldrb    r5, [r1, #2]
+    cbz     r5, .Lstringcopy_unalign7_copy7bytes
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    lsrs    ip, r3, #24
+    strd    r2, r3, [r0], #8
+#if defined(STPCPY)
+    beq     .Lstringcopy_finish
+#else
+    beq     .Lstringcopy_unalign_return
+#endif
+    b       .Lstringcopy_unalign7
+
+.Lstringcopy_unalign7_copy5bytes:
+    str     r2, [r0], #4
+    strb    r3, [r0]
+.Lstringcopy_unalign_return:
+    m_pop
+
+.Lstringcopy_unalign7_copy6bytes:
+    str     r2, [r0], #4
+    strb    r3, [r0], #1
+    strb    r4, [r0]
+    m_pop
+
+.Lstringcopy_unalign7_copy7bytes:
+    str     r2, [r0], #4
+    strb    r3, [r0], #1
+    strb    r4, [r0], #1
+    strb    r5, [r0]
+    m_pop
+
+    .p2align 2
+    // Can read 6 bytes before possibly crossing a page.
+.Lstringcopy_unalign6:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    ldrb    r4, [r1]
+    cbz     r4, .Lstringcopy_unalign_copy5bytes
+    ldrb    r5, [r1, #1]
+    cbz     r5, .Lstringcopy_unalign_copy6bytes
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    tst     r3, #0xff0000
+    beq     .Lstringcopy_copy7bytes
+    lsrs    ip, r3, #24
+    strd    r2, r3, [r0], #8
+#if defined(STPCPY)
+    beq     .Lstringcopy_finish
+#else
+    beq     .Lstringcopy_unalign_return
+#endif
+    b       .Lstringcopy_unalign6
+
+    .p2align 2
+    // Can read 5 bytes before possibly crossing a page.
+.Lstringcopy_unalign5:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    ldrb    r4, [r1]
+    cbz     r4, .Lstringcopy_unalign_copy5bytes
+
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       .Lstringcopy_unalign5
+
+.Lstringcopy_unalign_copy5bytes:
+    str     r2, [r0], #4
+    strb    r4, [r0]
+    m_pop
+
+.Lstringcopy_unalign_copy6bytes:
+    str     r2, [r0], #4
+    strb    r4, [r0], #1
+    strb    r5, [r0]
+    m_pop
+
+    .p2align 2
+    // Can read 4 bytes before possibly crossing a page.
+.Lstringcopy_unalign4:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       .Lstringcopy_unalign4
+
+    .p2align 2
+    // Can read 3 bytes before possibly crossing a page.
+.Lstringcopy_unalign3:
+    ldrb    r2, [r1]
+    cbz     r2, .Lstringcopy_unalign3_copy1byte
+    ldrb    r3, [r1, #1]
+    cbz     r3, .Lstringcopy_unalign3_copy2bytes
+    ldrb    r4, [r1, #2]
+    cbz     r4, .Lstringcopy_unalign3_copy3bytes
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    lsrs    lr, r2, #24
+    beq     .Lstringcopy_copy4bytes
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       .Lstringcopy_unalign3
+
+.Lstringcopy_unalign3_copy1byte:
+    strb    r2, [r0]
+    m_pop
+
+.Lstringcopy_unalign3_copy2bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0]
+    m_pop
+
+.Lstringcopy_unalign3_copy3bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0], #1
+    strb    r4, [r0]
+    m_pop
+
+    .p2align 2
+    // Can read 2 bytes before possibly crossing a page.
+.Lstringcopy_unalign2:
+    ldrb    r2, [r1]
+    cbz     r2, .Lstringcopy_unalign_copy1byte
+    ldrb    r4, [r1, #1]
+    cbz     r4, .Lstringcopy_unalign_copy2bytes
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    tst     r2, #0xff0000
+    beq     .Lstringcopy_copy3bytes
+    lsrs    ip, r2, #24
+    beq     .Lstringcopy_copy4bytes
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       .Lstringcopy_unalign2
+
+    .p2align 2
+    // Can read 1 byte before possibly crossing a page.
+.Lstringcopy_unalign1:
+    ldrb    r2, [r1]
+    cbz     r2, .Lstringcopy_unalign_copy1byte
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    strd    r2, r3, [r0], #8
+    b       .Lstringcopy_unalign1
+
+.Lstringcopy_unalign_copy1byte:
+    strb    r2, [r0]
+    m_pop
+
+.Lstringcopy_unalign_copy2bytes:
+    strb    r2, [r0], #1
+    strb    r4, [r0]
+    m_pop
+#if defined(STPCPY)
+END(stpcpy)
+#else
+END(strcpy)
+#endif
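
The word-at-a-time loops above test each loaded 32-bit word for a zero byte
with the classic sub/bic/ands sequence: (w - 0x01010101) & ~w & 0x80808080 is
nonzero exactly when some byte of w is zero, which lets the copy store four or
eight bytes per iteration and fall into the byte-exact tails only once the
terminator is found. A small standalone C illustration of that test (not part
of the change):

    #include <stdint.h>
    #include <stdio.h>

    // Nonzero iff some byte of w is 0x00 -- the same test the assembly
    // performs with sub/bic/ands before each 4- or 8-byte store.
    static uint32_t has_zero_byte(uint32_t w) {
      return (w - 0x01010101u) & ~w & 0x80808080u;
    }

    int main(void) {
      printf("%d %d\n",
             has_zero_byte(0x41424344u) != 0,   // "ABCD": no zero byte -> 0
             has_zero_byte(0x41004344u) != 0);  // contains 0x00       -> 1
      return 0;
    }
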
diff --git a/libc/arch-arm/cortex-a15/cortex-a15.mk b/libc/arch-arm/cortex-a15/cortex-a15.mk
index 552811e..f1abe32 100644
--- a/libc/arch-arm/cortex-a15/cortex-a15.mk
+++ b/libc/arch-arm/cortex-a15/cortex-a15.mk
@@ -1,10 +1,11 @@
 libc_bionic_src_files_arm += \
     arch-arm/cortex-a15/bionic/memcpy.S \
     arch-arm/cortex-a15/bionic/memset.S \
+    arch-arm/cortex-a15/bionic/stpcpy.S \
     arch-arm/cortex-a15/bionic/strcat.S \
+    arch-arm/cortex-a15/bionic/__strcat_chk.S \
     arch-arm/cortex-a15/bionic/strcmp.S \
     arch-arm/cortex-a15/bionic/strcpy.S \
-    arch-arm/cortex-a15/bionic/strlen.S \
-    arch-arm/cortex-a15/bionic/__strcat_chk.S \
     arch-arm/cortex-a15/bionic/__strcpy_chk.S \
+    arch-arm/cortex-a15/bionic/strlen.S \
     bionic/memmove.c \
diff --git a/libc/arch-arm/cortex-a9/bionic/stpcpy.S b/libc/arch-arm/cortex-a9/bionic/stpcpy.S
new file mode 100644
index 0000000..740523b
--- /dev/null
+++ b/libc/arch-arm/cortex-a9/bionic/stpcpy.S
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define STPCPY
+#include "string_copy.S"
diff --git a/libc/arch-arm/cortex-a9/bionic/strcpy.S b/libc/arch-arm/cortex-a9/bionic/strcpy.S
index d705aa3..951face 100644
--- a/libc/arch-arm/cortex-a9/bionic/strcpy.S
+++ b/libc/arch-arm/cortex-a9/bionic/strcpy.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -25,437 +25,6 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
-/*
- * Copyright (c) 2013 ARM Ltd
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of the company may not be used to endorse or promote
- *    products derived from this software without specific prior written
- *    permission.
- *
- * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
- * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
 
-#include <private/bionic_asm.h>
-
-    .syntax unified
-
-    .thumb
-    .thumb_func
-
-    .macro m_push
-    push    {r0, r4, r5, lr}
-    .cfi_def_cfa_offset 16
-    .cfi_rel_offset r0, 0
-    .cfi_rel_offset r4, 4
-    .cfi_rel_offset r5, 8
-    .cfi_rel_offset lr, 12
-    .endm // m_push
-
-    .macro m_ret inst
-    \inst   {r0, r4, r5, pc}
-    .endm // m_ret
-
-    .macro m_copy_byte reg, cmd, label
-    ldrb    \reg, [r1], #1
-    strb    \reg, [r0], #1
-    \cmd    \reg, \label
-    .endm // m_copy_byte
-
-ENTRY(strcpy)
-    // Unroll the first 8 bytes that will be copied.
-    m_push
-    m_copy_byte reg=r2, cmd=cbz, label=.Lstrcpy_finish
-    m_copy_byte reg=r3, cmd=cbz, label=.Lstrcpy_finish
-    m_copy_byte reg=r4, cmd=cbz, label=.Lstrcpy_finish
-    m_copy_byte reg=r5, cmd=cbz, label=.Lstrcpy_finish
-    m_copy_byte reg=r2, cmd=cbz, label=.Lstrcpy_finish
-    m_copy_byte reg=r3, cmd=cbz, label=.Lstrcpy_finish
-    m_copy_byte reg=r4, cmd=cbz, label=.Lstrcpy_finish
-    m_copy_byte reg=r5, cmd=cbnz, label=.Lstrcpy_continue
-
-.Lstrcpy_finish:
-    m_ret   inst=pop
-
-.Lstrcpy_continue:
-    pld     [r1, #0]
-    ands    r3, r0, #7
-    bne     .Lstrcpy_align_dst
-
-.Lstrcpy_check_src_align:
-    // At this point dst is aligned to a double word, check if src
-    // is also aligned to a double word.
-    ands    r3, r1, #7
-    bne     .Lstrcpy_unaligned_copy
-
-    .p2align 2
-.Lstrcpy_mainloop:
-    ldmia   r1!, {r2, r3}
-
-    pld     [r1, #64]
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_first_register
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_second_register
-
-    stmia   r0!, {r2, r3}
-    b       .Lstrcpy_mainloop
-
-.Lstrcpy_zero_in_first_register:
-    lsls    lr, ip, #17
-    itt     ne
-    strbne  r2, [r0]
-    m_ret   inst=popne
-    itt     cs
-    strhcs  r2, [r0]
-    m_ret   inst=popcs
-    lsls    ip, ip, #1
-    itt     eq
-    streq   r2, [r0]
-    m_ret   inst=popeq
-    strh    r2, [r0], #2
-    lsr     r3, r2, #16
-    strb    r3, [r0]
-    m_ret   inst=pop
-
-.Lstrcpy_zero_in_second_register:
-    lsls    lr, ip, #17
-    ittt    ne
-    stmiane r0!, {r2}
-    strbne  r3, [r0]
-    m_ret   inst=popne
-    ittt    cs
-    strcs   r2, [r0], #4
-    strhcs  r3, [r0]
-    m_ret   inst=popcs
-    lsls    ip, ip, #1
-    itt     eq
-    stmiaeq r0, {r2, r3}
-    m_ret   inst=popeq
-    stmia   r0!, {r2}
-    strh    r3, [r0], #2
-    lsr     r4, r3, #16
-    strb    r4, [r0]
-    m_ret   inst=pop
-
-.Lstrcpy_align_dst:
-    // Align to a double word (64 bits).
-    rsb     r3, r3, #8
-    lsls    ip, r3, #31
-    beq     .Lstrcpy_align_to_32
-
-    ldrb    r2, [r1], #1
-    strb    r2, [r0], #1
-    cbz     r2, .Lstrcpy_complete
-
-.Lstrcpy_align_to_32:
-    bcc     .Lstrcpy_align_to_64
-
-    ldrb    r4, [r1], #1
-    strb    r4, [r0], #1
-    cmp     r4, #0
-    it      eq
-    m_ret   inst=popeq
-    ldrb    r5, [r1], #1
-    strb    r5, [r0], #1
-    cmp     r5, #0
-    it      eq
-    m_ret   inst=popeq
-
-.Lstrcpy_align_to_64:
-    tst     r3, #4
-    beq     .Lstrcpy_check_src_align
-    ldr     r2, [r1], #4
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_first_register
-    stmia   r0!, {r2}
-    b       .Lstrcpy_check_src_align
-
-.Lstrcpy_complete:
-    m_ret   inst=pop
-
-.Lstrcpy_unaligned_copy:
-    // Dst is aligned to a double word, while src is at an unknown alignment.
-    // There are 7 different versions of the unaligned copy code
-    // to prevent overreading the src. The mainloop of every single version
-    // will store 64 bits per loop. The difference is how much of src can
-    // be read without potentially crossing a page boundary.
-    tbb     [pc, r3]
-.Lstrcpy_unaligned_branchtable:
-    .byte 0
-    .byte ((.Lstrcpy_unalign7 - .Lstrcpy_unaligned_branchtable)/2)
-    .byte ((.Lstrcpy_unalign6 - .Lstrcpy_unaligned_branchtable)/2)
-    .byte ((.Lstrcpy_unalign5 - .Lstrcpy_unaligned_branchtable)/2)
-    .byte ((.Lstrcpy_unalign4 - .Lstrcpy_unaligned_branchtable)/2)
-    .byte ((.Lstrcpy_unalign3 - .Lstrcpy_unaligned_branchtable)/2)
-    .byte ((.Lstrcpy_unalign2 - .Lstrcpy_unaligned_branchtable)/2)
-    .byte ((.Lstrcpy_unalign1 - .Lstrcpy_unaligned_branchtable)/2)
-
-    .p2align 2
-    // Can read 7 bytes before possibly crossing a page.
-.Lstrcpy_unalign7:
-    ldr     r2, [r1], #4
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_first_register
-
-    ldrb    r3, [r1]
-    cbz     r3, .Lstrcpy_unalign7_copy5bytes
-    ldrb    r4, [r1, #1]
-    cbz     r4, .Lstrcpy_unalign7_copy6bytes
-    ldrb    r5, [r1, #2]
-    cbz     r5, .Lstrcpy_unalign7_copy7bytes
-
-    ldr     r3, [r1], #4
-    pld     [r1, #64]
-
-    lsrs    ip, r3, #24
-    stmia   r0!, {r2, r3}
-    beq     .Lstrcpy_unalign_return
-    b       .Lstrcpy_unalign7
-
-.Lstrcpy_unalign7_copy5bytes:
-    stmia   r0!, {r2}
-    strb    r3, [r0]
-.Lstrcpy_unalign_return:
-    m_ret   inst=pop
-
-.Lstrcpy_unalign7_copy6bytes:
-    stmia   r0!, {r2}
-    strb    r3, [r0], #1
-    strb    r4, [r0], #1
-    m_ret   inst=pop
-
-.Lstrcpy_unalign7_copy7bytes:
-    stmia   r0!, {r2}
-    strb    r3, [r0], #1
-    strb    r4, [r0], #1
-    strb    r5, [r0], #1
-    m_ret   inst=pop
-
-    .p2align 2
-    // Can read 6 bytes before possibly crossing a page.
-.Lstrcpy_unalign6:
-    ldr     r2, [r1], #4
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_first_register
-
-    ldrb    r4, [r1]
-    cbz     r4, .Lstrcpy_unalign_copy5bytes
-    ldrb    r5, [r1, #1]
-    cbz     r5, .Lstrcpy_unalign_copy6bytes
-
-    ldr     r3, [r1], #4
-    pld     [r1, #64]
-
-    tst     r3, #0xff0000
-    beq     .Lstrcpy_unalign6_copy7bytes
-    lsrs    ip, r3, #24
-    stmia   r0!, {r2, r3}
-    beq     .Lstrcpy_unalign_return
-    b       .Lstrcpy_unalign6
-
-.Lstrcpy_unalign6_copy7bytes:
-    stmia   r0!, {r2}
-    strh    r3, [r0], #2
-    lsr     r3, #16
-    strb    r3, [r0]
-    m_ret   inst=pop
-
-    .p2align 2
-    // Can read 5 bytes before possibly crossing a page.
-.Lstrcpy_unalign5:
-    ldr     r2, [r1], #4
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_first_register
-
-    ldrb    r4, [r1]
-    cbz     r4, .Lstrcpy_unalign_copy5bytes
-
-    ldr     r3, [r1], #4
-
-    pld     [r1, #64]
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_second_register
-
-    stmia   r0!, {r2, r3}
-    b       .Lstrcpy_unalign5
-
-.Lstrcpy_unalign_copy5bytes:
-    stmia   r0!, {r2}
-    strb    r4, [r0]
-    m_ret   inst=pop
-
-.Lstrcpy_unalign_copy6bytes:
-    stmia   r0!, {r2}
-    strb    r4, [r0], #1
-    strb    r5, [r0]
-    m_ret   inst=pop
-
-    .p2align 2
-    // Can read 4 bytes before possibly crossing a page.
-.Lstrcpy_unalign4:
-    ldmia   r1!, {r2}
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_first_register
-
-    ldmia   r1!, {r3}
-    pld     [r1, #64]
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_second_register
-
-    stmia   r0!, {r2, r3}
-    b       .Lstrcpy_unalign4
-
-    .p2align 2
-    // Can read 3 bytes before possibly crossing a page.
-.Lstrcpy_unalign3:
-    ldrb    r2, [r1]
-    cbz     r2, .Lstrcpy_unalign3_copy1byte
-    ldrb    r3, [r1, #1]
-    cbz     r3, .Lstrcpy_unalign3_copy2bytes
-    ldrb    r4, [r1, #2]
-    cbz     r4, .Lstrcpy_unalign3_copy3bytes
-
-    ldr     r2, [r1], #4
-    ldr     r3, [r1], #4
-
-    pld     [r1, #64]
-
-    lsrs    lr, r2, #24
-    beq     .Lstrcpy_unalign_copy4bytes
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_second_register
-
-    stmia   r0!, {r2, r3}
-    b       .Lstrcpy_unalign3
-
-.Lstrcpy_unalign3_copy1byte:
-    strb    r2, [r0]
-    m_ret   inst=pop
-
-.Lstrcpy_unalign3_copy2bytes:
-    strb    r2, [r0], #1
-    strb    r3, [r0]
-    m_ret   inst=pop
-
-.Lstrcpy_unalign3_copy3bytes:
-    strb    r2, [r0], #1
-    strb    r3, [r0], #1
-    strb    r4, [r0]
-    m_ret   inst=pop
-
-    .p2align 2
-    // Can read 2 bytes before possibly crossing a page.
-.Lstrcpy_unalign2:
-    ldrb    r2, [r1]
-    cbz     r2, .Lstrcpy_unalign_copy1byte
-    ldrb    r3, [r1, #1]
-    cbz     r3, .Lstrcpy_unalign_copy2bytes
-
-    ldr     r2, [r1], #4
-    ldr     r3, [r1], #4
-    pld     [r1, #64]
-
-    tst     r2, #0xff0000
-    beq     .Lstrcpy_unalign_copy3bytes
-    lsrs    ip, r2, #24
-    beq     .Lstrcpy_unalign_copy4bytes
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_second_register
-
-    stmia   r0!, {r2, r3}
-    b       .Lstrcpy_unalign2
-
-    .p2align 2
-    // Can read 1 byte before possibly crossing a page.
-.Lstrcpy_unalign1:
-    ldrb    r2, [r1]
-    cbz     r2, .Lstrcpy_unalign_copy1byte
-
-    ldr     r2, [r1], #4
-    ldr     r3, [r1], #4
-
-    pld     [r1, #64]
-
-    sub     ip, r2, #0x01010101
-    bic     ip, ip, r2
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_first_register
-
-    sub     ip, r3, #0x01010101
-    bic     ip, ip, r3
-    ands    ip, ip, #0x80808080
-    bne     .Lstrcpy_zero_in_second_register
-
-    stmia   r0!, {r2, r3}
-    b       .Lstrcpy_unalign1
-
-.Lstrcpy_unalign_copy1byte:
-    strb    r2, [r0]
-    m_ret   inst=pop
-
-.Lstrcpy_unalign_copy2bytes:
-    strb    r2, [r0], #1
-    strb    r3, [r0]
-    m_ret   inst=pop
-
-.Lstrcpy_unalign_copy3bytes:
-    strh    r2, [r0], #2
-    lsr     r2, #16
-    strb    r2, [r0]
-    m_ret   inst=pop
-
-.Lstrcpy_unalign_copy4bytes:
-    stmia   r0, {r2}
-    m_ret   inst=pop
-END(strcpy)
+#define STRCPY
+#include "string_copy.S"
diff --git a/libc/arch-arm/cortex-a9/bionic/string_copy.S b/libc/arch-arm/cortex-a9/bionic/string_copy.S
new file mode 100644
index 0000000..caf5a11
--- /dev/null
+++ b/libc/arch-arm/cortex-a9/bionic/string_copy.S
@@ -0,0 +1,535 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * Copyright (c) 2013 ARM Ltd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ *    products derived from this software without specific prior written
+ *    permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(STPCPY) && !defined(STRCPY)
+#error "Either STPCPY or STRCPY must be defined."
+#endif
+
+#include <private/bionic_asm.h>
+
+    .syntax unified
+
+    .thumb
+    .thumb_func
+
+#if defined(STPCPY)
+    .macro m_push
+    push    {r4, r5, lr}
+    .cfi_def_cfa_offset 12
+    .cfi_rel_offset r4, 0
+    .cfi_rel_offset r5, 4
+    .cfi_rel_offset lr, 8
+    .endm // m_push
+#else
+    .macro m_push
+    push    {r0, r4, r5, lr}
+    .cfi_def_cfa_offset 16
+    .cfi_rel_offset r0, 0
+    .cfi_rel_offset r4, 4
+    .cfi_rel_offset r5, 8
+    .cfi_rel_offset lr, 12
+    .endm // m_push
+#endif
+
+#if defined(STPCPY)
+    .macro m_ret inst
+    \inst   {r4, r5, pc}
+    .endm // m_ret
+#else
+    .macro m_ret inst
+    \inst   {r0, r4, r5, pc}
+    .endm // m_ret
+#endif
+
+    .macro m_copy_byte reg, cmd, label
+    ldrb    \reg, [r1], #1
+    strb    \reg, [r0], #1
+    \cmd    \reg, \label
+    .endm // m_copy_byte
+
+#if defined(STPCPY)
+ENTRY(stpcpy)
+#else
+ENTRY(strcpy)
+#endif
+    // Unroll the first 8 bytes that will be copied.
+    m_push
+    m_copy_byte reg=r2, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r3, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r4, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r5, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r2, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r3, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r4, cmd=cbz, label=.Lstringcopy_finish
+    m_copy_byte reg=r5, cmd=cbnz, label=.Lstringcopy_continue
+
+.Lstringcopy_finish:
+#if defined(STPCPY)
+    sub     r0, r0, #1
+#endif
+    m_ret   inst=pop
+
+.Lstringcopy_continue:
+    pld     [r1, #0]
+    ands    r3, r0, #7
+    bne     .Lstringcopy_align_dst
+
+.Lstringcopy_check_src_align:
+    // At this point dst is aligned to a double word, check if src
+    // is also aligned to a double word.
+    ands    r3, r1, #7
+    bne     .Lstringcopy_unaligned_copy
+
+    .p2align 2
+.Lstringcopy_mainloop:
+    ldmia   r1!, {r2, r3}
+
+    pld     [r1, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       .Lstringcopy_mainloop
+
+.Lstringcopy_zero_in_first_register:
+    lsls    lr, ip, #17
+    itt     ne
+    strbne  r2, [r0]
+    m_ret   inst=popne
+    itt     cs
+#if defined(STPCPY)
+    strhcs  r2, [r0], #1
+#else
+    strhcs  r2, [r0]
+#endif
+    m_ret   inst=popcs
+    lsls    ip, ip, #1
+    itt     eq
+#if defined(STPCPY)
+    streq   r2, [r0], #3
+#else
+    streq   r2, [r0]
+#endif
+    m_ret   inst=popeq
+    strh    r2, [r0], #2
+    lsr     r3, r2, #16
+    strb    r3, [r0]
+    m_ret   inst=pop
+
+.Lstringcopy_zero_in_second_register:
+    lsls    lr, ip, #17
+    ittt    ne
+    stmiane r0!, {r2}
+    strbne  r3, [r0]
+    m_ret   inst=popne
+    ittt    cs
+    strcs   r2, [r0], #4
+#if defined(STPCPY)
+    strhcs  r3, [r0], #1
+#else
+    strhcs  r3, [r0]
+#endif
+    m_ret   inst=popcs
+    lsls    ip, ip, #1
+#if defined(STPCPY)
+    ittt    eq
+#else
+    itt     eq
+#endif
+    stmiaeq r0, {r2, r3}
+#if defined(STPCPY)
+    addeq   r0, r0, #7
+#endif
+    m_ret   inst=popeq
+    stmia   r0!, {r2}
+    strh    r3, [r0], #2
+    lsr     r4, r3, #16
+    strb    r4, [r0]
+    m_ret   inst=pop
+
+.Lstringcopy_align_dst:
+    // Align to a double word (64 bits).
+    rsb     r3, r3, #8
+    lsls    ip, r3, #31
+    beq     .Lstringcopy_align_to_32
+
+    ldrb    r2, [r1], #1
+    strb    r2, [r0], #1
+    cbz     r2, .Lstringcopy_complete
+
+.Lstringcopy_align_to_32:
+    bcc     .Lstringcopy_align_to_64
+
+    ldrb    r4, [r1], #1
+    strb    r4, [r0], #1
+    cmp     r4, #0
+#if defined(STPCPY)
+    itt     eq
+    subeq   r0, r0, #1
+#else
+    it      eq
+#endif
+    m_ret   inst=popeq
+    ldrb    r5, [r1], #1
+    strb    r5, [r0], #1
+    cmp     r5, #0
+#if defined(STPCPY)
+    itt     eq
+    subeq   r0, r0, #1
+#else
+    it      eq
+#endif
+    m_ret   inst=popeq
+
+.Lstringcopy_align_to_64:
+    tst     r3, #4
+    beq     .Lstringcopy_check_src_align
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+    stmia   r0!, {r2}
+    b       .Lstringcopy_check_src_align
+
+.Lstringcopy_complete:
+#if defined(STPCPY)
+    sub     r0, r0, #1
+#endif
+    m_ret   inst=pop
+
+.Lstringcopy_unaligned_copy:
+    // Dst is aligned to a double word, while src is at an unknown alignment.
+    // There are 7 different versions of the unaligned copy code
+    // to prevent overreading the src. The mainloop of every single version
+    // will store 64 bits per loop. The difference is how much of src can
+    // be read without potentially crossing a page boundary.
+    tbb     [pc, r3]
+.Lstringcopy_unaligned_branchtable:
+    .byte 0
+    .byte ((.Lstringcopy_unalign7 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign6 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign5 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign4 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign3 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign2 - .Lstringcopy_unaligned_branchtable)/2)
+    .byte ((.Lstringcopy_unalign1 - .Lstringcopy_unaligned_branchtable)/2)
+
+    .p2align 2
+    // Can read 7 bytes before possibly crossing a page.
+.Lstringcopy_unalign7:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    ldrb    r3, [r1]
+    cbz     r3, .Lstringcopy_unalign7_copy5bytes
+    ldrb    r4, [r1, #1]
+    cbz     r4, .Lstringcopy_unalign7_copy6bytes
+    ldrb    r5, [r1, #2]
+    cbz     r5, .Lstringcopy_unalign7_copy7bytes
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    lsrs    ip, r3, #24
+    stmia   r0!, {r2, r3}
+#if defined(STPCPY)
+    beq     .Lstringcopy_finish
+#else
+    beq     .Lstringcopy_unalign_return
+#endif
+    b       .Lstringcopy_unalign7
+
+.Lstringcopy_unalign7_copy5bytes:
+    stmia   r0!, {r2}
+    strb    r3, [r0]
+.Lstringcopy_unalign_return:
+    m_ret   inst=pop
+
+.Lstringcopy_unalign7_copy6bytes:
+    stmia   r0!, {r2}
+    strb    r3, [r0], #1
+    strb    r4, [r0]
+    m_ret   inst=pop
+
+.Lstringcopy_unalign7_copy7bytes:
+    stmia   r0!, {r2}
+    strb    r3, [r0], #1
+    strb    r4, [r0], #1
+    strb    r5, [r0]
+    m_ret   inst=pop
+
+    .p2align 2
+    // Can read 6 bytes before possibly crossing a page.
+.Lstringcopy_unalign6:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    ldrb    r4, [r1]
+    cbz     r4, .Lstringcopy_unalign_copy5bytes
+    ldrb    r5, [r1, #1]
+    cbz     r5, .Lstringcopy_unalign_copy6bytes
+
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    tst     r3, #0xff0000
+    beq     .Lstringcopy_unalign6_copy7bytes
+    lsrs    ip, r3, #24
+    stmia   r0!, {r2, r3}
+#if defined(STPCPY)
+    beq     .Lstringcopy_finish
+#else
+    beq     .Lstringcopy_unalign_return
+#endif
+    b       .Lstringcopy_unalign6
+
+.Lstringcopy_unalign6_copy7bytes:
+    stmia   r0!, {r2}
+    strh    r3, [r0], #2
+    lsr     r3, #16
+    strb    r3, [r0]
+    m_ret   inst=pop
+
+    .p2align 2
+    // Can read 5 bytes before possibly crossing a page.
+.Lstringcopy_unalign5:
+    ldr     r2, [r1], #4
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    ldrb    r4, [r1]
+    cbz     r4, .Lstringcopy_unalign_copy5bytes
+
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       .Lstringcopy_unalign5
+
+.Lstringcopy_unalign_copy5bytes:
+    stmia   r0!, {r2}
+    strb    r4, [r0]
+    m_ret   inst=pop
+
+.Lstringcopy_unalign_copy6bytes:
+    stmia   r0!, {r2}
+    strb    r4, [r0], #1
+    strb    r5, [r0]
+    m_ret   inst=pop
+
+    .p2align 2
+    // Can read 4 bytes before possibly crossing a page.
+.Lstringcopy_unalign4:
+    ldmia   r1!, {r2}
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    ldmia   r1!, {r3}
+    pld     [r1, #64]
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       .Lstringcopy_unalign4
+
+    .p2align 2
+    // Can read 3 bytes before possibly crossing a page.
+.Lstringcopy_unalign3:
+    ldrb    r2, [r1]
+    cbz     r2, .Lstringcopy_unalign3_copy1byte
+    ldrb    r3, [r1, #1]
+    cbz     r3, .Lstringcopy_unalign3_copy2bytes
+    ldrb    r4, [r1, #2]
+    cbz     r4, .Lstringcopy_unalign3_copy3bytes
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    lsrs    lr, r2, #24
+    beq     .Lstringcopy_unalign_copy4bytes
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       .Lstringcopy_unalign3
+
+.Lstringcopy_unalign3_copy1byte:
+    strb    r2, [r0]
+    m_ret   inst=pop
+
+.Lstringcopy_unalign3_copy2bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0]
+    m_ret   inst=pop
+
+.Lstringcopy_unalign3_copy3bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0], #1
+    strb    r4, [r0]
+    m_ret   inst=pop
+
+    .p2align 2
+    // Can read 2 bytes before possibly crossing a page.
+.Lstringcopy_unalign2:
+    ldrb    r2, [r1]
+    cbz     r2, .Lstringcopy_unalign_copy1byte
+    ldrb    r3, [r1, #1]
+    cbz     r3, .Lstringcopy_unalign_copy2bytes
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+    pld     [r1, #64]
+
+    tst     r2, #0xff0000
+    beq     .Lstringcopy_unalign_copy3bytes
+    lsrs    ip, r2, #24
+    beq     .Lstringcopy_unalign_copy4bytes
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       .Lstringcopy_unalign2
+
+    .p2align 2
+    // Can read 1 byte before possibly crossing a page.
+.Lstringcopy_unalign1:
+    ldrb    r2, [r1]
+    cbz     r2, .Lstringcopy_unalign_copy1byte
+
+    ldr     r2, [r1], #4
+    ldr     r3, [r1], #4
+
+    pld     [r1, #64]
+
+    sub     ip, r2, #0x01010101
+    bic     ip, ip, r2
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_first_register
+
+    sub     ip, r3, #0x01010101
+    bic     ip, ip, r3
+    ands    ip, ip, #0x80808080
+    bne     .Lstringcopy_zero_in_second_register
+
+    stmia   r0!, {r2, r3}
+    b       .Lstringcopy_unalign1
+
+.Lstringcopy_unalign_copy1byte:
+    strb    r2, [r0]
+    m_ret   inst=pop
+
+.Lstringcopy_unalign_copy2bytes:
+    strb    r2, [r0], #1
+    strb    r3, [r0]
+    m_ret   inst=pop
+
+.Lstringcopy_unalign_copy3bytes:
+    strh    r2, [r0], #2
+    lsr     r2, #16
+    strb    r2, [r0]
+    m_ret   inst=pop
+
+.Lstringcopy_unalign_copy4bytes:
+    stmia   r0, {r2}
+#if defined(STPCPY)
+    add     r0, r0, #3
+#endif
+    m_ret   inst=pop
+#if defined(STPCPY)
+END(stpcpy)
+#else
+END(strcpy)
+#endif
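
In the unaligned paths above, the per-label comments record how many bytes can be read before a load might cross into the next page; byte loads check for the terminating NUL before each further word load, so the routine never faults by reading past the end of the string, and every full word is tested for an embedded NUL with the sub/bic/ands sequence against 0x01010101 and 0x80808080. When the file is assembled with STPCPY defined, the shared code returns a pointer to the terminating NUL rather than to the start of the destination, which is why the copy-4-bytes exit adds 3 to r0 under STPCPY. A minimal C sketch of the NUL-in-word test (the helper name is illustrative, not part of this change):

    #include <stdint.h>
    #include <stdio.h>

    /* Non-zero exactly when some byte of w is 0x00 -- the same test the
     * assembly performs with "sub ip, rX, #0x01010101; bic ip, ip, rX;
     * ands ip, ip, #0x80808080". */
    static int word_has_zero_byte(uint32_t w) {
      return ((w - 0x01010101u) & ~w & 0x80808080u) != 0;
    }

    int main(void) {
      uint32_t abcd = 0x64636261u;   /* "abcd" loaded little-endian: no NUL */
      uint32_t abc0 = 0x00636261u;   /* "abc\0": NUL in the top byte */
      printf("%d %d\n", word_has_zero_byte(abcd), word_has_zero_byte(abc0));
      return 0;
    }

If no byte of w is zero, the subtraction generates no borrows, and any byte whose high bit it does set (0x81..0xff) is masked off by ~w, so the result is zero; the lowest 0x00 byte, by contrast, becomes 0xff and its high bit survives the mask.
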
diff --git a/libc/arch-arm/cortex-a9/cortex-a9.mk b/libc/arch-arm/cortex-a9/cortex-a9.mk
index 9b99387..c82db3b 100644
--- a/libc/arch-arm/cortex-a9/cortex-a9.mk
+++ b/libc/arch-arm/cortex-a9/cortex-a9.mk
@@ -1,10 +1,11 @@
 libc_bionic_src_files_arm += \
     arch-arm/cortex-a9/bionic/memcpy.S \
     arch-arm/cortex-a9/bionic/memset.S \
+    arch-arm/cortex-a9/bionic/stpcpy.S \
     arch-arm/cortex-a9/bionic/strcat.S \
+    arch-arm/cortex-a9/bionic/__strcat_chk.S \
     arch-arm/cortex-a9/bionic/strcmp.S \
     arch-arm/cortex-a9/bionic/strcpy.S \
-    arch-arm/cortex-a9/bionic/strlen.S \
-    arch-arm/cortex-a9/bionic/__strcat_chk.S \
     arch-arm/cortex-a9/bionic/__strcpy_chk.S \
+    arch-arm/cortex-a9/bionic/strlen.S \
     bionic/memmove.c \
diff --git a/libc/arch-arm/denver/denver.mk b/libc/arch-arm/denver/denver.mk
index 6989187..0bc52a2 100644
--- a/libc/arch-arm/denver/denver.mk
+++ b/libc/arch-arm/denver/denver.mk
@@ -7,7 +7,8 @@
 
 # Use cortex-a15 versions of strcat/strcpy/strlen.
 libc_bionic_src_files_arm += \
+    arch-arm/cortex-a15/bionic/stpcpy.S \
     arch-arm/cortex-a15/bionic/strcat.S \
+    arch-arm/cortex-a15/bionic/strcmp.S \
     arch-arm/cortex-a15/bionic/strcpy.S \
     arch-arm/cortex-a15/bionic/strlen.S \
-    arch-arm/cortex-a15/bionic/strcmp.S \
diff --git a/libc/arch-arm/generic/generic.mk b/libc/arch-arm/generic/generic.mk
index 2456e6e..95be867 100644
--- a/libc/arch-arm/generic/generic.mk
+++ b/libc/arch-arm/generic/generic.mk
@@ -7,4 +7,5 @@
     bionic/memmove.c \
     bionic/__strcat_chk.cpp \
     bionic/__strcpy_chk.cpp \
+    upstream-openbsd/lib/libc/string/stpcpy.c \
     upstream-openbsd/lib/libc/string/strcat.c \
diff --git a/libc/arch-arm/krait/krait.mk b/libc/arch-arm/krait/krait.mk
index 631ab68..1bb7b0a 100644
--- a/libc/arch-arm/krait/krait.mk
+++ b/libc/arch-arm/krait/krait.mk
@@ -7,6 +7,7 @@
 
 # Use cortex-a15 versions of strcat/strcpy/strlen and standard memmove
 libc_bionic_src_files_arm += \
+    arch-arm/cortex-a15/bionic/stpcpy.S \
     arch-arm/cortex-a15/bionic/strcat.S \
     arch-arm/cortex-a15/bionic/strcpy.S \
     arch-arm/cortex-a15/bionic/strlen.S \
diff --git a/libc/include/stdatomic.h b/libc/include/stdatomic.h
index 3db25a7..bcea859 100644
--- a/libc/include/stdatomic.h
+++ b/libc/include/stdatomic.h
@@ -89,6 +89,7 @@
 using std::memory_order;
 using std::memory_order_relaxed;
 using std::memory_order_consume;
+using std::memory_order_acquire;
 using std::memory_order_release;
 using std::memory_order_acq_rel;
 using std::memory_order_seq_cst;
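
The stdatomic.h hunk adds the one using-declaration missing from the list: C++ code that includes <stdatomic.h> (which, as the surrounding context shows, hoists the std:: names from <atomic>) previously could not spell memory_order_acquire unqualified even though every other memory order was available. A minimal C sketch of the release/acquire pairing those names exist for; the function and variable names are illustrative only:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int ready = ATOMIC_VAR_INIT(0);
    static int payload;

    /* Writer: fill in the payload, then release-store the flag. */
    static void publish(int value) {
      payload = value;
      atomic_store_explicit(&ready, 1, memory_order_release);
    }

    /* Reader: acquire-load the flag; once it reads 1, the payload is visible. */
    static int consume(void) {
      while (atomic_load_explicit(&ready, memory_order_acquire) == 0) {
        /* spin until the release store is observed */
      }
      return payload;
    }

    int main(void) {
      publish(42);
      printf("%d\n", consume());
      return 0;
    }

Built as C++ against this header, the unqualified memory_order_acquire in consume() is exactly the spelling the added using-declaration makes valid.
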
diff --git a/libc/tools/zoneinfo/update-tzdata.py b/libc/tools/zoneinfo/update-tzdata.py
index e800e8f..f5681be 100755
--- a/libc/tools/zoneinfo/update-tzdata.py
+++ b/libc/tools/zoneinfo/update-tzdata.py
@@ -123,18 +123,21 @@
   print 'Making ICU data...'
   subprocess.check_call(['make'])
 
-  # Copy the output files to their ultimate destination.
+  # Copy the source file to its ultimate destination.
   icu_txt_data_dir = '%s/data/misc' % icu_dir
   print 'Copying zoneinfo64.txt to %s ...' % icu_txt_data_dir
   shutil.copy('zoneinfo64.txt', icu_txt_data_dir)
 
+  # Regenerate the .dat file.
   os.chdir(icu_working_dir)
+  subprocess.check_call(['make', '-j32'])
+
+  # Copy the .dat file to its ultimate destination.
   icu_dat_data_dir = '%s/stubdata' % icu_dir
   datfiles = glob.glob('data/out/tmp/icudt??l.dat')
   if len(datfiles) != 1:
     print 'ERROR: Unexpectedly found %d .dat files (%s). Halting.' % (len(datfiles), datfiles)
     sys.exit(1)
-
   datfile = datfiles[0]
   print 'Copying %s to %s ...' % (datfile, icu_dat_data_dir)
   shutil.copy(datfile, icu_dat_data_dir)
diff --git a/libc/upstream-freebsd/lib/libc/stdlib/realpath.c b/libc/upstream-freebsd/lib/libc/stdlib/realpath.c
index 8fd5457..c4bd953 100644
--- a/libc/upstream-freebsd/lib/libc/stdlib/realpath.c
+++ b/libc/upstream-freebsd/lib/libc/stdlib/realpath.c
@@ -132,26 +132,7 @@
 			resolved[resolved_len] = '\0';
 		}
 		if (next_token[0] == '\0') {
-			/*
-			 * Handle consequential slashes.  The path
-			 * before slash shall point to a directory.
-			 *
-			 * Only the trailing slashes are not covered
-			 * by other checks in the loop, but we verify
-			 * the prefix for any (rare) "//" or "/\0"
-			 * occurence to not implement lookahead.
-			 */
-			if (lstat(resolved, &sb) != 0) {
-				if (m)
-					free(resolved);
-				return (NULL);
-			}
-			if (!S_ISDIR(sb.st_mode)) {
-				if (m)
-					free(resolved);
-				errno = ENOTDIR;
-				return (NULL);
-			}
+			/* Handle consequential slashes. */
 			continue;
 		}
 		else if (strcmp(next_token, ".") == 0)
@@ -236,6 +217,11 @@
 				}
 			}
 			left_len = strlcpy(left, symlink, sizeof(left));
+		} else if (!S_ISDIR(sb.st_mode) && p != NULL) {
+			if (m)
+				free(resolved);
+			errno = ENOTDIR;
+			return (NULL);
 		}
 	}
 
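
The realpath.c change drops the eager lstat()/S_ISDIR() check from the empty-component branch (trailing or doubled slashes) and instead reports ENOTDIR from the stat result the loop already collects, when a resolved component is not a directory and more of the path (p != NULL) remains to be walked. A minimal C sketch of the resulting user-visible behavior, with illustrative paths:

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
      char buf[PATH_MAX];

      /* A non-directory is fine as the final component. */
      if (realpath("/dev/null", buf) != NULL) {
        printf("resolved: %s\n", buf);
      }

      /* Any component after a non-directory must fail with ENOTDIR. */
      errno = 0;
      if (realpath("/dev/null/.", buf) == NULL && errno == ENOTDIR) {
        printf("ENOTDIR, as expected\n");
      }
      return 0;
    }

The new stdlib_test.cpp cases below assert the same contract for "/dev/null/." and "/dev/null/..".
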
diff --git a/libc/zoneinfo/tzdata b/libc/zoneinfo/tzdata
index 8d574f5..33bfe6b 100644
--- a/libc/zoneinfo/tzdata
+++ b/libc/zoneinfo/tzdata
Binary files differ
diff --git a/libm/Android.mk b/libm/Android.mk
index 60f4cb8..69a17a1 100644
--- a/libm/Android.mk
+++ b/libm/Android.mk
@@ -1,6 +1,8 @@
 ifneq ($(TARGET_USE_PRIVATE_LIBM),true)
 LOCAL_PATH:= $(call my-dir)
 
+bionic_coverage := false
+
 # TODO: this comes from upstream's libc, not libm, but it's an
 # implementation detail that should have hidden visibility, so it needs
 # to be in whatever library the math code is in.
@@ -273,6 +275,8 @@
 LOCAL_SRC_FILES := $(libm_common_src_files)
 LOCAL_SYSTEM_SHARED_LIBRARIES := libc
 
+LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
+
 # arch-specific settings
 LOCAL_C_INCLUDES_arm := $(LOCAL_PATH)/arm
 LOCAL_SRC_FILES_arm := arm/fenv.c
@@ -306,6 +310,8 @@
 LOCAL_SYSTEM_SHARED_LIBRARIES := libc
 LOCAL_WHOLE_STATIC_LIBRARIES := libm
 
+LOCAL_NATIVE_COVERAGE := $(bionic_coverage)
+
 # We'd really like to do this for all architectures, but since this wasn't done
 # before, these symbols must continue to be exported on LP32 for binary
 # compatibility.
diff --git a/linker/linker.cpp b/linker/linker.cpp
index d86379e..a8c2b1e 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -2081,8 +2081,6 @@
         }
         break;
 #if defined(__mips__)
-      case DT_STRSZ:
-        break;
       case DT_MIPS_RLD_MAP:
         // Set the DT_MIPS_RLD_MAP entry to the address of _r_debug for GDB.
         {
diff --git a/tests/pthread_test.cpp b/tests/pthread_test.cpp
index 9dbb4f5..01de09f 100644
--- a/tests/pthread_test.cpp
+++ b/tests/pthread_test.cpp
@@ -38,37 +38,71 @@
   ASSERT_EQ(EINVAL, pthread_key_delete(key));
 }
 
-TEST(pthread, pthread_key_create_lots) {
-#if defined(__BIONIC__) // glibc uses keys internally that its sysconf value doesn't account for.
+TEST(pthread, pthread_keys_max) {
   // POSIX says PTHREAD_KEYS_MAX should be at least 128.
   ASSERT_GE(PTHREAD_KEYS_MAX, 128);
+}
 
+TEST(pthread, _SC_THREAD_KEYS_MAX_big_enough_for_POSIX) {
+  // sysconf shouldn't return a smaller value.
+  int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
+  ASSERT_GE(sysconf_max, PTHREAD_KEYS_MAX);
+}
+
+TEST(pthread, pthread_key_many_distinct) {
+  // We should be able to allocate at least this many keys.
+  int nkeys = sysconf(_SC_THREAD_KEYS_MAX) / 2;
+  std::vector<pthread_key_t> keys;
+
+  auto scope_guard = make_scope_guard([&keys]{
+    for (auto key : keys) {
+      EXPECT_EQ(0, pthread_key_delete(key));
+    }
+  });
+
+  for (int i = 0; i < nkeys; ++i) {
+    pthread_key_t key;
+    // If this fails, it's likely that GLOBAL_INIT_THREAD_LOCAL_BUFFER_COUNT is
+    // wrong.
+    ASSERT_EQ(0, pthread_key_create(&key, NULL)) << i << " of " << nkeys;
+    keys.push_back(key);
+    ASSERT_EQ(0, pthread_setspecific(key, reinterpret_cast<void*>(i)));
+  }
+
+  for (int i = keys.size() - 1; i >= 0; --i) {
+    ASSERT_EQ(reinterpret_cast<void*>(i), pthread_getspecific(keys.back()));
+    pthread_key_t key = keys.back();
+    keys.pop_back();
+    ASSERT_EQ(0, pthread_key_delete(key));
+  }
+}
+
+TEST(pthread, pthread_key_EAGAIN) {
   int sysconf_max = sysconf(_SC_THREAD_KEYS_MAX);
 
-  // sysconf shouldn't return a smaller value.
-  ASSERT_GE(sysconf_max, PTHREAD_KEYS_MAX);
-
-  // We can allocate _SC_THREAD_KEYS_MAX keys.
-  sysconf_max -= 2; // (Except that gtest takes two for itself.)
   std::vector<pthread_key_t> keys;
-  for (int i = 0; i < sysconf_max; ++i) {
+  int rv = 0;
+  // Two keys are used by gtest, so sysconf_max should be more than we are
+  // allowed to allocate now.
+  for (int i = 0; i < sysconf_max; i++) {
     pthread_key_t key;
-    // If this fails, it's likely that GLOBAL_INIT_THREAD_LOCAL_BUFFER_COUNT is wrong.
-    ASSERT_EQ(0, pthread_key_create(&key, NULL)) << i << " of " << sysconf_max;
+    rv = pthread_key_create(&key, NULL);
+    if (rv == EAGAIN) {
+      break;
+    }
+    EXPECT_EQ(0, rv);
     keys.push_back(key);
   }
 
-  // ...and that really is the maximum.
-  pthread_key_t key;
-  ASSERT_EQ(EAGAIN, pthread_key_create(&key, NULL));
-
-  // (Don't leak all those keys!)
-  for (size_t i = 0; i < keys.size(); ++i) {
-    ASSERT_EQ(0, pthread_key_delete(keys[i]));
+  // Don't leak keys.
+  for (auto key : keys) {
+    EXPECT_EQ(0, pthread_key_delete(key));
   }
-#else // __BIONIC__
-  GTEST_LOG_(INFO) << "This test does nothing.\n";
-#endif // __BIONIC__
+  keys.clear();
+
+  // We should have eventually reached the maximum number of keys and received
+  // EAGAIN.
+  ASSERT_EQ(EAGAIN, rv);
 }
 
 TEST(pthread, pthread_key_delete) {
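
The rewritten tests split the old pthread_key_create_lots into focused cases: the PTHREAD_KEYS_MAX constant check, the sysconf consistency check, a bulk create/set/get/delete pass over half of _SC_THREAD_KEYS_MAX, and a loop that creates keys until pthread_key_create() reports EAGAIN (gtest already holds two keys, so iterating sysconf_max times is guaranteed to hit the limit). For reference, a minimal stand-alone C sketch of the thread-specific-data calls those tests exercise:

    #include <pthread.h>
    #include <stdio.h>

    int main(void) {
      pthread_key_t key;
      static int value = 42;

      /* Create a key, attach a per-thread value, read it back, delete it. */
      if (pthread_key_create(&key, NULL) != 0) {
        return 1;
      }
      pthread_setspecific(key, &value);
      printf("value: %d\n", *(int*) pthread_getspecific(key));
      pthread_key_delete(key);
      return 0;
    }

Each thread sees its own value for a given key; the destructor argument (NULL here) would run per thread at exit for any non-NULL stored value.
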
diff --git a/tests/stdlib_test.cpp b/tests/stdlib_test.cpp
index e814ef7..9ad96fd 100644
--- a/tests/stdlib_test.cpp
+++ b/tests/stdlib_test.cpp
@@ -99,6 +99,18 @@
   ASSERT_EQ(ENOENT, errno);
 }
 
+TEST(stdlib, realpath__component_after_non_directory) {
+  errno = 0;
+  char* p = realpath("/dev/null/.", NULL);
+  ASSERT_TRUE(p == NULL);
+  ASSERT_EQ(ENOTDIR, errno);
+
+  errno = 0;
+  p = realpath("/dev/null/..", NULL);
+  ASSERT_TRUE(p == NULL);
+  ASSERT_EQ(ENOTDIR, errno);
+}
+
 TEST(stdlib, realpath) {
   // Get the name of this executable.
   char executable_path[PATH_MAX];