Merge "Update localtime.c."
diff --git a/libc/arch-arm64/arm64.mk b/libc/arch-arm64/arm64.mk
index 2d34f52..e44ee31 100644
--- a/libc/arch-arm64/arm64.mk
+++ b/libc/arch-arm64/arm64.mk
@@ -57,7 +57,7 @@
 endif
 cpu_variant_mk := $(LOCAL_PATH)/arch-arm64/$(TARGET_CPU_VARIANT)/$(TARGET_CPU_VARIANT).mk
 ifeq ($(wildcard $(cpu_variant_mk)),)
-$(error "TARGET_CPU_VARIANT not set or set to an unknown value. Possible values are generic, generic-neon. Use generic for devices that do not have a CPU similar to any of the supported cpu variants.")
+$(error "TARGET_CPU_VARIANT not set or set to an unknown value. Possible values are generic, generic-neon, denver64. Use generic for devices that do not have a CPU similar to any of the supported cpu variants.")
 endif
 include $(cpu_variant_mk)
 libc_common_additional_dependencies += $(cpu_variank_mk)
diff --git a/libc/arch-arm64/denver64/bionic/memcpy.S b/libc/arch-arm64/denver64/bionic/memcpy.S
new file mode 100644
index 0000000..700f0d0
--- /dev/null
+++ b/libc/arch-arm64/denver64/bionic/memcpy.S
@@ -0,0 +1,205 @@
+/* Copyright (c) 2012, Linaro Limited
+   All rights reserved.
+   Copyright (c) 2014, NVIDIA Corporation.  All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are met:
+       * Redistributions of source code must retain the above copyright
+         notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above copyright
+         notice, this list of conditions and the following disclaimer in the
+         documentation and/or other materials provided with the distribution.
+       * Neither the name of the Linaro nor the
+         names of its contributors may be used to endorse or promote products
+         derived from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/* Assumptions:
+ *
+ * denver, ARMv8-a, AArch64
+ * Unaligned accesses
+ *
+ */
+
+#include <private/bionic_asm.h>
+
+#define dstin	x0
+#define src	x1
+#define count	x2
+#define tmp1	x3
+#define tmp1w	w3
+#define tmp2	x4
+#define tmp2w	w4
+#define tmp3	x5
+#define tmp3w	w5
+#define dst	x6
+
+#define A_l	x7
+#define A_h	x8
+#define B_l	x9
+#define B_h	x10
+#define C_l	x11
+#define C_h	x12
+#define D_l	x13
+#define D_h	x14
+
+#define QA_l	q0
+#define QA_h	q1
+#define QB_l	q2
+#define QB_h	q3
+
+ENTRY(memcpy)
+
+	mov	dst, dstin
+	cmp	count, #64
+	b.ge	.Lcpy_not_short
+	cmp	count, #15
+	b.le	.Ltail15tiny
+
+	/* Deal with small copies quickly by dropping straight into the
+	 * exit block.  */
+.Ltail63:
+	/* Copy up to 48 bytes of data.  At this point we only need the
+	 * bottom 6 bits of count to be accurate.  */
+	ands	tmp1, count, #0x30
+	b.eq	.Ltail15
+	add	dst, dst, tmp1
+	add	src, src, tmp1
+	cmp	tmp1w, #0x20
+	b.eq	1f
+	b.lt	2f
+	ldp	A_l, A_h, [src, #-48]
+	stp	A_l, A_h, [dst, #-48]
+1:
+	ldp	A_l, A_h, [src, #-32]
+	stp	A_l, A_h, [dst, #-32]
+2:
+	ldp	A_l, A_h, [src, #-16]
+	stp	A_l, A_h, [dst, #-16]
+
+.Ltail15:
+	ands	count, count, #15
+	b.eq	1f
+	add	src, src, count
+	ldp	A_l, A_h, [src, #-16]
+	add	dst, dst, count
+	stp	A_l, A_h, [dst, #-16]
+1:
+	ret
+
+.Ltail15tiny:
+	/* Copy up to 15 bytes of data.  Does not assume additional data
+	   being copied.  */
+	tbz	count, #3, 1f
+	ldr	tmp1, [src], #8
+	str	tmp1, [dst], #8
+1:
+	tbz	count, #2, 1f
+	ldr	tmp1w, [src], #4
+	str	tmp1w, [dst], #4
+1:
+	tbz	count, #1, 1f
+	ldrh	tmp1w, [src], #2
+	strh	tmp1w, [dst], #2
+1:
+	tbz	count, #0, 1f
+	ldrb	tmp1w, [src]
+	strb	tmp1w, [dst]
+1:
+	ret
+
+.Lcpy_not_short:
+	/* We don't much care about the alignment of DST, but we want SRC
+	 * to be 128-bit (16 byte) aligned so that we don't cross cache line
+	 * boundaries on both loads and stores.  */
+	neg	tmp2, src
+	ands	tmp2, tmp2, #15		/* Bytes to reach alignment.  */
+	b.eq	2f
+	sub	count, count, tmp2
+	/* Copy more data than needed; it's faster than jumping
+	 * around copying sub-Quadword quantities.  We know that
+	 * it can't overrun.  */
+	ldp	A_l, A_h, [src]
+	add	src, src, tmp2
+	stp	A_l, A_h, [dst]
+	add	dst, dst, tmp2
+	/* There may be less than 63 bytes to go now.  */
+	cmp	count, #63
+	b.le	.Ltail63
+2:
+	subs	count, count, #128
+	b.ge	.Lcpy_body_large
+	/* Less than 128 bytes to copy, so handle 64 here and then jump
+	 * to the tail.  */
+	ldp	QA_l, QA_h, [src]
+	ldp	QB_l, QB_h, [src, #32]
+	stp	QA_l, QA_h, [dst]
+	stp	QB_l, QB_h, [dst, #32]
+	tst	count, #0x3f
+	add	src, src, #64
+	add	dst, dst, #64
+	b.ne	.Ltail63
+	ret
+
+	/* Critical loop.  Start at a new cache line boundary.  Assuming
+	 * 64 bytes per line this ensures the entire loop is in one line.  */
+	.p2align 6
+.Lcpy_body_large:
+	cmp	count, 65536
+	b.hi	.Lcpy_body_huge
+	/* There are at least 128 bytes to copy.  */
+	ldp	QA_l, QA_h, [src, #0]
+	sub	dst, dst, #32		/* Pre-bias.  */
+	ldp	QB_l, QB_h, [src, #32]!	/* src += 64 - Pre-bias.  */
+1:
+	stp	QA_l, QA_h, [dst, #32]
+	ldp	QA_l, QA_h, [src, #32]
+	stp	QB_l, QB_h, [dst, #64]!
+	ldp	QB_l, QB_h, [src, #64]!
+
+	subs	count, count, #64
+	b.ge	1b
+
+	stp	QA_l, QA_h, [dst, #32]
+	stp	QB_l, QB_h, [dst, #64]
+	add	src, src, #32
+	add	dst, dst, #64 + 32
+	tst	count, #0x3f
+	b.ne	.Ltail63
+	ret
+.Lcpy_body_huge:
+	/* There are at least 128 bytes to copy.  */
+	ldp	QA_l, QA_h, [src, #0]
+	sub	dst, dst, #32		/* Pre-bias.  */
+	ldp	QB_l, QB_h, [src, #32]!
+1:
+	stnp	QA_l, QA_h, [dst, #32]
+	stnp	QB_l, QB_h, [dst, #64]
+	ldp	QA_l, QA_h, [src, #32]
+	ldp	QB_l, QB_h, [src, #64]!
+	add	dst, dst, #64
+
+	subs	count, count, #64
+	b.ge	1b
+
+	stnp	QA_l, QA_h, [dst, #32]
+	stnp	QB_l, QB_h, [dst, #64]
+	add	src, src, #32
+	add	dst, dst, #64 + 32
+	tst	count, #0x3f
+	b.ne	.Ltail63
+	ret
+
+END(memcpy)
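
The assembly above splits copies into size classes: at most 15 bytes (.Ltail15tiny), 16-63 bytes (.Ltail63/.Ltail15), 64-127 bytes (one 64-byte block plus tail), a 64-byte-per-iteration loop up to roughly 64 KiB, and a non-temporal-store path beyond that (.Lcpy_body_huge). A minimal host-side sanity check, not part of this change, that straddles those thresholds could look like the C sketch below; the sizes and the canary byte are arbitrary choices.

/* memcpy_check.c -- hypothetical test sketch, not part of this change. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int check_one(size_t n) {
    unsigned char* src = malloc(n + 1);
    unsigned char* dst = malloc(n + 1);
    if (src == NULL || dst == NULL) return 0;
    for (size_t i = 0; i < n; ++i) src[i] = (unsigned char)(i * 131u + 7u);
    dst[n] = 0xA5;                       /* canary: memcpy must not write past n */
    memcpy(dst, src, n);
    int ok = (n == 0 || memcmp(dst, src, n) == 0) && dst[n] == 0xA5;
    free(src);
    free(dst);
    return ok;
}

int main(void) {
    /* Sizes chosen to straddle the 15/63/127-byte and ~64 KiB thresholds. */
    size_t sizes[] = { 0, 1, 15, 16, 63, 64, 127, 128, 4096, 65536, 65537, 1u << 20 };
    for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); ++i) {
        if (!check_one(sizes[i])) {
            printf("FAIL at %zu bytes\n", sizes[i]);
            return 1;
        }
    }
    printf("ok\n");
    return 0;
}
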
diff --git a/libc/arch-arm64/denver64/bionic/memset.S b/libc/arch-arm64/denver64/bionic/memset.S
new file mode 100644
index 0000000..9127d89
--- /dev/null
+++ b/libc/arch-arm64/denver64/bionic/memset.S
@@ -0,0 +1,271 @@
+/* Copyright (c) 2012, Linaro Limited
+   All rights reserved.
+   Copyright (c) 2014, NVIDIA Corporation.  All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are met:
+       * Redistributions of source code must retain the above copyright
+         notice, this list of conditions and the following disclaimer.
+       * Redistributions in binary form must reproduce the above copyright
+         notice, this list of conditions and the following disclaimer in the
+         documentation and/or other materials provided with the distribution.
+       * Neither the name of the Linaro nor the
+         names of its contributors may be used to endorse or promote products
+         derived from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+/* Assumptions:
+ *
+ * denver, ARMv8-a, AArch64
+ * Unaligned accesses
+ *
+ */
+
+#include <private/bionic_asm.h>
+
+/* By default we assume that the DC instruction can be used to zero
+   data blocks more efficiently.  In some circumstances this might be
+   unsafe, for example in an asymmetric multiprocessor environment with
+   different DC clear lengths (neither the upper nor lower lengths are
+   safe to use).  The feature can be disabled by defining DONT_USE_DC.
+
+   If code may be run in a virtualized environment, then define
+   MAYBE_VIRT.  This will cause the code to cache the system register
+   values rather than re-reading them each call.  */
+
+#define dstin		x0
+#define val		w1
+#define count		x2
+#define tmp1		x3
+#define tmp1w		w3
+#define tmp2		x4
+#define tmp2w		w4
+#define zva_len_x	x5
+#define zva_len		w5
+#define zva_bits_x	x6
+
+#define A_l		x7
+#define A_lw		w7
+#define dst		x8
+#define tmp3w		w9
+
+#define QA_l		q0
+
+ENTRY(memset)
+
+	mov	dst, dstin		/* Preserve return value.  */
+	ands	A_lw, val, #255
+#ifndef DONT_USE_DC
+	b.eq	.Lzero_mem
+#endif
+	orr	A_lw, A_lw, A_lw, lsl #8
+	orr	A_lw, A_lw, A_lw, lsl #16
+	orr	A_l, A_l, A_l, lsl #32
+.Ltail_maybe_long:
+	cmp	count, #256
+	b.ge	.Lnot_short
+.Ltail_maybe_tiny:
+	cmp	count, #15
+	b.le	.Ltail15tiny
+.Ltail255:
+	ands	tmp1, count, #0xC0
+	b.eq	.Ltail63
+	dup	v0.4s, A_lw
+	cmp	tmp1w, #0x80
+	b.eq	1f
+	b.lt	2f
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+1:
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+2:
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+.Ltail63:
+	ands	tmp1, count, #0x30
+	b.eq	.Ltail15
+	add	dst, dst, tmp1
+	cmp	tmp1w, #0x20
+	b.eq	1f
+	b.lt	2f
+	stp	A_l, A_l, [dst, #-48]
+1:
+	stp	A_l, A_l, [dst, #-32]
+2:
+	stp	A_l, A_l, [dst, #-16]
+
+.Ltail15:
+	and	count, count, #15
+	add	dst, dst, count
+	stp	A_l, A_l, [dst, #-16]	/* Repeat some/all of last store. */
+	ret
+
+.Ltail15tiny:
+	/* Set up to 15 bytes.  Does not assume earlier memory
+	   being set.  */
+	tbz	count, #3, 1f
+	str	A_l, [dst], #8
+1:
+	tbz	count, #2, 1f
+	str	A_lw, [dst], #4
+1:
+	tbz	count, #1, 1f
+	strh	A_lw, [dst], #2
+1:
+	tbz	count, #0, 1f
+	strb	A_lw, [dst]
+1:
+	ret
+
+	/* Critical loop.  Start at a new cache line boundary.  Assuming
+	 * 64 bytes per line, this ensures the entire loop is in one line.  */
+	.p2align 6
+.Lnot_short:
+	dup	v0.4s, A_lw
+	neg	tmp2, dst
+	ands	tmp2, tmp2, #15
+	b.eq	2f
+	/* Bring DST to 128-bit (16-byte) alignment.  We know that there's
+	 * more than that to set, so we simply store 16 bytes and advance by
+	 * the amount required to reach alignment.  */
+	sub	count, count, tmp2
+	stp	A_l, A_l, [dst]
+	add	dst, dst, tmp2
+	/* There may be less than 63 bytes to go now.  */
+	cmp	count, #255
+	b.le	.Ltail255
+2:
+	cmp	count, #2097152
+	b.gt	3f
+1:
+	sub	count, count, #256
+2:
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+	stp	QA_l, QA_l, [dst], #32
+	subs	count, count, #256
+	b.ge	2b
+	tst	count, #0xff
+	b.ne	.Ltail255
+	ret
+3:
+	sub	count, count, #64
+4:
+	subs	count, count, #64
+	stnp	QA_l, QA_l, [dst]
+	stnp	QA_l, QA_l, [dst, #32]
+	add	dst, dst, #64
+	b.ge	4b
+	tst	count, #0x3f
+	b.ne	.Ltail63
+	ret
+
+#ifndef DONT_USE_DC
+	/* For zeroing memory, check to see if we can use the ZVA feature to
+	 * zero entire 'cache' lines.  */
+.Lzero_mem:
+	mov	A_l, #0
+	cmp	count, #63
+	b.le	.Ltail_maybe_tiny
+	neg	tmp2, dst
+	ands	tmp2, tmp2, #15
+	b.eq	1f
+	sub	count, count, tmp2
+	stp	A_l, A_l, [dst]
+	add	dst, dst, tmp2
+	cmp	count, #63
+	b.le	.Ltail63
+1:
+	/* For zeroing small amounts of memory, it's not worth setting up
+	 * the line-clear code.  */
+	cmp	count, #128
+	b.lt	.Lnot_short
+#ifdef MAYBE_VIRT
+	/* For efficiency when virtualized, we cache the ZVA capability.  */
+	adrp	tmp2, .Lcache_clear
+	ldr	zva_len, [tmp2, #:lo12:.Lcache_clear]
+	tbnz	zva_len, #31, .Lnot_short
+	cbnz	zva_len, .Lzero_by_line
+	mrs	tmp1, dczid_el0
+	tbz	tmp1, #4, 1f
+	/* ZVA not available.  Remember this for next time.  */
+	mov	zva_len, #~0
+	str	zva_len, [tmp2, #:lo12:.Lcache_clear]
+	b	.Lnot_short
+1:
+	mov	tmp3w, #4
+	and	zva_len, tmp1w, #15	/* Safety: other bits reserved.  */
+	lsl	zva_len, tmp3w, zva_len
+	str	zva_len, [tmp2, #:lo12:.Lcache_clear]
+#else
+	mrs	tmp1, dczid_el0
+	tbnz	tmp1, #4, .Lnot_short
+	mov	tmp3w, #4
+	and	zva_len, tmp1w, #15	/* Safety: other bits reserved.  */
+	lsl	zva_len, tmp3w, zva_len
+#endif
+
+.Lzero_by_line:
+	/* Compute how far we need to go to become suitably aligned.  We're
+	 * already at quad-word alignment.  */
+	cmp	count, zva_len_x
+	b.lt	.Lnot_short		/* Not enough to reach alignment.  */
+	sub	zva_bits_x, zva_len_x, #1
+	neg	tmp2, dst
+	ands	tmp2, tmp2, zva_bits_x
+	b.eq	1f			/* Already aligned.  */
+	/* Not aligned, check that there's enough to copy after alignment.  */
+	sub	tmp1, count, tmp2
+	cmp	tmp1, #64
+	ccmp	tmp1, zva_len_x, #8, ge	/* NZCV=0b1000 */
+	b.lt	.Lnot_short
+	/* We know that there's at least 64 bytes to zero and that it's safe
+	 * to overrun by 64 bytes.  */
+	mov	count, tmp1
+2:
+	stp	A_l, A_l, [dst]
+	stp	A_l, A_l, [dst, #16]
+	stp	A_l, A_l, [dst, #32]
+	subs	tmp2, tmp2, #64
+	stp	A_l, A_l, [dst, #48]
+	add	dst, dst, #64
+	b.ge	2b
+	/* We've overrun a bit, so adjust dst downwards.  */
+	add	dst, dst, tmp2
+1:
+	sub	count, count, zva_len_x
+3:
+	dc	zva, dst
+	add	dst, dst, zva_len_x
+	subs	count, count, zva_len_x
+	b.ge	3b
+	ands	count, count, zva_bits_x
+	b.ne	.Ltail_maybe_long
+	ret
+END(memset)
+
+#ifdef MAYBE_VIRT
+	.bss
+	.p2align 2
+.Lcache_clear:
+	.space 4
+#endif
+#endif /* DONT_USE_DC */
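
The zeroing path depends on the DC ZVA instruction, whose availability and block size come from DCZID_EL0: bits [3:0] hold log2 of the block size in 4-byte words, and bit 4 (DZP) is set when DC ZVA is prohibited, which is what the tbnz/and/lsl sequence above decodes. A small AArch64-only C sketch, not part of this change, that performs the same decode:

/* dczid_check.c -- hypothetical sketch mirroring the ZVA probe above. */
#include <stdint.h>
#include <stdio.h>

int main(void) {
#if defined(__aarch64__)
    uint64_t dczid;
    __asm__("mrs %0, dczid_el0" : "=r"(dczid));   /* readable at EL0 */
    if (dczid & (1u << 4)) {
        /* DZP set: same condition as "tbnz tmp1, #4, .Lnot_short". */
        printf("DC ZVA prohibited; memset must fall back to plain stores\n");
    } else {
        /* Same computation as "mov tmp3w, #4; lsl zva_len, tmp3w, zva_len". */
        unsigned zva_len = 4u << (unsigned)(dczid & 0xf);
        printf("DC ZVA block size: %u bytes\n", zva_len);
    }
#else
    printf("not an AArch64 build\n");
#endif
    return 0;
}
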
diff --git a/libc/arch-arm64/denver64/denver64.mk b/libc/arch-arm64/denver64/denver64.mk
new file mode 100644
index 0000000..36146bf
--- /dev/null
+++ b/libc/arch-arm64/denver64/denver64.mk
@@ -0,0 +1,10 @@
+libc_bionic_src_files_arm64 += \
+    arch-arm64/generic/bionic/memcmp.S \
+    arch-arm64/denver64/bionic/memcpy.S \
+    arch-arm64/generic/bionic/memmove.S \
+    arch-arm64/denver64/bionic/memset.S \
+    arch-arm64/generic/bionic/strcmp.S \
+    arch-arm64/generic/bionic/strlen.S \
+    arch-arm64/generic/bionic/strncmp.S \
+    arch-arm64/generic/bionic/strnlen.S \
+    arch-arm64/generic/bionic/wmemmove.S
diff --git a/libc/include/ctype.h b/libc/include/ctype.h
index 6e97326..a66df12 100644
--- a/libc/include/ctype.h
+++ b/libc/include/ctype.h
@@ -56,6 +56,8 @@
 __BEGIN_DECLS
 
 extern const char	*_ctype_;
+extern const short	*_tolower_tab_;
+extern const short	*_toupper_tab_;
 
 #if defined(__GNUC__) || defined(_ANSI_LIBRARY) || defined(lint)
 int	isalnum(int);
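
These declarations make the OpenBSD-style case-conversion tables visible to all callers of <ctype.h>; they replace the LP64-only declarations removed from openbsd-compat.h further down. In the OpenBSD sources the tables have 257 entries and are indexed with an offset of one so that EOF (-1) is a valid index. A hedged sketch of a consumer, assuming it is built against bionic where these symbols are defined:

/* ctype_tables.c -- illustrative only; the real tolower()/toupper() live in
 * the upstream-openbsd ctype sources. */
#include <ctype.h>
#include <stdio.h>

static int my_tolower(int c) {
    if (c < -1 || c > 255) return c;   /* outside the table: pass through */
    return (_tolower_tab_ + 1)[c];     /* +1 so that EOF (-1) hits entry 0 */
}

static int my_toupper(int c) {
    if (c < -1 || c > 255) return c;
    return (_toupper_tab_ + 1)[c];
}

int main(void) {
    printf("%c %c\n", my_tolower('A'), my_toupper('a'));   /* expect: a A */
    return 0;
}
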
diff --git a/libc/include/sys/cdefs_elf.h b/libc/include/sys/cdefs_elf.h
index b5db13e..c21214c 100644
--- a/libc/include/sys/cdefs_elf.h
+++ b/libc/include/sys/cdefs_elf.h
@@ -64,13 +64,6 @@
 /* Used to tag non-static symbols that are private and never exposed by the shared library. */
 #define __LIBC_HIDDEN__ __attribute__((visibility ("hidden")))
 
-/* Like __LIBC_HIDDEN__, but preserves binary compatibility for LP32. */
-#ifdef __LP64__
-#define __LIBC64_HIDDEN__ __LIBC_HIDDEN__
-#else
-#define __LIBC64_HIDDEN__
-#endif
-
 /* Used to tag non-static symbols that are public and exposed by the shared library. */
 #define __LIBC_ABI_PUBLIC__ __attribute__((visibility ("default")))
 
diff --git a/libc/upstream-openbsd/android/include/openbsd-compat.h b/libc/upstream-openbsd/android/include/openbsd-compat.h
index f00d91a..8618e31 100644
--- a/libc/upstream-openbsd/android/include/openbsd-compat.h
+++ b/libc/upstream-openbsd/android/include/openbsd-compat.h
@@ -17,8 +17,6 @@
 #ifndef _BIONIC_OPENBSD_COMPAT_H_included
 #define _BIONIC_OPENBSD_COMPAT_H_included
 
-#include <sys/cdefs.h>
-
 #define __USE_BSD
 
 /* OpenBSD's <ctype.h> uses these names, which conflicted with stlport.
@@ -36,8 +34,4 @@
 /* OpenBSD has this, but we can't really implement it correctly on Linux. */
 #define issetugid() 0
 
-/* LP32 NDK ctype.h contained references to these. */
-__LIBC64_HIDDEN__ extern const short *_tolower_tab_;
-__LIBC64_HIDDEN__ extern const short *_toupper_tab_;
-
 #endif
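
With these LP64-only declarations gone (they are now public in <ctype.h>, see the ctype.h hunk above), __LIBC64_HIDDEN__ had no remaining users and was removed from cdefs_elf.h. The surviving macros there control ELF symbol visibility; a purely illustrative use inside bionic (the symbol names here are made up):

/* visibility_example.c -- hypothetical, for illustration only. */
#include <sys/cdefs.h>

/* Hidden: never enters libc.so's dynamic symbol table, so other
 * libraries and executables cannot link against it. */
__LIBC_HIDDEN__ int __bionic_private_counter = 0;

/* Default visibility: part of the library's exported ABI. */
__LIBC_ABI_PUBLIC__ int bionic_public_api(void);
__LIBC_ABI_PUBLIC__ int bionic_public_api(void) {
    return ++__bionic_private_counter;
}
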
diff --git a/linker/linker.cpp b/linker/linker.cpp
index a22233d..e202aaf 100644
--- a/linker/linker.cpp
+++ b/linker/linker.cpp
@@ -59,6 +59,7 @@
  *
  * open issues / todo:
  *
+ * - are we doing everything we should for ARM_COPY relocations?
  * - cleaner error reporting
  * - after linking, set as much stuff as possible to READONLY
  *   and NOEXEC
@@ -1037,17 +1038,52 @@
         break;
 
     case R_AARCH64_COPY:
-        /*
-         * ET_EXEC is not supported so this should not happen.
-         *
-         * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
-         *
-         * Section 4.7.1.10 "Dynamic relocations"
-         * R_AARCH64_COPY may only appear in executable objects where e_type is
-         * set to ET_EXEC.
-         */
-        DL_ERR("%s R_AARCH64_COPY relocations are not supported", si->name);
-        return -1;
+        if ((si->flags & FLAG_EXE) == 0) {
+            /*
+             * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
+             *
+             * Section 4.7.1.10 "Dynamic relocations"
+             * R_AARCH64_COPY may only appear in executable objects where e_type is
+             * set to ET_EXEC.
+             *
+             * FLAG_EXE is set for both ET_DYN and ET_EXEC executables.
+             * We should explicitly disallow ET_DYN executables from having
+             * R_AARCH64_COPY relocations.
+             */
+            DL_ERR("%s R_AARCH64_COPY relocations only supported for ET_EXEC", si->name);
+            return -1;
+        }
+        count_relocation(kRelocCopy);
+        MARK(rela->r_offset);
+        TRACE_TYPE(RELO, "RELO COPY %16llx <- %lld @ %16llx %s\n",
+                   reloc,
+                   s->st_size,
+                   (sym_addr + rela->r_addend),
+                   sym_name);
+        if (reloc == (sym_addr + rela->r_addend)) {
+            ElfW(Sym)* src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);
+
+            if (src == NULL) {
+                DL_ERR("%s R_AARCH64_COPY relocation source cannot be resolved", si->name);
+                return -1;
+            }
+            if (lsi->has_DT_SYMBOLIC) {
+                DL_ERR("%s invalid R_AARCH64_COPY relocation against DT_SYMBOLIC shared "
+                       "library %s (built with -Bsymbolic?)", si->name, lsi->name);
+                return -1;
+            }
+            if (s->st_size < src->st_size) {
+                DL_ERR("%s R_AARCH64_COPY relocation size mismatch (%lld < %lld)",
+                       si->name, s->st_size, src->st_size);
+                return -1;
+            }
+            memcpy(reinterpret_cast<void*>(reloc),
+                   reinterpret_cast<void*>(src->st_value + lsi->load_bias), src->st_size);
+        } else {
+            DL_ERR("%s R_AARCH64_COPY relocation target cannot be resolved", si->name);
+            return -1;
+        }
+        break;
     case R_AARCH64_TLS_TPREL64:
         TRACE_TYPE(RELO, "RELO TLS_TPREL64 *** %16llx <- %16llx - %16llx\n",
                    reloc, (sym_addr + rela->r_addend), rela->r_offset);
@@ -1225,17 +1261,48 @@
             *reinterpret_cast<ElfW(Addr)*>(reloc) += sym_addr - rel->r_offset;
             break;
         case R_ARM_COPY:
-            /*
-             * ET_EXEC is not supported so this should not happen.
-             *
-             * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
-             *
-             * Section 4.7.1.10 "Dynamic relocations"
-             * R_ARM_COPY may only appear in executable objects where e_type is
-             * set to ET_EXEC.
-             */
-            DL_ERR("%s R_ARM_COPY relocations are not supported", si->name);
-            return -1;
+            if ((si->flags & FLAG_EXE) == 0) {
+                /*
+                 * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0044d/IHI0044D_aaelf.pdf
+                 *
+                 * Section 4.7.1.10 "Dynamic relocations"
+                 * R_ARM_COPY may only appear in executable objects where e_type is
+                 * set to ET_EXEC.
+                 *
+                 * TODO: FLAG_EXE is set for both ET_DYN and ET_EXEC executables.
+                 * We should explicitly disallow ET_DYN executables from having
+                 * R_ARM_COPY relocations.
+                 */
+                DL_ERR("%s R_ARM_COPY relocations only supported for ET_EXEC", si->name);
+                return -1;
+            }
+            count_relocation(kRelocCopy);
+            MARK(rel->r_offset);
+            TRACE_TYPE(RELO, "RELO %08x <- %d @ %08x %s", reloc, s->st_size, sym_addr, sym_name);
+            if (reloc == sym_addr) {
+                ElfW(Sym)* src = soinfo_do_lookup(NULL, sym_name, &lsi, needed);
+
+                if (src == NULL) {
+                    DL_ERR("%s R_ARM_COPY relocation source cannot be resolved", si->name);
+                    return -1;
+                }
+                if (lsi->has_DT_SYMBOLIC) {
+                    DL_ERR("%s invalid R_ARM_COPY relocation against DT_SYMBOLIC shared "
+                           "library %s (built with -Bsymbolic?)", si->name, lsi->name);
+                    return -1;
+                }
+                if (s->st_size < src->st_size) {
+                    DL_ERR("%s R_ARM_COPY relocation size mismatch (%d < %d)",
+                           si->name, s->st_size, src->st_size);
+                    return -1;
+                }
+                memcpy(reinterpret_cast<void*>(reloc),
+                       reinterpret_cast<void*>(src->st_value + lsi->load_bias), src->st_size);
+            } else {
+                DL_ERR("%s R_ARM_COPY relocation target cannot be resolved", si->name);
+                return -1;
+            }
+            break;
 #elif defined(__i386__)
         case R_386_JMP_SLOT:
             count_relocation(kRelocAbsolute);
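
Both hunks above replace the old hard error with real COPY-relocation processing, gated on FLAG_EXE. For context, a hypothetical source pair (all names invented) that makes the static linker emit such a relocation; the program below would be linked non-PIE against the described libfoo.so:

/* libfoo.c -- built into a hypothetical libfoo.so:
 *
 *     int foo_table[4] = { 1, 2, 3, 4 };
 *
 * main.c -- built as a traditional ET_EXEC (non-PIE) executable: */
extern int foo_table[4];

int main(void) {
    /* Referencing foo_table at a link-time-constant address makes the static
     * linker reserve a copy of it inside the executable and emit
     * R_AARCH64_COPY (or R_ARM_COPY).  At load time the dynamic linker looks
     * up the defining library and performs, in effect,
     *     memcpy(copy_in_executable, definition_in_libfoo, st_size);
     * which is what the added code does after checking FLAG_EXE, rejecting
     * DT_SYMBOLIC libraries, and verifying the destination is large enough. */
    return foo_table[0];
}
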
@@ -2105,11 +2172,13 @@
     si->dynamic = NULL;
     si->ref_count = 1;
 
+#if defined(__LP64__)
     ElfW(Ehdr)* elf_hdr = reinterpret_cast<ElfW(Ehdr)*>(si->base);
     if (elf_hdr->e_type != ET_DYN) {
         __libc_format_fd(2, "error: only position independent executables (PIE) are supported.\n");
         exit(EXIT_FAILURE);
     }
+#endif
 
     // Use LD_LIBRARY_PATH and LD_PRELOAD (but only if we aren't setuid/setgid).
     parse_LD_LIBRARY_PATH(ldpath_env);
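
With the new guard the PIE-only restriction applies to 64-bit processes only; 32-bit ET_EXEC binaries load again, which is presumably what the COPY-relocation support above exists to serve. A standalone C sketch, not part of the linker, showing the e_type distinction the check relies on (it assumes a 64-bit ELF input and does minimal error handling):

/* elf_type.c -- hypothetical helper: report whether an ELF file is ET_EXEC
 * (non-PIE) or ET_DYN (PIE executable or shared object). */
#include <elf.h>
#include <stdio.h>

int main(int argc, char* argv[]) {
    if (argc != 2) {
        fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
        return 2;
    }
    FILE* f = fopen(argv[1], "rb");
    if (f == NULL) {
        perror("fopen");
        return 2;
    }
    Elf64_Ehdr ehdr;
    if (fread(&ehdr, sizeof(ehdr), 1, f) != 1) {
        perror("fread");
        fclose(f);
        return 2;
    }
    fclose(f);
    printf("e_type = %u (%s)\n", (unsigned)ehdr.e_type,
           ehdr.e_type == ET_DYN  ? "ET_DYN: PIE executable or shared object" :
           ehdr.e_type == ET_EXEC ? "ET_EXEC: non-PIE executable" : "other");
    return 0;
}
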