eclair snapshot
diff --git a/libc/arch-arm/bionic/atomics_arm.S b/libc/arch-arm/bionic/atomics_arm.S
index b2da09f..f8b23e6 100644
--- a/libc/arch-arm/bionic/atomics_arm.S
+++ b/libc/arch-arm/bionic/atomics_arm.S
@@ -41,6 +41,8 @@
    .equ     kernel_cmpxchg, 0xFFFF0FC0
    .equ     kernel_atomic_base, 0xFFFF0FFF
 __atomic_dec:
+    .fnstart
+    .save {r4, lr}
     stmdb   sp!, {r4, lr}
     mov     r2, r0
 1: @ atomic_dec
@@ -53,8 +55,11 @@
     add     r0, r1, #1
     ldmia   sp!, {r4, lr}
     bx      lr
+    .fnend
 
 __atomic_inc:
+    .fnstart
+    .save {r4, lr}
     stmdb   sp!, {r4, lr}
     mov     r2, r0
 1: @ atomic_inc
@@ -67,9 +72,12 @@
     sub     r0, r1, #1
     ldmia   sp!, {r4, lr}
     bx      lr
+    .fnend
 
 /* r0(old) r1(new) r2(addr) -> r0(zero_if_succeeded) */
 __atomic_cmpxchg:
+    .fnstart
+    .save {r4, lr}
     stmdb   sp!, {r4, lr}
     mov     r4, r0          /* r4 = save oldvalue */
 1: @ atomic_cmpxchg
@@ -84,6 +92,7 @@
 2: @ atomic_cmpxchg
     ldmia   sp!, {r4, lr}
     bx      lr
+    .fnend
 #else
 #define KUSER_CMPXCHG 0xffffffc0
 
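
The __atomic_* routines above are thin wrappers around the ARM kuser cmpxchg helper that the kernel maps at the fixed address 0xFFFF0FC0 (the kernel_cmpxchg .equ above). The following is a rough C sketch of the retry loop that __atomic_dec implements; it assumes, as the comment on __atomic_cmpxchg suggests, that the helper returns zero when the swap succeeds, and kuser_cmpxchg_t / atomic_dec_sketch are illustrative names, not part of bionic:

    /* Illustrative sketch only; the real routine is the assembly above. */
    typedef int (*kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
    #define kernel_cmpxchg ((kuser_cmpxchg_t)0xFFFF0FC0)    /* same address as the .equ above */

    static int atomic_dec_sketch(volatile int *addr)
    {
        int old;
        do {
            old = *addr;                                    /* read the current value        */
        } while (kernel_cmpxchg(old, old - 1, addr) != 0);  /* retry until the swap succeeds */
        return old;                                         /* previous value, like the asm  */
    }
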
diff --git a/libc/arch-arm/bionic/libgcc_compat.c b/libc/arch-arm/bionic/libgcc_compat.c
new file mode 100644
index 0000000..886d025
--- /dev/null
+++ b/libc/arch-arm/bionic/libgcc_compat.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* This file contains dummy references to libgcc.a functions to force the
+ * static linker to copy their definitions into the final libc.so binary.
+ *
+ * They are required to ensure backwards binary compatibility with
+ * Android 1.5 and Android 1.6 system images. Some applications built
+ * using the NDK require them to be here.
+ *
+ * Now, for a more elaborate description of the issue:
+ *
+ * libgcc.a is a compiler-specific library containing various helper
+ * functions used to implement certain operations that are not necessarily
+ * supported by the target CPU. For example, integer division doesn't have a
+ * corresponding CPU instruction on ARMv5, and is instead implemented in the
+ * compiler-generated machine code as a call to an __idiv helper function.
+ *
+ * Normally, one has to place libgcc.a in the link command used to generate
+ * target binaries (shared libraries and executables) after all objects and
+ * static libraries, but before dependent shared libraries, i.e. something
+ * like:
+ *         gcc <options> -o libfoo.so  foo.a libgcc.a -lc -lm
+ *
+ * This ensures that any helper function needed by the code in foo.a is copied
+ * into the final libfoo.so. Unfortunately, the Android build system has been
+ * using this instead:
+ *
+ *         gcc <options> -o libfoo.so foo.a -lc -lm libgcc.a
+ *
+ * The problem with this is that if one helper function needed by foo.a has
+ * already been copied into libc.so or libm.so, then nothing will be copied
+ * into libfoo.so. Instead, a symbol import definition will be added to it
+ * so libfoo.so can directly call the one in libc.so at runtime.
+ *
+ * When changing toolchains for 2.0, the set of helper functions copied to
+ * libc.so changed, which caused some native shared libraries generated
+ * with the NDK to fail to load properly.
+ *
+ * The NDK has been fixed after 1.6_r1 to use the correct link command, so
+ * any native shared library generated with it should now be safe from that
+ * problem. On the other hand, existing shared libraries distributed with
+ * applications that were generated with a previous version of the NDK
+ * still need all 1.5/1.6 helper functions in libc.so and libm.so.
+ *
+ * Final note: some of the functions below should really be in libm.so to
+ *             completely reflect the state of 1.5/1.6 system images. However,
+ *             since libm.so depends on libc.so, it's easier to put all of
+ *             these in libc.so instead, since the dynamic linker will always
+ *             search in libc.so before libm.so for dependencies.
+ */
+
+#define   COMPAT_FUNCTIONS_LIST \
+    XX(__adddf3)             \
+    XX(__addsf3)             \
+    XX(__aeabi_cdcmpeq)      \
+    XX(__aeabi_cdcmple)      \
+    XX(__aeabi_cdrcmple)     \
+    XX(__aeabi_d2f)          \
+    XX(__aeabi_d2iz)         \
+    XX(__aeabi_dadd)         \
+    XX(__aeabi_dcmpeq)       \
+    XX(__aeabi_dcmpge)       \
+    XX(__aeabi_dcmpgt)       \
+    XX(__aeabi_dcmple)       \
+    XX(__aeabi_dcmplt)       \
+    XX(__aeabi_dcmpun)       \
+    XX(__aeabi_ddiv)         \
+    XX(__aeabi_dmul)         \
+    XX(__aeabi_drsub)        \
+    XX(__aeabi_dsub)         \
+    XX(__aeabi_f2d)          \
+    XX(__aeabi_f2iz)         \
+    XX(__aeabi_fadd)         \
+    XX(__aeabi_fcmpun)       \
+    XX(__aeabi_fdiv)         \
+    XX(__aeabi_fmul)         \
+    XX(__aeabi_frsub)        \
+    XX(__aeabi_fsub)         \
+    XX(__aeabi_i2d)          \
+    XX(__aeabi_i2f)          \
+    XX(__aeabi_l2d)          \
+    XX(__aeabi_l2f)          \
+    XX(__aeabi_lmul)         \
+    XX(__aeabi_ui2d)         \
+    XX(__aeabi_ui2f)         \
+    XX(__aeabi_ul2d)         \
+    XX(__aeabi_ul2f)         \
+    XX(__cmpdf2)             \
+    XX(__divdf3)             \
+    XX(__divsf3)             \
+    XX(__eqdf2)              \
+    XX(__extendsfdf2)        \
+    XX(__fixdfsi)            \
+    XX(__fixsfsi)            \
+    XX(__floatdidf)          \
+    XX(__floatdisf)          \
+    XX(__floatsidf)          \
+    XX(__floatsisf)          \
+    XX(__floatundidf)        \
+    XX(__floatundisf)        \
+    XX(__floatunsidf)        \
+    XX(__floatunsisf)        \
+    XX(__gedf2)              \
+    XX(__gtdf2)              \
+    XX(__ledf2)              \
+    XX(__ltdf2)              \
+    XX(__muldf3)             \
+    XX(__muldi3)             \
+    XX(__mulsf3)             \
+    XX(__nedf2)              \
+    XX(__subdf3)             \
+    XX(__subsf3)             \
+    XX(__truncdfsf2)         \
+    XX(__unorddf2)           \
+    XX(__unordsf2)           \
+
+#define  XX(f)    extern void f(void);
+COMPAT_FUNCTIONS_LIST
+#undef XX
+
+void  __bionic_libgcc_compat_hooks(void)
+{
+#define XX(f)    f();
+COMPAT_FUNCTIONS_LIST
+#undef XX
+}
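
The XX() list above is an X-macro: COMPAT_FUNCTIONS_LIST is expanded twice, once to declare every libgcc helper and once, inside __bionic_libgcc_compat_hooks(), to emit a reference to each of them; those references are what force the helpers' definitions to be linked from libgcc.a into libc.so. For illustration only, with a trimmed two-entry list taken from the real one above (the calls are never meant to be executed), the two expansions produce roughly:

    #define TINY_COMPAT_LIST  \
        XX(__adddf3)          \
        XX(__aeabi_dadd)

    #define XX(f)    extern void f(void);
    TINY_COMPAT_LIST    /* expands to: extern void __adddf3(void); extern void __aeabi_dadd(void); */
    #undef XX

    /* hypothetical name, mirroring __bionic_libgcc_compat_hooks above */
    void __example_compat_hooks(void)
    {
    #define XX(f)    f();
    TINY_COMPAT_LIST    /* expands to: __adddf3(); __aeabi_dadd(); */
    #undef XX
    }
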
diff --git a/libc/arch-arm/bionic/memcmp.S b/libc/arch-arm/bionic/memcmp.S
index f45b56b..67dcddc 100644
--- a/libc/arch-arm/bionic/memcmp.S
+++ b/libc/arch-arm/bionic/memcmp.S
@@ -44,6 +44,7 @@
  */
 
 memcmp:
+        .fnstart
         PLD         (r0, #0)
         PLD         (r1, #0)
 
@@ -53,6 +54,7 @@
         moveq       r0, #0
         bxeq        lr
 
+        .save {r4, lr}
         /* save registers */
         stmfd       sp!, {r4, lr}
         
@@ -174,6 +176,7 @@
 9:      /* restore registers and return */
         ldmfd       sp!, {r4, lr}
         bx          lr
+        .fnend
 
 
 
diff --git a/libc/arch-arm/bionic/memcmp16.S b/libc/arch-arm/bionic/memcmp16.S
index 38d8b62..f398588 100644
--- a/libc/arch-arm/bionic/memcmp16.S
+++ b/libc/arch-arm/bionic/memcmp16.S
@@ -44,6 +44,7 @@
  */
 
 __memcmp16:
+        .fnstart
         PLD         (r0, #0)
         PLD         (r1, #0)
 
@@ -79,6 +80,7 @@
         bx          lr
 
 
+        .save {r4, lr}
         /* save registers */
 0:      stmfd       sp!, {r4, lr}
         
@@ -93,6 +95,7 @@
         /* restore registers and return */
         ldmnefd     sp!, {r4, lr}
         bxne        lr
+        .fnend
 
 
 
diff --git a/libc/arch-arm/bionic/memcpy.S b/libc/arch-arm/bionic/memcpy.S
index fcb58cd..024d885 100644
--- a/libc/arch-arm/bionic/memcpy.S
+++ b/libc/arch-arm/bionic/memcpy.S
@@ -28,6 +28,127 @@
 
 #include <machine/cpu-features.h>
 
+#if __ARM_ARCH__ == 7 || defined(__ARM_NEON__)
+
+        .text
+        .fpu    neon
+
+        .global memcpy
+        .type memcpy, %function
+        .align 4
+
+/* a prefetch distance of 4 cache-lines works best experimentally */
+#define CACHE_LINE_SIZE     64
+#define PREFETCH_DISTANCE   (CACHE_LINE_SIZE*4)
+
+memcpy:
+        .fnstart
+        .save       {r0, lr}
+        stmfd       sp!, {r0, lr}
+
+        /* start preloading as early as possible */
+        pld         [r1, #(CACHE_LINE_SIZE*0)]
+        pld         [r1, #(CACHE_LINE_SIZE*1)]
+
+        /* do we have at least 16-bytes to copy (needed for alignment below) */
+        cmp         r2, #16
+        blo         5f
+
+        /* align destination to half cache-line for the write-buffer */
+        rsb         r3, r0, #0
+        ands        r3, r3, #0xF
+        beq         0f
+
+        /* copy up to 15-bytes (count in r3) */
+        sub         r2, r2, r3
+        movs        ip, r3, lsl #31
+        ldrmib      lr, [r1], #1
+        strmib      lr, [r0], #1
+        ldrcsb      ip, [r1], #1
+        ldrcsb      lr, [r1], #1
+        strcsb      ip, [r0], #1
+        strcsb      lr, [r0], #1
+        movs        ip, r3, lsl #29
+        bge         1f
+        // copies 4 bytes, destination 32-bits aligned
+        vld4.8      {d0[0], d1[0], d2[0], d3[0]}, [r1]!
+        vst4.8      {d0[0], d1[0], d2[0], d3[0]}, [r0, :32]!
+1:      bcc         2f
+        // copies 8 bytes, destination 64-bits aligned
+        vld1.8      {d0}, [r1]!
+        vst1.8      {d0}, [r0, :64]!
+2:
+
+0:      /* immediately preload the next cache line, which we may need */
+        pld         [r1, #(CACHE_LINE_SIZE*0)]
+        pld         [r1, #(CACHE_LINE_SIZE*1)]
+
+        /* make sure we have at least 64 bytes to copy */
+        subs        r2, r2, #64
+        blo         2f
+
+        /* preload all the cache lines we need.
+         * NOTE: the number of pld below depends on PREFETCH_DISTANCE,
+         * ideally we would increase the distance in the main loop to
+         * avoid the goofy code below. In practice this doesn't seem to make
+         * a big difference.
+         */
+        pld         [r1, #(CACHE_LINE_SIZE*2)]
+        pld         [r1, #(CACHE_LINE_SIZE*3)]
+        pld         [r1, #(PREFETCH_DISTANCE)]
+
+1:      /* The main loop copies 64 bytes at a time */
+        vld1.8      {d0  - d3},   [r1]!
+        vld1.8      {d4  - d7},   [r1]!
+        pld         [r1, #(PREFETCH_DISTANCE)]
+        subs        r2, r2, #64
+        vst1.8      {d0  - d3},   [r0, :128]!
+        vst1.8      {d4  - d7},   [r0, :128]!
+        bhs         1b
+
+2:      /* fix-up the remaining count and make sure we have >= 32 bytes left */
+        add         r2, r2, #64
+        subs        r2, r2, #32
+        blo         4f
+
+3:      /* 32 bytes at a time. These cache lines were already preloaded */
+        vld1.8      {d0 - d3},  [r1]!
+        subs        r2, r2, #32
+        vst1.8      {d0 - d3},  [r0, :128]!
+        bhs         3b
+
+4:      /* less than 32 left */
+        add         r2, r2, #32
+        tst         r2, #0x10
+        beq         5f
+        // copies 16 bytes, 128-bits aligned
+        vld1.8      {d0, d1}, [r1]!
+        vst1.8      {d0, d1}, [r0, :128]!
+
+5:      /* copy up to 15-bytes (count in r2) */
+        movs        ip, r2, lsl #29
+        bcc         1f
+        vld1.8      {d0}, [r1]!
+        vst1.8      {d0}, [r0]!
+1:      bge         2f
+        vld4.8      {d0[0], d1[0], d2[0], d3[0]}, [r1]!
+        vst4.8      {d0[0], d1[0], d2[0], d3[0]}, [r0]!
+2:      movs        ip, r2, lsl #31
+        ldrmib      r3, [r1], #1
+        ldrcsb      ip, [r1], #1
+        ldrcsb      lr, [r1], #1
+        strmib      r3, [r0], #1
+        strcsb      ip, [r0], #1
+        strcsb      lr, [r0], #1
+
+        ldmfd       sp!, {r0, lr}
+        bx          lr
+        .fnend
+
+
+#else   /* __ARM_ARCH__ < 7 */
+
+
 	.text
 
     .global memcpy
@@ -40,9 +161,9 @@
 		 * note that memcpy() always returns the destination pointer,
 		 * so we have to preserve R0.
 		 */
-	
-memcpy:	
-		/* The stack must always be 64-bits aligned to be compliant with the 
+
+memcpy:
+		/* The stack must always be 64-bits aligned to be compliant with the
 		 * ARM ABI. Since we have to save R0, we might as well save R4
 		 * which we can use for better pipelining of the reads below
 		 */
@@ -82,10 +203,10 @@
         strmib		r3, [r0], #1
 		strcsb		r4, [r0], #1
 		strcsb		r12,[r0], #1
-		
+
 src_aligned:
 
-		/* see if src and dst are aligned together (congruent) */	
+		/* see if src and dst are aligned together (congruent) */
 		eor			r12, r0, r1
 		tst			r12, #3
 		bne			non_congruent
@@ -103,7 +224,7 @@
 		andhi		r3, r2, #0x1C
 
 		/* conditionally copies 0 to 7 words (length in r3) */
-		movs		r12, r3, lsl #28 
+		movs		r12, r3, lsl #28
 		ldmcsia		r1!, {r4, r5, r6, r7}	/* 16 bytes */
 		ldmmiia		r1!, {r8, r9}			/*  8 bytes */
 		stmcsia		r0!, {r4, r5, r6, r7}
@@ -124,7 +245,7 @@
 
         /*
          * We preload a cache-line up to 64 bytes ahead. On the 926, this will
-         * stall only until the requested world is fetched, but the linefill 
+         * stall only until the requested word is fetched, but the linefill
          * continues in the background.
          * While the linefill is going, we write our previous cache-line
          * into the write-buffer (which should have some free space).
@@ -150,19 +271,19 @@
 
         // NOTE: if r12 is more than 64 ahead of r1, the following ldrhi
         // for ARM9 preload will not be safely guarded by the preceding subs.
-        // When it is safely guarded the only possibility to have SIGSEGV here 
+        // When it is safely guarded the only possibility to have SIGSEGV here
         // is because the caller overstates the length.
         ldrhi       r3, [r12], #32      /* cheap ARM9 preload */
         stmia       r0!, { r4-r11 }
 		bhs         1b
-		
+
         add         r2, r2, #32
 
 
 
 
 less_than_32_left:
-		/* 
+		/*
 		 * less than 32 bytes left at this point (length in r2)
 		 */
 
@@ -174,7 +295,7 @@
 		beq			1f
 
 		/* conditionally copies 0 to 31 bytes */
-		movs		r12, r2, lsl #28 
+		movs		r12, r2, lsl #28
 		ldmcsia		r1!, {r4, r5, r6, r7}	/* 16 bytes */
 		ldmmiia		r1!, {r8, r9}			/*  8 bytes */
 		stmcsia		r0!, {r4, r5, r6, r7}
@@ -182,7 +303,7 @@
 		movs		r12, r2, lsl #30
 		ldrcs		r3, [r1], #4			/*  4 bytes */
 		ldrmih		r4, [r1], #2			/*  2 bytes */
-		strcs		r3, [r0], #4		
+		strcs		r3, [r0], #4
 		strmih		r4, [r0], #2
 		tst         r2, #0x1
 		ldrneb		r3, [r1]				/*  last byte  */
@@ -200,34 +321,34 @@
 		 * here source is aligned to 4 bytes
 		 * but destination is not.
 		 *
-		 * in the code below r2 is the number of bytes read 
+		 * in the code below r2 is the number of bytes read
 		 * (the number of bytes written is always smaller, because we have
 		 * partial words in the shift queue)
 		 */
 		cmp			r2, #4
 		blo			copy_last_3_and_return
-		
+
         /* Use post-increment mode for stm to spill r5-r11 to reserved stack
          * frame. Don't update sp.
          */
         stmea		sp, {r5-r11}
-		
+
 		/* compute shifts needed to align src to dest */
 		rsb			r5, r0, #0
 		and			r5, r5, #3			/* r5 = # bytes in partial words */
-		mov			r12, r5, lsl #3		/* r12 = right */ 
+		mov			r12, r5, lsl #3		/* r12 = right */
 		rsb			lr, r12, #32		/* lr = left  */
-		
+
 		/* read the first word */
 		ldr			r3, [r1], #4
 		sub			r2, r2, #4
-		
+
 		/* write a partial word (0 to 3 bytes), such that destination
 		 * becomes aligned to 32 bits (r5 = nb of words to copy for alignment)
 		 */
 		movs		r5, r5, lsl #31
 		strmib		r3, [r0], #1
-		movmi		r3, r3, lsr #8		
+		movmi		r3, r3, lsr #8
 		strcsb		r3, [r0], #1
 		movcs		r3, r3, lsr #8
 		strcsb		r3, [r0], #1
@@ -235,7 +356,7 @@
 
 		cmp			r2, #4
 		blo			partial_word_tail
-		
+
 		/* Align destination to 32 bytes (cache line boundary) */
 1:		tst			r0, #0x1c
 		beq			2f
@@ -366,7 +487,7 @@
 		strcsb		r3, [r0], #1
 		movcs		r3, r3, lsr #8
 		strcsb		r3, [r0], #1
-		
+
 		/* Refill spilled registers from the stack. Don't update sp. */
 		ldmfd		sp, {r5-r11}
 
@@ -385,3 +506,5 @@
 		bx			lr
         .fnend
 
+
+#endif    /* __ARM_ARCH__ < 7 */
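
At the C level, the NEON memcpy added above has the following shape: save r0 so the destination can be returned, copy up to 15 bytes so the destination becomes 16-byte aligned, prefetch several cache lines ahead, stream 64 bytes per iteration through d0-d7, then drain the remaining 32/16/8/4/1-byte tails. The sketch below is illustrative only: memcpy_sketch stands in for the real routine, library memcpy calls stand in for the vld1/vst1 pairs, and __builtin_prefetch (a GCC/Clang builtin) stands in for pld.

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define CACHE_LINE_SIZE     64
    #define PREFETCH_DISTANCE   (CACHE_LINE_SIZE * 4)   /* 4 lines ahead, as in the assembly */

    void *memcpy_sketch(void *dst, const void *src, size_t n)
    {
        unsigned char *d = dst;
        const unsigned char *s = src;
        void *ret = dst;                        /* memcpy always returns the destination */

        if (n >= 16) {
            /* head: copy up to 15 bytes so the destination is 16-byte aligned */
            size_t head = (size_t)(-(uintptr_t)d & 15);
            memcpy(d, s, head);  d += head;  s += head;  n -= head;

            /* main loop: 64 bytes per iteration, prefetching well ahead (the pld's) */
            while (n >= 64) {
                __builtin_prefetch(s + PREFETCH_DISTANCE);
                memcpy(d, s, 64);               /* vld1.8/vst1.8 of d0-d7 in the assembly */
                d += 64;  s += 64;  n -= 64;
            }
        }

        /* tail: the assembly peels 32-, 16-, 8-, 4-byte and single-byte chunks;
         * a plain copy is enough to show the structure here */
        memcpy(d, s, n);
        return ret;
    }
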
diff --git a/libc/arch-arm/bionic/memset.S b/libc/arch-arm/bionic/memset.S
index d52d622..93abe15 100644
--- a/libc/arch-arm/bionic/memset.S
+++ b/libc/arch-arm/bionic/memset.S
@@ -80,7 +80,7 @@
         
 		rsb         r3, r0, #0
 		ands		r3, r3, #0x1C
-		beq         aligned32
+		beq         3f
 		cmp         r3, r2
 		andhi		r3, r2, #0x1C
 		sub         r2, r2, r3
@@ -93,7 +93,7 @@
 		movs		r3, r3, lsl #2
         strcs       r1, [r0], #4
 
-aligned32:
+3:
         subs        r2, r2, #32
         mov         r3, r1
         bmi         2f