Fix for slm-tuned memmove (both 32- and 64-bit).

Introduce a test for memmove that catches a fault.
Fix both 32- and 64-bit versions of slm-tuned memmove.
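
The fault is easiest to see outside the gtest harness: an overlapping
copy whose length is at least SHARED_CACHE_SIZE_HALF takes the
non-temporal (large page) path. Below is a minimal standalone sketch of
that scenario, mirroring the new memmove_cache_size test (600000-byte
length, destination 64 bytes past the source); it is illustrative only
and not part of the patch.

  // Illustrative sketch (not part of the patch): an overlapping memmove
  // long enough to take the non-temporal path fixed here.
  #include <cstdlib>
  #include <cstring>
  #include <cstdio>

  int main() {
    const size_t len = 600000;  // assumed to exceed SHARED_CACHE_SIZE_HALF
    const size_t pos = 64;      // dst overlaps src, shifted forward 64 bytes
    char* buf = static_cast<char*>(malloc(len + pos));
    char* ref = static_cast<char*>(malloc(len));
    if (buf == nullptr || ref == nullptr) return 1;

    for (size_t i = 0; i < len + pos; ++i) buf[i] = static_cast<char>(i * 131);
    memcpy(ref, buf, len);         // expected contents of the destination
    memmove(buf + pos, buf, len);  // overlapping move exercised by the fix

    int ok = (memcmp(buf + pos, ref, len) == 0);
    printf("%s\n", ok ? "ok" : "corrupted");
    free(buf);
    free(ref);
    return ok ? 0 : 1;
  }
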
Change-Id: Ib416def2610a0972e32c3b9b6055b54967643dc3
Signed-off-by: Varvara Rainchik <varvara.rainchik@intel.com>
diff --git a/libc/arch-x86/silvermont/string/sse2-memmove-slm.S b/libc/arch-x86/silvermont/string/sse2-memmove-slm.S
index 79a0a36..b971f0b 100644
--- a/libc/arch-x86/silvermont/string/sse2-memmove-slm.S
+++ b/libc/arch-x86/silvermont/string/sse2-memmove-slm.S
@@ -74,13 +74,13 @@
#endif
#ifdef USE_AS_BCOPY
-# define SRC PARMS
-# define DEST SRC+4
-# define LEN DEST+4
+# define SRC PARMS
+# define DEST SRC+4
+# define LEN DEST+4
#else
-# define DEST PARMS
-# define SRC DEST+4
-# define LEN SRC+4
+# define DEST PARMS
+# define SRC DEST+4
+# define LEN SRC+4
#endif
#define CFI_PUSH(REG) \
@@ -109,15 +109,15 @@
/* Check whether we should copy backward or forward. */
cmp %eax, %edx
je L(mm_return)
- ja L(mm_len_0_or_more_backward)
+ jg L(mm_len_0_or_more_backward)
/* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
separately. */
cmp $16, %ecx
jbe L(mm_len_0_16_bytes_forward)
- cmpl $32, %ecx
- jg L(mm_len_32_or_more_forward)
+ cmpl $32, %ecx
+ ja L(mm_len_32_or_more_forward)
/* Copy [0..32] and return. */
movdqu (%eax), %xmm0
@@ -127,8 +127,8 @@
jmp L(mm_return)
L(mm_len_32_or_more_forward):
- cmpl $64, %ecx
- jg L(mm_len_64_or_more_forward)
+ cmpl $64, %ecx
+ ja L(mm_len_64_or_more_forward)
/* Copy [0..64] and return. */
movdqu (%eax), %xmm0
@@ -142,8 +142,8 @@
jmp L(mm_return)
L(mm_len_64_or_more_forward):
- cmpl $128, %ecx
- jg L(mm_len_128_or_more_forward)
+ cmpl $128, %ecx
+ ja L(mm_len_128_or_more_forward)
/* Copy [0..128] and return. */
movdqu (%eax), %xmm0
@@ -165,72 +165,66 @@
jmp L(mm_return)
L(mm_len_128_or_more_forward):
-
- cmp $SHARED_CACHE_SIZE_HALF, %ecx
- jae L(mm_large_page_forward)
-
PUSH (%esi)
PUSH (%edi)
- movl %eax, %esi
- movl %edx, %edi
/* Aligning the address of destination. */
- movdqu (%esi), %xmm0
- movdqu 16(%esi), %xmm1
- movdqu 32(%esi), %xmm2
- movdqu 48(%esi), %xmm3
+ movdqu (%eax), %xmm0
+ movdqu 16(%eax), %xmm1
+ movdqu 32(%eax), %xmm2
+ movdqu 48(%eax), %xmm3
- leal 64(%edi), %edx
- andl $-64, %edx
+ leal 64(%edx), %edi
+ andl $-64, %edi
+ subl %edx, %eax
- movl %esi, %eax
- subl %edi, %eax
+ movdqu (%eax, %edi), %xmm4
+ movdqu 16(%eax, %edi), %xmm5
+ movdqu 32(%eax, %edi), %xmm6
+ movdqu 48(%eax, %edi), %xmm7
- movdqu (%edx, %eax), %xmm4
- movdqu 16(%edx, %eax), %xmm5
- movdqu 32(%edx, %eax), %xmm6
- movdqu 48(%edx, %eax), %xmm7
+ movdqu %xmm0, (%edx)
+ movdqu %xmm1, 16(%edx)
+ movdqu %xmm2, 32(%edx)
+ movdqu %xmm3, 48(%edx)
+ movdqa %xmm4, (%edi)
+ movaps %xmm5, 16(%edi)
+ movaps %xmm6, 32(%edi)
+ movaps %xmm7, 48(%edi)
+ addl $64, %edi
- movdqu %xmm0, (%edi)
- movdqu %xmm1, 16(%edi)
- movdqu %xmm2, 32(%edi)
- movdqu %xmm3, 48(%edi)
- movdqa %xmm4, (%edx)
- movdqa %xmm5, 16(%edx)
- movdqa %xmm6, 32(%edx)
- movdqa %xmm7, 48(%edx)
- addl $64, %edx
-
- leal (%edi, %ecx), %ebx
+ leal (%edx, %ecx), %ebx
andl $-64, %ebx
-
- cmp %edx, %ebx
+ cmp %edi, %ebx
jbe L(mm_copy_remaining_forward)
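+/* Lengths of at least SHARED_CACHE_SIZE_HALF use the non-temporal copy
+ loop below, reusing the aligned-destination setup done above. */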
+ cmp $SHARED_CACHE_SIZE_HALF, %ecx
+ jae L(mm_large_page_loop_forward)
+
.p2align 4
L(mm_main_loop_forward):
- prefetcht0 128(%edx, %eax)
+ prefetcht0 128(%eax, %edi)
- movdqu (%edx, %eax), %xmm0
- movdqu 16(%edx, %eax), %xmm1
- movdqu 32(%edx, %eax), %xmm2
- movdqu 48(%edx, %eax), %xmm3
- movdqa %xmm0, (%edx)
- movdqa %xmm1, 16(%edx)
- movdqa %xmm2, 32(%edx)
- movdqa %xmm3, 48(%edx)
- leal 64(%edx), %edx
- cmp %edx, %ebx
+ movdqu (%eax, %edi), %xmm0
+ movdqu 16(%eax, %edi), %xmm1
+ movdqu 32(%eax, %edi), %xmm2
+ movdqu 48(%eax, %edi), %xmm3
+ movdqa %xmm0, (%edi)
+ movaps %xmm1, 16(%edi)
+ movaps %xmm2, 32(%edi)
+ movaps %xmm3, 48(%edi)
+ leal 64(%edi), %edi
+ cmp %edi, %ebx
ja L(mm_main_loop_forward)
L(mm_copy_remaining_forward):
- addl %edi, %ecx
- subl %edx, %ecx
-/* We copied all up till %edx position in the dst.
+ addl %edx, %ecx
+ subl %edi, %ecx
+/* We copied all up till %edi position in the dst.
In %ecx now is how many bytes are left to copy.
Now we need to advance %esi. */
- leal (%edx, %eax), %esi
+ leal (%edi, %eax), %esi
L(mm_remaining_0_64_bytes_forward):
cmp $32, %ecx
@@ -251,8 +245,8 @@
ja L(mm_remaining_3_4_bytes_forward)
movzbl -1(%esi,%ecx), %eax
movzbl (%esi), %ebx
- movb %al, -1(%edx,%ecx)
- movb %bl, (%edx)
+ movb %al, -1(%edi,%ecx)
+ movb %bl, (%edi)
jmp L(mm_return_pop_all)
L(mm_remaining_33_64_bytes_forward):
@@ -260,40 +254,39 @@
movdqu 16(%esi), %xmm1
movdqu -32(%esi, %ecx), %xmm2
movdqu -16(%esi, %ecx), %xmm3
- movdqu %xmm0, (%edx)
- movdqu %xmm1, 16(%edx)
- movdqu %xmm2, -32(%edx, %ecx)
- movdqu %xmm3, -16(%edx, %ecx)
+ movdqu %xmm0, (%edi)
+ movdqu %xmm1, 16(%edi)
+ movdqu %xmm2, -32(%edi, %ecx)
+ movdqu %xmm3, -16(%edi, %ecx)
jmp L(mm_return_pop_all)
L(mm_remaining_17_32_bytes_forward):
movdqu (%esi), %xmm0
movdqu -16(%esi, %ecx), %xmm1
- movdqu %xmm0, (%edx)
- movdqu %xmm1, -16(%edx, %ecx)
- jmp L(mm_return_pop_all)
-
-L(mm_remaining_3_4_bytes_forward):
- movzwl -2(%esi,%ecx), %eax
- movzwl (%esi), %ebx
- movw %ax, -2(%edx,%ecx)
- movw %bx, (%edx)
- jmp L(mm_return_pop_all)
-
-L(mm_remaining_5_8_bytes_forward):
- movl (%esi), %eax
- movl -4(%esi,%ecx), %ebx
- movl %eax, (%edx)
- movl %ebx, -4(%edx,%ecx)
+ movdqu %xmm0, (%edi)
+ movdqu %xmm1, -16(%edi, %ecx)
jmp L(mm_return_pop_all)
L(mm_remaining_9_16_bytes_forward):
movq (%esi), %xmm0
movq -8(%esi, %ecx), %xmm1
- movq %xmm0, (%edx)
- movq %xmm1, -8(%edx, %ecx)
+ movq %xmm0, (%edi)
+ movq %xmm1, -8(%edi, %ecx)
jmp L(mm_return_pop_all)
+L(mm_remaining_5_8_bytes_forward):
+ movl (%esi), %eax
+ movl -4(%esi,%ecx), %ebx
+ movl %eax, (%edi)
+ movl %ebx, -4(%edi,%ecx)
+ jmp L(mm_return_pop_all)
+
+L(mm_remaining_3_4_bytes_forward):
+ movzwl -2(%esi,%ecx), %eax
+ movzwl (%esi), %ebx
+ movw %ax, -2(%edi,%ecx)
+ movw %bx, (%edi)
+ jmp L(mm_return_pop_all)
L(mm_len_0_16_bytes_forward):
testb $24, %cl
@@ -334,15 +327,20 @@
movq %xmm1, -8(%edx, %ecx)
jmp L(mm_return)
+L(mm_recalc_len):
+/* Compute in %ecx how many bytes are left to copy after
+ the main loop stops. */
+ movl %ebx, %ecx
+ subl %edx, %ecx
/* The code for copying backwards. */
L(mm_len_0_or_more_backward):
-/* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
+/* Now do checks for lengths. We do [0..16], [16..32], [32..64], [64..128]
separately. */
cmp $16, %ecx
jbe L(mm_len_0_16_bytes_backward)
- cmpl $32, %ecx
+ cmpl $32, %ecx
jg L(mm_len_32_or_more_backward)
/* Copy [0..32] and return. */
@@ -353,7 +351,7 @@
jmp L(mm_return)
L(mm_len_32_or_more_backward):
- cmpl $64, %ecx
+ cmpl $64, %ecx
jg L(mm_len_64_or_more_backward)
/* Copy [0..64] and return. */
@@ -368,7 +366,7 @@
jmp L(mm_return)
L(mm_len_64_or_more_backward):
- cmpl $128, %ecx
+ cmpl $128, %ecx
jg L(mm_len_128_or_more_backward)
/* Copy [0..128] and return. */
@@ -391,10 +389,6 @@
jmp L(mm_return)
L(mm_len_128_or_more_backward):
-
- cmp $SHARED_CACHE_SIZE_HALF, %ecx
- jae L(mm_large_page_backward)
-
PUSH (%esi)
PUSH (%edi)
@@ -429,17 +423,11 @@
leal 64(%edx), %ebx
andl $-64, %ebx
-/* Compute in %ecx how many bytes are left to copy after
- the main loop stops. */
- movl %ebx, %ecx
- subl %edx, %ecx
-
cmp %edi, %ebx
- jb L(mm_main_loop_backward)
+ jae L(mm_main_loop_backward_end)
- POP (%edi)
- POP (%esi)
- jmp L(mm_len_0_or_more_backward)
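+/* Lengths of at least SHARED_CACHE_SIZE_HALF continue in the
+ non-temporal backward loop. */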
+ cmp $SHARED_CACHE_SIZE_HALF, %ecx
+ jae L(mm_large_page_loop_backward)
.p2align 4
L(mm_main_loop_backward):
@@ -457,9 +445,10 @@
leal -64(%edi), %edi
cmp %edi, %ebx
jb L(mm_main_loop_backward)
+L(mm_main_loop_backward_end):
POP (%edi)
POP (%esi)
- jmp L(mm_len_0_or_more_backward)
+ jmp L(mm_recalc_len)
/* Copy [0..16] and return. */
L(mm_len_0_16_bytes_backward):
@@ -508,151 +497,30 @@
RETURN
L(mm_return_pop_all):
- movl %edi, %eax
+ movl %edx, %eax
POP (%edi)
POP (%esi)
RETURN
/* Big length copy forward part. */
-L(mm_large_page_forward):
-/* Aligning the address of destination. We need to save
- 16 bits from the source in order not to overwrite them. */
-
- PUSH (%esi)
- PUSH (%edi)
- movl %eax, %esi
- movl %edx, %edi
-
- movdqu (%esi), %xmm0
- movdqu 16(%esi), %xmm1
- movdqu 32(%esi), %xmm2
- movdqu 48(%esi), %xmm3
-
- leal 64(%edi), %edx
- andl $-64, %edx
-
- movl %esi, %eax
- subl %edi, %eax
-
- movdqu (%edx, %eax), %xmm4
- movdqu 16(%edx, %eax), %xmm5
- movdqu 32(%edx, %eax), %xmm6
- movdqu 48(%edx, %eax), %xmm7
-
- movdqu %xmm0, (%edi)
- movdqu %xmm1, 16(%edi)
- movdqu %xmm2, 32(%edi)
- movdqu %xmm3, 48(%edi)
- movntdq %xmm4, (%edx)
- movntdq %xmm5, 16(%edx)
- movntdq %xmm6, 32(%edx)
- movntdq %xmm7, 48(%edx)
- addl $64, %edx
-
- leal (%edi, %ecx), %ebx
- andl $-128, %ebx
-
- cmp %edx, %ebx
- jbe L(mm_copy_remaining_forward)
-
.p2align 4
L(mm_large_page_loop_forward):
- movdqu (%edx, %eax), %xmm0
- movdqu 16(%edx, %eax), %xmm1
- movdqu 32(%edx, %eax), %xmm2
- movdqu 48(%edx, %eax), %xmm3
- movdqu 64(%edx, %eax), %xmm4
- movdqu 80(%edx, %eax), %xmm5
- movdqu 96(%edx, %eax), %xmm6
- movdqu 112(%edx, %eax), %xmm7
- movntdq %xmm0, (%edx)
- movntdq %xmm1, 16(%edx)
- movntdq %xmm2, 32(%edx)
- movntdq %xmm3, 48(%edx)
- movntdq %xmm4, 64(%edx)
- movntdq %xmm5, 80(%edx)
- movntdq %xmm6, 96(%edx)
- movntdq %xmm7, 112(%edx)
- leal 128(%edx), %edx
- cmp %edx, %ebx
+ movdqu (%eax, %edi), %xmm0
+ movdqu 16(%eax, %edi), %xmm1
+ movdqu 32(%eax, %edi), %xmm2
+ movdqu 48(%eax, %edi), %xmm3
+ movntdq %xmm0, (%edi)
+ movntdq %xmm1, 16(%edi)
+ movntdq %xmm2, 32(%edi)
+ movntdq %xmm3, 48(%edi)
+ leal 64(%edi), %edi
+ cmp %edi, %ebx
ja L(mm_large_page_loop_forward)
sfence
-
- addl %edi, %ecx
- subl %edx, %ecx
-/* We copied all up till %edx position in the dst.
- In %ecx now is how many bytes are left to copy.
- Now we need to advance %esi. */
- leal (%edx, %eax), %esi
-
- cmp $64, %ecx
- jb L(mm_remaining_0_64_bytes_forward)
-
- movdqu (%esi), %xmm0
- movdqu 16(%esi), %xmm1
- movdqu 32(%esi), %xmm2
- movdqu 48(%esi), %xmm3
- movdqu -64(%esi, %ecx), %xmm4
- movdqu -48(%esi, %ecx), %xmm5
- movdqu -32(%esi, %ecx), %xmm6
- movdqu -16(%esi, %ecx), %xmm7
- movdqu %xmm0, (%edx)
- movdqu %xmm1, 16(%edx)
- movdqu %xmm2, 32(%edx)
- movdqu %xmm3, 48(%edx)
- movdqu %xmm4, -64(%edx, %ecx)
- movdqu %xmm5, -48(%edx, %ecx)
- movdqu %xmm6, -32(%edx, %ecx)
- movdqu %xmm7, -16(%edx, %ecx)
- jmp L(mm_return_pop_all)
-
+ jmp L(mm_copy_remaining_forward)
/* Big length copy backward part. */
-L(mm_large_page_backward):
-/* Aligning the address of destination. We need to save
- 16 bits from the source in order not to overwrite them. */
-
- PUSH (%esi)
- PUSH (%edi)
-
- movdqu -16(%eax, %ecx), %xmm0
- movdqu -32(%eax, %ecx), %xmm1
- movdqu -48(%eax, %ecx), %xmm2
- movdqu -64(%eax, %ecx), %xmm3
-
- leal (%edx, %ecx), %edi
- andl $-64, %edi
-
- movl %eax, %esi
- subl %edx, %esi
-
- movdqu -16(%edi, %esi), %xmm4
- movdqu -32(%edi, %esi), %xmm5
- movdqu -48(%edi, %esi), %xmm6
- movdqu -64(%edi, %esi), %xmm7
-
- movdqu %xmm0, -16(%edx, %ecx)
- movdqu %xmm1, -32(%edx, %ecx)
- movdqu %xmm2, -48(%edx, %ecx)
- movdqu %xmm3, -64(%edx, %ecx)
- movntdq %xmm4, -16(%edi)
- movntdq %xmm5, -32(%edi)
- movntdq %xmm6, -48(%edi)
- movntdq %xmm7, -64(%edi)
- leal -64(%edi), %edi
-
- leal 128(%edx), %ebx
- andl $-64, %ebx
-
-/* Compute in %ecx how many bytes are left to copy after
- the main loop stops. */
- movl %ebx, %ecx
- subl %edx, %ecx
-
- cmp %edi, %ebx
- jae L(mm_len_0_or_more_backward)
-
.p2align 4
L(mm_large_page_loop_backward):
movdqu -64(%edi, %esi), %xmm0
@@ -666,8 +534,9 @@
leal -64(%edi), %edi
cmp %edi, %ebx
jb L(mm_large_page_loop_backward)
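+/* The non-temporal stores above are weakly ordered; make them globally
+ visible before returning. */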
+ sfence
POP (%edi)
POP (%esi)
- jmp L(mm_len_0_or_more_backward)
+ jmp L(mm_recalc_len)
END (MEMMOVE)
diff --git a/libc/arch-x86_64/string/sse2-memmove-slm.S b/libc/arch-x86_64/string/sse2-memmove-slm.S
index ee8440e..0dbffad 100644
--- a/libc/arch-x86_64/string/sse2-memmove-slm.S
+++ b/libc/arch-x86_64/string/sse2-memmove-slm.S
@@ -99,7 +99,7 @@
/* Check whether we should copy backward or forward. */
cmp %rsi, %rdi
je L(mm_return)
- ja L(mm_len_0_or_more_backward)
+ jg L(mm_len_0_or_more_backward)
/* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
separately. */
@@ -107,7 +107,7 @@
jbe L(mm_len_0_16_bytes_forward)
cmp $32, %rdx
- jg L(mm_len_32_or_more_forward)
+ ja L(mm_len_32_or_more_forward)
/* Copy [0..32] and return. */
movdqu (%rsi), %xmm0
@@ -118,7 +118,7 @@
L(mm_len_32_or_more_forward):
cmp $64, %rdx
- jg L(mm_len_64_or_more_forward)
+ ja L(mm_len_64_or_more_forward)
/* Copy [0..64] and return. */
movdqu (%rsi), %xmm0
@@ -133,7 +133,7 @@
L(mm_len_64_or_more_forward):
cmp $128, %rdx
- jg L(mm_len_128_or_more_forward)
+ ja L(mm_len_128_or_more_forward)
/* Copy [0..128] and return. */
movdqu (%rsi), %xmm0
@@ -155,13 +155,6 @@
jmp L(mm_return)
L(mm_len_128_or_more_forward):
-
- cmp $SHARED_CACHE_SIZE_HALF, %rdx
- jae L(mm_large_page_forward)
-
- mov %rsi, %r8 // copy src to r8
- mov %rdi, %r9 // copy dst to r9
-
/* Aligning the address of destination. */
/* save first unaligned 64 bytes */
movdqu (%rsi), %xmm0
@@ -169,56 +162,57 @@
movdqu 32(%rsi), %xmm2
movdqu 48(%rsi), %xmm3
- lea 64(%r9), %rdi
- and $-64, %rdi /* rdi now aligned to next 64 byte boundary */
+ lea 64(%rdi), %r8
+ and $-64, %r8 /* r8 now aligned to next 64 byte boundary */
+ sub %rdi, %rsi /* rsi = src - dst = diff */
- sub %r9, %rsi /* rsi = src - dst = diff */
+ movdqu (%r8, %rsi), %xmm4
+ movdqu 16(%r8, %rsi), %xmm5
+ movdqu 32(%r8, %rsi), %xmm6
+ movdqu 48(%r8, %rsi), %xmm7
- movdqu (%rdi, %rsi), %xmm4
- movdqu 16(%rdi, %rsi), %xmm5
- movdqu 32(%rdi, %rsi), %xmm6
- movdqu 48(%rdi, %rsi), %xmm7
+ movdqu %xmm0, (%rdi)
+ movdqu %xmm1, 16(%rdi)
+ movdqu %xmm2, 32(%rdi)
+ movdqu %xmm3, 48(%rdi)
+ movdqa %xmm4, (%r8)
+ movaps %xmm5, 16(%r8)
+ movaps %xmm6, 32(%r8)
+ movaps %xmm7, 48(%r8)
+ add $64, %r8
- movdqu %xmm0, (%r9)
- movdqu %xmm1, 16(%r9)
- movdqu %xmm2, 32(%r9)
- movdqu %xmm3, 48(%r9)
- movdqa %xmm4, (%rdi)
- movdqa %xmm5, 16(%rdi)
- movdqa %xmm6, 32(%rdi)
- movdqa %xmm7, 48(%rdi)
- add $64, %rdi
-
- lea (%r9, %rdx), %rbx
+ lea (%rdi, %rdx), %rbx
and $-64, %rbx
-
- cmp %rdi, %rbx
+ cmp %r8, %rbx
jbe L(mm_copy_remaining_forward)
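+/* Lengths of at least SHARED_CACHE_SIZE_HALF use the non-temporal copy
+ loop below, reusing the aligned-destination setup done above. */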
+ cmp $SHARED_CACHE_SIZE_HALF, %rdx
+ jae L(mm_large_page_loop_forward)
+
.p2align 4
L(mm_main_loop_forward):
- prefetcht0 128(%rdi, %rsi)
+ prefetcht0 128(%r8, %rsi)
- movdqu (%rdi, %rsi), %xmm0
- movdqu 16(%rdi, %rsi), %xmm1
- movdqu 32(%rdi, %rsi), %xmm2
- movdqu 48(%rdi, %rsi), %xmm3
- movdqa %xmm0, (%rdi)
- movdqa %xmm1, 16(%rdi)
- movdqa %xmm2, 32(%rdi)
- movdqa %xmm3, 48(%rdi)
- lea 64(%rdi), %rdi
- cmp %rdi, %rbx
+ movdqu (%r8, %rsi), %xmm0
+ movdqu 16(%r8, %rsi), %xmm1
+ movdqu 32(%r8, %rsi), %xmm2
+ movdqu 48(%r8, %rsi), %xmm3
+ movdqa %xmm0, (%r8)
+ movaps %xmm1, 16(%r8)
+ movaps %xmm2, 32(%r8)
+ movaps %xmm3, 48(%r8)
+ lea 64(%r8), %r8
+ cmp %r8, %rbx
ja L(mm_main_loop_forward)
L(mm_copy_remaining_forward):
- add %r9, %rdx
- sub %rdi, %rdx
+ add %rdi, %rdx
+ sub %r8, %rdx
/* We copied all up till %rdi position in the dst.
In %rdx now is how many bytes are left to copy.
Now we need to advance %r8. */
- lea (%rdi, %rsi), %r8
+ lea (%r8, %rsi), %r9
L(mm_remaining_0_64_bytes_forward):
cmp $32, %rdx
@@ -237,49 +231,49 @@
cmpb $2, %dl
.p2align 4,,1
ja L(mm_remaining_3_4_bytes_forward)
- movzbl -1(%r8,%rdx), %esi
- movzbl (%r8), %ebx
- movb %sil, -1(%rdi,%rdx)
- movb %bl, (%rdi)
+ movzbl -1(%r9,%rdx), %esi
+ movzbl (%r9), %ebx
+ movb %sil, -1(%r8,%rdx)
+ movb %bl, (%r8)
jmp L(mm_return)
L(mm_remaining_33_64_bytes_forward):
- movdqu (%r8), %xmm0
- movdqu 16(%r8), %xmm1
- movdqu -32(%r8, %rdx), %xmm2
- movdqu -16(%r8, %rdx), %xmm3
- movdqu %xmm0, (%rdi)
- movdqu %xmm1, 16(%rdi)
- movdqu %xmm2, -32(%rdi, %rdx)
- movdqu %xmm3, -16(%rdi, %rdx)
+ movdqu (%r9), %xmm0
+ movdqu 16(%r9), %xmm1
+ movdqu -32(%r9, %rdx), %xmm2
+ movdqu -16(%r9, %rdx), %xmm3
+ movdqu %xmm0, (%r8)
+ movdqu %xmm1, 16(%r8)
+ movdqu %xmm2, -32(%r8, %rdx)
+ movdqu %xmm3, -16(%r8, %rdx)
jmp L(mm_return)
L(mm_remaining_17_32_bytes_forward):
- movdqu (%r8), %xmm0
- movdqu -16(%r8, %rdx), %xmm1
- movdqu %xmm0, (%rdi)
- movdqu %xmm1, -16(%rdi, %rdx)
- jmp L(mm_return)
-
-L(mm_remaining_3_4_bytes_forward):
- movzwl -2(%r8,%rdx), %esi
- movzwl (%r8), %ebx
- movw %si, -2(%rdi,%rdx)
- movw %bx, (%rdi)
+ movdqu (%r9), %xmm0
+ movdqu -16(%r9, %rdx), %xmm1
+ movdqu %xmm0, (%r8)
+ movdqu %xmm1, -16(%r8, %rdx)
jmp L(mm_return)
L(mm_remaining_5_8_bytes_forward):
- movl (%r8), %esi
- movl -4(%r8,%rdx), %ebx
- movl %esi, (%rdi)
- movl %ebx, -4(%rdi,%rdx)
+ movl (%r9), %esi
+ movl -4(%r9,%rdx), %ebx
+ movl %esi, (%r8)
+ movl %ebx, -4(%r8,%rdx)
jmp L(mm_return)
L(mm_remaining_9_16_bytes_forward):
- mov (%r8), %rsi
- mov -8(%r8, %rdx), %rbx
- mov %rsi, (%rdi)
- mov %rbx, -8(%rdi, %rdx)
+ mov (%r9), %rsi
+ mov -8(%r9, %rdx), %rbx
+ mov %rsi, (%r8)
+ mov %rbx, -8(%r8, %rdx)
+ jmp L(mm_return)
+
+L(mm_remaining_3_4_bytes_forward):
+ movzwl -2(%r9,%rdx), %esi
+ movzwl (%r9), %ebx
+ movw %si, -2(%r8,%rdx)
+ movw %bx, (%r8)
jmp L(mm_return)
L(mm_len_0_16_bytes_forward):
@@ -321,16 +315,21 @@
mov %rsi, -8(%rdi, %rdx)
jmp L(mm_return)
+L(mm_recalc_len):
+/* Compute in %rdx how many bytes are left to copy after
+ the main loop stops. */
+ mov %rbx, %rdx
+ sub %rdi, %rdx
/* The code for copying backwards. */
L(mm_len_0_or_more_backward):
-/* Now do checks for lengths. We do [0..16], [0..32], [0..64], [0..128]
+/* Now do checks for lengths. We do [0..16], [16..32], [32..64], [64..128]
separately. */
cmp $16, %rdx
jbe L(mm_len_0_16_bytes_backward)
cmp $32, %rdx
- jg L(mm_len_32_or_more_backward)
+ ja L(mm_len_32_or_more_backward)
/* Copy [0..32] and return. */
movdqu (%rsi), %xmm0
@@ -341,7 +340,7 @@
L(mm_len_32_or_more_backward):
cmp $64, %rdx
- jg L(mm_len_64_or_more_backward)
+ ja L(mm_len_64_or_more_backward)
/* Copy [0..64] and return. */
movdqu (%rsi), %xmm0
@@ -356,7 +355,7 @@
L(mm_len_64_or_more_backward):
cmp $128, %rdx
- jg L(mm_len_128_or_more_backward)
+ ja L(mm_len_128_or_more_backward)
/* Copy [0..128] and return. */
movdqu (%rsi), %xmm0
@@ -378,10 +377,6 @@
jmp L(mm_return)
L(mm_len_128_or_more_backward):
-
- cmp $SHARED_CACHE_SIZE_HALF, %rdx
- jae L(mm_large_page_backward)
-
/* Aligning the address of destination. We need to save
16 bits from the source in order not to overwrite them. */
movdqu -16(%rsi, %rdx), %xmm0
@@ -405,22 +400,19 @@
movdqu %xmm2, -48(%rdi, %rdx)
movdqu %xmm3, -64(%rdi, %rdx)
movdqa %xmm4, -16(%r9)
- movdqa %xmm5, -32(%r9)
- movdqa %xmm6, -48(%r9)
- movdqa %xmm7, -64(%r9)
+ movaps %xmm5, -32(%r9)
+ movaps %xmm6, -48(%r9)
+ movaps %xmm7, -64(%r9)
lea -64(%r9), %r9
lea 64(%rdi), %rbx
and $-64, %rbx
-/* Compute in %rdx how many bytes are left to copy after
- the main loop stops. */
- mov %rbx, %rdx
- sub %rdi, %rdx
-
cmp %r9, %rbx
- jb L(mm_main_loop_backward)
- jmp L(mm_len_0_or_more_backward)
+ jae L(mm_recalc_len)
+
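+/* Lengths of at least SHARED_CACHE_SIZE_HALF continue in the
+ non-temporal backward loop. */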
+ cmp $SHARED_CACHE_SIZE_HALF, %rdx
+ jae L(mm_large_page_loop_backward)
.p2align 4
L(mm_main_loop_backward):
@@ -432,13 +424,13 @@
movdqu -32(%r9, %r8), %xmm2
movdqu -16(%r9, %r8), %xmm3
movdqa %xmm0, -64(%r9)
- movdqa %xmm1, -48(%r9)
- movdqa %xmm2, -32(%r9)
- movdqa %xmm3, -16(%r9)
+ movaps %xmm1, -48(%r9)
+ movaps %xmm2, -32(%r9)
+ movaps %xmm3, -16(%r9)
lea -64(%r9), %r9
cmp %r9, %rbx
jb L(mm_main_loop_backward)
- jmp L(mm_len_0_or_more_backward)
+ jmp L(mm_recalc_len)
/* Copy [0..16] and return. */
L(mm_len_0_16_bytes_backward):
@@ -485,138 +477,23 @@
/* Big length copy forward part. */
-L(mm_large_page_forward):
-/* Aligning the address of destination. We need to save
- 16 bits from the source in order not to overwrite them. */
-
- mov %rsi, %r8
- mov %rdi, %r9
-
- movdqu (%rsi), %xmm0
- movdqu 16(%rsi), %xmm1
- movdqu 32(%rsi), %xmm2
- movdqu 48(%rsi), %xmm3
-
- lea 64(%r9), %rdi
- and $-64, %rdi /* rdi = aligned dst */
-
- sub %r9, %rsi /* rsi = diff */
-
- movdqu (%rdi, %rsi), %xmm4
- movdqu 16(%rdi, %rsi), %xmm5
- movdqu 32(%rdi, %rsi), %xmm6
- movdqu 48(%rdi, %rsi), %xmm7
-
- movdqu %xmm0, (%r9)
- movdqu %xmm1, 16(%r9)
- movdqu %xmm2, 32(%r9)
- movdqu %xmm3, 48(%r9)
- movntdq %xmm4, (%rdi)
- movntdq %xmm5, 16(%rdi)
- movntdq %xmm6, 32(%rdi)
- movntdq %xmm7, 48(%rdi)
- add $64, %rdi
-
- lea (%r9, %rdx), %rbx
- and $-128, %rbx
-
- cmp %rdi, %rbx
- jbe L(mm_copy_remaining_forward)
-
.p2align 4
L(mm_large_page_loop_forward):
- movdqu (%rdi, %rsi), %xmm0
- movdqu 16(%rdi, %rsi), %xmm1
- movdqu 32(%rdi, %rsi), %xmm2
- movdqu 48(%rdi, %rsi), %xmm3
- movdqu 64(%rdi, %rsi), %xmm4
- movdqu 80(%rdi, %rsi), %xmm5
- movdqu 96(%rdi, %rsi), %xmm6
- movdqu 112(%rdi, %rsi), %xmm7
- movntdq %xmm0, (%rdi)
- movntdq %xmm1, 16(%rdi)
- movntdq %xmm2, 32(%rdi)
- movntdq %xmm3, 48(%rdi)
- movntdq %xmm4, 64(%rdi)
- movntdq %xmm5, 80(%rdi)
- movntdq %xmm6, 96(%rdi)
- movntdq %xmm7, 112(%rdi)
- lea 128(%rdi), %rdi
- cmp %rdi, %rbx
+ movdqu (%r8, %rsi), %xmm0
+ movdqu 16(%r8, %rsi), %xmm1
+ movdqu 32(%r8, %rsi), %xmm2
+ movdqu 48(%r8, %rsi), %xmm3
+ movntdq %xmm0, (%r8)
+ movntdq %xmm1, 16(%r8)
+ movntdq %xmm2, 32(%r8)
+ movntdq %xmm3, 48(%r8)
+ lea 64(%r8), %r8
+ cmp %r8, %rbx
ja L(mm_large_page_loop_forward)
sfence
-
- add %r9, %rdx
- sub %rdi, %rdx
-/* We copied all up till %rdi position in the dst.
- In %rdx now is how many bytes are left to copy.
- Now we need to advance %r8. */
- lea (%rdi, %rsi), %r8
-
- cmp $64, %rdx
- jb L(mm_remaining_0_64_bytes_forward)
-
- movdqu (%r8), %xmm0
- movdqu 16(%r8), %xmm1
- movdqu 32(%r8), %xmm2
- movdqu 48(%r8), %xmm3
- movdqu -64(%r8, %rdx), %xmm4
- movdqu -48(%r8, %rdx), %xmm5
- movdqu -32(%r8, %rdx), %xmm6
- movdqu -16(%r8, %rdx), %xmm7
- movdqu %xmm0, (%rdi)
- movdqu %xmm1, 16(%rdi)
- movdqu %xmm2, 32(%rdi)
- movdqu %xmm3, 48(%rdi)
- movdqu %xmm4, -64(%rdi, %rdx)
- movdqu %xmm5, -48(%rdi, %rdx)
- movdqu %xmm6, -32(%rdi, %rdx)
- movdqu %xmm7, -16(%rdi, %rdx)
- jmp L(mm_return)
-
+ jmp L(mm_copy_remaining_forward)
/* Big length copy backward part. */
-L(mm_large_page_backward):
-/* Aligning the address of destination. We need to save
- 16 bits from the source in order not to overwrite them. */
-
- movdqu -16(%rsi, %rdx), %xmm0
- movdqu -32(%rsi, %rdx), %xmm1
- movdqu -48(%rsi, %rdx), %xmm2
- movdqu -64(%rsi, %rdx), %xmm3
-
- lea (%rdi, %rdx), %r9
- and $-64, %r9
-
- mov %rsi, %r8
- sub %rdi, %r8
-
- movdqu -16(%r9, %r8), %xmm4
- movdqu -32(%r9, %r8), %xmm5
- movdqu -48(%r9, %r8), %xmm6
- movdqu -64(%r9, %r8), %xmm7
-
- movdqu %xmm0, -16(%rdi, %rdx)
- movdqu %xmm1, -32(%rdi, %rdx)
- movdqu %xmm2, -48(%rdi, %rdx)
- movdqu %xmm3, -64(%rdi, %rdx)
- movntdq %xmm4, -16(%r9)
- movntdq %xmm5, -32(%r9)
- movntdq %xmm6, -48(%r9)
- movntdq %xmm7, -64(%r9)
- lea -64(%r9), %r9
-
- lea 128(%rdi), %rbx
- and $-64, %rbx
-
-/* Compute in %rdx how many bytes are left to copy after
- the main loop stops. */
- mov %rbx, %rdx
- sub %rdi, %rdx
-
- cmp %r9, %rbx
- jae L(mm_len_0_or_more_backward)
-
.p2align 4
L(mm_large_page_loop_backward):
movdqu -64(%r9, %r8), %xmm0
@@ -630,6 +507,7 @@
lea -64(%r9), %r9
cmp %r9, %rbx
jb L(mm_large_page_loop_backward)
- jmp L(mm_len_0_or_more_backward)
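+/* The non-temporal stores above are weakly ordered; make them globally
+ visible before returning. */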
+ sfence
+ jmp L(mm_recalc_len)
END (MEMMOVE)
diff --git a/tests/string_test.cpp b/tests/string_test.cpp
index f17e575..c62f43b 100644
--- a/tests/string_test.cpp
+++ b/tests/string_test.cpp
@@ -909,6 +909,35 @@
}
}
+TEST(string, memmove_cache_size) {
+ size_t len = 600000;
+ int max_alignment = 31;
+ int alignments[] = {0, 5, 11, 29, 30};
+ char* ptr = reinterpret_cast<char*>(malloc(sizeof(char) * len));
+ char* ptr1 = reinterpret_cast<char*>(malloc(2 * sizeof(char) * len));
+ char* glob_ptr2 = reinterpret_cast<char*>(malloc(2 * sizeof(char) * len + max_alignment));
+ size_t pos = 64;
+
+ ASSERT_TRUE(ptr != NULL);
+ ASSERT_TRUE(ptr1 != NULL);
+ ASSERT_TRUE(glob_ptr2 != NULL);
+
+ for (int i = 0; i < 5; i++) {
+ char* ptr2 = glob_ptr2 + alignments[i];
+ memset(ptr1, random() & 255, 2 * len);
+ memset(ptr1, random() & 255, len);
+ memcpy(ptr2, ptr1, 2 * len);
+ memcpy(ptr, ptr1, len);
+ memcpy(ptr1 + pos, ptr, len);
+
+ ASSERT_TRUE(memmove(ptr2 + pos, ptr, len) == ptr2 + pos);
+ ASSERT_EQ(0, memcmp(ptr2, ptr1, 2 * len));
+ }
+ free(ptr);
+ free(ptr1);
+ free(glob_ptr2);
+}
+
static void verify_memmove(char* src_copy, char* dst, char* src, size_t size) {
memset(dst, 0, size);
memcpy(src, src_copy, size);