/* Copyright (c) 2014, Linaro Limited
   All rights reserved.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
       * Redistributions of source code must retain the above copyright
         notice, this list of conditions and the following disclaimer.
       * Redistributions in binary form must reproduce the above copyright
         notice, this list of conditions and the following disclaimer in the
         documentation and/or other materials provided with the distribution.
       * Neither the name of the Linaro nor the
         names of its contributors may be used to endorse or promote products
         derived from this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/* Assumptions:
 *
 * ARMv8-a, AArch64
 */

#include <private/bionic_asm.h>

/* Parameters and result. */
#define src1        x0
#define src2        x1
#define limit       x2
#define result      x0

/* Internal variables. */
#define data1       x3
#define data1w      w3
#define data2       x4
#define data2w      w4
#define has_nul     x5
#define diff        x6
#define endloop     x7
#define tmp1        x8
#define tmp2        x9
#define tmp3        x10
#define pos         x11
#define limit_wd    x12
#define mask        x13

ENTRY(memcmp)
        cbz     limit, .Lret0
        eor     tmp1, src1, src2
        tst     tmp1, #7
        b.ne    .Lmisaligned8
        ands    tmp1, src1, #7
        b.ne    .Lmutual_align
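
        /* Dispatch: limit == 0 returns 0 immediately.  If the low three
           bits of src1 and src2 differ, the pointers can never become
           mutually 8-byte aligned, so use the byte-at-a-time loop.  If
           they agree but are non-zero, realign both pointers in
           .Lmutual_align; otherwise fall into the aligned word loop. */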
        add     limit_wd, limit, #7
        lsr     limit_wd, limit_wd, #3
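        /* limit_wd = (limit + 7) / 8: the number of 8-byte words needed
           to cover the buffer, e.g. limit == 13 gives limit_wd == 2. */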
        /* Start of performance-critical section -- one 64B cache line. */
.Lloop_aligned:
        ldr     data1, [src1], #8
        ldr     data2, [src2], #8
.Lstart_realigned:
        subs    limit_wd, limit_wd, #1
        eor     diff, data1, data2      /* Non-zero if differences found. */
        csinv   endloop, diff, xzr, ne  /* Last Dword or differences. */
        cbz     endloop, .Lloop_aligned
        /* End of performance-critical section -- one 64B cache line. */
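
        /* The csinv folds both exit conditions into one register: while
           words remain (the subs left Z clear), endloop = diff, so the
           loop exits only on a difference; on the final word endloop =
           ~xzr = all-ones, forcing an exit regardless.  A single cbz
           therefore tests "last word OR bytes differ". */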

        /* Not reached the limit, must have found a diff. */
        cbnz    limit_wd, .Lnot_limit

        /* Limit % 8 == 0 => all bytes significant. */
        ands    limit, limit, #7
        b.eq    .Lnot_limit

        lsl     limit, limit, #3        /* Bytes -> bits. */
        mov     mask, #~0
#ifdef __AARCH64EB__
        lsr     mask, mask, limit
#else
        lsl     mask, mask, limit
#endif
        bic     data1, data1, mask
        bic     data2, data2, mask

        orr     diff, diff, mask
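
        /* Example (little-endian): with limit % 8 == 3, mask is ~0 << 24,
           i.e. ones over the five insignificant bytes of the last word.
           The bics zero those bytes in both data words, and the orr
           plants set bits in diff just past the limit, so the clz below
           finds an end-of-data marker when the significant bytes match. */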
.Lnot_limit:

#ifndef __AARCH64EB__
        rev     diff, diff
        rev     data1, data1
        rev     data2, data2
#endif
        /* The MS-non-zero bit of DIFF marks either the first bit
           that is different, or the end of the significant data.
           Shifting left now will bring the critical information into the
           top bits. */
        clz     pos, diff
        lsl     data1, data1, pos
        lsl     data2, data2, pos
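        /* If the buffers differ, the first differing bit now sits at
           bit 63: all higher bits were equal, so the top bytes pulled
           out below differ first in their most significant bit and the
           unsigned subtraction has the right sign.  If only the
           end-of-data marker was reached, both shifted words match and
           the result is zero. */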
        /* But we need to zero-extend (char is unsigned) the value and then
           perform a signed 32-bit subtraction. */
        lsr     data1, data1, #56
        sub     result, data1, data2, lsr #56
        ret

.Lmutual_align:
        /* Sources are mutually aligned, but are not currently at an
           alignment boundary.  Round down the addresses and then mask off
           the bytes that precede the start point. */
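        /* E.g. with both pointers at offset 3 within their words: round
           both down by 3 bytes, load full dwords, and force the three
           leading bytes (which precede the real data) to all-ones in
           both words so they compare equal in the main loop.  The limit
           is grown by 3 to account for the extra bytes loaded. */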
        bic     src1, src1, #7
        bic     src2, src2, #7
        add     limit, limit, tmp1      /* Adjust the limit for the extra. */
        lsl     tmp1, tmp1, #3          /* Bytes beyond alignment -> bits. */
        ldr     data1, [src1], #8
        neg     tmp1, tmp1              /* Bits to alignment -64. */
        ldr     data2, [src2], #8
        mov     tmp2, #~0
#ifdef __AARCH64EB__
        /* Big-endian.  Early bytes are at MSB. */
        lsl     tmp2, tmp2, tmp1        /* Shift (tmp1 & 63). */
#else
        /* Little-endian.  Early bytes are at LSB. */
        lsr     tmp2, tmp2, tmp1        /* Shift (tmp1 & 63). */
#endif
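        /* Variable shift amounts are taken modulo 64, so shifting by
           the negated bit count is shifting by (64 - tmp1): tmp2 ends
           up with ones over exactly the bytes before the start point. */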
        add     limit_wd, limit, #7
        orr     data1, data1, tmp2
        orr     data2, data2, tmp2
        lsr     limit_wd, limit_wd, #3
        b       .Lstart_realigned

.Lret0:
        mov     result, #0
        ret

        .p2align 6
.Lmisaligned8:
        sub     limit, limit, #1
1:
        /* Perhaps we can do better than this. */
        ldrb    data1w, [src1], #1
        ldrb    data2w, [src2], #1
        subs    limit, limit, #1
        ccmp    data1w, data2w, #0, cs  /* NZCV = 0b0000. */
        b.eq    1b
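        /* The subs leaves C set while the decrement does not borrow,
           i.e. while bytes remain (the extra sub above pre-biases the
           count).  With C set, ccmp compares the two bytes; once the
           limit is exhausted it forces NZCV to 0b0000 instead, which
           clears Z and exits the loop with the bytes still equal. */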
        sub     result, data1, data2
        ret
END(memcmp)