/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/cpu-features.h>

#if __ARM_ARCH__ == 7 || defined(__ARM_NEON__)

        .text
        .fpu    neon

        .global memcpy
        .type   memcpy, %function
        .align  4

#define NEON_MAX_PREFETCH_DISTANCE 320

memcpy:
        mov     ip, r0
        cmp     r2, #16
        blt     4f      @ Have less than 16 bytes to copy

        @ First ensure 16 byte alignment for the destination buffer
        tst     r0, #0xF
        beq     2f
        tst     r0, #1
        ldrneb  r3, [r1], #1
        strneb  r3, [ip], #1
        subne   r2, r2, #1
        tst     ip, #2
        ldrneb  r3, [r1], #1
        strneb  r3, [ip], #1
        ldrneb  r3, [r1], #1
        strneb  r3, [ip], #1
        subne   r2, r2, #2

        tst     ip, #4
        beq     1f
        vld4.8  {d0[0], d1[0], d2[0], d3[0]}, [r1]!
        vst4.8  {d0[0], d1[0], d2[0], d3[0]}, [ip, :32]!
        sub     r2, r2, #4
1:
        tst     ip, #8
        beq     2f
        vld1.8  {d0}, [r1]!
        vst1.8  {d0}, [ip, :64]!
        sub     r2, r2, #8
2:
        subs    r2, r2, #32
        blt     3f
        mov     r3, #32

        @ Main copy loop, 32 bytes are processed per iteration.
        @ ARM instructions are used for doing fine-grained prefetch,
        @ increasing prefetch distance progressively up to
        @ NEON_MAX_PREFETCH_DISTANCE at runtime
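        @ (r3 is the current prefetch distance: the addle below grows it by
        @ 32 bytes per iteration until it reaches NEON_MAX_PREFETCH_DISTANCE,
        @ so the prefetch window ramps up gradually rather than reaching far
        @ ahead of what may turn out to be a short copy)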
1:
        vld1.8  {d0-d3}, [r1]!
        cmp     r3, #(NEON_MAX_PREFETCH_DISTANCE - 32)
        pld     [r1, r3]
        addle   r3, r3, #32
        vst1.8  {d0-d3}, [ip, :128]!
        sub     r2, r2, #32
        cmp     r2, r3
        bge     1b
        cmp     r2, #0
        blt     3f
1:      @ Copy the remaining part of the buffer (already prefetched)
        vld1.8  {d0-d3}, [r1]!
        subs    r2, r2, #32
        vst1.8  {d0-d3}, [ip, :128]!
        bge     1b
3:      @ Copy up to 31 remaining bytes
        tst     r2, #16
        beq     4f
        vld1.8  {d0, d1}, [r1]!
        vst1.8  {d0, d1}, [ip, :128]!
4:
        @ Use ARM instructions exclusively for the final trailing part that
        @ does not fully fit into a 16 byte aligned block, in order to avoid
        @ the "ARM store after NEON store" hazard. Also, the NEON pipeline
        @ will be (mostly) flushed by the time control returns to the caller,
        @ making the use of NEON mostly transparent (and avoiding hazards in
        @ the caller code)

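        @ Fewer than 16 bytes remain to be copied here.  Shifting the length
        @ left by 29 moves bit 3 into the carry flag and bit 2 into the
        @ negative flag, so the conditional byte copies below move 8 bytes
        @ on "cs" and 4 bytes on "mi"; the shift by 31 afterwards does the
        @ same for the final 2 and 1 byte chunks.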
        movs    r3, r2, lsl #29
        bcc     1f
        .rept   8
        ldrcsb  r3, [r1], #1
        strcsb  r3, [ip], #1
        .endr
1:
        bpl     1f
        .rept   4
        ldrmib  r3, [r1], #1
        strmib  r3, [ip], #1
        .endr
1:
        movs    r2, r2, lsl #31
        ldrcsb  r3, [r1], #1
        strcsb  r3, [ip], #1
        ldrcsb  r3, [r1], #1
        strcsb  r3, [ip], #1
        ldrmib  r3, [r1], #1
        strmib  r3, [ip], #1
        bx      lr

#else   /* __ARM_ARCH__ < 7 */

        .text

        .global memcpy
        .type   memcpy, %function
        .align  4

        /*
         * Optimized memcpy() for ARM.
         *
         * note that memcpy() always returns the destination pointer,
         * so we have to preserve R0.
         */

memcpy:
        /* The stack must always be 64-bits aligned to be compliant with the
         * ARM ABI. Since we have to save R0, we might as well save R4
         * which we can use for better pipelining of the reads below
         */
        .fnstart
        .save   {r0, r4, lr}
        stmfd   sp!, {r0, r4, lr}
        /* Making room for r5-r11 which will be spilled later */
        .pad    #28
        sub     sp, sp, #28

        // preload the destination because we'll align it to a cache line
        // with small writes. Also start the source "pump".
        PLD     (r0, #0)
        PLD     (r1, #0)
        PLD     (r1, #32)

        /* it simplifies things to take care of len<4 early */
        cmp     r2, #4
        blo     copy_last_3_and_return

        /* compute the offset to align the source
         * offset = (4-(src&3))&3 = -src & 3
         */
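        /* e.g. src & 3 == 1 gives offset 3, src & 3 == 3 gives offset 1,
         * and an already aligned source gives offset 0
         */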
        rsb     r3, r1, #0
        ands    r3, r3, #3
        beq     src_aligned

        /* align source to 32 bits. We need to insert 2 instructions between
         * a ldr[b|h] and str[b|h] because byte and half-word instructions
         * stall 2 cycles.
         */
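        /* r3 is 1, 2 or 3 here; lsl #31 puts its bit 0 in the negative flag
         * and its bit 1 in the carry flag, so the "mi" pair below copies one
         * byte and the two "cs" pairs copy two more
         */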
        movs    r12, r3, lsl #31
        sub     r2, r2, r3              /* we know that r3 <= r2 because r2 >= 4 */
        ldrmib  r3, [r1], #1
        ldrcsb  r4, [r1], #1
        ldrcsb  r12, [r1], #1
        strmib  r3, [r0], #1
        strcsb  r4, [r0], #1
        strcsb  r12, [r0], #1

src_aligned:

        /* see if src and dst are aligned together (congruent) */
        eor     r12, r0, r1
        tst     r12, #3
        bne     non_congruent

        /* Use post-increment mode for stm to spill r5-r11 to reserved stack
         * frame. Don't update sp.
         */
        stmea   sp, {r5-r11}

        /* align the destination to a cache-line */
        rsb     r3, r0, #0
        ands    r3, r3, #0x1C
        beq     congruent_aligned32
        cmp     r3, r2
        andhi   r3, r2, #0x1C

        /* conditionally copies 0 to 7 words (length in r3) */
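        /* lsl #28 puts bit 4 of r3 in the carry flag and bit 3 in the
         * negative flag, so the ldm/stm pairs below move 16 bytes on "cs"
         * and 8 bytes on "mi"; the explicit tst then handles the last word
         */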
        movs    r12, r3, lsl #28
        ldmcsia r1!, {r4, r5, r6, r7}   /* 16 bytes */
        ldmmiia r1!, {r8, r9}           /* 8 bytes */
        stmcsia r0!, {r4, r5, r6, r7}
        stmmiia r0!, {r8, r9}
        tst     r3, #0x4
        ldrne   r10, [r1], #4           /* 4 bytes */
        strne   r10, [r0], #4
        sub     r2, r2, r3

congruent_aligned32:
        /*
         * here source is aligned to 32 bytes.
         */

cached_aligned32:
        subs    r2, r2, #32
        blo     less_than_32_left

        /*
         * We preload a cache-line up to 64 bytes ahead. On the 926, this will
         * stall only until the requested word is fetched, but the linefill
         * continues in the background.
         * While the linefill is going, we write our previous cache-line
         * into the write-buffer (which should have some free space).
         * When the linefill is done, the writebuffer will
         * start dumping its content into memory
         *
         * While all this is going, we then load a full cache line into
         * 8 registers; this cache line should be in the cache by now
         * (or partly in the cache).
         *
         * This code should work well regardless of the source/dest alignment.
         *
         */

        // Align the preload register to a cache-line because the cpu does
        // "critical word first" (the first word requested is loaded first).
        bic     r12, r1, #0x1F
        add     r12, r12, #64

1:      ldmia   r1!, { r4-r11 }
        PLD     (r12, #64)
        subs    r2, r2, #32

        // NOTE: if r12 is more than 64 ahead of r1, the following ldrhi
        // for ARM9 preload will not be safely guarded by the preceding subs.
        // When it is safely guarded the only possibility to have SIGSEGV here
        // is because the caller overstates the length.
        ldrhi   r3, [r12], #32          /* cheap ARM9 preload */
        stmia   r0!, { r4-r11 }
        bhs     1b

        add     r2, r2, #32




less_than_32_left:
        /*
         * less than 32 bytes left at this point (length in r2)
         */

        /* skip all this if there is nothing to do, which should
         * be a common case (if not executed the code below takes
         * about 16 cycles)
         */
        tst     r2, #0x1F
        beq     1f

        /* conditionally copies 0 to 31 bytes */
        movs    r12, r2, lsl #28
        ldmcsia r1!, {r4, r5, r6, r7}   /* 16 bytes */
        ldmmiia r1!, {r8, r9}           /* 8 bytes */
        stmcsia r0!, {r4, r5, r6, r7}
        stmmiia r0!, {r8, r9}
        movs    r12, r2, lsl #30
        ldrcs   r3, [r1], #4            /* 4 bytes */
        ldrmih  r4, [r1], #2            /* 2 bytes */
        strcs   r3, [r0], #4
        strmih  r4, [r0], #2
        tst     r2, #0x1
        ldrneb  r3, [r1]                /* last byte */
        strneb  r3, [r0]

        /* we're done! restore everything and return */
1:      ldmfd   sp!, {r5-r11}
        ldmfd   sp!, {r0, r4, lr}
        bx      lr

        /********************************************************************/

non_congruent:
        /*
         * here source is aligned to 4 bytes
         * but destination is not.
         *
         * in the code below r2 is the number of bytes read
         * (the number of bytes written is always smaller, because we have
         * partial words in the shift queue)
         */
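        /* An illustrative C-like sketch of the shift/merge technique used
         * below (not part of the build; "left"/"right" stand for the lr/r12
         * shift amounts in bits, "queue" for the partial word carried in r3):
         *
         *     queue = *src++;
         *     while (at least a word remains) {
         *         word   = *src++;
         *         *dst++ = queue | (word << left);
         *         queue  = word >> right;
         *     }
         */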
        cmp     r2, #4
        blo     copy_last_3_and_return

        /* Use post-increment mode for stm to spill r5-r11 to reserved stack
         * frame. Don't update sp.
         */
        stmea   sp, {r5-r11}

        /* compute shifts needed to align src to dest */
        rsb     r5, r0, #0
        and     r5, r5, #3              /* r5 = # bytes in partial words */
        mov     r12, r5, lsl #3         /* r12 = right */
        rsb     lr, r12, #32            /* lr = left */

        /* read the first word */
        ldr     r3, [r1], #4
        sub     r2, r2, #4

        /* write a partial word (0 to 3 bytes), such that destination
         * becomes aligned to 32 bits (r5 = nb of bytes to copy for alignment)
         */
        movs    r5, r5, lsl #31
        strmib  r3, [r0], #1
        movmi   r3, r3, lsr #8
        strcsb  r3, [r0], #1
        movcs   r3, r3, lsr #8
        strcsb  r3, [r0], #1
        movcs   r3, r3, lsr #8

        cmp     r2, #4
        blo     partial_word_tail

        /* Align destination to 32 bytes (cache line boundary) */
1:      tst     r0, #0x1c
        beq     2f
        ldr     r5, [r1], #4
        sub     r2, r2, #4
        orr     r4, r3, r5, lsl lr
        mov     r3, r5, lsr r12
        str     r4, [r0], #4
        cmp     r2, #4
        bhs     1b
        blo     partial_word_tail

        /* copy 32 bytes at a time */
2:      subs    r2, r2, #32
        blo     less_than_thirtytwo

        /* Use immediate mode for the shifts, because there is an extra cycle
         * for register shifts, which could account for up to 50% of
         * performance hit.
         */

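        /* loop8, loop16 and loop24 are the same copy loop specialized for
         * the three possible shift amounts (r12 == 8, 16 or 24), so that the
         * shifts can be encoded as immediates
         */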
        cmp     r12, #24
        beq     loop24
        cmp     r12, #8
        beq     loop8

loop16:
        ldr     r12, [r1], #4
1:      mov     r4, r12
        ldmia   r1!, { r5,r6,r7, r8,r9,r10,r11}
        PLD     (r1, #64)
        subs    r2, r2, #32
        ldrhs   r12, [r1], #4
        orr     r3, r3, r4, lsl #16
        mov     r4, r4, lsr #16
        orr     r4, r4, r5, lsl #16
        mov     r5, r5, lsr #16
        orr     r5, r5, r6, lsl #16
        mov     r6, r6, lsr #16
        orr     r6, r6, r7, lsl #16
        mov     r7, r7, lsr #16
        orr     r7, r7, r8, lsl #16
        mov     r8, r8, lsr #16
        orr     r8, r8, r9, lsl #16
        mov     r9, r9, lsr #16
        orr     r9, r9, r10, lsl #16
        mov     r10, r10, lsr #16
        orr     r10, r10, r11, lsl #16
        stmia   r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
        mov     r3, r11, lsr #16
        bhs     1b
        b       less_than_thirtytwo

loop8:
        ldr     r12, [r1], #4
1:      mov     r4, r12
        ldmia   r1!, { r5,r6,r7, r8,r9,r10,r11}
        PLD     (r1, #64)
        subs    r2, r2, #32
        ldrhs   r12, [r1], #4
        orr     r3, r3, r4, lsl #24
        mov     r4, r4, lsr #8
        orr     r4, r4, r5, lsl #24
        mov     r5, r5, lsr #8
        orr     r5, r5, r6, lsl #24
        mov     r6, r6, lsr #8
        orr     r6, r6, r7, lsl #24
        mov     r7, r7, lsr #8
        orr     r7, r7, r8, lsl #24
        mov     r8, r8, lsr #8
        orr     r8, r8, r9, lsl #24
        mov     r9, r9, lsr #8
        orr     r9, r9, r10, lsl #24
        mov     r10, r10, lsr #8
        orr     r10, r10, r11, lsl #24
        stmia   r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
        mov     r3, r11, lsr #8
        bhs     1b
        b       less_than_thirtytwo

loop24:
        ldr     r12, [r1], #4
1:      mov     r4, r12
        ldmia   r1!, { r5,r6,r7, r8,r9,r10,r11}
        PLD     (r1, #64)
        subs    r2, r2, #32
        ldrhs   r12, [r1], #4
        orr     r3, r3, r4, lsl #8
        mov     r4, r4, lsr #24
        orr     r4, r4, r5, lsl #8
        mov     r5, r5, lsr #24
        orr     r5, r5, r6, lsl #8
        mov     r6, r6, lsr #24
        orr     r6, r6, r7, lsl #8
        mov     r7, r7, lsr #24
        orr     r7, r7, r8, lsl #8
        mov     r8, r8, lsr #24
        orr     r8, r8, r9, lsl #8
        mov     r9, r9, lsr #24
        orr     r9, r9, r10, lsl #8
        mov     r10, r10, lsr #24
        orr     r10, r10, r11, lsl #8
        stmia   r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
        mov     r3, r11, lsr #24
        bhs     1b


less_than_thirtytwo:
        /* copy the last 0 to 31 bytes of the source */
        rsb     r12, lr, #32            /* we corrupted r12, recompute it */
        add     r2, r2, #32
        cmp     r2, #4
        blo     partial_word_tail

1:      ldr     r5, [r1], #4
        sub     r2, r2, #4
        orr     r4, r3, r5, lsl lr
        mov     r3, r5, lsr r12
        str     r4, [r0], #4
        cmp     r2, #4
        bhs     1b

partial_word_tail:
        /* we have a partial word in the input buffer */
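        /* lr is the left-shift amount in bits (8, 16 or 24); lsl #(31-3)
         * moves its bit 4 into the carry flag and its bit 3 into the negative
         * flag, so the stores below write the 1 to 3 bytes still queued in r3
         */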
        movs    r5, lr, lsl #(31-3)
        strmib  r3, [r0], #1
        movmi   r3, r3, lsr #8
        strcsb  r3, [r0], #1
        movcs   r3, r3, lsr #8
        strcsb  r3, [r0], #1

        /* Refill spilled registers from the stack. Don't update sp. */
        ldmfd   sp, {r5-r11}

copy_last_3_and_return:
        movs    r2, r2, lsl #31         /* copy remaining 0, 1, 2 or 3 bytes */
        ldrmib  r2, [r1], #1
        ldrcsb  r3, [r1], #1
        ldrcsb  r12, [r1]
        strmib  r2, [r0], #1
        strcsb  r3, [r0], #1
        strcsb  r12, [r0]

        /* we're done! restore sp and spilled registers and return */
        add     sp, sp, #28
        ldmfd   sp!, {r0, r4, lr}
        bx      lr
        .fnend

#endif