/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/cpu-features.h>
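/* Note: <machine/cpu-features.h> is assumed to provide __ARM_ARCH__ and
 * the PLD() prefetch macro used by the generic ARM path below. */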

#if __ARM_ARCH__ == 7 || defined(__ARM_NEON__)

        .text
        .fpu    neon

        .global memcpy
        .type   memcpy, %function
        .align  4

#define NEON_MAX_PREFETCH_DISTANCE 320

memcpy:
        .fnstart
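        @ On entry (AAPCS): r0 = dst, r1 = src, r2 = length in bytes.
        @ The destination is copied into ip so that stores can advance ip
        @ while r0 stays untouched as the return value.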
        mov     ip, r0
        cmp     r2, #16
        blt     4f      @ Have less than 16 bytes to copy

        @ First ensure 16 byte alignment for the destination buffer
        tst     r0, #0xF
        beq     2f
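        @ Copy one byte if the destination address is odd, then two more
        @ bytes if bit 1 of the (now even) address is set; after these
        @ steps ip is 4-byte aligned.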
        tst     r0, #1
        ldrneb  r3, [r1], #1
        strneb  r3, [ip], #1
        subne   r2, r2, #1
        tst     ip, #2
        ldrneb  r3, [r1], #1
        strneb  r3, [ip], #1
        ldrneb  r3, [r1], #1
        strneb  r3, [ip], #1
        subne   r2, r2, #2

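        @ A single-lane vld4.8/vst4.8 pair moves exactly 4 bytes, and a
        @ vld1.8/vst1.8 of one d-register moves 8, bringing ip from 4-byte
        @ up to 16-byte alignment before the main loop.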
        tst     ip, #4
        beq     1f
        vld4.8  {d0[0], d1[0], d2[0], d3[0]}, [r1]!
        vst4.8  {d0[0], d1[0], d2[0], d3[0]}, [ip, :32]!
        sub     r2, r2, #4
1:
        tst     ip, #8
        beq     2f
        vld1.8  {d0}, [r1]!
        vst1.8  {d0}, [ip, :64]!
        sub     r2, r2, #8
2:
        subs    r2, r2, #32
        blt     3f
        mov     r3, #32

        @ Main copy loop: 32 bytes are processed per iteration.
        @ ARM instructions are used for fine-grained prefetching,
        @ progressively increasing the prefetch distance up to
        @ NEON_MAX_PREFETCH_DISTANCE at runtime.
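        @ r3 holds the current prefetch distance in bytes: it starts at 32
        @ and grows by 32 per iteration until it reaches
        @ NEON_MAX_PREFETCH_DISTANCE. The loop runs while at least r3 bytes
        @ remain; the tail already covered by these prefetches is copied by
        @ the second loop below.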
1:
        vld1.8  {d0-d3}, [r1]!
        cmp     r3, #(NEON_MAX_PREFETCH_DISTANCE - 32)
        pld     [r1, r3]
        addle   r3, r3, #32
        vst1.8  {d0-d3}, [ip, :128]!
        sub     r2, r2, #32
        cmp     r2, r3
        bge     1b
        cmp     r2, #0
        blt     3f
1:      @ Copy the remaining part of the buffer (already prefetched)
        vld1.8  {d0-d3}, [r1]!
        subs    r2, r2, #32
        vst1.8  {d0-d3}, [ip, :128]!
        bge     1b
3:      @ Copy up to 31 remaining bytes
        tst     r2, #16
        beq     4f
        vld1.8  {d0, d1}, [r1]!
        vst1.8  {d0, d1}, [ip, :128]!
4:
        @ Use ARM instructions exclusively for the final trailing part that
        @ does not fill a full 16 byte aligned block, in order to avoid an
        @ "ARM store after NEON store" hazard. Also, the NEON pipeline will
        @ be (mostly) flushed by the time control returns to the caller,
        @ making the use of NEON mostly transparent (and avoiding hazards
        @ in the caller's code).

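        @ "lsl #29" moves bit 3 of the remaining count into C and bit 2
        @ into N, so the conditional byte copies below handle the 8- and
        @ 4-byte chunks; the second movs ("lsl #31") does the same for the
        @ final 2- and 1-byte chunks.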
        movs    r3, r2, lsl #29
        bcc     1f
        .rept   8
        ldrcsb  r3, [r1], #1
        strcsb  r3, [ip], #1
        .endr
1:
        bpl     1f
        .rept   4
        ldrmib  r3, [r1], #1
        strmib  r3, [ip], #1
        .endr
1:
        movs    r2, r2, lsl #31
        ldrcsb  r3, [r1], #1
        strcsb  r3, [ip], #1
        ldrcsb  r3, [r1], #1
        strcsb  r3, [ip], #1
        ldrmib  r3, [r1], #1
        strmib  r3, [ip], #1
        bx      lr
        .fnend

#else   /* __ARM_ARCH__ < 7 */

        .text

        .global memcpy
        .type   memcpy, %function
        .align  4

        /*
         * Optimized memcpy() for ARM.
         *
         * note that memcpy() always returns the destination pointer,
         * so we have to preserve R0.
         */

memcpy:
        /* The stack must always be 64-bit aligned to be compliant with the
         * ARM ABI. Since we have to save R0, we might as well save R4,
         * which we can use for better pipelining of the reads below.
         */
        .fnstart
        .save   {r0, r4, lr}
        stmfd   sp!, {r0, r4, lr}
        /* Making room for r5-r11 which will be spilled later */
        .pad    #28
        sub     sp, sp, #28

        // preload the destination because we'll align it to a cache line
        // with small writes. Also start the source "pump".
        PLD     (r0, #0)
        PLD     (r1, #0)
        PLD     (r1, #32)

        /* it simplifies things to take care of len<4 early */
        cmp     r2, #4
        blo     copy_last_3_and_return

        /* compute the offset to align the source
         * offset = (4-(src&3))&3 = -src & 3
         */
        rsb     r3, r1, #0
        ands    r3, r3, #3
        beq     src_aligned

        /* align source to 32 bits. We need to insert 2 instructions between
         * a ldr[b|h] and str[b|h] because byte and half-word instructions
         * stall 2 cycles.
         */
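        /* "lsl #31" puts bit 0 of the alignment count into N and bit 1
         * into C, so the MI pair copies one byte and the two CS pairs copy
         * two, i.e. the 1 to 3 bytes needed to word-align the source.
         */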
        movs    r12, r3, lsl #31
        sub     r2, r2, r3              /* we know that r3 <= r2 because r2 >= 4 */
        ldrmib  r3, [r1], #1
        ldrcsb  r4, [r1], #1
        ldrcsb  r12, [r1], #1
        strmib  r3, [r0], #1
        strcsb  r4, [r0], #1
        strcsb  r12, [r0], #1

src_aligned:

        /* see if src and dst are aligned together (congruent) */
        eor     r12, r0, r1
        tst     r12, #3
        bne     non_congruent

        /* Use post-increment mode for stm to spill r5-r11 to the reserved
         * stack frame. Don't update sp.
         */
        stmea   sp, {r5-r11}

        /* align the destination to a cache-line */
        rsb     r3, r0, #0
        ands    r3, r3, #0x1C
        beq     congruent_aligned32
        cmp     r3, r2
        andhi   r3, r2, #0x1C

        /* conditionally copies 0 to 7 words (length in r3) */
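        /* "lsl #28" moves bit 4 of r3 into C and bit 3 into N: the CS
         * ldm/stm pair moves 16 bytes, the MI pair moves 8, and the
         * explicit tst handles the remaining 4.
         */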
        movs    r12, r3, lsl #28
        ldmcsia r1!, {r4, r5, r6, r7}   /* 16 bytes */
        ldmmiia r1!, {r8, r9}           /*  8 bytes */
        stmcsia r0!, {r4, r5, r6, r7}
        stmmiia r0!, {r8, r9}
        tst     r3, #0x4
        ldrne   r10, [r1], #4           /*  4 bytes */
        strne   r10, [r0], #4
        sub     r2, r2, r3

congruent_aligned32:
        /*
         * here the destination is aligned to a 32-byte cache line.
         */

cached_aligned32:
        subs    r2, r2, #32
        blo     less_than_32_left

        /*
         * We preload a cache-line up to 64 bytes ahead. On the 926, this will
         * stall only until the requested word is fetched, but the linefill
         * continues in the background.
         * While the linefill is going on, we write our previous cache-line
         * into the write-buffer (which should have some free space).
         * When the linefill is done, the write-buffer will
         * start draining its content into memory.
         *
         * While all this is going on, we load a full cache line into
         * 8 registers; this cache line should be in the cache by now
         * (or partly in the cache).
         *
         * This code should work well regardless of the source/dest alignment.
         *
         */

        // Align the preload register to a cache-line because the cpu does
        // "critical word first" (the first word requested is loaded first).
        bic     r12, r1, #0x1F
        add     r12, r12, #64

1:      ldmia   r1!, { r4-r11 }
        PLD     (r12, #64)
        subs    r2, r2, #32

        // NOTE: if r12 is more than 64 ahead of r1, the following ldrhi
        // for the ARM9 preload will not be safely guarded by the preceding
        // subs. When it is safely guarded, the only way to get a SIGSEGV
        // here is for the caller to overstate the length.
        ldrhi   r3, [r12], #32          /* cheap ARM9 preload */
        stmia   r0!, { r4-r11 }
        bhs     1b

        add     r2, r2, #32

less_than_32_left:
        /*
         * less than 32 bytes left at this point (length in r2)
         */

        /* skip all this if there is nothing to do, which should
         * be a common case (when it does run, the code below takes
         * about 16 cycles)
         */
        tst     r2, #0x1F
        beq     1f

        /* conditionally copies 0 to 31 bytes */
        movs    r12, r2, lsl #28
        ldmcsia r1!, {r4, r5, r6, r7}   /* 16 bytes */
        ldmmiia r1!, {r8, r9}           /*  8 bytes */
        stmcsia r0!, {r4, r5, r6, r7}
        stmmiia r0!, {r8, r9}
        movs    r12, r2, lsl #30
        ldrcs   r3, [r1], #4            /*  4 bytes */
        ldrmih  r4, [r1], #2            /*  2 bytes */
        strcs   r3, [r0], #4
        strmih  r4, [r0], #2
        tst     r2, #0x1
        ldrneb  r3, [r1]                /* last byte */
        strneb  r3, [r0]

        /* we're done! restore everything and return */
1:      ldmfd   sp!, {r5-r11}
        ldmfd   sp!, {r0, r4, lr}
        bx      lr

        /********************************************************************/

non_congruent:
        /*
         * here source is aligned to 4 bytes
         * but destination is not.
         *
         * in the code below r2 is the number of bytes read
         * (the number of bytes written is always smaller, because we have
         * partial words in the shift queue)
         */
        cmp     r2, #4
        blo     copy_last_3_and_return

        /* Use post-increment mode for stm to spill r5-r11 to the reserved
         * stack frame. Don't update sp.
         */
        stmea   sp, {r5-r11}

        /* compute shifts needed to align src to dest */
        rsb     r5, r0, #0
        and     r5, r5, #3              /* r5 = # bytes in partial words */
        mov     r12, r5, lsl #3         /* r12 = right */
        rsb     lr, r12, #32            /* lr = left */
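        /* The loops below keep a byte "shift queue" in r3: each output word
         * is assembled as r3 | (next source word << lr), and the bytes that
         * do not fit are kept for the next iteration, shifted right by r12.
         */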

        /* read the first word */
        ldr     r3, [r1], #4
        sub     r2, r2, #4

        /* write a partial word (0 to 3 bytes), such that the destination
         * becomes aligned to 32 bits (r5 = number of bytes to copy for
         * alignment)
         */
        movs    r5, r5, lsl #31
        strmib  r3, [r0], #1
        movmi   r3, r3, lsr #8
        strcsb  r3, [r0], #1
        movcs   r3, r3, lsr #8
        strcsb  r3, [r0], #1
        movcs   r3, r3, lsr #8

        cmp     r2, #4
        blo     partial_word_tail

        /* Align destination to 32 bytes (cache line boundary) */
1:      tst     r0, #0x1c
        beq     2f
        ldr     r5, [r1], #4
        sub     r2, r2, #4
        orr     r4, r3, r5, lsl lr
        mov     r3, r5, lsr r12
        str     r4, [r0], #4
        cmp     r2, #4
        bhs     1b
        blo     partial_word_tail

        /* copy 32 bytes at a time */
2:      subs    r2, r2, #32
        blo     less_than_thirtytwo

        /* Use immediate mode for the shifts, because there is an extra cycle
         * for register shifts, which could account for up to a 50%
         * performance hit.
         */

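        /* Dispatch on the shift amount: r12 == 24 -> loop24, r12 == 8 ->
         * loop8, otherwise (r12 == 16) fall through to loop16; each loop
         * hard-codes its shift amounts.
         */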
        cmp     r12, #24
        beq     loop24
        cmp     r12, #8
        beq     loop8

loop16:
        ldr     r12, [r1], #4
1:      mov     r4, r12
        ldmia   r1!, {r5, r6, r7, r8, r9, r10, r11}
        PLD     (r1, #64)
        subs    r2, r2, #32
        ldrhs   r12, [r1], #4
        orr     r3, r3, r4, lsl #16
        mov     r4, r4, lsr #16
        orr     r4, r4, r5, lsl #16
        mov     r5, r5, lsr #16
        orr     r5, r5, r6, lsl #16
        mov     r6, r6, lsr #16
        orr     r6, r6, r7, lsl #16
        mov     r7, r7, lsr #16
        orr     r7, r7, r8, lsl #16
        mov     r8, r8, lsr #16
        orr     r8, r8, r9, lsl #16
        mov     r9, r9, lsr #16
        orr     r9, r9, r10, lsl #16
        mov     r10, r10, lsr #16
        orr     r10, r10, r11, lsl #16
        stmia   r0!, {r3, r4, r5, r6, r7, r8, r9, r10}
        mov     r3, r11, lsr #16
        bhs     1b
        b       less_than_thirtytwo

loop8:
        ldr     r12, [r1], #4
1:      mov     r4, r12
        ldmia   r1!, {r5, r6, r7, r8, r9, r10, r11}
        PLD     (r1, #64)
        subs    r2, r2, #32
        ldrhs   r12, [r1], #4
        orr     r3, r3, r4, lsl #24
        mov     r4, r4, lsr #8
        orr     r4, r4, r5, lsl #24
        mov     r5, r5, lsr #8
        orr     r5, r5, r6, lsl #24
        mov     r6, r6, lsr #8
        orr     r6, r6, r7, lsl #24
        mov     r7, r7, lsr #8
        orr     r7, r7, r8, lsl #24
        mov     r8, r8, lsr #8
        orr     r8, r8, r9, lsl #24
        mov     r9, r9, lsr #8
        orr     r9, r9, r10, lsl #24
        mov     r10, r10, lsr #8
        orr     r10, r10, r11, lsl #24
        stmia   r0!, {r3, r4, r5, r6, r7, r8, r9, r10}
        mov     r3, r11, lsr #8
        bhs     1b
        b       less_than_thirtytwo

loop24:
        ldr     r12, [r1], #4
1:      mov     r4, r12
        ldmia   r1!, {r5, r6, r7, r8, r9, r10, r11}
        PLD     (r1, #64)
        subs    r2, r2, #32
        ldrhs   r12, [r1], #4
        orr     r3, r3, r4, lsl #8
        mov     r4, r4, lsr #24
        orr     r4, r4, r5, lsl #8
        mov     r5, r5, lsr #24
        orr     r5, r5, r6, lsl #8
        mov     r6, r6, lsr #24
        orr     r6, r6, r7, lsl #8
        mov     r7, r7, lsr #24
        orr     r7, r7, r8, lsl #8
        mov     r8, r8, lsr #24
        orr     r8, r8, r9, lsl #8
        mov     r9, r9, lsr #24
        orr     r9, r9, r10, lsl #8
        mov     r10, r10, lsr #24
        orr     r10, r10, r11, lsl #8
        stmia   r0!, {r3, r4, r5, r6, r7, r8, r9, r10}
        mov     r3, r11, lsr #24
        bhs     1b

less_than_thirtytwo:
        /* copy the last 0 to 31 bytes of the source */
        rsb     r12, lr, #32            /* we corrupted r12, recompute it */
        add     r2, r2, #32
        cmp     r2, #4
        blo     partial_word_tail

1:      ldr     r5, [r1], #4
        sub     r2, r2, #4
        orr     r4, r3, r5, lsl lr
        mov     r3, r5, lsr r12
        str     r4, [r0], #4
        cmp     r2, #4
        bhs     1b

partial_word_tail:
        /* we have a partial word in the input buffer */
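        /* lr/8 bytes of the last source word are still queued in r3;
         * "lsl #(31-3)" moves bit 3 of lr into N and bit 4 into C so the
         * stores below write exactly 1, 2 or 3 bytes.
         */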
        movs    r5, lr, lsl #(31-3)
        strmib  r3, [r0], #1
        movmi   r3, r3, lsr #8
        strcsb  r3, [r0], #1
        movcs   r3, r3, lsr #8
        strcsb  r3, [r0], #1

        /* Refill spilled registers from the stack. Don't update sp. */
        ldmfd   sp, {r5-r11}

copy_last_3_and_return:
        movs    r2, r2, lsl #31         /* copy remaining 0, 1, 2 or 3 bytes */
        ldrmib  r2, [r1], #1
        ldrcsb  r3, [r1], #1
        ldrcsb  r12, [r1]
        strmib  r2, [r0], #1
        strcsb  r3, [r0], #1
        strcsb  r12, [r0]

        /* we're done! restore sp and spilled registers and return */
        add     sp, sp, #28
        ldmfd   sp!, {r0, r4, lr}
        bx      lr
        .fnend

#endif