/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/cpu-features.h>

/* VERY IMPORTANT NOTE:
 * The following ARMv7-optimized version of memcpy is DISABLED!
 *
 * This is because the corresponding machine code is not properly
 * emulated by the Android emulator at this time, and because running
 * the dex pre-optimization pass in the emulator is required when
 * building -user images (corresponding to the system images of
 * production devices).
 *
 * The code will be re-enabled as soon as we fix the ARMv7 emulation
 * issues. An even better fix would be to *not* have to run the
 * dex pre-opt pass in the emulator, but on the build host instead.
 */
#if 0
/* #if __ARM_ARCH__ == 7 || defined(__ARM_NEON__) */

        .text
        .fpu    neon

        .global memcpy
        .type memcpy, %function
        .align  4

/* a prefetch distance of 32*4 works best experimentally */
#define PREFETCH_DISTANCE   (32*4)

memcpy:
        .fnstart
        .save   {r0, lr}
        stmfd   sp!, {r0, lr}

        /* start preloading as early as possible */
        pld     [r1, #0]
        pld     [r1, #32]

        /* do we have at least 16-bytes to copy (needed for alignment below) */
        cmp     r2, #16
        blo     5f

        /* align destination to half cache-line for the write-buffer */
        rsb     r3, r0, #0
        ands    r3, r3, #0xF
        beq     0f

        /* copy up to 15-bytes (count in r3) */
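        /* How the conditional byte copies below work: "movs ip, r3, lsl #31"
         * shifts bit 0 of the count into N and bit 1 into C, so the MI
         * load/store pair copies one byte and the two CS pairs copy two.
         * "movs ip, r3, lsl #29" then moves bit 2 into N and bit 3 into C:
         * the 4-byte NEON copy is skipped when bit 2 is clear (bge) and the
         * 8-byte copy when bit 3 is clear (bcc). ldrmib/strcsb etc. are the
         * pre-UAL spellings of ldrbmi/strbcs. */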
        sub     r2, r2, r3
        movs    ip, r3, lsl #31
        ldrmib  lr, [r1], #1
        strmib  lr, [r0], #1
        ldrcsb  ip, [r1], #1
        ldrcsb  lr, [r1], #1
        strcsb  ip, [r0], #1
        strcsb  lr, [r0], #1
        movs    ip, r3, lsl #29
        bge     1f
        // copies 4 bytes, destination 32-bits aligned
        vld4.8  {d0[0], d1[0], d2[0], d3[0]}, [r1]!
        vst4.8  {d0[0], d1[0], d2[0], d3[0]}, [r0, :32]!
1:      bcc     2f
        // copies 8 bytes, destination 64-bits aligned
        vld1.8  {d0}, [r1]!
        vst1.8  {d0}, [r0, :64]!
2:

0:      /* preload immediately the next cache line, which we may need */
        pld     [r1, #(32*0)]
        pld     [r1, #(32*1)]
        pld     [r1, #(32*2)]
        pld     [r1, #(32*3)]

        /* make sure we have at least 128 bytes to copy */
        subs    r2, r2, #128
        blo     2f

        /* preload all the cache lines we need.
         * NOTE: the number of pld below depends on PREFETCH_DISTANCE,
         * ideally we would increase the distance in the main loop to
         * avoid the goofy code below. In practice this doesn't seem to make
         * a big difference.
         */
        pld     [r1, #(PREFETCH_DISTANCE + 32*0)]
        pld     [r1, #(PREFETCH_DISTANCE + 32*1)]
        pld     [r1, #(PREFETCH_DISTANCE + 32*2)]
        pld     [r1, #(PREFETCH_DISTANCE + 32*3)]

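        /* The :128 alignment hints on the stores below are valid because the
         * destination was aligned to a 16-byte boundary above; the loads can
         * tolerate any source alignment. Each iteration moves 128 bytes
         * through d0-d7 and d16-d23 while prefetching PREFETCH_DISTANCE
         * ahead. */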
1:      /* The main loop copies 128 bytes at a time */
        vld1.8  {d0  - d3},  [r1]!
        vld1.8  {d4  - d7},  [r1]!
        vld1.8  {d16 - d19}, [r1]!
        vld1.8  {d20 - d23}, [r1]!
        pld     [r1, #(PREFETCH_DISTANCE + 32*0)]
        pld     [r1, #(PREFETCH_DISTANCE + 32*1)]
        pld     [r1, #(PREFETCH_DISTANCE + 32*2)]
        pld     [r1, #(PREFETCH_DISTANCE + 32*3)]
        subs    r2, r2, #128
        vst1.8  {d0  - d3},  [r0, :128]!
        vst1.8  {d4  - d7},  [r0, :128]!
        vst1.8  {d16 - d19}, [r0, :128]!
        vst1.8  {d20 - d23}, [r0, :128]!
        bhs     1b

2:      /* fix-up the remaining count and make sure we have >= 32 bytes left */
        add     r2, r2, #128
        subs    r2, r2, #32
        blo     4f

3:      /* 32 bytes at a time. These cache lines were already preloaded */
        vld1.8  {d0 - d3}, [r1]!
        subs    r2, r2, #32
        vst1.8  {d0 - d3}, [r0, :128]!
        bhs     3b

4:      /* less than 32 left */
        add     r2, r2, #32
        tst     r2, #0x10
        beq     5f
        // copies 16 bytes, 128-bits aligned
        vld1.8  {d0, d1}, [r1]!
        vst1.8  {d0, d1}, [r0, :128]!

5:      /* copy up to 15-bytes (count in r2) */
        movs    ip, r2, lsl #29
        bcc     1f
        vld1.8  {d0}, [r1]!
        vst1.8  {d0}, [r0]!
1:      bge     2f
        vld4.8  {d0[0], d1[0], d2[0], d3[0]}, [r1]!
        vst4.8  {d0[0], d1[0], d2[0], d3[0]}, [r0]!
2:      movs    ip, r2, lsl #31
        ldrmib  r3, [r1], #1
        ldrcsb  ip, [r1], #1
        ldrcsb  lr, [r1], #1
        strmib  r3, [r0], #1
        strcsb  ip, [r0], #1
        strcsb  lr, [r0], #1

        ldmfd   sp!, {r0, lr}
        bx      lr
        .fnend


#else   /* __ARM_ARCH__ < 7 */


        .text

        .global memcpy
        .type memcpy, %function
        .align  4

        /*
         * Optimized memcpy() for ARM.
         *
         * note that memcpy() always returns the destination pointer,
         * so we have to preserve R0.
         */

memcpy:
        /* The stack must always be 64-bits aligned to be compliant with the
         * ARM ABI. Since we have to save R0, we might as well save R4
         * which we can use for better pipelining of the reads below
         */
        .fnstart
        .save   {r0, r4, lr}
        stmfd   sp!, {r0, r4, lr}
        /* Making room for r5-r11 which will be spilled later */
        .pad    #28
        sub     sp, sp, #28

        // preload the destination because we'll align it to a cache line
        // with small writes. Also start the source "pump".
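        // PLD() is presumably a macro from <machine/cpu-features.h> that
        // expands to a pld instruction only on cores that support it, so
        // the same source also builds for targets without pld.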
        PLD     (r0, #0)
        PLD     (r1, #0)
        PLD     (r1, #32)

        /* it simplifies things to take care of len<4 early */
        cmp     r2, #4
        blo     copy_last_3_and_return

        /* compute the offset to align the source
         * offset = (4-(src&3))&3 = -src & 3
         */
        rsb     r3, r1, #0
        ands    r3, r3, #3
        beq     src_aligned

        /* align source to 32 bits. We need to insert 2 instructions between
         * a ldr[b|h] and str[b|h] because byte and half-word instructions
         * stall 2 cycles.
         */
        movs    r12, r3, lsl #31
        sub     r2, r2, r3      /* we know that r3 <= r2 because r2 >= 4 */
        ldrmib  r3, [r1], #1
        ldrcsb  r4, [r1], #1
        ldrcsb  r12, [r1], #1
        strmib  r3, [r0], #1
        strcsb  r4, [r0], #1
        strcsb  r12, [r0], #1

src_aligned:

        /* see if src and dst are aligned together (congruent) */
        eor     r12, r0, r1
        tst     r12, #3
        bne     non_congruent

        /* Use post-increment mode for stm to spill r5-r11 to the reserved
         * stack frame. Don't update sp.
         */
        stmea   sp, {r5-r11}
241
242 /* align the destination to a cache-line */
243 rsb r3, r0, #0
244 ands r3, r3, #0x1C
245 beq congruent_aligned32
246 cmp r3, r2
247 andhi r3, r2, #0x1C
248
249 /* conditionnaly copies 0 to 7 words (length in r3) */
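        /* The flag trick at word granularity: lsl #28 moves bit 4 of the
         * byte count (r3) into C and bit 3 into N, so the CS ldm/stm pair
         * copies 16 bytes, the MI pair copies 8, and the explicit tst with
         * #0x4 handles the remaining word. */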
        movs    r12, r3, lsl #28
        ldmcsia r1!, {r4, r5, r6, r7}   /* 16 bytes */
        ldmmiia r1!, {r8, r9}           /* 8 bytes */
        stmcsia r0!, {r4, r5, r6, r7}
        stmmiia r0!, {r8, r9}
        tst     r3, #0x4
        ldrne   r10, [r1], #4           /* 4 bytes */
        strne   r10, [r0], #4
        sub     r2, r2, r3

congruent_aligned32:
        /*
         * here the destination is aligned to a 32-byte cache line
         * (the source is only guaranteed to be word-aligned).
         */

cached_aligned32:
        subs    r2, r2, #32
        blo     less_than_32_left

        /*
         * We preload a cache-line up to 64 bytes ahead. On the 926, this will
         * stall only until the requested word is fetched, but the linefill
         * continues in the background.
         * While the linefill is going, we write our previous cache-line
         * into the write-buffer (which should have some free space).
         * When the linefill is done, the write-buffer will
         * start dumping its content into memory.
         *
         * While all this is going on, we then load a full cache line into
         * 8 registers; this cache line should be in the cache by now
         * (or partly in the cache).
         *
         * This code should work well regardless of the source/dest alignment.
         *
         */

        // Align the preload register to a cache-line because the cpu does
        // "critical word first" (the first word requested is loaded first).
        bic     r12, r1, #0x1F
        add     r12, r12, #64

1:      ldmia   r1!, {r4-r11}
        PLD     (r12, #64)
        subs    r2, r2, #32

        // NOTE: if r12 is more than 64 ahead of r1, the following ldrhi
        // for the ARM9 preload will not be safely guarded by the preceding
        // subs. When it is safely guarded, the only way to get a SIGSEGV
        // here is if the caller overstates the length.
        ldrhi   r3, [r12], #32          /* cheap ARM9 preload */
        stmia   r0!, {r4-r11}
        bhs     1b

        add     r2, r2, #32


less_than_32_left:
        /*
         * less than 32 bytes left at this point (length in r2)
         */

        /* skip all this if there is nothing to do, which should
         * be a common case (if not executed the code below takes
         * about 16 cycles)
         */
        tst     r2, #0x1F
        beq     1f

        /* conditionally copies 0 to 31 bytes */
        movs    r12, r2, lsl #28
        ldmcsia r1!, {r4, r5, r6, r7}   /* 16 bytes */
        ldmmiia r1!, {r8, r9}           /* 8 bytes */
        stmcsia r0!, {r4, r5, r6, r7}
        stmmiia r0!, {r8, r9}
        movs    r12, r2, lsl #30
        ldrcs   r3, [r1], #4            /* 4 bytes */
        ldrmih  r4, [r1], #2            /* 2 bytes */
        strcs   r3, [r0], #4
        strmih  r4, [r0], #2
        tst     r2, #0x1
        ldrneb  r3, [r1]                /* last byte */
        strneb  r3, [r0]

        /* we're done! restore everything and return */
1:      ldmfd   sp!, {r5-r11}
        ldmfd   sp!, {r0, r4, lr}
        bx      lr

        /********************************************************************/

non_congruent:
        /*
         * here source is aligned to 4 bytes
         * but destination is not.
         *
         * in the code below r2 is the number of bytes read
         * (the number of bytes written is always smaller, because we have
         * partial words in the shift queue)
         */
        cmp     r2, #4
        blo     copy_last_3_and_return

        /* Use post-increment mode for stm to spill r5-r11 to the reserved
         * stack frame. Don't update sp.
         */
        stmea   sp, {r5-r11}

        /* compute shifts needed to align src to dest */
        rsb     r5, r0, #0
        and     r5, r5, #3              /* r5 = # bytes in partial words */
        mov     r12, r5, lsl #3         /* r12 = right */
        rsb     lr, r12, #32            /* lr = left */
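        /* The shift queue: each iteration of the loops below keeps the tail
         * of the previous source word in r3, splices it with the next word
         * (orr r4, r3, r5, lsl lr) and carries the leftover bytes forward
         * (r5 lsr r12). For example, when the destination is 1 byte past a
         * word boundary, r5 = 3, r12 = 24 and lr = 8: after the 3-byte
         * aligning store, each stored word is 1 byte carried over from the
         * previous load plus the low 3 bytes of the current load. */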

        /* read the first word */
        ldr     r3, [r1], #4
        sub     r2, r2, #4

        /* write a partial word (0 to 3 bytes), such that the destination
         * becomes aligned to 32 bits (r5 = number of bytes to copy for
         * alignment)
         */
        movs    r5, r5, lsl #31
        strmib  r3, [r0], #1
        movmi   r3, r3, lsr #8
        strcsb  r3, [r0], #1
        movcs   r3, r3, lsr #8
        strcsb  r3, [r0], #1
        movcs   r3, r3, lsr #8

        cmp     r2, #4
        blo     partial_word_tail

        /* Align destination to 32 bytes (cache line boundary) */
1:      tst     r0, #0x1c
        beq     2f
        ldr     r5, [r1], #4
        sub     r2, r2, #4
        orr     r4, r3, r5, lsl lr
        mov     r3, r5, lsr r12
        str     r4, [r0], #4
        cmp     r2, #4
        bhs     1b
        blo     partial_word_tail

        /* copy 32 bytes at a time */
2:      subs    r2, r2, #32
        blo     less_than_thirtytwo

        /* Use immediate mode for the shifts, because there is an extra cycle
         * for register shifts, which could account for up to a 50%
         * performance hit.
         */

        cmp     r12, #24
        beq     loop24
        cmp     r12, #8
        beq     loop8

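        /* Three copies of the 32-byte loop follow, one per possible
         * misalignment, so the splicing shifts can use immediate operands:
         * loop8 for r12 == 8, loop16 (the default) for r12 == 16, and
         * loop24 for r12 == 24. */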
loop16:
        ldr     r12, [r1], #4
1:      mov     r4, r12
        ldmia   r1!, {r5, r6, r7, r8, r9, r10, r11}
        PLD     (r1, #64)
        subs    r2, r2, #32
        ldrhs   r12, [r1], #4
        orr     r3, r3, r4, lsl #16
        mov     r4, r4, lsr #16
        orr     r4, r4, r5, lsl #16
        mov     r5, r5, lsr #16
        orr     r5, r5, r6, lsl #16
        mov     r6, r6, lsr #16
        orr     r6, r6, r7, lsl #16
        mov     r7, r7, lsr #16
        orr     r7, r7, r8, lsl #16
        mov     r8, r8, lsr #16
        orr     r8, r8, r9, lsl #16
        mov     r9, r9, lsr #16
        orr     r9, r9, r10, lsl #16
        mov     r10, r10, lsr #16
        orr     r10, r10, r11, lsl #16
        stmia   r0!, {r3, r4, r5, r6, r7, r8, r9, r10}
        mov     r3, r11, lsr #16
        bhs     1b
        b       less_than_thirtytwo

loop8:
        ldr     r12, [r1], #4
1:      mov     r4, r12
        ldmia   r1!, {r5, r6, r7, r8, r9, r10, r11}
        PLD     (r1, #64)
        subs    r2, r2, #32
        ldrhs   r12, [r1], #4
        orr     r3, r3, r4, lsl #24
        mov     r4, r4, lsr #8
        orr     r4, r4, r5, lsl #24
        mov     r5, r5, lsr #8
        orr     r5, r5, r6, lsl #24
        mov     r6, r6, lsr #8
        orr     r6, r6, r7, lsl #24
        mov     r7, r7, lsr #8
        orr     r7, r7, r8, lsl #24
        mov     r8, r8, lsr #8
        orr     r8, r8, r9, lsl #24
        mov     r9, r9, lsr #8
        orr     r9, r9, r10, lsl #24
        mov     r10, r10, lsr #8
        orr     r10, r10, r11, lsl #24
        stmia   r0!, {r3, r4, r5, r6, r7, r8, r9, r10}
        mov     r3, r11, lsr #8
        bhs     1b
        b       less_than_thirtytwo

loop24:
        ldr     r12, [r1], #4
1:      mov     r4, r12
        ldmia   r1!, {r5, r6, r7, r8, r9, r10, r11}
        PLD     (r1, #64)
        subs    r2, r2, #32
        ldrhs   r12, [r1], #4
        orr     r3, r3, r4, lsl #8
        mov     r4, r4, lsr #24
        orr     r4, r4, r5, lsl #8
        mov     r5, r5, lsr #24
        orr     r5, r5, r6, lsl #8
        mov     r6, r6, lsr #24
        orr     r6, r6, r7, lsl #8
        mov     r7, r7, lsr #24
        orr     r7, r7, r8, lsl #8
        mov     r8, r8, lsr #24
        orr     r8, r8, r9, lsl #8
        mov     r9, r9, lsr #24
        orr     r9, r9, r10, lsl #8
        mov     r10, r10, lsr #24
        orr     r10, r10, r11, lsl #8
        stmia   r0!, {r3, r4, r5, r6, r7, r8, r9, r10}
        mov     r3, r11, lsr #24
        bhs     1b


less_than_thirtytwo:
        /* copy the last 0 to 31 bytes of the source */
        rsb     r12, lr, #32            /* we corrupted r12, recompute it */
        add     r2, r2, #32
        cmp     r2, #4
        blo     partial_word_tail

1:      ldr     r5, [r1], #4
        sub     r2, r2, #4
        orr     r4, r3, r5, lsl lr
        mov     r3, r5, lsr r12
        str     r4, [r0], #4
        cmp     r2, #4
        bhs     1b

partial_word_tail:
        /* we have a partial word in the input buffer */
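        /* Shifting lr left by #(31-3) moves bit 3 of lr into N and bit 4
         * into C, so the MI/CS byte stores below write out the lr/8 bytes
         * that are still queued in r3. */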
        movs    r5, lr, lsl #(31-3)
        strmib  r3, [r0], #1
        movmi   r3, r3, lsr #8
        strcsb  r3, [r0], #1
        movcs   r3, r3, lsr #8
        strcsb  r3, [r0], #1

        /* Refill spilled registers from the stack. Don't update sp. */
        ldmfd   sp, {r5-r11}

copy_last_3_and_return:
        movs    r2, r2, lsl #31         /* copy remaining 0, 1, 2 or 3 bytes */
        ldrmib  r2, [r1], #1
        ldrcsb  r3, [r1], #1
        ldrcsb  r12, [r1]
        strmib  r2, [r0], #1
        strcsb  r3, [r0], #1
        strcsb  r12, [r0]

        /* we're done! restore sp and spilled registers and return */
        add     sp, sp, #28
        ldmfd   sp!, {r0, r4, lr}
        bx      lr
        .fnend


#endif  /* __ARM_ARCH__ < 7 */