String Compression for ARM and ARM64
Changes to the intrinsics and code generation on ARM and ARM64
for the string compression feature. The feature is currently off.
With the feature OFF, the sizes of boot.oat and boot.art for ARM
are unchanged before and after these changes. With the feature ON,
boot.oat grows by 0.60% and boot.art shrinks by 9.38%.
Likewise for ARM64, the sizes of boot.oat and boot.art are unchanged
before and after the changes (feature OFF). With the feature ON,
boot.oat grows by 0.48% and boot.art shrinks by 6.58%.
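For reference, a minimal C++ sketch of the count encoding this change
relies on, as the ARM path below implies (the flag lives in bit 31,
cleared with bic and tested with a signed compare); the helper names
here are illustrative, not ART's actual API:

  #include <cstdint>

  // Bit 31 of the 32-bit count field marks a compressed string.
  constexpr uint32_t kCompressionFlagBit = 0x80000000u;

  inline bool IsCompressed(uint32_t count) {
    // The assembly tests this with a signed compare: cmp r4, #0; blt ...
    return static_cast<int32_t>(count) < 0;
  }

  inline uint32_t DecodeLength(uint32_t count) {
    // The assembly does the same with: bic r3, #0x80000000
    return count & ~kCompressionFlagBit;
  }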
To turn the feature on, set:
runtime/mirror/string.h (kUseStringCompression = true)
runtime/asm_support.h (STRING_COMPRESSION_FEATURE 1)
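The two switches above amount to roughly the following definitions
(a sketch of the relevant lines, not the full headers):

  // runtime/mirror/string.h: enables the feature for runtime and
  // compiler code paths.
  static constexpr bool kUseStringCompression = true;

  // runtime/asm_support.h: enables the feature in the assembly stubs,
  // i.e. the #if (STRING_COMPRESSION_FEATURE) blocks in the diff below.
  #define STRING_COMPRESSION_FEATURE 1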
Test: m -j31 test-art-target
All tests pass both when mirror::kUseStringCompression
is ON and when it is OFF.
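The compressed fast path added to art_quick_indexof below scans bytes
instead of 16-bit code units; it is equivalent to this C++ sketch
(illustrative only, not ART's actual helper):

  #include <cstdint>

  // indexOf over compressed (8-bit) string data; start is pre-clamped
  // to [0..length], as in the assembly.
  int32_t IndexOfCompressed(const uint8_t* data, int32_t length,
                            uint16_t ch, int32_t start) {
    for (int32_t i = start; i < length; ++i) {
      if (data[i] == ch) {
        return i;  // byte offset equals char index for compressed data
      }
    }
    return -1;  // no match
  }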
Bug: 31040547
Change-Id: I24e86b99391df33ba27df747779b648c5a820649
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 5d53062..cdb4c25 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1710,7 +1710,11 @@
.cfi_rel_offset lr, 12
ldr r3, [r0, #MIRROR_STRING_COUNT_OFFSET]
add r0, #MIRROR_STRING_VALUE_OFFSET
-
+#if (STRING_COMPRESSION_FEATURE)
+    /* r4 holds the count field (with compression flag); r3 will hold the actual length */
+    mov   r4, r3
+    bic   r3, #0x80000000              @ clear the compression flag (bit 31)
+#endif
/* Clamp start to [0..count] */
cmp r2, #0
it lt
@@ -1723,6 +1727,10 @@
mov r12, r0
/* Build pointer to start of data to compare and pre-bias */
+#if (STRING_COMPRESSION_FEATURE)
+    cmp   r4, #0                       @ a negative count means a compressed string
+    blt   .Lstring_indexof_compressed
+#endif
add r0, r0, r2, lsl #1
sub r0, #2
@@ -1734,6 +1742,7 @@
* r0: start of data to test
* r1: char to compare
* r2: iteration count
+ * r4: compression style (used temporarily)
* r12: original start of string data
* r3, r4, r10, r11 available for loading string data
*/
@@ -1791,6 +1800,22 @@
sub r0, r12
asr r0, r0, #1
pop {r4, r10-r11, pc}
+#if (STRING_COMPRESSION_FEATURE)
+.Lstring_indexof_compressed:
+    add   r0, r0, r2                   @ r0: address of first byte to test
+    sub   r0, #1                       @ pre-bias for the pre-indexed ldrb below
+    sub   r2, r3, r2                   @ r2: chars remaining = length - start
+.Lstring_indexof_compressed_loop:
+    subs  r2, #1
+    blt   .Lindexof_nomatch            @ ran out of characters: no match
+    ldrb  r3, [r0, #1]!                @ load next byte, advancing r0
+    cmp   r3, r1
+    beq   .Lstring_indexof_compressed_matched
+    b     .Lstring_indexof_compressed_loop
+.Lstring_indexof_compressed_matched:
+    sub   r0, r12                      @ byte offset from data start == char index
+    pop   {r4, r10-r11, pc}
+#endif
END art_quick_indexof
/* Assembly routines used to handle ABI differences. */