ARM64: Move xSELF from x18 to x19.
This patch moves xSELF to the callee-saved register x19 and removes support
for ETR (the external thread register, x21), which was previously needed to
keep the thread pointer live across native calls.
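Rationale: under AAPCS64, x18 is caller-saved (and a platform register on
some targets), so native code may clobber it at any call, while x19 must be
preserved by every callee. Keeping xSELF in x19 therefore removes the need
to shuttle the thread pointer through ETR around native calls. The sketch
below is illustrative only (hypothetical labels, GNU assembler syntax), not
actual ART output:

    // Old scheme: TR (x18) is caller-saved, so JNI stubs copied it into the
    // callee-saved ETR (x21) for the duration of the native call.
    jni_stub_before_sketch:
        stp x21, x30, [sp, #-16]!   // spill ETR and lr
        mov x21, x18                // TR -> ETR: protect Thread* ...
        bl  native_function         // ... across a call free to clobber x18
        mov x18, x21                // ETR -> TR: restore on return
        ldp x21, x30, [sp], #16
        ret

    // New scheme: TR is x19, which an AAPCS64-conforming native_function
    // must itself preserve, so both moves (and the x21 reservation) vanish.
    jni_stub_after_sketch:
        str x30, [sp, #-16]!        // only lr needs saving here
        bl  native_function         // x19 survives the call by construction
        ldr x30, [sp], #16
        ret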
Change-Id: Icee07fbb9292425947f7de33d10a0ddf98c7899b
Signed-off-by: Serban Constantinescu <serban.constantinescu@linaro.org>
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index 5bf77aa..303ea3e 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -35,14 +35,15 @@
* r16-r17: Also known as ip0-ip1, respectively. Used as scratch registers by
* the linker, by the trampolines and other stubs (the backend uses
* these as temporary registers).
- * r18 : (rxSELF) is reserved (pointer to thread-local storage).
- * r19-r29: Callee save registers (promotion targets).
+ * r18 : Caller-saved register (used as a temporary register).
+ * r19 : (rxSELF) is reserved (pointer to thread-local storage).
+ * r20-r29: Callee save registers (promotion targets).
* r30 : (lr) is reserved (the link register).
* rsp : (sp) is reserved (the stack pointer).
* rzr : (zr) is reserved (the zero register).
*
- * 18 core temps that codegen can use (r0-r17).
- * 10 core registers that can be used for promotion.
+ * 19 core temps that codegen can use (r0-r18).
+ * 9 core registers that can be used for promotion.
*
* Floating-point registers
* v0-v31
@@ -145,7 +146,7 @@
// Aliases which are not defined in "ARM Architecture Reference, register names".
rxIP0 = rx16,
rxIP1 = rx17,
- rxSELF = rx18,
+ rxSELF = rx19,
rxLR = rx30,
/*
* FIXME: It's a bit awkward to define both 32 and 64-bit views of these - we'll only ever use
@@ -154,7 +155,7 @@
*/
rwIP0 = rw16,
rwIP1 = rw17,
- rwSELF = rw18,
+ rwSELF = rw19,
rwLR = rw30,
};
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index fc32ecd..fe15391 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -51,19 +51,17 @@
rs_d8, rs_d9, rs_d10, rs_d11, rs_d12, rs_d13, rs_d14, rs_d15,
rs_d16, rs_d17, rs_d18, rs_d19, rs_d20, rs_d21, rs_d22, rs_d23,
rs_d24, rs_d25, rs_d26, rs_d27, rs_d28, rs_d29, rs_d30, rs_d31};
-// Note: we are not able to call to C function since rs_xSELF is a special register need to be
-// preserved but would be scratched by native functions follow aapcs64.
static constexpr RegStorage reserved_regs_arr[] = {rs_wSELF, rs_wsp, rs_wLR, rs_wzr};
static constexpr RegStorage reserved64_regs_arr[] = {rs_xSELF, rs_sp, rs_xLR, rs_xzr};
static constexpr RegStorage core_temps_arr[] =
{rs_w0, rs_w1, rs_w2, rs_w3, rs_w4, rs_w5, rs_w6, rs_w7,
rs_w8, rs_w9, rs_w10, rs_w11, rs_w12, rs_w13, rs_w14, rs_w15, rs_w16,
- rs_w17};
+ rs_w17, rs_w18};
static constexpr RegStorage core64_temps_arr[] =
{rs_x0, rs_x1, rs_x2, rs_x3, rs_x4, rs_x5, rs_x6, rs_x7,
rs_x8, rs_x9, rs_x10, rs_x11, rs_x12, rs_x13, rs_x14, rs_x15, rs_x16,
- rs_x17};
+ rs_x17, rs_x18};
static constexpr RegStorage sp_temps_arr[] =
{rs_f0, rs_f1, rs_f2, rs_f3, rs_f4, rs_f5, rs_f6, rs_f7,
rs_f16, rs_f17, rs_f18, rs_f19, rs_f20, rs_f21, rs_f22, rs_f23,
@@ -691,6 +689,7 @@
Clobber(rs_x15);
Clobber(rs_x16);
Clobber(rs_x17);
+ Clobber(rs_x18);
Clobber(rs_x30);
Clobber(rs_f0);
diff --git a/compiler/dex/quick/quick_cfi_test_expected.inc b/compiler/dex/quick/quick_cfi_test_expected.inc
index 48109d2..634fdee 100644
--- a/compiler/dex/quick/quick_cfi_test_expected.inc
+++ b/compiler/dex/quick/quick_cfi_test_expected.inc
@@ -33,15 +33,15 @@
// 0x00000014: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kArm64[] = {
- 0xFF, 0x03, 0x01, 0xD1, 0xE8, 0xA7, 0x01, 0x6D, 0xF3, 0xD3, 0x02, 0xA9,
+ 0xFF, 0x03, 0x01, 0xD1, 0xE8, 0xA7, 0x01, 0x6D, 0xF4, 0xD7, 0x02, 0xA9,
0xFE, 0x1F, 0x00, 0xF9, 0xE0, 0x03, 0x00, 0xB9, 0xE8, 0xA7, 0x41, 0x6D,
- 0xF3, 0xD3, 0x42, 0xA9, 0xFE, 0x1F, 0x40, 0xF9, 0xFF, 0x03, 0x01, 0x91,
+ 0xF4, 0xD7, 0x42, 0xA9, 0xFE, 0x1F, 0x40, 0xF9, 0xFF, 0x03, 0x01, 0x91,
0xC0, 0x03, 0x5F, 0xD6,
};
static constexpr uint8_t expected_cfi_kArm64[] = {
- 0x44, 0x0E, 0x40, 0x44, 0x05, 0x48, 0x0A, 0x05, 0x49, 0x08, 0x44, 0x93,
- 0x06, 0x94, 0x04, 0x44, 0x9E, 0x02, 0x44, 0x0A, 0x44, 0x06, 0x48, 0x06,
- 0x49, 0x44, 0xD3, 0xD4, 0x44, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E,
+ 0x44, 0x0E, 0x40, 0x44, 0x05, 0x48, 0x0A, 0x05, 0x49, 0x08, 0x44, 0x94,
+ 0x06, 0x95, 0x04, 0x44, 0x9E, 0x02, 0x44, 0x0A, 0x44, 0x06, 0x48, 0x06,
+ 0x49, 0x44, 0xD4, 0xD5, 0x44, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E,
0x40,
};
// 0x00000000: sub sp, sp, #0x40 (64)
@@ -49,9 +49,9 @@
// 0x00000004: stp d8, d9, [sp, #24]
// 0x00000008: .cfi_offset_extended: r72 at cfa-40
// 0x00000008: .cfi_offset_extended: r73 at cfa-32
-// 0x00000008: stp x19, x20, [sp, #40]
-// 0x0000000c: .cfi_offset: r19 at cfa-24
-// 0x0000000c: .cfi_offset: r20 at cfa-16
+// 0x00000008: stp x20, x21, [sp, #40]
+// 0x0000000c: .cfi_offset: r20 at cfa-24
+// 0x0000000c: .cfi_offset: r21 at cfa-16
// 0x0000000c: str lr, [sp, #56]
// 0x00000010: .cfi_offset: r30 at cfa-8
// 0x00000010: str w0, [sp]
@@ -59,9 +59,9 @@
// 0x00000014: ldp d8, d9, [sp, #24]
// 0x00000018: .cfi_restore_extended: r72
// 0x00000018: .cfi_restore_extended: r73
-// 0x00000018: ldp x19, x20, [sp, #40]
-// 0x0000001c: .cfi_restore: r19
+// 0x00000018: ldp x20, x21, [sp, #40]
// 0x0000001c: .cfi_restore: r20
+// 0x0000001c: .cfi_restore: r21
// 0x0000001c: ldr lr, [sp, #56]
// 0x00000020: .cfi_restore: r30
// 0x00000020: add sp, sp, #0x40 (64)
diff --git a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc
index eaf7872..42fc30f 100644
--- a/compiler/jni/jni_cfi_test_expected.inc
+++ b/compiler/jni/jni_cfi_test_expected.inc
@@ -84,14 +84,13 @@
0xFF, 0x03, 0x03, 0xD1, 0xF3, 0x53, 0x06, 0xA9, 0xF5, 0x5B, 0x07, 0xA9,
0xF7, 0x63, 0x08, 0xA9, 0xF9, 0x6B, 0x09, 0xA9, 0xFB, 0x73, 0x0A, 0xA9,
0xFD, 0x7B, 0x0B, 0xA9, 0xE8, 0x27, 0x02, 0x6D, 0xEA, 0x2F, 0x03, 0x6D,
- 0xEC, 0x37, 0x04, 0x6D, 0xEE, 0x3F, 0x05, 0x6D, 0xF5, 0x03, 0x12, 0xAA,
- 0xE0, 0x03, 0x00, 0xB9, 0xE1, 0xC7, 0x00, 0xB9, 0xE0, 0xCB, 0x00, 0xBD,
- 0xE2, 0xCF, 0x00, 0xB9, 0xE3, 0xD3, 0x00, 0xB9, 0xFF, 0x83, 0x00, 0xD1,
- 0xFF, 0x83, 0x00, 0x91, 0xF2, 0x03, 0x15, 0xAA, 0xF3, 0x53, 0x46, 0xA9,
- 0xF5, 0x5B, 0x47, 0xA9, 0xF7, 0x63, 0x48, 0xA9, 0xF9, 0x6B, 0x49, 0xA9,
- 0xFB, 0x73, 0x4A, 0xA9, 0xFD, 0x7B, 0x4B, 0xA9, 0xE8, 0x27, 0x42, 0x6D,
- 0xEA, 0x2F, 0x43, 0x6D, 0xEC, 0x37, 0x44, 0x6D, 0xEE, 0x3F, 0x45, 0x6D,
- 0xFF, 0x03, 0x03, 0x91, 0xC0, 0x03, 0x5F, 0xD6,
+ 0xEC, 0x37, 0x04, 0x6D, 0xEE, 0x3F, 0x05, 0x6D, 0xE0, 0x03, 0x00, 0xB9,
+ 0xE1, 0xC7, 0x00, 0xB9, 0xE0, 0xCB, 0x00, 0xBD, 0xE2, 0xCF, 0x00, 0xB9,
+ 0xE3, 0xD3, 0x00, 0xB9, 0xFF, 0x83, 0x00, 0xD1, 0xFF, 0x83, 0x00, 0x91,
+ 0xF3, 0x53, 0x46, 0xA9, 0xF5, 0x5B, 0x47, 0xA9, 0xF7, 0x63, 0x48, 0xA9,
+ 0xF9, 0x6B, 0x49, 0xA9, 0xFB, 0x73, 0x4A, 0xA9, 0xFD, 0x7B, 0x4B, 0xA9,
+ 0xE8, 0x27, 0x42, 0x6D, 0xEA, 0x2F, 0x43, 0x6D, 0xEC, 0x37, 0x44, 0x6D,
+ 0xEE, 0x3F, 0x45, 0x6D, 0xFF, 0x03, 0x03, 0x91, 0xC0, 0x03, 0x5F, 0xD6,
};
static constexpr uint8_t expected_cfi_kArm64[] = {
0x44, 0x0E, 0xC0, 0x01, 0x44, 0x93, 0x18, 0x94, 0x16, 0x44, 0x95, 0x14,
@@ -99,11 +98,11 @@
0x44, 0x9B, 0x08, 0x9C, 0x06, 0x44, 0x9D, 0x04, 0x9E, 0x02, 0x44, 0x05,
0x48, 0x28, 0x05, 0x49, 0x26, 0x44, 0x05, 0x4A, 0x24, 0x05, 0x4B, 0x22,
0x44, 0x05, 0x4C, 0x20, 0x05, 0x4D, 0x1E, 0x44, 0x05, 0x4E, 0x1C, 0x05,
- 0x4F, 0x1A, 0x5C, 0x0E, 0xE0, 0x01, 0x44, 0x0E, 0xC0, 0x01, 0x44, 0x0A,
- 0x44, 0xD3, 0xD4, 0x44, 0xD5, 0xD6, 0x44, 0xD7, 0xD8, 0x44, 0xD9, 0xDA,
- 0x44, 0xDB, 0xDC, 0x44, 0xDD, 0xDE, 0x44, 0x06, 0x48, 0x06, 0x49, 0x44,
- 0x06, 0x4A, 0x06, 0x4B, 0x44, 0x06, 0x4C, 0x06, 0x4D, 0x44, 0x06, 0x4E,
- 0x06, 0x4F, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0xC0, 0x01,
+ 0x4F, 0x1A, 0x58, 0x0E, 0xE0, 0x01, 0x44, 0x0E, 0xC0, 0x01, 0x0A, 0x44,
+ 0xD3, 0xD4, 0x44, 0xD5, 0xD6, 0x44, 0xD7, 0xD8, 0x44, 0xD9, 0xDA, 0x44,
+ 0xDB, 0xDC, 0x44, 0xDD, 0xDE, 0x44, 0x06, 0x48, 0x06, 0x49, 0x44, 0x06,
+ 0x4A, 0x06, 0x4B, 0x44, 0x06, 0x4C, 0x06, 0x4D, 0x44, 0x06, 0x4E, 0x06,
+ 0x4F, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0xC0, 0x01,
};
// 0x00000000: sub sp, sp, #0xc0 (192)
// 0x00000004: .cfi_def_cfa_offset: 192
@@ -137,53 +136,51 @@
// 0x00000028: stp d14, d15, [sp, #80]
// 0x0000002c: .cfi_offset_extended: r78 at cfa-112
// 0x0000002c: .cfi_offset_extended: r79 at cfa-104
-// 0x0000002c: mov x21, tr
-// 0x00000030: str w0, [sp]
-// 0x00000034: str w1, [sp, #196]
-// 0x00000038: str s0, [sp, #200]
-// 0x0000003c: str w2, [sp, #204]
-// 0x00000040: str w3, [sp, #208]
-// 0x00000044: sub sp, sp, #0x20 (32)
-// 0x00000048: .cfi_def_cfa_offset: 224
-// 0x00000048: add sp, sp, #0x20 (32)
-// 0x0000004c: .cfi_def_cfa_offset: 192
-// 0x0000004c: mov tr, x21
-// 0x00000050: .cfi_remember_state
-// 0x00000050: ldp x19, x20, [sp, #96]
-// 0x00000054: .cfi_restore: r19
-// 0x00000054: .cfi_restore: r20
-// 0x00000054: ldp x21, x22, [sp, #112]
-// 0x00000058: .cfi_restore: r21
-// 0x00000058: .cfi_restore: r22
-// 0x00000058: ldp x23, x24, [sp, #128]
-// 0x0000005c: .cfi_restore: r23
-// 0x0000005c: .cfi_restore: r24
-// 0x0000005c: ldp x25, x26, [sp, #144]
-// 0x00000060: .cfi_restore: r25
-// 0x00000060: .cfi_restore: r26
-// 0x00000060: ldp x27, x28, [sp, #160]
-// 0x00000064: .cfi_restore: r27
-// 0x00000064: .cfi_restore: r28
-// 0x00000064: ldp x29, lr, [sp, #176]
-// 0x00000068: .cfi_restore: r29
-// 0x00000068: .cfi_restore: r30
-// 0x00000068: ldp d8, d9, [sp, #32]
-// 0x0000006c: .cfi_restore_extended: r72
-// 0x0000006c: .cfi_restore_extended: r73
-// 0x0000006c: ldp d10, d11, [sp, #48]
-// 0x00000070: .cfi_restore_extended: r74
-// 0x00000070: .cfi_restore_extended: r75
-// 0x00000070: ldp d12, d13, [sp, #64]
-// 0x00000074: .cfi_restore_extended: r76
-// 0x00000074: .cfi_restore_extended: r77
-// 0x00000074: ldp d14, d15, [sp, #80]
-// 0x00000078: .cfi_restore_extended: r78
-// 0x00000078: .cfi_restore_extended: r79
-// 0x00000078: add sp, sp, #0xc0 (192)
-// 0x0000007c: .cfi_def_cfa_offset: 0
-// 0x0000007c: ret
-// 0x00000080: .cfi_restore_state
-// 0x00000080: .cfi_def_cfa_offset: 192
+// 0x0000002c: str w0, [sp]
+// 0x00000030: str w1, [sp, #196]
+// 0x00000034: str s0, [sp, #200]
+// 0x00000038: str w2, [sp, #204]
+// 0x0000003c: str w3, [sp, #208]
+// 0x00000040: sub sp, sp, #0x20 (32)
+// 0x00000044: .cfi_def_cfa_offset: 224
+// 0x00000044: add sp, sp, #0x20 (32)
+// 0x00000048: .cfi_def_cfa_offset: 192
+// 0x00000048: .cfi_remember_state
+// 0x00000048: ldp x19, x20, [sp, #96]
+// 0x0000004c: .cfi_restore: r19
+// 0x0000004c: .cfi_restore: r20
+// 0x0000004c: ldp x21, x22, [sp, #112]
+// 0x00000050: .cfi_restore: r21
+// 0x00000050: .cfi_restore: r22
+// 0x00000050: ldp x23, x24, [sp, #128]
+// 0x00000054: .cfi_restore: r23
+// 0x00000054: .cfi_restore: r24
+// 0x00000054: ldp x25, x26, [sp, #144]
+// 0x00000058: .cfi_restore: r25
+// 0x00000058: .cfi_restore: r26
+// 0x00000058: ldp x27, x28, [sp, #160]
+// 0x0000005c: .cfi_restore: r27
+// 0x0000005c: .cfi_restore: r28
+// 0x0000005c: ldp x29, lr, [sp, #176]
+// 0x00000060: .cfi_restore: r29
+// 0x00000060: .cfi_restore: r30
+// 0x00000060: ldp d8, d9, [sp, #32]
+// 0x00000064: .cfi_restore_extended: r72
+// 0x00000064: .cfi_restore_extended: r73
+// 0x00000064: ldp d10, d11, [sp, #48]
+// 0x00000068: .cfi_restore_extended: r74
+// 0x00000068: .cfi_restore_extended: r75
+// 0x00000068: ldp d12, d13, [sp, #64]
+// 0x0000006c: .cfi_restore_extended: r76
+// 0x0000006c: .cfi_restore_extended: r77
+// 0x0000006c: ldp d14, d15, [sp, #80]
+// 0x00000070: .cfi_restore_extended: r78
+// 0x00000070: .cfi_restore_extended: r79
+// 0x00000070: add sp, sp, #0xc0 (192)
+// 0x00000074: .cfi_def_cfa_offset: 0
+// 0x00000074: ret
+// 0x00000078: .cfi_restore_state
+// 0x00000078: .cfi_def_cfa_offset: 192
static constexpr uint8_t expected_asm_kX86[] = {
0x57, 0x56, 0x55, 0x83, 0xC4, 0xE4, 0x50, 0x89, 0x4C, 0x24, 0x34, 0xF3,
diff --git a/compiler/jni/quick/arm64/calling_convention_arm64.cc b/compiler/jni/quick/arm64/calling_convention_arm64.cc
index a6caff1..03dccb9 100644
--- a/compiler/jni/quick/arm64/calling_convention_arm64.cc
+++ b/compiler/jni/quick/arm64/calling_convention_arm64.cc
@@ -183,7 +183,7 @@
// Jni function is the native function which the java code wants to call.
// Jni method is the method that compiled by jni compiler.
// Call chain: managed code(java) --> jni method --> jni function.
- // Thread register(X18, scratched by aapcs64) is not saved on stack, it is saved in ETR(X21).
+ // The thread register (X19) is saved on the stack with the other callee-saved registers.
return 1 << X19 | 1 << X20 | 1 << X21 | 1 << X22 | 1 << X23 | 1 << X24 |
1 << X25 | 1 << X26 | 1 << X27 | 1 << X28 | 1 << X29 | 1 << LR;
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index b56ca10..ab793a5 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -44,7 +44,7 @@
};
static constexpr size_t kParameterFPRegistersLength = arraysize(kParameterFPRegisters);
-const vixl::Register tr = vixl::x18; // Thread Register
+const vixl::Register tr = vixl::x19; // Thread Register
static const vixl::Register kArtMethodRegister = vixl::w0; // Method register on invoke.
const vixl::CPURegList vixl_reserved_core_registers(vixl::ip0, vixl::ip1);
@@ -52,10 +52,10 @@
const vixl::CPURegList runtime_reserved_core_registers(tr, vixl::lr);
-// Callee-saved registers defined by AAPCS64.
+// Callee-saved registers defined by AAPCS64, excluding x19 (the thread register).
const vixl::CPURegList callee_saved_core_registers(vixl::CPURegister::kRegister,
vixl::kXRegSize,
- vixl::x19.code(),
+ vixl::x20.code(),
vixl::x30.code());
const vixl::CPURegList callee_saved_fp_registers(vixl::CPURegister::kFPRegister,
vixl::kDRegSize,
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
index 2125f6e..ecb3b0a 100644
--- a/compiler/optimizing/optimizing_cfi_test_expected.inc
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -32,20 +32,20 @@
// 0x00000012: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kArm64[] = {
- 0xE0, 0x0F, 0x1C, 0xB8, 0xF3, 0xD3, 0x02, 0xA9, 0xFE, 0x1F, 0x00, 0xF9,
- 0xE8, 0xA7, 0x01, 0x6D, 0xE8, 0xA7, 0x41, 0x6D, 0xF3, 0xD3, 0x42, 0xA9,
+ 0xE0, 0x0F, 0x1C, 0xB8, 0xF4, 0xD7, 0x02, 0xA9, 0xFE, 0x1F, 0x00, 0xF9,
+ 0xE8, 0xA7, 0x01, 0x6D, 0xE8, 0xA7, 0x41, 0x6D, 0xF4, 0xD7, 0x42, 0xA9,
0xFE, 0x1F, 0x40, 0xF9, 0xFF, 0x03, 0x01, 0x91, 0xC0, 0x03, 0x5F, 0xD6,
};
static constexpr uint8_t expected_cfi_kArm64[] = {
- 0x44, 0x0E, 0x40, 0x44, 0x93, 0x06, 0x94, 0x04, 0x44, 0x9E, 0x02, 0x44,
+ 0x44, 0x0E, 0x40, 0x44, 0x94, 0x06, 0x95, 0x04, 0x44, 0x9E, 0x02, 0x44,
0x05, 0x48, 0x0A, 0x05, 0x49, 0x08, 0x0A, 0x44, 0x06, 0x48, 0x06, 0x49,
- 0x44, 0xD3, 0xD4, 0x44, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0x40,
+ 0x44, 0xD4, 0xD5, 0x44, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0x40,
};
// 0x00000000: str w0, [sp, #-64]!
// 0x00000004: .cfi_def_cfa_offset: 64
-// 0x00000004: stp x19, x20, [sp, #40]
-// 0x00000008: .cfi_offset: r19 at cfa-24
-// 0x00000008: .cfi_offset: r20 at cfa-16
+// 0x00000004: stp x20, x21, [sp, #40]
+// 0x00000008: .cfi_offset: r20 at cfa-24
+// 0x00000008: .cfi_offset: r21 at cfa-16
// 0x00000008: str lr, [sp, #56]
// 0x0000000c: .cfi_offset: r30 at cfa-8
// 0x0000000c: stp d8, d9, [sp, #24]
@@ -55,9 +55,9 @@
// 0x00000010: ldp d8, d9, [sp, #24]
// 0x00000014: .cfi_restore_extended: r72
// 0x00000014: .cfi_restore_extended: r73
-// 0x00000014: ldp x19, x20, [sp, #40]
-// 0x00000018: .cfi_restore: r19
+// 0x00000014: ldp x20, x21, [sp, #40]
// 0x00000018: .cfi_restore: r20
+// 0x00000018: .cfi_restore: r21
// 0x00000018: ldr lr, [sp, #56]
// 0x0000001c: .cfi_restore: r30
// 0x0000001c: add sp, sp, #0x40 (64)
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 98702a2..f924c71 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -52,11 +52,11 @@
}
void Arm64Assembler::GetCurrentThread(ManagedRegister tr) {
- ___ Mov(reg_x(tr.AsArm64().AsXRegister()), reg_x(ETR));
+ ___ Mov(reg_x(tr.AsArm64().AsXRegister()), reg_x(TR));
}
void Arm64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) {
- StoreToOffset(ETR, SP, offset.Int32Value());
+ StoreToOffset(TR, SP, offset.Int32Value());
}
// See Arm64 PCS Section 5.2.2.1.
@@ -168,7 +168,7 @@
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
LoadImmediate(scratch.AsXRegister(), imm);
- StoreToOffset(scratch.AsXRegister(), ETR, offs.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), TR, offs.Int32Value());
}
void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> tr_offs,
@@ -177,14 +177,14 @@
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value());
- StoreToOffset(scratch.AsXRegister(), ETR, tr_offs.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
}
void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset<8> tr_offs) {
vixl::UseScratchRegisterScope temps(vixl_masm_);
vixl::Register temp = temps.AcquireX();
___ Mov(temp, reg_x(SP));
- ___ Str(temp, MEM_OP(reg_x(ETR), tr_offs.Int32Value()));
+ ___ Str(temp, MEM_OP(reg_x(TR), tr_offs.Int32Value()));
}
void Arm64Assembler::StoreSpanning(FrameOffset dest_off, ManagedRegister m_source,
@@ -285,7 +285,7 @@
}
void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset<8> src, size_t size) {
- return Load(m_dst.AsArm64(), ETR, src.Int32Value(), size);
+ return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
}
void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
@@ -320,7 +320,7 @@
void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset<8> offs) {
Arm64ManagedRegister dst = m_dst.AsArm64();
CHECK(dst.IsXRegister()) << dst;
- LoadFromOffset(dst.AsXRegister(), ETR, offs.Int32Value());
+ LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value());
}
// Copying routines.
@@ -358,7 +358,7 @@
ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
- LoadFromOffset(scratch.AsXRegister(), ETR, tr_offs.Int32Value());
+ LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
}
@@ -368,7 +368,7 @@
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
- StoreToOffset(scratch.AsXRegister(), ETR, tr_offs.Int32Value());
+ StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
}
void Arm64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
@@ -611,7 +611,7 @@
Arm64ManagedRegister scratch = m_scratch.AsArm64();
Arm64Exception *current_exception = new Arm64Exception(scratch, stack_adjust);
exception_blocks_.push_back(current_exception);
- LoadFromOffset(scratch.AsXRegister(), ETR, Thread::ExceptionOffset<8>().Int32Value());
+ LoadFromOffset(scratch.AsXRegister(), TR, Thread::ExceptionOffset<8>().Int32Value());
___ Cbnz(reg_x(scratch.AsXRegister()), current_exception->Entry());
}
@@ -628,12 +628,7 @@
// Pass exception object as argument.
// Don't care about preserving X0 as this won't return.
___ Mov(reg_x(X0), reg_x(exception->scratch_.AsXRegister()));
- ___ Ldr(temp, MEM_OP(reg_x(ETR), QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value()));
-
- // Move ETR(Callee saved) back to TR(Caller saved) reg. We use ETR on calls
- // to external functions that might trash TR. We do not need the original
- // ETR(X21) saved in BuildFrame().
- ___ Mov(reg_x(TR), reg_x(ETR));
+ ___ Ldr(temp, MEM_OP(reg_x(TR), QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value()));
___ Blr(temp);
// Call should never return.
@@ -714,12 +709,7 @@
SpillRegisters(core_reg_list, frame_size - core_reg_size);
SpillRegisters(fp_reg_list, frame_size - core_reg_size - fp_reg_size);
- // Note: This is specific to JNI method frame.
- // We will need to move TR(Caller saved in AAPCS) to ETR(Callee saved in AAPCS). The original
- // (ETR)X21 has been saved on stack. In this way, we can restore TR later.
- DCHECK(!core_reg_list.IncludesAliasOf(reg_x(TR)));
- DCHECK(core_reg_list.IncludesAliasOf(reg_x(ETR)));
- ___ Mov(reg_x(ETR), reg_x(TR));
+ DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
// Write StackReference<Method>.
DCHECK(X0 == method_reg.AsArm64().AsXRegister());
@@ -772,11 +762,7 @@
DCHECK_GE(frame_size, core_reg_size + fp_reg_size + sizeof(StackReference<mirror::ArtMethod>));
DCHECK_ALIGNED(frame_size, kStackAlignment);
- // Note: This is specific to JNI method frame.
- // Restore TR(Caller saved in AAPCS) from ETR(Callee saved in AAPCS).
- DCHECK(!core_reg_list.IncludesAliasOf(reg_x(TR)));
- DCHECK(core_reg_list.IncludesAliasOf(reg_x(ETR)));
- ___ Mov(reg_x(TR), reg_x(ETR));
+ DCHECK(core_reg_list.IncludesAliasOf(reg_x(TR)));
cfi_.RememberState();
diff --git a/compiler/utils/arm64/managed_register_arm64_test.cc b/compiler/utils/arm64/managed_register_arm64_test.cc
index 32c2e62..e27115d 100644
--- a/compiler/utils/arm64/managed_register_arm64_test.cc
+++ b/compiler/utils/arm64/managed_register_arm64_test.cc
@@ -623,7 +623,7 @@
EXPECT_TRUE(vixl::x29.Is(Arm64Assembler::reg_x(X29)));
EXPECT_TRUE(vixl::x30.Is(Arm64Assembler::reg_x(X30)));
- EXPECT_TRUE(vixl::x18.Is(Arm64Assembler::reg_x(TR)));
+ EXPECT_TRUE(vixl::x19.Is(Arm64Assembler::reg_x(TR)));
EXPECT_TRUE(vixl::ip0.Is(Arm64Assembler::reg_x(IP0)));
EXPECT_TRUE(vixl::ip1.Is(Arm64Assembler::reg_x(IP1)));
EXPECT_TRUE(vixl::x29.Is(Arm64Assembler::reg_x(FP)));
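For completeness, the exception-poll path patched in assembler_arm64.cc above
now flows through a single register: both the pending-exception load and the
pDeliverException entrypoint load go through TR (x19), with no ETR move in
between. An illustrative reconstruction (placeholder offsets and labels, GNU
assembler syntax), not actual ART output:

        .equ SELF_EXCEPTION, 0x10   // stand-in for Thread::ExceptionOffset<8>()
        .equ SELF_DELIVER,   0x200  // stand-in for QUICK_ENTRYPOINT_OFFSET(8, pDeliverException)
    exception_poll_sketch:
        ldr  x15, [x19, #SELF_EXCEPTION]  // read Thread::exception_ through TR (x19)
        cbz  x15, 1f                      // nothing pending: fall through
        mov  x0, x15                      // pass the exception object as the argument
        ldr  x16, [x19, #SELF_DELIVER]    // entrypoint also loaded via x19
        blr  x16                          // pDeliverException never returns
    1:  ret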