ART: Add more ThreadOffset overloads in Mir2Lir and backends
This duplicates all methods with ThreadOffset parameters, so that
both ThreadOffset<4> and ThreadOffset<8> can be handled. Dynamic
checks against the compilation unit's instruction set determine
which pointer size to use and therefore which methods to call.
Methods invoked with an unsupported pointer size fail fatally, as
such a call indicates an error in method selection.
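
For illustration, a minimal standalone sketch of the pattern (the
Mir2LirSketch/Arm64Sketch types, AccessThreadField, and the offset
value are invented stand-ins for this note, not the actual Mir2Lir
API; the abort() mirrors the UNIMPLEMENTED(FATAL) used in the change):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Simplified stand-in for art::ThreadOffset; the pointer size is
    // part of the type, so <4> and <8> select different overloads.
    template <int kPointerSize>
    class ThreadOffset {
     public:
      explicit ThreadOffset(int32_t value) : value_(value) {}
      int32_t Int32Value() const { return value_; }
     private:
      int32_t value_;
    };

    enum InstructionSet { kThumb2, kArm64 };

    // Every method taking a ThreadOffset exists in a 4-byte and an
    // 8-byte flavor.
    class Mir2LirSketch {
     public:
      virtual ~Mir2LirSketch() {}
      virtual void OpThreadMem(ThreadOffset<4> offset) = 0;
      virtual void OpThreadMem(ThreadOffset<8> offset) = 0;

      // The dynamic check: the compilation unit's instruction set
      // decides which pointer size, and thus which overload, to use.
      void AccessThreadField(InstructionSet isa, int32_t raw_offset) {
        if (isa == kArm64) {
          OpThreadMem(ThreadOffset<8>(raw_offset));
        } else {
          OpThreadMem(ThreadOffset<4>(raw_offset));
        }
      }
    };

    // A 64-bit backend implements only the 8-byte flavor; the 4-byte
    // one fails fatally, signaling a bug in method selection.
    class Arm64Sketch : public Mir2LirSketch {
     public:
      void OpThreadMem(ThreadOffset<4> offset) override {
        fprintf(stderr, "4-byte ThreadOffset on a 64-bit target\n");
        abort();
      }
      void OpThreadMem(ThreadOffset<8> offset) override {
        printf("TLS access at offset %d\n",
               static_cast<int>(offset.Int32Value()));
      }
    };

    int main() {
      Arm64Sketch backend;
      backend.AccessThreadField(kArm64, 136);  // ThreadOffset<8> path
      return 0;
    }

On a 64-bit target the dynamic check routes every thread-field access
to the ThreadOffset<8> overload; reaching the ThreadOffset<4> overload
can only mean the wrong method was selected.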
Change-Id: Ifdb445b3732d3dc5e6a220db57374a55e91e1bf6
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index 452c8d7..7ae4b02 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -95,25 +95,6 @@
* +========================+
*/
-#if 1
-#define A64_PTR_SIZE 4
-#define A64_GET_INT_OFFS(offs) ((offs).Int32Value())
-#else
-// Not yet ready for this.
-#define A64_PTR_SIZE 8
-#define A64_GET_INT_OFFS(offs) ((offs).Int32Value())
-#endif
-
-#define A64_QUICK_ENTRYPOINT_OFFSET(name) QUICK_ENTRYPOINT_OFFSET(A64_PTR_SIZE, name)
-#define A64_QUICK_ENTRYPOINT_INT_OFFS(name) A64_GET_INT_OFFS(A64_QUICK_ENTRYPOINT_OFFSET(name))
-#define A64_THREAD_THIN_LOCK_ID_OFFSET A64_GET_INT_OFFS(Thread::ThinLockIdOffset<A64_PTR_SIZE>())
-#define A64_THREAD_EXCEPTION_INT_OFFS A64_GET_INT_OFFS(Thread::ExceptionOffset<A64_PTR_SIZE>())
-#define A64_THREAD_CARD_TABLE_INT_OFFS A64_GET_INT_OFFS(Thread::CardTableOffset<A64_PTR_SIZE>())
-#define A64_THREAD_STACK_END_INT_OFFS A64_GET_INT_OFFS(Thread::StackEndOffset<A64_PTR_SIZE>())
-#define A64_THREAD_SUSPEND_TRIGGER_OFFSET \
- A64_GET_INT_OFFS(Thread::ThreadSuspendTriggerOffset<A64_PTR_SIZE>())
-typedef ThreadOffset<A64_PTR_SIZE> A64ThreadOffset;
-
// Offset to distinguish FP regs.
#define ARM_FP_REG_OFFSET 32
// First FP callee save.
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index c210816..51e97cd 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -180,7 +180,7 @@
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
LoadValueDirectFixed(rl_src, rs_x0);
- LoadWordDisp(rs_rA64_SELF, A64_QUICK_ENTRYPOINT_INT_OFFS(pHandleFillArrayData),
+ LoadWordDisp(rs_rA64_SELF, QUICK_ENTRYPOINT_OFFSET(8, pHandleFillArrayData).Int32Value(),
rs_rA64_LR);
// Materialize a pointer to the fill data image
NewLIR3(kA64Adr2xd, rx1, 0, WrapPointer(tab_rec));
@@ -209,7 +209,7 @@
null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
}
}
- Load32Disp(rs_rA64_SELF, A64_THREAD_THIN_LOCK_ID_OFFSET, rs_x2);
+ Load32Disp(rs_rA64_SELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_x2);
NewLIR3(kA64Ldxr2rX, rx1, rx0, mirror::Object::MonitorOffset().Int32Value() >> 2);
MarkPossibleNullPointerException(opt_flags);
LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_x1, 0, NULL);
@@ -224,7 +224,7 @@
}
// TODO: move to a slow path.
// Go expensive route - artLockObjectFromCode(obj);
- LoadWordDisp(rs_rA64_SELF, A64_QUICK_ENTRYPOINT_INT_OFFS(pLockObject), rs_rA64_LR);
+ LoadWordDisp(rs_rA64_SELF, QUICK_ENTRYPOINT_OFFSET(8, pLockObject).Int32Value(), rs_rA64_LR);
ClobberCallerSave();
LIR* call_inst = OpReg(kOpBlx, rs_rA64_LR);
MarkSafepointPC(call_inst);
@@ -235,7 +235,7 @@
} else {
// Explicit null-check as slow-path is entered using an IT.
GenNullCheck(rs_x0, opt_flags);
- Load32Disp(rs_rA64_SELF, A64_THREAD_THIN_LOCK_ID_OFFSET, rs_x2);
+ Load32Disp(rs_rA64_SELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_x2);
MarkPossibleNullPointerException(opt_flags);
NewLIR3(kA64Ldxr2rX, rx1, rx0, mirror::Object::MonitorOffset().Int32Value() >> 2);
OpRegImm(kOpCmp, rs_x1, 0);
@@ -244,7 +244,8 @@
OpRegImm(kOpCmp, rs_x1, 0);
OpIT(kCondNe, "T");
// Go expensive route - artLockObjectFromCode(self, obj);
- LoadWordDisp/*ne*/(rs_rA64_SELF, A64_QUICK_ENTRYPOINT_INT_OFFS(pLockObject), rs_rA64_LR);
+ LoadWordDisp/*ne*/(rs_rA64_SELF, QUICK_ENTRYPOINT_OFFSET(8, pLockObject).Int32Value(),
+ rs_rA64_LR);
ClobberCallerSave();
LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rA64_LR);
MarkSafepointPC(call_inst);
@@ -262,7 +263,7 @@
LoadValueDirectFixed(rl_src, rs_x0); // Get obj
LockCallTemps(); // Prepare for explicit register usage
LIR* null_check_branch = nullptr;
- Load32Disp(rs_rA64_SELF, A64_THREAD_THIN_LOCK_ID_OFFSET, rs_x2);
+ Load32Disp(rs_rA64_SELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_x2);
constexpr bool kArchVariantHasGoodBranchPredictor = false; // TODO: true if cortex-A15.
if (kArchVariantHasGoodBranchPredictor) {
if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
@@ -287,7 +288,7 @@
}
// TODO: move to a slow path.
// Go expensive route - artUnlockObjectFromCode(obj);
- LoadWordDisp(rs_rA64_SELF, A64_QUICK_ENTRYPOINT_INT_OFFS(pUnlockObject), rs_rA64_LR);
+ LoadWordDisp(rs_rA64_SELF, QUICK_ENTRYPOINT_OFFSET(8, pUnlockObject).Int32Value(), rs_rA64_LR);
ClobberCallerSave();
LIR* call_inst = OpReg(kOpBlx, rs_rA64_LR);
MarkSafepointPC(call_inst);
@@ -300,14 +301,15 @@
GenNullCheck(rs_x0, opt_flags);
Load32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_x1); // Get lock
MarkPossibleNullPointerException(opt_flags);
- Load32Disp(rs_rA64_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_x2);
+ Load32Disp(rs_rA64_SELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_x2);
LoadConstantNoClobber(rs_x3, 0);
// Is lock unheld on lock or held by us (==thread_id) on unlock?
OpRegReg(kOpCmp, rs_x1, rs_x2);
OpIT(kCondEq, "EE");
Store32Disp/*eq*/(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_x3);
// Go expensive route - UnlockObjectFromCode(obj);
- LoadWordDisp/*ne*/(rs_rA64_SELF, A64_QUICK_ENTRYPOINT_INT_OFFS(pUnlockObject), rs_rA64_LR);
+ LoadWordDisp/*ne*/(rs_rA64_SELF, QUICK_ENTRYPOINT_OFFSET(8, pUnlockObject).Int32Value(),
+ rs_rA64_LR);
ClobberCallerSave();
LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rA64_LR);
MarkSafepointPC(call_inst);
@@ -316,7 +318,7 @@
}
void Arm64Mir2Lir::GenMoveException(RegLocation rl_dest) {
- int ex_offset = A64_THREAD_EXCEPTION_INT_OFFS;
+ int ex_offset = Thread::ExceptionOffset<8>().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
RegStorage reset_reg = AllocTemp();
Load32Disp(rs_rA64_SELF, ex_offset, rl_result.reg);
@@ -333,7 +335,7 @@
RegStorage reg_card_base = AllocTemp();
RegStorage reg_card_no = AllocTemp();
LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
- LoadWordDisp(rs_rA64_SELF, A64_THREAD_CARD_TABLE_INT_OFFS, reg_card_base);
+ LoadWordDisp(rs_rA64_SELF, Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
LIR* target = NewLIR0(kPseudoTargetLabel);
@@ -364,7 +366,7 @@
NewLIR0(kPseudoMethodEntry);
if (!skip_overflow_check) {
- LoadWordDisp(rs_rA64_SELF, A64_THREAD_STACK_END_INT_OFFS, rs_x12);
+ LoadWordDisp(rs_rA64_SELF, Thread::StackEndOffset<8>().Int32Value(), rs_x12);
OpRegImm64(kOpSub, rs_rA64_SP, frame_size_, /*is_wide*/true);
if (Runtime::Current()->ExplicitStackOverflowChecks()) {
/* Load stack limit */
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 26084a2..af0029c 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -31,7 +31,8 @@
RegLocation rl_dest, int lit);
bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
- RegStorage LoadHelper(A64ThreadOffset offset);
+ RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
+ RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
@@ -181,12 +182,14 @@
LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
LIR* OpTestSuspend(LIR* target);
- LIR* OpThreadMem(OpKind op, A64ThreadOffset thread_offset);
+ LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) OVERRIDE;
+ LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) OVERRIDE;
LIR* OpVldm(RegStorage r_base, int count);
LIR* OpVstm(RegStorage r_base, int count);
void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
void OpRegCopyWide(RegStorage dest, RegStorage src);
- void OpTlsCmp(A64ThreadOffset offset, int val);
+ void OpTlsCmp(ThreadOffset<4> offset, int val) OVERRIDE;
+ void OpTlsCmp(ThreadOffset<8> offset, int val) OVERRIDE;
LIR* LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size);
LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
diff --git a/compiler/dex/quick/arm64/fp_arm64.cc b/compiler/dex/quick/arm64/fp_arm64.cc
index c2a550e..87ab6fe 100644
--- a/compiler/dex/quick/arm64/fp_arm64.cc
+++ b/compiler/dex/quick/arm64/fp_arm64.cc
@@ -49,7 +49,7 @@
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(A64_QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2,
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pFmodf), rl_src1, rl_src2,
false);
rl_result = GetReturn(true);
StoreValue(rl_dest, rl_result);
@@ -92,7 +92,7 @@
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(A64_QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2,
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(8, pFmod), rl_src1, rl_src2,
false);
rl_result = GetReturnWide(true);
StoreValueWide(rl_dest, rl_result);
@@ -310,7 +310,7 @@
branch = NewLIR2(kA64B2ct, kArmCondEq, 0);
ClobberCallerSave();
LockCallTemps(); // Using fixed registers
- RegStorage r_tgt = LoadHelper(A64_QUICK_ENTRYPOINT_OFFSET(pSqrt));
+ RegStorage r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pSqrt));
// NewLIR3(kThumb2Fmrrd, r0, r1, rl_src.reg.GetReg());
NewLIR1(kA64Blr1x, r_tgt.GetReg());
// NewLIR3(kThumb2Fmdrr, rl_result.reg.GetReg(), r0, r1);
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 709f583..0465249 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -501,7 +501,11 @@
LOG(FATAL) << "Unexpected use of OpLea for Arm64";
}
-void Arm64Mir2Lir::OpTlsCmp(A64ThreadOffset offset, int val) {
+void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
+ UNIMPLEMENTED(FATAL) << "Should not be used.";
+}
+
+void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm64";
}
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index e4764eb..6caacc8 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -731,7 +731,12 @@
FreeTemp(rs_x3);
}
-RegStorage Arm64Mir2Lir::LoadHelper(A64ThreadOffset offset) {
+RegStorage Arm64Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
+ UNIMPLEMENTED(FATAL) << "Should not be called.";
+ return RegStorage::InvalidReg();
+}
+
+RegStorage Arm64Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
// TODO(Arm64): use LoadWordDisp instead.
// e.g. LoadWordDisp(rs_rA64_SELF, offset.Int32Value(), rs_rA64_LR);
LoadBaseDisp(rs_rA64_SELF, offset.Int32Value(), rs_rA64_LR, k64);
@@ -740,7 +745,7 @@
LIR* Arm64Mir2Lir::CheckSuspendUsingLoad() {
RegStorage tmp = rs_x0;
- LoadWordDisp(rs_rA64_SELF, A64_THREAD_SUSPEND_TRIGGER_OFFSET, tmp);
+ LoadWordDisp(rs_rA64_SELF, Thread::ThreadSuspendTriggerOffset<8>().Int32Value(), tmp);
LIR* load2 = LoadWordDisp(tmp, 0, tmp);
return load2;
}
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index ae17711..77e4c3c 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -1056,7 +1056,12 @@
return NULL;
}
-LIR* Arm64Mir2Lir::OpThreadMem(OpKind op, A64ThreadOffset thread_offset) {
+LIR* Arm64Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
+ UNIMPLEMENTED(FATAL) << "Should not be used.";
+ return nullptr;
+}
+
+LIR* Arm64Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
LOG(FATAL) << "Unexpected use of OpThreadMem for Arm64";
return NULL;
}