ART: Add more ThreadOffset overloads in Mir2Lir and backends
This duplicates all methods with ThreadOffset parameters, so that
both ThreadOffset<4> and ThreadOffset<8> can be handled. Dynamic
checks against the compilation unit's instruction set determine
which pointer size to use and therefore which methods to call.
Methods with unsupported pointer sizes should fatally fail, as
this indicates an issue during method selection.
Change-Id: Ifdb445b3732d3dc5e6a220db57374a55e91e1bf6
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index f701a1f..cf2b10a 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -161,7 +161,9 @@
}
void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
- int ex_offset = Thread::ExceptionOffset<4>().Int32Value();
+ int ex_offset = Is64BitInstructionSet(cu_->instruction_set) ?
+ Thread::ExceptionOffset<8>().Int32Value() :
+ Thread::ExceptionOffset<4>().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
NewLIR2(kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
NewLIR2(kX86Mov32TI, ex_offset, 0);
@@ -175,7 +177,10 @@
RegStorage reg_card_base = AllocTemp();
RegStorage reg_card_no = AllocTemp();
LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
- NewLIR2(kX86Mov32RT, reg_card_base.GetReg(), Thread::CardTableOffset<4>().Int32Value());
+ int ct_offset = Is64BitInstructionSet(cu_->instruction_set) ?
+ Thread::CardTableOffset<8>().Int32Value() :
+ Thread::CardTableOffset<4>().Int32Value();
+ NewLIR2(kX86Mov32RT, reg_card_base.GetReg(), ct_offset);
OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
LIR* target = NewLIR0(kPseudoTargetLabel);
@@ -222,10 +227,14 @@
GenerateTargetLabel(kPseudoThrowTarget);
m2l_->OpRegImm(kOpAdd, rs_rX86_SP, sp_displace_);
m2l_->ClobberCallerSave();
- ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowStackOverflow);
// Assumes codegen and target are in thumb2 mode.
- m2l_->CallHelper(RegStorage::InvalidReg(), func_offset, false /* MarkSafepointPC */,
- false /* UseLink */);
+ if (Is64BitInstructionSet(cu_->instruction_set)) {
+ m2l_->CallHelper(RegStorage::InvalidReg(), QUICK_ENTRYPOINT_OFFSET(8, pThrowStackOverflow),
+ false /* MarkSafepointPC */, false /* UseLink */);
+ } else {
+ m2l_->CallHelper(RegStorage::InvalidReg(), QUICK_ENTRYPOINT_OFFSET(4, pThrowStackOverflow),
+ false /* MarkSafepointPC */, false /* UseLink */);
+ }
}
private:
@@ -240,9 +249,15 @@
// in case a signal comes in that's not using an alternate signal stack and the large frame may
// have moved us outside of the reserved area at the end of the stack.
// cmp rX86_SP, fs:[stack_end_]; jcc throw_slowpath
- OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<4>());
+ if (Is64BitInstructionSet(cu_->instruction_set)) {
+ OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<8>());
+ } else {
+ OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<4>());
+ }
LIR* branch = OpCondBranch(kCondUlt, nullptr);
- AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_ - 4));
+ AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch,
+ frame_size_ -
+ GetInstructionSetPointerSize(cu_->instruction_set)));
}
FlushIns(ArgLocs, rl_method);
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 9648312..11e7ff9 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -31,7 +31,8 @@
RegLocation rl_dest, int lit);
bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) OVERRIDE;
LIR* CheckSuspendUsingLoad() OVERRIDE;
- RegStorage LoadHelper(ThreadOffset<4> offset);
+ RegStorage LoadHelper(ThreadOffset<4> offset) OVERRIDE;
+ RegStorage LoadHelper(ThreadOffset<8> offset) OVERRIDE;
LIR* LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
@@ -245,14 +246,17 @@
LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
LIR* OpTestSuspend(LIR* target);
- LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset);
+ LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) OVERRIDE;
+ LIR* OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) OVERRIDE;
LIR* OpVldm(RegStorage r_base, int count);
LIR* OpVstm(RegStorage r_base, int count);
void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
void OpRegCopyWide(RegStorage dest, RegStorage src);
- void OpTlsCmp(ThreadOffset<4> offset, int val);
+ void OpTlsCmp(ThreadOffset<4> offset, int val) OVERRIDE;
+ void OpTlsCmp(ThreadOffset<8> offset, int val) OVERRIDE;
void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset);
+ void OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset);
void SpillCoreRegs();
void UnSpillCoreRegs();
static const X86EncodingMap EncodingMap[kX86Last];
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index b6e0841..368234e 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -724,6 +724,12 @@
}
void X86Mir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
+ DCHECK_EQ(kX86, cu_->instruction_set);
+ NewLIR2(kX86Cmp16TI8, offset.Int32Value(), val);
+}
+
+void X86Mir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
+ DCHECK_EQ(kX86_64, cu_->instruction_set);
NewLIR2(kX86Cmp16TI8, offset.Int32Value(), val);
}
@@ -956,7 +962,11 @@
// Test suspend flag, return target of taken suspend branch
LIR* X86Mir2Lir::OpTestSuspend(LIR* target) {
- OpTlsCmp(Thread::ThreadFlagsOffset<4>(), 0);
+ if (Is64BitInstructionSet(cu_->instruction_set)) {
+ OpTlsCmp(Thread::ThreadFlagsOffset<8>(), 0);
+ } else {
+ OpTlsCmp(Thread::ThreadFlagsOffset<4>(), 0);
+ }
return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
}
@@ -1365,6 +1375,20 @@
}
void X86Mir2Lir::OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<4> thread_offset) {
+ DCHECK_EQ(kX86, cu_->instruction_set);
+ X86OpCode opcode = kX86Bkpt;
+ switch (op) {
+ case kOpCmp: opcode = kX86Cmp32RT; break;
+ case kOpMov: opcode = kX86Mov32RT; break;
+ default:
+ LOG(FATAL) << "Bad opcode: " << op;
+ break;
+ }
+ NewLIR2(opcode, r_dest.GetReg(), thread_offset.Int32Value());
+}
+
+void X86Mir2Lir::OpRegThreadMem(OpKind op, RegStorage r_dest, ThreadOffset<8> thread_offset) {
+ DCHECK_EQ(kX86_64, cu_->instruction_set);
X86OpCode opcode = kX86Bkpt;
switch (op) {
case kOpCmp: opcode = kX86Cmp32RT; break;
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index c401baf..2db9845 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -589,6 +589,12 @@
return RegStorage::InvalidReg();
}
+// Not used in x86
+RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
+ LOG(FATAL) << "Unexpected use of LoadHelper in x86";
+ return RegStorage::InvalidReg();
+}
+
LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86";
return nullptr;
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index a4e1255..1da4f17 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -472,6 +472,20 @@
}
LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
+ DCHECK_EQ(kX86, cu_->instruction_set);
+ X86OpCode opcode = kX86Bkpt;
+ switch (op) {
+ case kOpBlx: opcode = kX86CallT; break;
+ case kOpBx: opcode = kX86JmpT; break;
+ default:
+ LOG(FATAL) << "Bad opcode: " << op;
+ break;
+ }
+ return NewLIR1(opcode, thread_offset.Int32Value());
+}
+
+LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
+ DCHECK_EQ(kX86_64, cu_->instruction_set);
X86OpCode opcode = kX86Bkpt;
switch (op) {
case kOpBlx: opcode = kX86CallT; break;