Allow mixing of thread offsets between 32-bit and 64-bit architectures.

Begin a fuller implementation of x86-64 REX prefixes.
Doesn't yet implement 64-bit thread offset support for the JNI compiler.
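
The heart of the change is that ThreadOffset is now templated on the
target's pointer size (ThreadOffset<4> for 32-bit targets, ThreadOffset<8>
for 64-bit ones), and QUICK_ENTRYPOINT_OFFSET takes that size as its first
argument; the x86 backend below pins itself to 4-byte offsets. A rough
sketch of the idea (simplified, not the actual runtime headers):

    #include <cstddef>
    #include <cstdint>

    // Parameterizing the offset type on pointer size lets 32-bit and
    // 64-bit offsets coexist in one compiler while staying distinct
    // types, so passing one where the other is expected fails to compile.
    template <size_t kPointerSize>
    class ThreadOffset {
     public:
      explicit ThreadOffset(size_t value) : value_(value) {}
      int32_t Int32Value() const { return static_cast<int32_t>(value_); }
     private:
      size_t value_;
    };

    // A 32-bit offset no longer converts silently to a 64-bit one:
    //   ThreadOffset<8> wrong = ThreadOffset<4>(16);  // does not compile
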
Change-Id: If9af2f08a1833c21ddb4b4077f9b03add1a05147
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index d97cf4d..729b30d 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -156,12 +156,12 @@
}
NewLIR2(kX86PcRelAdr, rX86_ARG1, WrapPointer(tab_rec));
NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2);
- CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData), rs_rX86_ARG0,
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pHandleFillArrayData), rs_rX86_ARG0,
rs_rX86_ARG1, true);
}
void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
- int ex_offset = Thread::ExceptionOffset().Int32Value();
+ int ex_offset = Thread::ExceptionOffset<4>().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
NewLIR2(kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
NewLIR2(kX86Mov32TI, ex_offset, 0);
@@ -175,7 +175,7 @@
RegStorage reg_card_base = AllocTemp();
RegStorage reg_card_no = AllocTemp();
LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
- NewLIR2(kX86Mov32RT, reg_card_base.GetReg(), Thread::CardTableOffset().Int32Value());
+ NewLIR2(kX86Mov32RT, reg_card_base.GetReg(), Thread::CardTableOffset<4>().Int32Value());
OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
LIR* target = NewLIR0(kPseudoTargetLabel);
@@ -222,7 +222,7 @@
GenerateTargetLabel();
m2l_->OpRegImm(kOpAdd, rs_rX86_SP, sp_displace_);
m2l_->ClobberCallerSave();
- ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflow);
+ ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowStackOverflow);
// Assumes codegen and target are in thumb2 mode.
m2l_->CallHelper(RegStorage::InvalidReg(), func_offset, false /* MarkSafepointPC */,
false /* UseLink */);
@@ -240,7 +240,7 @@
// in case a signal comes in that's not using an alternate signal stack and the large frame may
// have moved us outside of the reserved area at the end of the stack.
// cmp rX86_SP, fs:[stack_end_]; jcc throw_launchpad
- OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset());
+ OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset<4>());
LIR* branch = OpCondBranch(kCondUlt, nullptr);
AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_ - 4));
}
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 6d427e7..56b64dd 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -30,7 +30,7 @@
bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div, RegLocation rl_src,
RegLocation rl_dest, int lit);
LIR* CheckSuspendUsingLoad() OVERRIDE;
- RegStorage LoadHelper(ThreadOffset offset);
+ RegStorage LoadHelper(ThreadOffset<4> offset);
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest, OpSize size,
int s_reg);
LIR* LoadBaseDispWide(RegStorage r_base, int displacement, RegStorage r_dest, int s_reg);
@@ -245,14 +245,14 @@
LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value);
LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2);
LIR* OpTestSuspend(LIR* target);
- LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset);
+ LIR* OpThreadMem(OpKind op, ThreadOffset<4> thread_offset);
LIR* OpVldm(RegStorage r_base, int count);
LIR* OpVstm(RegStorage r_base, int count);
void OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset);
void OpRegCopyWide(RegStorage dest, RegStorage src);
- void OpTlsCmp(ThreadOffset offset, int val);
- void OpRegThreadMem(OpKind op, int r_dest, ThreadOffset thread_offset);
+ void OpTlsCmp(ThreadOffset<4> offset, int val);
+ void OpRegThreadMem(OpKind op, int r_dest, ThreadOffset<4> thread_offset);
void SpillCoreRegs();
void UnSpillCoreRegs();
static const X86EncodingMap EncodingMap[kX86Last];
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 3fb9012..ee5387f 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -49,7 +49,7 @@
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2,
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pFmodf), rl_src1, rl_src2,
false);
rl_result = GetReturn(true);
StoreValue(rl_dest, rl_result);
@@ -100,7 +100,7 @@
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2,
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(4, pFmod), rl_src1, rl_src2,
false);
rl_result = GetReturnWide(true);
StoreValueWide(rl_dest, rl_result);
@@ -274,10 +274,10 @@
GenLongToFP(rl_dest, rl_src, false /* is_double */);
return;
case Instruction::FLOAT_TO_LONG:
- GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pF2l), rl_dest, rl_src);
return;
case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(4, pD2l), rl_dest, rl_src);
return;
default:
LOG(INFO) << "Unexpected opcode: " << opcode;
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 851f448..0e7ba6b 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -742,7 +742,7 @@
NewLIR5(kX86Lea32RA, r_base.GetReg(), reg1.GetReg(), reg2.GetReg(), scale, offset);
}
-void X86Mir2Lir::OpTlsCmp(ThreadOffset offset, int val) {
+void X86Mir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
NewLIR2(kX86Cmp16TI8, offset.Int32Value(), val);
}
@@ -893,7 +893,7 @@
// Test suspend flag, return target of taken suspend branch
LIR* X86Mir2Lir::OpTestSuspend(LIR* target) {
- OpTlsCmp(Thread::ThreadFlagsOffset(), 0);
+ OpTlsCmp(Thread::ThreadFlagsOffset<4>(), 0);
return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
}
@@ -1293,7 +1293,7 @@
StoreValueWide(rl_dest, rl_result);
}
-void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, ThreadOffset thread_offset) {
+void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, ThreadOffset<4> thread_offset) {
X86OpCode opcode = kX86Bkpt;
switch (op) {
case kOpCmp: opcode = kX86Cmp32RT; break;
@@ -1834,7 +1834,7 @@
if (needs_access_check) {
// Check we have access to type_idx and if not throw IllegalAccessError,
// Caller function returns Class* in kArg0.
- CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
+ CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
type_idx, true);
OpRegCopy(class_reg, TargetReg(kRet0));
LoadValueDirectFixed(rl_src, TargetReg(kArg0));
@@ -1855,7 +1855,7 @@
// Need to test presence of type in dex cache at runtime.
LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
// Type is not resolved. Call out to helper, which will return resolved type in kRet0/kArg0.
- CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx, true);
+ CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
OpRegCopy(TargetReg(kArg2), TargetReg(kRet0)); // Align usage with fast path.
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); /* Reload Ref. */
// Rejoin code paths
@@ -1889,7 +1889,7 @@
branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
}
OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
- OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
+ OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
}
// TODO: only clobber when type isn't final?
ClobberCallerSave();
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index da64250..925e736 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -581,16 +581,18 @@
X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
: Mir2Lir(cu, mir_graph, arena),
+ base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
method_address_insns_(arena, 100, kGrowableArrayMisc),
class_type_address_insns_(arena, 100, kGrowableArrayMisc),
call_method_insns_(arena, 100, kGrowableArrayMisc),
stack_decrement_(nullptr), stack_increment_(nullptr) {
- store_method_addr_used_ = false;
- for (int i = 0; i < kX86Last; i++) {
- if (X86Mir2Lir::EncodingMap[i].opcode != i) {
- LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
+ if (kIsDebugBuild) {
+ for (int i = 0; i < kX86Last; i++) {
+ if (X86Mir2Lir::EncodingMap[i].opcode != i) {
+ LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
+ }
}
}
}
@@ -601,7 +603,7 @@
}
// Not used in x86
-RegStorage X86Mir2Lir::LoadHelper(ThreadOffset offset) {
+RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
LOG(FATAL) << "Unexpected use of LoadHelper in x86";
return RegStorage::InvalidReg();
}
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index bb5d387..e9faa7f 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -468,7 +468,7 @@
return OpRegImm(op, r_dest, value);
}
-LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) {
+LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
X86OpCode opcode = kX86Bkpt;
switch (op) {
case kOpBlx: opcode = kX86CallT; break;
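
The REX-prefix groundwork mentioned in the description lands in the x86
assembler and is not shown in this excerpt. For reference, a REX prefix is
a single byte of the form 0100WRXB emitted before the opcode; a minimal
sketch of composing one (illustrative, not this patch's encoder code):

    #include <cstdint>

    // W selects 64-bit operand size; R, X and B extend the ModRM.reg,
    // SIB.index and ModRM.rm/SIB.base fields so r8-r15 become reachable.
    static uint8_t RexPrefix(bool w, bool r, bool x, bool b) {
      return static_cast<uint8_t>(0x40 | (w << 3) | (r << 2) | (x << 1) | b);
    }

    // RexPrefix(true, false, false, false) == 0x48 (REX.W), the prefix
    // seen on 64-bit ALU ops such as "add rax, rcx" (48 01 c8).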