x86_64: TargetReg update for x86
Also includes changes in common code: eliminates uses of the
single-parameter TargetReg and of direct access to special target registers.
Change-Id: Ied2c1f87d4d1e4345248afe74bca40487a46a371
Signed-off-by: Serguei Katkov <serguei.i.katkov@intel.com>
Signed-off-by: Chao-ying Fu <chao-ying.fu@intel.com>
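
For context, the call pattern this change moves the x86 backend toward looks roughly
like the sketch below. ExampleRegSelection is a hypothetical helper written only to
illustrate the API; TargetRefReg, TargetReg(reg, is_wide), As32BitReg and OpRegCopy
are the routines added or reworked by this patch.

    // Hypothetical illustration only: callers now state the register width they need
    // instead of calling the old single-parameter TargetReg(), which this patch turns
    // into a fatal error on x86.
    void X86Mir2Lir::ExampleRegSelection() {
      RegStorage ref_reg  = TargetRefReg(kArg0);      // reference width: 32-bit x86, 64-bit x86-64
      RegStorage word_reg = TargetReg(kArg1, false);  // explicitly the 32-bit view
      RegStorage wide_reg = TargetReg(kArg2, true);   // explicitly the 64-bit view (x86-64)
      OpRegCopy(word_reg, As32BitReg(wide_reg));      // width conversions are explicit as well
    }
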
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index bfbfa0e..6ca220c 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -158,29 +158,33 @@
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
- LoadValueDirectFixed(rl_src, rs_rX86_ARG0);
+ RegStorage array_ptr = TargetRefReg(kArg0);
+ RegStorage payload = TargetPtrReg(kArg1);
+ RegStorage method_start = TargetPtrReg(kArg2);
+
+ LoadValueDirectFixed(rl_src, array_ptr);
// Materialize a pointer to the fill data image
if (base_of_code_ != nullptr) {
// We can use the saved value.
RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
if (rl_method.wide) {
- LoadValueDirectWide(rl_method, rs_rX86_ARG2);
+ LoadValueDirectWide(rl_method, method_start);
} else {
- LoadValueDirect(rl_method, rs_rX86_ARG2);
+ LoadValueDirect(rl_method, method_start);
}
store_method_addr_used_ = true;
} else {
// TODO(64) force to be 64-bit
- NewLIR1(kX86StartOfMethod, rs_rX86_ARG2.GetReg());
+ NewLIR1(kX86StartOfMethod, method_start.GetReg());
}
- NewLIR2(kX86PcRelAdr, rs_rX86_ARG1.GetReg(), WrapPointer(tab_rec));
- NewLIR2(cu_->target64 ? kX86Add64RR : kX86Add32RR, rs_rX86_ARG1.GetReg(), rs_rX86_ARG2.GetReg());
+ NewLIR2(kX86PcRelAdr, payload.GetReg(), WrapPointer(tab_rec));
+ OpRegReg(kOpAdd, payload, method_start);
if (cu_->target64) {
- CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pHandleFillArrayData), rs_rX86_ARG0,
- rs_rX86_ARG1, true);
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pHandleFillArrayData), array_ptr,
+ payload, true);
} else {
- CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pHandleFillArrayData), rs_rX86_ARG0,
- rs_rX86_ARG1, true);
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pHandleFillArrayData), array_ptr,
+ payload, true);
}
}
@@ -291,11 +295,12 @@
FlushIns(ArgLocs, rl_method);
if (base_of_code_ != nullptr) {
+ RegStorage method_start = TargetPtrReg(kArg0);
// We have been asked to save the address of the method start for later use.
- setup_method_address_[0] = NewLIR1(kX86StartOfMethod, rs_rX86_ARG0.GetReg());
+ setup_method_address_[0] = NewLIR1(kX86StartOfMethod, method_start.GetReg());
int displacement = SRegOffset(base_of_code_->s_reg_low);
// Native pointer - must be natural word size.
- setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, rs_rX86_ARG0,
+ setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, method_start,
cu_->target64 ? k64 : k32, kNotVolatile);
}
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index afb6184..646da7f 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -34,9 +34,11 @@
class InToRegStorageX86_64Mapper : public InToRegStorageMapper {
public:
- InToRegStorageX86_64Mapper() : cur_core_reg_(0), cur_fp_reg_(0) {}
+ explicit InToRegStorageX86_64Mapper(Mir2Lir* ml) : ml_(ml), cur_core_reg_(0), cur_fp_reg_(0) {}
virtual ~InToRegStorageX86_64Mapper() {}
virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide);
+ protected:
+ Mir2Lir* ml_;
private:
int cur_core_reg_;
int cur_fp_reg_;
@@ -85,7 +87,22 @@
void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg);
// Required for target - register utilities.
- RegStorage TargetReg(SpecialTargetRegister reg);
+ RegStorage TargetReg(SpecialTargetRegister reg) OVERRIDE;
+ RegStorage TargetReg32(SpecialTargetRegister reg);
+ RegStorage TargetReg(SpecialTargetRegister symbolic_reg, bool is_wide) OVERRIDE {
+ RegStorage reg = TargetReg32(symbolic_reg);
+ if (is_wide) {
+ return (reg.Is64Bit()) ? reg : As64BitReg(reg);
+ } else {
+ return (reg.Is32Bit()) ? reg : As32BitReg(reg);
+ }
+ }
+ RegStorage TargetRefReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
+ return TargetReg(symbolic_reg, cu_->target64);
+ }
+ RegStorage TargetPtrReg(SpecialTargetRegister symbolic_reg) OVERRIDE {
+ return TargetReg(symbolic_reg, cu_->target64);
+ }
RegStorage GetArgMappingToPhysicalReg(int arg_num);
RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num);
RegLocation GetReturnAlt();
@@ -388,6 +405,43 @@
std::vector<uint8_t>* ReturnCallFrameInformation();
protected:
+ // Casting of RegStorage
+ RegStorage As32BitReg(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is64Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Expected 64b register " << reg.GetReg();
+ } else {
+ LOG(WARNING) << "Expected 64b register " << reg.GetReg();
+ return reg;
+ }
+ }
+ RegStorage ret_val = RegStorage(RegStorage::k32BitSolo,
+ reg.GetRawBits() & RegStorage::kRegTypeMask);
+ DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k32SoloStorageMask)
+ ->GetReg().GetReg(),
+ ret_val.GetReg());
+ return ret_val;
+ }
+
+ RegStorage As64BitReg(RegStorage reg) {
+ DCHECK(!reg.IsPair());
+ if ((kFailOnSizeError || kReportSizeError) && !reg.Is32Bit()) {
+ if (kFailOnSizeError) {
+ LOG(FATAL) << "Expected 32b register " << reg.GetReg();
+ } else {
+ LOG(WARNING) << "Expected 32b register " << reg.GetReg();
+ return reg;
+ }
+ }
+ RegStorage ret_val = RegStorage(RegStorage::k64BitSolo,
+ reg.GetRawBits() & RegStorage::kRegTypeMask);
+ DCHECK_EQ(GetRegInfo(reg)->FindMatchingView(RegisterInfo::k64SoloStorageMask)
+ ->GetReg().GetReg(),
+ ret_val.GetReg());
+ return ret_val;
+ }
+
size_t ComputeSize(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_index,
int32_t raw_base, int32_t displacement);
void CheckValidByteRegister(const X86EncodingMap* entry, int32_t raw_reg);
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 7454475..4414d7c 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -145,12 +145,12 @@
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDisp(TargetReg(kSp), src_v_reg_offset, rl_src.reg, k64, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP, src_v_reg_offset, rl_src.reg, k64, kNotVolatile);
}
}
// Push the source virtual register onto the x87 stack.
- LIR *fild64 = NewLIR2NoDest(kX86Fild64M, TargetReg(kSp).GetReg(),
+ LIR *fild64 = NewLIR2NoDest(kX86Fild64M, rs_rX86_SP.GetReg(),
src_v_reg_offset + LOWORD_OFFSET);
AnnotateDalvikRegAccess(fild64, (src_v_reg_offset + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is64bit */);
@@ -158,7 +158,7 @@
// Now pop off x87 stack and store it in the destination VR's stack location.
int opcode = is_double ? kX86Fstp64M : kX86Fstp32M;
int displacement = is_double ? dest_v_reg_offset + LOWORD_OFFSET : dest_v_reg_offset;
- LIR *fstp = NewLIR2NoDest(opcode, TargetReg(kSp).GetReg(), displacement);
+ LIR *fstp = NewLIR2NoDest(opcode, rs_rX86_SP.GetReg(), displacement);
AnnotateDalvikRegAccess(fstp, displacement >> 2, false /* is_load */, is_double);
/*
@@ -179,11 +179,11 @@
*/
rl_result = EvalLoc(rl_dest, kFPReg, true);
if (is_double) {
- LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
+ LoadBaseDisp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
StoreFinalValueWide(rl_dest, rl_result);
} else {
- Load32Disp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg);
+ Load32Disp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg);
StoreFinalValue(rl_dest, rl_result);
}
@@ -364,7 +364,7 @@
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDisp(TargetReg(kSp), src1_v_reg_offset, rl_src1.reg, is_double ? k64 : k32,
+ StoreBaseDisp(rs_rX86_SP, src1_v_reg_offset, rl_src1.reg, is_double ? k64 : k32,
kNotVolatile);
}
}
@@ -375,7 +375,7 @@
FlushSpecificReg(reg_info);
ResetDef(rl_src2.reg);
} else {
- StoreBaseDisp(TargetReg(kSp), src2_v_reg_offset, rl_src2.reg, is_double ? k64 : k32,
+ StoreBaseDisp(rs_rX86_SP, src2_v_reg_offset, rl_src2.reg, is_double ? k64 : k32,
kNotVolatile);
}
}
@@ -383,12 +383,12 @@
int fld_opcode = is_double ? kX86Fld64M : kX86Fld32M;
// Push the source virtual registers onto the x87 stack.
- LIR *fld_2 = NewLIR2NoDest(fld_opcode, TargetReg(kSp).GetReg(),
+ LIR *fld_2 = NewLIR2NoDest(fld_opcode, rs_rX86_SP.GetReg(),
src2_v_reg_offset + LOWORD_OFFSET);
AnnotateDalvikRegAccess(fld_2, (src2_v_reg_offset + LOWORD_OFFSET) >> 2,
true /* is_load */, is_double /* is64bit */);
- LIR *fld_1 = NewLIR2NoDest(fld_opcode, TargetReg(kSp).GetReg(),
+ LIR *fld_1 = NewLIR2NoDest(fld_opcode, rs_rX86_SP.GetReg(),
src1_v_reg_offset + LOWORD_OFFSET);
AnnotateDalvikRegAccess(fld_1, (src1_v_reg_offset + LOWORD_OFFSET) >> 2,
true /* is_load */, is_double /* is64bit */);
@@ -417,7 +417,7 @@
// Now store result in the destination VR's stack location.
int displacement = dest_v_reg_offset + LOWORD_OFFSET;
int opcode = is_double ? kX86Fst64M : kX86Fst32M;
- LIR *fst = NewLIR2NoDest(opcode, TargetReg(kSp).GetReg(), displacement);
+ LIR *fst = NewLIR2NoDest(opcode, rs_rX86_SP.GetReg(), displacement);
AnnotateDalvikRegAccess(fst, displacement >> 2, false /* is_load */, is_double /* is64bit */);
// Pop ST(1) and ST(0).
@@ -436,10 +436,10 @@
if (rl_result.location == kLocPhysReg) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
if (is_double) {
- LoadBaseDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
+ LoadBaseDisp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg, k64, kNotVolatile);
StoreFinalValueWide(rl_dest, rl_result);
} else {
- Load32Disp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg);
+ Load32Disp(rs_rX86_SP, dest_v_reg_offset, rl_result.reg);
StoreFinalValue(rl_dest, rl_result);
}
}
@@ -627,7 +627,7 @@
// Operate directly into memory.
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- LIR *lir = NewLIR3(kX86And32MI, TargetReg(kSp).GetReg(), displacement, 0x7fffffff);
+ LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP.GetReg(), displacement, 0x7fffffff);
AnnotateDalvikRegAccess(lir, displacement >> 2, false /*is_load */, false /* is_64bit */);
AnnotateDalvikRegAccess(lir, displacement >> 2, true /* is_load */, false /* is_64bit*/);
return true;
@@ -682,7 +682,7 @@
// Operate directly into memory.
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- LIR *lir = NewLIR3(kX86And32MI, TargetReg(kSp).GetReg(), displacement + HIWORD_OFFSET, 0x7fffffff);
+ LIR *lir = NewLIR3(kX86And32MI, rs_rX86_SP.GetReg(), displacement + HIWORD_OFFSET, 0x7fffffff);
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2, true /* is_load */, true /* is_64bit*/);
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2, false /*is_load */, true /* is_64bit */);
return true;
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 14a18e5..7a4ea26 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -93,7 +93,7 @@
}
LIR* X86Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
- NewLIR2(kX86Cmp32RR, src1.GetReg(), src2.GetReg());
+ NewLIR2(src1.Is64Bit() ? kX86Cmp64RR : kX86Cmp32RR, src1.GetReg(), src2.GetReg());
X86ConditionCode cc = X86ConditionEncoding(cond);
LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */ ,
cc);
@@ -105,9 +105,13 @@
int check_value, LIR* target) {
if ((check_value == 0) && (cond == kCondEq || cond == kCondNe)) {
// TODO: when check_value == 0 and reg is rCX, use the jcxz/nz opcode
- NewLIR2(kX86Test32RR, reg.GetReg(), reg.GetReg());
+    NewLIR2(reg.Is64Bit() ? kX86Test64RR : kX86Test32RR, reg.GetReg(), reg.GetReg());
} else {
- NewLIR2(IS_SIMM8(check_value) ? kX86Cmp32RI8 : kX86Cmp32RI, reg.GetReg(), check_value);
+ if (reg.Is64Bit()) {
+ NewLIR2(IS_SIMM8(check_value) ? kX86Cmp64RI8 : kX86Cmp64RI, reg.GetReg(), check_value);
+ } else {
+ NewLIR2(IS_SIMM8(check_value) ? kX86Cmp32RI8 : kX86Cmp32RI, reg.GetReg(), check_value);
+ }
}
X86ConditionCode cc = X86ConditionEncoding(cond);
LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */ , cc);
@@ -241,7 +245,7 @@
// FIXME: depending on how you use registers you could get a false != mismatch when dealing
// with different views of the same underlying physical resource (i.e. solo32 vs. solo64).
const bool result_reg_same_as_src =
- (rl_src.location == kLocPhysReg && rl_src.reg.GetReg() == rl_result.reg.GetReg());
+ (rl_src.location == kLocPhysReg && rl_src.reg.GetRegNum() == rl_result.reg.GetRegNum());
const bool true_zero_case = (true_val == 0 && false_val != 0 && !result_reg_same_as_src);
const bool false_zero_case = (false_val == 0 && true_val != 0 && !result_reg_same_as_src);
const bool catch_all_case = !(true_zero_case || false_zero_case);
@@ -846,14 +850,14 @@
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
const size_t push_offset = (push_si ? 4u : 0u) + (push_di ? 4u : 0u);
if (!obj_in_si && !obj_in_di) {
- LoadWordDisp(TargetReg(kSp), SRegOffset(rl_src_obj.s_reg_low) + push_offset, rs_obj);
+ LoadWordDisp(rs_rX86_SP, SRegOffset(rl_src_obj.s_reg_low) + push_offset, rs_obj);
// Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - push_offset / 4u;
AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
}
if (!off_in_si && !off_in_di) {
- LoadWordDisp(TargetReg(kSp), SRegOffset(rl_src_offset.s_reg_low) + push_offset, rs_off);
+ LoadWordDisp(rs_rX86_SP, SRegOffset(rl_src_offset.s_reg_low) + push_offset, rs_off);
// Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - push_offset / 4u;
@@ -1008,23 +1012,23 @@
RegStorage new_index = index_;
// Move index out of kArg1, either directly to kArg0, or to kArg2.
// TODO: clean-up to check not a number but with type
- if (index_.GetRegNum() == m2l_->TargetReg(kArg1).GetRegNum()) {
- if (array_base_.GetRegNum() == m2l_->TargetReg(kArg0).GetRegNum()) {
- m2l_->OpRegCopy(m2l_->TargetReg(kArg2), index_);
- new_index = m2l_->TargetReg(kArg2);
+ if (index_ == m2l_->TargetReg(kArg1, false)) {
+ if (array_base_ == m2l_->TargetRefReg(kArg0)) {
+ m2l_->OpRegCopy(m2l_->TargetReg(kArg2, false), index_);
+ new_index = m2l_->TargetReg(kArg2, false);
} else {
- m2l_->OpRegCopy(m2l_->TargetReg(kArg0), index_);
- new_index = m2l_->TargetReg(kArg0);
+ m2l_->OpRegCopy(m2l_->TargetReg(kArg0, false), index_);
+ new_index = m2l_->TargetReg(kArg0, false);
}
}
// Load array length to kArg1.
- m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1), array_base_, len_offset_);
+ m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, false), array_base_, len_offset_);
if (cu_->target64) {
m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
- new_index, m2l_->TargetReg(kArg1), true);
+ new_index, m2l_->TargetReg(kArg1, false), true);
} else {
m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
- new_index, m2l_->TargetReg(kArg1), true);
+ new_index, m2l_->TargetReg(kArg1, false), true);
}
}
@@ -1057,14 +1061,14 @@
GenerateTargetLabel(kPseudoThrowTarget);
// Load array length to kArg1.
- m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1), array_base_, len_offset_);
- m2l_->LoadConstant(m2l_->TargetReg(kArg0), index_);
+ m2l_->OpRegMem(kOpMov, m2l_->TargetReg(kArg1, false), array_base_, len_offset_);
+ m2l_->LoadConstant(m2l_->TargetReg(kArg0, false), index_);
if (cu_->target64) {
m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pThrowArrayBounds),
- m2l_->TargetReg(kArg0), m2l_->TargetReg(kArg1), true);
+ m2l_->TargetReg(kArg0, false), m2l_->TargetReg(kArg1, false), true);
} else {
m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pThrowArrayBounds),
- m2l_->TargetReg(kArg0), m2l_->TargetReg(kArg1), true);
+ m2l_->TargetReg(kArg0, false), m2l_->TargetReg(kArg1, false), true);
}
}
@@ -1406,7 +1410,7 @@
// RHS is in memory.
DCHECK((rl_src.location == kLocDalvikFrame) ||
(rl_src.location == kLocCompilerTemp));
- int r_base = TargetReg(kSp).GetReg();
+ int r_base = rs_rX86_SP.GetReg();
int displacement = SRegOffset(rl_src.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -1440,7 +1444,7 @@
// Operate directly into memory.
X86OpCode x86op = GetOpcode(op, rl_dest, rl_src, false);
- int r_base = TargetReg(kSp).GetReg();
+ int r_base = rs_rX86_SP.GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2122,7 +2126,7 @@
if ((rl_dest.location == kLocDalvikFrame) ||
(rl_dest.location == kLocCompilerTemp)) {
- int r_base = TargetReg(kSp).GetReg();
+ int r_base = rs_rX86_SP.GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2153,7 +2157,7 @@
// Can we just do this into memory?
if ((rl_dest.location == kLocDalvikFrame) ||
(rl_dest.location == kLocCompilerTemp)) {
- int r_base = TargetReg(kSp).GetReg();
+ int r_base = rs_rX86_SP.GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2271,7 +2275,8 @@
RegStorage result_reg = rl_result.reg;
// For 32-bit, SETcc only works with EAX..EDX.
- if (result_reg == object.reg || !IsByteRegister(result_reg)) {
+ RegStorage object_32reg = object.reg.Is64Bit() ? As32BitReg(object.reg) : object.reg;
+ if (result_reg == object_32reg || !IsByteRegister(result_reg)) {
result_reg = AllocateByteRegister();
}
@@ -2337,8 +2342,10 @@
FlushAllRegs();
// May generate a call - use explicit registers.
LockCallTemps();
- LoadCurrMethodDirect(TargetReg(kArg1)); // kArg1 gets current Method*.
- RegStorage class_reg = TargetReg(kArg2); // kArg2 will hold the Class*.
+ RegStorage method_reg = TargetRefReg(kArg1); // kArg1 gets current Method*.
+ LoadCurrMethodDirect(method_reg);
+ RegStorage class_reg = TargetRefReg(kArg2); // kArg2 will hold the Class*.
+  RegStorage ref_reg = TargetRefReg(kArg0);  // kArg0 will hold the ref.
// Reference must end up in kArg0.
if (needs_access_check) {
// Check we have access to type_idx and if not throw IllegalAccessError,
@@ -2350,16 +2357,16 @@
CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
type_idx, true);
}
- OpRegCopy(class_reg, TargetReg(kRet0));
- LoadValueDirectFixed(rl_src, TargetReg(kArg0));
+ OpRegCopy(class_reg, TargetRefReg(kRet0));
+ LoadValueDirectFixed(rl_src, ref_reg);
} else if (use_declaring_class) {
- LoadValueDirectFixed(rl_src, TargetReg(kArg0));
- LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
+ LoadValueDirectFixed(rl_src, ref_reg);
+ LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
class_reg, kNotVolatile);
} else {
// Load dex cache entry into class_reg (kArg2).
- LoadValueDirectFixed(rl_src, TargetReg(kArg0));
- LoadRefDisp(TargetReg(kArg1), mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
+ LoadValueDirectFixed(rl_src, ref_reg);
+ LoadRefDisp(method_reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
class_reg, kNotVolatile);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::HeapReference<mirror::Class*>)).Int32Value() +
@@ -2374,8 +2381,8 @@
} else {
CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
}
- OpRegCopy(TargetReg(kArg2), TargetReg(kRet0)); // Align usage with fast path.
- LoadValueDirectFixed(rl_src, TargetReg(kArg0)); /* Reload Ref. */
+ OpRegCopy(class_reg, TargetRefReg(kRet0)); // Align usage with fast path.
+ LoadValueDirectFixed(rl_src, ref_reg); /* Reload Ref. */
// Rejoin code paths
LIR* hop_target = NewLIR0(kPseudoTargetLabel);
hop_branch->target = hop_target;
@@ -2386,33 +2393,34 @@
// On x86-64 kArg0 is not EAX, so we have to copy ref from kArg0 to EAX.
if (cu_->target64) {
- OpRegCopy(rl_result.reg, TargetReg(kArg0));
+ OpRegCopy(rl_result.reg, ref_reg);
}
// For 32-bit, SETcc only works with EAX..EDX.
DCHECK_LT(rl_result.reg.GetRegNum(), 4);
// Is the class NULL?
- LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
+ LIR* branch1 = OpCmpImmBranch(kCondEq, ref_reg, 0, NULL);
+  RegStorage ref_class_reg = TargetRefReg(kArg1);  // kArg1 will hold the ref->klass_.
/* Load object->klass_. */
DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
- LoadRefDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1),
+ LoadRefDisp(ref_reg, mirror::Object::ClassOffset().Int32Value(), ref_class_reg,
kNotVolatile);
/* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class. */
LIR* branchover = nullptr;
if (type_known_final) {
// Ensure top 3 bytes of result are 0.
LoadConstant(rl_result.reg, 0);
- OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));
+ OpRegReg(kOpCmp, ref_class_reg, class_reg);
// Set the low byte of the result to 0 or 1 from the compare condition code.
NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondEq);
} else {
if (!type_known_abstract) {
LoadConstant(rl_result.reg, 1); // Assume result succeeds.
- branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
+ branchover = OpCmpBranch(kCondEq, ref_class_reg, class_reg, NULL);
}
- OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
+ OpRegCopy(TargetRefReg(kArg0), class_reg);
if (cu_->target64) {
OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial));
} else {
@@ -2552,7 +2560,7 @@
} else {
if (shift_op) {
// X86 doesn't require masking and must use ECX.
- RegStorage t_reg = TargetReg(kCount); // rCX
+ RegStorage t_reg = TargetReg(kCount, false); // rCX
LoadValueDirectFixed(rl_rhs, t_reg);
if (is_two_addr) {
// Can we do this directly into memory?
@@ -2740,7 +2748,7 @@
}
// X86 doesn't require masking and must use ECX.
- RegStorage t_reg = TargetReg(kCount); // rCX
+ RegStorage t_reg = TargetReg(kCount, false); // rCX
LoadValueDirectFixed(rl_shift, t_reg);
if (is_two_addr) {
// Can we do this directly into memory?
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 730a271..f80e200 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -154,8 +154,7 @@
}
RegLocation X86Mir2Lir::LocCReturnRef() {
- // FIXME: return x86_loc_c_return_wide for x86_64 when wide refs supported.
- return x86_loc_c_return;
+ return cu_->target64 ? x86_64_loc_c_return_ref : x86_loc_c_return_ref;
}
RegLocation X86Mir2Lir::LocCReturnWide() {
@@ -170,8 +169,8 @@
return x86_loc_c_return_double;
}
-// Return a target-dependent special register.
-RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
+// Return a target-dependent special register for 32-bit.
+RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) {
RegStorage res_reg = RegStorage::InvalidReg();
switch (reg) {
case kSelf: res_reg = RegStorage::InvalidReg(); break;
@@ -204,6 +203,11 @@
return res_reg;
}
+RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
+ LOG(FATAL) << "Do not use this function!!!";
+ return RegStorage::InvalidReg();
+}
+
/*
* Decode the register id.
*/
@@ -832,7 +836,7 @@
(rl_dest.location == kLocCompilerTemp)) {
int32_t val_lo = Low32Bits(value);
int32_t val_hi = High32Bits(value);
- int r_base = TargetReg(kSp).GetReg();
+ int r_base = rs_rX86_SP.GetReg();
int displacement = SRegOffset(rl_dest.s_reg_low);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -887,7 +891,7 @@
uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);
// Generate the move instruction with the unique pointer and save index, dex_file, and type.
- LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(),
+ LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(),
static_cast<int>(target_method_id_ptr), target_method_idx,
WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
AppendLIR(move);
@@ -904,7 +908,7 @@
uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);
// Generate the move instruction with the unique pointer and save index and type.
- LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(),
+ LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg, false).GetReg(),
static_cast<int>(ptr), type_idx);
AppendLIR(move);
class_type_address_insns_.Insert(move);
@@ -1746,29 +1750,22 @@
// ------------ ABI support: mapping of args to physical registers -------------
RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide) {
- const RegStorage coreArgMappingToPhysicalReg[] = {rs_rX86_ARG1, rs_rX86_ARG2, rs_rX86_ARG3, rs_rX86_ARG4, rs_rX86_ARG5};
- const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(RegStorage);
- const RegStorage fpArgMappingToPhysicalReg[] = {rs_rX86_FARG0, rs_rX86_FARG1, rs_rX86_FARG2, rs_rX86_FARG3,
- rs_rX86_FARG4, rs_rX86_FARG5, rs_rX86_FARG6, rs_rX86_FARG7};
- const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / sizeof(RegStorage);
+ const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5};
+ const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister);
+ const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3,
+ kFArg4, kFArg5, kFArg6, kFArg7};
+ const int fpArgMappingToPhysicalRegSize = sizeof(fpArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister);
- RegStorage result = RegStorage::InvalidReg();
if (is_double_or_float) {
if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
- result = fpArgMappingToPhysicalReg[cur_fp_reg_++];
- if (result.Valid()) {
- result = is_wide ? RegStorage::FloatSolo64(result.GetReg()) : RegStorage::FloatSolo32(result.GetReg());
- }
+ return ml_->TargetReg(fpArgMappingToPhysicalReg[cur_fp_reg_++], is_wide);
}
} else {
if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
- result = coreArgMappingToPhysicalReg[cur_core_reg_++];
- if (result.Valid()) {
- result = is_wide ? RegStorage::Solo64(result.GetReg()) : RegStorage::Solo32(result.GetReg());
- }
+ return ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], is_wide);
}
}
- return result;
+ return RegStorage::InvalidReg();
}
RegStorage X86Mir2Lir::InToRegStorageMapping::Get(int in_position) {
@@ -1806,7 +1803,7 @@
int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];
- InToRegStorageX86_64Mapper mapper;
+ InToRegStorageX86_64Mapper mapper(this);
in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
}
return in_to_reg_storage_mapping_.Get(arg_num);
@@ -1847,13 +1844,13 @@
RegLocation rl_src = rl_method;
rl_src.location = kLocPhysReg;
- rl_src.reg = TargetReg(kArg0);
+ rl_src.reg = TargetRefReg(kArg0);
rl_src.home = false;
MarkLive(rl_src);
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
- StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0), kNotVolatile);
+ StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetRefReg(kArg0)), kNotVolatile);
}
if (cu_->num_ins == 0) {
@@ -1890,9 +1887,9 @@
} else {
// Needs flush.
if (t_loc->ref) {
- StoreRefDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, kNotVolatile);
+ StoreRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, kNotVolatile);
} else {
- StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
+ StoreBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
kNotVolatile);
}
}
@@ -1900,9 +1897,9 @@
// If arriving in frame & promoted.
if (t_loc->location == kLocPhysReg) {
if (t_loc->ref) {
- LoadRefDisp(TargetReg(kSp), SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
+ LoadRefDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
} else {
- LoadBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), t_loc->reg,
+ LoadBaseDisp(rs_rX86_SP, SRegOffset(start_vreg + i), t_loc->reg,
t_loc->wide ? k64 : k32, kNotVolatile);
}
}
@@ -1974,7 +1971,7 @@
const int start_index = skip_this ? 1 : 0;
- InToRegStorageX86_64Mapper mapper;
+ InToRegStorageX86_64Mapper mapper(this);
InToRegStorageMapping in_to_reg_storage_mapping;
in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
@@ -1993,14 +1990,14 @@
loc = UpdateLocWide(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
}
next_arg += 2;
} else {
loc = UpdateLoc(loc);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP, SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
}
next_arg++;
}
@@ -2057,23 +2054,23 @@
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (src_is_16b_aligned) {
- ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP);
+ ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP);
} else if (src_is_8b_aligned) {
- ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovLo128FP);
- ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1),
+ ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovLo128FP);
+ ld2 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset + (bytes_to_move >> 1),
kMovHi128FP);
} else {
- ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovU128FP);
+ ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovU128FP);
}
if (dest_is_16b_aligned) {
- st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovA128FP);
+ st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovA128FP);
} else if (dest_is_8b_aligned) {
- st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovLo128FP);
- st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1),
+ st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovLo128FP);
+ st2 = OpMovMemReg(rs_rX86_SP, current_dest_offset + (bytes_to_move >> 1),
temp, kMovHi128FP);
} else {
- st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovU128FP);
+ st1 = OpMovMemReg(rs_rX86_SP, current_dest_offset, temp, kMovU128FP);
}
// TODO If we could keep track of aliasing information for memory accesses that are wider
@@ -2107,11 +2104,11 @@
// Instead of allocating a new temp, simply reuse one of the registers being used
// for argument passing.
- RegStorage temp = TargetReg(kArg3);
+ RegStorage temp = TargetReg(kArg3, false);
// Now load the argument VR and store to the outs.
- Load32Disp(TargetReg(kSp), current_src_offset, temp);
- Store32Disp(TargetReg(kSp), current_dest_offset, temp);
+ Load32Disp(rs_rX86_SP, current_src_offset, temp);
+ Store32Disp(rs_rX86_SP, current_dest_offset, temp);
}
current_src_offset += bytes_to_move;
@@ -2123,8 +2120,8 @@
// Now handle rest not registers if they are
if (in_to_reg_storage_mapping.IsThereStackMapped()) {
- RegStorage regSingle = TargetReg(kArg2);
- RegStorage regWide = RegStorage::Solo64(TargetReg(kArg3).GetReg());
+ RegStorage regSingle = TargetReg(kArg2, false);
+ RegStorage regWide = TargetReg(kArg3, true);
for (int i = start_index;
i < last_mapped_in + size_of_the_last_mapped + regs_left_to_pass_via_stack; i++) {
RegLocation rl_arg = info->args[i];
@@ -2137,17 +2134,17 @@
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
if (rl_arg.wide) {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k64, kNotVolatile);
} else {
LoadValueDirectWideFixed(rl_arg, regWide);
- StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP, out_offset, regWide, k64, kNotVolatile);
}
} else {
if (rl_arg.location == kLocPhysReg) {
- StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP, out_offset, rl_arg.reg, k32, kNotVolatile);
} else {
LoadValueDirectFixed(rl_arg, regSingle);
- StoreBaseDisp(TargetReg(kSp), out_offset, regSingle, k32, kNotVolatile);
+ StoreBaseDisp(rs_rX86_SP, out_offset, regSingle, k32, kNotVolatile);
}
}
}
@@ -2183,13 +2180,13 @@
direct_code, direct_method, type);
if (pcrLabel) {
if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
- *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
+ *pcrLabel = GenExplicitNullCheck(TargetRefReg(kArg1), info->opt_flags);
} else {
*pcrLabel = nullptr;
// In lieu of generating a check for kArg1 being null, we need to
// perform a load when doing implicit checks.
RegStorage tmp = AllocTemp();
- Load32Disp(TargetReg(kArg1), 0, tmp);
+ Load32Disp(TargetRefReg(kArg1), 0, tmp);
MarkPossibleNullPointerException(info->opt_flags);
FreeTemp(tmp);
}
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 7ff4f72..a52e842 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -353,6 +353,12 @@
const RegLocation x86_loc_c_return_wide
{kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
RegStorage(RegStorage::k64BitPair, rAX, rDX), INVALID_SREG, INVALID_SREG};
+const RegLocation x86_loc_c_return_ref
+ {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
+ RegStorage(RegStorage::k32BitSolo, rAX), INVALID_SREG, INVALID_SREG};
+const RegLocation x86_64_loc_c_return_ref
+ {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
+ RegStorage(RegStorage::k64BitSolo, rAX), INVALID_SREG, INVALID_SREG};
const RegLocation x86_64_loc_c_return_wide
{kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
RegStorage(RegStorage::k64BitSolo, rAX), INVALID_SREG, INVALID_SREG};