Revert "Rework Quick compiler's register handling"
This reverts commit 2c1ed456dcdb027d097825dd98dbe48c71599b6c.
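
The reverted change had wrapped RegLocation's register fields in a single
RegStorage member (reg.GetReg() / reg.GetHighReg()); this revert restores
the plain low_reg / high_reg fields and the packed register-pair result of
AllocTypedTempPair. As a rough, self-contained illustration only (not the
compiler sources; PackRegPair and the main() driver are invented for the
sketch), the restored pair packing behaves like this:

    // Sketch of the packing AllocTypedTempPair returns:
    // low reg in the low byte, high reg in the next byte.
    #include <cassert>

    static int PackRegPair(int low_reg, int high_reg) {
      return (low_reg & 0xff) | ((high_reg & 0xff) << 8);
    }

    int main() {
      int pair = PackRegPair(2, 3);        // e.g. core register pair r2/r3
      assert((pair & 0xff) == 2);          // low byte  -> low reg
      assert(((pair >> 8) & 0xff) == 3);   // next byte -> high reg
      return 0;
    }
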
Change-Id: If88d69ba88e0af0b407ff2240566d7e4545d8a99

diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index bb03352..37b4ec6 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -117,6 +117,14 @@
// Mask to strip off fp flags.
#define ARM_FP_REG_MASK (ARM_FP_REG_OFFSET-1)
+// RegisterLocation templates return values (r0, or r0/r1).
+#define ARM_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, INVALID_REG, \
+ INVALID_SREG, INVALID_SREG}
+#define ARM_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r1, \
+ INVALID_SREG, INVALID_SREG}
+#define ARM_LOC_C_RETURN_FLOAT ARM_LOC_C_RETURN
+#define ARM_LOC_C_RETURN_DOUBLE ARM_LOC_C_RETURN_WIDE
+
enum ArmResourceEncodingPos {
kArmGPReg0 = 0,
kArmRegSP = 13,
@@ -217,20 +225,6 @@
#define rARM_INVOKE_TGT rARM_LR
#define rARM_COUNT INVALID_REG
-// RegisterLocation templates return values (r0, or r0/r1).
-const RegLocation arm_loc_c_return
- {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
-const RegLocation arm_loc_c_return_wide
- {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k64BitPair, r0, r1), INVALID_SREG, INVALID_SREG};
-const RegLocation arm_loc_c_return_float
- {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
-const RegLocation arm_loc_c_return_double
- {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k64BitPair, r0, r1), INVALID_SREG, INVALID_SREG};
-
enum ArmShiftEncodings {
kArmLsl = 0x0,
kArmLsr = 0x1,
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 264bded..b36dde9 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -79,7 +79,7 @@
LIR* target = NewLIR0(kPseudoTargetLabel);
// Load next key/disp
NewLIR2(kThumb2LdmiaWB, rBase, (1 << r_key) | (1 << r_disp));
- OpRegReg(kOpCmp, r_key, rl_src.reg.GetReg());
+ OpRegReg(kOpCmp, r_key, rl_src.low_reg);
// Go if match. NOTE: No instruction set switch here - must stay Thumb2
OpIT(kCondEq, "");
LIR* switch_branch = NewLIR1(kThumb2AddPCR, r_disp);
@@ -115,10 +115,10 @@
int keyReg;
// Remove the bias, if necessary
if (low_key == 0) {
- keyReg = rl_src.reg.GetReg();
+ keyReg = rl_src.low_reg;
} else {
keyReg = AllocTemp();
- OpRegRegImm(kOpSub, keyReg, rl_src.reg.GetReg(), low_key);
+ OpRegRegImm(kOpSub, keyReg, rl_src.low_reg, low_key);
}
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, keyReg, size-1);
@@ -293,7 +293,7 @@
int ex_offset = Thread::ExceptionOffset().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
int reset_reg = AllocTemp();
- LoadWordDisp(rARM_SELF, ex_offset, rl_result.reg.GetReg());
+ LoadWordDisp(rARM_SELF, ex_offset, rl_result.low_reg);
LoadConstant(reset_reg, 0);
StoreWordDisp(rARM_SELF, ex_offset, reset_reg);
FreeTemp(reset_reg);
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 2c0cead..65dee80 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -49,7 +49,7 @@
bool IsFpReg(int reg);
bool SameRegType(int reg1, int reg2);
int AllocTypedTemp(bool fp_hint, int reg_class);
- RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
+ int AllocTypedTempPair(bool fp_hint, int reg_class);
int S2d(int low_reg, int high_reg);
int TargetReg(SpecialTargetRegister reg);
int GetArgMappingToPhysicalReg(int arg_num);
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index dd0a429..46542e1 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -63,7 +63,7 @@
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ NewLIR3(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
StoreValue(rl_dest, rl_result);
}
@@ -111,8 +111,8 @@
rl_result = EvalLoc(rl_dest, kFPReg, true);
DCHECK(rl_dest.wide);
DCHECK(rl_result.wide);
- NewLIR3(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
- S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
+ NewLIR3(op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg),
+ S2d(rl_src2.low_reg, rl_src2.high_reg));
StoreValueWide(rl_dest, rl_result);
}
@@ -143,16 +143,16 @@
break;
case Instruction::LONG_TO_DOUBLE: {
rl_src = LoadValueWide(rl_src, kFPReg);
- src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
rl_result = EvalLoc(rl_dest, kFPReg, true);
// TODO: clean up AllocTempDouble so that its result has the double bits set.
int tmp1 = AllocTempDouble();
int tmp2 = AllocTempDouble();
NewLIR2(kThumb2VcvtF64S32, tmp1 | ARM_FP_DOUBLE, (src_reg & ~ARM_FP_DOUBLE) + 1);
- NewLIR2(kThumb2VcvtF64U32, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), (src_reg & ~ARM_FP_DOUBLE));
+ NewLIR2(kThumb2VcvtF64U32, S2d(rl_result.low_reg, rl_result.high_reg), (src_reg & ~ARM_FP_DOUBLE));
LoadConstantWide(tmp2, tmp2 + 1, 0x41f0000000000000LL);
- NewLIR3(kThumb2VmlaF64, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), tmp1 | ARM_FP_DOUBLE,
+ NewLIR3(kThumb2VmlaF64, S2d(rl_result.low_reg, rl_result.high_reg), tmp1 | ARM_FP_DOUBLE,
tmp2 | ARM_FP_DOUBLE);
FreeTemp(tmp1);
FreeTemp(tmp2);
@@ -173,18 +173,18 @@
}
if (rl_src.wide) {
rl_src = LoadValueWide(rl_src, kFPReg);
- src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
} else {
rl_src = LoadValue(rl_src, kFPReg);
- src_reg = rl_src.reg.GetReg();
+ src_reg = rl_src.low_reg;
}
if (rl_dest.wide) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), src_reg);
+ NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, rl_result.reg.GetReg(), src_reg);
+ NewLIR2(op, rl_result.low_reg, src_reg);
StoreValue(rl_dest, rl_result);
}
}
@@ -199,14 +199,14 @@
rl_src2 = mir_graph_->GetSrcWide(mir, 2);
rl_src1 = LoadValueWide(rl_src1, kFPReg);
rl_src2 = LoadValueWide(rl_src2, kFPReg);
- NewLIR2(kThumb2Vcmpd, S2d(rl_src1.reg.GetReg(), rl_src2.reg.GetHighReg()),
- S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
+ NewLIR2(kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src2.high_reg),
+ S2d(rl_src2.low_reg, rl_src2.high_reg));
} else {
rl_src1 = mir_graph_->GetSrc(mir, 0);
rl_src2 = mir_graph_->GetSrc(mir, 1);
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
- NewLIR2(kThumb2Vcmps, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ NewLIR2(kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg);
}
NewLIR0(kThumb2Fmstat);
ConditionCode ccode = mir->meta.ccode;
@@ -273,28 +273,28 @@
// In case result vreg is also a src vreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadConstant(rl_result.reg.GetReg(), default_result);
- NewLIR2(kThumb2Vcmpd, S2d(rl_src1.reg.GetReg(), rl_src2.reg.GetHighReg()),
- S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
+ LoadConstant(rl_result.low_reg, default_result);
+ NewLIR2(kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src2.high_reg),
+ S2d(rl_src2.low_reg, rl_src2.high_reg));
} else {
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
// In case result vreg is also a srcvreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadConstant(rl_result.reg.GetReg(), default_result);
- NewLIR2(kThumb2Vcmps, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ LoadConstant(rl_result.low_reg, default_result);
+ NewLIR2(kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg);
}
- DCHECK(!ARM_FPREG(rl_result.reg.GetReg()));
+ DCHECK(!ARM_FPREG(rl_result.low_reg));
NewLIR0(kThumb2Fmstat);
OpIT((default_result == -1) ? kCondGt : kCondMi, "");
- NewLIR2(kThumb2MovI8M, rl_result.reg.GetReg(),
+ NewLIR2(kThumb2MovI8M, rl_result.low_reg,
ModifiedImmediate(-default_result)); // Must not alter ccodes
GenBarrier();
OpIT(kCondEq, "");
- LoadConstant(rl_result.reg.GetReg(), 0);
+ LoadConstant(rl_result.low_reg, 0);
GenBarrier();
StoreValue(rl_dest, rl_result);
@@ -304,7 +304,7 @@
RegLocation rl_result;
rl_src = LoadValue(rl_src, kFPReg);
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(kThumb2Vnegs, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ NewLIR2(kThumb2Vnegs, rl_result.low_reg, rl_src.low_reg);
StoreValue(rl_dest, rl_result);
}
@@ -312,8 +312,8 @@
RegLocation rl_result;
rl_src = LoadValueWide(rl_src, kFPReg);
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(kThumb2Vnegd, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
- S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
+ NewLIR2(kThumb2Vnegd, S2d(rl_result.low_reg, rl_result.high_reg),
+ S2d(rl_src.low_reg, rl_src.high_reg));
StoreValueWide(rl_dest, rl_result);
}
@@ -324,18 +324,18 @@
RegLocation rl_dest = InlineTargetWide(info); // double place for result
rl_src = LoadValueWide(rl_src, kFPReg);
RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(kThumb2Vsqrtd, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
- S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
- NewLIR2(kThumb2Vcmpd, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
- S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()));
+ NewLIR2(kThumb2Vsqrtd, S2d(rl_result.low_reg, rl_result.high_reg),
+ S2d(rl_src.low_reg, rl_src.high_reg));
+ NewLIR2(kThumb2Vcmpd, S2d(rl_result.low_reg, rl_result.high_reg),
+ S2d(rl_result.low_reg, rl_result.high_reg));
NewLIR0(kThumb2Fmstat);
branch = NewLIR2(kThumbBCond, 0, kArmCondEq);
ClobberCallerSave();
LockCallTemps(); // Using fixed registers
int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pSqrt));
- NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
+ NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.low_reg, rl_src.high_reg));
NewLIR1(kThumbBlxR, r_tgt);
- NewLIR3(kThumb2Fmdrr, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), r0, r1);
+ NewLIR3(kThumb2Fmdrr, S2d(rl_result.low_reg, rl_result.high_reg), r0, r1);
branch->target = NewLIR0(kPseudoTargetLabel);
StoreValueWide(rl_dest, rl_result);
return true;
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 7aff89e..43928fc 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -90,10 +90,10 @@
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
int t_reg = AllocTemp();
LoadConstant(t_reg, -1);
- OpRegReg(kOpCmp, rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
LIR* branch1 = OpCondBranch(kCondLt, NULL);
LIR* branch2 = OpCondBranch(kCondGt, NULL);
- OpRegRegReg(kOpSub, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegRegReg(kOpSub, t_reg, rl_src1.low_reg, rl_src2.low_reg);
LIR* branch3 = OpCondBranch(kCondEq, NULL);
OpIT(kCondHi, "E");
@@ -107,7 +107,7 @@
target1 = NewLIR0(kPseudoTargetLabel);
RegLocation rl_temp = LocCReturn(); // Just using as template, will change
- rl_temp.reg.SetReg(t_reg);
+ rl_temp.low_reg = t_reg;
StoreValue(rl_dest, rl_temp);
FreeTemp(t_reg);
@@ -125,8 +125,8 @@
LIR* taken = &block_label_list_[bb->taken];
LIR* not_taken = &block_label_list_[bb->fall_through];
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- int32_t low_reg = rl_src1.reg.GetReg();
- int32_t high_reg = rl_src1.reg.GetHighReg();
+ int32_t low_reg = rl_src1.low_reg;
+ int32_t high_reg = rl_src1.high_reg;
if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
int t_reg = AllocTemp();
@@ -178,15 +178,15 @@
int false_val = mir->dalvikInsn.vC;
rl_result = EvalLoc(rl_dest, kCoreReg, true);
if ((true_val == 1) && (false_val == 0)) {
- OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 1);
+ OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, 1);
OpIT(kCondUlt, "");
- LoadConstant(rl_result.reg.GetReg(), 0);
+ LoadConstant(rl_result.low_reg, 0);
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
} else if (InexpensiveConstantInt(true_val) && InexpensiveConstantInt(false_val)) {
- OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
+ OpRegImm(kOpCmp, rl_src.low_reg, 0);
OpIT(kCondEq, "E");
- LoadConstant(rl_result.reg.GetReg(), true_val);
- LoadConstant(rl_result.reg.GetReg(), false_val);
+ LoadConstant(rl_result.low_reg, true_val);
+ LoadConstant(rl_result.low_reg, false_val);
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
} else {
// Unlikely case - could be tuned.
@@ -194,10 +194,10 @@
int t_reg2 = AllocTemp();
LoadConstant(t_reg1, true_val);
LoadConstant(t_reg2, false_val);
- OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
+ OpRegImm(kOpCmp, rl_src.low_reg, 0);
OpIT(kCondEq, "E");
- OpRegCopy(rl_result.reg.GetReg(), t_reg1);
- OpRegCopy(rl_result.reg.GetReg(), t_reg2);
+ OpRegCopy(rl_result.low_reg, t_reg1);
+ OpRegCopy(rl_result.low_reg, t_reg2);
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
}
} else {
@@ -207,17 +207,17 @@
rl_true = LoadValue(rl_true, kCoreReg);
rl_false = LoadValue(rl_false, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
- if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) { // Is the "true" case already in place?
+ OpRegImm(kOpCmp, rl_src.low_reg, 0);
+ if (rl_result.low_reg == rl_true.low_reg) { // Is the "true" case already in place?
OpIT(kCondNe, "");
- OpRegCopy(rl_result.reg.GetReg(), rl_false.reg.GetReg());
- } else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) { // False case in place?
+ OpRegCopy(rl_result.low_reg, rl_false.low_reg);
+ } else if (rl_result.low_reg == rl_false.low_reg) { // False case in place?
OpIT(kCondEq, "");
- OpRegCopy(rl_result.reg.GetReg(), rl_true.reg.GetReg());
+ OpRegCopy(rl_result.low_reg, rl_true.low_reg);
} else { // Normal - select between the two.
OpIT(kCondEq, "E");
- OpRegCopy(rl_result.reg.GetReg(), rl_true.reg.GetReg());
- OpRegCopy(rl_result.reg.GetReg(), rl_false.reg.GetReg());
+ OpRegCopy(rl_result.low_reg, rl_true.low_reg);
+ OpRegCopy(rl_result.low_reg, rl_false.low_reg);
}
GenBarrier(); // Add a scheduling barrier to keep the IT shadow intact
}
@@ -247,7 +247,7 @@
LIR* not_taken = &block_label_list_[bb->fall_through];
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- OpRegReg(kOpCmp, rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
switch (ccode) {
case kCondEq:
OpCondBranch(kCondNe, not_taken);
@@ -278,7 +278,7 @@
default:
LOG(FATAL) << "Unexpected ccode: " << ccode;
}
- OpRegReg(kOpCmp, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
OpCondBranch(ccode, taken);
}
@@ -415,21 +415,21 @@
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
int r_hi = AllocTemp();
int r_lo = AllocTemp();
- NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.reg.GetReg());
+ NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.low_reg);
switch (pattern) {
case Divide3:
- OpRegRegRegShift(kOpSub, rl_result.reg.GetReg(), r_hi,
- rl_src.reg.GetReg(), EncodeShift(kArmAsr, 31));
+ OpRegRegRegShift(kOpSub, rl_result.low_reg, r_hi,
+ rl_src.low_reg, EncodeShift(kArmAsr, 31));
break;
case Divide5:
- OpRegRegImm(kOpAsr, r_lo, rl_src.reg.GetReg(), 31);
- OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo, r_hi,
+ OpRegRegImm(kOpAsr, r_lo, rl_src.low_reg, 31);
+ OpRegRegRegShift(kOpRsub, rl_result.low_reg, r_lo, r_hi,
EncodeShift(kArmAsr, magic_table[lit].shift));
break;
case Divide7:
- OpRegReg(kOpAdd, r_hi, rl_src.reg.GetReg());
- OpRegRegImm(kOpAsr, r_lo, rl_src.reg.GetReg(), 31);
- OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo, r_hi,
+ OpRegReg(kOpAdd, r_hi, rl_src.low_reg);
+ OpRegRegImm(kOpAsr, r_lo, rl_src.low_reg, 31);
+ OpRegRegRegShift(kOpRsub, rl_result.low_reg, r_lo, r_hi,
EncodeShift(kArmAsr, magic_table[lit].shift));
break;
default:
@@ -476,7 +476,7 @@
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_div) {
// Simple case, use sdiv instruction.
- OpRegRegReg(kOpDiv, rl_result.reg.GetReg(), reg1, reg2);
+ OpRegRegReg(kOpDiv, rl_result.low_reg, reg1, reg2);
} else {
// Remainder case, use the following code:
// temp = reg1 / reg2 - integer division
@@ -486,7 +486,7 @@
int temp = AllocTemp();
OpRegRegReg(kOpDiv, temp, reg1, reg2);
OpRegReg(kOpMul, temp, reg2);
- OpRegRegReg(kOpSub, rl_result.reg.GetReg(), reg1, temp);
+ OpRegRegReg(kOpSub, rl_result.low_reg, reg1, temp);
FreeTemp(temp);
}
@@ -501,10 +501,10 @@
rl_src2 = LoadValue(rl_src2, kCoreReg);
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegReg(kOpCmp, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
OpIT((is_min) ? kCondGt : kCondLt, "E");
- OpRegReg(kOpMov, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegReg(kOpMov, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
+ OpRegReg(kOpMov, rl_result.low_reg, rl_src2.low_reg);
+ OpRegReg(kOpMov, rl_result.low_reg, rl_src1.low_reg);
GenBarrier();
StoreValue(rl_dest, rl_result);
return true;
@@ -518,18 +518,18 @@
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (size == kLong) {
// Fake unaligned LDRD by two unaligned LDR instructions on ARMv7 with SCTLR.A set to 0.
- if (rl_address.reg.GetReg() != rl_result.reg.GetReg()) {
- LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), kWord, INVALID_SREG);
- LoadBaseDisp(rl_address.reg.GetReg(), 4, rl_result.reg.GetHighReg(), kWord, INVALID_SREG);
+ if (rl_address.low_reg != rl_result.low_reg) {
+ LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, kWord, INVALID_SREG);
+ LoadBaseDisp(rl_address.low_reg, 4, rl_result.high_reg, kWord, INVALID_SREG);
} else {
- LoadBaseDisp(rl_address.reg.GetReg(), 4, rl_result.reg.GetHighReg(), kWord, INVALID_SREG);
- LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), kWord, INVALID_SREG);
+ LoadBaseDisp(rl_address.low_reg, 4, rl_result.high_reg, kWord, INVALID_SREG);
+ LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, kWord, INVALID_SREG);
}
StoreValueWide(rl_dest, rl_result);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
// Unaligned load with LDR and LDRSH is allowed on ARMv7 with SCTLR.A set to 0.
- LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), size, INVALID_SREG);
+ LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, size, INVALID_SREG);
StoreValue(rl_dest, rl_result);
}
return true;
@@ -543,13 +543,13 @@
if (size == kLong) {
// Fake unaligned STRD by two unaligned STR instructions on ARMv7 with SCTLR.A set to 0.
RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), kWord);
- StoreBaseDisp(rl_address.reg.GetReg(), 4, rl_value.reg.GetHighReg(), kWord);
+ StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, kWord);
+ StoreBaseDisp(rl_address.low_reg, 4, rl_value.high_reg, kWord);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
// Unaligned store with STR and STRSH is allowed on ARMv7 with SCTLR.A set to 0.
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), size);
+ StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, size);
}
return true;
}
@@ -589,24 +589,24 @@
bool load_early = true;
if (is_long) {
bool expected_is_core_reg =
- rl_src_expected.location == kLocPhysReg && !IsFpReg(rl_src_expected.reg.GetReg());
+ rl_src_expected.location == kLocPhysReg && !IsFpReg(rl_src_expected.low_reg);
bool new_value_is_core_reg =
- rl_src_new_value.location == kLocPhysReg && !IsFpReg(rl_src_new_value.reg.GetReg());
- bool expected_is_good_reg = expected_is_core_reg && !IsTemp(rl_src_expected.reg.GetReg());
- bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(rl_src_new_value.reg.GetReg());
+ rl_src_new_value.location == kLocPhysReg && !IsFpReg(rl_src_new_value.low_reg);
+ bool expected_is_good_reg = expected_is_core_reg && !IsTemp(rl_src_expected.low_reg);
+ bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(rl_src_new_value.low_reg);
if (!expected_is_good_reg && !new_value_is_good_reg) {
// None of expected/new_value is non-temp reg, need to load both late
load_early = false;
// Make sure they are not in the temp regs and the load will not be skipped.
if (expected_is_core_reg) {
- FlushRegWide(rl_src_expected.reg.GetReg(), rl_src_expected.reg.GetHighReg());
+ FlushRegWide(rl_src_expected.low_reg, rl_src_expected.high_reg);
ClobberSReg(rl_src_expected.s_reg_low);
ClobberSReg(GetSRegHi(rl_src_expected.s_reg_low));
rl_src_expected.location = kLocDalvikFrame;
}
if (new_value_is_core_reg) {
- FlushRegWide(rl_src_new_value.reg.GetReg(), rl_src_new_value.reg.GetHighReg());
+ FlushRegWide(rl_src_new_value.low_reg, rl_src_new_value.high_reg);
ClobberSReg(rl_src_new_value.s_reg_low);
ClobberSReg(GetSRegHi(rl_src_new_value.s_reg_low));
rl_src_new_value.location = kLocDalvikFrame;
@@ -627,19 +627,19 @@
if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
// Mark card for object assuming new value is stored.
- MarkGCCard(rl_new_value.reg.GetReg(), rl_object.reg.GetReg());
+ MarkGCCard(rl_new_value.low_reg, rl_object.low_reg);
}
RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
int r_ptr = rARM_LR;
- OpRegRegReg(kOpAdd, r_ptr, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
+ OpRegRegReg(kOpAdd, r_ptr, rl_object.low_reg, rl_offset.low_reg);
// Free now unneeded rl_object and rl_offset to give more temps.
ClobberSReg(rl_object.s_reg_low);
- FreeTemp(rl_object.reg.GetReg());
+ FreeTemp(rl_object.low_reg);
ClobberSReg(rl_offset.s_reg_low);
- FreeTemp(rl_offset.reg.GetReg());
+ FreeTemp(rl_offset.low_reg);
RegLocation rl_expected;
if (!is_long) {
@@ -647,11 +647,8 @@
} else if (load_early) {
rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
} else {
- // NOTE: partially defined rl_expected & rl_new_value - but we just want the regs.
- int low_reg = AllocTemp();
- int high_reg = AllocTemp();
- rl_new_value.reg = RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
- rl_expected = rl_new_value;
+ rl_new_value.low_reg = rl_expected.low_reg = AllocTemp();
+ rl_new_value.high_reg = rl_expected.high_reg = AllocTemp();
}
// do {
@@ -665,13 +662,13 @@
if (is_long) {
int r_tmp_high = AllocTemp();
if (!load_early) {
- LoadValueDirectWide(rl_src_expected, rl_expected.reg.GetReg(), rl_expected.reg.GetHighReg());
+ LoadValueDirectWide(rl_src_expected, rl_expected.low_reg, rl_expected.high_reg);
}
NewLIR3(kThumb2Ldrexd, r_tmp, r_tmp_high, r_ptr);
- OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetReg());
- OpRegReg(kOpSub, r_tmp_high, rl_expected.reg.GetHighReg());
+ OpRegReg(kOpSub, r_tmp, rl_expected.low_reg);
+ OpRegReg(kOpSub, r_tmp_high, rl_expected.high_reg);
if (!load_early) {
- LoadValueDirectWide(rl_src_new_value, rl_new_value.reg.GetReg(), rl_new_value.reg.GetHighReg());
+ LoadValueDirectWide(rl_src_new_value, rl_new_value.low_reg, rl_new_value.high_reg);
}
// Make sure we use ORR that sets the ccode
if (ARM_LOWREG(r_tmp) && ARM_LOWREG(r_tmp_high)) {
@@ -683,14 +680,14 @@
DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
OpIT(kCondEq, "T");
- NewLIR4(kThumb2Strexd /* eq */, r_tmp, rl_new_value.reg.GetReg(), rl_new_value.reg.GetHighReg(), r_ptr);
+ NewLIR4(kThumb2Strexd /* eq */, r_tmp, rl_new_value.low_reg, rl_new_value.high_reg, r_ptr);
} else {
NewLIR3(kThumb2Ldrex, r_tmp, r_ptr, 0);
- OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetReg());
+ OpRegReg(kOpSub, r_tmp, rl_expected.low_reg);
DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
OpIT(kCondEq, "T");
- NewLIR4(kThumb2Strex /* eq */, r_tmp, rl_new_value.reg.GetReg(), r_ptr, 0);
+ NewLIR4(kThumb2Strex /* eq */, r_tmp, rl_new_value.low_reg, r_ptr, 0);
}
// Still one conditional left from OpIT(kCondEq, "T") from either branch
@@ -698,16 +695,16 @@
OpCondBranch(kCondEq, target);
if (!load_early) {
- FreeTemp(rl_expected.reg.GetReg()); // Now unneeded.
- FreeTemp(rl_expected.reg.GetHighReg()); // Now unneeded.
+ FreeTemp(rl_expected.low_reg); // Now unneeded.
+ FreeTemp(rl_expected.high_reg); // Now unneeded.
}
// result := (tmp1 != 0) ? 0 : 1;
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), r_tmp, 1);
+ OpRegRegImm(kOpRsub, rl_result.low_reg, r_tmp, 1);
DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
OpIT(kCondUlt, "");
- LoadConstant(rl_result.reg.GetReg(), 0); /* cc */
+ LoadConstant(rl_result.low_reg, 0); /* cc */
FreeTemp(r_tmp); // Now unneeded.
StoreValue(rl_dest, rl_result);
@@ -733,10 +730,10 @@
void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
- OpRegRegRegShift(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg(),
+ OpRegRegRegShift(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg,
EncodeShift(kArmLsl, second_bit - first_bit));
if (first_bit != 0) {
- OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_result.reg.GetReg(), first_bit);
+ OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
}
}
@@ -785,14 +782,14 @@
int z_reg = AllocTemp();
LoadConstantNoClobber(z_reg, 0);
// Check for destructive overlap
- if (rl_result.reg.GetReg() == rl_src.reg.GetHighReg()) {
+ if (rl_result.low_reg == rl_src.high_reg) {
int t_reg = AllocTemp();
- OpRegRegReg(kOpSub, rl_result.reg.GetReg(), z_reg, rl_src.reg.GetReg());
- OpRegRegReg(kOpSbc, rl_result.reg.GetHighReg(), z_reg, t_reg);
+ OpRegRegReg(kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
+ OpRegRegReg(kOpSbc, rl_result.high_reg, z_reg, t_reg);
FreeTemp(t_reg);
} else {
- OpRegRegReg(kOpSub, rl_result.reg.GetReg(), z_reg, rl_src.reg.GetReg());
- OpRegRegReg(kOpSbc, rl_result.reg.GetHighReg(), z_reg, rl_src.reg.GetHighReg());
+ OpRegRegReg(kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
+ OpRegRegReg(kOpSbc, rl_result.high_reg, z_reg, rl_src.high_reg);
}
FreeTemp(z_reg);
StoreValueWide(rl_dest, rl_result);
@@ -830,41 +827,41 @@
bool special_case = true;
// If operands are the same, or any pair has been promoted we're not the special case.
if ((rl_src1.s_reg_low == rl_src2.s_reg_low) ||
- (!IsTemp(rl_src1.reg.GetReg()) && !IsTemp(rl_src1.reg.GetHighReg())) ||
- (!IsTemp(rl_src2.reg.GetReg()) && !IsTemp(rl_src2.reg.GetHighReg()))) {
+ (!IsTemp(rl_src1.low_reg) && !IsTemp(rl_src1.high_reg)) ||
+ (!IsTemp(rl_src2.low_reg) && !IsTemp(rl_src2.high_reg))) {
special_case = false;
}
// Tuning: if rl_dest has been promoted and is *not* either operand, could use directly.
int res_lo = AllocTemp();
int res_hi;
- if (rl_src1.reg.GetReg() == rl_src2.reg.GetReg()) {
+ if (rl_src1.low_reg == rl_src2.low_reg) {
res_hi = AllocTemp();
- NewLIR3(kThumb2MulRRR, tmp1, rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg());
- NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src1.reg.GetReg(), rl_src1.reg.GetReg());
+ NewLIR3(kThumb2MulRRR, tmp1, rl_src1.low_reg, rl_src1.high_reg);
+ NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src1.low_reg, rl_src1.low_reg);
OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
} else {
// In the special case, all temps are now allocated
- NewLIR3(kThumb2MulRRR, tmp1, rl_src2.reg.GetReg(), rl_src1.reg.GetHighReg());
+ NewLIR3(kThumb2MulRRR, tmp1, rl_src2.low_reg, rl_src1.high_reg);
if (special_case) {
- DCHECK_NE(rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
- FreeTemp(rl_src1.reg.GetHighReg());
+ DCHECK_NE(rl_src1.low_reg, rl_src2.low_reg);
+ DCHECK_NE(rl_src1.high_reg, rl_src2.high_reg);
+ FreeTemp(rl_src1.high_reg);
}
res_hi = AllocTemp();
- NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
- NewLIR4(kThumb2Mla, tmp1, rl_src1.reg.GetReg(), rl_src2.reg.GetHighReg(), tmp1);
+ NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src2.low_reg, rl_src1.low_reg);
+ NewLIR4(kThumb2Mla, tmp1, rl_src1.low_reg, rl_src2.high_reg, tmp1);
NewLIR4(kThumb2AddRRR, res_hi, tmp1, res_hi, 0);
if (special_case) {
- FreeTemp(rl_src1.reg.GetReg());
- Clobber(rl_src1.reg.GetReg());
- Clobber(rl_src1.reg.GetHighReg());
+ FreeTemp(rl_src1.low_reg);
+ Clobber(rl_src1.low_reg);
+ Clobber(rl_src1.high_reg);
}
}
FreeTemp(tmp1);
rl_result = GetReturnWide(false); // Just using as a template.
- rl_result.reg.SetReg(res_lo);
- rl_result.reg.SetHighReg(res_hi);
+ rl_result.low_reg = res_lo;
+ rl_result.high_reg = res_hi;
StoreValueWide(rl_dest, rl_result);
// Now, restore lr to its non-temp status.
Clobber(rARM_LR);
@@ -923,25 +920,25 @@
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
int reg_len = INVALID_REG;
if (needs_range_check) {
reg_len = AllocTemp();
/* Get len */
- LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
+ LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
}
if (rl_dest.wide || rl_dest.fp || constant_index) {
int reg_ptr;
if (constant_index) {
- reg_ptr = rl_array.reg.GetReg(); // NOTE: must not alter reg_ptr in constant case.
+ reg_ptr = rl_array.low_reg; // NOTE: must not alter reg_ptr in constant case.
} else {
// No special indexed operation, lea + load w/ displacement
reg_ptr = AllocTemp();
- OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg.GetReg(), rl_index.reg.GetReg(),
+ OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
EncodeShift(kArmLsl, scale));
- FreeTemp(rl_index.reg.GetReg());
+ FreeTemp(rl_index.low_reg);
}
rl_result = EvalLoc(rl_dest, reg_class, true);
@@ -949,18 +946,18 @@
if (constant_index) {
GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
} else {
- GenRegRegCheck(kCondLs, reg_len, rl_index.reg.GetReg(), kThrowArrayBounds);
+ GenRegRegCheck(kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
}
FreeTemp(reg_len);
}
if (rl_dest.wide) {
- LoadBaseDispWide(reg_ptr, data_offset, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
+ LoadBaseDispWide(reg_ptr, data_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
if (!constant_index) {
FreeTemp(reg_ptr);
}
StoreValueWide(rl_dest, rl_result);
} else {
- LoadBaseDisp(reg_ptr, data_offset, rl_result.reg.GetReg(), size, INVALID_SREG);
+ LoadBaseDisp(reg_ptr, data_offset, rl_result.low_reg, size, INVALID_SREG);
if (!constant_index) {
FreeTemp(reg_ptr);
}
@@ -969,15 +966,15 @@
} else {
// Offset base, then use indexed load
int reg_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset);
- FreeTemp(rl_array.reg.GetReg());
+ OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
+ FreeTemp(rl_array.low_reg);
rl_result = EvalLoc(rl_dest, reg_class, true);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- LoadBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_result.reg.GetReg(), scale, size);
+ LoadBaseIndexed(reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
}
@@ -1013,17 +1010,17 @@
int reg_ptr;
bool allocated_reg_ptr_temp = false;
if (constant_index) {
- reg_ptr = rl_array.reg.GetReg();
- } else if (IsTemp(rl_array.reg.GetReg()) && !card_mark) {
- Clobber(rl_array.reg.GetReg());
- reg_ptr = rl_array.reg.GetReg();
+ reg_ptr = rl_array.low_reg;
+ } else if (IsTemp(rl_array.low_reg) && !card_mark) {
+ Clobber(rl_array.low_reg);
+ reg_ptr = rl_array.low_reg;
} else {
allocated_reg_ptr_temp = true;
reg_ptr = AllocTemp();
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
int reg_len = INVALID_REG;
@@ -1031,7 +1028,7 @@
reg_len = AllocTemp();
// NOTE: max live temps(4) here.
/* Get len */
- LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
+ LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
}
/* at this point, reg_ptr points to array, 2 live temps */
if (rl_src.wide || rl_src.fp || constant_index) {
@@ -1041,39 +1038,39 @@
rl_src = LoadValue(rl_src, reg_class);
}
if (!constant_index) {
- OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg.GetReg(), rl_index.reg.GetReg(),
+ OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
EncodeShift(kArmLsl, scale));
}
if (needs_range_check) {
if (constant_index) {
GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
} else {
- GenRegRegCheck(kCondLs, reg_len, rl_index.reg.GetReg(), kThrowArrayBounds);
+ GenRegRegCheck(kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
}
FreeTemp(reg_len);
}
if (rl_src.wide) {
- StoreBaseDispWide(reg_ptr, data_offset, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ StoreBaseDispWide(reg_ptr, data_offset, rl_src.low_reg, rl_src.high_reg);
} else {
- StoreBaseDisp(reg_ptr, data_offset, rl_src.reg.GetReg(), size);
+ StoreBaseDisp(reg_ptr, data_offset, rl_src.low_reg, size);
}
} else {
/* reg_ptr -> array data */
- OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset);
+ OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
rl_src = LoadValue(rl_src, reg_class);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- StoreBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_src.reg.GetReg(),
+ StoreBaseIndexed(reg_ptr, rl_index.low_reg, rl_src.low_reg,
scale, size);
}
if (allocated_reg_ptr_temp) {
FreeTemp(reg_ptr);
}
if (card_mark) {
- MarkGCCard(rl_src.reg.GetReg(), rl_array.reg.GetReg());
+ MarkGCCard(rl_src.low_reg, rl_array.low_reg);
}
}
@@ -1096,53 +1093,53 @@
case Instruction::SHL_LONG:
case Instruction::SHL_LONG_2ADDR:
if (shift_amount == 1) {
- OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg());
- OpRegRegReg(kOpAdc, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), rl_src.reg.GetHighReg());
+ OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg);
+ OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, rl_src.high_reg);
} else if (shift_amount == 32) {
- OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetReg());
- LoadConstant(rl_result.reg.GetReg(), 0);
+ OpRegCopy(rl_result.high_reg, rl_src.low_reg);
+ LoadConstant(rl_result.low_reg, 0);
} else if (shift_amount > 31) {
- OpRegRegImm(kOpLsl, rl_result.reg.GetHighReg(), rl_src.reg.GetReg(), shift_amount - 32);
- LoadConstant(rl_result.reg.GetReg(), 0);
+ OpRegRegImm(kOpLsl, rl_result.high_reg, rl_src.low_reg, shift_amount - 32);
+ LoadConstant(rl_result.low_reg, 0);
} else {
- OpRegRegImm(kOpLsl, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), shift_amount);
- OpRegRegRegShift(kOpOr, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetReg(),
+ OpRegRegImm(kOpLsl, rl_result.high_reg, rl_src.high_reg, shift_amount);
+ OpRegRegRegShift(kOpOr, rl_result.high_reg, rl_result.high_reg, rl_src.low_reg,
EncodeShift(kArmLsr, 32 - shift_amount));
- OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_src.reg.GetReg(), shift_amount);
+ OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, shift_amount);
}
break;
case Instruction::SHR_LONG:
case Instruction::SHR_LONG_2ADDR:
if (shift_amount == 32) {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
- OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 31);
+ OpRegCopy(rl_result.low_reg, rl_src.high_reg);
+ OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
} else if (shift_amount > 31) {
- OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), rl_src.reg.GetHighReg(), shift_amount - 32);
- OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 31);
+ OpRegRegImm(kOpAsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
+ OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
} else {
int t_reg = AllocTemp();
- OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), shift_amount);
- OpRegRegRegShift(kOpOr, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetHighReg(),
+ OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, shift_amount);
+ OpRegRegRegShift(kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
EncodeShift(kArmLsl, 32 - shift_amount));
FreeTemp(t_reg);
- OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), shift_amount);
+ OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
}
break;
case Instruction::USHR_LONG:
case Instruction::USHR_LONG_2ADDR:
if (shift_amount == 32) {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
- LoadConstant(rl_result.reg.GetHighReg(), 0);
+ OpRegCopy(rl_result.low_reg, rl_src.high_reg);
+ LoadConstant(rl_result.high_reg, 0);
} else if (shift_amount > 31) {
- OpRegRegImm(kOpLsr, rl_result.reg.GetReg(), rl_src.reg.GetHighReg(), shift_amount - 32);
- LoadConstant(rl_result.reg.GetHighReg(), 0);
+ OpRegRegImm(kOpLsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
+ LoadConstant(rl_result.high_reg, 0);
} else {
int t_reg = AllocTemp();
- OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), shift_amount);
- OpRegRegRegShift(kOpOr, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetHighReg(),
+ OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, shift_amount);
+ OpRegRegRegShift(kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
EncodeShift(kArmLsl, 32 - shift_amount));
FreeTemp(t_reg);
- OpRegRegImm(kOpLsr, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), shift_amount);
+ OpRegRegImm(kOpLsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
}
break;
default:
@@ -1197,36 +1194,36 @@
switch (opcode) {
case Instruction::ADD_LONG:
case Instruction::ADD_LONG_2ADDR:
- NewLIR3(kThumb2AddRRI8M, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), mod_imm_lo);
- NewLIR3(kThumb2AdcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
+ NewLIR3(kThumb2AddRRI8M, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
+ NewLIR3(kThumb2AdcRRI8M, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
break;
case Instruction::OR_LONG:
case Instruction::OR_LONG_2ADDR:
- if ((val_lo != 0) || (rl_result.reg.GetReg() != rl_src1.reg.GetReg())) {
- OpRegRegImm(kOpOr, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), val_lo);
+ if ((val_lo != 0) || (rl_result.low_reg != rl_src1.low_reg)) {
+ OpRegRegImm(kOpOr, rl_result.low_reg, rl_src1.low_reg, val_lo);
}
- if ((val_hi != 0) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
- OpRegRegImm(kOpOr, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), val_hi);
+ if ((val_hi != 0) || (rl_result.high_reg != rl_src1.high_reg)) {
+ OpRegRegImm(kOpOr, rl_result.high_reg, rl_src1.high_reg, val_hi);
}
break;
case Instruction::XOR_LONG:
case Instruction::XOR_LONG_2ADDR:
- OpRegRegImm(kOpXor, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), val_lo);
- OpRegRegImm(kOpXor, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), val_hi);
+ OpRegRegImm(kOpXor, rl_result.low_reg, rl_src1.low_reg, val_lo);
+ OpRegRegImm(kOpXor, rl_result.high_reg, rl_src1.high_reg, val_hi);
break;
case Instruction::AND_LONG:
case Instruction::AND_LONG_2ADDR:
- if ((val_lo != 0xffffffff) || (rl_result.reg.GetReg() != rl_src1.reg.GetReg())) {
- OpRegRegImm(kOpAnd, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), val_lo);
+ if ((val_lo != 0xffffffff) || (rl_result.low_reg != rl_src1.low_reg)) {
+ OpRegRegImm(kOpAnd, rl_result.low_reg, rl_src1.low_reg, val_lo);
}
- if ((val_hi != 0xffffffff) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
- OpRegRegImm(kOpAnd, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), val_hi);
+ if ((val_hi != 0xffffffff) || (rl_result.high_reg != rl_src1.high_reg)) {
+ OpRegRegImm(kOpAnd, rl_result.high_reg, rl_src1.high_reg, val_hi);
}
break;
case Instruction::SUB_LONG_2ADDR:
case Instruction::SUB_LONG:
- NewLIR3(kThumb2SubRRI8M, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), mod_imm_lo);
- NewLIR3(kThumb2SbcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
+ NewLIR3(kThumb2SubRRI8M, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
+ NewLIR3(kThumb2SbcRRI8M, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
break;
default:
LOG(FATAL) << "Unexpected opcode " << opcode;
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index ab1a053..83431ad 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -37,19 +37,23 @@
fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15};
RegLocation ArmMir2Lir::LocCReturn() {
- return arm_loc_c_return;
+ RegLocation res = ARM_LOC_C_RETURN;
+ return res;
}
RegLocation ArmMir2Lir::LocCReturnWide() {
- return arm_loc_c_return_wide;
+ RegLocation res = ARM_LOC_C_RETURN_WIDE;
+ return res;
}
RegLocation ArmMir2Lir::LocCReturnFloat() {
- return arm_loc_c_return_float;
+ RegLocation res = ARM_LOC_C_RETURN_FLOAT;
+ return res;
}
RegLocation ArmMir2Lir::LocCReturnDouble() {
- return arm_loc_c_return_double;
+ RegLocation res = ARM_LOC_C_RETURN_DOUBLE;
+ return res;
}
// Return a target-dependent special register.
@@ -526,10 +530,14 @@
return new ArmMir2Lir(cu, mir_graph, arena);
}
-// Alloc a pair of core registers, or a double.
-RegStorage ArmMir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
+/*
+ * Alloc a pair of core registers, or a double. Low reg in low byte,
+ * high reg in next byte.
+ */
+int ArmMir2Lir::AllocTypedTempPair(bool fp_hint, int reg_class) {
int high_reg;
int low_reg;
+ int res = 0;
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
low_reg = AllocTempDouble();
@@ -538,7 +546,8 @@
low_reg = AllocTemp();
high_reg = AllocTemp();
}
- return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
+ res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+ return res;
}
int ArmMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
@@ -585,11 +594,11 @@
void ArmMir2Lir::FreeRegLocTemps(RegLocation rl_keep,
RegLocation rl_free) {
- if ((rl_free.reg.GetReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetReg() != rl_keep.reg.GetHighReg()) &&
- (rl_free.reg.GetHighReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) {
+ if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
+ (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
// No overlap, free both
- FreeTemp(rl_free.reg.GetReg());
- FreeTemp(rl_free.reg.GetHighReg());
+ FreeTemp(rl_free.low_reg);
+ FreeTemp(rl_free.high_reg);
}
}
/*
@@ -688,19 +697,19 @@
RegLocation ArmMir2Lir::GetReturnWideAlt() {
RegLocation res = LocCReturnWide();
- res.reg.SetReg(r2);
- res.reg.SetHighReg(r3);
+ res.low_reg = r2;
+ res.high_reg = r3;
Clobber(r2);
Clobber(r3);
MarkInUse(r2);
MarkInUse(r3);
- MarkPair(res.reg.GetReg(), res.reg.GetHighReg());
+ MarkPair(res.low_reg, res.high_reg);
return res;
}
RegLocation ArmMir2Lir::GetReturnAlt() {
RegLocation res = LocCReturn();
- res.reg.SetReg(r1);
+ res.low_reg = r1;
Clobber(r1);
MarkInUse(r1);
return res;
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 06f2010..0533fbf 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -128,12 +128,12 @@
if ((rl_temp.location == kLocDalvikFrame) &&
InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
// OK - convert this to a compare immediate and branch
- OpCmpImmBranch(cond, rl_src1.reg.GetReg(), mir_graph_->ConstantValue(rl_src2), taken);
+ OpCmpImmBranch(cond, rl_src1.low_reg, mir_graph_->ConstantValue(rl_src2), taken);
return;
}
}
rl_src2 = LoadValue(rl_src2, kCoreReg);
- OpCmpBranch(cond, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), taken);
+ OpCmpBranch(cond, rl_src1.low_reg, rl_src2.low_reg, taken);
}
void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
@@ -163,17 +163,17 @@
cond = static_cast<ConditionCode>(0);
LOG(FATAL) << "Unexpected opcode " << opcode;
}
- OpCmpImmBranch(cond, rl_src.reg.GetReg(), 0, taken);
+ OpCmpImmBranch(cond, rl_src.low_reg, 0, taken);
}
void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (rl_src.location == kLocPhysReg) {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegCopy(rl_result.low_reg, rl_src.low_reg);
} else {
- LoadValueDirect(rl_src, rl_result.reg.GetReg());
+ LoadValueDirect(rl_src, rl_result.low_reg);
}
- OpRegRegImm(kOpAsr, rl_result.reg.GetHighReg(), rl_result.reg.GetReg(), 31);
+ OpRegRegImm(kOpAsr, rl_result.high_reg, rl_result.low_reg, 31);
StoreValueWide(rl_dest, rl_result);
}
@@ -195,7 +195,7 @@
default:
LOG(ERROR) << "Bad int conversion type";
}
- OpRegReg(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegReg(op, rl_result.low_reg, rl_src.low_reg);
StoreValue(rl_dest, rl_result);
}
@@ -290,7 +290,7 @@
RegLocation loc = UpdateLoc(info->args[i]);
if (loc.location == kLocPhysReg) {
StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
- loc.reg.GetReg(), kWord);
+ loc.low_reg, kWord);
}
}
/*
@@ -341,10 +341,10 @@
RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
StoreBaseDisp(TargetReg(kRet0),
mirror::Array::DataOffset(component_size).Int32Value() +
- i * 4, rl_arg.reg.GetReg(), kWord);
+ i * 4, rl_arg.low_reg, kWord);
// If the LoadValue caused a temp to be allocated, free it
- if (IsTemp(rl_arg.reg.GetReg())) {
- FreeTemp(rl_arg.reg.GetReg());
+ if (IsTemp(rl_arg.low_reg)) {
+ FreeTemp(rl_arg.low_reg);
}
}
}
@@ -398,10 +398,10 @@
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTemp();
- LoadWordDisp(rl_method.reg.GetReg(),
+ LoadWordDisp(rl_method.low_reg,
mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
- if (IsTemp(rl_method.reg.GetReg())) {
- FreeTemp(rl_method.reg.GetReg());
+ if (IsTemp(rl_method.low_reg)) {
+ FreeTemp(rl_method.low_reg);
}
} else {
// Medium path, static storage base in a different class which requires checks that the other
@@ -453,16 +453,16 @@
GenMemBarrier(kStoreStore);
}
if (is_long_or_double) {
- StoreBaseDispWide(r_base, field_offset, rl_src.reg.GetReg(),
- rl_src.reg.GetHighReg());
+ StoreBaseDispWide(r_base, field_offset, rl_src.low_reg,
+ rl_src.high_reg);
} else {
- StoreWordDisp(r_base, field_offset, rl_src.reg.GetReg());
+ StoreWordDisp(r_base, field_offset, rl_src.low_reg);
}
if (is_volatile) {
GenMemBarrier(kStoreLoad);
}
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
- MarkGCCard(rl_src.reg.GetReg(), r_base);
+ MarkGCCard(rl_src.low_reg, r_base);
}
FreeTemp(r_base);
} else {
@@ -492,7 +492,7 @@
// Fast path, static storage base is this method's class
RegLocation rl_method = LoadCurrMethod();
r_base = AllocTemp();
- LoadWordDisp(rl_method.reg.GetReg(),
+ LoadWordDisp(rl_method.low_reg,
mirror::ArtMethod::DeclaringClassOffset().Int32Value(), r_base);
} else {
// Medium path, static storage base in a different class which requires checks that the other
@@ -539,10 +539,10 @@
GenMemBarrier(kLoadLoad);
}
if (is_long_or_double) {
- LoadBaseDispWide(r_base, field_offset, rl_result.reg.GetReg(),
- rl_result.reg.GetHighReg(), INVALID_SREG);
+ LoadBaseDispWide(r_base, field_offset, rl_result.low_reg,
+ rl_result.high_reg, INVALID_SREG);
} else {
- LoadWordDisp(r_base, field_offset, rl_result.reg.GetReg());
+ LoadWordDisp(r_base, field_offset, rl_result.low_reg);
}
FreeTemp(r_base);
if (is_long_or_double) {
@@ -713,20 +713,20 @@
rl_obj = LoadValue(rl_obj, kCoreReg);
if (is_long_or_double) {
DCHECK(rl_dest.wide);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
if (cu_->instruction_set == kX86) {
rl_result = EvalLoc(rl_dest, reg_class, true);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
- LoadBaseDispWide(rl_obj.reg.GetReg(), field_offset, rl_result.reg.GetReg(),
- rl_result.reg.GetHighReg(), rl_obj.s_reg_low);
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+ LoadBaseDispWide(rl_obj.low_reg, field_offset, rl_result.low_reg,
+ rl_result.high_reg, rl_obj.s_reg_low);
if (is_volatile) {
GenMemBarrier(kLoadLoad);
}
} else {
int reg_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg.GetReg(), field_offset);
+ OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
rl_result = EvalLoc(rl_dest, reg_class, true);
- LoadBaseDispWide(reg_ptr, 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
+ LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
if (is_volatile) {
GenMemBarrier(kLoadLoad);
}
@@ -735,8 +735,8 @@
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, reg_class, true);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
- LoadBaseDisp(rl_obj.reg.GetReg(), field_offset, rl_result.reg.GetReg(),
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
+ LoadBaseDisp(rl_obj.low_reg, field_offset, rl_result.low_reg,
kWord, rl_obj.s_reg_low);
if (is_volatile) {
GenMemBarrier(kLoadLoad);
@@ -773,29 +773,29 @@
if (is_long_or_double) {
int reg_ptr;
rl_src = LoadValueWide(rl_src, kAnyReg);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
reg_ptr = AllocTemp();
- OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg.GetReg(), field_offset);
+ OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
if (is_volatile) {
GenMemBarrier(kStoreStore);
}
- StoreBaseDispWide(reg_ptr, 0, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
if (is_volatile) {
GenMemBarrier(kLoadLoad);
}
FreeTemp(reg_ptr);
} else {
rl_src = LoadValue(rl_src, reg_class);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
if (is_volatile) {
GenMemBarrier(kStoreStore);
}
- StoreBaseDisp(rl_obj.reg.GetReg(), field_offset, rl_src.reg.GetReg(), kWord);
+ StoreBaseDisp(rl_obj.low_reg, field_offset, rl_src.low_reg, kWord);
if (is_volatile) {
GenMemBarrier(kLoadLoad);
}
if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
- MarkGCCard(rl_src.reg.GetReg(), rl_obj.reg.GetReg());
+ MarkGCCard(rl_src.low_reg, rl_obj.low_reg);
}
}
} else {
@@ -829,23 +829,23 @@
// Call out to helper which resolves type and verifies access.
// Resolved type returned in kRet0.
CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
- type_idx, rl_method.reg.GetReg(), true);
+ type_idx, rl_method.low_reg, true);
RegLocation rl_result = GetReturn(false);
StoreValue(rl_dest, rl_result);
} else {
// We're don't need access checks, load type from dex cache
int32_t dex_cache_offset =
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value();
- LoadWordDisp(rl_method.reg.GetReg(), dex_cache_offset, res_reg);
+ LoadWordDisp(rl_method.low_reg, dex_cache_offset, res_reg);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
* type_idx);
- LoadWordDisp(res_reg, offset_of_type, rl_result.reg.GetReg());
+ LoadWordDisp(res_reg, offset_of_type, rl_result.low_reg);
if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
type_idx) || SLOW_TYPE_PATH) {
// Slow path, at runtime test if type is null and if so initialize
FlushAllRegs();
- LIR* branch = OpCmpImmBranch(kCondEq, rl_result.reg.GetReg(), 0, NULL);
+ LIR* branch = OpCmpImmBranch(kCondEq, rl_result.low_reg, 0, NULL);
LIR* cont = NewLIR0(kPseudoTargetLabel);
// Object to generate the slow path for class resolution.
@@ -861,8 +861,8 @@
GenerateTargetLabel();
m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx_,
- rl_method_.reg.GetReg(), true);
- m2l_->OpRegCopy(rl_result_.reg.GetReg(), m2l_->TargetReg(kRet0));
+ rl_method_.low_reg, true);
+ m2l_->OpRegCopy(rl_result_.low_reg, m2l_->TargetReg(kRet0));
m2l_->OpUnconditionalBranch(cont_);
}
@@ -900,8 +900,8 @@
int r_method;
if (rl_method.location == kLocPhysReg) {
// A temp would conflict with register use below.
- DCHECK(!IsTemp(rl_method.reg.GetReg()));
- r_method = rl_method.reg.GetReg();
+ DCHECK(!IsTemp(rl_method.low_reg));
+ r_method = rl_method.low_reg;
} else {
r_method = TargetReg(kArg2);
LoadCurrMethodDirect(r_method);
@@ -960,9 +960,9 @@
RegLocation rl_method = LoadCurrMethod();
int res_reg = AllocTemp();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadWordDisp(rl_method.reg.GetReg(),
+ LoadWordDisp(rl_method.low_reg,
mirror::ArtMethod::DexCacheStringsOffset().Int32Value(), res_reg);
- LoadWordDisp(res_reg, offset_of_string, rl_result.reg.GetReg());
+ LoadWordDisp(res_reg, offset_of_string, rl_result.low_reg);
StoreValue(rl_dest, rl_result);
}
}
@@ -1035,12 +1035,12 @@
RegLocation object = LoadValue(rl_src, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- int result_reg = rl_result.reg.GetReg();
- if (result_reg == object.reg.GetReg()) {
+ int result_reg = rl_result.low_reg;
+ if (result_reg == object.low_reg) {
result_reg = AllocTypedTemp(false, kCoreReg);
}
LoadConstant(result_reg, 0); // assume false
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg.GetReg(), 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.low_reg, 0, NULL);
int check_class = AllocTypedTemp(false, kCoreReg);
int object_class = AllocTypedTemp(false, kCoreReg);
@@ -1049,11 +1049,11 @@
if (use_declaring_class) {
LoadWordDisp(check_class, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
check_class);
- LoadWordDisp(object.reg.GetReg(), mirror::Object::ClassOffset().Int32Value(), object_class);
+ LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class);
} else {
LoadWordDisp(check_class, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
check_class);
- LoadWordDisp(object.reg.GetReg(), mirror::Object::ClassOffset().Int32Value(), object_class);
+ LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class);
int32_t offset_of_type =
mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
(sizeof(mirror::Class*) * type_idx);
@@ -1077,7 +1077,7 @@
FreeTemp(object_class);
FreeTemp(check_class);
if (IsTemp(result_reg)) {
- OpRegCopy(rl_result.reg.GetReg(), result_reg);
+ OpRegCopy(rl_result.low_reg, result_reg);
FreeTemp(result_reg);
}
StoreValue(rl_dest, rl_result);
@@ -1133,7 +1133,7 @@
RegLocation rl_result = GetReturn(false);
if (cu_->instruction_set == kMips) {
// On MIPS rArg0 != rl_result, place false in result if branch is taken.
- LoadConstant(rl_result.reg.GetReg(), 0);
+ LoadConstant(rl_result.low_reg, 0);
}
LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
@@ -1147,12 +1147,12 @@
if (cu_->instruction_set == kThumb2) {
OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same?
OpIT(kCondEq, "E"); // if-convert the test
- LoadConstant(rl_result.reg.GetReg(), 1); // .eq case - load true
- LoadConstant(rl_result.reg.GetReg(), 0); // .ne case - load false
+ LoadConstant(rl_result.low_reg, 1); // .eq case - load true
+ LoadConstant(rl_result.low_reg, 0); // .ne case - load false
} else {
- LoadConstant(rl_result.reg.GetReg(), 0); // ne case - load false
+ LoadConstant(rl_result.low_reg, 0); // ne case - load false
branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
- LoadConstant(rl_result.reg.GetReg(), 1); // eq case - load true
+ LoadConstant(rl_result.low_reg, 1); // eq case - load true
}
} else {
if (cu_->instruction_set == kThumb2) {
@@ -1169,7 +1169,7 @@
} else {
if (!type_known_abstract) {
/* Uses branchovers */
- LoadConstant(rl_result.reg.GetReg(), 1); // assume true
+ LoadConstant(rl_result.low_reg, 1); // assume true
branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
}
int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
@@ -1355,16 +1355,16 @@
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// The longs may overlap - use intermediate temp if so
- if ((rl_result.reg.GetReg() == rl_src1.reg.GetHighReg()) || (rl_result.reg.GetReg() == rl_src2.reg.GetHighReg())) {
+ if ((rl_result.low_reg == rl_src1.high_reg) || (rl_result.low_reg == rl_src2.high_reg)) {
int t_reg = AllocTemp();
- OpRegRegReg(first_op, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegRegReg(second_op, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
- OpRegCopy(rl_result.reg.GetReg(), t_reg);
+ OpRegRegReg(first_op, t_reg, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
+ OpRegCopy(rl_result.low_reg, t_reg);
FreeTemp(t_reg);
} else {
- OpRegRegReg(first_op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegRegReg(second_op, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(),
- rl_src2.reg.GetHighReg());
+ OpRegRegReg(first_op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg,
+ rl_src2.high_reg);
}
/*
* NOTE: If rl_dest refers to a frame variable in a large frame, the
@@ -1487,22 +1487,22 @@
if (unary) {
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg());
+ OpRegReg(op, rl_result.low_reg, rl_src1.low_reg);
} else {
if (shift_op) {
int t_reg = INVALID_REG;
rl_src2 = LoadValue(rl_src2, kCoreReg);
t_reg = AllocTemp();
- OpRegRegImm(kOpAnd, t_reg, rl_src2.reg.GetReg(), 31);
+ OpRegRegImm(kOpAnd, t_reg, rl_src2.low_reg, 31);
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), t_reg);
+ OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, t_reg);
FreeTemp(t_reg);
} else {
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
}
}
StoreValue(rl_dest, rl_result);
@@ -1512,9 +1512,9 @@
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
if (check_zero) {
- GenImmedCheck(kCondEq, rl_src2.reg.GetReg(), 0, kThrowDivZero);
+ GenImmedCheck(kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
}
- rl_result = GenDivRem(rl_dest, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), op == kOpDiv);
+ rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
done = true;
} else if (cu_->instruction_set == kThumb2) {
if (cu_->GetInstructionSetFeatures().HasDivideInstruction()) {
@@ -1523,9 +1523,9 @@
rl_src1 = LoadValue(rl_src1, kCoreReg);
rl_src2 = LoadValue(rl_src2, kCoreReg);
if (check_zero) {
- GenImmedCheck(kCondEq, rl_src2.reg.GetReg(), 0, kThrowDivZero);
+ GenImmedCheck(kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
}
- rl_result = GenDivRem(rl_dest, rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), op == kOpDiv);
+ rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
done = true;
}
}
@@ -1585,29 +1585,29 @@
int t_reg = AllocTemp();
if (lit == 2) {
// Division by 2 is by far the most common division by constant.
- OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetReg(), 32 - k);
- OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg.GetReg());
- OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), t_reg, k);
+ OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, 32 - k);
+ OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
+ OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
} else {
- OpRegRegImm(kOpAsr, t_reg, rl_src.reg.GetReg(), 31);
+ OpRegRegImm(kOpAsr, t_reg, rl_src.low_reg, 31);
OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
- OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.reg.GetReg());
- OpRegRegImm(kOpAsr, rl_result.reg.GetReg(), t_reg, k);
+ OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
+ OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
}
} else {
int t_reg1 = AllocTemp();
int t_reg2 = AllocTemp();
if (lit == 2) {
- OpRegRegImm(kOpLsr, t_reg1, rl_src.reg.GetReg(), 32 - k);
- OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg.GetReg());
+ OpRegRegImm(kOpLsr, t_reg1, rl_src.low_reg, 32 - k);
+ OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
- OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg2, t_reg1);
+ OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
} else {
- OpRegRegImm(kOpAsr, t_reg1, rl_src.reg.GetReg(), 31);
+ OpRegRegImm(kOpAsr, t_reg1, rl_src.low_reg, 31);
OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
- OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.reg.GetReg());
+ OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
- OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg2, t_reg1);
+ OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
}
}
StoreValue(rl_dest, rl_result);
@@ -1637,7 +1637,7 @@
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (power_of_two) {
// Shift.
- OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_src.reg.GetReg(), LowestSetBit(lit));
+ OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, LowestSetBit(lit));
} else if (pop_count_le2) {
// Shift and add and shift.
int first_bit = LowestSetBit(lit);
@@ -1648,8 +1648,8 @@
DCHECK(power_of_two_minus_one);
// TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
int t_reg = AllocTemp();
- OpRegRegImm(kOpLsl, t_reg, rl_src.reg.GetReg(), LowestSetBit(lit + 1));
- OpRegRegReg(kOpSub, rl_result.reg.GetReg(), t_reg, rl_src.reg.GetReg());
+ OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, LowestSetBit(lit + 1));
+ OpRegRegReg(kOpSub, rl_result.low_reg, t_reg, rl_src.low_reg);
}
StoreValue(rl_dest, rl_result);
return true;
@@ -1668,10 +1668,10 @@
rl_src = LoadValue(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (cu_->instruction_set == kThumb2) {
- OpRegRegImm(kOpRsub, rl_result.reg.GetReg(), rl_src.reg.GetReg(), lit);
+ OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, lit);
} else {
- OpRegReg(kOpNeg, rl_result.reg.GetReg(), rl_src.reg.GetReg());
- OpRegImm(kOpAdd, rl_result.reg.GetReg(), lit);
+ OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);
+ OpRegImm(kOpAdd, rl_result.low_reg, lit);
}
StoreValue(rl_dest, rl_result);
return;
@@ -1764,7 +1764,7 @@
bool done = false;
if (cu_->instruction_set == kMips) {
rl_src = LoadValue(rl_src, kCoreReg);
- rl_result = GenDivRemLit(rl_dest, rl_src.reg.GetReg(), lit, is_div);
+ rl_result = GenDivRemLit(rl_dest, rl_src.low_reg, lit, is_div);
done = true;
} else if (cu_->instruction_set == kX86) {
rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
@@ -1774,7 +1774,7 @@
// Use ARM SDIV instruction for division. For remainder we also need to
// calculate using a MUL and subtract.
rl_src = LoadValue(rl_src, kCoreReg);
- rl_result = GenDivRemLit(rl_dest, rl_src.reg.GetReg(), lit, is_div);
+ rl_result = GenDivRemLit(rl_dest, rl_src.low_reg, lit, is_div);
done = true;
}
}
@@ -1800,9 +1800,9 @@
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Avoid shifts by literal 0 - no support in Thumb. Change to copy.
if (shift_op && (lit == 0)) {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegCopy(rl_result.low_reg, rl_src.low_reg);
} else {
- OpRegRegImm(op, rl_result.reg.GetReg(), rl_src.reg.GetReg(), lit);
+ OpRegRegImm(op, rl_result.low_reg, rl_src.low_reg, lit);
}
StoreValue(rl_dest, rl_result);
}
@@ -1822,15 +1822,15 @@
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Check for destructive overlap
- if (rl_result.reg.GetReg() == rl_src2.reg.GetHighReg()) {
+ if (rl_result.low_reg == rl_src2.high_reg) {
int t_reg = AllocTemp();
- OpRegCopy(t_reg, rl_src2.reg.GetHighReg());
- OpRegReg(kOpMvn, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegReg(kOpMvn, rl_result.reg.GetHighReg(), t_reg);
+ OpRegCopy(t_reg, rl_src2.high_reg);
+ OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
+ OpRegReg(kOpMvn, rl_result.high_reg, t_reg);
FreeTemp(t_reg);
} else {
- OpRegReg(kOpMvn, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegReg(kOpMvn, rl_result.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
+ OpRegReg(kOpMvn, rl_result.high_reg, rl_src2.high_reg);
}
StoreValueWide(rl_dest, rl_result);
return;
@@ -2003,7 +2003,7 @@
/* Generic code for generating a wide constant into a VR. */
void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
- LoadConstantWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), value);
+ LoadConstantWide(rl_result.low_reg, rl_result.high_reg, value);
StoreValueWide(rl_dest, rl_result);
}
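
For reference, the power-of-two divide/remainder hunks above bias a negative dividend by (lit - 1) so that the arithmetic right shift rounds toward zero, as Java division requires. A minimal standalone sketch of the arithmetic those sequences compute (the function names are illustrative only and not part of the tree):

#include <cstdint>

static int32_t DivPow2(int32_t x, int k) {  // lit == 1 << k, 1 <= k <= 30 for this sketch
  // bias is 0 for non-negative x and (lit - 1) for negative x; assumes arithmetic >> on int32_t.
  int32_t bias = static_cast<int32_t>(static_cast<uint32_t>(x >> 31) >> (32 - k));
  return (x + bias) >> k;
}

static int32_t RemPow2(int32_t x, int k) {
  int32_t bias = static_cast<int32_t>(static_cast<uint32_t>(x >> 31) >> (32 - k));
  return ((x + bias) & ((1 << k) - 1)) - bias;  // mask == lit - 1
}
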
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index dd3d466..35d193c 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -284,9 +284,9 @@
*/
RegLocation rl_src = rl_method;
rl_src.location = kLocPhysReg;
- rl_src.reg = RegStorage(RegStorage::k32BitSolo, TargetReg(kArg0));
+ rl_src.low_reg = TargetReg(kArg0);
rl_src.home = false;
- MarkLive(rl_src.reg.GetReg(), rl_src.s_reg_low);
+ MarkLive(rl_src.low_reg, rl_src.s_reg_low);
StoreValue(rl_method, rl_src);
// If Method* has been promoted, explicitly flush
if (rl_method.location == kLocPhysReg) {
@@ -680,7 +680,7 @@
// Wide spans, we need the 2nd half of uses[2].
rl_arg = UpdateLocWide(rl_use2);
if (rl_arg.location == kLocPhysReg) {
- reg = rl_arg.reg.GetHighReg();
+ reg = rl_arg.high_reg;
} else {
// kArg2 & rArg3 can safely be used here
reg = TargetReg(kArg3);
@@ -701,10 +701,8 @@
rl_arg = info->args[next_use];
rl_arg = UpdateRawLoc(rl_arg);
if (rl_arg.location == kLocPhysReg) {
- low_reg = rl_arg.reg.GetReg();
- if (rl_arg.wide) {
- high_reg = rl_arg.reg.GetHighReg();
- }
+ low_reg = rl_arg.low_reg;
+ high_reg = rl_arg.high_reg;
} else {
low_reg = TargetReg(kArg2);
if (rl_arg.wide) {
@@ -777,14 +775,14 @@
loc = UpdateLocWide(loc);
if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low),
- loc.reg.GetReg(), loc.reg.GetHighReg());
+ loc.low_reg, loc.high_reg);
}
next_arg += 2;
} else {
loc = UpdateLoc(loc);
if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
- loc.reg.GetReg(), kWord);
+ loc.low_reg, kWord);
}
next_arg++;
}
@@ -985,7 +983,7 @@
rl_idx = LoadValue(rl_idx, kCoreReg);
}
int reg_max;
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags);
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
LIR* launch_pad = NULL;
int reg_off = INVALID_REG;
@@ -995,15 +993,15 @@
reg_ptr = AllocTemp();
if (range_check) {
reg_max = AllocTemp();
- LoadWordDisp(rl_obj.reg.GetReg(), count_offset, reg_max);
+ LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
}
- LoadWordDisp(rl_obj.reg.GetReg(), offset_offset, reg_off);
- LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr);
+ LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
+ LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
if (range_check) {
// Set up a launch pad to allow retry in case of bounds violation
launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
intrinsic_launchpads_.Insert(launch_pad);
- OpRegReg(kOpCmp, rl_idx.reg.GetReg(), reg_max);
+ OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
FreeTemp(reg_max);
OpCondBranch(kCondUge, launch_pad);
}
@@ -1015,33 +1013,33 @@
launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
intrinsic_launchpads_.Insert(launch_pad);
if (rl_idx.is_const) {
- OpCmpMemImmBranch(kCondUlt, INVALID_REG, rl_obj.reg.GetReg(), count_offset,
+ OpCmpMemImmBranch(kCondUlt, INVALID_REG, rl_obj.low_reg, count_offset,
mir_graph_->ConstantValue(rl_idx.orig_sreg), launch_pad);
} else {
- OpRegMem(kOpCmp, rl_idx.reg.GetReg(), rl_obj.reg.GetReg(), count_offset);
+ OpRegMem(kOpCmp, rl_idx.low_reg, rl_obj.low_reg, count_offset);
OpCondBranch(kCondUge, launch_pad);
}
}
reg_off = AllocTemp();
reg_ptr = AllocTemp();
- LoadWordDisp(rl_obj.reg.GetReg(), offset_offset, reg_off);
- LoadWordDisp(rl_obj.reg.GetReg(), value_offset, reg_ptr);
+ LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
+ LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
}
if (rl_idx.is_const) {
OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
} else {
- OpRegReg(kOpAdd, reg_off, rl_idx.reg.GetReg());
+ OpRegReg(kOpAdd, reg_off, rl_idx.low_reg);
}
- FreeTemp(rl_obj.reg.GetReg());
- if (rl_idx.location == kLocPhysReg) {
- FreeTemp(rl_idx.reg.GetReg());
+ FreeTemp(rl_obj.low_reg);
+ if (rl_idx.low_reg != INVALID_REG) {
+ FreeTemp(rl_idx.low_reg);
}
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (cu_->instruction_set != kX86) {
- LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg.GetReg(), 1, kUnsignedHalf);
+ LoadBaseIndexed(reg_ptr, reg_off, rl_result.low_reg, 1, kUnsignedHalf);
} else {
- LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg.GetReg(),
+ LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.low_reg,
INVALID_REG, kUnsignedHalf, INVALID_SREG);
}
FreeTemp(reg_off);
@@ -1066,18 +1064,18 @@
rl_obj = LoadValue(rl_obj, kCoreReg);
RegLocation rl_dest = InlineTarget(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- GenNullCheck(rl_obj.s_reg_low, rl_obj.reg.GetReg(), info->opt_flags);
- LoadWordDisp(rl_obj.reg.GetReg(), mirror::String::CountOffset().Int32Value(), rl_result.reg.GetReg());
+ GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
+ LoadWordDisp(rl_obj.low_reg, mirror::String::CountOffset().Int32Value(), rl_result.low_reg);
if (is_empty) {
// dst = (dst == 0);
if (cu_->instruction_set == kThumb2) {
int t_reg = AllocTemp();
- OpRegReg(kOpNeg, t_reg, rl_result.reg.GetReg());
- OpRegRegReg(kOpAdc, rl_result.reg.GetReg(), rl_result.reg.GetReg(), t_reg);
+ OpRegReg(kOpNeg, t_reg, rl_result.low_reg);
+ OpRegRegReg(kOpAdc, rl_result.low_reg, rl_result.low_reg, t_reg);
} else {
DCHECK_EQ(cu_->instruction_set, kX86);
- OpRegImm(kOpSub, rl_result.reg.GetReg(), 1);
- OpRegImm(kOpLsr, rl_result.reg.GetReg(), 31);
+ OpRegImm(kOpSub, rl_result.low_reg, 1);
+ OpRegImm(kOpLsr, rl_result.low_reg, 31);
}
}
StoreValue(rl_dest, rl_result);
@@ -1094,15 +1092,15 @@
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (size == kLong) {
RegLocation rl_i = LoadValueWide(rl_src_i, kCoreReg);
- int r_i_low = rl_i.reg.GetReg();
- if (rl_i.reg.GetReg() == rl_result.reg.GetReg()) {
- // First REV shall clobber rl_result.reg.GetReg(), save the value in a temp for the second REV.
+ int r_i_low = rl_i.low_reg;
+ if (rl_i.low_reg == rl_result.low_reg) {
+ // First REV shall clobber rl_result.low_reg, save the value in a temp for the second REV.
r_i_low = AllocTemp();
- OpRegCopy(r_i_low, rl_i.reg.GetReg());
+ OpRegCopy(r_i_low, rl_i.low_reg);
}
- OpRegReg(kOpRev, rl_result.reg.GetReg(), rl_i.reg.GetHighReg());
- OpRegReg(kOpRev, rl_result.reg.GetHighReg(), r_i_low);
- if (rl_i.reg.GetReg() == rl_result.reg.GetReg()) {
+ OpRegReg(kOpRev, rl_result.low_reg, rl_i.high_reg);
+ OpRegReg(kOpRev, rl_result.high_reg, r_i_low);
+ if (rl_i.low_reg == rl_result.low_reg) {
FreeTemp(r_i_low);
}
StoreValueWide(rl_dest, rl_result);
@@ -1110,7 +1108,7 @@
DCHECK(size == kWord || size == kSignedHalf);
OpKind op = (size == kWord) ? kOpRev : kOpRevsh;
RegLocation rl_i = LoadValue(rl_src_i, kCoreReg);
- OpRegReg(op, rl_result.reg.GetReg(), rl_i.reg.GetReg());
+ OpRegReg(op, rl_result.low_reg, rl_i.low_reg);
StoreValue(rl_dest, rl_result);
}
return true;
@@ -1127,9 +1125,9 @@
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
int sign_reg = AllocTemp();
// abs(x) = y<=x>>31, (x+y)^y.
- OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetReg(), 31);
- OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), sign_reg);
- OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
+ OpRegRegImm(kOpAsr, sign_reg, rl_src.low_reg, 31);
+ OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
+ OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
StoreValue(rl_dest, rl_result);
return true;
}
@@ -1146,11 +1144,11 @@
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
int sign_reg = AllocTemp();
// abs(x) = y<=x>>31, (x+y)^y.
- OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHighReg(), 31);
- OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), sign_reg);
- OpRegRegReg(kOpAdc, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), sign_reg);
- OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
- OpRegReg(kOpXor, rl_result.reg.GetHighReg(), sign_reg);
+ OpRegRegImm(kOpAsr, sign_reg, rl_src.high_reg, 31);
+ OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
+ OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, sign_reg);
+ OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
+ OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
StoreValueWide(rl_dest, rl_result);
return true;
} else {
@@ -1160,16 +1158,16 @@
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_dest = InlineTargetWide(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegCopyWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
- FreeTemp(rl_src.reg.GetReg());
- FreeTemp(rl_src.reg.GetHighReg());
+ OpRegCopyWide(rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg);
+ FreeTemp(rl_src.low_reg);
+ FreeTemp(rl_src.high_reg);
int sign_reg = AllocTemp();
// abs(x) = y<=x>>31, (x+y)^y.
- OpRegRegImm(kOpAsr, sign_reg, rl_result.reg.GetHighReg(), 31);
- OpRegReg(kOpAdd, rl_result.reg.GetReg(), sign_reg);
- OpRegReg(kOpAdc, rl_result.reg.GetHighReg(), sign_reg);
- OpRegReg(kOpXor, rl_result.reg.GetReg(), sign_reg);
- OpRegReg(kOpXor, rl_result.reg.GetHighReg(), sign_reg);
+ OpRegRegImm(kOpAsr, sign_reg, rl_result.high_reg, 31);
+ OpRegReg(kOpAdd, rl_result.low_reg, sign_reg);
+ OpRegReg(kOpAdc, rl_result.high_reg, sign_reg);
+ OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
+ OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
StoreValueWide(rl_dest, rl_result);
return true;
}
@@ -1186,7 +1184,7 @@
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
int signMask = AllocTemp();
LoadConstant(signMask, 0x7fffffff);
- OpRegRegReg(kOpAnd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), signMask);
+ OpRegRegReg(kOpAnd, rl_result.low_reg, rl_src.low_reg, signMask);
FreeTemp(signMask);
StoreValue(rl_dest, rl_result);
return true;
@@ -1201,12 +1199,12 @@
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_dest = InlineTargetWide(info);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegCopyWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
- FreeTemp(rl_src.reg.GetReg());
- FreeTemp(rl_src.reg.GetHighReg());
+ OpRegCopyWide(rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg);
+ FreeTemp(rl_src.low_reg);
+ FreeTemp(rl_src.high_reg);
int signMask = AllocTemp();
LoadConstant(signMask, 0x7fffffff);
- OpRegReg(kOpAnd, rl_result.reg.GetHighReg(), signMask);
+ OpRegReg(kOpAnd, rl_result.high_reg, signMask);
FreeTemp(signMask);
StoreValueWide(rl_dest, rl_result);
return true;
@@ -1318,10 +1316,10 @@
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
ThreadOffset offset = Thread::PeerOffset();
if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
- LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg.GetReg());
+ LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.low_reg);
} else {
CHECK(cu_->instruction_set == kX86);
- reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg.GetReg(), offset);
+ reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset);
}
StoreValue(rl_dest, rl_result);
return true;
@@ -1345,11 +1343,11 @@
RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_long) {
- OpRegReg(kOpAdd, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
- LoadBaseDispWide(rl_object.reg.GetReg(), 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
+ OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
+ LoadBaseDispWide(rl_object.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
StoreValueWide(rl_dest, rl_result);
} else {
- LoadBaseIndexed(rl_object.reg.GetReg(), rl_offset.reg.GetReg(), rl_result.reg.GetReg(), 0, kWord);
+ LoadBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_result.low_reg, 0, kWord);
StoreValue(rl_dest, rl_result);
}
return true;
@@ -1374,20 +1372,20 @@
RegLocation rl_value;
if (is_long) {
rl_value = LoadValueWide(rl_src_value, kCoreReg);
- OpRegReg(kOpAdd, rl_object.reg.GetReg(), rl_offset.reg.GetReg());
- StoreBaseDispWide(rl_object.reg.GetReg(), 0, rl_value.reg.GetReg(), rl_value.reg.GetHighReg());
+ OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
+ StoreBaseDispWide(rl_object.low_reg, 0, rl_value.low_reg, rl_value.high_reg);
} else {
rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseIndexed(rl_object.reg.GetReg(), rl_offset.reg.GetReg(), rl_value.reg.GetReg(), 0, kWord);
+ StoreBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_value.low_reg, 0, kWord);
}
// Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
- FreeTemp(rl_offset.reg.GetReg());
+ FreeTemp(rl_offset.low_reg);
if (is_volatile) {
GenMemBarrier(kStoreLoad);
}
if (is_object) {
- MarkGCCard(rl_value.reg.GetReg(), rl_object.reg.GetReg());
+ MarkGCCard(rl_value.low_reg, rl_object.low_reg);
}
return true;
}
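
The inlined abs() intrinsics above use the branchless form noted in their comments: with y = x >> 31 (all ones when x is negative), abs(x) = (x + y) ^ y. A self-contained sketch of both widths (names illustrative only):

#include <cstdint>

static int32_t AbsInt(int32_t x) {
  int32_t y = x >> 31;   // 0 if x >= 0, -1 otherwise (arithmetic shift assumed)
  return (x + y) ^ y;    // x unchanged when y == 0; ~(x - 1) == -x when y == -1
}

static int64_t AbsLong(int64_t x) {
  int64_t y = x >> 63;   // the generated 32-bit code propagates the carry with adc across the register pair
  return (x + y) ^ y;
}
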
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 53c47f5..f7c2821 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -92,7 +92,7 @@
void Mir2Lir::LoadValueDirect(RegLocation rl_src, int r_dest) {
rl_src = UpdateLoc(rl_src);
if (rl_src.location == kLocPhysReg) {
- OpRegCopy(r_dest, rl_src.reg.GetReg());
+ OpRegCopy(r_dest, rl_src.low_reg);
} else if (IsInexpensiveConstant(rl_src)) {
LoadConstantNoClobber(r_dest, mir_graph_->ConstantValue(rl_src));
} else {
@@ -122,7 +122,7 @@
int reg_hi) {
rl_src = UpdateLocWide(rl_src);
if (rl_src.location == kLocPhysReg) {
- OpRegCopyWide(reg_lo, reg_hi, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ OpRegCopyWide(reg_lo, reg_hi, rl_src.low_reg, rl_src.high_reg);
} else if (IsInexpensiveConstant(rl_src)) {
LoadConstantWide(reg_lo, reg_hi, mir_graph_->ConstantValueWide(rl_src));
} else {
@@ -150,9 +150,9 @@
RegLocation Mir2Lir::LoadValue(RegLocation rl_src, RegisterClass op_kind) {
rl_src = EvalLoc(rl_src, op_kind, false);
if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) {
- LoadValueDirect(rl_src, rl_src.reg.GetReg());
+ LoadValueDirect(rl_src, rl_src.low_reg);
rl_src.location = kLocPhysReg;
- MarkLive(rl_src.reg.GetReg(), rl_src.s_reg_low);
+ MarkLive(rl_src.low_reg, rl_src.s_reg_low);
}
return rl_src;
}
@@ -175,34 +175,34 @@
rl_src = UpdateLoc(rl_src);
rl_dest = UpdateLoc(rl_dest);
if (rl_src.location == kLocPhysReg) {
- if (IsLive(rl_src.reg.GetReg()) ||
- IsPromoted(rl_src.reg.GetReg()) ||
+ if (IsLive(rl_src.low_reg) ||
+ IsPromoted(rl_src.low_reg) ||
(rl_dest.location == kLocPhysReg)) {
// Src is live/promoted or Dest has assigned reg.
rl_dest = EvalLoc(rl_dest, kAnyReg, false);
- OpRegCopy(rl_dest.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegCopy(rl_dest.low_reg, rl_src.low_reg);
} else {
// Just re-assign the registers. Dest gets Src's regs
- rl_dest.reg = rl_src.reg;
- Clobber(rl_src.reg.GetReg());
+ rl_dest.low_reg = rl_src.low_reg;
+ Clobber(rl_src.low_reg);
}
} else {
// Load Src either into promoted Dest or temps allocated for Dest
rl_dest = EvalLoc(rl_dest, kAnyReg, false);
- LoadValueDirect(rl_src, rl_dest.reg.GetReg());
+ LoadValueDirect(rl_src, rl_dest.low_reg);
}
// Dest is now live and dirty (until/if we flush it to home location)
- MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low);
+ MarkLive(rl_dest.low_reg, rl_dest.s_reg_low);
MarkDirty(rl_dest);
ResetDefLoc(rl_dest);
- if (IsDirty(rl_dest.reg.GetReg()) &&
+ if (IsDirty(rl_dest.low_reg) &&
oat_live_out(rl_dest.s_reg_low)) {
def_start = last_lir_insn_;
StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
- rl_dest.reg.GetReg(), kWord);
+ rl_dest.low_reg, kWord);
MarkClean(rl_dest);
def_end = last_lir_insn_;
if (!rl_dest.ref) {
@@ -216,10 +216,10 @@
DCHECK(rl_src.wide);
rl_src = EvalLoc(rl_src, op_kind, false);
if (IsInexpensiveConstant(rl_src) || rl_src.location != kLocPhysReg) {
- LoadValueDirectWide(rl_src, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ LoadValueDirectWide(rl_src, rl_src.low_reg, rl_src.high_reg);
rl_src.location = kLocPhysReg;
- MarkLive(rl_src.reg.GetReg(), rl_src.s_reg_low);
- MarkLive(rl_src.reg.GetHighReg(), GetSRegHi(rl_src.s_reg_low));
+ MarkLive(rl_src.low_reg, rl_src.s_reg_low);
+ MarkLive(rl_src.high_reg, GetSRegHi(rl_src.s_reg_low));
}
return rl_src;
}
@@ -237,57 +237,57 @@
}
LIR* def_start;
LIR* def_end;
- DCHECK((rl_src.location != kLocPhysReg) ||
- (IsFpReg(rl_src.reg.GetReg()) == IsFpReg(rl_src.reg.GetHighReg())));
+ DCHECK_EQ(IsFpReg(rl_src.low_reg), IsFpReg(rl_src.high_reg));
DCHECK(rl_dest.wide);
DCHECK(rl_src.wide);
if (rl_src.location == kLocPhysReg) {
- if (IsLive(rl_src.reg.GetReg()) ||
- IsLive(rl_src.reg.GetHighReg()) ||
- IsPromoted(rl_src.reg.GetReg()) ||
- IsPromoted(rl_src.reg.GetHighReg()) ||
+ if (IsLive(rl_src.low_reg) ||
+ IsLive(rl_src.high_reg) ||
+ IsPromoted(rl_src.low_reg) ||
+ IsPromoted(rl_src.high_reg) ||
(rl_dest.location == kLocPhysReg)) {
// Src is live or promoted or Dest has assigned reg.
rl_dest = EvalLoc(rl_dest, kAnyReg, false);
- OpRegCopyWide(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(),
- rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ OpRegCopyWide(rl_dest.low_reg, rl_dest.high_reg,
+ rl_src.low_reg, rl_src.high_reg);
} else {
// Just re-assign the registers. Dest gets Src's regs
- rl_dest.reg = rl_src.reg;
- Clobber(rl_src.reg.GetReg());
- Clobber(rl_src.reg.GetHighReg());
+ rl_dest.low_reg = rl_src.low_reg;
+ rl_dest.high_reg = rl_src.high_reg;
+ Clobber(rl_src.low_reg);
+ Clobber(rl_src.high_reg);
}
} else {
// Load Src either into promoted Dest or temps allocated for Dest
rl_dest = EvalLoc(rl_dest, kAnyReg, false);
- LoadValueDirectWide(rl_src, rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
+ LoadValueDirectWide(rl_src, rl_dest.low_reg, rl_dest.high_reg);
}
// Dest is now live and dirty (until/if we flush it to home location)
- MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low);
+ MarkLive(rl_dest.low_reg, rl_dest.s_reg_low);
// Does this wide value live in two registers (or one vector one)?
- if (rl_dest.reg.GetReg() != rl_dest.reg.GetHighReg()) {
- MarkLive(rl_dest.reg.GetHighReg(), GetSRegHi(rl_dest.s_reg_low));
+ if (rl_dest.low_reg != rl_dest.high_reg) {
+ MarkLive(rl_dest.high_reg, GetSRegHi(rl_dest.s_reg_low));
MarkDirty(rl_dest);
- MarkPair(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
+ MarkPair(rl_dest.low_reg, rl_dest.high_reg);
} else {
// This must be an x86 vector register value,
- DCHECK(IsFpReg(rl_dest.reg.GetReg()) && (cu_->instruction_set == kX86));
+ DCHECK(IsFpReg(rl_dest.low_reg) && (cu_->instruction_set == kX86));
MarkDirty(rl_dest);
}
ResetDefLocWide(rl_dest);
- if ((IsDirty(rl_dest.reg.GetReg()) ||
- IsDirty(rl_dest.reg.GetHighReg())) &&
+ if ((IsDirty(rl_dest.low_reg) ||
+ IsDirty(rl_dest.high_reg)) &&
(oat_live_out(rl_dest.s_reg_low) ||
oat_live_out(GetSRegHi(rl_dest.s_reg_low)))) {
def_start = last_lir_insn_;
DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
StoreBaseDispWide(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
- rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
+ rl_dest.low_reg, rl_dest.high_reg);
MarkClean(rl_dest);
def_end = last_lir_insn_;
MarkDefWide(rl_dest, def_start, def_end);
@@ -298,25 +298,25 @@
DCHECK_EQ(rl_src.location, kLocPhysReg);
if (rl_dest.location == kLocPhysReg) {
- OpRegCopy(rl_dest.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegCopy(rl_dest.low_reg, rl_src.low_reg);
} else {
// Just re-assign the register. Dest gets Src's reg.
+ rl_dest.low_reg = rl_src.low_reg;
rl_dest.location = kLocPhysReg;
- rl_dest.reg = rl_src.reg;
- Clobber(rl_src.reg.GetReg());
+ Clobber(rl_src.low_reg);
}
// Dest is now live and dirty (until/if we flush it to home location)
- MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low);
+ MarkLive(rl_dest.low_reg, rl_dest.s_reg_low);
MarkDirty(rl_dest);
ResetDefLoc(rl_dest);
- if (IsDirty(rl_dest.reg.GetReg()) &&
+ if (IsDirty(rl_dest.low_reg) &&
oat_live_out(rl_dest.s_reg_low)) {
LIR *def_start = last_lir_insn_;
StoreBaseDisp(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
- rl_dest.reg.GetReg(), kWord);
+ rl_dest.low_reg, kWord);
MarkClean(rl_dest);
LIR *def_end = last_lir_insn_;
if (!rl_dest.ref) {
@@ -327,45 +327,46 @@
}
void Mir2Lir::StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src) {
- DCHECK_EQ(IsFpReg(rl_src.reg.GetReg()), IsFpReg(rl_src.reg.GetHighReg()));
+ DCHECK_EQ(IsFpReg(rl_src.low_reg), IsFpReg(rl_src.high_reg));
DCHECK(rl_dest.wide);
DCHECK(rl_src.wide);
DCHECK_EQ(rl_src.location, kLocPhysReg);
if (rl_dest.location == kLocPhysReg) {
- OpRegCopyWide(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ OpRegCopyWide(rl_dest.low_reg, rl_dest.high_reg, rl_src.low_reg, rl_src.high_reg);
} else {
// Just re-assign the registers. Dest gets Src's regs.
+ rl_dest.low_reg = rl_src.low_reg;
+ rl_dest.high_reg = rl_src.high_reg;
rl_dest.location = kLocPhysReg;
- rl_dest.reg = rl_src.reg;
- Clobber(rl_src.reg.GetReg());
- Clobber(rl_src.reg.GetHighReg());
+ Clobber(rl_src.low_reg);
+ Clobber(rl_src.high_reg);
}
// Dest is now live and dirty (until/if we flush it to home location).
- MarkLive(rl_dest.reg.GetReg(), rl_dest.s_reg_low);
+ MarkLive(rl_dest.low_reg, rl_dest.s_reg_low);
// Does this wide value live in two registers (or one vector one)?
- if (rl_dest.reg.GetReg() != rl_dest.reg.GetHighReg()) {
- MarkLive(rl_dest.reg.GetHighReg(), GetSRegHi(rl_dest.s_reg_low));
+ if (rl_dest.low_reg != rl_dest.high_reg) {
+ MarkLive(rl_dest.high_reg, GetSRegHi(rl_dest.s_reg_low));
MarkDirty(rl_dest);
- MarkPair(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
+ MarkPair(rl_dest.low_reg, rl_dest.high_reg);
} else {
// This must be an x86 vector register value,
- DCHECK(IsFpReg(rl_dest.reg.GetReg()) && (cu_->instruction_set == kX86));
+ DCHECK(IsFpReg(rl_dest.low_reg) && (cu_->instruction_set == kX86));
MarkDirty(rl_dest);
}
ResetDefLocWide(rl_dest);
- if ((IsDirty(rl_dest.reg.GetReg()) ||
- IsDirty(rl_dest.reg.GetHighReg())) &&
+ if ((IsDirty(rl_dest.low_reg) ||
+ IsDirty(rl_dest.high_reg)) &&
(oat_live_out(rl_dest.s_reg_low) ||
oat_live_out(GetSRegHi(rl_dest.s_reg_low)))) {
LIR *def_start = last_lir_insn_;
DCHECK_EQ((mir_graph_->SRegToVReg(rl_dest.s_reg_low)+1),
mir_graph_->SRegToVReg(GetSRegHi(rl_dest.s_reg_low)));
StoreBaseDispWide(TargetReg(kSp), SRegOffset(rl_dest.s_reg_low),
- rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg());
+ rl_dest.low_reg, rl_dest.high_reg);
MarkClean(rl_dest);
LIR *def_end = last_lir_insn_;
MarkDefWide(rl_dest, def_start, def_end);
@@ -384,13 +385,14 @@
RegLocation Mir2Lir::ForceTemp(RegLocation loc) {
DCHECK(!loc.wide);
DCHECK(loc.location == kLocPhysReg);
- DCHECK(!IsFpReg(loc.reg.GetReg()));
- if (IsTemp(loc.reg.GetReg())) {
- Clobber(loc.reg.GetReg());
+ DCHECK(!IsFpReg(loc.low_reg));
+ DCHECK(!IsFpReg(loc.high_reg));
+ if (IsTemp(loc.low_reg)) {
+ Clobber(loc.low_reg);
} else {
int temp_low = AllocTemp();
- OpRegCopy(temp_low, loc.reg.GetReg());
- loc.reg.SetReg(temp_low);
+ OpRegCopy(temp_low, loc.low_reg);
+ loc.low_reg = temp_low;
}
// Ensure that this doesn't represent the original SR any more.
@@ -401,21 +403,21 @@
RegLocation Mir2Lir::ForceTempWide(RegLocation loc) {
DCHECK(loc.wide);
DCHECK(loc.location == kLocPhysReg);
- DCHECK(!IsFpReg(loc.reg.GetReg()));
- DCHECK(!IsFpReg(loc.reg.GetHighReg()));
- if (IsTemp(loc.reg.GetReg())) {
- Clobber(loc.reg.GetReg());
+ DCHECK(!IsFpReg(loc.low_reg));
+ DCHECK(!IsFpReg(loc.high_reg));
+ if (IsTemp(loc.low_reg)) {
+ Clobber(loc.low_reg);
} else {
int temp_low = AllocTemp();
- OpRegCopy(temp_low, loc.reg.GetReg());
- loc.reg.SetReg(temp_low);
+ OpRegCopy(temp_low, loc.low_reg);
+ loc.low_reg = temp_low;
}
- if (IsTemp(loc.reg.GetHighReg())) {
- Clobber(loc.reg.GetHighReg());
+ if (IsTemp(loc.high_reg)) {
+ Clobber(loc.high_reg);
} else {
int temp_high = AllocTemp();
- OpRegCopy(temp_high, loc.reg.GetHighReg());
- loc.reg.SetHighReg(temp_high);
+ OpRegCopy(temp_high, loc.high_reg);
+ loc.high_reg = temp_high;
}
// Ensure that this doesn't represent the original SR any more.
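
The LoadValue/EvalLoc/StoreValue helpers touched above are used in the same basic shape throughout this patch. A typical lowering sequence for a simple unary operation, as it appears at the call sites (a usage sketch, not new code in the tree):

  rl_src = LoadValue(rl_src, kCoreReg);                       // value now lives in rl_src.low_reg
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);   // pick or assign the result register
  OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);        // emit the actual operation
  StoreValue(rl_dest, rl_result);                             // mark live/dirty; flush to the frame only if dirty and live-out
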
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 88f46fd..a663519 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -114,7 +114,7 @@
LIR* exit_branch = OpCmpBranch(kCondEq, rBase, rEnd, NULL);
LoadWordDisp(rBase, 0, r_key);
OpRegImm(kOpAdd, rBase, 8);
- OpCmpBranch(kCondNe, rl_src.reg.GetReg(), r_key, loop_label);
+ OpCmpBranch(kCondNe, rl_src.low_reg, r_key, loop_label);
int r_disp = AllocTemp();
LoadWordDisp(rBase, -4, r_disp);
OpRegRegReg(kOpAdd, r_RA, r_RA, r_disp);
@@ -162,7 +162,7 @@
bool large_bias = false;
int r_key;
if (low_key == 0) {
- r_key = rl_src.reg.GetReg();
+ r_key = rl_src.low_reg;
} else if ((low_key & 0xffff) != low_key) {
r_key = AllocTemp();
LoadConstant(r_key, low_key);
@@ -179,9 +179,9 @@
NewLIR0(kMipsNop);
} else {
if (large_bias) {
- OpRegRegReg(kOpSub, r_key, rl_src.reg.GetReg(), r_key);
+ OpRegRegReg(kOpSub, r_key, rl_src.low_reg, r_key);
} else {
- OpRegRegImm(kOpSub, r_key, rl_src.reg.GetReg(), low_key);
+ OpRegRegImm(kOpSub, r_key, rl_src.low_reg, low_key);
}
}
GenBarrier(); // Scheduling barrier
@@ -263,7 +263,7 @@
int ex_offset = Thread::ExceptionOffset().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
int reset_reg = AllocTemp();
- LoadWordDisp(rMIPS_SELF, ex_offset, rl_result.reg.GetReg());
+ LoadWordDisp(rMIPS_SELF, ex_offset, rl_result.low_reg);
LoadConstant(reset_reg, 0);
StoreWordDisp(rMIPS_SELF, ex_offset, reset_reg);
FreeTemp(reset_reg);
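
The sparse-switch lowering reverted above walks a table of interleaved (key, displacement) pairs at run time, advancing 8 bytes per entry and adding the matching displacement to the anchor address in r_RA. What that emitted loop computes (a behavioural sketch, not compiler code; names are illustrative):

#include <cstdint>

struct SwitchEntry { int32_t key; int32_t disp; };  // 8-byte stride, matching OpRegImm(kOpAdd, rBase, 8)

static uintptr_t SparseSwitchTarget(const SwitchEntry* table, const SwitchEntry* table_end,
                                    int32_t value, uintptr_t anchor, uintptr_t fallthrough) {
  for (const SwitchEntry* p = table; p != table_end; ++p) {
    if (p->key == value) {
      return anchor + p->disp;  // displacement is relative to the anchor added into r_RA
    }
  }
  return fallthrough;           // exit_branch: no key matched
}
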
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 61eb68d..dad8a3b 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -49,7 +49,7 @@
bool IsFpReg(int reg);
bool SameRegType(int reg1, int reg2);
int AllocTypedTemp(bool fp_hint, int reg_class);
- RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
+ int AllocTypedTempPair(bool fp_hint, int reg_class);
int S2d(int low_reg, int high_reg);
int TargetReg(SpecialTargetRegister reg);
int GetArgMappingToPhysicalReg(int arg_num);
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index cf4f19f..9e2fea9 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -64,7 +64,7 @@
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ NewLIR3(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
StoreValue(rl_dest, rl_result);
}
@@ -111,8 +111,8 @@
rl_result = EvalLoc(rl_dest, kFPReg, true);
DCHECK(rl_dest.wide);
DCHECK(rl_result.wide);
- NewLIR3(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
- S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
+ NewLIR3(op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg),
+ S2d(rl_src2.low_reg, rl_src2.high_reg));
StoreValueWide(rl_dest, rl_result);
}
@@ -157,18 +157,18 @@
}
if (rl_src.wide) {
rl_src = LoadValueWide(rl_src, kFPReg);
- src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
} else {
rl_src = LoadValue(rl_src, kFPReg);
- src_reg = rl_src.reg.GetReg();
+ src_reg = rl_src.low_reg;
}
if (rl_dest.wide) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), src_reg);
+ NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, rl_result.reg.GetReg(), src_reg);
+ NewLIR2(op, rl_result.low_reg, src_reg);
StoreValue(rl_dest, rl_result);
}
}
@@ -221,7 +221,7 @@
RegLocation rl_result;
rl_src = LoadValue(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0x80000000);
+ OpRegRegImm(kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
StoreValue(rl_dest, rl_result);
}
@@ -229,8 +229,8 @@
RegLocation rl_result;
rl_src = LoadValueWide(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 0x80000000);
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegRegImm(kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
+ OpRegCopy(rl_result.low_reg, rl_src.low_reg);
StoreValueWide(rl_dest, rl_result);
}
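
The float and double negate hunks above work on the raw IEEE-754 bits held in core registers: adding 0x80000000 to the word that contains the sign bit flips only that bit (the carry out of bit 31 is discarded), which negates the value; for doubles the low word is simply copied. A sketch of the bit manipulation:

#include <cstdint>

static uint32_t NegateFloatBits(uint32_t bits) {
  return bits + 0x80000000u;           // identical to bits ^ 0x80000000u
}

static uint64_t NegateDoubleBits(uint32_t lo, uint32_t hi) {
  hi += 0x80000000u;                                // sign bit lives in the high word
  return (static_cast<uint64_t>(hi) << 32) | lo;    // low word unchanged
}
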
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index fec801b..013041a 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -47,13 +47,13 @@
int t0 = AllocTemp();
int t1 = AllocTemp();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- NewLIR3(kMipsSlt, t0, rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
- NewLIR3(kMipsSlt, t1, rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
- NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1, t0);
- LIR* branch = OpCmpImmBranch(kCondNe, rl_result.reg.GetReg(), 0, NULL);
- NewLIR3(kMipsSltu, t0, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- NewLIR3(kMipsSltu, t1, rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
- NewLIR3(kMipsSubu, rl_result.reg.GetReg(), t1, t0);
+ NewLIR3(kMipsSlt, t0, rl_src1.high_reg, rl_src2.high_reg);
+ NewLIR3(kMipsSlt, t1, rl_src2.high_reg, rl_src1.high_reg);
+ NewLIR3(kMipsSubu, rl_result.low_reg, t1, t0);
+ LIR* branch = OpCmpImmBranch(kCondNe, rl_result.low_reg, 0, NULL);
+ NewLIR3(kMipsSltu, t0, rl_src1.low_reg, rl_src2.low_reg);
+ NewLIR3(kMipsSltu, t1, rl_src2.low_reg, rl_src1.low_reg);
+ NewLIR3(kMipsSubu, rl_result.low_reg, t1, t0);
FreeTemp(t0);
FreeTemp(t1);
LIR* target = NewLIR0(kPseudoTargetLabel);
@@ -228,9 +228,9 @@
NewLIR4(kMipsDiv, r_HI, r_LO, reg1, reg2);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_div) {
- NewLIR2(kMipsMflo, rl_result.reg.GetReg(), r_LO);
+ NewLIR2(kMipsMflo, rl_result.low_reg, r_LO);
} else {
- NewLIR2(kMipsMfhi, rl_result.reg.GetReg(), r_HI);
+ NewLIR2(kMipsMfhi, rl_result.low_reg, r_HI);
}
return rl_result;
}
@@ -242,9 +242,9 @@
NewLIR4(kMipsDiv, r_HI, r_LO, reg1, t_reg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (is_div) {
- NewLIR2(kMipsMflo, rl_result.reg.GetReg(), r_LO);
+ NewLIR2(kMipsMflo, rl_result.low_reg, r_LO);
} else {
- NewLIR2(kMipsMfhi, rl_result.reg.GetReg(), r_HI);
+ NewLIR2(kMipsMfhi, rl_result.low_reg, r_HI);
}
FreeTemp(t_reg);
return rl_result;
@@ -290,7 +290,7 @@
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
DCHECK(size == kSignedByte);
- LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), size, INVALID_SREG);
+ LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, size, INVALID_SREG);
StoreValue(rl_dest, rl_result);
return true;
}
@@ -306,7 +306,7 @@
RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
DCHECK(size == kSignedByte);
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), size);
+ StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, size);
return true;
}
@@ -329,11 +329,11 @@
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
int t_reg = AllocTemp();
- OpRegRegImm(kOpLsl, t_reg, rl_src.reg.GetReg(), second_bit - first_bit);
- OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), t_reg);
+ OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
+ OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
FreeTemp(t_reg);
if (first_bit != 0) {
- OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_result.reg.GetReg(), first_bit);
+ OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
}
}
@@ -385,11 +385,11 @@
* addu v1,v1,t1
*/
- OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src2.reg.GetReg(), rl_src1.reg.GetReg());
+ OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src2.low_reg, rl_src1.low_reg);
int t_reg = AllocTemp();
- OpRegRegReg(kOpAdd, t_reg, rl_src2.reg.GetHighReg(), rl_src1.reg.GetHighReg());
- NewLIR3(kMipsSltu, rl_result.reg.GetHighReg(), rl_result.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegRegReg(kOpAdd, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), t_reg);
+ OpRegRegReg(kOpAdd, t_reg, rl_src2.high_reg, rl_src1.high_reg);
+ NewLIR3(kMipsSltu, rl_result.high_reg, rl_result.low_reg, rl_src2.low_reg);
+ OpRegRegReg(kOpAdd, rl_result.high_reg, rl_result.high_reg, t_reg);
FreeTemp(t_reg);
StoreValueWide(rl_dest, rl_result);
}
@@ -408,10 +408,10 @@
*/
int t_reg = AllocTemp();
- NewLIR3(kMipsSltu, t_reg, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegRegReg(kOpSub, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
- OpRegRegReg(kOpSub, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
- OpRegRegReg(kOpSub, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), t_reg);
+ NewLIR3(kMipsSltu, t_reg, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegRegReg(kOpSub, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+ OpRegRegReg(kOpSub, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
+ OpRegRegReg(kOpSub, rl_result.high_reg, rl_result.high_reg, t_reg);
FreeTemp(t_reg);
StoreValueWide(rl_dest, rl_result);
}
@@ -427,11 +427,11 @@
* subu v1,v1,t1
*/
- OpRegReg(kOpNeg, rl_result.reg.GetReg(), rl_src.reg.GetReg());
- OpRegReg(kOpNeg, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
+ OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);
+ OpRegReg(kOpNeg, rl_result.high_reg, rl_src.high_reg);
int t_reg = AllocTemp();
- NewLIR3(kMipsSltu, t_reg, r_ZERO, rl_result.reg.GetReg());
- OpRegRegReg(kOpSub, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(), t_reg);
+ NewLIR3(kMipsSltu, t_reg, r_ZERO, rl_result.low_reg);
+ OpRegRegReg(kOpSub, rl_result.high_reg, rl_result.high_reg, t_reg);
FreeTemp(t_reg);
StoreValueWide(rl_dest, rl_result);
}
@@ -471,7 +471,7 @@
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
int reg_ptr = AllocTemp();
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
@@ -479,28 +479,28 @@
if (needs_range_check) {
reg_len = AllocTemp();
/* Get len */
- LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
+ LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
}
/* reg_ptr -> array data */
- OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg.GetReg(), data_offset);
- FreeTemp(rl_array.reg.GetReg());
+ OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
+ FreeTemp(rl_array.low_reg);
if ((size == kLong) || (size == kDouble)) {
if (scale) {
int r_new_index = AllocTemp();
- OpRegRegImm(kOpLsl, r_new_index, rl_index.reg.GetReg(), scale);
+ OpRegRegImm(kOpLsl, r_new_index, rl_index.low_reg, scale);
OpRegReg(kOpAdd, reg_ptr, r_new_index);
FreeTemp(r_new_index);
} else {
- OpRegReg(kOpAdd, reg_ptr, rl_index.reg.GetReg());
+ OpRegReg(kOpAdd, reg_ptr, rl_index.low_reg);
}
- FreeTemp(rl_index.reg.GetReg());
+ FreeTemp(rl_index.low_reg);
rl_result = EvalLoc(rl_dest, reg_class, true);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- LoadBaseDispWide(reg_ptr, 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
+ LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
FreeTemp(reg_ptr);
StoreValueWide(rl_dest, rl_result);
@@ -508,10 +508,10 @@
rl_result = EvalLoc(rl_dest, reg_class, true);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- LoadBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_result.reg.GetReg(), scale, size);
+ LoadBaseIndexed(reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
FreeTemp(reg_ptr);
StoreValue(rl_dest, rl_result);
@@ -538,17 +538,17 @@
rl_index = LoadValue(rl_index, kCoreReg);
int reg_ptr = INVALID_REG;
bool allocated_reg_ptr_temp = false;
- if (IsTemp(rl_array.reg.GetReg()) && !card_mark) {
- Clobber(rl_array.reg.GetReg());
- reg_ptr = rl_array.reg.GetReg();
+ if (IsTemp(rl_array.low_reg) && !card_mark) {
+ Clobber(rl_array.low_reg);
+ reg_ptr = rl_array.low_reg;
} else {
reg_ptr = AllocTemp();
- OpRegCopy(reg_ptr, rl_array.reg.GetReg());
+ OpRegCopy(reg_ptr, rl_array.low_reg);
allocated_reg_ptr_temp = true;
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
int reg_len = INVALID_REG;
@@ -556,7 +556,7 @@
reg_len = AllocTemp();
// NOTE: max live temps(4) here.
/* Get len */
- LoadWordDisp(rl_array.reg.GetReg(), len_offset, reg_len);
+ LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
}
/* reg_ptr -> array data */
OpRegImm(kOpAdd, reg_ptr, data_offset);
@@ -565,34 +565,34 @@
// TUNING: specific wide routine that can handle fp regs
if (scale) {
int r_new_index = AllocTemp();
- OpRegRegImm(kOpLsl, r_new_index, rl_index.reg.GetReg(), scale);
+ OpRegRegImm(kOpLsl, r_new_index, rl_index.low_reg, scale);
OpRegReg(kOpAdd, reg_ptr, r_new_index);
FreeTemp(r_new_index);
} else {
- OpRegReg(kOpAdd, reg_ptr, rl_index.reg.GetReg());
+ OpRegReg(kOpAdd, reg_ptr, rl_index.low_reg);
}
rl_src = LoadValueWide(rl_src, reg_class);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- StoreBaseDispWide(reg_ptr, 0, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
} else {
rl_src = LoadValue(rl_src, reg_class);
if (needs_range_check) {
- GenRegRegCheck(kCondUge, rl_index.reg.GetReg(), reg_len, kThrowArrayBounds);
+ GenRegRegCheck(kCondUge, rl_index.low_reg, reg_len, kThrowArrayBounds);
FreeTemp(reg_len);
}
- StoreBaseIndexed(reg_ptr, rl_index.reg.GetReg(), rl_src.reg.GetReg(),
+ StoreBaseIndexed(reg_ptr, rl_index.low_reg, rl_src.low_reg,
scale, size);
}
if (allocated_reg_ptr_temp) {
FreeTemp(reg_ptr);
}
if (card_mark) {
- MarkGCCard(rl_src.reg.GetReg(), rl_array.reg.GetReg());
+ MarkGCCard(rl_src.low_reg, rl_array.low_reg);
}
}
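
MIPS has no condition flags, so the 64-bit add and subtract hunks above materialise the carry/borrow explicitly with sltu, as the assembly comments in the file describe. The equivalent arithmetic on 32-bit halves (a sketch with illustrative names):

#include <cstdint>

static void AddLong(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi,
                    uint32_t* out_lo, uint32_t* out_hi) {
  uint32_t lo = a_lo + b_lo;
  uint32_t carry = (lo < b_lo) ? 1u : 0u;    // sltu after the low-word add
  *out_lo = lo;
  *out_hi = a_hi + b_hi + carry;
}

static void SubLong(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi,
                    uint32_t* out_lo, uint32_t* out_hi) {
  uint32_t borrow = (a_lo < b_lo) ? 1u : 0u;  // sltu before the subtracts
  *out_lo = a_lo - b_lo;
  *out_hi = a_hi - b_hi - borrow;
}
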
diff --git a/compiler/dex/quick/mips/mips_lir.h b/compiler/dex/quick/mips/mips_lir.h
index 59f442c..00eef96 100644
--- a/compiler/dex/quick/mips/mips_lir.h
+++ b/compiler/dex/quick/mips/mips_lir.h
@@ -141,6 +141,16 @@
#define rMIPS_LR INVALID_REG
#define rMIPS_PC INVALID_REG
+// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
+#define MIPS_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r_V0, INVALID_REG, \
+ INVALID_SREG, INVALID_SREG}
+#define MIPS_LOC_C_RETURN_FLOAT {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r_FRESULT0, \
+ INVALID_REG, INVALID_SREG, INVALID_SREG}
+#define MIPS_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r_RESULT0, \
+ r_RESULT1, INVALID_SREG, INVALID_SREG}
+#define MIPS_LOC_C_RETURN_DOUBLE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r_FRESULT0, \
+ r_FRESULT1, INVALID_SREG, INVALID_SREG}
+
enum MipsResourceEncodingPos {
kMipsGPReg0 = 0,
kMipsRegSP = 29,
@@ -269,20 +279,6 @@
#define rMIPS_INVOKE_TGT r_T9
#define rMIPS_COUNT INVALID_REG
-// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
-const RegLocation mips_loc_c_return
- {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k32BitSolo, r_V0), INVALID_SREG, INVALID_SREG};
-const RegLocation mips_loc_c_return_wide
- {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k64BitPair, r_V0, r_V1), INVALID_SREG, INVALID_SREG};
-const RegLocation mips_loc_c_return_float
- {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k32BitSolo, r_F0), INVALID_SREG, INVALID_SREG};
-const RegLocation mips_loc_c_return_double
- {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k64BitPair, r_F0, r_F1), INVALID_SREG, INVALID_SREG};
-
enum MipsShiftEncodings {
kMipsLsl = 0x0,
kMipsLsr = 0x1,
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 85c250d..224e8f2 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -40,19 +40,23 @@
r_F8, r_F9, r_F10, r_F11, r_F12, r_F13, r_F14, r_F15};
RegLocation MipsMir2Lir::LocCReturn() {
- return mips_loc_c_return;
+ RegLocation res = MIPS_LOC_C_RETURN;
+ return res;
}
RegLocation MipsMir2Lir::LocCReturnWide() {
- return mips_loc_c_return_wide;
+ RegLocation res = MIPS_LOC_C_RETURN_WIDE;
+ return res;
}
RegLocation MipsMir2Lir::LocCReturnFloat() {
- return mips_loc_c_return_float;
+ RegLocation res = MIPS_LOC_C_RETURN_FLOAT;
+ return res;
}
RegLocation MipsMir2Lir::LocCReturnDouble() {
- return mips_loc_c_return_double;
+ RegLocation res = MIPS_LOC_C_RETURN_DOUBLE;
+ return res;
}
// Return a target-dependent special register.
@@ -437,20 +441,27 @@
#endif
}
-// Alloc a pair of core registers, or a double.
-RegStorage MipsMir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
+/*
+ * Alloc a pair of core registers, or a double. Low reg in low byte,
+ * high reg in next byte.
+ */
+int MipsMir2Lir::AllocTypedTempPair(bool fp_hint,
+ int reg_class) {
int high_reg;
int low_reg;
+ int res = 0;
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
low_reg = AllocTempDouble();
high_reg = low_reg + 1;
- return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
+ res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+ return res;
}
low_reg = AllocTemp();
high_reg = AllocTemp();
- return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
+ res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+ return res;
}
int MipsMir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
@@ -494,11 +505,11 @@
}
void MipsMir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) {
- if ((rl_free.reg.GetReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetReg() != rl_keep.reg.GetHighReg()) &&
- (rl_free.reg.GetHighReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) {
+ if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
+ (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
// No overlap, free both
- FreeTemp(rl_free.reg.GetReg());
- FreeTemp(rl_free.reg.GetHighReg());
+ FreeTemp(rl_free.low_reg);
+ FreeTemp(rl_free.high_reg);
}
}
/*
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 03d3634..8c2ed36 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -53,9 +53,8 @@
if (wide && reg_arg_high == INVALID_REG) {
// If the low part is not in a reg, we allocate a pair. Otherwise, we just load to high reg.
if (reg_arg_low == INVALID_REG) {
- RegStorage new_regs = AllocTypedTempWide(false, kAnyReg);
- reg_arg_low = new_regs.GetReg();
- reg_arg_high = new_regs.GetHighReg();
+ int new_regs = AllocTypedTempPair(false, kAnyReg);
+ DECODE_REG_PAIR(new_regs, reg_arg_low, reg_arg_high);
LoadBaseDispWide(TargetReg(kSp), offset, reg_arg_low, reg_arg_high, INVALID_SREG);
} else {
reg_arg_high = AllocTemp();
@@ -71,7 +70,6 @@
}
if (wide) {
- // TODO: replace w/ RegStorage.
return ENCODE_REG_PAIR(reg_arg_low, reg_arg_high);
} else {
return reg_arg_low;
@@ -92,25 +90,25 @@
if (!rl_dest.wide) {
int reg = GetArgMappingToPhysicalReg(in_position);
if (reg != INVALID_REG) {
- OpRegCopy(rl_dest.reg.GetReg(), reg);
+ OpRegCopy(rl_dest.low_reg, reg);
} else {
- LoadWordDisp(TargetReg(kSp), offset, rl_dest.reg.GetReg());
+ LoadWordDisp(TargetReg(kSp), offset, rl_dest.low_reg);
}
} else {
int reg_arg_low = GetArgMappingToPhysicalReg(in_position);
int reg_arg_high = GetArgMappingToPhysicalReg(in_position + 1);
if (reg_arg_low != INVALID_REG && reg_arg_high != INVALID_REG) {
- OpRegCopyWide(rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), reg_arg_low, reg_arg_high);
+ OpRegCopyWide(rl_dest.low_reg, rl_dest.high_reg, reg_arg_low, reg_arg_high);
} else if (reg_arg_low != INVALID_REG && reg_arg_high == INVALID_REG) {
- OpRegCopy(rl_dest.reg.GetReg(), reg_arg_low);
+ OpRegCopy(rl_dest.low_reg, reg_arg_low);
int offset_high = offset + sizeof(uint32_t);
- LoadWordDisp(TargetReg(kSp), offset_high, rl_dest.reg.GetHighReg());
+ LoadWordDisp(TargetReg(kSp), offset_high, rl_dest.high_reg);
} else if (reg_arg_low == INVALID_REG && reg_arg_high != INVALID_REG) {
- OpRegCopy(rl_dest.reg.GetHighReg(), reg_arg_high);
- LoadWordDisp(TargetReg(kSp), offset, rl_dest.reg.GetReg());
+ OpRegCopy(rl_dest.high_reg, reg_arg_high);
+ LoadWordDisp(TargetReg(kSp), offset, rl_dest.low_reg);
} else {
- LoadBaseDispWide(TargetReg(kSp), offset, rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), INVALID_SREG);
+ LoadBaseDispWide(TargetReg(kSp), offset, rl_dest.low_reg, rl_dest.high_reg, INVALID_SREG);
}
}
}
@@ -133,9 +131,9 @@
RegLocation rl_dest = wide ? GetReturnWide(double_or_float) : GetReturn(double_or_float);
int reg_obj = LoadArg(data.object_arg);
if (wide) {
- LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.reg.GetReg(), rl_dest.reg.GetHighReg(), INVALID_SREG);
+ LoadBaseDispWide(reg_obj, data.field_offset, rl_dest.low_reg, rl_dest.high_reg, INVALID_SREG);
} else {
- LoadBaseDisp(reg_obj, data.field_offset, rl_dest.reg.GetReg(), kWord, INVALID_SREG);
+ LoadBaseDisp(reg_obj, data.field_offset, rl_dest.low_reg, kWord, INVALID_SREG);
}
if (data.is_volatile) {
GenMemBarrier(kLoadLoad);
@@ -212,7 +210,7 @@
successful = true;
RegLocation rl_dest = GetReturn(cu_->shorty[0] == 'F');
GenPrintLabel(mir);
- LoadConstant(rl_dest.reg.GetReg(), static_cast<int>(special.d.data));
+ LoadConstant(rl_dest.low_reg, static_cast<int>(special.d.data));
return_mir = mir_graph_->GetNextUnconditionalMir(bb, mir);
break;
}
@@ -373,19 +371,19 @@
case Instruction::CONST_4:
case Instruction::CONST_16:
rl_result = EvalLoc(rl_dest, kAnyReg, true);
- LoadConstantNoClobber(rl_result.reg.GetReg(), vB);
+ LoadConstantNoClobber(rl_result.low_reg, vB);
StoreValue(rl_dest, rl_result);
if (vB == 0) {
- Workaround7250540(rl_dest, rl_result.reg.GetReg());
+ Workaround7250540(rl_dest, rl_result.low_reg);
}
break;
case Instruction::CONST_HIGH16:
rl_result = EvalLoc(rl_dest, kAnyReg, true);
- LoadConstantNoClobber(rl_result.reg.GetReg(), vB << 16);
+ LoadConstantNoClobber(rl_result.low_reg, vB << 16);
StoreValue(rl_dest, rl_result);
if (vB == 0) {
- Workaround7250540(rl_dest, rl_result.reg.GetReg());
+ Workaround7250540(rl_dest, rl_result.low_reg);
}
break;
@@ -400,7 +398,7 @@
case Instruction::CONST_WIDE_HIGH16:
rl_result = EvalLoc(rl_dest, kAnyReg, true);
- LoadConstantWide(rl_result.reg.GetReg(), rl_result.reg.GetHighReg(),
+ LoadConstantWide(rl_result.low_reg, rl_result.high_reg,
static_cast<int64_t>(vB) << 48);
StoreValueWide(rl_dest, rl_result);
break;
@@ -433,9 +431,9 @@
int len_offset;
len_offset = mirror::Array::LengthOffset().Int32Value();
rl_src[0] = LoadValue(rl_src[0], kCoreReg);
- GenNullCheck(rl_src[0].s_reg_low, rl_src[0].reg.GetReg(), opt_flags);
+ GenNullCheck(rl_src[0].s_reg_low, rl_src[0].low_reg, opt_flags);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadWordDisp(rl_src[0].reg.GetReg(), len_offset, rl_result.reg.GetReg());
+ LoadWordDisp(rl_src[0].low_reg, len_offset, rl_result.low_reg);
StoreValue(rl_dest, rl_result);
break;
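
AllocTypedTempPair (restored in target_mips.cc above) hands back both registers packed into a single int, low register in the low byte and high register in the next byte, and LoadArg unpacks the pair with DECODE_REG_PAIR and re-packs it with ENCODE_REG_PAIR. The real macro definitions live elsewhere in the tree and are not part of this patch; a hypothetical sketch of that encoding:

#define ENCODE_REG_PAIR_SKETCH(low, high)   (((low) & 0xff) | (((high) & 0xff) << 8))
#define DECODE_REG_PAIR_SKETCH(pair, low, high) \
  do { (low) = (pair) & 0xff; (high) = ((pair) >> 8) & 0xff; } while (0)
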
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 7522cd9..6e97c53 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -21,7 +21,6 @@
#include "compiled_method.h"
#include "dex/compiler_enums.h"
#include "dex/compiler_ir.h"
-#include "dex/reg_storage.h"
#include "dex/backend.h"
#include "driver/compiler_driver.h"
#include "leb128_encoder.h"
@@ -814,7 +813,7 @@
virtual bool IsFpReg(int reg) = 0;
virtual bool SameRegType(int reg1, int reg2) = 0;
virtual int AllocTypedTemp(bool fp_hint, int reg_class) = 0;
- virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class) = 0;
+ virtual int AllocTypedTempPair(bool fp_hint, int reg_class) = 0;
virtual int S2d(int low_reg, int high_reg) = 0;
virtual int TargetReg(SpecialTargetRegister reg) = 0;
virtual int GetArgMappingToPhysicalReg(int arg_num) = 0;
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 3a8942e..0a65171 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -460,7 +460,7 @@
DCHECK(!rl.wide);
DCHECK(start && start->next);
DCHECK(finish);
- RegisterInfo* p = GetRegInfo(rl.reg.GetReg());
+ RegisterInfo* p = GetRegInfo(rl.low_reg);
p->def_start = start->next;
p->def_end = finish;
}
@@ -474,8 +474,8 @@
DCHECK(rl.wide);
DCHECK(start && start->next);
DCHECK(finish);
- RegisterInfo* p = GetRegInfo(rl.reg.GetReg());
- ResetDef(rl.reg.GetHighReg()); // Only track low of pair
+ RegisterInfo* p = GetRegInfo(rl.low_reg);
+ ResetDef(rl.high_reg); // Only track low of pair
p->def_start = start->next;
p->def_end = finish;
}
@@ -483,8 +483,8 @@
RegLocation Mir2Lir::WideToNarrow(RegLocation rl) {
DCHECK(rl.wide);
if (rl.location == kLocPhysReg) {
- RegisterInfo* info_lo = GetRegInfo(rl.reg.GetReg());
- RegisterInfo* info_hi = GetRegInfo(rl.reg.GetHighReg());
+ RegisterInfo* info_lo = GetRegInfo(rl.low_reg);
+ RegisterInfo* info_hi = GetRegInfo(rl.high_reg);
if (info_lo->is_temp) {
info_lo->pair = false;
info_lo->def_start = NULL;
@@ -502,18 +502,18 @@
void Mir2Lir::ResetDefLoc(RegLocation rl) {
DCHECK(!rl.wide);
- RegisterInfo* p = IsTemp(rl.reg.GetReg());
+ RegisterInfo* p = IsTemp(rl.low_reg);
if (p && !(cu_->disable_opt & (1 << kSuppressLoads))) {
DCHECK(!p->pair);
NullifyRange(p->def_start, p->def_end, p->s_reg, rl.s_reg_low);
}
- ResetDef(rl.reg.GetReg());
+ ResetDef(rl.low_reg);
}
void Mir2Lir::ResetDefLocWide(RegLocation rl) {
DCHECK(rl.wide);
- RegisterInfo* p_low = IsTemp(rl.reg.GetReg());
- RegisterInfo* p_high = IsTemp(rl.reg.GetHighReg());
+ RegisterInfo* p_low = IsTemp(rl.low_reg);
+ RegisterInfo* p_high = IsTemp(rl.high_reg);
if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
DCHECK(p_low->pair);
NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
@@ -521,8 +521,8 @@
if (p_high && !(cu_->disable_opt & (1 << kSuppressLoads))) {
DCHECK(p_high->pair);
}
- ResetDef(rl.reg.GetReg());
- ResetDef(rl.reg.GetHighReg());
+ ResetDef(rl.low_reg);
+ ResetDef(rl.high_reg);
}
void Mir2Lir::ResetDefTracking() {
@@ -621,10 +621,10 @@
}
void Mir2Lir::MarkClean(RegLocation loc) {
- RegisterInfo* info = GetRegInfo(loc.reg.GetReg());
+ RegisterInfo* info = GetRegInfo(loc.low_reg);
info->dirty = false;
if (loc.wide) {
- info = GetRegInfo(loc.reg.GetHighReg());
+ info = GetRegInfo(loc.high_reg);
info->dirty = false;
}
}
@@ -634,10 +634,10 @@
// If already home, can't be dirty
return;
}
- RegisterInfo* info = GetRegInfo(loc.reg.GetReg());
+ RegisterInfo* info = GetRegInfo(loc.low_reg);
info->dirty = true;
if (loc.wide) {
- info = GetRegInfo(loc.reg.GetHighReg());
+ info = GetRegInfo(loc.high_reg);
info->dirty = true;
}
}
@@ -707,7 +707,7 @@
Clobber(info_lo->partner);
FreeTemp(info_lo->reg);
} else {
- loc.reg = RegStorage(RegStorage::k32BitSolo, info_lo->reg);
+ loc.low_reg = info_lo->reg;
loc.location = kLocPhysReg;
}
}
@@ -744,10 +744,11 @@
}
if (match) {
// Can reuse - update the register usage info
+ loc.low_reg = info_lo->reg;
+ loc.high_reg = info_hi->reg;
loc.location = kLocPhysReg;
- loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_hi->reg);
- MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
- DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
+ MarkPair(loc.low_reg, loc.high_reg);
+ DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
return loc;
}
// Can't easily reuse - clobber and free any overlaps
@@ -778,6 +779,7 @@
RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
DCHECK(loc.wide);
+ int32_t new_regs;
int32_t low_reg;
int32_t high_reg;
@@ -785,21 +787,22 @@
/* If already in registers, we can assume proper form. Right reg class? */
if (loc.location == kLocPhysReg) {
- DCHECK_EQ(IsFpReg(loc.reg.GetReg()), IsFpReg(loc.reg.GetHighReg()));
- DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
- if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
+ DCHECK_EQ(IsFpReg(loc.low_reg), IsFpReg(loc.high_reg));
+ DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
+ if (!RegClassMatches(reg_class, loc.low_reg)) {
/* Wrong register class. Reallocate and copy */
- RegStorage new_regs = AllocTypedTempWide(loc.fp, reg_class);
- low_reg = new_regs.GetReg();
- high_reg = new_regs.GetHighReg();
- OpRegCopyWide(low_reg, high_reg, loc.reg.GetReg(), loc.reg.GetHighReg());
- CopyRegInfo(low_reg, loc.reg.GetReg());
- CopyRegInfo(high_reg, loc.reg.GetHighReg());
- Clobber(loc.reg.GetReg());
- Clobber(loc.reg.GetHighReg());
- loc.reg = new_regs;
- MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
- DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
+ new_regs = AllocTypedTempPair(loc.fp, reg_class);
+ low_reg = new_regs & 0xff;
+ high_reg = (new_regs >> 8) & 0xff;
+ OpRegCopyWide(low_reg, high_reg, loc.low_reg, loc.high_reg);
+ CopyRegInfo(low_reg, loc.low_reg);
+ CopyRegInfo(high_reg, loc.high_reg);
+ Clobber(loc.low_reg);
+ Clobber(loc.high_reg);
+ loc.low_reg = low_reg;
+ loc.high_reg = high_reg;
+ MarkPair(loc.low_reg, loc.high_reg);
+ DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
}
return loc;
}
@@ -807,18 +810,20 @@
DCHECK_NE(loc.s_reg_low, INVALID_SREG);
DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);
- loc.reg = AllocTypedTempWide(loc.fp, reg_class);
+ new_regs = AllocTypedTempPair(loc.fp, reg_class);
+ loc.low_reg = new_regs & 0xff;
+ loc.high_reg = (new_regs >> 8) & 0xff;
- MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
+ MarkPair(loc.low_reg, loc.high_reg);
if (update) {
loc.location = kLocPhysReg;
- MarkLive(loc.reg.GetReg(), loc.s_reg_low);
+ MarkLive(loc.low_reg, loc.s_reg_low);
// Does this wide value live in two registers or one vector register?
- if (loc.reg.GetReg() != loc.reg.GetHighReg()) {
- MarkLive(loc.reg.GetHighReg(), GetSRegHi(loc.s_reg_low));
+ if (loc.low_reg != loc.high_reg) {
+ MarkLive(loc.high_reg, GetSRegHi(loc.s_reg_low));
}
}
- DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
+ DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
return loc;
}
@@ -831,13 +836,13 @@
loc = UpdateLoc(loc);
if (loc.location == kLocPhysReg) {
- if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
+ if (!RegClassMatches(reg_class, loc.low_reg)) {
/* Wrong register class. Realloc, copy and transfer ownership */
new_reg = AllocTypedTemp(loc.fp, reg_class);
- OpRegCopy(new_reg, loc.reg.GetReg());
- CopyRegInfo(new_reg, loc.reg.GetReg());
- Clobber(loc.reg.GetReg());
- loc.reg = RegStorage(RegStorage::k32BitSolo, new_reg);
+ OpRegCopy(new_reg, loc.low_reg);
+ CopyRegInfo(new_reg, loc.low_reg);
+ Clobber(loc.low_reg);
+ loc.low_reg = new_reg;
}
return loc;
}
@@ -845,11 +850,11 @@
DCHECK_NE(loc.s_reg_low, INVALID_SREG);
new_reg = AllocTypedTemp(loc.fp, reg_class);
- loc.reg = RegStorage(RegStorage::k32BitSolo, new_reg);
+ loc.low_reg = new_reg;
if (update) {
loc.location = kLocPhysReg;
- MarkLive(loc.reg.GetReg(), loc.s_reg_low);
+ MarkLive(loc.low_reg, loc.s_reg_low);
}
return loc;
}
@@ -1001,29 +1006,32 @@
if (curr->fp) {
if (promotion_map_[p_map_idx].fp_location == kLocPhysReg) {
curr->location = kLocPhysReg;
- curr->reg = RegStorage(RegStorage::k32BitSolo, promotion_map_[p_map_idx].FpReg);
+ curr->low_reg = promotion_map_[p_map_idx].FpReg;
curr->home = true;
}
} else {
if (promotion_map_[p_map_idx].core_location == kLocPhysReg) {
curr->location = kLocPhysReg;
- curr->reg = RegStorage(RegStorage::k32BitSolo, promotion_map_[p_map_idx].core_reg);
+ curr->low_reg = promotion_map_[p_map_idx].core_reg;
curr->home = true;
}
}
+ curr->high_reg = INVALID_REG;
} else {
if (curr->high_word) {
continue;
}
if (curr->fp) {
if ((promotion_map_[p_map_idx].fp_location == kLocPhysReg) &&
- (promotion_map_[p_map_idx+1].fp_location == kLocPhysReg)) {
+ (promotion_map_[p_map_idx+1].fp_location ==
+ kLocPhysReg)) {
int low_reg = promotion_map_[p_map_idx].FpReg;
int high_reg = promotion_map_[p_map_idx+1].FpReg;
// Doubles require pair of singles starting at even reg
if (((low_reg & 0x1) == 0) && ((low_reg + 1) == high_reg)) {
curr->location = kLocPhysReg;
- curr->reg = RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
+ curr->low_reg = low_reg;
+ curr->high_reg = high_reg;
curr->home = true;
}
}
@@ -1032,8 +1040,8 @@
&& (promotion_map_[p_map_idx+1].core_location ==
kLocPhysReg)) {
curr->location = kLocPhysReg;
- curr->reg = RegStorage(RegStorage::k64BitPair, promotion_map_[p_map_idx].core_reg,
- promotion_map_[p_map_idx+1].core_reg);
+ curr->low_reg = promotion_map_[p_map_idx].core_reg;
+ curr->high_reg = promotion_map_[p_map_idx+1].core_reg;
curr->home = true;
}
}
@@ -1060,13 +1068,13 @@
RegLocation gpr_res = LocCReturnWide();
RegLocation fpr_res = LocCReturnDouble();
RegLocation res = is_double ? fpr_res : gpr_res;
- Clobber(res.reg.GetReg());
- Clobber(res.reg.GetHighReg());
- LockTemp(res.reg.GetReg());
- LockTemp(res.reg.GetHighReg());
+ Clobber(res.low_reg);
+ Clobber(res.high_reg);
+ LockTemp(res.low_reg);
+ LockTemp(res.high_reg);
// Does this wide value live in two registers or one vector register?
- if (res.reg.GetReg() != res.reg.GetHighReg()) {
- MarkPair(res.reg.GetReg(), res.reg.GetHighReg());
+ if (res.low_reg != res.high_reg) {
+ MarkPair(res.low_reg, res.high_reg);
}
return res;
}
@@ -1075,11 +1083,11 @@
RegLocation gpr_res = LocCReturn();
RegLocation fpr_res = LocCReturnFloat();
RegLocation res = is_float ? fpr_res : gpr_res;
- Clobber(res.reg.GetReg());
+ Clobber(res.low_reg);
if (cu_->instruction_set == kMips) {
- MarkInUse(res.reg.GetReg());
+ MarkInUse(res.low_reg);
} else {
- LockTemp(res.reg.GetReg());
+ LockTemp(res.low_reg);
}
return res;
}
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 3708f01..0613cdf 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -40,7 +40,7 @@
int key = keys[i];
BasicBlock* case_block =
mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
- OpCmpImmBranch(kCondEq, rl_src.reg.GetReg(), key,
+ OpCmpImmBranch(kCondEq, rl_src.low_reg, key,
&block_label_list_[case_block->id]);
}
}
@@ -87,7 +87,7 @@
// We can use the saved value.
RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
rl_method = LoadValue(rl_method, kCoreReg);
- start_of_method_reg = rl_method.reg.GetReg();
+ start_of_method_reg = rl_method.low_reg;
store_method_addr_used_ = true;
} else {
start_of_method_reg = AllocTemp();
@@ -97,10 +97,10 @@
int keyReg;
// Remove the bias, if necessary
if (low_key == 0) {
- keyReg = rl_src.reg.GetReg();
+ keyReg = rl_src.low_reg;
} else {
keyReg = AllocTemp();
- OpRegRegImm(kOpSub, keyReg, rl_src.reg.GetReg(), low_key);
+ OpRegRegImm(kOpSub, keyReg, rl_src.low_reg, low_key);
}
// Bounds check - if < 0 or >= size continue following switch
OpRegImm(kOpCmp, keyReg, size-1);
@@ -164,7 +164,7 @@
void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
int ex_offset = Thread::ExceptionOffset().Int32Value();
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- NewLIR2(kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
+ NewLIR2(kX86Mov32RT, rl_result.low_reg, ex_offset);
NewLIR2(kX86Mov32TI, ex_offset, 0);
StoreValue(rl_dest, rl_result);
}
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 55f18ef..421d51e 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -48,9 +48,8 @@
// Required for target - register utilities.
bool IsFpReg(int reg);
bool SameRegType(int reg1, int reg2);
- // TODO: for consistency, make this return a RegStorage as well?
int AllocTypedTemp(bool fp_hint, int reg_class);
- RegStorage AllocTypedTempWide(bool fp_hint, int reg_class);
+ int AllocTypedTempPair(bool fp_hint, int reg_class);
int S2d(int low_reg, int high_reg);
int TargetReg(SpecialTargetRegister reg);
int GetArgMappingToPhysicalReg(int arg_num);
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 1827901..4c2ecc0 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -63,9 +63,9 @@
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
rl_result = EvalLoc(rl_dest, kFPReg, true);
- int r_dest = rl_result.reg.GetReg();
- int r_src1 = rl_src1.reg.GetReg();
- int r_src2 = rl_src2.reg.GetReg();
+ int r_dest = rl_result.low_reg;
+ int r_src1 = rl_src1.low_reg;
+ int r_src2 = rl_src2.low_reg;
if (r_dest == r_src2) {
r_src2 = AllocTempFloat();
OpRegCopy(r_src2, r_dest);
@@ -118,9 +118,9 @@
rl_result = EvalLoc(rl_dest, kFPReg, true);
DCHECK(rl_dest.wide);
DCHECK(rl_result.wide);
- int r_dest = S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg());
- int r_src1 = S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg());
- int r_src2 = S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg());
+ int r_dest = S2d(rl_result.low_reg, rl_result.high_reg);
+ int r_src1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
+ int r_src2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
if (r_dest == r_src2) {
r_src2 = AllocTempDouble() | X86_FP_DOUBLE;
OpRegCopy(r_src2, r_dest);
@@ -140,7 +140,7 @@
// If the source is in physical register, then put it in its location on stack.
if (rl_src.location == kLocPhysReg) {
- RegisterInfo* lo_info = GetRegInfo(rl_src.reg.GetReg());
+ RegisterInfo* lo_info = GetRegInfo(rl_src.low_reg);
if (lo_info != nullptr && lo_info->is_temp) {
// Calling FlushSpecificReg because it will only write back VR if it is dirty.
@@ -148,7 +148,7 @@
} else {
// It must have been register promoted if it is not a temp but is still in physical
// register. Since we need it to be in memory to convert, we place it there now.
- StoreBaseDispWide(TargetReg(kSp), src_v_reg_offset, rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ StoreBaseDispWide(TargetReg(kSp), src_v_reg_offset, rl_src.low_reg, rl_src.high_reg);
}
}
@@ -181,13 +181,13 @@
if (is_double) {
rl_result = EvalLocWide(rl_dest, kFPReg, true);
- LoadBaseDispWide(TargetReg(kSp), dest_v_reg_offset, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
+ LoadBaseDispWide(TargetReg(kSp), dest_v_reg_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- LoadWordDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.reg.GetReg());
+ LoadWordDisp(TargetReg(kSp), dest_v_reg_offset, rl_result.low_reg);
StoreValue(rl_dest, rl_result);
}
@@ -219,21 +219,21 @@
break;
case Instruction::FLOAT_TO_INT: {
rl_src = LoadValue(rl_src, kFPReg);
- src_reg = rl_src.reg.GetReg();
+ src_reg = rl_src.low_reg;
// In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
int temp_reg = AllocTempFloat();
- LoadConstant(rl_result.reg.GetReg(), 0x7fffffff);
- NewLIR2(kX86Cvtsi2ssRR, temp_reg, rl_result.reg.GetReg());
+ LoadConstant(rl_result.low_reg, 0x7fffffff);
+ NewLIR2(kX86Cvtsi2ssRR, temp_reg, rl_result.low_reg);
NewLIR2(kX86ComissRR, src_reg, temp_reg);
LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
- NewLIR2(kX86Cvttss2siRR, rl_result.reg.GetReg(), src_reg);
+ NewLIR2(kX86Cvttss2siRR, rl_result.low_reg, src_reg);
LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
branch_na_n->target = NewLIR0(kPseudoTargetLabel);
- NewLIR2(kX86Xor32RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
+ NewLIR2(kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
branch_normal->target = NewLIR0(kPseudoTargetLabel);
StoreValue(rl_dest, rl_result);
@@ -241,21 +241,21 @@
}
case Instruction::DOUBLE_TO_INT: {
rl_src = LoadValueWide(rl_src, kFPReg);
- src_reg = rl_src.reg.GetReg();
+ src_reg = rl_src.low_reg;
// In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
int temp_reg = AllocTempDouble() | X86_FP_DOUBLE;
- LoadConstant(rl_result.reg.GetReg(), 0x7fffffff);
- NewLIR2(kX86Cvtsi2sdRR, temp_reg, rl_result.reg.GetReg());
+ LoadConstant(rl_result.low_reg, 0x7fffffff);
+ NewLIR2(kX86Cvtsi2sdRR, temp_reg, rl_result.low_reg);
NewLIR2(kX86ComisdRR, src_reg, temp_reg);
LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
- NewLIR2(kX86Cvttsd2siRR, rl_result.reg.GetReg(), src_reg);
+ NewLIR2(kX86Cvttsd2siRR, rl_result.low_reg, src_reg);
LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
branch_na_n->target = NewLIR0(kPseudoTargetLabel);
- NewLIR2(kX86Xor32RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
+ NewLIR2(kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
branch_normal->target = NewLIR0(kPseudoTargetLabel);
StoreValue(rl_dest, rl_result);
@@ -278,18 +278,18 @@
}
if (rl_src.wide) {
rl_src = LoadValueWide(rl_src, rcSrc);
- src_reg = S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg());
+ src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
} else {
rl_src = LoadValue(rl_src, rcSrc);
- src_reg = rl_src.reg.GetReg();
+ src_reg = rl_src.low_reg;
}
if (rl_dest.wide) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()), src_reg);
+ NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
StoreValueWide(rl_dest, rl_result);
} else {
rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(op, rl_result.reg.GetReg(), src_reg);
+ NewLIR2(op, rl_result.low_reg, src_reg);
StoreValue(rl_dest, rl_result);
}
}
@@ -302,19 +302,19 @@
int src_reg2;
if (single) {
rl_src1 = LoadValue(rl_src1, kFPReg);
- src_reg1 = rl_src1.reg.GetReg();
+ src_reg1 = rl_src1.low_reg;
rl_src2 = LoadValue(rl_src2, kFPReg);
- src_reg2 = rl_src2.reg.GetReg();
+ src_reg2 = rl_src2.low_reg;
} else {
rl_src1 = LoadValueWide(rl_src1, kFPReg);
- src_reg1 = S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg());
+ src_reg1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
rl_src2 = LoadValueWide(rl_src2, kFPReg);
- src_reg2 = S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg());
+ src_reg2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
}
// In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
ClobberSReg(rl_dest.s_reg_low);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadConstantNoClobber(rl_result.reg.GetReg(), unordered_gt ? 1 : 0);
+ LoadConstantNoClobber(rl_result.low_reg, unordered_gt ? 1 : 0);
if (single) {
NewLIR2(kX86UcomissRR, src_reg1, src_reg2);
} else {
@@ -325,20 +325,20 @@
branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
}
// If the result reg can't be byte accessed, use a jump and move instead of a set.
- if (rl_result.reg.GetReg() >= 4) {
+ if (rl_result.low_reg >= 4) {
LIR* branch2 = NULL;
if (unordered_gt) {
branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
- NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x0);
+ NewLIR2(kX86Mov32RI, rl_result.low_reg, 0x0);
} else {
branch2 = NewLIR2(kX86Jcc8, 0, kX86CondBe);
- NewLIR2(kX86Mov32RI, rl_result.reg.GetReg(), 0x1);
+ NewLIR2(kX86Mov32RI, rl_result.low_reg, 0x1);
}
branch2->target = NewLIR0(kPseudoTargetLabel);
} else {
- NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondA /* above - unsigned > */);
+ NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondA /* above - unsigned > */);
}
- NewLIR2(kX86Sbb32RI, rl_result.reg.GetReg(), 0);
+ NewLIR2(kX86Sbb32RI, rl_result.low_reg, 0);
if (unordered_gt) {
branch->target = NewLIR0(kPseudoTargetLabel);
}
@@ -357,14 +357,14 @@
rl_src2 = mir_graph_->GetSrcWide(mir, 2);
rl_src1 = LoadValueWide(rl_src1, kFPReg);
rl_src2 = LoadValueWide(rl_src2, kFPReg);
- NewLIR2(kX86UcomisdRR, S2d(rl_src1.reg.GetReg(), rl_src1.reg.GetHighReg()),
- S2d(rl_src2.reg.GetReg(), rl_src2.reg.GetHighReg()));
+ NewLIR2(kX86UcomisdRR, S2d(rl_src1.low_reg, rl_src1.high_reg),
+ S2d(rl_src2.low_reg, rl_src2.high_reg));
} else {
rl_src1 = mir_graph_->GetSrc(mir, 0);
rl_src2 = mir_graph_->GetSrc(mir, 1);
rl_src1 = LoadValue(rl_src1, kFPReg);
rl_src2 = LoadValue(rl_src2, kFPReg);
- NewLIR2(kX86UcomissRR, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ NewLIR2(kX86UcomissRR, rl_src1.low_reg, rl_src2.low_reg);
}
ConditionCode ccode = mir->meta.ccode;
switch (ccode) {
@@ -418,7 +418,7 @@
RegLocation rl_result;
rl_src = LoadValue(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0x80000000);
+ OpRegRegImm(kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
StoreValue(rl_dest, rl_result);
}
@@ -426,8 +426,8 @@
RegLocation rl_result;
rl_src = LoadValueWide(rl_src, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegImm(kOpAdd, rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg(), 0x80000000);
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ OpRegRegImm(kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
+ OpRegCopy(rl_result.low_reg, rl_src.low_reg);
StoreValueWide(rl_dest, rl_result);
}
@@ -436,8 +436,8 @@
RegLocation rl_dest = InlineTargetWide(info); // double place for result
rl_src = LoadValueWide(rl_src, kFPReg);
RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
- NewLIR2(kX86SqrtsdRR, S2d(rl_result.reg.GetReg(), rl_result.reg.GetHighReg()),
- S2d(rl_src.reg.GetReg(), rl_src.reg.GetHighReg()));
+ NewLIR2(kX86SqrtsdRR, S2d(rl_result.low_reg, rl_result.high_reg),
+ S2d(rl_src.low_reg, rl_src.high_reg));
StoreValueWide(rl_dest, rl_result);
return true;
}
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 362ab2e..5f04b7d 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -216,21 +216,21 @@
* mov t1, $false_case
* cmovnz result_reg, t1
*/
- const bool result_reg_same_as_src = (rl_src.location == kLocPhysReg && rl_src.reg.GetReg() == rl_result.reg.GetReg());
+ const bool result_reg_same_as_src = (rl_src.location == kLocPhysReg && rl_src.low_reg == rl_result.low_reg);
const bool true_zero_case = (true_val == 0 && false_val != 0 && !result_reg_same_as_src);
const bool false_zero_case = (false_val == 0 && true_val != 0 && !result_reg_same_as_src);
const bool catch_all_case = !(true_zero_case || false_zero_case);
if (true_zero_case || false_zero_case) {
- OpRegReg(kOpXor, rl_result.reg.GetReg(), rl_result.reg.GetReg());
+ OpRegReg(kOpXor, rl_result.low_reg, rl_result.low_reg);
}
if (true_zero_case || false_zero_case || catch_all_case) {
- OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
+ OpRegImm(kOpCmp, rl_src.low_reg, 0);
}
if (catch_all_case) {
- OpRegImm(kOpMov, rl_result.reg.GetReg(), true_val);
+ OpRegImm(kOpMov, rl_result.low_reg, true_val);
}
if (true_zero_case || false_zero_case || catch_all_case) {
@@ -239,7 +239,7 @@
OpRegImm(kOpMov, temp1_reg, immediateForTemp);
ConditionCode cc = false_zero_case ? kCondEq : kCondNe;
- OpCondRegReg(kOpCmov, cc, rl_result.reg.GetReg(), temp1_reg);
+ OpCondRegReg(kOpCmov, cc, rl_result.low_reg, temp1_reg);
FreeTemp(temp1_reg);
}
@@ -264,15 +264,15 @@
*/
// kMirOpSelect is generated just for conditional cases when comparison is done with zero.
- OpRegImm(kOpCmp, rl_src.reg.GetReg(), 0);
+ OpRegImm(kOpCmp, rl_src.low_reg, 0);
- if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) {
- OpCondRegReg(kOpCmov, kCondNe, rl_result.reg.GetReg(), rl_false.reg.GetReg());
- } else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) {
- OpCondRegReg(kOpCmov, kCondEq, rl_result.reg.GetReg(), rl_true.reg.GetReg());
+ if (rl_result.low_reg == rl_true.low_reg) {
+ OpCondRegReg(kOpCmov, kCondNe, rl_result.low_reg, rl_false.low_reg);
+ } else if (rl_result.low_reg == rl_false.low_reg) {
+ OpCondRegReg(kOpCmov, kCondEq, rl_result.low_reg, rl_true.low_reg);
} else {
- OpRegCopy(rl_result.reg.GetReg(), rl_true.reg.GetReg());
- OpCondRegReg(kOpCmov, kCondNe, rl_result.reg.GetReg(), rl_false.reg.GetReg());
+ OpRegCopy(rl_result.low_reg, rl_true.low_reg);
+ OpCondRegReg(kOpCmov, kCondNe, rl_result.low_reg, rl_false.low_reg);
}
}
@@ -337,8 +337,8 @@
LIR* taken = &block_label_list_[bb->taken];
LIR* not_taken = &block_label_list_[bb->fall_through];
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- int32_t low_reg = rl_src1.reg.GetReg();
- int32_t high_reg = rl_src1.reg.GetHighReg();
+ int32_t low_reg = rl_src1.low_reg;
+ int32_t high_reg = rl_src1.high_reg;
if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
int t_reg = AllocTemp();
@@ -461,7 +461,7 @@
// Assume that the result will be in EDX.
RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k32BitSolo, r2), INVALID_SREG, INVALID_SREG};
+ r2, INVALID_REG, INVALID_SREG, INVALID_SREG};
// handle div/rem by 1 special case.
if (imm == 1) {
@@ -472,7 +472,7 @@
// x % 1 == 0.
LoadConstantNoClobber(r0, 0);
// For this case, return the result in EAX.
- rl_result.reg.SetReg(r0);
+ rl_result.low_reg = r0;
}
} else if (imm == -1) { // handle 0x80000000 / -1 special case.
if (is_div) {
@@ -494,7 +494,7 @@
LoadConstantNoClobber(r0, 0);
}
// For this case, return the result in EAX.
- rl_result.reg.SetReg(r0);
+ rl_result.low_reg = r0;
} else {
CHECK(imm <= -2 || imm >= 2);
// Use H.S.Warren's Hacker's Delight Chapter 10 and
@@ -524,8 +524,8 @@
// We will need the value later.
if (rl_src.location == kLocPhysReg) {
// We can use it directly.
- DCHECK(rl_src.reg.GetReg() != r0 && rl_src.reg.GetReg() != r2);
- numerator_reg = rl_src.reg.GetReg();
+ DCHECK(rl_src.low_reg != r0 && rl_src.low_reg != r2);
+ numerator_reg = rl_src.low_reg;
} else {
LoadValueDirectFixed(rl_src, r1);
numerator_reg = r1;
@@ -582,7 +582,7 @@
NewLIR2(kX86Sub32RR, r0, r2);
// For this case, return the result in EAX.
- rl_result.reg.SetReg(r0);
+ rl_result.low_reg = r0;
}
}
@@ -638,9 +638,9 @@
// Result is in EAX for div and EDX for rem.
RegLocation rl_result = {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k32BitSolo, r0), INVALID_SREG, INVALID_SREG};
+ r0, INVALID_REG, INVALID_SREG, INVALID_SREG};
if (!is_div) {
- rl_result.reg.SetReg(r2);
+ rl_result.low_reg = r2;
}
return rl_result;
}
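
For readers decoding the 14-element brace initializers used for rl_result in the div/rem hunks above, the annotation below is a best guess at the reverted RegLocation field order; the flag names in the middle are inferred from nearby code in this diff (wide, fp, high_word, home and vec_len all appear elsewhere) and may not match the header exactly. The two entries before the trailing INVALID_SREG pair are the raw low/high register numbers, so r2 (EDX) carries the narrow result here.

    // {kLocPhysReg,         // location
    //  0,                   // wide (1 in the EDX:EAX multiply results later in this file)
    //  0, 0, 0, 0, 0, 0,    // six flag fields (defined/const/fp/core/ref/high_word; order assumed)
    //  1,                   // home
    //  kVectorNotUsed,      // vec_len
    //  r2,                  // low_reg  (EDX holds the value here)
    //  INVALID_REG,         // high_reg (narrow value, no high half)
    //  INVALID_SREG,        // s_reg_low
    //  INVALID_SREG};       // orig_sreg
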
@@ -662,22 +662,22 @@
* The reason is that the first copy will inadvertently clobber the second element with
* the first one thus yielding the wrong result. Thus we do a swap in that case.
*/
- if (rl_result.reg.GetReg() == rl_src2.reg.GetReg()) {
+ if (rl_result.low_reg == rl_src2.low_reg) {
std::swap(rl_src1, rl_src2);
}
// Pick the first integer as min/max.
- OpRegCopy(rl_result.reg.GetReg(), rl_src1.reg.GetReg());
+ OpRegCopy(rl_result.low_reg, rl_src1.low_reg);
// If the integers are both in the same register, then there is nothing else to do
// because they are equal and we have already moved one into the result.
- if (rl_src1.reg.GetReg() != rl_src2.reg.GetReg()) {
+ if (rl_src1.low_reg != rl_src2.low_reg) {
// It is possible we didn't pick correctly so do the actual comparison now.
- OpRegReg(kOpCmp, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
+ OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
// Conditionally move the other integer into the destination register.
ConditionCode condition_code = is_min ? kCondGt : kCondLt;
- OpCondRegReg(kOpCmov, condition_code, rl_result.reg.GetReg(), rl_src2.reg.GetReg());
+ OpCondRegReg(kOpCmov, condition_code, rl_result.low_reg, rl_src2.low_reg);
}
StoreValue(rl_dest, rl_result);
@@ -692,12 +692,12 @@
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (size == kLong) {
// Unaligned access is allowed on x86.
- LoadBaseDispWide(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), INVALID_SREG);
+ LoadBaseDispWide(rl_address.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
StoreValueWide(rl_dest, rl_result);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
// Unaligned access is allowed on x86.
- LoadBaseDisp(rl_address.reg.GetReg(), 0, rl_result.reg.GetReg(), size, INVALID_SREG);
+ LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, size, INVALID_SREG);
StoreValue(rl_dest, rl_result);
}
return true;
@@ -711,12 +711,12 @@
if (size == kLong) {
// Unaligned access is allowed on x86.
RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
- StoreBaseDispWide(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), rl_value.reg.GetHighReg());
+ StoreBaseDispWide(rl_address.low_reg, 0, rl_value.low_reg, rl_value.high_reg);
} else {
DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
// Unaligned access is allowed on x86.
RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
- StoreBaseDisp(rl_address.reg.GetReg(), 0, rl_value.reg.GetReg(), size);
+ StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, size);
}
return true;
}
@@ -776,13 +776,13 @@
if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
// Mark card for object assuming new value is stored.
FreeTemp(r0); // Temporarily release EAX for MarkGCCard().
- MarkGCCard(rl_new_value.reg.GetReg(), rl_object.reg.GetReg());
+ MarkGCCard(rl_new_value.low_reg, rl_object.low_reg);
LockTemp(r0);
}
RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
LoadValueDirect(rl_src_expected, r0);
- NewLIR5(kX86LockCmpxchgAR, rl_object.reg.GetReg(), rl_offset.reg.GetReg(), 0, 0, rl_new_value.reg.GetReg());
+ NewLIR5(kX86LockCmpxchgAR, rl_object.low_reg, rl_offset.low_reg, 0, 0, rl_new_value.low_reg);
FreeTemp(r0);
}
@@ -790,8 +790,8 @@
// Convert ZF to boolean
RegLocation rl_dest = InlineTarget(info); // boolean place for result
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondZ);
- NewLIR2(kX86Movzx8RR, rl_result.reg.GetReg(), rl_result.reg.GetReg());
+ NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondZ);
+ NewLIR2(kX86Movzx8RR, rl_result.low_reg, rl_result.low_reg);
StoreValue(rl_dest, rl_result);
return true;
}
@@ -830,11 +830,11 @@
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
int t_reg = AllocTemp();
- OpRegRegImm(kOpLsl, t_reg, rl_src.reg.GetReg(), second_bit - first_bit);
- OpRegRegReg(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), t_reg);
+ OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
+ OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
FreeTemp(t_reg);
if (first_bit != 0) {
- OpRegRegImm(kOpLsl, rl_result.reg.GetReg(), rl_result.reg.GetReg(), first_bit);
+ OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
}
}
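
The GenMultiplyByTwoBitMultiplier hunk above keeps the shift-add strategy for literals with exactly two bits set: the result is src * (2^first_bit + 2^second_bit), built from one shift, one add, and an optional final shift. A minimal sketch of the same arithmetic (the helper name is illustrative, not part of the tree):

    #include <cassert>
    #include <cstdint>

    static int32_t MulByTwoBitLiteral(int32_t src, int first_bit, int second_bit) {
      int32_t t = src << (second_bit - first_bit);  // t_reg = src << (second_bit - first_bit)
      int32_t result = src + t;                     // result = src + t_reg
      if (first_bit != 0) {
        result <<= first_bit;                       // result <<= first_bit
      }
      return result;
    }

    int main() {
      assert(MulByTwoBitLiteral(7, 1, 3) == 7 * 10);  // 10 == (1 << 1) + (1 << 3)
      assert(MulByTwoBitLiteral(5, 0, 2) == 5 * 5);   // 5  == (1 << 0) + (1 << 2)
      return 0;
    }
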
@@ -918,8 +918,8 @@
int64_t val = mir_graph_->ConstantValueWide(rl_src2);
if (val == 0) {
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- OpRegReg(kOpXor, rl_result.reg.GetReg(), rl_result.reg.GetReg());
- OpRegReg(kOpXor, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg());
+ OpRegReg(kOpXor, rl_result.low_reg, rl_result.low_reg);
+ OpRegReg(kOpXor, rl_result.high_reg, rl_result.high_reg);
StoreValueWide(rl_dest, rl_result);
return;
} else if (val == 1) {
@@ -952,8 +952,8 @@
// ECX <- 1H * 2L
// EAX <- 1L * 2H
if (src1_in_reg) {
- GenImulRegImm(r1, rl_src1.reg.GetHighReg(), val_lo);
- GenImulRegImm(r0, rl_src1.reg.GetReg(), val_hi);
+ GenImulRegImm(r1, rl_src1.high_reg, val_lo);
+ GenImulRegImm(r0, rl_src1.low_reg, val_hi);
} else {
GenImulMemImm(r1, GetSRegHi(rl_src1.s_reg_low), displacement + HIWORD_OFFSET, val_lo);
GenImulMemImm(r0, rl_src1.s_reg_low, displacement + LOWORD_OFFSET, val_hi);
@@ -967,7 +967,7 @@
// EDX:EAX <- 2L * 1L (double precision)
if (src1_in_reg) {
- NewLIR1(kX86Mul32DaR, rl_src1.reg.GetReg());
+ NewLIR1(kX86Mul32DaR, rl_src1.low_reg);
} else {
LIR *m = NewLIR2(kX86Mul32DaM, rX86_SP, displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(m, (displacement + LOWORD_OFFSET) >> 2,
@@ -978,8 +978,7 @@
NewLIR2(kX86Add32RR, r2, r1);
// Result is EDX:EAX
- RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k64BitPair, r0, r2),
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r2,
INVALID_SREG, INVALID_SREG};
StoreValueWide(rl_dest, rl_result);
return;
@@ -1001,7 +1000,7 @@
// ECX <- 1H
if (src1_in_reg) {
- NewLIR2(kX86Mov32RR, r1, rl_src1.reg.GetHighReg());
+ NewLIR2(kX86Mov32RR, r1, rl_src1.high_reg);
} else {
LoadBaseDisp(rX86_SP, SRegOffset(rl_src1.s_reg_low) + HIWORD_OFFSET, r1,
kWord, GetSRegHi(rl_src1.s_reg_low));
@@ -1011,7 +1010,7 @@
// Take advantage of the fact that the values are the same.
// ECX <- ECX * 2L (1H * 2L)
if (src2_in_reg) {
- NewLIR2(kX86Imul32RR, r1, rl_src2.reg.GetReg());
+ NewLIR2(kX86Imul32RR, r1, rl_src2.low_reg);
} else {
int displacement = SRegOffset(rl_src2.s_reg_low);
LIR *m = NewLIR3(kX86Imul32RM, r1, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1024,7 +1023,7 @@
} else {
// EAX <- 2H
if (src2_in_reg) {
- NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetHighReg());
+ NewLIR2(kX86Mov32RR, r0, rl_src2.high_reg);
} else {
LoadBaseDisp(rX86_SP, SRegOffset(rl_src2.s_reg_low) + HIWORD_OFFSET, r0,
kWord, GetSRegHi(rl_src2.s_reg_low));
@@ -1032,7 +1031,7 @@
// EAX <- EAX * 1L (2H * 1L)
if (src1_in_reg) {
- NewLIR2(kX86Imul32RR, r0, rl_src1.reg.GetReg());
+ NewLIR2(kX86Imul32RR, r0, rl_src1.low_reg);
} else {
int displacement = SRegOffset(rl_src1.s_reg_low);
LIR *m = NewLIR3(kX86Imul32RM, r0, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1042,7 +1041,7 @@
// ECX <- ECX * 2L (1H * 2L)
if (src2_in_reg) {
- NewLIR2(kX86Imul32RR, r1, rl_src2.reg.GetReg());
+ NewLIR2(kX86Imul32RR, r1, rl_src2.low_reg);
} else {
int displacement = SRegOffset(rl_src2.s_reg_low);
LIR *m = NewLIR3(kX86Imul32RM, r1, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1056,7 +1055,7 @@
// EAX <- 2L
if (src2_in_reg) {
- NewLIR2(kX86Mov32RR, r0, rl_src2.reg.GetReg());
+ NewLIR2(kX86Mov32RR, r0, rl_src2.low_reg);
} else {
LoadBaseDisp(rX86_SP, SRegOffset(rl_src2.s_reg_low) + LOWORD_OFFSET, r0,
kWord, rl_src2.s_reg_low);
@@ -1064,7 +1063,7 @@
// EDX:EAX <- 2L * 1L (double precision)
if (src1_in_reg) {
- NewLIR1(kX86Mul32DaR, rl_src1.reg.GetReg());
+ NewLIR1(kX86Mul32DaR, rl_src1.low_reg);
} else {
int displacement = SRegOffset(rl_src1.s_reg_low);
LIR *m = NewLIR2(kX86Mul32DaM, rX86_SP, displacement + LOWORD_OFFSET);
@@ -1076,8 +1075,8 @@
NewLIR2(kX86Add32RR, r2, r1);
// Result is EDX:EAX
- RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k64BitPair, r0, r2), INVALID_SREG, INVALID_SREG};
+ RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, r0, r2,
+ INVALID_SREG, INVALID_SREG};
StoreValueWide(rl_dest, rl_result);
}
@@ -1087,18 +1086,18 @@
X86OpCode x86op = GetOpcode(op, rl_dest, rl_src, false);
if (rl_src.location == kLocPhysReg) {
// Both operands are in registers.
- if (rl_dest.reg.GetReg() == rl_src.reg.GetHighReg()) {
+ if (rl_dest.low_reg == rl_src.high_reg) {
// The registers are the same, so we would clobber it before the use.
int temp_reg = AllocTemp();
- OpRegCopy(temp_reg, rl_dest.reg.GetReg());
- rl_src.reg.SetHighReg(temp_reg);
+ OpRegCopy(temp_reg, rl_dest.low_reg);
+ rl_src.high_reg = temp_reg;
}
- NewLIR2(x86op, rl_dest.reg.GetReg(), rl_src.reg.GetReg());
+ NewLIR2(x86op, rl_dest.low_reg, rl_src.low_reg);
x86op = GetOpcode(op, rl_dest, rl_src, true);
- NewLIR2(x86op, rl_dest.reg.GetHighReg(), rl_src.reg.GetHighReg());
- FreeTemp(rl_src.reg.GetReg());
- FreeTemp(rl_src.reg.GetHighReg());
+ NewLIR2(x86op, rl_dest.high_reg, rl_src.high_reg);
+ FreeTemp(rl_src.low_reg);
+ FreeTemp(rl_src.high_reg);
return;
}
@@ -1108,11 +1107,11 @@
int rBase = TargetReg(kSp);
int displacement = SRegOffset(rl_src.s_reg_low);
- LIR *lir = NewLIR3(x86op, rl_dest.reg.GetReg(), rBase, displacement + LOWORD_OFFSET);
+ LIR *lir = NewLIR3(x86op, rl_dest.low_reg, rBase, displacement + LOWORD_OFFSET);
AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
true /* is_load */, true /* is64bit */);
x86op = GetOpcode(op, rl_dest, rl_src, true);
- lir = NewLIR3(x86op, rl_dest.reg.GetHighReg(), rBase, displacement + HIWORD_OFFSET);
+ lir = NewLIR3(x86op, rl_dest.high_reg, rBase, displacement + HIWORD_OFFSET);
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
true /* is_load */, true /* is64bit */);
}
@@ -1139,15 +1138,15 @@
int rBase = TargetReg(kSp);
int displacement = SRegOffset(rl_dest.s_reg_low);
- LIR *lir = NewLIR3(x86op, rBase, displacement + LOWORD_OFFSET, rl_src.reg.GetReg());
+ LIR *lir = NewLIR3(x86op, rBase, displacement + LOWORD_OFFSET, rl_src.low_reg);
AnnotateDalvikRegAccess(lir, (displacement + LOWORD_OFFSET) >> 2,
false /* is_load */, true /* is64bit */);
x86op = GetOpcode(op, rl_dest, rl_src, true);
- lir = NewLIR3(x86op, rBase, displacement + HIWORD_OFFSET, rl_src.reg.GetHighReg());
+ lir = NewLIR3(x86op, rBase, displacement + HIWORD_OFFSET, rl_src.high_reg);
AnnotateDalvikRegAccess(lir, (displacement + HIWORD_OFFSET) >> 2,
false /* is_load */, true /* is64bit */);
- FreeTemp(rl_src.reg.GetReg());
- FreeTemp(rl_src.reg.GetHighReg());
+ FreeTemp(rl_src.low_reg);
+ FreeTemp(rl_src.high_reg);
}
void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src1,
@@ -1189,12 +1188,12 @@
// Get one of the source operands into temporary register.
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- if (IsTemp(rl_src1.reg.GetReg()) && IsTemp(rl_src1.reg.GetHighReg())) {
+ if (IsTemp(rl_src1.low_reg) && IsTemp(rl_src1.high_reg)) {
GenLongRegOrMemOp(rl_src1, rl_src2, op);
} else if (is_commutative) {
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
// We need at least one of them to be a temporary.
- if (!(IsTemp(rl_src2.reg.GetReg()) && IsTemp(rl_src2.reg.GetHighReg()))) {
+ if (!(IsTemp(rl_src2.low_reg) && IsTemp(rl_src2.high_reg))) {
rl_src1 = ForceTempWide(rl_src1);
}
GenLongRegOrMemOp(rl_src1, rl_src2, op);
@@ -1235,16 +1234,15 @@
void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_result = ForceTempWide(rl_src);
- if (((rl_dest.location == kLocPhysReg) && (rl_src.location == kLocPhysReg)) &&
- ((rl_dest.reg.GetReg() == rl_src.reg.GetHighReg()))) {
+ if (rl_dest.low_reg == rl_src.high_reg) {
// The registers are the same, so we would clobber it before the use.
int temp_reg = AllocTemp();
- OpRegCopy(temp_reg, rl_result.reg.GetReg());
- rl_result.reg.SetHighReg(temp_reg);
+ OpRegCopy(temp_reg, rl_result.low_reg);
+ rl_result.high_reg = temp_reg;
}
- OpRegReg(kOpNeg, rl_result.reg.GetReg(), rl_result.reg.GetReg()); // rLow = -rLow
- OpRegImm(kOpAdc, rl_result.reg.GetHighReg(), 0); // rHigh = rHigh + CF
- OpRegReg(kOpNeg, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg()); // rHigh = -rHigh
+ OpRegReg(kOpNeg, rl_result.low_reg, rl_result.low_reg); // rLow = -rLow
+ OpRegImm(kOpAdc, rl_result.high_reg, 0); // rHigh = rHigh + CF
+ OpRegReg(kOpNeg, rl_result.high_reg, rl_result.high_reg); // rHigh = -rHigh
StoreValueWide(rl_dest, rl_result);
}
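
The GenNegLong hunk above negates a 64-bit value held in a low/high register pair: negate the low half (on x86 NEG sets the carry flag when the operand is non-zero), fold the carry into the high half, then negate the high half. A minimal sketch of the same arithmetic in portable code (helper name illustrative only):

    #include <cassert>
    #include <cstdint>

    static void NegateI64Pair(uint32_t* lo, uint32_t* hi) {
      uint32_t carry = (*lo != 0) ? 1 : 0;  // CF produced by negating the low half
      *lo = 0u - *lo;                       // rLow = -rLow
      *hi = *hi + carry;                    // rHigh = rHigh + CF
      *hi = 0u - *hi;                       // rHigh = -rHigh
    }

    int main() {
      uint32_t lo = 1, hi = 0;              // the value 1
      NegateI64Pair(&lo, &hi);
      assert(lo == 0xFFFFFFFFu && hi == 0xFFFFFFFFu);  // -1 as a 64-bit pair
      return 0;
    }
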
@@ -1286,29 +1284,29 @@
// If index is constant, just fold it into the data offset
data_offset += constant_index_value << scale;
// treat as non array below
- rl_index.reg = RegStorage(RegStorage::k32BitSolo, INVALID_REG);
+ rl_index.low_reg = INVALID_REG;
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
if (constant_index) {
- GenMemImmedCheck(kCondLs, rl_array.reg.GetReg(), len_offset,
+ GenMemImmedCheck(kCondLs, rl_array.low_reg, len_offset,
constant_index_value, kThrowConstantArrayBounds);
} else {
- GenRegMemCheck(kCondUge, rl_index.reg.GetReg(), rl_array.reg.GetReg(),
+ GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg,
len_offset, kThrowArrayBounds);
}
}
rl_result = EvalLoc(rl_dest, reg_class, true);
if ((size == kLong) || (size == kDouble)) {
- LoadBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale, data_offset, rl_result.reg.GetReg(),
- rl_result.reg.GetHighReg(), size, INVALID_SREG);
+ LoadBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_result.low_reg,
+ rl_result.high_reg, size, INVALID_SREG);
StoreValueWide(rl_dest, rl_result);
} else {
- LoadBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale,
- data_offset, rl_result.reg.GetReg(), INVALID_REG, size,
+ LoadBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale,
+ data_offset, rl_result.low_reg, INVALID_REG, size,
INVALID_SREG);
StoreValue(rl_dest, rl_result);
}
@@ -1340,18 +1338,18 @@
constant_index_value = mir_graph_->ConstantValue(rl_index);
data_offset += constant_index_value << scale;
// treat as non array below
- rl_index.reg = RegStorage(RegStorage::k32BitSolo, INVALID_REG);
+ rl_index.low_reg = INVALID_REG;
}
/* null object? */
- GenNullCheck(rl_array.s_reg_low, rl_array.reg.GetReg(), opt_flags);
+ GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
if (constant_index) {
- GenMemImmedCheck(kCondLs, rl_array.reg.GetReg(), len_offset,
+ GenMemImmedCheck(kCondLs, rl_array.low_reg, len_offset,
constant_index_value, kThrowConstantArrayBounds);
} else {
- GenRegMemCheck(kCondUge, rl_index.reg.GetReg(), rl_array.reg.GetReg(),
+ GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg,
len_offset, kThrowArrayBounds);
}
}
@@ -1361,21 +1359,21 @@
rl_src = LoadValue(rl_src, reg_class);
}
// If the src reg can't be byte accessed, move it to a temp first.
- if ((size == kSignedByte || size == kUnsignedByte) && rl_src.reg.GetReg() >= 4) {
+ if ((size == kSignedByte || size == kUnsignedByte) && rl_src.low_reg >= 4) {
int temp = AllocTemp();
- OpRegCopy(temp, rl_src.reg.GetReg());
- StoreBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale, data_offset, temp,
+ OpRegCopy(temp, rl_src.low_reg);
+ StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, temp,
INVALID_REG, size, INVALID_SREG);
} else {
- StoreBaseIndexedDisp(rl_array.reg.GetReg(), rl_index.reg.GetReg(), scale, data_offset, rl_src.reg.GetReg(),
- rl_src.wide ? rl_src.reg.GetHighReg() : INVALID_REG, size, INVALID_SREG);
+ StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_src.low_reg,
+ rl_src.high_reg, size, INVALID_SREG);
}
if (card_mark) {
// Free rl_index if its a temp. Ensures there are 2 free regs for card mark.
if (!constant_index) {
- FreeTemp(rl_index.reg.GetReg());
+ FreeTemp(rl_index.low_reg);
}
- MarkGCCard(rl_src.reg.GetReg(), rl_array.reg.GetReg());
+ MarkGCCard(rl_src.low_reg, rl_array.low_reg);
}
}
@@ -1387,52 +1385,52 @@
case Instruction::SHL_LONG_2ADDR:
DCHECK_NE(shift_amount, 1); // Prevent a double store from happening.
if (shift_amount == 32) {
- OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetReg());
- LoadConstant(rl_result.reg.GetReg(), 0);
+ OpRegCopy(rl_result.high_reg, rl_src.low_reg);
+ LoadConstant(rl_result.low_reg, 0);
} else if (shift_amount > 31) {
- OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetReg());
- FreeTemp(rl_src.reg.GetHighReg());
- NewLIR2(kX86Sal32RI, rl_result.reg.GetHighReg(), shift_amount - 32);
- LoadConstant(rl_result.reg.GetReg(), 0);
+ OpRegCopy(rl_result.high_reg, rl_src.low_reg);
+ FreeTemp(rl_src.high_reg);
+ NewLIR2(kX86Sal32RI, rl_result.high_reg, shift_amount - 32);
+ LoadConstant(rl_result.low_reg, 0);
} else {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
- OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
- NewLIR3(kX86Shld32RRI, rl_result.reg.GetHighReg(), rl_result.reg.GetReg(), shift_amount);
- NewLIR2(kX86Sal32RI, rl_result.reg.GetReg(), shift_amount);
+ OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+ OpRegCopy(rl_result.high_reg, rl_src.high_reg);
+ NewLIR3(kX86Shld32RRI, rl_result.high_reg, rl_result.low_reg, shift_amount);
+ NewLIR2(kX86Sal32RI, rl_result.low_reg, shift_amount);
}
break;
case Instruction::SHR_LONG:
case Instruction::SHR_LONG_2ADDR:
if (shift_amount == 32) {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
- OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
- NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), 31);
+ OpRegCopy(rl_result.low_reg, rl_src.high_reg);
+ OpRegCopy(rl_result.high_reg, rl_src.high_reg);
+ NewLIR2(kX86Sar32RI, rl_result.high_reg, 31);
} else if (shift_amount > 31) {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
- OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
- NewLIR2(kX86Sar32RI, rl_result.reg.GetReg(), shift_amount - 32);
- NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), 31);
+ OpRegCopy(rl_result.low_reg, rl_src.high_reg);
+ OpRegCopy(rl_result.high_reg, rl_src.high_reg);
+ NewLIR2(kX86Sar32RI, rl_result.low_reg, shift_amount - 32);
+ NewLIR2(kX86Sar32RI, rl_result.high_reg, 31);
} else {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
- OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
- NewLIR3(kX86Shrd32RRI, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), shift_amount);
- NewLIR2(kX86Sar32RI, rl_result.reg.GetHighReg(), shift_amount);
+ OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+ OpRegCopy(rl_result.high_reg, rl_src.high_reg);
+ NewLIR3(kX86Shrd32RRI, rl_result.low_reg, rl_result.high_reg, shift_amount);
+ NewLIR2(kX86Sar32RI, rl_result.high_reg, shift_amount);
}
break;
case Instruction::USHR_LONG:
case Instruction::USHR_LONG_2ADDR:
if (shift_amount == 32) {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
- LoadConstant(rl_result.reg.GetHighReg(), 0);
+ OpRegCopy(rl_result.low_reg, rl_src.high_reg);
+ LoadConstant(rl_result.high_reg, 0);
} else if (shift_amount > 31) {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetHighReg());
- NewLIR2(kX86Shr32RI, rl_result.reg.GetReg(), shift_amount - 32);
- LoadConstant(rl_result.reg.GetHighReg(), 0);
+ OpRegCopy(rl_result.low_reg, rl_src.high_reg);
+ NewLIR2(kX86Shr32RI, rl_result.low_reg, shift_amount - 32);
+ LoadConstant(rl_result.high_reg, 0);
} else {
- OpRegCopy(rl_result.reg.GetReg(), rl_src.reg.GetReg());
- OpRegCopy(rl_result.reg.GetHighReg(), rl_src.reg.GetHighReg());
- NewLIR3(kX86Shrd32RRI, rl_result.reg.GetReg(), rl_result.reg.GetHighReg(), shift_amount);
- NewLIR2(kX86Shr32RI, rl_result.reg.GetHighReg(), shift_amount);
+ OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+ OpRegCopy(rl_result.high_reg, rl_src.high_reg);
+ NewLIR3(kX86Shrd32RRI, rl_result.low_reg, rl_result.high_reg, shift_amount);
+ NewLIR2(kX86Shr32RI, rl_result.high_reg, shift_amount);
}
break;
default:
@@ -1569,7 +1567,7 @@
int32_t value) {
bool in_mem = loc.location != kLocPhysReg;
bool byte_imm = IS_SIMM8(value);
- DCHECK(in_mem || !IsFpReg(loc.reg.GetReg()));
+ DCHECK(in_mem || !IsFpReg(loc.low_reg));
switch (op) {
case Instruction::ADD_LONG:
case Instruction::ADD_LONG_2ADDR:
@@ -1649,15 +1647,15 @@
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
DCHECK_EQ(rl_result.location, kLocPhysReg);
- DCHECK(!IsFpReg(rl_result.reg.GetReg()));
+ DCHECK(!IsFpReg(rl_result.low_reg));
if (!IsNoOp(op, val_lo)) {
X86OpCode x86op = GetOpcode(op, rl_result, false, val_lo);
- NewLIR2(x86op, rl_result.reg.GetReg(), val_lo);
+ NewLIR2(x86op, rl_result.low_reg, val_lo);
}
if (!IsNoOp(op, val_hi)) {
X86OpCode x86op = GetOpcode(op, rl_result, true, val_hi);
- NewLIR2(x86op, rl_result.reg.GetHighReg(), val_hi);
+ NewLIR2(x86op, rl_result.high_reg, val_hi);
}
StoreValueWide(rl_dest, rl_result);
}
@@ -1673,15 +1671,15 @@
// Can we do this directly into the destination registers?
if (rl_dest.location == kLocPhysReg && rl_src1.location == kLocPhysReg &&
- rl_dest.reg.GetReg() == rl_src1.reg.GetReg() && rl_dest.reg.GetHighReg() == rl_src1.reg.GetHighReg() &&
- !IsFpReg(rl_dest.reg.GetReg())) {
+ rl_dest.low_reg == rl_src1.low_reg && rl_dest.high_reg == rl_src1.high_reg &&
+ !IsFpReg(rl_dest.low_reg)) {
if (!IsNoOp(op, val_lo)) {
X86OpCode x86op = GetOpcode(op, rl_dest, false, val_lo);
- NewLIR2(x86op, rl_dest.reg.GetReg(), val_lo);
+ NewLIR2(x86op, rl_dest.low_reg, val_lo);
}
if (!IsNoOp(op, val_hi)) {
X86OpCode x86op = GetOpcode(op, rl_dest, true, val_hi);
- NewLIR2(x86op, rl_dest.reg.GetHighReg(), val_hi);
+ NewLIR2(x86op, rl_dest.high_reg, val_hi);
}
StoreFinalValueWide(rl_dest, rl_dest);
@@ -1695,11 +1693,11 @@
RegLocation rl_result = ForceTempWide(rl_src1);
if (!IsNoOp(op, val_lo)) {
X86OpCode x86op = GetOpcode(op, rl_result, false, val_lo);
- NewLIR2(x86op, rl_result.reg.GetReg(), val_lo);
+ NewLIR2(x86op, rl_result.low_reg, val_lo);
}
if (!IsNoOp(op, val_hi)) {
X86OpCode x86op = GetOpcode(op, rl_result, true, val_hi);
- NewLIR2(x86op, rl_result.reg.GetHighReg(), val_hi);
+ NewLIR2(x86op, rl_result.high_reg, val_hi);
}
StoreFinalValueWide(rl_dest, rl_result);
@@ -1711,17 +1709,17 @@
RegLocation rl_dest, RegLocation rl_src) {
RegLocation object = LoadValue(rl_src, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
- int result_reg = rl_result.reg.GetReg();
+ int result_reg = rl_result.low_reg;
// SETcc only works with EAX..EDX.
- if (result_reg == object.reg.GetReg() || result_reg >= 4) {
+ if (result_reg == object.low_reg || result_reg >= 4) {
result_reg = AllocTypedTemp(false, kCoreReg);
DCHECK_LT(result_reg, 4);
}
// Assume that there is no match.
LoadConstant(result_reg, 0);
- LIR* null_branchover = OpCmpImmBranch(kCondEq, object.reg.GetReg(), 0, NULL);
+ LIR* null_branchover = OpCmpImmBranch(kCondEq, object.low_reg, 0, NULL);
int check_class = AllocTypedTemp(false, kCoreReg);
@@ -1732,11 +1730,11 @@
if (rl_method.location == kLocPhysReg) {
if (use_declaring_class) {
- LoadWordDisp(rl_method.reg.GetReg(),
+ LoadWordDisp(rl_method.low_reg,
mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
check_class);
} else {
- LoadWordDisp(rl_method.reg.GetReg(),
+ LoadWordDisp(rl_method.low_reg,
mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
check_class);
LoadWordDisp(check_class, offset_of_type, check_class);
@@ -1757,7 +1755,7 @@
// Compare the computed class to the class in the object.
DCHECK_EQ(object.location, kLocPhysReg);
- OpRegMem(kOpCmp, check_class, object.reg.GetReg(),
+ OpRegMem(kOpCmp, check_class, object.low_reg,
mirror::Object::ClassOffset().Int32Value());
// Set the low byte of the result to 0 or 1 from the compare condition code.
@@ -1767,7 +1765,7 @@
null_branchover->target = target;
FreeTemp(check_class);
if (IsTemp(result_reg)) {
- OpRegCopy(rl_result.reg.GetReg(), result_reg);
+ OpRegCopy(rl_result.low_reg, result_reg);
FreeTemp(result_reg);
}
StoreValue(rl_dest, rl_result);
@@ -1820,7 +1818,7 @@
RegLocation rl_result = GetReturn(false);
// SETcc only works with EAX..EDX.
- DCHECK_LT(rl_result.reg.GetReg(), 4);
+ DCHECK_LT(rl_result.low_reg, 4);
// Is the class NULL?
LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
@@ -1832,13 +1830,13 @@
LIR* branchover = nullptr;
if (type_known_final) {
// Ensure top 3 bytes of result are 0.
- LoadConstant(rl_result.reg.GetReg(), 0);
+ LoadConstant(rl_result.low_reg, 0);
OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));
// Set the low byte of the result to 0 or 1 from the compare condition code.
- NewLIR2(kX86Set8R, rl_result.reg.GetReg(), kX86CondEq);
+ NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondEq);
} else {
if (!type_known_abstract) {
- LoadConstant(rl_result.reg.GetReg(), 1); // Assume result succeeds.
+ LoadConstant(rl_result.low_reg, 1); // Assume result succeeds.
branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
}
OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
@@ -1966,7 +1964,7 @@
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_result = UpdateLoc(rl_dest);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg());
+ OpRegReg(op, rl_result.low_reg, rl_lhs.low_reg);
} else {
if (shift_op) {
// X86 doesn't require masking and must use ECX.
@@ -1981,9 +1979,9 @@
OpMemReg(op, rl_result, t_reg);
FreeTemp(t_reg);
return;
- } else if (!IsFpReg(rl_result.reg.GetReg())) {
+ } else if (!IsFpReg(rl_result.low_reg)) {
// Can do this directly into the result register
- OpRegReg(op, rl_result.reg.GetReg(), t_reg);
+ OpRegReg(op, rl_result.low_reg, t_reg);
FreeTemp(t_reg);
StoreFinalValue(rl_dest, rl_result);
return;
@@ -1992,7 +1990,7 @@
// Three address form, or we can't do directly.
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), t_reg);
+ OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, t_reg);
FreeTemp(t_reg);
} else {
// Multiply is 3 operand only (sort of).
@@ -2003,11 +2001,11 @@
// Can we do this from memory directly?
rl_rhs = UpdateLoc(rl_rhs);
if (rl_rhs.location != kLocPhysReg) {
- OpRegMem(op, rl_result.reg.GetReg(), rl_rhs);
+ OpRegMem(op, rl_result.low_reg, rl_rhs);
StoreFinalValue(rl_dest, rl_result);
return;
- } else if (!IsFpReg(rl_rhs.reg.GetReg())) {
- OpRegReg(op, rl_result.reg.GetReg(), rl_rhs.reg.GetReg());
+ } else if (!IsFpReg(rl_rhs.low_reg)) {
+ OpRegReg(op, rl_result.low_reg, rl_rhs.low_reg);
StoreFinalValue(rl_dest, rl_result);
return;
}
@@ -2015,17 +2013,17 @@
rl_rhs = LoadValue(rl_rhs, kCoreReg);
if (rl_result.location != kLocPhysReg) {
// Okay, we can do this into memory.
- OpMemReg(op, rl_result, rl_rhs.reg.GetReg());
+ OpMemReg(op, rl_result, rl_rhs.low_reg);
return;
- } else if (!IsFpReg(rl_result.reg.GetReg())) {
+ } else if (!IsFpReg(rl_result.low_reg)) {
// Can do this directly into the result register.
- OpRegReg(op, rl_result.reg.GetReg(), rl_rhs.reg.GetReg());
+ OpRegReg(op, rl_result.low_reg, rl_rhs.low_reg);
StoreFinalValue(rl_dest, rl_result);
return;
} else {
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
+ OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg);
}
} else {
// Try to use reg/memory instructions.
@@ -2037,34 +2035,34 @@
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_rhs = LoadValue(rl_rhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
+ OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg);
} else {
// We can optimize by moving to result and using memory operands.
if (rl_rhs.location != kLocPhysReg) {
// Force LHS into result.
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- LoadValueDirect(rl_lhs, rl_result.reg.GetReg());
- OpRegMem(op, rl_result.reg.GetReg(), rl_rhs);
+ LoadValueDirect(rl_lhs, rl_result.low_reg);
+ OpRegMem(op, rl_result.low_reg, rl_rhs);
} else if (rl_lhs.location != kLocPhysReg) {
// RHS is in a register; LHS is in memory.
if (op != kOpSub) {
// Force RHS into result and operate on memory.
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegCopy(rl_result.reg.GetReg(), rl_rhs.reg.GetReg());
- OpRegMem(op, rl_result.reg.GetReg(), rl_lhs);
+ OpRegCopy(rl_result.low_reg, rl_rhs.low_reg);
+ OpRegMem(op, rl_result.low_reg, rl_lhs);
} else {
// Subtraction isn't commutative.
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_rhs = LoadValue(rl_rhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
+ OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg);
}
} else {
// Both are in registers.
rl_lhs = LoadValue(rl_lhs, kCoreReg);
rl_rhs = LoadValue(rl_rhs, kCoreReg);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
- OpRegRegReg(op, rl_result.reg.GetReg(), rl_lhs.reg.GetReg(), rl_rhs.reg.GetReg());
+ OpRegRegReg(op, rl_result.low_reg, rl_lhs.low_reg, rl_rhs.low_reg);
}
}
}
@@ -2075,10 +2073,10 @@
bool X86Mir2Lir::IsOperationSafeWithoutTemps(RegLocation rl_lhs, RegLocation rl_rhs) {
// If we have non-core registers, then we can't do good things.
- if (rl_lhs.location == kLocPhysReg && IsFpReg(rl_lhs.reg.GetReg())) {
+ if (rl_lhs.location == kLocPhysReg && IsFpReg(rl_lhs.low_reg)) {
return false;
}
- if (rl_rhs.location == kLocPhysReg && IsFpReg(rl_rhs.reg.GetReg())) {
+ if (rl_rhs.location == kLocPhysReg && IsFpReg(rl_rhs.low_reg)) {
return false;
}
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 0b8e1ee..eea7191 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -49,19 +49,23 @@
};
RegLocation X86Mir2Lir::LocCReturn() {
- return x86_loc_c_return;
+ RegLocation res = X86_LOC_C_RETURN;
+ return res;
}
RegLocation X86Mir2Lir::LocCReturnWide() {
- return x86_loc_c_return_wide;
+ RegLocation res = X86_LOC_C_RETURN_WIDE;
+ return res;
}
RegLocation X86Mir2Lir::LocCReturnFloat() {
- return x86_loc_c_return_float;
+ RegLocation res = X86_LOC_C_RETURN_FLOAT;
+ return res;
}
RegLocation X86Mir2Lir::LocCReturnDouble() {
- return x86_loc_c_return_double;
+ RegLocation res = X86_LOC_C_RETURN_DOUBLE;
+ return res;
}
// Return a target-dependent special register.
@@ -386,19 +390,19 @@
RegLocation X86Mir2Lir::GetReturnWideAlt() {
RegLocation res = LocCReturnWide();
- CHECK(res.reg.GetReg() == rAX);
- CHECK(res.reg.GetHighReg() == rDX);
+ CHECK(res.low_reg == rAX);
+ CHECK(res.high_reg == rDX);
Clobber(rAX);
Clobber(rDX);
MarkInUse(rAX);
MarkInUse(rDX);
- MarkPair(res.reg.GetReg(), res.reg.GetHighReg());
+ MarkPair(res.low_reg, res.high_reg);
return res;
}
RegLocation X86Mir2Lir::GetReturnAlt() {
RegLocation res = LocCReturn();
- res.reg.SetReg(rDX);
+ res.low_reg = rDX;
Clobber(rDX);
MarkInUse(rDX);
return res;
@@ -426,21 +430,27 @@
NewLIR0(kX86Mfence);
#endif
}
-
-// Alloc a pair of core registers, or a double.
-RegStorage X86Mir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
+/*
+ * Alloc a pair of core registers, or a double. Low reg in low byte,
+ * high reg in next byte.
+ */
+int X86Mir2Lir::AllocTypedTempPair(bool fp_hint,
+ int reg_class) {
int high_reg;
int low_reg;
+ int res = 0;
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
low_reg = AllocTempDouble();
high_reg = low_reg; // only one allocated!
- // TODO: take advantage of 64-bit notation.
- return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
+ res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+ return res;
}
+
low_reg = AllocTemp();
high_reg = AllocTemp();
- return RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
+ res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+ return res;
}
int X86Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
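
For reference, a minimal standalone sketch of the low-byte/next-byte pair encoding that AllocTypedTempPair returns and that EvalLocWide decodes further below; PackRegPair and UnpackRegPair are hypothetical helper names used only for this illustration, not functions in this patch.

#include <cassert>
#include <cstdint>

// Low reg in the low byte, high reg in the next byte (as in AllocTypedTempPair).
static int32_t PackRegPair(int low_reg, int high_reg) {
  return (low_reg & 0xff) | ((high_reg & 0xff) << 8);
}

// Inverse decode (as EvalLocWide does with "& 0xff" and ">> 8").
static void UnpackRegPair(int32_t pair, int* low_reg, int* high_reg) {
  *low_reg = pair & 0xff;
  *high_reg = (pair >> 8) & 0xff;
}

int main() {
  int low, high;
  UnpackRegPair(PackRegPair(3, 7), &low, &high);
  assert(low == 3 && high == 7);
  // For a double only one register is allocated ("only one allocated!" above),
  // so both bytes carry the same id.
  UnpackRegPair(PackRegPair(16, 16), &low, &high);
  assert(low == 16 && high == 16);
  return 0;
}
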
@@ -483,11 +493,11 @@
void X86Mir2Lir::FreeRegLocTemps(RegLocation rl_keep,
RegLocation rl_free) {
- if ((rl_free.reg.GetReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetReg() != rl_keep.reg.GetHighReg()) &&
- (rl_free.reg.GetHighReg() != rl_keep.reg.GetReg()) && (rl_free.reg.GetHighReg() != rl_keep.reg.GetHighReg())) {
+ if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
+ (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
// No overlap, free both
- FreeTemp(rl_free.reg.GetReg());
- FreeTemp(rl_free.reg.GetHighReg());
+ FreeTemp(rl_free.low_reg);
+ FreeTemp(rl_free.high_reg);
}
}
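
A worked illustration of the overlap test above: the temps of rl_free are released only when neither of its registers matches either register of rl_keep. PairsOverlap is a hypothetical stand-in used just to show the predicate.

#include <cassert>

static bool PairsOverlap(int keep_low, int keep_high, int free_low, int free_high) {
  return free_low == keep_low || free_low == keep_high ||
         free_high == keep_low || free_high == keep_high;
}

int main() {
  // keep = {2, 3}, free = {0, 2}: the pairs share register 2, so FreeRegLocTemps
  // frees neither temp; with a disjoint pair such as {0, 1} it frees both.
  assert(PairsOverlap(2, 3, 0, 2));
  assert(!PairsOverlap(2, 3, 0, 1));
  return 0;
}
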
@@ -591,11 +601,11 @@
if (match) {
// We can reuse; update the register usage info.
+ loc.low_reg = info_lo->reg;
+ loc.high_reg = info_lo->reg; // Play nice with existing code.
loc.location = kLocPhysReg;
loc.vec_len = kVectorLength8;
- // TODO: use k64BitVector
- loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_lo->reg);
- DCHECK(IsFpReg(loc.reg.GetReg()));
+ DCHECK(IsFpReg(loc.low_reg));
return loc;
}
// We can't easily reuse; clobber and free any overlaps.
@@ -625,10 +635,11 @@
}
if (match) {
// Can reuse - update the register usage info
- loc.reg = RegStorage(RegStorage::k64BitPair, info_lo->reg, info_hi->reg);
+ loc.low_reg = info_lo->reg;
+ loc.high_reg = info_hi->reg;
loc.location = kLocPhysReg;
- MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
- DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
+ MarkPair(loc.low_reg, loc.high_reg);
+ DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
return loc;
}
// Can't easily reuse - clobber and free any overlaps
@@ -652,6 +663,7 @@
// TODO: Reunify with common code after 'pair mess' has been fixed
RegLocation X86Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
DCHECK(loc.wide);
+ int32_t new_regs;
int32_t low_reg;
int32_t high_reg;
@@ -659,37 +671,38 @@
/* If it is already in a register, we can assume proper form. Is it the right reg class? */
if (loc.location == kLocPhysReg) {
- DCHECK_EQ(IsFpReg(loc.reg.GetReg()), loc.IsVectorScalar());
- if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
+ DCHECK_EQ(IsFpReg(loc.low_reg), loc.IsVectorScalar());
+ if (!RegClassMatches(reg_class, loc.low_reg)) {
/* It is the wrong register class. Reallocate and copy. */
- if (!IsFpReg(loc.reg.GetReg())) {
+ if (!IsFpReg(loc.low_reg)) {
// We want this in a FP reg, and it is in core registers.
DCHECK(reg_class != kCoreReg);
// Allocate this into any FP reg, and mark it with the right size.
low_reg = AllocTypedTemp(true, reg_class);
- OpVectorRegCopyWide(low_reg, loc.reg.GetReg(), loc.reg.GetHighReg());
- CopyRegInfo(low_reg, loc.reg.GetReg());
- Clobber(loc.reg.GetReg());
- Clobber(loc.reg.GetHighReg());
- loc.reg.SetReg(low_reg);
- loc.reg.SetHighReg(low_reg); // Play nice with existing code.
+ OpVectorRegCopyWide(low_reg, loc.low_reg, loc.high_reg);
+ CopyRegInfo(low_reg, loc.low_reg);
+ Clobber(loc.low_reg);
+ Clobber(loc.high_reg);
+ loc.low_reg = low_reg;
+ loc.high_reg = low_reg; // Play nice with existing code.
loc.vec_len = kVectorLength8;
} else {
// The value is in a FP register, and we want it in a pair of core registers.
DCHECK_EQ(reg_class, kCoreReg);
- DCHECK_EQ(loc.reg.GetReg(), loc.reg.GetHighReg());
- RegStorage new_regs = AllocTypedTempWide(false, kCoreReg); // Force to core registers.
- low_reg = new_regs.GetReg();
- high_reg = new_regs.GetHighReg();
+ DCHECK_EQ(loc.low_reg, loc.high_reg);
+ new_regs = AllocTypedTempPair(false, kCoreReg); // Force to core registers.
+ low_reg = new_regs & 0xff;
+ high_reg = (new_regs >> 8) & 0xff;
DCHECK_NE(low_reg, high_reg);
- OpRegCopyWide(low_reg, high_reg, loc.reg.GetReg(), loc.reg.GetHighReg());
- CopyRegInfo(low_reg, loc.reg.GetReg());
- CopyRegInfo(high_reg, loc.reg.GetHighReg());
- Clobber(loc.reg.GetReg());
- Clobber(loc.reg.GetHighReg());
- loc.reg = new_regs;
- MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
- DCHECK(!IsFpReg(loc.reg.GetReg()) || ((loc.reg.GetReg() & 0x1) == 0));
+ OpRegCopyWide(low_reg, high_reg, loc.low_reg, loc.high_reg);
+ CopyRegInfo(low_reg, loc.low_reg);
+ CopyRegInfo(high_reg, loc.high_reg);
+ Clobber(loc.low_reg);
+ Clobber(loc.high_reg);
+ loc.low_reg = low_reg;
+ loc.high_reg = high_reg;
+ MarkPair(loc.low_reg, loc.high_reg);
+ DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
}
}
return loc;
@@ -698,20 +711,21 @@
DCHECK_NE(loc.s_reg_low, INVALID_SREG);
DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);
- loc.reg = AllocTypedTempWide(loc.fp, reg_class);
+ new_regs = AllocTypedTempPair(loc.fp, reg_class);
+ loc.low_reg = new_regs & 0xff;
+ loc.high_reg = (new_regs >> 8) & 0xff;
- // FIXME: take advantage of RegStorage notation.
- if (loc.reg.GetReg() == loc.reg.GetHighReg()) {
- DCHECK(IsFpReg(loc.reg.GetReg()));
+ if (loc.low_reg == loc.high_reg) {
+ DCHECK(IsFpReg(loc.low_reg));
loc.vec_len = kVectorLength8;
} else {
- MarkPair(loc.reg.GetReg(), loc.reg.GetHighReg());
+ MarkPair(loc.low_reg, loc.high_reg);
}
if (update) {
loc.location = kLocPhysReg;
- MarkLive(loc.reg.GetReg(), loc.s_reg_low);
- if (loc.reg.GetReg() != loc.reg.GetHighReg()) {
- MarkLive(loc.reg.GetHighReg(), GetSRegHi(loc.s_reg_low));
+ MarkLive(loc.low_reg, loc.s_reg_low);
+ if (loc.low_reg != loc.high_reg) {
+ MarkLive(loc.high_reg, GetSRegHi(loc.s_reg_low));
}
}
return loc;
@@ -727,14 +741,14 @@
loc = UpdateLoc(loc);
if (loc.location == kLocPhysReg) {
- if (!RegClassMatches(reg_class, loc.reg.GetReg())) {
+ if (!RegClassMatches(reg_class, loc.low_reg)) {
/* Wrong register class. Realloc, copy and transfer ownership. */
new_reg = AllocTypedTemp(loc.fp, reg_class);
- OpRegCopy(new_reg, loc.reg.GetReg());
- CopyRegInfo(new_reg, loc.reg.GetReg());
- Clobber(loc.reg.GetReg());
- loc.reg.SetReg(new_reg);
- if (IsFpReg(loc.reg.GetReg()) && reg_class != kCoreReg)
+ OpRegCopy(new_reg, loc.low_reg);
+ CopyRegInfo(new_reg, loc.low_reg);
+ Clobber(loc.low_reg);
+ loc.low_reg = new_reg;
+ if (IsFpReg(loc.low_reg) && reg_class != kCoreReg)
loc.vec_len = kVectorLength4;
}
return loc;
@@ -742,13 +756,14 @@
DCHECK_NE(loc.s_reg_low, INVALID_SREG);
- loc.reg = RegStorage(RegStorage::k32BitSolo, AllocTypedTemp(loc.fp, reg_class));
- if (IsFpReg(loc.reg.GetReg()) && reg_class != kCoreReg)
+ new_reg = AllocTypedTemp(loc.fp, reg_class);
+ loc.low_reg = new_reg;
+ if (IsFpReg(loc.low_reg) && reg_class != kCoreReg)
loc.vec_len = kVectorLength4;
if (update) {
loc.location = kLocPhysReg;
- MarkLive(loc.reg.GetReg(), loc.s_reg_low);
+ MarkLive(loc.low_reg, loc.s_reg_low);
}
return loc;
}
@@ -761,15 +776,15 @@
// TODO: Reunify with common code after 'pair mess' has been fixed
void X86Mir2Lir::ResetDefLocWide(RegLocation rl) {
DCHECK(rl.wide);
- RegisterInfo* p_low = IsTemp(rl.reg.GetReg());
- if (IsFpReg(rl.reg.GetReg())) {
+ RegisterInfo* p_low = IsTemp(rl.low_reg);
+ if (IsFpReg(rl.low_reg)) {
// We are using only the low register.
if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
}
- ResetDef(rl.reg.GetReg());
+ ResetDef(rl.low_reg);
} else {
- RegisterInfo* p_high = IsTemp(rl.reg.GetHighReg());
+ RegisterInfo* p_high = IsTemp(rl.high_reg);
if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
DCHECK(p_low->pair);
NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
@@ -777,8 +792,8 @@
if (p_high && !(cu_->disable_opt & (1 << kSuppressLoads))) {
DCHECK(p_high->pair);
}
- ResetDef(rl.reg.GetReg());
- ResetDef(rl.reg.GetHighReg());
+ ResetDef(rl.low_reg);
+ ResetDef(rl.high_reg);
}
}
@@ -817,8 +832,8 @@
<< (loc.high_word ? " h" : " ")
<< (loc.home ? " H" : " ")
<< " vec_len: " << loc.vec_len
- << ", low: " << static_cast<int>(loc.reg.GetReg())
- << ", high: " << static_cast<int>(loc.reg.GetHighReg())
+ << ", low: " << static_cast<int>(loc.low_reg)
+ << ", high: " << static_cast<int>(loc.high_reg)
<< ", s_reg: " << loc.s_reg_low
<< ", orig: " << loc.orig_sreg;
}
@@ -1021,8 +1036,8 @@
// Runtime start index.
rl_start = UpdateLoc(rl_start);
if (rl_start.location == kLocPhysReg) {
- length_compare = OpCmpBranch(kCondLe, rCX, rl_start.reg.GetReg(), nullptr);
- OpRegReg(kOpSub, rCX, rl_start.reg.GetReg());
+ length_compare = OpCmpBranch(kCondLe, rCX, rl_start.low_reg, nullptr);
+ OpRegReg(kOpSub, rCX, rl_start.low_reg);
} else {
// Compare to memory to avoid a register load. Handle pushed EDI.
int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
@@ -1051,13 +1066,13 @@
}
} else {
if (rl_start.location == kLocPhysReg) {
- if (rl_start.reg.GetReg() == rDI) {
+ if (rl_start.low_reg == rDI) {
// We have a slight problem here. We are already using RDI!
// Grab the value from the stack.
LoadWordDisp(rX86_SP, 0, rDX);
OpLea(rDI, rBX, rDX, 1, 0);
} else {
- OpLea(rDI, rBX, rl_start.reg.GetReg(), 1, 0);
+ OpLea(rDI, rBX, rl_start.low_reg, 1, 0);
}
} else {
OpRegCopy(rDI, rBX);
@@ -1079,14 +1094,14 @@
// index = ((curr_ptr - orig_ptr) / 2) - 1.
OpRegReg(kOpSub, rDI, rBX);
OpRegImm(kOpAsr, rDI, 1);
- NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rDI, -1);
+ NewLIR3(kX86Lea32RM, rl_return.low_reg, rDI, -1);
LIR *all_done = NewLIR1(kX86Jmp8, 0);
// Failed to match; return -1.
LIR *not_found = NewLIR0(kPseudoTargetLabel);
length_compare->target = not_found;
failed_branch->target = not_found;
- LoadConstantNoClobber(rl_return.reg.GetReg(), -1);
+ LoadConstantNoClobber(rl_return.low_reg, -1);
// And join up at the end.
all_done->target = NewLIR0(kPseudoTargetLabel);
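
A worked reading of the "index = ((curr_ptr - orig_ptr) / 2) - 1" arithmetic emitted above with sub/asr/lea: the pointer difference is in bytes over UTF-16 data, so shifting right by one converts it to characters, and the trailing -1 assumes the scan has already advanced one character past the match. The array and pointer names below are illustrative only, not part of this patch.

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  uint16_t chars[] = {'a', 'b', 'c', 'd'};   // stand-in for the string's UTF-16 data
  const uint16_t* orig_ptr = chars;          // corresponds to rBX above
  const uint16_t* curr_ptr = chars + 3;      // corresponds to rDI, one char past a match at index 2
  ptrdiff_t byte_diff = reinterpret_cast<const char*>(curr_ptr) -
                        reinterpret_cast<const char*>(orig_ptr);  // kOpSub
  ptrdiff_t index = (byte_diff >> 1) - 1;    // kOpAsr by 1, then lea [rDI - 1]
  assert(index == 2);
  return 0;
}
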
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index d5d6b0e..48a39bb 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -514,7 +514,7 @@
// We don't know the proper offset for the value, so pick one that will force
// 4 byte offset. We will fix this up in the assembler later to have the right
// value.
- res = LoadBaseDisp(rl_method.reg.GetReg(), 256 /* bogus */, r_dest_lo, kDouble, INVALID_SREG);
+ res = LoadBaseDisp(rl_method.low_reg, 256 /* bogus */, r_dest_lo, kDouble, INVALID_SREG);
res->target = data_target;
res->flags.fixup = kFixupLoad;
SetMemRefType(res, true, kLiteral);
@@ -714,7 +714,7 @@
opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
break;
default:
- LOG(FATAL) << "Bad case in StoreBaseIndexedDispBody";
+ LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
}
if (!is_array) {
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 1df9ab1..4064bd6 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -126,6 +126,13 @@
/* Mask to strip off fp flags */
#define X86_FP_REG_MASK 0xF
+// RegisterLocation templates return values (rAX, rAX/rDX or XMM0).
+// location, wide, defined, const, fp, core, ref, high_word, home, vec_len, low_reg, high_reg, s_reg_low
+#define X86_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, rAX, INVALID_REG, INVALID_SREG, INVALID_SREG}
+#define X86_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, rAX, rDX, INVALID_SREG, INVALID_SREG}
+#define X86_LOC_C_RETURN_FLOAT {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorLength4, fr0, INVALID_REG, INVALID_SREG, INVALID_SREG}
+#define X86_LOC_C_RETURN_DOUBLE {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorLength8, fr0, fr0, INVALID_SREG, INVALID_SREG}
+
enum X86ResourceEncodingPos {
kX86GPReg0 = 0,
kX86RegSP = 4,
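
To make the positional initializers above easier to read, here is a cut-down stand-in struct with the same field order as the comment (location, wide, ..., s_reg_low). MiniRegLocation and its enum values are illustrative stand-ins rather than the real RegLocation, and the trailing INVALID_SREG is assumed to fill orig_sreg, the field printed in the register-location dump earlier in this diff.

#include <cassert>

// Stand-in values; the real constants live in x86_lir.h and the Quick compiler headers.
enum { kLocPhysReg = 1, kVectorNotUsed = 0, rAX = 0, rDX = 2, INVALID_SREG = -1 };

// Same field order as the comment above; 'const' is renamed 'is_const' because it is
// a C++ keyword, and orig_sreg is assumed to be the trailing field.
struct MiniRegLocation {
  int location, wide, defined, is_const, fp, core, ref, high_word, home;
  int vec_len, low_reg, high_reg, s_reg_low, orig_sreg;
};

int main() {
  // Positional layout of X86_LOC_C_RETURN_WIDE: a wide, in-register core value
  // held in the rAX/rDX pair, with no vector length and no associated s_regs.
  MiniRegLocation ret_wide = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
                              kVectorNotUsed, rAX, rDX, INVALID_SREG, INVALID_SREG};
  assert(ret_wide.wide == 1 && ret_wide.home == 1);
  assert(ret_wide.low_reg == rAX && ret_wide.high_reg == rDX);
  return 0;
}
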
@@ -204,22 +211,6 @@
#define rX86_COUNT rCX
#define rX86_PC INVALID_REG
-// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
-const RegLocation x86_loc_c_return
- {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k32BitSolo, rAX), INVALID_SREG, INVALID_SREG};
-const RegLocation x86_loc_c_return_wide
- {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
- RegStorage(RegStorage::k64BitPair, rAX, rDX), INVALID_SREG, INVALID_SREG};
-// TODO: update to use k32BitVector (must encode in 7 bits, including fp flag).
-const RegLocation x86_loc_c_return_float
- {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorLength4,
- RegStorage(RegStorage::k32BitSolo, fr0), INVALID_SREG, INVALID_SREG};
-// TODO: update to use k64BitVector (must encode in 7 bits, including fp flag).
-const RegLocation x86_loc_c_return_double
- {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorLength8,
- RegStorage(RegStorage::k64BitPair, fr0, fr1), INVALID_SREG, INVALID_SREG};
-
/*
* The following enum defines the list of supported X86 instructions by the
* assembler. Their corresponding EncodingMap positions will be defined in