Restructure to reduce MIR references
This CL eliminates most of the MIR references in the lower-level
code generator, allowing a higher degree of code sharing between
the MIR->LIR and GreenlandIR->LIR lowering passes. Invoke, launch
pad, and new-array support will need more extensive refactoring
in a future CL.
Change-Id: I75f249268c8ac18da1dd9180ff855d5176d6c4fe
diff --git a/src/compiler/codegen/x86/X86/Factory.cc b/src/compiler/codegen/x86/X86/Factory.cc
index 66e7028..c5186c6 100644
--- a/src/compiler/codegen/x86/X86/Factory.cc
+++ b/src/compiler/codegen/x86/X86/Factory.cc
@@ -414,7 +414,7 @@
#endif
}
-LIR* loadBaseIndexedDisp(CompilationUnit *cUnit, MIR *mir,
+LIR* loadBaseIndexedDisp(CompilationUnit *cUnit,
int rBase, int rIndex, int scale, int displacement,
int rDest, int rDestHi,
OpSize size, int sReg) {
@@ -505,27 +505,27 @@
/* Load value from base + scaled index. */
LIR *loadBaseIndexed(CompilationUnit *cUnit, int rBase,
int rIndex, int rDest, int scale, OpSize size) {
- return loadBaseIndexedDisp(cUnit, NULL, rBase, rIndex, scale, 0,
+ return loadBaseIndexedDisp(cUnit, rBase, rIndex, scale, 0,
rDest, INVALID_REG, size, INVALID_SREG);
}
-LIR *loadBaseDisp(CompilationUnit *cUnit, MIR *mir,
+LIR *loadBaseDisp(CompilationUnit *cUnit,
int rBase, int displacement,
int rDest,
OpSize size, int sReg) {
- return loadBaseIndexedDisp(cUnit, mir, rBase, INVALID_REG, 0, displacement,
+ return loadBaseIndexedDisp(cUnit, rBase, INVALID_REG, 0, displacement,
rDest, INVALID_REG, size, sReg);
}
-LIR *loadBaseDispWide(CompilationUnit *cUnit, MIR *mir,
+LIR *loadBaseDispWide(CompilationUnit *cUnit,
int rBase, int displacement,
int rDestLo, int rDestHi,
int sReg) {
- return loadBaseIndexedDisp(cUnit, mir, rBase, INVALID_REG, 0, displacement,
+ return loadBaseIndexedDisp(cUnit, rBase, INVALID_REG, 0, displacement,
rDestLo, rDestHi, kLong, sReg);
}
-LIR* storeBaseIndexedDisp(CompilationUnit *cUnit, MIR *mir,
+LIR* storeBaseIndexedDisp(CompilationUnit *cUnit,
int rBase, int rIndex, int scale, int displacement,
int rSrc, int rSrcHi,
OpSize size, int sReg) {
@@ -600,14 +600,14 @@
LIR *storeBaseIndexed(CompilationUnit *cUnit, int rBase, int rIndex, int rSrc,
int scale, OpSize size)
{
- return storeBaseIndexedDisp(cUnit, NULL, rBase, rIndex, scale, 0,
+ return storeBaseIndexedDisp(cUnit, rBase, rIndex, scale, 0,
rSrc, INVALID_REG, size, INVALID_SREG);
}
LIR *storeBaseDisp(CompilationUnit *cUnit, int rBase, int displacement,
int rSrc, OpSize size)
{
- return storeBaseIndexedDisp(cUnit, NULL, rBase, INVALID_REG, 0,
+ return storeBaseIndexedDisp(cUnit, rBase, INVALID_REG, 0,
displacement, rSrc, INVALID_REG, size,
INVALID_SREG);
}
@@ -615,7 +615,7 @@
LIR *storeBaseDispWide(CompilationUnit *cUnit, int rBase, int displacement,
int rSrcLo, int rSrcHi)
{
- return storeBaseIndexedDisp(cUnit, NULL, rBase, INVALID_REG, 0, displacement,
+ return storeBaseIndexedDisp(cUnit, rBase, INVALID_REG, 0, displacement,
rSrcLo, rSrcHi, kLong, INVALID_SREG);
}
diff --git a/src/compiler/codegen/x86/X86/Gen.cc b/src/compiler/codegen/x86/X86/Gen.cc
index 46c98ad..597eda1 100644
--- a/src/compiler/codegen/x86/X86/Gen.cc
+++ b/src/compiler/codegen/x86/X86/Gen.cc
@@ -34,10 +34,10 @@
* Perform register memory operation.
*/
LIR* genRegMemCheck(CompilationUnit* cUnit, ConditionCode cCode,
- int reg1, int base, int offset, MIR* mir, ThrowKind kind)
+ int reg1, int base, int offset, ThrowKind kind)
{
LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kind,
- mir ? mir->offset : 0, reg1, base, offset);
+ cUnit->currentDalvikOffset, reg1, base, offset);
opRegMem(cUnit, kOpCmp, reg1, base, offset);
LIR* branch = opCondBranch(cUnit, cCode, tgt);
// Remember branch target - will process later
@@ -51,10 +51,10 @@
*/
BasicBlock *findBlock(CompilationUnit* cUnit, unsigned int codeOffset,
bool split, bool create, BasicBlock** immedPredBlockP);
-void genSparseSwitch(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc,
- LIR* labelList)
+void genSparseSwitch(CompilationUnit* cUnit, uint32_t tableOffset,
+ RegLocation rlSrc, LIR* labelList)
{
- const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
+ const u2* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
if (cUnit->printMe) {
dumpSparseSwitchTable(table);
}
@@ -64,7 +64,8 @@
rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
for (int i = 0; i < entries; i++) {
int key = keys[i];
- BasicBlock* case_block = findBlock(cUnit, mir->offset + targets[i],
+ BasicBlock* case_block = findBlock(cUnit,
+ cUnit->currentDalvikOffset + targets[i],
false, false, NULL);
opCmpImmBranch(cUnit, kCondEq, rlSrc.lowReg, key,
&labelList[case_block->id]);
@@ -87,9 +88,10 @@
* jmp rStartOfMethod
* done:
*/
-void genPackedSwitch(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
+void genPackedSwitch(CompilationUnit* cUnit, uint32_t tableOffset,
+ RegLocation rlSrc)
{
- const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
+ const u2* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
if (cUnit->printMe) {
dumpPackedSwitchTable(table);
}
@@ -97,7 +99,7 @@
SwitchTable *tabRec = (SwitchTable *)oatNew(cUnit, sizeof(SwitchTable),
true, kAllocData);
tabRec->table = table;
- tabRec->vaddr = mir->offset;
+ tabRec->vaddr = cUnit->currentDalvikOffset;
int size = table[1];
tabRec->targets = (LIR* *)oatNew(cUnit, size * sizeof(LIR*), true,
kAllocLIR);
@@ -149,14 +151,15 @@
*
* Total size is 4+(width * size + 1)/2 16-bit code units.
*/
-void genFillArrayData(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
+void genFillArrayData(CompilationUnit* cUnit, uint32_t tableOffset,
+ RegLocation rlSrc)
{
- const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
+ const u2* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
// Add the table to the list - we'll process it later
FillArrayData *tabRec = (FillArrayData *)oatNew(cUnit, sizeof(FillArrayData),
true, kAllocData);
tabRec->table = table;
- tabRec->vaddr = mir->offset;
+ tabRec->vaddr = cUnit->currentDalvikOffset;
u2 width = tabRec->table[1];
u4 size = tabRec->table[2] | (((u4)tabRec->table[3]) << 16);
tabRec->size = (size * width) + 8;
@@ -204,18 +207,18 @@
#endif
}
-LIR* genNullCheck(CompilationUnit* cUnit, int sReg, int mReg, MIR* mir);
+LIR* genNullCheck(CompilationUnit* cUnit, int sReg, int mReg, int optFlags);
void callRuntimeHelperReg(CompilationUnit* cUnit, int helperOffset, int arg0);
/*
* TODO: implement fast path to short-circuit thin-lock case
*/
-void genMonitorEnter(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
+void genMonitorEnter(CompilationUnit* cUnit, int optFlags, RegLocation rlSrc)
{
oatFlushAllRegs(cUnit);
loadValueDirectFixed(cUnit, rlSrc, rARG0); // Get obj
oatLockCallTemps(cUnit); // Prepare for explicit register usage
- genNullCheck(cUnit, rlSrc.sRegLow, rARG0, mir);
+ genNullCheck(cUnit, rlSrc.sRegLow, rARG0, optFlags);
// Go expensive route - artLockObjectFromCode(self, obj);
callRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARG0);
}
@@ -223,12 +226,12 @@
/*
* TODO: implement fast path to short-circuit thin-lock case
*/
-void genMonitorExit(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
+void genMonitorExit(CompilationUnit* cUnit, int optFlags, RegLocation rlSrc)
{
oatFlushAllRegs(cUnit);
loadValueDirectFixed(cUnit, rlSrc, rARG0); // Get obj
oatLockCallTemps(cUnit); // Prepare for explicit register usage
- genNullCheck(cUnit, rlSrc.sRegLow, rARG0, mir);
+ genNullCheck(cUnit, rlSrc.sRegLow, rARG0, optFlags);
// Go expensive route - UnlockObjectFromCode(obj);
callRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARG0);
}
@@ -249,7 +252,7 @@
* finish:
*
*/
-void genCmpLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
+void genCmpLong(CompilationUnit* cUnit, RegLocation rlDest,
RegLocation rlSrc1, RegLocation rlSrc2)
{
oatFlushAllRegs(cUnit);