Quick Compiler: Shoot the Camel
Another step towards moving the Quick Compiler from the old
Dalvik coding style to Art's coding style. In this CL, camel-case
locals, struct members, and arguments are converted to
lower-case-with-underscore names. Most of the renames were
formulaic, but I also took the opportunity to rename the old
"printMe" to the more conventional "verbose" and to shorten
cUnit to cu.
No logic changes.
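
As a quick illustration of the convention, here is a minimal
before/after sketch. The struct and functions below are simplified,
hypothetical stand-ins (CompilationUnitOld/New, SpillBytesOld/New),
not real Quick Compiler code; only the naming pattern mirrors the
actual change in this CL.

  #include <cstdio>

  // Before (old Dalvik style): camel-case locals, members, and arguments.
  // These types are simplified stand-ins, not real Quick Compiler code.
  struct CompilationUnitOld {
    bool printMe;        // verbose-listing flag
    int numCoreSpills;   // core registers spilled in the prologue
  };

  static int SpillBytesOld(CompilationUnitOld* cUnit, int extraSpills) {
    int totalSpills = cUnit->numCoreSpills + extraSpills;
    if (cUnit->printMe) {
      printf("spills: %d\n", totalSpills);
    }
    return totalSpills * 4;  // 4 bytes per spilled core register
  }

  // After (Art style): lower-case-with-underscore names, "printMe"
  // renamed to "verbose", and cUnit shortened to cu. Logic unchanged.
  struct CompilationUnitNew {
    bool verbose;
    int num_core_spills;
  };

  static int SpillBytesNew(CompilationUnitNew* cu, int extra_spills) {
    int total_spills = cu->num_core_spills + extra_spills;
    if (cu->verbose) {
      printf("spills: %d\n", total_spills);
    }
    return total_spills * 4;  // 4 bytes per spilled core register
  }
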
Change-Id: I64b69b28a8357d5cc0abc1dc975954c91abd9b45
diff --git a/src/compiler/codegen/arm/arm_lir.h b/src/compiler/codegen/arm/arm_lir.h
index bc3277f..7955b1b 100644
--- a/src/compiler/codegen/arm/arm_lir.h
+++ b/src/compiler/codegen/arm/arm_lir.h
@@ -89,7 +89,7 @@
* | OUT[outs-2] |
* | . |
* | OUT[0] |
- * | curMethod* | <<== sp w/ 16-byte alignment
+ * | cur_method* | <<== sp w/ 16-byte alignment
* +========================+
*/
@@ -568,7 +568,7 @@
};
/* Bit flags describing the behavior of each native opcode */
-/* Instruction assembly fieldLoc kind */
+/* Instruction assembly field_loc kind */
enum ArmEncodingKind {
kFmtUnused,
kFmtBitBlt, /* Bit string using end/start */
@@ -594,7 +594,7 @@
ArmEncodingKind kind;
int end; /* end for kFmtBitBlt, 1-bit slice end for FP regs */
int start; /* start for kFmtBitBlt, 4-bit slice end for FP regs */
- } fieldLoc[4];
+ } field_loc[4];
ArmOpcode opcode;
uint64_t flags;
const char* name;
diff --git a/src/compiler/codegen/arm/assemble_arm.cc b/src/compiler/codegen/arm/assemble_arm.cc
index 8e7a07b..f89915b 100644
--- a/src/compiler/codegen/arm/assemble_arm.cc
+++ b/src/compiler/codegen/arm/assemble_arm.cc
@@ -987,24 +987,24 @@
* discover that pc-relative displacements may not fit the selected
* instruction.
*/
-AssemblerStatus AssembleInstructions(CompilationUnit* cUnit,
- uintptr_t startAddr)
+AssemblerStatus AssembleInstructions(CompilationUnit* cu,
+ uintptr_t start_addr)
{
LIR* lir;
AssemblerStatus res = kSuccess; // Assume success
- for (lir = cUnit->firstLIRInsn; lir; lir = NEXT_LIR(lir)) {
+ for (lir = cu->first_lir_insn; lir; lir = NEXT_LIR(lir)) {
if (lir->opcode < 0) {
/* 1 means padding is needed */
if ((lir->opcode == kPseudoPseudoAlign4) && (lir->operands[0] == 1)) {
- cUnit->codeBuffer.push_back(PADDING_MOV_R5_R5 & 0xFF);
- cUnit->codeBuffer.push_back((PADDING_MOV_R5_R5 >> 8) & 0xFF);
+ cu->code_buffer.push_back(PADDING_MOV_R5_R5 & 0xFF);
+ cu->code_buffer.push_back((PADDING_MOV_R5_R5 >> 8) & 0xFF);
}
continue;
}
- if (lir->flags.isNop) {
+ if (lir->flags.is_nop) {
continue;
}
@@ -1031,9 +1031,9 @@
* However, if the load displacement exceeds the limit,
* we revert to a 2-instruction materialization sequence.
*/
- LIR *lirTarget = lir->target;
+ LIR *lir_target = lir->target;
uintptr_t pc = (lir->offset + 4) & ~3;
- uintptr_t target = lirTarget->offset;
+ uintptr_t target = lir_target->offset;
int delta = target - pc;
if (delta & 0x3) {
LOG(FATAL) << "PC-rel offset not multiple of 4: " << delta;
@@ -1053,22 +1053,22 @@
* vldrs/vldrd we include REG_DEF_LR in the resource
* masks for these instructions.
*/
- int baseReg = (lir->opcode == kThumb2LdrPcRel12) ?
+ int base_reg = (lir->opcode == kThumb2LdrPcRel12) ?
lir->operands[0] : rARM_LR;
// Add new Adr to generate the address
- LIR* newAdr = RawLIR(cUnit, lir->dalvikOffset, kThumb2Adr,
- baseReg, 0, 0, 0, 0, lir->target);
- InsertLIRBefore(lir, newAdr);
+ LIR* new_adr = RawLIR(cu, lir->dalvik_offset, kThumb2Adr,
+ base_reg, 0, 0, 0, 0, lir->target);
+ InsertLIRBefore(lir, new_adr);
// Convert to normal load
if (lir->opcode == kThumb2LdrPcRel12) {
lir->opcode = kThumb2LdrRRI12;
}
// Change the load to be relative to the new Adr base
- lir->operands[1] = baseReg;
+ lir->operands[1] = base_reg;
lir->operands[2] = 0;
- SetupResourceMasks(cUnit, lir);
+ SetupResourceMasks(cu, lir);
res = kRetryAll;
} else {
if ((lir->opcode == kThumb2Vldrs) ||
@@ -1080,26 +1080,26 @@
}
}
} else if (lir->opcode == kThumb2Cbnz || lir->opcode == kThumb2Cbz) {
- LIR *targetLIR = lir->target;
+ LIR *target_lir = lir->target;
uintptr_t pc = lir->offset + 4;
- uintptr_t target = targetLIR->offset;
+ uintptr_t target = target_lir->offset;
int delta = target - pc;
if (delta > 126 || delta < 0) {
/*
* Convert to cmp rx,#0 / b[eq/ne] tgt pair
* Make new branch instruction and insert after
*/
- LIR* newInst =
- RawLIR(cUnit, lir->dalvikOffset, kThumbBCond, 0,
+ LIR* new_inst =
+ RawLIR(cu, lir->dalvik_offset, kThumbBCond, 0,
(lir->opcode == kThumb2Cbz) ? kArmCondEq : kArmCondNe,
0, 0, 0, lir->target);
- InsertLIRAfter(lir, newInst);
+ InsertLIRAfter(lir, new_inst);
/* Convert the cb[n]z to a cmp rx, #0 ] */
lir->opcode = kThumbCmpRI8;
/* operand[0] is src1 in both cb[n]z & CmpRI8 */
lir->operands[1] = 0;
lir->target = 0;
- SetupResourceMasks(cUnit, lir);
+ SetupResourceMasks(cu, lir);
res = kRetryAll;
} else {
lir->operands[1] = delta >> 1;
@@ -1124,121 +1124,121 @@
}
}
lir->operands[0] = reg;
- SetupResourceMasks(cUnit, lir);
+ SetupResourceMasks(cu, lir);
res = kRetryAll;
}
} else if (lir->opcode == kThumbBCond || lir->opcode == kThumb2BCond) {
- LIR *targetLIR = lir->target;
+ LIR *target_lir = lir->target;
int delta = 0;
- DCHECK(targetLIR);
+ DCHECK(target_lir);
uintptr_t pc = lir->offset + 4;
- uintptr_t target = targetLIR->offset;
+ uintptr_t target = target_lir->offset;
delta = target - pc;
if ((lir->opcode == kThumbBCond) && (delta > 254 || delta < -256)) {
lir->opcode = kThumb2BCond;
- SetupResourceMasks(cUnit, lir);
+ SetupResourceMasks(cu, lir);
res = kRetryAll;
}
lir->operands[0] = delta >> 1;
} else if (lir->opcode == kThumb2BUncond) {
- LIR *targetLIR = lir->target;
+ LIR *target_lir = lir->target;
uintptr_t pc = lir->offset + 4;
- uintptr_t target = targetLIR->offset;
+ uintptr_t target = target_lir->offset;
int delta = target - pc;
lir->operands[0] = delta >> 1;
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) &&
+ if (!(cu->disable_opt & (1 << kSafeOptimizations)) &&
lir->operands[0] == 0) { // Useless branch
- lir->flags.isNop = true;
+ lir->flags.is_nop = true;
res = kRetryAll;
}
} else if (lir->opcode == kThumbBUncond) {
- LIR *targetLIR = lir->target;
+ LIR *target_lir = lir->target;
uintptr_t pc = lir->offset + 4;
- uintptr_t target = targetLIR->offset;
+ uintptr_t target = target_lir->offset;
int delta = target - pc;
if (delta > 2046 || delta < -2048) {
// Convert to Thumb2BCond w/ kArmCondAl
lir->opcode = kThumb2BUncond;
lir->operands[0] = 0;
- SetupResourceMasks(cUnit, lir);
+ SetupResourceMasks(cu, lir);
res = kRetryAll;
} else {
lir->operands[0] = delta >> 1;
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) &&
+ if (!(cu->disable_opt & (1 << kSafeOptimizations)) &&
lir->operands[0] == -1) { // Useless branch
- lir->flags.isNop = true;
+ lir->flags.is_nop = true;
res = kRetryAll;
}
}
} else if (lir->opcode == kThumbBlx1) {
DCHECK(NEXT_LIR(lir)->opcode == kThumbBlx2);
- /* curPC is Thumb */
- uintptr_t curPC = (startAddr + lir->offset + 4) & ~3;
+ /* cur_pc is Thumb */
+ uintptr_t cur_pc = (start_addr + lir->offset + 4) & ~3;
uintptr_t target = lir->operands[1];
/* Match bit[1] in target with base */
- if (curPC & 0x2) {
+ if (cur_pc & 0x2) {
target |= 0x2;
}
- int delta = target - curPC;
+ int delta = target - cur_pc;
DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
lir->operands[0] = (delta >> 12) & 0x7ff;
NEXT_LIR(lir)->operands[0] = (delta>> 1) & 0x7ff;
} else if (lir->opcode == kThumbBl1) {
DCHECK(NEXT_LIR(lir)->opcode == kThumbBl2);
- /* Both curPC and target are Thumb */
- uintptr_t curPC = startAddr + lir->offset + 4;
+ /* Both cur_pc and target are Thumb */
+ uintptr_t cur_pc = start_addr + lir->offset + 4;
uintptr_t target = lir->operands[1];
- int delta = target - curPC;
+ int delta = target - cur_pc;
DCHECK((delta >= -(1<<22)) && (delta <= ((1<<22)-2)));
lir->operands[0] = (delta >> 12) & 0x7ff;
NEXT_LIR(lir)->operands[0] = (delta>> 1) & 0x7ff;
} else if (lir->opcode == kThumb2Adr) {
- SwitchTable *tabRec = reinterpret_cast<SwitchTable*>(lir->operands[2]);
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[2]);
LIR* target = lir->target;
- int targetDisp = tabRec ? tabRec->offset
+ int target_disp = tab_rec ? tab_rec->offset
: target->offset;
- int disp = targetDisp - ((lir->offset + 4) & ~3);
+ int disp = target_disp - ((lir->offset + 4) & ~3);
if (disp < 4096) {
lir->operands[1] = disp;
} else {
// convert to ldimm16l, ldimm16h, add tgt, pc, operands[0]
- LIR *newMov16L =
- RawLIR(cUnit, lir->dalvikOffset, kThumb2MovImm16LST,
+ LIR *new_mov16L =
+ RawLIR(cu, lir->dalvik_offset, kThumb2MovImm16LST,
lir->operands[0], 0, reinterpret_cast<uintptr_t>(lir),
- reinterpret_cast<uintptr_t>(tabRec), 0, lir->target);
- InsertLIRBefore(lir, newMov16L);
- LIR *newMov16H =
- RawLIR(cUnit, lir->dalvikOffset, kThumb2MovImm16HST,
+ reinterpret_cast<uintptr_t>(tab_rec), 0, lir->target);
+ InsertLIRBefore(lir, new_mov16L);
+ LIR *new_mov16H =
+ RawLIR(cu, lir->dalvik_offset, kThumb2MovImm16HST,
lir->operands[0], 0, reinterpret_cast<uintptr_t>(lir),
- reinterpret_cast<uintptr_t>(tabRec), 0, lir->target);
- InsertLIRBefore(lir, newMov16H);
+ reinterpret_cast<uintptr_t>(tab_rec), 0, lir->target);
+ InsertLIRBefore(lir, new_mov16H);
lir->opcode = kThumb2AddRRR;
lir->operands[1] = rARM_PC;
lir->operands[2] = lir->operands[0];
- SetupResourceMasks(cUnit, lir);
+ SetupResourceMasks(cu, lir);
res = kRetryAll;
}
} else if (lir->opcode == kThumb2MovImm16LST) {
- // operands[1] should hold disp, [2] has add, [3] has tabRec
+ // operands[1] should hold disp, [2] has add, [3] has tab_rec
LIR *addPCInst = reinterpret_cast<LIR*>(lir->operands[2]);
- SwitchTable *tabRec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
- // If tabRec is null, this is a literal load. Use target
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ // If tab_rec is null, this is a literal load. Use target
LIR* target = lir->target;
- int targetDisp = tabRec ? tabRec->offset : target->offset;
- lir->operands[1] = (targetDisp - (addPCInst->offset + 4)) & 0xffff;
+ int target_disp = tab_rec ? tab_rec->offset : target->offset;
+ lir->operands[1] = (target_disp - (addPCInst->offset + 4)) & 0xffff;
} else if (lir->opcode == kThumb2MovImm16HST) {
- // operands[1] should hold disp, [2] has add, [3] has tabRec
+ // operands[1] should hold disp, [2] has add, [3] has tab_rec
LIR *addPCInst = reinterpret_cast<LIR*>(lir->operands[2]);
- SwitchTable *tabRec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
- // If tabRec is null, this is a literal load. Use target
+ SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(lir->operands[3]);
+ // If tab_rec is null, this is a literal load. Use target
LIR* target = lir->target;
- int targetDisp = tabRec ? tabRec->offset : target->offset;
+ int target_disp = tab_rec ? tab_rec->offset : target->offset;
lir->operands[1] =
- ((targetDisp - (addPCInst->offset + 4)) >> 16) & 0xffff;
+ ((target_disp - (addPCInst->offset + 4)) >> 16) & 0xffff;
}
}
/*
@@ -1256,12 +1256,12 @@
uint32_t operand;
uint32_t value;
operand = lir->operands[i];
- switch (encoder->fieldLoc[i].kind) {
+ switch (encoder->field_loc[i].kind) {
case kFmtUnused:
break;
case kFmtFPImm:
- value = ((operand & 0xF0) >> 4) << encoder->fieldLoc[i].end;
- value |= (operand & 0x0F) << encoder->fieldLoc[i].start;
+ value = ((operand & 0xF0) >> 4) << encoder->field_loc[i].end;
+ value |= (operand & 0x0F) << encoder->field_loc[i].start;
bits |= value;
break;
case kFmtBrOffset:
@@ -1297,27 +1297,27 @@
bits |= value;
break;
case kFmtBitBlt:
- value = (operand << encoder->fieldLoc[i].start) &
- ((1 << (encoder->fieldLoc[i].end + 1)) - 1);
+ value = (operand << encoder->field_loc[i].start) &
+ ((1 << (encoder->field_loc[i].end + 1)) - 1);
bits |= value;
break;
case kFmtDfp: {
DCHECK(ARM_DOUBLEREG(operand));
DCHECK_EQ((operand & 0x1), 0U);
- int regName = (operand & ARM_FP_REG_MASK) >> 1;
+ int reg_name = (operand & ARM_FP_REG_MASK) >> 1;
/* Snag the 1-bit slice and position it */
- value = ((regName & 0x10) >> 4) << encoder->fieldLoc[i].end;
+ value = ((reg_name & 0x10) >> 4) << encoder->field_loc[i].end;
/* Extract and position the 4-bit slice */
- value |= (regName & 0x0f) << encoder->fieldLoc[i].start;
+ value |= (reg_name & 0x0f) << encoder->field_loc[i].start;
bits |= value;
break;
}
case kFmtSfp:
DCHECK(ARM_SINGLEREG(operand));
/* Snag the 1-bit slice and position it */
- value = (operand & 0x1) << encoder->fieldLoc[i].end;
+ value = (operand & 0x1) << encoder->field_loc[i].end;
/* Extract and position the 4-bit slice */
- value |= ((operand & 0x1e) >> 1) << encoder->fieldLoc[i].start;
+ value |= ((operand & 0x1e) >> 1) << encoder->field_loc[i].start;
bits |= value;
break;
case kFmtImm12:
@@ -1348,15 +1348,15 @@
}
break;
default:
- LOG(FATAL) << "Bad fmt:" << encoder->fieldLoc[i].kind;
+ LOG(FATAL) << "Bad fmt:" << encoder->field_loc[i].kind;
}
}
if (encoder->size == 4) {
- cUnit->codeBuffer.push_back((bits >> 16) & 0xff);
- cUnit->codeBuffer.push_back((bits >> 24) & 0xff);
+ cu->code_buffer.push_back((bits >> 16) & 0xff);
+ cu->code_buffer.push_back((bits >> 24) & 0xff);
}
- cUnit->codeBuffer.push_back(bits & 0xff);
- cUnit->codeBuffer.push_back((bits >> 8) & 0xff);
+ cu->code_buffer.push_back(bits & 0xff);
+ cu->code_buffer.push_back((bits >> 8) & 0xff);
}
return res;
}
@@ -1369,23 +1369,23 @@
/*
* Target-dependent offset assignment.
*/
-int AssignInsnOffsets(CompilationUnit* cUnit)
+int AssignInsnOffsets(CompilationUnit* cu)
{
- LIR* armLIR;
+ LIR* arm_lir;
int offset = 0;
- for (armLIR = cUnit->firstLIRInsn; armLIR; armLIR = NEXT_LIR(armLIR)) {
- armLIR->offset = offset;
- if (armLIR->opcode >= 0) {
- if (!armLIR->flags.isNop) {
- offset += armLIR->flags.size;
+ for (arm_lir = cu->first_lir_insn; arm_lir; arm_lir = NEXT_LIR(arm_lir)) {
+ arm_lir->offset = offset;
+ if (arm_lir->opcode >= 0) {
+ if (!arm_lir->flags.is_nop) {
+ offset += arm_lir->flags.size;
}
- } else if (armLIR->opcode == kPseudoPseudoAlign4) {
+ } else if (arm_lir->opcode == kPseudoPseudoAlign4) {
if (offset & 0x2) {
offset += 2;
- armLIR->operands[0] = 1;
+ arm_lir->operands[0] = 1;
} else {
- armLIR->operands[0] = 0;
+ arm_lir->operands[0] = 0;
}
}
/* Pseudo opcodes don't consume space */
diff --git a/src/compiler/codegen/arm/call_arm.cc b/src/compiler/codegen/arm/call_arm.cc
index 98137ad..775b25d 100644
--- a/src/compiler/codegen/arm/call_arm.cc
+++ b/src/compiler/codegen/arm/call_arm.cc
@@ -26,10 +26,10 @@
/* Return the position of an ssa name within the argument list */
-static int InPosition(CompilationUnit* cUnit, int sReg)
+static int InPosition(CompilationUnit* cu, int s_reg)
{
- int vReg = SRegToVReg(cUnit, sReg);
- return vReg - cUnit->numRegs;
+ int v_reg = SRegToVReg(cu, s_reg);
+ return v_reg - cu->num_regs;
}
/*
@@ -37,23 +37,23 @@
* there. NOTE: all live arg registers must be locked prior to this call
* to avoid having them allocated as a temp by downstream utilities.
*/
-RegLocation ArgLoc(CompilationUnit* cUnit, RegLocation loc)
+RegLocation ArgLoc(CompilationUnit* cu, RegLocation loc)
{
- int argNum = InPosition(cUnit, loc.sRegLow);
+ int arg_num = InPosition(cu, loc.s_reg_low);
if (loc.wide) {
- if (argNum == 2) {
+ if (arg_num == 2) {
// Bad case - half in register, half in frame. Just punt
loc.location = kLocInvalid;
- } else if (argNum < 2) {
- loc.lowReg = rARM_ARG1 + argNum;
- loc.highReg = loc.lowReg + 1;
+ } else if (arg_num < 2) {
+ loc.low_reg = rARM_ARG1 + arg_num;
+ loc.high_reg = loc.low_reg + 1;
loc.location = kLocPhysReg;
} else {
loc.location = kLocDalvikFrame;
}
} else {
- if (argNum < 3) {
- loc.lowReg = rARM_ARG1 + argNum;
+ if (arg_num < 3) {
+ loc.low_reg = rARM_ARG1 + arg_num;
loc.location = kLocPhysReg;
} else {
loc.location = kLocDalvikFrame;
@@ -67,15 +67,15 @@
* the frame, we can't use the normal LoadValue() because it assumed
* a proper frame - and we're frameless.
*/
-RegLocation LoadArg(CompilationUnit* cUnit, RegLocation loc)
+RegLocation LoadArg(CompilationUnit* cu, RegLocation loc)
{
if (loc.location == kLocDalvikFrame) {
- int start = (InPosition(cUnit, loc.sRegLow) + 1) * sizeof(uint32_t);
- loc.lowReg = AllocTemp(cUnit);
- LoadWordDisp(cUnit, rARM_SP, start, loc.lowReg);
+ int start = (InPosition(cu, loc.s_reg_low) + 1) * sizeof(uint32_t);
+ loc.low_reg = AllocTemp(cu);
+ LoadWordDisp(cu, rARM_SP, start, loc.low_reg);
if (loc.wide) {
- loc.highReg = AllocTemp(cUnit);
- LoadWordDisp(cUnit, rARM_SP, start + sizeof(uint32_t), loc.highReg);
+ loc.high_reg = AllocTemp(cu);
+ LoadWordDisp(cu, rARM_SP, start + sizeof(uint32_t), loc.high_reg);
}
loc.location = kLocPhysReg;
}
@@ -83,24 +83,24 @@
}
/* Lock any referenced arguments that arrive in registers */
-static void LockLiveArgs(CompilationUnit* cUnit, MIR* mir)
+static void LockLiveArgs(CompilationUnit* cu, MIR* mir)
{
- int firstIn = cUnit->numRegs;
- const int numArgRegs = 3; // TODO: generalize & move to RegUtil.cc
- for (int i = 0; i < mir->ssaRep->numUses; i++) {
- int vReg = SRegToVReg(cUnit, mir->ssaRep->uses[i]);
- int InPosition = vReg - firstIn;
- if (InPosition < numArgRegs) {
- LockTemp(cUnit, rARM_ARG1 + InPosition);
+ int first_in = cu->num_regs;
+ const int num_arg_regs = 3; // TODO: generalize & move to RegUtil.cc
+ for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
+ int v_reg = SRegToVReg(cu, mir->ssa_rep->uses[i]);
+ int InPosition = v_reg - first_in;
+ if (InPosition < num_arg_regs) {
+ LockTemp(cu, rARM_ARG1 + InPosition);
}
}
}
/* Find the next MIR, which may be in a following basic block */
-static MIR* GetNextMir(CompilationUnit* cUnit, BasicBlock** pBb, MIR* mir)
+static MIR* GetNextMir(CompilationUnit* cu, BasicBlock** p_bb, MIR* mir)
{
- BasicBlock* bb = *pBb;
- MIR* origMir = mir;
+ BasicBlock* bb = *p_bb;
+ MIR* orig_mir = mir;
while (bb != NULL) {
if (mir != NULL) {
mir = mir->next;
@@ -108,121 +108,121 @@
if (mir != NULL) {
return mir;
} else {
- bb = bb->fallThrough;
- *pBb = bb;
+ bb = bb->fall_through;
+ *p_bb = bb;
if (bb) {
- mir = bb->firstMIRInsn;
+ mir = bb->first_mir_insn;
if (mir != NULL) {
return mir;
}
}
}
}
- return origMir;
+ return orig_mir;
}
-/* Used for the "printMe" listing */
-void GenPrintLabel(CompilationUnit *cUnit, MIR* mir)
+/* Used for the "verbose" listing */
+void GenPrintLabel(CompilationUnit *cu, MIR* mir)
{
/* Mark the beginning of a Dalvik instruction for line tracking */
- char* instStr = cUnit->printMe ?
- GetDalvikDisassembly(cUnit, mir->dalvikInsn, "") : NULL;
- MarkBoundary(cUnit, mir->offset, instStr);
+ char* inst_str = cu->verbose ?
+ GetDalvikDisassembly(cu, mir->dalvikInsn, "") : NULL;
+ MarkBoundary(cu, mir->offset, inst_str);
/* Don't generate the SSA annotation unless verbose mode is on */
- if (cUnit->printMe && mir->ssaRep) {
- char* ssaString = GetSSAString(cUnit, mir->ssaRep);
- NewLIR1(cUnit, kPseudoSSARep, reinterpret_cast<uintptr_t>(ssaString));
+ if (cu->verbose && mir->ssa_rep) {
+ char* ssa_string = GetSSAString(cu, mir->ssa_rep);
+ NewLIR1(cu, kPseudoSSARep, reinterpret_cast<uintptr_t>(ssa_string));
}
}
-static MIR* SpecialIGet(CompilationUnit* cUnit, BasicBlock** bb, MIR* mir,
- OpSize size, bool longOrDouble, bool isObject)
+static MIR* SpecialIGet(CompilationUnit* cu, BasicBlock** bb, MIR* mir,
+ OpSize size, bool long_or_double, bool is_object)
{
- int fieldOffset;
- bool isVolatile;
- uint32_t fieldIdx = mir->dalvikInsn.vC;
- bool fastPath = FastInstance(cUnit, fieldIdx, fieldOffset, isVolatile, false);
- if (!fastPath || !(mir->optimizationFlags & MIR_IGNORE_NULL_CHECK)) {
+ int field_offset;
+ bool is_volatile;
+ uint32_t field_idx = mir->dalvikInsn.vC;
+ bool fast_path = FastInstance(cu, field_idx, field_offset, is_volatile, false);
+ if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
return NULL;
}
- RegLocation rlObj = GetSrc(cUnit, mir, 0);
- LockLiveArgs(cUnit, mir);
- rlObj = ArgLoc(cUnit, rlObj);
- RegLocation rlDest;
- if (longOrDouble) {
- rlDest = GetReturnWide(cUnit, false);
+ RegLocation rl_obj = GetSrc(cu, mir, 0);
+ LockLiveArgs(cu, mir);
+ rl_obj = ArgLoc(cu, rl_obj);
+ RegLocation rl_dest;
+ if (long_or_double) {
+ rl_dest = GetReturnWide(cu, false);
} else {
- rlDest = GetReturn(cUnit, false);
+ rl_dest = GetReturn(cu, false);
}
// Point of no return - no aborts after this
- GenPrintLabel(cUnit, mir);
- rlObj = LoadArg(cUnit, rlObj);
- GenIGet(cUnit, fieldIdx, mir->optimizationFlags, size, rlDest, rlObj,
- longOrDouble, isObject);
- return GetNextMir(cUnit, bb, mir);
+ GenPrintLabel(cu, mir);
+ rl_obj = LoadArg(cu, rl_obj);
+ GenIGet(cu, field_idx, mir->optimization_flags, size, rl_dest, rl_obj,
+ long_or_double, is_object);
+ return GetNextMir(cu, bb, mir);
}
-static MIR* SpecialIPut(CompilationUnit* cUnit, BasicBlock** bb, MIR* mir,
- OpSize size, bool longOrDouble, bool isObject)
+static MIR* SpecialIPut(CompilationUnit* cu, BasicBlock** bb, MIR* mir,
+ OpSize size, bool long_or_double, bool is_object)
{
- int fieldOffset;
- bool isVolatile;
- uint32_t fieldIdx = mir->dalvikInsn.vC;
- bool fastPath = FastInstance(cUnit, fieldIdx, fieldOffset, isVolatile, false);
- if (!fastPath || !(mir->optimizationFlags & MIR_IGNORE_NULL_CHECK)) {
+ int field_offset;
+ bool is_volatile;
+ uint32_t field_idx = mir->dalvikInsn.vC;
+ bool fast_path = FastInstance(cu, field_idx, field_offset, is_volatile, false);
+ if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
return NULL;
}
- RegLocation rlSrc;
- RegLocation rlObj;
- LockLiveArgs(cUnit, mir);
- if (longOrDouble) {
- rlSrc = GetSrcWide(cUnit, mir, 0);
- rlObj = GetSrc(cUnit, mir, 2);
+ RegLocation rl_src;
+ RegLocation rl_obj;
+ LockLiveArgs(cu, mir);
+ if (long_or_double) {
+ rl_src = GetSrcWide(cu, mir, 0);
+ rl_obj = GetSrc(cu, mir, 2);
} else {
- rlSrc = GetSrc(cUnit, mir, 0);
- rlObj = GetSrc(cUnit, mir, 1);
+ rl_src = GetSrc(cu, mir, 0);
+ rl_obj = GetSrc(cu, mir, 1);
}
- rlSrc = ArgLoc(cUnit, rlSrc);
- rlObj = ArgLoc(cUnit, rlObj);
+ rl_src = ArgLoc(cu, rl_src);
+ rl_obj = ArgLoc(cu, rl_obj);
// Reject if source is split across registers & frame
- if (rlObj.location == kLocInvalid) {
- ResetRegPool(cUnit);
+ if (rl_obj.location == kLocInvalid) {
+ ResetRegPool(cu);
return NULL;
}
// Point of no return - no aborts after this
- GenPrintLabel(cUnit, mir);
- rlObj = LoadArg(cUnit, rlObj);
- rlSrc = LoadArg(cUnit, rlSrc);
- GenIPut(cUnit, fieldIdx, mir->optimizationFlags, size, rlSrc, rlObj,
- longOrDouble, isObject);
- return GetNextMir(cUnit, bb, mir);
+ GenPrintLabel(cu, mir);
+ rl_obj = LoadArg(cu, rl_obj);
+ rl_src = LoadArg(cu, rl_src);
+ GenIPut(cu, field_idx, mir->optimization_flags, size, rl_src, rl_obj,
+ long_or_double, is_object);
+ return GetNextMir(cu, bb, mir);
}
-static MIR* SpecialIdentity(CompilationUnit* cUnit, MIR* mir)
+static MIR* SpecialIdentity(CompilationUnit* cu, MIR* mir)
{
- RegLocation rlSrc;
- RegLocation rlDest;
- bool wide = (mir->ssaRep->numUses == 2);
+ RegLocation rl_src;
+ RegLocation rl_dest;
+ bool wide = (mir->ssa_rep->num_uses == 2);
if (wide) {
- rlSrc = GetSrcWide(cUnit, mir, 0);
- rlDest = GetReturnWide(cUnit, false);
+ rl_src = GetSrcWide(cu, mir, 0);
+ rl_dest = GetReturnWide(cu, false);
} else {
- rlSrc = GetSrc(cUnit, mir, 0);
- rlDest = GetReturn(cUnit, false);
+ rl_src = GetSrc(cu, mir, 0);
+ rl_dest = GetReturn(cu, false);
}
- LockLiveArgs(cUnit, mir);
- rlSrc = ArgLoc(cUnit, rlSrc);
- if (rlSrc.location == kLocInvalid) {
- ResetRegPool(cUnit);
+ LockLiveArgs(cu, mir);
+ rl_src = ArgLoc(cu, rl_src);
+ if (rl_src.location == kLocInvalid) {
+ ResetRegPool(cu);
return NULL;
}
// Point of no return - no aborts after this
- GenPrintLabel(cUnit, mir);
- rlSrc = LoadArg(cUnit, rlSrc);
+ GenPrintLabel(cu, mir);
+ rl_src = LoadArg(cu, rl_src);
if (wide) {
- StoreValueWide(cUnit, rlDest, rlSrc);
+ StoreValueWide(cu, rl_dest, rl_src);
} else {
- StoreValue(cUnit, rlDest, rlSrc);
+ StoreValue(cu, rl_dest, rl_src);
}
return mir;
}
@@ -230,78 +230,78 @@
/*
* Special-case code genration for simple non-throwing leaf methods.
*/
-void GenSpecialCase(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
- SpecialCaseHandler specialCase)
+void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ SpecialCaseHandler special_case)
{
- cUnit->currentDalvikOffset = mir->offset;
- MIR* nextMir = NULL;
- switch (specialCase) {
+ cu->current_dalvik_offset = mir->offset;
+ MIR* next_mir = NULL;
+ switch (special_case) {
case kNullMethod:
DCHECK(mir->dalvikInsn.opcode == Instruction::RETURN_VOID);
- nextMir = mir;
+ next_mir = mir;
break;
case kConstFunction:
- GenPrintLabel(cUnit, mir);
- LoadConstant(cUnit, rARM_RET0, mir->dalvikInsn.vB);
- nextMir = GetNextMir(cUnit, &bb, mir);
+ GenPrintLabel(cu, mir);
+ LoadConstant(cu, rARM_RET0, mir->dalvikInsn.vB);
+ next_mir = GetNextMir(cu, &bb, mir);
break;
case kIGet:
- nextMir = SpecialIGet(cUnit, &bb, mir, kWord, false, false);
+ next_mir = SpecialIGet(cu, &bb, mir, kWord, false, false);
break;
case kIGetBoolean:
case kIGetByte:
- nextMir = SpecialIGet(cUnit, &bb, mir, kUnsignedByte, false, false);
+ next_mir = SpecialIGet(cu, &bb, mir, kUnsignedByte, false, false);
break;
case kIGetObject:
- nextMir = SpecialIGet(cUnit, &bb, mir, kWord, false, true);
+ next_mir = SpecialIGet(cu, &bb, mir, kWord, false, true);
break;
case kIGetChar:
- nextMir = SpecialIGet(cUnit, &bb, mir, kUnsignedHalf, false, false);
+ next_mir = SpecialIGet(cu, &bb, mir, kUnsignedHalf, false, false);
break;
case kIGetShort:
- nextMir = SpecialIGet(cUnit, &bb, mir, kSignedHalf, false, false);
+ next_mir = SpecialIGet(cu, &bb, mir, kSignedHalf, false, false);
break;
case kIGetWide:
- nextMir = SpecialIGet(cUnit, &bb, mir, kLong, true, false);
+ next_mir = SpecialIGet(cu, &bb, mir, kLong, true, false);
break;
case kIPut:
- nextMir = SpecialIPut(cUnit, &bb, mir, kWord, false, false);
+ next_mir = SpecialIPut(cu, &bb, mir, kWord, false, false);
break;
case kIPutBoolean:
case kIPutByte:
- nextMir = SpecialIPut(cUnit, &bb, mir, kUnsignedByte, false, false);
+ next_mir = SpecialIPut(cu, &bb, mir, kUnsignedByte, false, false);
break;
case kIPutObject:
- nextMir = SpecialIPut(cUnit, &bb, mir, kWord, false, true);
+ next_mir = SpecialIPut(cu, &bb, mir, kWord, false, true);
break;
case kIPutChar:
- nextMir = SpecialIPut(cUnit, &bb, mir, kUnsignedHalf, false, false);
+ next_mir = SpecialIPut(cu, &bb, mir, kUnsignedHalf, false, false);
break;
case kIPutShort:
- nextMir = SpecialIPut(cUnit, &bb, mir, kSignedHalf, false, false);
+ next_mir = SpecialIPut(cu, &bb, mir, kSignedHalf, false, false);
break;
case kIPutWide:
- nextMir = SpecialIPut(cUnit, &bb, mir, kLong, true, false);
+ next_mir = SpecialIPut(cu, &bb, mir, kLong, true, false);
break;
case kIdentity:
- nextMir = SpecialIdentity(cUnit, mir);
+ next_mir = SpecialIdentity(cu, mir);
break;
default:
return;
}
- if (nextMir != NULL) {
- cUnit->currentDalvikOffset = nextMir->offset;
- if (specialCase != kIdentity) {
- GenPrintLabel(cUnit, nextMir);
+ if (next_mir != NULL) {
+ cu->current_dalvik_offset = next_mir->offset;
+ if (special_case != kIdentity) {
+ GenPrintLabel(cu, next_mir);
}
- NewLIR1(cUnit, kThumbBx, rARM_LR);
- cUnit->coreSpillMask = 0;
- cUnit->numCoreSpills = 0;
- cUnit->fpSpillMask = 0;
- cUnit->numFPSpills = 0;
- cUnit->frameSize = 0;
- cUnit->coreVmapTable.clear();
- cUnit->fpVmapTable.clear();
+ NewLIR1(cu, kThumbBx, rARM_LR);
+ cu->core_spill_mask = 0;
+ cu->num_core_spills = 0;
+ cu->fp_spill_mask = 0;
+ cu->num_fp_spills = 0;
+ cu->frame_size = 0;
+ cu->core_vmap_table.clear();
+ cu->fp_vmap_table.clear();
}
}
@@ -314,109 +314,109 @@
* The test loop will look something like:
*
* adr rBase, <table>
- * ldr rVal, [rARM_SP, vRegOff]
- * mov rIdx, #tableSize
+ * ldr r_val, [rARM_SP, v_reg_off]
+ * mov r_idx, #table_size
* lp:
- * ldmia rBase!, {rKey, rDisp}
- * sub rIdx, #1
- * cmp rVal, rKey
+ * ldmia rBase!, {r_key, r_disp}
+ * sub r_idx, #1
+ * cmp r_val, r_key
* ifeq
- * add rARM_PC, rDisp ; This is the branch from which we compute displacement
- * cbnz rIdx, lp
+ * add rARM_PC, r_disp ; This is the branch from which we compute displacement
+ * cbnz r_idx, lp
*/
-void GenSparseSwitch(CompilationUnit* cUnit, uint32_t tableOffset,
- RegLocation rlSrc)
+void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src)
{
- const uint16_t* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
- if (cUnit->printMe) {
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ if (cu->verbose) {
DumpSparseSwitchTable(table);
}
// Add the table to the list - we'll process it later
- SwitchTable *tabRec =
- static_cast<SwitchTable*>(NewMem(cUnit, sizeof(SwitchTable), true, kAllocData));
- tabRec->table = table;
- tabRec->vaddr = cUnit->currentDalvikOffset;
+ SwitchTable *tab_rec =
+ static_cast<SwitchTable*>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
int size = table[1];
- tabRec->targets = static_cast<LIR**>(NewMem(cUnit, size * sizeof(LIR*), true, kAllocLIR));
- InsertGrowableList(cUnit, &cUnit->switchTables, reinterpret_cast<uintptr_t>(tabRec));
+ tab_rec->targets = static_cast<LIR**>(NewMem(cu, size * sizeof(LIR*), true, kAllocLIR));
+ InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
// Get the switch value
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
- int rBase = AllocTemp(cUnit);
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ int rBase = AllocTemp(cu);
/* Allocate key and disp temps */
- int rKey = AllocTemp(cUnit);
- int rDisp = AllocTemp(cUnit);
- // Make sure rKey's register number is less than rDisp's number for ldmia
- if (rKey > rDisp) {
- int tmp = rDisp;
- rDisp = rKey;
- rKey = tmp;
+ int r_key = AllocTemp(cu);
+ int r_disp = AllocTemp(cu);
+ // Make sure r_key's register number is less than r_disp's number for ldmia
+ if (r_key > r_disp) {
+ int tmp = r_disp;
+ r_disp = r_key;
+ r_key = tmp;
}
// Materialize a pointer to the switch table
- NewLIR3(cUnit, kThumb2Adr, rBase, 0, reinterpret_cast<uintptr_t>(tabRec));
- // Set up rIdx
- int rIdx = AllocTemp(cUnit);
- LoadConstant(cUnit, rIdx, size);
+ NewLIR3(cu, kThumb2Adr, rBase, 0, reinterpret_cast<uintptr_t>(tab_rec));
+ // Set up r_idx
+ int r_idx = AllocTemp(cu);
+ LoadConstant(cu, r_idx, size);
// Establish loop branch target
- LIR* target = NewLIR0(cUnit, kPseudoTargetLabel);
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
// Load next key/disp
- NewLIR2(cUnit, kThumb2LdmiaWB, rBase, (1 << rKey) | (1 << rDisp));
- OpRegReg(cUnit, kOpCmp, rKey, rlSrc.lowReg);
+ NewLIR2(cu, kThumb2LdmiaWB, rBase, (1 << r_key) | (1 << r_disp));
+ OpRegReg(cu, kOpCmp, r_key, rl_src.low_reg);
// Go if match. NOTE: No instruction set switch here - must stay Thumb2
- OpIT(cUnit, kArmCondEq, "");
- LIR* switchBranch = NewLIR1(cUnit, kThumb2AddPCR, rDisp);
- tabRec->anchor = switchBranch;
+ OpIT(cu, kArmCondEq, "");
+ LIR* switch_branch = NewLIR1(cu, kThumb2AddPCR, r_disp);
+ tab_rec->anchor = switch_branch;
// Needs to use setflags encoding here
- NewLIR3(cUnit, kThumb2SubsRRI12, rIdx, rIdx, 1);
- OpCondBranch(cUnit, kCondNe, target);
+ NewLIR3(cu, kThumb2SubsRRI12, r_idx, r_idx, 1);
+ OpCondBranch(cu, kCondNe, target);
}
-void GenPackedSwitch(CompilationUnit* cUnit, uint32_t tableOffset,
- RegLocation rlSrc)
+void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src)
{
- const uint16_t* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
- if (cUnit->printMe) {
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
+ if (cu->verbose) {
DumpPackedSwitchTable(table);
}
// Add the table to the list - we'll process it later
- SwitchTable *tabRec =
- static_cast<SwitchTable*>(NewMem(cUnit, sizeof(SwitchTable), true, kAllocData));
- tabRec->table = table;
- tabRec->vaddr = cUnit->currentDalvikOffset;
+ SwitchTable *tab_rec =
+ static_cast<SwitchTable*>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
int size = table[1];
- tabRec->targets = static_cast<LIR**>(NewMem(cUnit, size * sizeof(LIR*), true, kAllocLIR));
- InsertGrowableList(cUnit, &cUnit->switchTables, reinterpret_cast<uintptr_t>(tabRec));
+ tab_rec->targets = static_cast<LIR**>(NewMem(cu, size * sizeof(LIR*), true, kAllocLIR));
+ InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
// Get the switch value
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
- int tableBase = AllocTemp(cUnit);
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ int table_base = AllocTemp(cu);
// Materialize a pointer to the switch table
- NewLIR3(cUnit, kThumb2Adr, tableBase, 0, reinterpret_cast<uintptr_t>(tabRec));
- int lowKey = s4FromSwitchData(&table[2]);
+ NewLIR3(cu, kThumb2Adr, table_base, 0, reinterpret_cast<uintptr_t>(tab_rec));
+ int low_key = s4FromSwitchData(&table[2]);
int keyReg;
// Remove the bias, if necessary
- if (lowKey == 0) {
- keyReg = rlSrc.lowReg;
+ if (low_key == 0) {
+ keyReg = rl_src.low_reg;
} else {
- keyReg = AllocTemp(cUnit);
- OpRegRegImm(cUnit, kOpSub, keyReg, rlSrc.lowReg, lowKey);
+ keyReg = AllocTemp(cu);
+ OpRegRegImm(cu, kOpSub, keyReg, rl_src.low_reg, low_key);
}
// Bounds check - if < 0 or >= size continue following switch
- OpRegImm(cUnit, kOpCmp, keyReg, size-1);
- LIR* branchOver = OpCondBranch(cUnit, kCondHi, NULL);
+ OpRegImm(cu, kOpCmp, keyReg, size-1);
+ LIR* branch_over = OpCondBranch(cu, kCondHi, NULL);
// Load the displacement from the switch table
- int dispReg = AllocTemp(cUnit);
- LoadBaseIndexed(cUnit, tableBase, keyReg, dispReg, 2, kWord);
+ int disp_reg = AllocTemp(cu);
+ LoadBaseIndexed(cu, table_base, keyReg, disp_reg, 2, kWord);
// ..and go! NOTE: No instruction set switch here - must stay Thumb2
- LIR* switchBranch = NewLIR1(cUnit, kThumb2AddPCR, dispReg);
- tabRec->anchor = switchBranch;
+ LIR* switch_branch = NewLIR1(cu, kThumb2AddPCR, disp_reg);
+ tab_rec->anchor = switch_branch;
- /* branchOver target here */
- LIR* target = NewLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = target;
+ /* branch_over target here */
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = target;
}
/*
@@ -429,30 +429,30 @@
*
* Total size is 4+(width * size + 1)/2 16-bit code units.
*/
-void GenFillArrayData(CompilationUnit* cUnit, uint32_t tableOffset, RegLocation rlSrc)
+void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
{
- const uint16_t* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
+ const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
// Add the table to the list - we'll process it later
- FillArrayData *tabRec =
- static_cast<FillArrayData*>(NewMem(cUnit, sizeof(FillArrayData), true, kAllocData));
- tabRec->table = table;
- tabRec->vaddr = cUnit->currentDalvikOffset;
- uint16_t width = tabRec->table[1];
- uint32_t size = tabRec->table[2] | ((static_cast<uint32_t>(tabRec->table[3])) << 16);
- tabRec->size = (size * width) + 8;
+ FillArrayData *tab_rec =
+ static_cast<FillArrayData*>(NewMem(cu, sizeof(FillArrayData), true, kAllocData));
+ tab_rec->table = table;
+ tab_rec->vaddr = cu->current_dalvik_offset;
+ uint16_t width = tab_rec->table[1];
+ uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
+ tab_rec->size = (size * width) + 8;
- InsertGrowableList(cUnit, &cUnit->fillArrayData, reinterpret_cast<uintptr_t>(tabRec));
+ InsertGrowableList(cu, &cu->fill_array_data, reinterpret_cast<uintptr_t>(tab_rec));
// Making a call - use explicit registers
- FlushAllRegs(cUnit); /* Everything to home location */
- LoadValueDirectFixed(cUnit, rlSrc, r0);
- LoadWordDisp(cUnit, rARM_SELF, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
+ FlushAllRegs(cu); /* Everything to home location */
+ LoadValueDirectFixed(cu, rl_src, r0);
+ LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
rARM_LR);
// Materialize a pointer to the fill data image
- NewLIR3(cUnit, kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tabRec));
- ClobberCalleeSave(cUnit);
- LIR* callInst = OpReg(cUnit, kOpBlx, rARM_LR);
- MarkSafepointPC(cUnit, callInst);
+ NewLIR3(cu, kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tab_rec));
+ ClobberCalleeSave(cu);
+ LIR* call_inst = OpReg(cu, kOpBlx, rARM_LR);
+ MarkSafepointPC(cu, call_inst);
}
/*
@@ -464,7 +464,7 @@
* r0 -> self pointer [arg0 for oat[Lock/Unlock]Object
* r1 -> object [arg1 for oat[Lock/Unlock]Object
* r2 -> intial contents of object->lock, later result of strex
- * r3 -> self->threadId
+ * r3 -> self->thread_id
* r12 -> allow to be used by utilities as general temp
*
* The result of the strex is 0 if we acquire the lock.
@@ -481,33 +481,33 @@
* preserved.
*
*/
-void GenMonitorEnter(CompilationUnit* cUnit, int optFlags, RegLocation rlSrc)
+void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
{
- FlushAllRegs(cUnit);
+ FlushAllRegs(cu);
DCHECK_EQ(LW_SHAPE_THIN, 0);
- LoadValueDirectFixed(cUnit, rlSrc, r0); // Get obj
- LockCallTemps(cUnit); // Prepare for explicit register usage
- GenNullCheck(cUnit, rlSrc.sRegLow, r0, optFlags);
- LoadWordDisp(cUnit, rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
- NewLIR3(cUnit, kThumb2Ldrex, r1, r0,
+ LoadValueDirectFixed(cu, rl_src, r0); // Get obj
+ LockCallTemps(cu); // Prepare for explicit register usage
+ GenNullCheck(cu, rl_src.s_reg_low, r0, opt_flags);
+ LoadWordDisp(cu, rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+ NewLIR3(cu, kThumb2Ldrex, r1, r0,
Object::MonitorOffset().Int32Value() >> 2); // Get object->lock
// Align owner
- OpRegImm(cUnit, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
- // Is lock unheld on lock or held by us (==threadId) on unlock?
- NewLIR4(cUnit, kThumb2Bfi, r2, r1, 0, LW_LOCK_OWNER_SHIFT - 1);
- NewLIR3(cUnit, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
- OpRegImm(cUnit, kOpCmp, r1, 0);
- OpIT(cUnit, kArmCondEq, "");
- NewLIR4(cUnit, kThumb2Strex, r1, r2, r0,
+ OpRegImm(cu, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
+ // Is lock unheld on lock or held by us (==thread_id) on unlock?
+ NewLIR4(cu, kThumb2Bfi, r2, r1, 0, LW_LOCK_OWNER_SHIFT - 1);
+ NewLIR3(cu, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
+ OpRegImm(cu, kOpCmp, r1, 0);
+ OpIT(cu, kArmCondEq, "");
+ NewLIR4(cu, kThumb2Strex, r1, r2, r0,
Object::MonitorOffset().Int32Value() >> 2);
- OpRegImm(cUnit, kOpCmp, r1, 0);
- OpIT(cUnit, kArmCondNe, "T");
+ OpRegImm(cu, kOpCmp, r1, 0);
+ OpIT(cu, kArmCondNe, "T");
// Go expensive route - artLockObjectFromCode(self, obj);
- LoadWordDisp(cUnit, rARM_SELF, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR);
- ClobberCalleeSave(cUnit);
- LIR* callInst = OpReg(cUnit, kOpBlx, rARM_LR);
- MarkSafepointPC(cUnit, callInst);
- GenMemBarrier(cUnit, kLoadLoad);
+ LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR);
+ ClobberCalleeSave(cu);
+ LIR* call_inst = OpReg(cu, kOpBlx, rARM_LR);
+ MarkSafepointPC(cu, call_inst);
+ GenMemBarrier(cu, kLoadLoad);
}
/*
@@ -516,129 +516,129 @@
* a zero recursion count, it's safe to punch it back to the
* initial, unlock thin state with a store word.
*/
-void GenMonitorExit(CompilationUnit* cUnit, int optFlags, RegLocation rlSrc)
+void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
{
DCHECK_EQ(LW_SHAPE_THIN, 0);
- FlushAllRegs(cUnit);
- LoadValueDirectFixed(cUnit, rlSrc, r0); // Get obj
- LockCallTemps(cUnit); // Prepare for explicit register usage
- GenNullCheck(cUnit, rlSrc.sRegLow, r0, optFlags);
- LoadWordDisp(cUnit, r0, Object::MonitorOffset().Int32Value(), r1); // Get lock
- LoadWordDisp(cUnit, rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
- // Is lock unheld on lock or held by us (==threadId) on unlock?
- OpRegRegImm(cUnit, kOpAnd, r3, r1,
+ FlushAllRegs(cu);
+ LoadValueDirectFixed(cu, rl_src, r0); // Get obj
+ LockCallTemps(cu); // Prepare for explicit register usage
+ GenNullCheck(cu, rl_src.s_reg_low, r0, opt_flags);
+ LoadWordDisp(cu, r0, Object::MonitorOffset().Int32Value(), r1); // Get lock
+ LoadWordDisp(cu, rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
+ // Is lock unheld on lock or held by us (==thread_id) on unlock?
+ OpRegRegImm(cu, kOpAnd, r3, r1,
(LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT));
// Align owner
- OpRegImm(cUnit, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
- NewLIR3(cUnit, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
- OpRegReg(cUnit, kOpSub, r1, r2);
- OpIT(cUnit, kArmCondEq, "EE");
- StoreWordDisp(cUnit, r0, Object::MonitorOffset().Int32Value(), r3);
+ OpRegImm(cu, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
+ NewLIR3(cu, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
+ OpRegReg(cu, kOpSub, r1, r2);
+ OpIT(cu, kArmCondEq, "EE");
+ StoreWordDisp(cu, r0, Object::MonitorOffset().Int32Value(), r3);
// Go expensive route - UnlockObjectFromCode(obj);
- LoadWordDisp(cUnit, rARM_SELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
- ClobberCalleeSave(cUnit);
- LIR* callInst = OpReg(cUnit, kOpBlx, rARM_LR);
- MarkSafepointPC(cUnit, callInst);
- GenMemBarrier(cUnit, kStoreLoad);
+ LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
+ ClobberCalleeSave(cu);
+ LIR* call_inst = OpReg(cu, kOpBlx, rARM_LR);
+ MarkSafepointPC(cu, call_inst);
+ GenMemBarrier(cu, kStoreLoad);
}
/*
* Mark garbage collection card. Skip if the value we're storing is null.
*/
-void MarkGCCard(CompilationUnit* cUnit, int valReg, int tgtAddrReg)
+void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
{
- int regCardBase = AllocTemp(cUnit);
- int regCardNo = AllocTemp(cUnit);
- LIR* branchOver = OpCmpImmBranch(cUnit, kCondEq, valReg, 0, NULL);
- LoadWordDisp(cUnit, rARM_SELF, Thread::CardTableOffset().Int32Value(), regCardBase);
- OpRegRegImm(cUnit, kOpLsr, regCardNo, tgtAddrReg, CardTable::kCardShift);
- StoreBaseIndexed(cUnit, regCardBase, regCardNo, regCardBase, 0,
+ int reg_card_base = AllocTemp(cu);
+ int reg_card_no = AllocTemp(cu);
+ LIR* branch_over = OpCmpImmBranch(cu, kCondEq, val_reg, 0, NULL);
+ LoadWordDisp(cu, rARM_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
+ OpRegRegImm(cu, kOpLsr, reg_card_no, tgt_addr_reg, CardTable::kCardShift);
+ StoreBaseIndexed(cu, reg_card_base, reg_card_no, reg_card_base, 0,
kUnsignedByte);
- LIR* target = NewLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = target;
- FreeTemp(cUnit, regCardBase);
- FreeTemp(cUnit, regCardNo);
+ LIR* target = NewLIR0(cu, kPseudoTargetLabel);
+ branch_over->target = target;
+ FreeTemp(cu, reg_card_base);
+ FreeTemp(cu, reg_card_no);
}
-void GenEntrySequence(CompilationUnit* cUnit, RegLocation* ArgLocs,
- RegLocation rlMethod)
+void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
+ RegLocation rl_method)
{
- int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
+ int spill_count = cu->num_core_spills + cu->num_fp_spills;
/*
* On entry, r0, r1, r2 & r3 are live. Let the register allocation
* mechanism know so it doesn't try to use any of them when
* expanding the frame or flushing. This leaves the utility
* code with a single temp: r12. This should be enough.
*/
- LockTemp(cUnit, r0);
- LockTemp(cUnit, r1);
- LockTemp(cUnit, r2);
- LockTemp(cUnit, r3);
+ LockTemp(cu, r0);
+ LockTemp(cu, r1);
+ LockTemp(cu, r2);
+ LockTemp(cu, r3);
/*
* We can safely skip the stack overflow check if we're
* a leaf *and* our frame size < fudge factor.
*/
- bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
- (static_cast<size_t>(cUnit->frameSize) <
+ bool skip_overflow_check = ((cu->attrs & METHOD_IS_LEAF) &&
+ (static_cast<size_t>(cu->frame_size) <
Thread::kStackOverflowReservedBytes));
- NewLIR0(cUnit, kPseudoMethodEntry);
- if (!skipOverflowCheck) {
+ NewLIR0(cu, kPseudoMethodEntry);
+ if (!skip_overflow_check) {
/* Load stack limit */
- LoadWordDisp(cUnit, rARM_SELF, Thread::StackEndOffset().Int32Value(), r12);
+ LoadWordDisp(cu, rARM_SELF, Thread::StackEndOffset().Int32Value(), r12);
}
/* Spill core callee saves */
- NewLIR1(cUnit, kThumb2Push, cUnit->coreSpillMask);
+ NewLIR1(cu, kThumb2Push, cu->core_spill_mask);
/* Need to spill any FP regs? */
- if (cUnit->numFPSpills) {
+ if (cu->num_fp_spills) {
/*
* NOTE: fp spills are a little different from core spills in that
* they are pushed as a contiguous block. When promoting from
* the fp set, we must allocate all singles from s16..highest-promoted
*/
- NewLIR1(cUnit, kThumb2VPushCS, cUnit->numFPSpills);
+ NewLIR1(cu, kThumb2VPushCS, cu->num_fp_spills);
}
- if (!skipOverflowCheck) {
- OpRegRegImm(cUnit, kOpSub, rARM_LR, rARM_SP, cUnit->frameSize - (spillCount * 4));
- GenRegRegCheck(cUnit, kCondCc, rARM_LR, r12, kThrowStackOverflow);
- OpRegCopy(cUnit, rARM_SP, rARM_LR); // Establish stack
+ if (!skip_overflow_check) {
+ OpRegRegImm(cu, kOpSub, rARM_LR, rARM_SP, cu->frame_size - (spill_count * 4));
+ GenRegRegCheck(cu, kCondCc, rARM_LR, r12, kThrowStackOverflow);
+ OpRegCopy(cu, rARM_SP, rARM_LR); // Establish stack
} else {
- OpRegImm(cUnit, kOpSub, rARM_SP, cUnit->frameSize - (spillCount * 4));
+ OpRegImm(cu, kOpSub, rARM_SP, cu->frame_size - (spill_count * 4));
}
- FlushIns(cUnit, ArgLocs, rlMethod);
+ FlushIns(cu, ArgLocs, rl_method);
- FreeTemp(cUnit, r0);
- FreeTemp(cUnit, r1);
- FreeTemp(cUnit, r2);
- FreeTemp(cUnit, r3);
+ FreeTemp(cu, r0);
+ FreeTemp(cu, r1);
+ FreeTemp(cu, r2);
+ FreeTemp(cu, r3);
}
-void GenExitSequence(CompilationUnit* cUnit)
+void GenExitSequence(CompilationUnit* cu)
{
- int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
+ int spill_count = cu->num_core_spills + cu->num_fp_spills;
/*
* In the exit path, r0/r1 are live - make sure they aren't
* allocated by the register utilities as temps.
*/
- LockTemp(cUnit, r0);
- LockTemp(cUnit, r1);
+ LockTemp(cu, r0);
+ LockTemp(cu, r1);
- NewLIR0(cUnit, kPseudoMethodExit);
- OpRegImm(cUnit, kOpAdd, rARM_SP, cUnit->frameSize - (spillCount * 4));
+ NewLIR0(cu, kPseudoMethodExit);
+ OpRegImm(cu, kOpAdd, rARM_SP, cu->frame_size - (spill_count * 4));
/* Need to restore any FP callee saves? */
- if (cUnit->numFPSpills) {
- NewLIR1(cUnit, kThumb2VPopCS, cUnit->numFPSpills);
+ if (cu->num_fp_spills) {
+ NewLIR1(cu, kThumb2VPopCS, cu->num_fp_spills);
}
- if (cUnit->coreSpillMask & (1 << rARM_LR)) {
+ if (cu->core_spill_mask & (1 << rARM_LR)) {
/* Unspill rARM_LR to rARM_PC */
- cUnit->coreSpillMask &= ~(1 << rARM_LR);
- cUnit->coreSpillMask |= (1 << rARM_PC);
+ cu->core_spill_mask &= ~(1 << rARM_LR);
+ cu->core_spill_mask |= (1 << rARM_PC);
}
- NewLIR1(cUnit, kThumb2Pop, cUnit->coreSpillMask);
- if (!(cUnit->coreSpillMask & (1 << rARM_PC))) {
+ NewLIR1(cu, kThumb2Pop, cu->core_spill_mask);
+ if (!(cu->core_spill_mask & (1 << rARM_PC))) {
/* We didn't pop to rARM_PC, so must do a bv rARM_LR */
- NewLIR1(cUnit, kThumbBx, rARM_LR);
+ NewLIR1(cu, kThumbBx, rARM_LR);
}
}
diff --git a/src/compiler/codegen/arm/fp_arm.cc b/src/compiler/codegen/arm/fp_arm.cc
index 3584971..46695b9 100644
--- a/src/compiler/codegen/arm/fp_arm.cc
+++ b/src/compiler/codegen/arm/fp_arm.cc
@@ -20,11 +20,11 @@
namespace art {
-bool GenArithOpFloat(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenArithOpFloat(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
int op = kThumbBkpt;
- RegLocation rlResult;
+ RegLocation rl_result;
/*
* Don't attempt to optimize register usage since these opcodes call out to
@@ -50,24 +50,24 @@
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
case Instruction::NEG_FLOAT: {
- return GenArithOpFloatPortable(cUnit, opcode, rlDest, rlSrc1, rlSrc2);
+ return GenArithOpFloatPortable(cu, opcode, rl_dest, rl_src1, rl_src2);
}
default:
return true;
}
- rlSrc1 = LoadValue(cUnit, rlSrc1, kFPReg);
- rlSrc2 = LoadValue(cUnit, rlSrc2, kFPReg);
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- NewLIR3(cUnit, op, rlResult.lowReg, rlSrc1.lowReg, rlSrc2.lowReg);
- StoreValue(cUnit, rlDest, rlResult);
+ rl_src1 = LoadValue(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR3(cu, op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
+ StoreValue(cu, rl_dest, rl_result);
return false;
}
-bool GenArithOpDouble(CompilationUnit* cUnit, Instruction::Code opcode,
- RegLocation rlDest, RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
{
int op = kThumbBkpt;
- RegLocation rlResult;
+ RegLocation rl_result;
switch (opcode) {
case Instruction::ADD_DOUBLE_2ADDR:
@@ -89,31 +89,31 @@
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
case Instruction::NEG_DOUBLE: {
- return GenArithOpDoublePortable(cUnit, opcode, rlDest, rlSrc1, rlSrc2);
+ return GenArithOpDoublePortable(cu, opcode, rl_dest, rl_src1, rl_src2);
}
default:
return true;
}
- rlSrc1 = LoadValueWide(cUnit, rlSrc1, kFPReg);
- DCHECK(rlSrc1.wide);
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kFPReg);
- DCHECK(rlSrc2.wide);
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- DCHECK(rlDest.wide);
- DCHECK(rlResult.wide);
- NewLIR3(cUnit, op, S2d(rlResult.lowReg, rlResult.highReg), S2d(rlSrc1.lowReg, rlSrc1.highReg),
- S2d(rlSrc2.lowReg, rlSrc2.highReg));
- StoreValueWide(cUnit, rlDest, rlResult);
+ rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+ DCHECK(rl_src1.wide);
+ rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+ DCHECK(rl_src2.wide);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ DCHECK(rl_dest.wide);
+ DCHECK(rl_result.wide);
+ NewLIR3(cu, op, S2d(rl_result.low_reg, rl_result.high_reg), S2d(rl_src1.low_reg, rl_src1.high_reg),
+ S2d(rl_src2.low_reg, rl_src2.high_reg));
+ StoreValueWide(cu, rl_dest, rl_result);
return false;
}
-bool GenConversion(CompilationUnit* cUnit, Instruction::Code opcode,
- RegLocation rlDest, RegLocation rlSrc)
+bool GenConversion(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src)
{
int op = kThumbBkpt;
- int srcReg;
- RegLocation rlResult;
+ int src_reg;
+ RegLocation rl_result;
switch (opcode) {
case Instruction::INT_TO_FLOAT:
@@ -138,182 +138,182 @@
case Instruction::FLOAT_TO_LONG:
case Instruction::LONG_TO_FLOAT:
case Instruction::DOUBLE_TO_LONG:
- return GenConversionPortable(cUnit, opcode, rlDest, rlSrc);
+ return GenConversionPortable(cu, opcode, rl_dest, rl_src);
default:
return true;
}
- if (rlSrc.wide) {
- rlSrc = LoadValueWide(cUnit, rlSrc, kFPReg);
- srcReg = S2d(rlSrc.lowReg, rlSrc.highReg);
+ if (rl_src.wide) {
+ rl_src = LoadValueWide(cu, rl_src, kFPReg);
+ src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
} else {
- rlSrc = LoadValue(cUnit, rlSrc, kFPReg);
- srcReg = rlSrc.lowReg;
+ rl_src = LoadValue(cu, rl_src, kFPReg);
+ src_reg = rl_src.low_reg;
}
- if (rlDest.wide) {
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- NewLIR2(cUnit, op, S2d(rlResult.lowReg, rlResult.highReg), srcReg);
- StoreValueWide(cUnit, rlDest, rlResult);
+ if (rl_dest.wide) {
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
} else {
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- NewLIR2(cUnit, op, rlResult.lowReg, srcReg);
- StoreValue(cUnit, rlDest, rlResult);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, op, rl_result.low_reg, src_reg);
+ StoreValue(cu, rl_dest, rl_result);
}
return false;
}
-void GenFusedFPCmpBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
- bool gtBias, bool isDouble)
+void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ bool gt_bias, bool is_double)
{
- LIR* labelList = cUnit->blockLabelList;
- LIR* target = &labelList[bb->taken->id];
- RegLocation rlSrc1;
- RegLocation rlSrc2;
- if (isDouble) {
- rlSrc1 = GetSrcWide(cUnit, mir, 0);
- rlSrc2 = GetSrcWide(cUnit, mir, 2);
- rlSrc1 = LoadValueWide(cUnit, rlSrc1, kFPReg);
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kFPReg);
- NewLIR2(cUnit, kThumb2Vcmpd, S2d(rlSrc1.lowReg, rlSrc2.highReg),
- S2d(rlSrc2.lowReg, rlSrc2.highReg));
+ LIR* label_list = cu->block_label_list;
+ LIR* target = &label_list[bb->taken->id];
+ RegLocation rl_src1;
+ RegLocation rl_src2;
+ if (is_double) {
+ rl_src1 = GetSrcWide(cu, mir, 0);
+ rl_src2 = GetSrcWide(cu, mir, 2);
+ rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+ NewLIR2(cu, kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src2.high_reg),
+ S2d(rl_src2.low_reg, rl_src2.high_reg));
} else {
- rlSrc1 = GetSrc(cUnit, mir, 0);
- rlSrc2 = GetSrc(cUnit, mir, 1);
- rlSrc1 = LoadValue(cUnit, rlSrc1, kFPReg);
- rlSrc2 = LoadValue(cUnit, rlSrc2, kFPReg);
- NewLIR2(cUnit, kThumb2Vcmps, rlSrc1.lowReg, rlSrc2.lowReg);
+ rl_src1 = GetSrc(cu, mir, 0);
+ rl_src2 = GetSrc(cu, mir, 1);
+ rl_src1 = LoadValue(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+ NewLIR2(cu, kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg);
}
- NewLIR0(cUnit, kThumb2Fmstat);
+ NewLIR0(cu, kThumb2Fmstat);
ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
switch(ccode) {
case kCondEq:
case kCondNe:
break;
case kCondLt:
- if (gtBias) {
+ if (gt_bias) {
ccode = kCondMi;
}
break;
case kCondLe:
- if (gtBias) {
+ if (gt_bias) {
ccode = kCondLs;
}
break;
case kCondGt:
- if (gtBias) {
+ if (gt_bias) {
ccode = kCondHi;
}
break;
case kCondGe:
- if (gtBias) {
+ if (gt_bias) {
ccode = kCondCs;
}
break;
default:
LOG(FATAL) << "Unexpected ccode: " << ccode;
}
- OpCondBranch(cUnit, ccode, target);
+ OpCondBranch(cu, ccode, target);
}
-bool GenCmpFP(CompilationUnit* cUnit, Instruction::Code opcode, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
- bool isDouble;
- int defaultResult;
- RegLocation rlResult;
+ bool is_double;
+ int default_result;
+ RegLocation rl_result;
switch (opcode) {
case Instruction::CMPL_FLOAT:
- isDouble = false;
- defaultResult = -1;
+ is_double = false;
+ default_result = -1;
break;
case Instruction::CMPG_FLOAT:
- isDouble = false;
- defaultResult = 1;
+ is_double = false;
+ default_result = 1;
break;
case Instruction::CMPL_DOUBLE:
- isDouble = true;
- defaultResult = -1;
+ is_double = true;
+ default_result = -1;
break;
case Instruction::CMPG_DOUBLE:
- isDouble = true;
- defaultResult = 1;
+ is_double = true;
+ default_result = 1;
break;
default:
return true;
}
- if (isDouble) {
- rlSrc1 = LoadValueWide(cUnit, rlSrc1, kFPReg);
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kFPReg);
- ClobberSReg(cUnit, rlDest.sRegLow);
- rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- LoadConstant(cUnit, rlResult.lowReg, defaultResult);
- NewLIR2(cUnit, kThumb2Vcmpd, S2d(rlSrc1.lowReg, rlSrc2.highReg),
- S2d(rlSrc2.lowReg, rlSrc2.highReg));
+ if (is_double) {
+ rl_src1 = LoadValueWide(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kFPReg);
+ ClobberSReg(cu, rl_dest.s_reg_low);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ LoadConstant(cu, rl_result.low_reg, default_result);
+ NewLIR2(cu, kThumb2Vcmpd, S2d(rl_src1.low_reg, rl_src2.high_reg),
+ S2d(rl_src2.low_reg, rl_src2.high_reg));
} else {
- rlSrc1 = LoadValue(cUnit, rlSrc1, kFPReg);
- rlSrc2 = LoadValue(cUnit, rlSrc2, kFPReg);
- ClobberSReg(cUnit, rlDest.sRegLow);
- rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- LoadConstant(cUnit, rlResult.lowReg, defaultResult);
- NewLIR2(cUnit, kThumb2Vcmps, rlSrc1.lowReg, rlSrc2.lowReg);
+ rl_src1 = LoadValue(cu, rl_src1, kFPReg);
+ rl_src2 = LoadValue(cu, rl_src2, kFPReg);
+ ClobberSReg(cu, rl_dest.s_reg_low);
+ rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ LoadConstant(cu, rl_result.low_reg, default_result);
+ NewLIR2(cu, kThumb2Vcmps, rl_src1.low_reg, rl_src2.low_reg);
}
- DCHECK(!ARM_FPREG(rlResult.lowReg));
- NewLIR0(cUnit, kThumb2Fmstat);
+ DCHECK(!ARM_FPREG(rl_result.low_reg));
+ NewLIR0(cu, kThumb2Fmstat);
- OpIT(cUnit, (defaultResult == -1) ? kArmCondGt : kArmCondMi, "");
- NewLIR2(cUnit, kThumb2MovImmShift, rlResult.lowReg,
- ModifiedImmediate(-defaultResult)); // Must not alter ccodes
- GenBarrier(cUnit);
+ OpIT(cu, (default_result == -1) ? kArmCondGt : kArmCondMi, "");
+ NewLIR2(cu, kThumb2MovImmShift, rl_result.low_reg,
+ ModifiedImmediate(-default_result)); // Must not alter ccodes
+ GenBarrier(cu);
- OpIT(cUnit, kArmCondEq, "");
- LoadConstant(cUnit, rlResult.lowReg, 0);
- GenBarrier(cUnit);
+ OpIT(cu, kArmCondEq, "");
+ LoadConstant(cu, rl_result.low_reg, 0);
+ GenBarrier(cu);
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
return false;
}
-void GenNegFloat(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc)
+void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
{
- RegLocation rlResult;
- rlSrc = LoadValue(cUnit, rlSrc, kFPReg);
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- NewLIR2(cUnit, kThumb2Vnegs, rlResult.lowReg, rlSrc.lowReg);
- StoreValue(cUnit, rlDest, rlResult);
+ RegLocation rl_result;
+ rl_src = LoadValue(cu, rl_src, kFPReg);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, kThumb2Vnegs, rl_result.low_reg, rl_src.low_reg);
+ StoreValue(cu, rl_dest, rl_result);
}
-void GenNegDouble(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc)
+void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
{
- RegLocation rlResult;
- rlSrc = LoadValueWide(cUnit, rlSrc, kFPReg);
- rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- NewLIR2(cUnit, kThumb2Vnegd, S2d(rlResult.lowReg, rlResult.highReg),
- S2d(rlSrc.lowReg, rlSrc.highReg));
- StoreValueWide(cUnit, rlDest, rlResult);
+ RegLocation rl_result;
+ rl_src = LoadValueWide(cu, rl_src, kFPReg);
+ rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, kThumb2Vnegd, S2d(rl_result.low_reg, rl_result.high_reg),
+ S2d(rl_src.low_reg, rl_src.high_reg));
+ StoreValueWide(cu, rl_dest, rl_result);
}
-bool GenInlinedSqrt(CompilationUnit* cUnit, CallInfo* info) {
- DCHECK_EQ(cUnit->instructionSet, kThumb2);
+bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) {
+ DCHECK_EQ(cu->instruction_set, kThumb2);
LIR *branch;
- RegLocation rlSrc = info->args[0];
- RegLocation rlDest = InlineTargetWide(cUnit, info); // double place for result
- rlSrc = LoadValueWide(cUnit, rlSrc, kFPReg);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kFPReg, true);
- NewLIR2(cUnit, kThumb2Vsqrtd, S2d(rlResult.lowReg, rlResult.highReg),
- S2d(rlSrc.lowReg, rlSrc.highReg));
- NewLIR2(cUnit, kThumb2Vcmpd, S2d(rlResult.lowReg, rlResult.highReg),
- S2d(rlResult.lowReg, rlResult.highReg));
- NewLIR0(cUnit, kThumb2Fmstat);
- branch = NewLIR2(cUnit, kThumbBCond, 0, kArmCondEq);
- ClobberCalleeSave(cUnit);
- LockCallTemps(cUnit); // Using fixed registers
- int rTgt = LoadHelper(cUnit, ENTRYPOINT_OFFSET(pSqrt));
- NewLIR3(cUnit, kThumb2Fmrrd, r0, r1, S2d(rlSrc.lowReg, rlSrc.highReg));
- NewLIR1(cUnit, kThumbBlxR, rTgt);
- NewLIR3(cUnit, kThumb2Fmdrr, S2d(rlResult.lowReg, rlResult.highReg), r0, r1);
- branch->target = NewLIR0(cUnit, kPseudoTargetLabel);
- StoreValueWide(cUnit, rlDest, rlResult);
+ RegLocation rl_src = info->args[0];
+ RegLocation rl_dest = InlineTargetWide(cu, info); // double place for result
+ rl_src = LoadValueWide(cu, rl_src, kFPReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kFPReg, true);
+ NewLIR2(cu, kThumb2Vsqrtd, S2d(rl_result.low_reg, rl_result.high_reg),
+ S2d(rl_src.low_reg, rl_src.high_reg));
+ NewLIR2(cu, kThumb2Vcmpd, S2d(rl_result.low_reg, rl_result.high_reg),
+ S2d(rl_result.low_reg, rl_result.high_reg));
+ NewLIR0(cu, kThumb2Fmstat);
+ branch = NewLIR2(cu, kThumbBCond, 0, kArmCondEq);
+ ClobberCalleeSave(cu);
+ LockCallTemps(cu); // Using fixed registers
+ int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pSqrt));
+ NewLIR3(cu, kThumb2Fmrrd, r0, r1, S2d(rl_src.low_reg, rl_src.high_reg));
+ NewLIR1(cu, kThumbBlxR, r_tgt);
+ NewLIR3(cu, kThumb2Fmdrr, S2d(rl_result.low_reg, rl_result.high_reg), r0, r1);
+ branch->target = NewLIR0(cu, kPseudoTargetLabel);
+ StoreValueWide(cu, rl_dest, rl_result);
return true;
}
diff --git a/src/compiler/codegen/arm/int_arm.cc b/src/compiler/codegen/arm/int_arm.cc
index bdb3bea..45fe807 100644
--- a/src/compiler/codegen/arm/int_arm.cc
+++ b/src/compiler/codegen/arm/int_arm.cc
@@ -24,11 +24,11 @@
namespace art {
-LIR* OpCmpBranch(CompilationUnit* cUnit, ConditionCode cond, int src1,
+LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1,
int src2, LIR* target)
{
- OpRegReg(cUnit, kOpCmp, src1, src2);
- return OpCondBranch(cUnit, cond, target);
+ OpRegReg(cu, kOpCmp, src1, src2);
+ return OpCondBranch(cu, cond, target);
}
/*
@@ -41,11 +41,11 @@
* met, and an "E" means the instruction is executed if the condition
* is not met.
*/
-LIR* OpIT(CompilationUnit* cUnit, ArmConditionCode code, const char* guide)
+LIR* OpIT(CompilationUnit* cu, ArmConditionCode code, const char* guide)
{
int mask;
- int condBit = code & 1;
- int altBit = condBit ^ 1;
+ int cond_bit = code & 1;
+ int alt_bit = cond_bit ^ 1;
int mask3 = 0;
int mask2 = 0;
int mask1 = 0;
@@ -53,11 +53,11 @@
//Note: case fallthroughs intentional
switch (strlen(guide)) {
case 3:
- mask1 = (guide[2] == 'T') ? condBit : altBit;
+ mask1 = (guide[2] == 'T') ? cond_bit : alt_bit;
case 2:
- mask2 = (guide[1] == 'T') ? condBit : altBit;
+ mask2 = (guide[1] == 'T') ? cond_bit : alt_bit;
case 1:
- mask3 = (guide[0] == 'T') ? condBit : altBit;
+ mask3 = (guide[0] == 'T') ? cond_bit : alt_bit;
break;
case 0:
break;
@@ -66,7 +66,7 @@
}
mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
(1 << (3 - strlen(guide)));
- return NewLIR2(cUnit, kThumb2It, code, mask);
+ return NewLIR2(cu, kThumb2It, code, mask);
}
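// Illustrative sketch (not ART code): concrete mask values produced by the
// computation above.  kArmCondEq is assumed to be 0 here purely for the
// printout; the guide string is interpreted exactly as in OpIT.
#include <cstdio>
#include <cstring>

int ItMask(int code, const char* guide) {
  int cond_bit = code & 1;
  int alt_bit = cond_bit ^ 1;
  int mask3 = 0, mask2 = 0, mask1 = 0;
  const int len = static_cast<int>(std::strlen(guide));
  switch (len) {  // same intentional fall-through as above
    case 3: mask1 = (guide[2] == 'T') ? cond_bit : alt_bit;  // fall through
    case 2: mask2 = (guide[1] == 'T') ? cond_bit : alt_bit;  // fall through
    case 1: mask3 = (guide[0] == 'T') ? cond_bit : alt_bit;  // fall through
    case 0: break;
  }
  return (mask3 << 3) | (mask2 << 2) | (mask1 << 1) | (1 << (3 - len));
}

int main() {
  const int kEq = 0;  // assumed encoding of kArmCondEq
  std::printf("IT eq   -> mask 0x%x\n", ItMask(kEq, ""));    // 0x8
  std::printf("ITE eq  -> mask 0x%x\n", ItMask(kEq, "E"));   // 0xc
  std::printf("ITTE eq -> mask 0x%x\n", ItMask(kEq, "TE"));  // 0x6
  return 0;
}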
/*
@@ -84,168 +84,168 @@
* neg rX
* done:
*/
-void GenCmpLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
LIR* target1;
LIR* target2;
- rlSrc1 = LoadValueWide(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kCoreReg);
- int tReg = AllocTemp(cUnit);
- LoadConstant(cUnit, tReg, -1);
- OpRegReg(cUnit, kOpCmp, rlSrc1.highReg, rlSrc2.highReg);
- LIR* branch1 = OpCondBranch(cUnit, kCondLt, NULL);
- LIR* branch2 = OpCondBranch(cUnit, kCondGt, NULL);
- OpRegRegReg(cUnit, kOpSub, tReg, rlSrc1.lowReg, rlSrc2.lowReg);
- LIR* branch3 = OpCondBranch(cUnit, kCondEq, NULL);
+ rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
+ int t_reg = AllocTemp(cu);
+ LoadConstant(cu, t_reg, -1);
+ OpRegReg(cu, kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
+ LIR* branch1 = OpCondBranch(cu, kCondLt, NULL);
+ LIR* branch2 = OpCondBranch(cu, kCondGt, NULL);
+ OpRegRegReg(cu, kOpSub, t_reg, rl_src1.low_reg, rl_src2.low_reg);
+ LIR* branch3 = OpCondBranch(cu, kCondEq, NULL);
- OpIT(cUnit, kArmCondHi, "E");
- NewLIR2(cUnit, kThumb2MovImmShift, tReg, ModifiedImmediate(-1));
- LoadConstant(cUnit, tReg, 1);
- GenBarrier(cUnit);
+ OpIT(cu, kArmCondHi, "E");
+ NewLIR2(cu, kThumb2MovImmShift, t_reg, ModifiedImmediate(-1));
+ LoadConstant(cu, t_reg, 1);
+ GenBarrier(cu);
- target2 = NewLIR0(cUnit, kPseudoTargetLabel);
- OpRegReg(cUnit, kOpNeg, tReg, tReg);
+ target2 = NewLIR0(cu, kPseudoTargetLabel);
+ OpRegReg(cu, kOpNeg, t_reg, t_reg);
- target1 = NewLIR0(cUnit, kPseudoTargetLabel);
+ target1 = NewLIR0(cu, kPseudoTargetLabel);
- RegLocation rlTemp = LocCReturn(); // Just using as template, will change
- rlTemp.lowReg = tReg;
- StoreValue(cUnit, rlDest, rlTemp);
- FreeTemp(cUnit, tReg);
+ RegLocation rl_temp = LocCReturn(); // Just using as template, will change
+ rl_temp.low_reg = t_reg;
+ StoreValue(cu, rl_dest, rl_temp);
+ FreeTemp(cu, t_reg);
branch1->target = target1;
branch2->target = target2;
branch3->target = branch1->target;
}
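// Illustrative sketch (not ART code): the cmp-long result that the branchy
// sequence above materializes into t_reg (-1, 0 or 1).
#include <cstdint>

int32_t CmpLongRef(int64_t a, int64_t b) {
  if (a == b) return 0;
  return (a < b) ? -1 : 1;
}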
-void GenFusedLongCmpBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir)
+void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir)
{
- LIR* labelList = cUnit->blockLabelList;
- LIR* taken = &labelList[bb->taken->id];
- LIR* notTaken = &labelList[bb->fallThrough->id];
- RegLocation rlSrc1 = GetSrcWide(cUnit, mir, 0);
- RegLocation rlSrc2 = GetSrcWide(cUnit, mir, 2);
- rlSrc1 = LoadValueWide(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = LoadValueWide(cUnit, rlSrc2, kCoreReg);
+ LIR* label_list = cu->block_label_list;
+ LIR* taken = &label_list[bb->taken->id];
+ LIR* not_taken = &label_list[bb->fall_through->id];
+ RegLocation rl_src1 = GetSrcWide(cu, mir, 0);
+ RegLocation rl_src2 = GetSrcWide(cu, mir, 2);
+ rl_src1 = LoadValueWide(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(cu, rl_src2, kCoreReg);
ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
- OpRegReg(cUnit, kOpCmp, rlSrc1.highReg, rlSrc2.highReg);
+ OpRegReg(cu, kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
switch(ccode) {
case kCondEq:
- OpCondBranch(cUnit, kCondNe, notTaken);
+ OpCondBranch(cu, kCondNe, not_taken);
break;
case kCondNe:
- OpCondBranch(cUnit, kCondNe, taken);
+ OpCondBranch(cu, kCondNe, taken);
break;
case kCondLt:
- OpCondBranch(cUnit, kCondLt, taken);
- OpCondBranch(cUnit, kCondGt, notTaken);
+ OpCondBranch(cu, kCondLt, taken);
+ OpCondBranch(cu, kCondGt, not_taken);
ccode = kCondCc;
break;
case kCondLe:
- OpCondBranch(cUnit, kCondLt, taken);
- OpCondBranch(cUnit, kCondGt, notTaken);
+ OpCondBranch(cu, kCondLt, taken);
+ OpCondBranch(cu, kCondGt, not_taken);
ccode = kCondLs;
break;
case kCondGt:
- OpCondBranch(cUnit, kCondGt, taken);
- OpCondBranch(cUnit, kCondLt, notTaken);
+ OpCondBranch(cu, kCondGt, taken);
+ OpCondBranch(cu, kCondLt, not_taken);
ccode = kCondHi;
break;
case kCondGe:
- OpCondBranch(cUnit, kCondGt, taken);
- OpCondBranch(cUnit, kCondLt, notTaken);
+ OpCondBranch(cu, kCondGt, taken);
+ OpCondBranch(cu, kCondLt, not_taken);
ccode = kCondCs;
break;
default:
LOG(FATAL) << "Unexpected ccode: " << ccode;
}
- OpRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
- OpCondBranch(cUnit, ccode, taken);
+ OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+ OpCondBranch(cu, ccode, taken);
}
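// Illustrative sketch (not ART code): why the code above compares the high
// words with signed conditions and then remaps ccode to Cc/Ls/Hi/Cs for the
// low words - the low halves of a two's-complement pair compare unsigned.
#include <cstdint>

bool LessThan64(int32_t a_hi, uint32_t a_lo, int32_t b_hi, uint32_t b_lo) {
  if (a_hi != b_hi) {
    return a_hi < b_hi;  // signed compare on the high words decides
  }
  return a_lo < b_lo;    // equal highs: unsigned compare on the low words
}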
/*
* Generate a register comparison to an immediate and branch. Caller
* is responsible for setting branch target field.
*/
-LIR* OpCmpImmBranch(CompilationUnit* cUnit, ConditionCode cond, int reg,
- int checkValue, LIR* target)
+LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg,
+ int check_value, LIR* target)
{
LIR* branch;
- int modImm;
- ArmConditionCode armCond = ArmConditionEncoding(cond);
- if ((ARM_LOWREG(reg)) && (checkValue == 0) &&
- ((armCond == kArmCondEq) || (armCond == kArmCondNe))) {
- branch = NewLIR2(cUnit, (armCond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
+ int mod_imm;
+ ArmConditionCode arm_cond = ArmConditionEncoding(cond);
+ if ((ARM_LOWREG(reg)) && (check_value == 0) &&
+ ((arm_cond == kArmCondEq) || (arm_cond == kArmCondNe))) {
+ branch = NewLIR2(cu, (arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
reg, 0);
} else {
- modImm = ModifiedImmediate(checkValue);
- if (ARM_LOWREG(reg) && ((checkValue & 0xff) == checkValue)) {
- NewLIR2(cUnit, kThumbCmpRI8, reg, checkValue);
- } else if (modImm >= 0) {
- NewLIR2(cUnit, kThumb2CmpRI8, reg, modImm);
+ mod_imm = ModifiedImmediate(check_value);
+ if (ARM_LOWREG(reg) && ((check_value & 0xff) == check_value)) {
+ NewLIR2(cu, kThumbCmpRI8, reg, check_value);
+ } else if (mod_imm >= 0) {
+ NewLIR2(cu, kThumb2CmpRI8, reg, mod_imm);
} else {
- int tReg = AllocTemp(cUnit);
- LoadConstant(cUnit, tReg, checkValue);
- OpRegReg(cUnit, kOpCmp, reg, tReg);
+ int t_reg = AllocTemp(cu);
+ LoadConstant(cu, t_reg, check_value);
+ OpRegReg(cu, kOpCmp, reg, t_reg);
}
- branch = NewLIR2(cUnit, kThumbBCond, 0, armCond);
+ branch = NewLIR2(cu, kThumbBCond, 0, arm_cond);
}
branch->target = target;
return branch;
}
-LIR* OpRegCopyNoInsert(CompilationUnit* cUnit, int rDest, int rSrc)
+LIR* OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src)
{
LIR* res;
int opcode;
- if (ARM_FPREG(rDest) || ARM_FPREG(rSrc))
- return FpRegCopy(cUnit, rDest, rSrc);
- if (ARM_LOWREG(rDest) && ARM_LOWREG(rSrc))
+ if (ARM_FPREG(r_dest) || ARM_FPREG(r_src))
+ return FpRegCopy(cu, r_dest, r_src);
+ if (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src))
opcode = kThumbMovRR;
- else if (!ARM_LOWREG(rDest) && !ARM_LOWREG(rSrc))
+ else if (!ARM_LOWREG(r_dest) && !ARM_LOWREG(r_src))
opcode = kThumbMovRR_H2H;
- else if (ARM_LOWREG(rDest))
+ else if (ARM_LOWREG(r_dest))
opcode = kThumbMovRR_H2L;
else
opcode = kThumbMovRR_L2H;
- res = RawLIR(cUnit, cUnit->currentDalvikOffset, opcode, rDest, rSrc);
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) && rDest == rSrc) {
- res->flags.isNop = true;
+ res = RawLIR(cu, cu->current_dalvik_offset, opcode, r_dest, r_src);
+ if (!(cu->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+ res->flags.is_nop = true;
}
return res;
}
-LIR* OpRegCopy(CompilationUnit* cUnit, int rDest, int rSrc)
+LIR* OpRegCopy(CompilationUnit* cu, int r_dest, int r_src)
{
- LIR* res = OpRegCopyNoInsert(cUnit, rDest, rSrc);
- AppendLIR(cUnit, res);
+ LIR* res = OpRegCopyNoInsert(cu, r_dest, r_src);
+ AppendLIR(cu, res);
return res;
}
-void OpRegCopyWide(CompilationUnit* cUnit, int destLo, int destHi,
- int srcLo, int srcHi)
+void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi,
+ int src_lo, int src_hi)
{
- bool destFP = ARM_FPREG(destLo) && ARM_FPREG(destHi);
- bool srcFP = ARM_FPREG(srcLo) && ARM_FPREG(srcHi);
- DCHECK_EQ(ARM_FPREG(srcLo), ARM_FPREG(srcHi));
- DCHECK_EQ(ARM_FPREG(destLo), ARM_FPREG(destHi));
- if (destFP) {
- if (srcFP) {
- OpRegCopy(cUnit, S2d(destLo, destHi), S2d(srcLo, srcHi));
+ bool dest_fp = ARM_FPREG(dest_lo) && ARM_FPREG(dest_hi);
+ bool src_fp = ARM_FPREG(src_lo) && ARM_FPREG(src_hi);
+ DCHECK_EQ(ARM_FPREG(src_lo), ARM_FPREG(src_hi));
+ DCHECK_EQ(ARM_FPREG(dest_lo), ARM_FPREG(dest_hi));
+ if (dest_fp) {
+ if (src_fp) {
+ OpRegCopy(cu, S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
} else {
- NewLIR3(cUnit, kThumb2Fmdrr, S2d(destLo, destHi), srcLo, srcHi);
+ NewLIR3(cu, kThumb2Fmdrr, S2d(dest_lo, dest_hi), src_lo, src_hi);
}
} else {
- if (srcFP) {
- NewLIR3(cUnit, kThumb2Fmrrd, destLo, destHi, S2d(srcLo, srcHi));
+ if (src_fp) {
+ NewLIR3(cu, kThumb2Fmrrd, dest_lo, dest_hi, S2d(src_lo, src_hi));
} else {
// Handle overlap
- if (srcHi == destLo) {
- OpRegCopy(cUnit, destHi, srcHi);
- OpRegCopy(cUnit, destLo, srcLo);
+ if (src_hi == dest_lo) {
+ OpRegCopy(cu, dest_hi, src_hi);
+ OpRegCopy(cu, dest_lo, src_lo);
} else {
- OpRegCopy(cUnit, destLo, srcLo);
- OpRegCopy(cUnit, destHi, srcHi);
+ OpRegCopy(cu, dest_lo, src_lo);
+ OpRegCopy(cu, dest_hi, src_hi);
}
}
}
@@ -258,7 +258,7 @@
DividePattern pattern;
};
-static const MagicTable magicTable[] = {
+static const MagicTable magic_table[] = {
{0, 0, DivideNone}, // 0
{0, 0, DivideNone}, // 1
{0, 0, DivideNone}, // 2
@@ -278,277 +278,277 @@
};
// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
-bool SmallLiteralDivide(CompilationUnit* cUnit, Instruction::Code dalvikOpcode,
- RegLocation rlSrc, RegLocation rlDest, int lit)
+bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+ RegLocation rl_src, RegLocation rl_dest, int lit)
{
- if ((lit < 0) || (lit >= static_cast<int>(sizeof(magicTable)/sizeof(magicTable[0])))) {
+ if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
return false;
}
- DividePattern pattern = magicTable[lit].pattern;
+ DividePattern pattern = magic_table[lit].pattern;
if (pattern == DivideNone) {
return false;
}
// Tuning: add rem patterns
- if (dalvikOpcode != Instruction::DIV_INT_LIT8) {
+ if (dalvik_opcode != Instruction::DIV_INT_LIT8) {
return false;
}
- int rMagic = AllocTemp(cUnit);
- LoadConstant(cUnit, rMagic, magicTable[lit].magic);
- rlSrc = LoadValue(cUnit, rlSrc, kCoreReg);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- int rHi = AllocTemp(cUnit);
- int rLo = AllocTemp(cUnit);
- NewLIR4(cUnit, kThumb2Smull, rLo, rHi, rMagic, rlSrc.lowReg);
+ int r_magic = AllocTemp(cu);
+ LoadConstant(cu, r_magic, magic_table[lit].magic);
+ rl_src = LoadValue(cu, rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int r_hi = AllocTemp(cu);
+ int r_lo = AllocTemp(cu);
+ NewLIR4(cu, kThumb2Smull, r_lo, r_hi, r_magic, rl_src.low_reg);
switch(pattern) {
case Divide3:
- OpRegRegRegShift(cUnit, kOpSub, rlResult.lowReg, rHi,
- rlSrc.lowReg, EncodeShift(kArmAsr, 31));
+ OpRegRegRegShift(cu, kOpSub, rl_result.low_reg, r_hi,
+ rl_src.low_reg, EncodeShift(kArmAsr, 31));
break;
case Divide5:
- OpRegRegImm(cUnit, kOpAsr, rLo, rlSrc.lowReg, 31);
- OpRegRegRegShift(cUnit, kOpRsub, rlResult.lowReg, rLo, rHi,
- EncodeShift(kArmAsr, magicTable[lit].shift));
+ OpRegRegImm(cu, kOpAsr, r_lo, rl_src.low_reg, 31);
+ OpRegRegRegShift(cu, kOpRsub, rl_result.low_reg, r_lo, r_hi,
+ EncodeShift(kArmAsr, magic_table[lit].shift));
break;
case Divide7:
- OpRegReg(cUnit, kOpAdd, rHi, rlSrc.lowReg);
- OpRegRegImm(cUnit, kOpAsr, rLo, rlSrc.lowReg, 31);
- OpRegRegRegShift(cUnit, kOpRsub, rlResult.lowReg, rLo, rHi,
- EncodeShift(kArmAsr, magicTable[lit].shift));
+ OpRegReg(cu, kOpAdd, r_hi, rl_src.low_reg);
+ OpRegRegImm(cu, kOpAsr, r_lo, rl_src.low_reg, 31);
+ OpRegRegRegShift(cu, kOpRsub, rl_result.low_reg, r_lo, r_hi,
+ EncodeShift(kArmAsr, magic_table[lit].shift));
break;
default:
LOG(FATAL) << "Unexpected pattern: " << pattern;
}
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
return true;
}
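// Illustrative sketch (not ART code): the arithmetic behind the Divide3
// pattern above, using the magic constant 0x55555556 from Hacker's Delight
// 10-4 (assumed to be the magic_table entry for 3).
#include <cstdint>
#include <cstdio>

int32_t DivideBy3(int32_t n) {
  int64_t product = static_cast<int64_t>(n) * 0x55555556LL;  // smull
  int32_t hi = static_cast<int32_t>(product >> 32);          // high word
  return hi - (n >> 31);                                     // sub ..., asr #31
}

int main() {
  const int32_t tests[] = {9, 10, -7, -9};
  for (int32_t n : tests) {
    std::printf("%d / 3 = %d\n", n, DivideBy3(n));  // 3, 3, -2, -3
  }
  return 0;
}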
-LIR* GenRegMemCheck(CompilationUnit* cUnit, ConditionCode cCode,
+LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
int reg1, int base, int offset, ThrowKind kind)
{
LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
return NULL;
}
-RegLocation GenDivRemLit(CompilationUnit* cUnit, RegLocation rlDest, int reg1, int lit, bool isDiv)
+RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg1, int lit, bool is_div)
{
LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
- return rlDest;
+ return rl_dest;
}
-RegLocation GenDivRem(CompilationUnit* cUnit, RegLocation rlDest, int reg1, int reg2, bool isDiv)
+RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg1, int reg2, bool is_div)
{
LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
- return rlDest;
+ return rl_dest;
}
-bool GenInlinedMinMaxInt(CompilationUnit *cUnit, CallInfo* info, bool isMin)
+bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
{
- DCHECK_EQ(cUnit->instructionSet, kThumb2);
- RegLocation rlSrc1 = info->args[0];
- RegLocation rlSrc2 = info->args[1];
- rlSrc1 = LoadValue(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = LoadValue(cUnit, rlSrc2, kCoreReg);
- RegLocation rlDest = InlineTarget(cUnit, info);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- OpRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
- OpIT(cUnit, (isMin) ? kArmCondGt : kArmCondLt, "E");
- OpRegReg(cUnit, kOpMov, rlResult.lowReg, rlSrc2.lowReg);
- OpRegReg(cUnit, kOpMov, rlResult.lowReg, rlSrc1.lowReg);
- GenBarrier(cUnit);
- StoreValue(cUnit, rlDest, rlResult);
+ DCHECK_EQ(cu->instruction_set, kThumb2);
+ RegLocation rl_src1 = info->args[0];
+ RegLocation rl_src2 = info->args[1];
+ rl_src1 = LoadValue(cu, rl_src1, kCoreReg);
+ rl_src2 = LoadValue(cu, rl_src2, kCoreReg);
+ RegLocation rl_dest = InlineTarget(cu, info);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpRegReg(cu, kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+ OpIT(cu, (is_min) ? kArmCondGt : kArmCondLt, "E");
+ OpRegReg(cu, kOpMov, rl_result.low_reg, rl_src2.low_reg);
+ OpRegReg(cu, kOpMov, rl_result.low_reg, rl_src1.low_reg);
+ GenBarrier(cu);
+ StoreValue(cu, rl_dest, rl_result);
return true;
}
-void OpLea(CompilationUnit* cUnit, int rBase, int reg1, int reg2, int scale, int offset)
+void OpLea(CompilationUnit* cu, int r_base, int reg1, int reg2, int scale, int offset)
{
LOG(FATAL) << "Unexpected use of OpLea for Arm";
}
-void OpTlsCmp(CompilationUnit* cUnit, int offset, int val)
+void OpTlsCmp(CompilationUnit* cu, int offset, int val)
{
LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
}
-bool GenInlinedCas32(CompilationUnit* cUnit, CallInfo* info, bool need_write_barrier) {
- DCHECK_EQ(cUnit->instructionSet, kThumb2);
- // Unused - RegLocation rlSrcUnsafe = info->args[0];
- RegLocation rlSrcObj= info->args[1]; // Object - known non-null
- RegLocation rlSrcOffset= info->args[2]; // long low
- rlSrcOffset.wide = 0; // ignore high half in info->args[3]
- RegLocation rlSrcExpected= info->args[4]; // int or Object
- RegLocation rlSrcNewValue= info->args[5]; // int or Object
- RegLocation rlDest = InlineTarget(cUnit, info); // boolean place for result
+bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
+ DCHECK_EQ(cu->instruction_set, kThumb2);
+ // Unused - RegLocation rl_src_unsafe = info->args[0];
+ RegLocation rl_src_obj = info->args[1]; // Object - known non-null
+ RegLocation rl_src_offset = info->args[2]; // long low
+ rl_src_offset.wide = 0; // ignore high half in info->args[3]
+ RegLocation rl_src_expected = info->args[4]; // int or Object
+ RegLocation rl_src_new_value = info->args[5]; // int or Object
+ RegLocation rl_dest = InlineTarget(cu, info); // boolean place for result
// Release store semantics, get the barrier out of the way. TODO: revisit
- GenMemBarrier(cUnit, kStoreLoad);
+ GenMemBarrier(cu, kStoreLoad);
- RegLocation rlObject = LoadValue(cUnit, rlSrcObj, kCoreReg);
- RegLocation rlNewValue = LoadValue(cUnit, rlSrcNewValue, kCoreReg);
+ RegLocation rl_object = LoadValue(cu, rl_src_obj, kCoreReg);
+ RegLocation rl_new_value = LoadValue(cu, rl_src_new_value, kCoreReg);
if (need_write_barrier) {
// Mark card for object assuming new value is stored.
- MarkGCCard(cUnit, rlNewValue.lowReg, rlObject.lowReg);
+ MarkGCCard(cu, rl_new_value.low_reg, rl_object.low_reg);
}
- RegLocation rlOffset = LoadValue(cUnit, rlSrcOffset, kCoreReg);
+ RegLocation rl_offset = LoadValue(cu, rl_src_offset, kCoreReg);
- int rPtr = AllocTemp(cUnit);
- OpRegRegReg(cUnit, kOpAdd, rPtr, rlObject.lowReg, rlOffset.lowReg);
+ int r_ptr = AllocTemp(cu);
+ OpRegRegReg(cu, kOpAdd, r_ptr, rl_object.low_reg, rl_offset.low_reg);
- // Free now unneeded rlObject and rlOffset to give more temps.
- ClobberSReg(cUnit, rlObject.sRegLow);
- FreeTemp(cUnit, rlObject.lowReg);
- ClobberSReg(cUnit, rlOffset.sRegLow);
- FreeTemp(cUnit, rlOffset.lowReg);
+ // Free now unneeded rl_object and rl_offset to give more temps.
+ ClobberSReg(cu, rl_object.s_reg_low);
+ FreeTemp(cu, rl_object.low_reg);
+ ClobberSReg(cu, rl_offset.s_reg_low);
+ FreeTemp(cu, rl_offset.low_reg);
- int rOldValue = AllocTemp(cUnit);
- NewLIR3(cUnit, kThumb2Ldrex, rOldValue, rPtr, 0); // rOldValue := [rPtr]
+ int r_old_value = AllocTemp(cu);
+ NewLIR3(cu, kThumb2Ldrex, r_old_value, r_ptr, 0); // r_old_value := [r_ptr]
- RegLocation rlExpected = LoadValue(cUnit, rlSrcExpected, kCoreReg);
+ RegLocation rl_expected = LoadValue(cu, rl_src_expected, kCoreReg);
- // if (rOldValue == rExpected) {
- // [rPtr] <- rNewValue && rResult := success ? 0 : 1
- // rResult ^= 1
+ // if (r_old_value == r_expected) {
+ // [r_ptr] <- r_new_value && r_result := success ? 0 : 1
+ // r_result ^= 1
// } else {
- // rResult := 0
+ // r_result := 0
// }
- OpRegReg(cUnit, kOpCmp, rOldValue, rlExpected.lowReg);
- FreeTemp(cUnit, rOldValue); // Now unneeded.
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- OpIT(cUnit, kArmCondEq, "TE");
- NewLIR4(cUnit, kThumb2Strex, rlResult.lowReg, rlNewValue.lowReg, rPtr, 0);
- FreeTemp(cUnit, rPtr); // Now unneeded.
- OpRegImm(cUnit, kOpXor, rlResult.lowReg, 1);
- OpRegReg(cUnit, kOpXor, rlResult.lowReg, rlResult.lowReg);
+ OpRegReg(cu, kOpCmp, r_old_value, rl_expected.low_reg);
+ FreeTemp(cu, r_old_value); // Now unneeded.
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ OpIT(cu, kArmCondEq, "TE");
+ NewLIR4(cu, kThumb2Strex, rl_result.low_reg, rl_new_value.low_reg, r_ptr, 0);
+ FreeTemp(cu, r_ptr); // Now unneeded.
+ OpRegImm(cu, kOpXor, rl_result.low_reg, 1);
+ OpRegReg(cu, kOpXor, rl_result.low_reg, rl_result.low_reg);
- StoreValue(cUnit, rlDest, rlResult);
+ StoreValue(cu, rl_dest, rl_result);
return true;
}
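// Illustrative sketch (not ART code): the value semantics of the
// ldrex/IT/strex sequence above - a compare-and-swap answering with a
// boolean.  The real sequence is atomic and sits behind the kStoreLoad
// barrier emitted earlier; this single-threaded sketch only shows what
// gets computed.
#include <cstdint>

int32_t Cas32Ref(int32_t* addr, int32_t expected, int32_t new_value) {
  if (*addr != expected) {
    return 0;            // strex path not taken: report failure
  }
  *addr = new_value;
  return 1;              // stored: report success
}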
-LIR* OpPcRelLoad(CompilationUnit* cUnit, int reg, LIR* target)
+LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target)
{
- return RawLIR(cUnit, cUnit->currentDalvikOffset, kThumb2LdrPcRel12, reg, 0, 0, 0, 0, target);
+ return RawLIR(cu, cu->current_dalvik_offset, kThumb2LdrPcRel12, reg, 0, 0, 0, 0, target);
}
-LIR* OpVldm(CompilationUnit* cUnit, int rBase, int count)
+LIR* OpVldm(CompilationUnit* cu, int r_base, int count)
{
- return NewLIR3(cUnit, kThumb2Vldms, rBase, fr0, count);
+ return NewLIR3(cu, kThumb2Vldms, r_base, fr0, count);
}
-LIR* OpVstm(CompilationUnit* cUnit, int rBase, int count)
+LIR* OpVstm(CompilationUnit* cu, int r_base, int count)
{
- return NewLIR3(cUnit, kThumb2Vstms, rBase, fr0, count);
+ return NewLIR3(cu, kThumb2Vstms, r_base, fr0, count);
}
-void GenMultiplyByTwoBitMultiplier(CompilationUnit* cUnit, RegLocation rlSrc,
- RegLocation rlResult, int lit,
- int firstBit, int secondBit)
+void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+ RegLocation rl_result, int lit,
+ int first_bit, int second_bit)
{
- OpRegRegRegShift(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, rlSrc.lowReg,
- EncodeShift(kArmLsl, secondBit - firstBit));
- if (firstBit != 0) {
- OpRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlResult.lowReg, firstBit);
+ OpRegRegRegShift(cu, kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg,
+ EncodeShift(kArmLsl, second_bit - first_bit));
+ if (first_bit != 0) {
+ OpRegRegImm(cu, kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
}
}
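// Illustrative sketch (not ART code): the strength reduction above for a
// literal with exactly two bits set.  For lit == 10 (bits 1 and 3):
// x * 10 == (x + (x << 2)) << 1.
#include <cstdint>

uint32_t MulByTwoBitLiteral(uint32_t x, int first_bit, int second_bit) {
  uint32_t sum = x + (x << (second_bit - first_bit));  // add with lsl shift
  return sum << first_bit;                             // final lsl, if needed
}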
-void GenDivZeroCheck(CompilationUnit* cUnit, int regLo, int regHi)
+void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
{
- int tReg = AllocTemp(cUnit);
- NewLIR4(cUnit, kThumb2OrrRRRs, tReg, regLo, regHi, 0);
- FreeTemp(cUnit, tReg);
- GenCheck(cUnit, kCondEq, kThrowDivZero);
+ int t_reg = AllocTemp(cu);
+ NewLIR4(cu, kThumb2OrrRRRs, t_reg, reg_lo, reg_hi, 0);
+ FreeTemp(cu, t_reg);
+ GenCheck(cu, kCondEq, kThrowDivZero);
}
// Test suspend flag, return target of taken suspend branch
-LIR* OpTestSuspend(CompilationUnit* cUnit, LIR* target)
+LIR* OpTestSuspend(CompilationUnit* cu, LIR* target)
{
- NewLIR2(cUnit, kThumbSubRI8, rARM_SUSPEND, 1);
- return OpCondBranch(cUnit, (target == NULL) ? kCondEq : kCondNe, target);
+ NewLIR2(cu, kThumbSubRI8, rARM_SUSPEND, 1);
+ return OpCondBranch(cu, (target == NULL) ? kCondEq : kCondNe, target);
}
// Decrement register and branch on condition
-LIR* OpDecAndBranch(CompilationUnit* cUnit, ConditionCode cCode, int reg, LIR* target)
+LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
{
// Combine sub & test using sub setflags encoding here
- NewLIR3(cUnit, kThumb2SubsRRI12, reg, reg, 1);
- return OpCondBranch(cUnit, cCode, target);
+ NewLIR3(cu, kThumb2SubsRRI12, reg, reg, 1);
+ return OpCondBranch(cu, c_code, target);
}
-void GenMemBarrier(CompilationUnit* cUnit, MemBarrierKind barrierKind)
+void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind)
{
#if ANDROID_SMP != 0
- int dmbFlavor;
+ int dmb_flavor;
// TODO: revisit Arm barrier kinds
- switch (barrierKind) {
- case kLoadStore: dmbFlavor = kSY; break;
- case kLoadLoad: dmbFlavor = kSY; break;
- case kStoreStore: dmbFlavor = kST; break;
- case kStoreLoad: dmbFlavor = kSY; break;
+ switch (barrier_kind) {
+ case kLoadStore: dmb_flavor = kSY; break;
+ case kLoadLoad: dmb_flavor = kSY; break;
+ case kStoreStore: dmb_flavor = kST; break;
+ case kStoreLoad: dmb_flavor = kSY; break;
default:
- LOG(FATAL) << "Unexpected MemBarrierKind: " << barrierKind;
- dmbFlavor = kSY; // quiet gcc.
+ LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
+ dmb_flavor = kSY; // quiet gcc.
break;
}
- LIR* dmb = NewLIR1(cUnit, kThumb2Dmb, dmbFlavor);
- dmb->defMask = ENCODE_ALL;
+ LIR* dmb = NewLIR1(cu, kThumb2Dmb, dmb_flavor);
+ dmb->def_mask = ENCODE_ALL;
#endif
}
-bool GenNegLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc)
+bool GenNegLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src)
{
- rlSrc = LoadValueWide(cUnit, rlSrc, kCoreReg);
- RegLocation rlResult = EvalLoc(cUnit, rlDest, kCoreReg, true);
- int zReg = AllocTemp(cUnit);
- LoadConstantNoClobber(cUnit, zReg, 0);
+ rl_src = LoadValueWide(cu, rl_src, kCoreReg);
+ RegLocation rl_result = EvalLoc(cu, rl_dest, kCoreReg, true);
+ int z_reg = AllocTemp(cu);
+ LoadConstantNoClobber(cu, z_reg, 0);
// Check for destructive overlap
- if (rlResult.lowReg == rlSrc.highReg) {
- int tReg = AllocTemp(cUnit);
- OpRegRegReg(cUnit, kOpSub, rlResult.lowReg, zReg, rlSrc.lowReg);
- OpRegRegReg(cUnit, kOpSbc, rlResult.highReg, zReg, tReg);
- FreeTemp(cUnit, tReg);
+ if (rl_result.low_reg == rl_src.high_reg) {
+ int t_reg = AllocTemp(cu);
+ OpRegRegReg(cu, kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
+ OpRegRegReg(cu, kOpSbc, rl_result.high_reg, z_reg, t_reg);
+ FreeTemp(cu, t_reg);
} else {
- OpRegRegReg(cUnit, kOpSub, rlResult.lowReg, zReg, rlSrc.lowReg);
- OpRegRegReg(cUnit, kOpSbc, rlResult.highReg, zReg, rlSrc.highReg);
+ OpRegRegReg(cu, kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
+ OpRegRegReg(cu, kOpSbc, rl_result.high_reg, z_reg, rl_src.high_reg);
}
- FreeTemp(cUnit, zReg);
- StoreValueWide(cUnit, rlDest, rlResult);
+ FreeTemp(cu, z_reg);
+ StoreValueWide(cu, rl_dest, rl_result);
return false;
}
-bool GenAddLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenAddLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
LOG(FATAL) << "Unexpected use of GenAddLong for Arm";
return false;
}
-bool GenSubLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenSubLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
LOG(FATAL) << "Unexpected use of GenSubLong for Arm";
return false;
}
-bool GenAndLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenAndLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
LOG(FATAL) << "Unexpected use of GenAndLong for Arm";
return false;
}
-bool GenOrLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenOrLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
LOG(FATAL) << "Unexpected use of GenOrLong for Arm";
return false;
}
-bool GenXorLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+bool GenXorLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
LOG(FATAL) << "Unexpected use of genXoLong for Arm";
return false;
diff --git a/src/compiler/codegen/arm/target_arm.cc b/src/compiler/codegen/arm/target_arm.cc
index f5d13d3..9c12237 100644
--- a/src/compiler/codegen/arm/target_arm.cc
+++ b/src/compiler/codegen/arm/target_arm.cc
@@ -23,15 +23,15 @@
namespace art {
-static int coreRegs[] = {r0, r1, r2, r3, rARM_SUSPEND, r5, r6, r7, r8, rARM_SELF, r10,
+static int core_regs[] = {r0, r1, r2, r3, rARM_SUSPEND, r5, r6, r7, r8, rARM_SELF, r10,
r11, r12, rARM_SP, rARM_LR, rARM_PC};
static int ReservedRegs[] = {rARM_SUSPEND, rARM_SELF, rARM_SP, rARM_LR, rARM_PC};
static int FpRegs[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15,
fr16, fr17, fr18, fr19, fr20, fr21, fr22, fr23,
fr24, fr25, fr26, fr27, fr28, fr29, fr30, fr31};
-static int coreTemps[] = {r0, r1, r2, r3, r12};
-static int fpTemps[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+static int core_temps[] = {r0, r1, r2, r3, r12};
+static int fp_temps[] = {fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15};
RegLocation LocCReturn()
@@ -85,9 +85,9 @@
// Create a double from a pair of singles.
-int S2d(int lowReg, int highReg)
+int S2d(int low_reg, int high_reg)
{
- return ARM_S2D(lowReg, highReg);
+ return ARM_S2D(low_reg, high_reg);
}
// Is reg a single or double?
@@ -123,20 +123,20 @@
/*
* Decode the register id.
*/
-uint64_t GetRegMaskCommon(CompilationUnit* cUnit, int reg)
+uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg)
{
uint64_t seed;
int shift;
- int regId;
+ int reg_id;
- regId = reg & 0x1f;
+ reg_id = reg & 0x1f;
/* Each double register is equal to a pair of single-precision FP registers */
seed = ARM_DOUBLEREG(reg) ? 3 : 1;
/* FP register starts at bit position 16 */
shift = ARM_FPREG(reg) ? kArmFPReg0 : 0;
/* Expand the double register id into single offset */
- shift += regId;
+ shift += reg_id;
return (seed << shift);
}
@@ -145,79 +145,79 @@
return ENCODE_ARM_REG_PC;
}
-void SetupTargetResourceMasks(CompilationUnit* cUnit, LIR* lir)
+void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir)
{
- DCHECK_EQ(cUnit->instructionSet, kThumb2);
+ DCHECK_EQ(cu->instruction_set, kThumb2);
// Thumb2 specific setup
uint64_t flags = EncodingMap[lir->opcode].flags;
int opcode = lir->opcode;
if (flags & REG_DEF_SP) {
- lir->defMask |= ENCODE_ARM_REG_SP;
+ lir->def_mask |= ENCODE_ARM_REG_SP;
}
if (flags & REG_USE_SP) {
- lir->useMask |= ENCODE_ARM_REG_SP;
+ lir->use_mask |= ENCODE_ARM_REG_SP;
}
if (flags & REG_DEF_LIST0) {
- lir->defMask |= ENCODE_ARM_REG_LIST(lir->operands[0]);
+ lir->def_mask |= ENCODE_ARM_REG_LIST(lir->operands[0]);
}
if (flags & REG_DEF_LIST1) {
- lir->defMask |= ENCODE_ARM_REG_LIST(lir->operands[1]);
+ lir->def_mask |= ENCODE_ARM_REG_LIST(lir->operands[1]);
}
if (flags & REG_DEF_FPCS_LIST0) {
- lir->defMask |= ENCODE_ARM_REG_FPCS_LIST(lir->operands[0]);
+ lir->def_mask |= ENCODE_ARM_REG_FPCS_LIST(lir->operands[0]);
}
if (flags & REG_DEF_FPCS_LIST2) {
for (int i = 0; i < lir->operands[2]; i++) {
- SetupRegMask(cUnit, &lir->defMask, lir->operands[1] + i);
+ SetupRegMask(cu, &lir->def_mask, lir->operands[1] + i);
}
}
if (flags & REG_USE_PC) {
- lir->useMask |= ENCODE_ARM_REG_PC;
+ lir->use_mask |= ENCODE_ARM_REG_PC;
}
/* Conservatively treat the IT block */
if (flags & IS_IT) {
- lir->defMask = ENCODE_ALL;
+ lir->def_mask = ENCODE_ALL;
}
if (flags & REG_USE_LIST0) {
- lir->useMask |= ENCODE_ARM_REG_LIST(lir->operands[0]);
+ lir->use_mask |= ENCODE_ARM_REG_LIST(lir->operands[0]);
}
if (flags & REG_USE_LIST1) {
- lir->useMask |= ENCODE_ARM_REG_LIST(lir->operands[1]);
+ lir->use_mask |= ENCODE_ARM_REG_LIST(lir->operands[1]);
}
if (flags & REG_USE_FPCS_LIST0) {
- lir->useMask |= ENCODE_ARM_REG_FPCS_LIST(lir->operands[0]);
+ lir->use_mask |= ENCODE_ARM_REG_FPCS_LIST(lir->operands[0]);
}
if (flags & REG_USE_FPCS_LIST2) {
for (int i = 0; i < lir->operands[2]; i++) {
- SetupRegMask(cUnit, &lir->useMask, lir->operands[1] + i);
+ SetupRegMask(cu, &lir->use_mask, lir->operands[1] + i);
}
}
/* Fixup for kThumbPush/lr and kThumbPop/pc */
if (opcode == kThumbPush || opcode == kThumbPop) {
- uint64_t r8Mask = GetRegMaskCommon(cUnit, r8);
- if ((opcode == kThumbPush) && (lir->useMask & r8Mask)) {
- lir->useMask &= ~r8Mask;
- lir->useMask |= ENCODE_ARM_REG_LR;
- } else if ((opcode == kThumbPop) && (lir->defMask & r8Mask)) {
- lir->defMask &= ~r8Mask;
- lir->defMask |= ENCODE_ARM_REG_PC;
+ uint64_t r8_mask = GetRegMaskCommon(cu, r8);
+ if ((opcode == kThumbPush) && (lir->use_mask & r8_mask)) {
+ lir->use_mask &= ~r8_mask;
+ lir->use_mask |= ENCODE_ARM_REG_LR;
+ } else if ((opcode == kThumbPop) && (lir->def_mask & r8_mask)) {
+ lir->def_mask &= ~r8_mask;
+ lir->def_mask |= ENCODE_ARM_REG_PC;
}
}
if (flags & REG_DEF_LR) {
- lir->defMask |= ENCODE_ARM_REG_LR;
+ lir->def_mask |= ENCODE_ARM_REG_LR;
}
}
@@ -248,7 +248,7 @@
return res;
}
-static const char* coreRegNames[16] = {
+static const char* core_reg_names[16] = {
"r0",
"r1",
"r2",
@@ -268,7 +268,7 @@
};
-static const char* shiftNames[4] = {
+static const char* shift_names[4] = {
"lsl",
"lsr",
"asr",
@@ -282,17 +282,17 @@
buf[0] = 0;
for (i = 0; i < 16; i++, vector >>= 1) {
if (vector & 0x1) {
- int regId = i;
+ int reg_id = i;
if (opcode == kThumbPush && i == 8) {
- regId = r14lr;
+ reg_id = r14lr;
} else if (opcode == kThumbPop && i == 8) {
- regId = r15pc;
+ reg_id = r15pc;
}
if (printed) {
- sprintf(buf + strlen(buf), ", r%d", regId);
+ sprintf(buf + strlen(buf), ", r%d", reg_id);
} else {
printed = true;
- sprintf(buf, "r%d", regId);
+ sprintf(buf, "r%d", reg_id);
}
}
}
@@ -328,36 +328,36 @@
return bits >> (((value & 0xf80) >> 7) - 8);
}
-const char* ccNames[] = {"eq","ne","cs","cc","mi","pl","vs","vc",
+const char* cc_names[] = {"eq","ne","cs","cc","mi","pl","vs","vc",
"hi","ls","ge","lt","gt","le","al","nv"};
/*
* Interpret a format string and build a string no longer than size
* See format key in Assemble.c.
*/
-std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* baseAddr)
+std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr)
{
std::string buf;
int i;
- const char* fmtEnd = &fmt[strlen(fmt)];
+ const char* fmt_end = &fmt[strlen(fmt)];
char tbuf[256];
const char* name;
char nc;
- while (fmt < fmtEnd) {
+ while (fmt < fmt_end) {
int operand;
if (*fmt == '!') {
fmt++;
- DCHECK_LT(fmt, fmtEnd);
+ DCHECK_LT(fmt, fmt_end);
nc = *fmt++;
if (nc=='!') {
strcpy(tbuf, "!");
} else {
- DCHECK_LT(fmt, fmtEnd);
+ DCHECK_LT(fmt, fmt_end);
DCHECK_LT(static_cast<unsigned>(nc-'0'), 4U);
operand = lir->operands[nc-'0'];
switch (*fmt++) {
case 'H':
if (operand != 0) {
- sprintf(tbuf, ", %s %d",shiftNames[operand & 0x3], operand >> 2);
+ sprintf(tbuf, ", %s %d",shift_names[operand & 0x3], operand >> 2);
} else {
strcpy(tbuf,"");
}
@@ -418,8 +418,8 @@
break;
case 'C':
DCHECK_LT(operand, static_cast<int>(
- sizeof(coreRegNames)/sizeof(coreRegNames[0])));
- sprintf(tbuf,"%s",coreRegNames[operand]);
+ sizeof(core_reg_names)/sizeof(core_reg_names[0])));
+ sprintf(tbuf,"%s",core_reg_names[operand]);
break;
case 'E':
sprintf(tbuf,"%d", operand*4);
@@ -428,11 +428,11 @@
sprintf(tbuf,"%d", operand*2);
break;
case 'c':
- strcpy(tbuf, ccNames[operand]);
+ strcpy(tbuf, cc_names[operand]);
break;
case 't':
sprintf(tbuf,"0x%08x (L%p)",
- reinterpret_cast<uintptr_t>(baseAddr) + lir->offset + 4 +
+ reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4 +
(operand << 1),
lir->target);
break;
@@ -440,7 +440,7 @@
int offset_1 = lir->operands[0];
int offset_2 = NEXT_LIR(lir)->operands[0];
uintptr_t target =
- (((reinterpret_cast<uintptr_t>(baseAddr) + lir->offset + 4) &
+ (((reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4) &
~3) + (offset_1 << 21 >> 9) + (offset_2 << 1)) &
0xfffffffc;
sprintf(tbuf, "%p", reinterpret_cast<void *>(target));
@@ -473,7 +473,7 @@
return buf;
}
-void DumpResourceMask(LIR* armLIR, uint64_t mask, const char* prefix)
+void DumpResourceMask(LIR* arm_lir, uint64_t mask, const char* prefix)
{
char buf[256];
buf[0] = 0;
@@ -499,9 +499,9 @@
}
/* Memory bits */
- if (armLIR && (mask & ENCODE_DALVIK_REG)) {
- sprintf(buf + strlen(buf), "dr%d%s", armLIR->aliasInfo & 0xffff,
- (armLIR->aliasInfo & 0x80000000) ? "(+1)" : "");
+ if (arm_lir && (mask & ENCODE_DALVIK_REG)) {
+ sprintf(buf + strlen(buf), "dr%d%s", arm_lir->alias_info & 0xffff,
+ (arm_lir->alias_info & 0x80000000) ? "(+1)" : "");
}
if (mask & ENCODE_LITERAL) {
strcat(buf, "lit ");
@@ -550,105 +550,105 @@
* Alloc a pair of core registers, or a double. Low reg in low byte,
* high reg in next byte.
*/
-int AllocTypedTempPair(CompilationUnit* cUnit, bool fpHint, int regClass)
+int AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class)
{
- int highReg;
- int lowReg;
+ int high_reg;
+ int low_reg;
int res = 0;
- if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg)) {
- lowReg = AllocTempDouble(cUnit);
- highReg = lowReg + 1;
+ if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
+ low_reg = AllocTempDouble(cu);
+ high_reg = low_reg + 1;
} else {
- lowReg = AllocTemp(cUnit);
- highReg = AllocTemp(cUnit);
+ low_reg = AllocTemp(cu);
+ high_reg = AllocTemp(cu);
}
- res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
+ res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
return res;
}
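// Illustrative sketch (not ART code): the packing convention documented
// above - low reg in the low byte, high reg in the next byte - and how a
// caller unpacks the pair.
int PackRegPair(int low_reg, int high_reg) {
  return (low_reg & 0xff) | ((high_reg & 0xff) << 8);
}
int PairLowReg(int pair)  { return pair & 0xff; }
int PairHighReg(int pair) { return (pair >> 8) & 0xff; }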
-int AllocTypedTemp(CompilationUnit* cUnit, bool fpHint, int regClass)
+int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class)
{
- if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg))
- return AllocTempFloat(cUnit);
- return AllocTemp(cUnit);
+ if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg))
+ return AllocTempFloat(cu);
+ return AllocTemp(cu);
}
-void CompilerInitializeRegAlloc(CompilationUnit* cUnit)
+void CompilerInitializeRegAlloc(CompilationUnit* cu)
{
- int numRegs = sizeof(coreRegs)/sizeof(*coreRegs);
- int numReserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
- int numTemps = sizeof(coreTemps)/sizeof(*coreTemps);
- int numFPRegs = sizeof(FpRegs)/sizeof(*FpRegs);
- int numFPTemps = sizeof(fpTemps)/sizeof(*fpTemps);
+ int num_regs = sizeof(core_regs)/sizeof(*core_regs);
+ int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
+ int num_temps = sizeof(core_temps)/sizeof(*core_temps);
+ int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
+ int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
RegisterPool *pool =
- static_cast<RegisterPool*>(NewMem(cUnit, sizeof(*pool), true, kAllocRegAlloc));
- cUnit->regPool = pool;
- pool->numCoreRegs = numRegs;
- pool->coreRegs = reinterpret_cast<RegisterInfo*>
- (NewMem(cUnit, numRegs * sizeof(*cUnit->regPool->coreRegs), true, kAllocRegAlloc));
- pool->numFPRegs = numFPRegs;
+ static_cast<RegisterPool*>(NewMem(cu, sizeof(*pool), true, kAllocRegAlloc));
+ cu->reg_pool = pool;
+ pool->num_core_regs = num_regs;
+ pool->core_regs = reinterpret_cast<RegisterInfo*>
+ (NewMem(cu, num_regs * sizeof(*cu->reg_pool->core_regs), true, kAllocRegAlloc));
+ pool->num_fp_regs = num_fp_regs;
pool->FPRegs = static_cast<RegisterInfo*>
- (NewMem(cUnit, numFPRegs * sizeof(*cUnit->regPool->FPRegs), true, kAllocRegAlloc));
- CompilerInitPool(pool->coreRegs, coreRegs, pool->numCoreRegs);
- CompilerInitPool(pool->FPRegs, FpRegs, pool->numFPRegs);
+ (NewMem(cu, num_fp_regs * sizeof(*cu->reg_pool->FPRegs), true, kAllocRegAlloc));
+ CompilerInitPool(pool->core_regs, core_regs, pool->num_core_regs);
+ CompilerInitPool(pool->FPRegs, FpRegs, pool->num_fp_regs);
// Keep special registers from being allocated
- for (int i = 0; i < numReserved; i++) {
+ for (int i = 0; i < num_reserved; i++) {
if (NO_SUSPEND && (ReservedRegs[i] == rARM_SUSPEND)) {
//To measure cost of suspend check
continue;
}
- MarkInUse(cUnit, ReservedRegs[i]);
+ MarkInUse(cu, ReservedRegs[i]);
}
// Mark temp regs - all others not in use can be used for promotion
- for (int i = 0; i < numTemps; i++) {
- MarkTemp(cUnit, coreTemps[i]);
+ for (int i = 0; i < num_temps; i++) {
+ MarkTemp(cu, core_temps[i]);
}
- for (int i = 0; i < numFPTemps; i++) {
- MarkTemp(cUnit, fpTemps[i]);
+ for (int i = 0; i < num_fp_temps; i++) {
+ MarkTemp(cu, fp_temps[i]);
}
// Start allocation at r2 in an attempt to avoid clobbering return values
- pool->nextCoreReg = r2;
+ pool->next_core_reg = r2;
// Construct the alias map.
- cUnit->phiAliasMap = static_cast<int*>
- (NewMem(cUnit, cUnit->numSSARegs * sizeof(cUnit->phiAliasMap[0]), false, kAllocDFInfo));
- for (int i = 0; i < cUnit->numSSARegs; i++) {
- cUnit->phiAliasMap[i] = i;
+ cu->phi_alias_map = static_cast<int*>
+ (NewMem(cu, cu->num_ssa_regs * sizeof(cu->phi_alias_map[0]), false, kAllocDFInfo));
+ for (int i = 0; i < cu->num_ssa_regs; i++) {
+ cu->phi_alias_map[i] = i;
}
- for (MIR* phi = cUnit->phiList; phi; phi = phi->meta.phiNext) {
- int defReg = phi->ssaRep->defs[0];
- for (int i = 0; i < phi->ssaRep->numUses; i++) {
- for (int j = 0; j < cUnit->numSSARegs; j++) {
- if (cUnit->phiAliasMap[j] == phi->ssaRep->uses[i]) {
- cUnit->phiAliasMap[j] = defReg;
+ for (MIR* phi = cu->phi_list; phi; phi = phi->meta.phi_next) {
+ int def_reg = phi->ssa_rep->defs[0];
+ for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
+ for (int j = 0; j < cu->num_ssa_regs; j++) {
+ if (cu->phi_alias_map[j] == phi->ssa_rep->uses[i]) {
+ cu->phi_alias_map[j] = def_reg;
}
}
}
}
}
-void FreeRegLocTemps(CompilationUnit* cUnit, RegLocation rlKeep,
- RegLocation rlFree)
+void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep,
+ RegLocation rl_free)
{
- if ((rlFree.lowReg != rlKeep.lowReg) && (rlFree.lowReg != rlKeep.highReg) &&
- (rlFree.highReg != rlKeep.lowReg) && (rlFree.highReg != rlKeep.highReg)) {
+ if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
+ (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
// No overlap, free both
- FreeTemp(cUnit, rlFree.lowReg);
- FreeTemp(cUnit, rlFree.highReg);
+ FreeTemp(cu, rl_free.low_reg);
+ FreeTemp(cu, rl_free.high_reg);
}
}
/*
- * TUNING: is leaf? Can't just use "hasInvoke" to determine as some
+ * TUNING: is leaf? Can't just use "has_invoke" to determine as some
* instructions might call out to C/assembly helper functions. Until
* machinery is in place, always spill lr.
*/
-void AdjustSpillMask(CompilationUnit* cUnit)
+void AdjustSpillMask(CompilationUnit* cu)
{
- cUnit->coreSpillMask |= (1 << rARM_LR);
- cUnit->numCoreSpills++;
+ cu->core_spill_mask |= (1 << rARM_LR);
+ cu->num_core_spills++;
}
/*
@@ -657,52 +657,52 @@
* include any holes in the mask. Associate holes with
* Dalvik register INVALID_VREG (0xFFFFU).
*/
-void MarkPreservedSingle(CompilationUnit* cUnit, int vReg, int reg)
+void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg)
{
DCHECK_GE(reg, ARM_FP_REG_MASK + ARM_FP_CALLEE_SAVE_BASE);
reg = (reg & ARM_FP_REG_MASK) - ARM_FP_CALLEE_SAVE_BASE;
- // Ensure fpVmapTable is large enough
- int tableSize = cUnit->fpVmapTable.size();
- for (int i = tableSize; i < (reg + 1); i++) {
- cUnit->fpVmapTable.push_back(INVALID_VREG);
+ // Ensure fp_vmap_table is large enough
+ int table_size = cu->fp_vmap_table.size();
+ for (int i = table_size; i < (reg + 1); i++) {
+ cu->fp_vmap_table.push_back(INVALID_VREG);
}
// Add the current mapping
- cUnit->fpVmapTable[reg] = vReg;
- // Size of fpVmapTable is high-water mark, use to set mask
- cUnit->numFPSpills = cUnit->fpVmapTable.size();
- cUnit->fpSpillMask = ((1 << cUnit->numFPSpills) - 1) << ARM_FP_CALLEE_SAVE_BASE;
+ cu->fp_vmap_table[reg] = v_reg;
+ // Size of fp_vmap_table is high-water mark, use to set mask
+ cu->num_fp_spills = cu->fp_vmap_table.size();
+ cu->fp_spill_mask = ((1 << cu->num_fp_spills) - 1) << ARM_FP_CALLEE_SAVE_BASE;
}
-void FlushRegWide(CompilationUnit* cUnit, int reg1, int reg2)
+void FlushRegWide(CompilationUnit* cu, int reg1, int reg2)
{
- RegisterInfo* info1 = GetRegInfo(cUnit, reg1);
- RegisterInfo* info2 = GetRegInfo(cUnit, reg2);
+ RegisterInfo* info1 = GetRegInfo(cu, reg1);
+ RegisterInfo* info2 = GetRegInfo(cu, reg2);
DCHECK(info1 && info2 && info1->pair && info2->pair &&
(info1->partner == info2->reg) &&
(info2->partner == info1->reg));
if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
- if (!(info1->isTemp && info2->isTemp)) {
- /* Should not happen. If it does, there's a problem in evalLoc */
+ if (!(info1->is_temp && info2->is_temp)) {
+ /* Should not happen. If it does, there's a problem in EvalLoc */
LOG(FATAL) << "Long half-temp, half-promoted";
}
info1->dirty = false;
info2->dirty = false;
- if (SRegToVReg(cUnit, info2->sReg) <
- SRegToVReg(cUnit, info1->sReg))
+ if (SRegToVReg(cu, info2->s_reg) <
+ SRegToVReg(cu, info1->s_reg))
info1 = info2;
- int vReg = SRegToVReg(cUnit, info1->sReg);
- StoreBaseDispWide(cUnit, rARM_SP, VRegOffset(cUnit, vReg), info1->reg, info1->partner);
+ int v_reg = SRegToVReg(cu, info1->s_reg);
+ StoreBaseDispWide(cu, rARM_SP, VRegOffset(cu, v_reg), info1->reg, info1->partner);
}
}
-void FlushReg(CompilationUnit* cUnit, int reg)
+void FlushReg(CompilationUnit* cu, int reg)
{
- RegisterInfo* info = GetRegInfo(cUnit, reg);
+ RegisterInfo* info = GetRegInfo(cu, reg);
if (info->live && info->dirty) {
info->dirty = false;
- int vReg = SRegToVReg(cUnit, info->sReg);
- StoreBaseDisp(cUnit, rARM_SP, VRegOffset(cUnit, vReg), reg, kWord);
+ int v_reg = SRegToVReg(cu, info->s_reg);
+ StoreBaseDisp(cu, rARM_SP, VRegOffset(cu, v_reg), reg, kWord);
}
}
@@ -712,81 +712,81 @@
}
/* Clobber all regs that might be used by an external C call */
-void ClobberCalleeSave(CompilationUnit *cUnit)
+void ClobberCalleeSave(CompilationUnit *cu)
{
- Clobber(cUnit, r0);
- Clobber(cUnit, r1);
- Clobber(cUnit, r2);
- Clobber(cUnit, r3);
- Clobber(cUnit, r12);
- Clobber(cUnit, r14lr);
- Clobber(cUnit, fr0);
- Clobber(cUnit, fr1);
- Clobber(cUnit, fr2);
- Clobber(cUnit, fr3);
- Clobber(cUnit, fr4);
- Clobber(cUnit, fr5);
- Clobber(cUnit, fr6);
- Clobber(cUnit, fr7);
- Clobber(cUnit, fr8);
- Clobber(cUnit, fr9);
- Clobber(cUnit, fr10);
- Clobber(cUnit, fr11);
- Clobber(cUnit, fr12);
- Clobber(cUnit, fr13);
- Clobber(cUnit, fr14);
- Clobber(cUnit, fr15);
+ Clobber(cu, r0);
+ Clobber(cu, r1);
+ Clobber(cu, r2);
+ Clobber(cu, r3);
+ Clobber(cu, r12);
+ Clobber(cu, r14lr);
+ Clobber(cu, fr0);
+ Clobber(cu, fr1);
+ Clobber(cu, fr2);
+ Clobber(cu, fr3);
+ Clobber(cu, fr4);
+ Clobber(cu, fr5);
+ Clobber(cu, fr6);
+ Clobber(cu, fr7);
+ Clobber(cu, fr8);
+ Clobber(cu, fr9);
+ Clobber(cu, fr10);
+ Clobber(cu, fr11);
+ Clobber(cu, fr12);
+ Clobber(cu, fr13);
+ Clobber(cu, fr14);
+ Clobber(cu, fr15);
}
-RegLocation GetReturnWideAlt(CompilationUnit* cUnit)
+RegLocation GetReturnWideAlt(CompilationUnit* cu)
{
RegLocation res = LocCReturnWide();
- res.lowReg = r2;
- res.highReg = r3;
- Clobber(cUnit, r2);
- Clobber(cUnit, r3);
- MarkInUse(cUnit, r2);
- MarkInUse(cUnit, r3);
- MarkPair(cUnit, res.lowReg, res.highReg);
+ res.low_reg = r2;
+ res.high_reg = r3;
+ Clobber(cu, r2);
+ Clobber(cu, r3);
+ MarkInUse(cu, r2);
+ MarkInUse(cu, r3);
+ MarkPair(cu, res.low_reg, res.high_reg);
return res;
}
-RegLocation GetReturnAlt(CompilationUnit* cUnit)
+RegLocation GetReturnAlt(CompilationUnit* cu)
{
RegLocation res = LocCReturn();
- res.lowReg = r1;
- Clobber(cUnit, r1);
- MarkInUse(cUnit, r1);
+ res.low_reg = r1;
+ Clobber(cu, r1);
+ MarkInUse(cu, r1);
return res;
}
-RegisterInfo* GetRegInfo(CompilationUnit* cUnit, int reg)
+RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg)
{
- return ARM_FPREG(reg) ? &cUnit->regPool->FPRegs[reg & ARM_FP_REG_MASK]
- : &cUnit->regPool->coreRegs[reg];
+ return ARM_FPREG(reg) ? &cu->reg_pool->FPRegs[reg & ARM_FP_REG_MASK]
+ : &cu->reg_pool->core_regs[reg];
}
/* To be used when explicitly managing register use */
-void LockCallTemps(CompilationUnit* cUnit)
+void LockCallTemps(CompilationUnit* cu)
{
- LockTemp(cUnit, r0);
- LockTemp(cUnit, r1);
- LockTemp(cUnit, r2);
- LockTemp(cUnit, r3);
+ LockTemp(cu, r0);
+ LockTemp(cu, r1);
+ LockTemp(cu, r2);
+ LockTemp(cu, r3);
}
/* To be used when explicitly managing register use */
-void FreeCallTemps(CompilationUnit* cUnit)
+void FreeCallTemps(CompilationUnit* cu)
{
- FreeTemp(cUnit, r0);
- FreeTemp(cUnit, r1);
- FreeTemp(cUnit, r2);
- FreeTemp(cUnit, r3);
+ FreeTemp(cu, r0);
+ FreeTemp(cu, r1);
+ FreeTemp(cu, r2);
+ FreeTemp(cu, r3);
}
-int LoadHelper(CompilationUnit* cUnit, int offset)
+int LoadHelper(CompilationUnit* cu, int offset)
{
- LoadWordDisp(cUnit, rARM_SELF, offset, rARM_LR);
+ LoadWordDisp(cu, rARM_SELF, offset, rARM_LR);
return rARM_LR;
}
diff --git a/src/compiler/codegen/arm/utility_arm.cc b/src/compiler/codegen/arm/utility_arm.cc
index bfb05d5..b064135 100644
--- a/src/compiler/codegen/arm/utility_arm.cc
+++ b/src/compiler/codegen/arm/utility_arm.cc
@@ -25,42 +25,42 @@
static int EncodeImmSingle(int value)
{
int res;
- int bitA = (value & 0x80000000) >> 31;
- int notBitB = (value & 0x40000000) >> 30;
- int bitB = (value & 0x20000000) >> 29;
- int bSmear = (value & 0x3e000000) >> 25;
+ int bit_a = (value & 0x80000000) >> 31;
+ int not_bit_b = (value & 0x40000000) >> 30;
+ int bit_b = (value & 0x20000000) >> 29;
+ int b_smear = (value & 0x3e000000) >> 25;
int slice = (value & 0x01f80000) >> 19;
int zeroes = (value & 0x0007ffff);
if (zeroes != 0)
return -1;
- if (bitB) {
- if ((notBitB != 0) || (bSmear != 0x1f))
+ if (bit_b) {
+ if ((not_bit_b != 0) || (b_smear != 0x1f))
return -1;
} else {
- if ((notBitB != 1) || (bSmear != 0x0))
+ if ((not_bit_b != 1) || (b_smear != 0x0))
return -1;
}
- res = (bitA << 7) | (bitB << 6) | slice;
+ res = (bit_a << 7) | (bit_b << 6) | slice;
return res;
}
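// Illustrative sketch (not ART code): decoding the 8-bit result of
// EncodeImmSingle back to the 32-bit float pattern it stands for, following
// the a:NOT(b):bbbbb:cdefgh layout that the bit tests above enforce.
// Decoding 0 yields 0x40000000, i.e. 2.0f, so EncodeImmSingle on the bits
// of 2.0f (which returns 0) round-trips.
#include <cstdint>
#include <cstdio>

uint32_t DecodeImmSingle(int imm8) {
  uint32_t bit_a = (imm8 >> 7) & 1;
  uint32_t bit_b = (imm8 >> 6) & 1;
  uint32_t slice = imm8 & 0x3f;
  return (bit_a << 31) | ((bit_b ^ 1) << 30) |
         (bit_b ? (0x1fu << 25) : 0u) | (slice << 19);
}

int main() {
  std::printf("decode(0) = 0x%08x\n", DecodeImmSingle(0));  // 0x40000000
  return 0;
}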
-static LIR* LoadFPConstantValue(CompilationUnit* cUnit, int rDest, int value)
+static LIR* LoadFPConstantValue(CompilationUnit* cu, int r_dest, int value)
{
- int encodedImm = EncodeImmSingle(value);
- DCHECK(ARM_SINGLEREG(rDest));
- if (encodedImm >= 0) {
- return NewLIR2(cUnit, kThumb2Vmovs_IMM8, rDest, encodedImm);
+ int encoded_imm = EncodeImmSingle(value);
+ DCHECK(ARM_SINGLEREG(r_dest));
+ if (encoded_imm >= 0) {
+ return NewLIR2(cu, kThumb2Vmovs_IMM8, r_dest, encoded_imm);
}
- LIR* dataTarget = ScanLiteralPool(cUnit->literalList, value, 0);
- if (dataTarget == NULL) {
- dataTarget = AddWordData(cUnit, &cUnit->literalList, value);
+ LIR* data_target = ScanLiteralPool(cu->literal_list, value, 0);
+ if (data_target == NULL) {
+ data_target = AddWordData(cu, &cu->literal_list, value);
}
- LIR* loadPcRel = RawLIR(cUnit, cUnit->currentDalvikOffset, kThumb2Vldrs,
- rDest, r15pc, 0, 0, 0, dataTarget);
- SetMemRefType(loadPcRel, true, kLiteral);
- loadPcRel->aliasInfo = reinterpret_cast<uintptr_t>(dataTarget);
- AppendLIR(cUnit, loadPcRel);
- return loadPcRel;
+ LIR* load_pc_rel = RawLIR(cu, cu->current_dalvik_offset, kThumb2Vldrs,
+ r_dest, r15pc, 0, 0, 0, data_target);
+ SetMemRefType(load_pc_rel, true, kLiteral);
+ load_pc_rel->alias_info = reinterpret_cast<uintptr_t>(data_target);
+ AppendLIR(cu, load_pc_rel);
+ return load_pc_rel;
}
static int LeadingZeros(uint32_t val)
@@ -88,8 +88,8 @@
*/
int ModifiedImmediate(uint32_t value)
{
- int zLeading;
- int zTrailing;
+ int z_leading;
+ int z_trailing;
uint32_t b0 = value & 0xff;
/* Note: case of value==0 must use 0:000:0:0000000 encoding */
@@ -103,17 +103,17 @@
if (value == ((b0 << 24) | (b0 << 8)))
return (0x2 << 8) | b0; /* 0:010:a:bcdefgh */
/* Can we do it with rotation? */
- zLeading = LeadingZeros(value);
- zTrailing = 32 - LeadingZeros(~value & (value - 1));
+ z_leading = LeadingZeros(value);
+ z_trailing = 32 - LeadingZeros(~value & (value - 1));
/* A run of eight or fewer active bits? */
- if ((zLeading + zTrailing) < 24)
+ if ((z_leading + z_trailing) < 24)
return -1; /* No - bail */
/* left-justify the constant, discarding msb (known to be 1) */
- value <<= zLeading + 1;
+ value <<= z_leading + 1;
/* Create bcdefgh */
value >>= 25;
/* Put it all together */
- return value | ((0x8 + zLeading) << 7); /* [01000..11111]:bcdefgh */
+ return value | ((0x8 + z_leading) << 7); /* [01000..11111]:bcdefgh */
}
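// Illustrative sketch (not ART code): decoding the 12-bit value returned by
// ModifiedImmediate back into the 32-bit constant, handy for sanity-checking
// the rotation case above.  Bits 11:8 of 0..3 select the repeat patterns;
// anything else is the rotation form, where the rotation (bits 11:7) is
// always >= 8 here, so the shifts below never degenerate.
#include <cstdint>
#include <cstdio>

uint32_t DecodeModifiedImm(int enc) {
  uint32_t imm8 = enc & 0xff;
  switch ((enc >> 8) & 0xf) {
    case 0x0: return imm8;                        // 00000000:00000000:00000000:abcdefgh
    case 0x1: return (imm8 << 16) | imm8;         // 00000000:abcdefgh:00000000:abcdefgh
    case 0x2: return (imm8 << 24) | (imm8 << 8);  // abcdefgh:00000000:abcdefgh:00000000
    case 0x3: return (imm8 << 24) | (imm8 << 16) | (imm8 << 8) | imm8;
    default: {                                    // rotation form
      uint32_t unrotated = 0x80u | (enc & 0x7f);  // 1:bcdefgh
      int rot = (enc >> 7) & 0x1f;                // 8..31
      return (unrotated >> rot) | (unrotated << (32 - rot));
    }
  }
}

int main() {
  // ModifiedImmediate(0x2a00) takes the rotation path and returns 0xd28;
  // decoding gives the original constant back.
  std::printf("decode(0xd28) = 0x%08x\n", DecodeModifiedImm(0xd28));  // 0x00002a00
  return 0;
}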
/*
@@ -121,75 +121,75 @@
* grab from the per-translation literal pool.
*
* No additional register clobbering operation performed. Use this version when
- * 1) rDest is freshly returned from AllocTemp or
+ * 1) r_dest is freshly returned from AllocTemp or
* 2) The codegen is under fixed register usage
*/
-LIR* LoadConstantNoClobber(CompilationUnit* cUnit, int rDest, int value)
+LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value)
{
LIR* res;
- int modImm;
+ int mod_imm;
- if (ARM_FPREG(rDest)) {
- return LoadFPConstantValue(cUnit, rDest, value);
+ if (ARM_FPREG(r_dest)) {
+ return LoadFPConstantValue(cu, r_dest, value);
}
/* See if the value can be constructed cheaply */
- if (ARM_LOWREG(rDest) && (value >= 0) && (value <= 255)) {
- return NewLIR2(cUnit, kThumbMovImm, rDest, value);
+ if (ARM_LOWREG(r_dest) && (value >= 0) && (value <= 255)) {
+ return NewLIR2(cu, kThumbMovImm, r_dest, value);
}
/* Check Modified immediate special cases */
- modImm = ModifiedImmediate(value);
- if (modImm >= 0) {
- res = NewLIR2(cUnit, kThumb2MovImmShift, rDest, modImm);
+ mod_imm = ModifiedImmediate(value);
+ if (mod_imm >= 0) {
+ res = NewLIR2(cu, kThumb2MovImmShift, r_dest, mod_imm);
return res;
}
- modImm = ModifiedImmediate(~value);
- if (modImm >= 0) {
- res = NewLIR2(cUnit, kThumb2MvnImm12, rDest, modImm);
+ mod_imm = ModifiedImmediate(~value);
+ if (mod_imm >= 0) {
+ res = NewLIR2(cu, kThumb2MvnImm12, r_dest, mod_imm);
return res;
}
/* 16-bit immediate? */
if ((value & 0xffff) == value) {
- res = NewLIR2(cUnit, kThumb2MovImm16, rDest, value);
+ res = NewLIR2(cu, kThumb2MovImm16, r_dest, value);
return res;
}
/* No shortcut - go ahead and use literal pool */
- LIR* dataTarget = ScanLiteralPool(cUnit->literalList, value, 0);
- if (dataTarget == NULL) {
- dataTarget = AddWordData(cUnit, &cUnit->literalList, value);
+ LIR* data_target = ScanLiteralPool(cu->literal_list, value, 0);
+ if (data_target == NULL) {
+ data_target = AddWordData(cu, &cu->literal_list, value);
}
- LIR* loadPcRel = RawLIR(cUnit, cUnit->currentDalvikOffset,
- kThumb2LdrPcRel12, rDest, 0, 0, 0, 0, dataTarget);
- SetMemRefType(loadPcRel, true, kLiteral);
- loadPcRel->aliasInfo = reinterpret_cast<uintptr_t>(dataTarget);
- res = loadPcRel;
- AppendLIR(cUnit, loadPcRel);
+ LIR* load_pc_rel = RawLIR(cu, cu->current_dalvik_offset,
+ kThumb2LdrPcRel12, r_dest, 0, 0, 0, 0, data_target);
+ SetMemRefType(load_pc_rel, true, kLiteral);
+ load_pc_rel->alias_info = reinterpret_cast<uintptr_t>(data_target);
+ res = load_pc_rel;
+ AppendLIR(cu, load_pc_rel);
/*
* To save space in the constant pool, we use the ADD_RRI8 instruction to
* add up to 255 to an existing constant value.
*/
- if (dataTarget->operands[0] != value) {
- OpRegImm(cUnit, kOpAdd, rDest, value - dataTarget->operands[0]);
+ if (data_target->operands[0] != value) {
+ OpRegImm(cu, kOpAdd, r_dest, value - data_target->operands[0]);
}
return res;
}
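
As a compact view of the ladder in LoadConstantNoClobber() above, here is a hedged classifier sketch for the core-register path. The enum and function names are ours, and the modified-immediate test reuses the simplified RotatedImm8Sketch() from the earlier sketch, so it under-approximates what the real ModifiedImmediate() accepts.

#include <cstdint>

int RotatedImm8Sketch(uint32_t value);  // simplified check from the earlier sketch

enum class ConstStrategy {
  kThumbMovImm8,   // kThumbMovImm: low reg, 0..255
  kMovModImm,      // kThumb2MovImmShift: value is a modified immediate
  kMvnModImm,      // kThumb2MvnImm12: ~value is a modified immediate
  kMovImm16,       // kThumb2MovImm16: unsigned 16-bit value
  kLiteralPool     // pc-relative load; an ADD of up to 255 can reuse a nearby
                   // pool word, per the comment above
};

static ConstStrategy ClassifyConstant(bool low_reg_dest, int32_t value) {
  if (low_reg_dest && value >= 0 && value <= 255) return ConstStrategy::kThumbMovImm8;
  if (RotatedImm8Sketch(static_cast<uint32_t>(value)) >= 0) return ConstStrategy::kMovModImm;
  if (RotatedImm8Sketch(static_cast<uint32_t>(~value)) >= 0) return ConstStrategy::kMvnModImm;
  if ((value & 0xffff) == value) return ConstStrategy::kMovImm16;
  return ConstStrategy::kLiteralPool;
}
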
-LIR* OpBranchUnconditional(CompilationUnit* cUnit, OpKind op)
+LIR* OpBranchUnconditional(CompilationUnit* cu, OpKind op)
{
DCHECK_EQ(op, kOpUncondBr);
- return NewLIR1(cUnit, kThumbBUncond, 0 /* offset to be patched */);
+ return NewLIR1(cu, kThumbBUncond, 0 /* offset to be patched */);
}
-LIR* OpCondBranch(CompilationUnit* cUnit, ConditionCode cc, LIR* target)
+LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target)
{
- LIR* branch = NewLIR2(cUnit, kThumb2BCond, 0 /* offset to be patched */,
+ LIR* branch = NewLIR2(cu, kThumb2BCond, 0 /* offset to be patched */,
ArmConditionEncoding(cc));
branch->target = target;
return branch;
}
-LIR* OpReg(CompilationUnit* cUnit, OpKind op, int rDestSrc)
+LIR* OpReg(CompilationUnit* cu, OpKind op, int r_dest_src)
{
ArmOpcode opcode = kThumbBkpt;
switch (op) {
@@ -199,34 +199,34 @@
default:
LOG(FATAL) << "Bad opcode " << op;
}
- return NewLIR1(cUnit, opcode, rDestSrc);
+ return NewLIR1(cu, opcode, r_dest_src);
}
-LIR* OpRegRegShift(CompilationUnit* cUnit, OpKind op, int rDestSrc1,
- int rSrc2, int shift)
+LIR* OpRegRegShift(CompilationUnit* cu, OpKind op, int r_dest_src1,
+ int r_src2, int shift)
{
- bool thumbForm = ((shift == 0) && ARM_LOWREG(rDestSrc1) && ARM_LOWREG(rSrc2));
+ bool thumb_form = ((shift == 0) && ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2));
ArmOpcode opcode = kThumbBkpt;
switch (op) {
case kOpAdc:
- opcode = (thumbForm) ? kThumbAdcRR : kThumb2AdcRRR;
+ opcode = (thumb_form) ? kThumbAdcRR : kThumb2AdcRRR;
break;
case kOpAnd:
- opcode = (thumbForm) ? kThumbAndRR : kThumb2AndRRR;
+ opcode = (thumb_form) ? kThumbAndRR : kThumb2AndRRR;
break;
case kOpBic:
- opcode = (thumbForm) ? kThumbBicRR : kThumb2BicRRR;
+ opcode = (thumb_form) ? kThumbBicRR : kThumb2BicRRR;
break;
case kOpCmn:
DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbCmnRR : kThumb2CmnRR;
+ opcode = (thumb_form) ? kThumbCmnRR : kThumb2CmnRR;
break;
case kOpCmp:
- if (thumbForm)
+ if (thumb_form)
opcode = kThumbCmpRR;
- else if ((shift == 0) && !ARM_LOWREG(rDestSrc1) && !ARM_LOWREG(rSrc2))
+ else if ((shift == 0) && !ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
opcode = kThumbCmpHH;
- else if ((shift == 0) && ARM_LOWREG(rDestSrc1))
+ else if ((shift == 0) && ARM_LOWREG(r_dest_src1))
opcode = kThumbCmpLH;
else if (shift == 0)
opcode = kThumbCmpHL;
@@ -234,107 +234,107 @@
opcode = kThumb2CmpRR;
break;
case kOpXor:
- opcode = (thumbForm) ? kThumbEorRR : kThumb2EorRRR;
+ opcode = (thumb_form) ? kThumbEorRR : kThumb2EorRRR;
break;
case kOpMov:
DCHECK_EQ(shift, 0);
- if (ARM_LOWREG(rDestSrc1) && ARM_LOWREG(rSrc2))
+ if (ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2))
opcode = kThumbMovRR;
- else if (!ARM_LOWREG(rDestSrc1) && !ARM_LOWREG(rSrc2))
+ else if (!ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
opcode = kThumbMovRR_H2H;
- else if (ARM_LOWREG(rDestSrc1))
+ else if (ARM_LOWREG(r_dest_src1))
opcode = kThumbMovRR_H2L;
else
opcode = kThumbMovRR_L2H;
break;
case kOpMul:
DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbMul : kThumb2MulRRR;
+ opcode = (thumb_form) ? kThumbMul : kThumb2MulRRR;
break;
case kOpMvn:
- opcode = (thumbForm) ? kThumbMvn : kThumb2MnvRR;
+ opcode = (thumb_form) ? kThumbMvn : kThumb2MnvRR;
break;
case kOpNeg:
DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbNeg : kThumb2NegRR;
+ opcode = (thumb_form) ? kThumbNeg : kThumb2NegRR;
break;
case kOpOr:
- opcode = (thumbForm) ? kThumbOrr : kThumb2OrrRRR;
+ opcode = (thumb_form) ? kThumbOrr : kThumb2OrrRRR;
break;
case kOpSbc:
- opcode = (thumbForm) ? kThumbSbc : kThumb2SbcRRR;
+ opcode = (thumb_form) ? kThumbSbc : kThumb2SbcRRR;
break;
case kOpTst:
- opcode = (thumbForm) ? kThumbTst : kThumb2TstRR;
+ opcode = (thumb_form) ? kThumbTst : kThumb2TstRR;
break;
case kOpLsl:
DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbLslRR : kThumb2LslRRR;
+ opcode = (thumb_form) ? kThumbLslRR : kThumb2LslRRR;
break;
case kOpLsr:
DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbLsrRR : kThumb2LsrRRR;
+ opcode = (thumb_form) ? kThumbLsrRR : kThumb2LsrRRR;
break;
case kOpAsr:
DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbAsrRR : kThumb2AsrRRR;
+ opcode = (thumb_form) ? kThumbAsrRR : kThumb2AsrRRR;
break;
case kOpRor:
DCHECK_EQ(shift, 0);
- opcode = (thumbForm) ? kThumbRorRR : kThumb2RorRRR;
+ opcode = (thumb_form) ? kThumbRorRR : kThumb2RorRRR;
break;
case kOpAdd:
- opcode = (thumbForm) ? kThumbAddRRR : kThumb2AddRRR;
+ opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
break;
case kOpSub:
- opcode = (thumbForm) ? kThumbSubRRR : kThumb2SubRRR;
+ opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
break;
case kOp2Byte:
DCHECK_EQ(shift, 0);
- return NewLIR4(cUnit, kThumb2Sbfx, rDestSrc1, rSrc2, 0, 8);
+ return NewLIR4(cu, kThumb2Sbfx, r_dest_src1, r_src2, 0, 8);
case kOp2Short:
DCHECK_EQ(shift, 0);
- return NewLIR4(cUnit, kThumb2Sbfx, rDestSrc1, rSrc2, 0, 16);
+ return NewLIR4(cu, kThumb2Sbfx, r_dest_src1, r_src2, 0, 16);
case kOp2Char:
DCHECK_EQ(shift, 0);
- return NewLIR4(cUnit, kThumb2Ubfx, rDestSrc1, rSrc2, 0, 16);
+ return NewLIR4(cu, kThumb2Ubfx, r_dest_src1, r_src2, 0, 16);
default:
LOG(FATAL) << "Bad opcode: " << op;
break;
}
DCHECK_GE(static_cast<int>(opcode), 0);
if (EncodingMap[opcode].flags & IS_BINARY_OP)
- return NewLIR2(cUnit, opcode, rDestSrc1, rSrc2);
+ return NewLIR2(cu, opcode, r_dest_src1, r_src2);
else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
- if (EncodingMap[opcode].fieldLoc[2].kind == kFmtShift)
- return NewLIR3(cUnit, opcode, rDestSrc1, rSrc2, shift);
+ if (EncodingMap[opcode].field_loc[2].kind == kFmtShift)
+ return NewLIR3(cu, opcode, r_dest_src1, r_src2, shift);
else
- return NewLIR3(cUnit, opcode, rDestSrc1, rDestSrc1, rSrc2);
+ return NewLIR3(cu, opcode, r_dest_src1, r_dest_src1, r_src2);
} else if (EncodingMap[opcode].flags & IS_QUAD_OP)
- return NewLIR4(cUnit, opcode, rDestSrc1, rDestSrc1, rSrc2, shift);
+ return NewLIR4(cu, opcode, r_dest_src1, r_dest_src1, r_src2, shift);
else {
LOG(FATAL) << "Unexpected encoding operand count";
return NULL;
}
}
-LIR* OpRegReg(CompilationUnit* cUnit, OpKind op, int rDestSrc1, int rSrc2)
+LIR* OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2)
{
- return OpRegRegShift(cUnit, op, rDestSrc1, rSrc2, 0);
+ return OpRegRegShift(cu, op, r_dest_src1, r_src2, 0);
}
-LIR* OpRegRegRegShift(CompilationUnit* cUnit, OpKind op, int rDest, int rSrc1,
- int rSrc2, int shift)
+LIR* OpRegRegRegShift(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
+ int r_src2, int shift)
{
ArmOpcode opcode = kThumbBkpt;
- bool thumbForm = (shift == 0) && ARM_LOWREG(rDest) && ARM_LOWREG(rSrc1) &&
- ARM_LOWREG(rSrc2);
+ bool thumb_form = (shift == 0) && ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1) &&
+ ARM_LOWREG(r_src2);
switch (op) {
case kOpAdd:
- opcode = (thumbForm) ? kThumbAddRRR : kThumb2AddRRR;
+ opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
break;
case kOpSub:
- opcode = (thumbForm) ? kThumbSubRRR : kThumb2SubRRR;
+ opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
break;
case kOpRsub:
opcode = kThumb2RsubRRR;
@@ -383,119 +383,119 @@
}
DCHECK_GE(static_cast<int>(opcode), 0);
if (EncodingMap[opcode].flags & IS_QUAD_OP)
- return NewLIR4(cUnit, opcode, rDest, rSrc1, rSrc2, shift);
+ return NewLIR4(cu, opcode, r_dest, r_src1, r_src2, shift);
else {
DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
- return NewLIR3(cUnit, opcode, rDest, rSrc1, rSrc2);
+ return NewLIR3(cu, opcode, r_dest, r_src1, r_src2);
}
}
-LIR* OpRegRegReg(CompilationUnit* cUnit, OpKind op, int rDest, int rSrc1,
- int rSrc2)
+LIR* OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
+ int r_src2)
{
- return OpRegRegRegShift(cUnit, op, rDest, rSrc1, rSrc2, 0);
+ return OpRegRegRegShift(cu, op, r_dest, r_src1, r_src2, 0);
}
-LIR* OpRegRegImm(CompilationUnit* cUnit, OpKind op, int rDest, int rSrc1,
+LIR* OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
int value)
{
LIR* res;
bool neg = (value < 0);
- int absValue = (neg) ? -value : value;
+ int abs_value = (neg) ? -value : value;
ArmOpcode opcode = kThumbBkpt;
- ArmOpcode altOpcode = kThumbBkpt;
- bool allLowRegs = (ARM_LOWREG(rDest) && ARM_LOWREG(rSrc1));
- int modImm = ModifiedImmediate(value);
- int modImmNeg = ModifiedImmediate(-value);
+ ArmOpcode alt_opcode = kThumbBkpt;
+ bool all_low_regs = (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1));
+ int mod_imm = ModifiedImmediate(value);
+ int mod_imm_neg = ModifiedImmediate(-value);
switch (op) {
case kOpLsl:
- if (allLowRegs)
- return NewLIR3(cUnit, kThumbLslRRI5, rDest, rSrc1, value);
+ if (all_low_regs)
+ return NewLIR3(cu, kThumbLslRRI5, r_dest, r_src1, value);
else
- return NewLIR3(cUnit, kThumb2LslRRI5, rDest, rSrc1, value);
+ return NewLIR3(cu, kThumb2LslRRI5, r_dest, r_src1, value);
case kOpLsr:
- if (allLowRegs)
- return NewLIR3(cUnit, kThumbLsrRRI5, rDest, rSrc1, value);
+ if (all_low_regs)
+ return NewLIR3(cu, kThumbLsrRRI5, r_dest, r_src1, value);
else
- return NewLIR3(cUnit, kThumb2LsrRRI5, rDest, rSrc1, value);
+ return NewLIR3(cu, kThumb2LsrRRI5, r_dest, r_src1, value);
case kOpAsr:
- if (allLowRegs)
- return NewLIR3(cUnit, kThumbAsrRRI5, rDest, rSrc1, value);
+ if (all_low_regs)
+ return NewLIR3(cu, kThumbAsrRRI5, r_dest, r_src1, value);
else
- return NewLIR3(cUnit, kThumb2AsrRRI5, rDest, rSrc1, value);
+ return NewLIR3(cu, kThumb2AsrRRI5, r_dest, r_src1, value);
case kOpRor:
- return NewLIR3(cUnit, kThumb2RorRRI5, rDest, rSrc1, value);
+ return NewLIR3(cu, kThumb2RorRRI5, r_dest, r_src1, value);
case kOpAdd:
- if (ARM_LOWREG(rDest) && (rSrc1 == r13sp) &&
+ if (ARM_LOWREG(r_dest) && (r_src1 == r13sp) &&
(value <= 1020) && ((value & 0x3)==0)) {
- return NewLIR3(cUnit, kThumbAddSpRel, rDest, rSrc1, value >> 2);
- } else if (ARM_LOWREG(rDest) && (rSrc1 == r15pc) &&
+ return NewLIR3(cu, kThumbAddSpRel, r_dest, r_src1, value >> 2);
+ } else if (ARM_LOWREG(r_dest) && (r_src1 == r15pc) &&
(value <= 1020) && ((value & 0x3)==0)) {
- return NewLIR3(cUnit, kThumbAddPcRel, rDest, rSrc1, value >> 2);
+ return NewLIR3(cu, kThumbAddPcRel, r_dest, r_src1, value >> 2);
}
// Note: intentional fallthrough
case kOpSub:
- if (allLowRegs && ((absValue & 0x7) == absValue)) {
+ if (all_low_regs && ((abs_value & 0x7) == abs_value)) {
if (op == kOpAdd)
opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3;
else
opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
- return NewLIR3(cUnit, opcode, rDest, rSrc1, absValue);
- } else if ((absValue & 0xff) == absValue) {
+ return NewLIR3(cu, opcode, r_dest, r_src1, abs_value);
+ } else if ((abs_value & 0xff) == abs_value) {
if (op == kOpAdd)
opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
else
opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
- return NewLIR3(cUnit, opcode, rDest, rSrc1, absValue);
+ return NewLIR3(cu, opcode, r_dest, r_src1, abs_value);
}
- if (modImmNeg >= 0) {
+ if (mod_imm_neg >= 0) {
op = (op == kOpAdd) ? kOpSub : kOpAdd;
- modImm = modImmNeg;
+ mod_imm = mod_imm_neg;
}
if (op == kOpSub) {
opcode = kThumb2SubRRI8;
- altOpcode = kThumb2SubRRR;
+ alt_opcode = kThumb2SubRRR;
} else {
opcode = kThumb2AddRRI8;
- altOpcode = kThumb2AddRRR;
+ alt_opcode = kThumb2AddRRR;
}
break;
case kOpAdc:
opcode = kThumb2AdcRRI8;
- altOpcode = kThumb2AdcRRR;
+ alt_opcode = kThumb2AdcRRR;
break;
case kOpSbc:
opcode = kThumb2SbcRRI8;
- altOpcode = kThumb2SbcRRR;
+ alt_opcode = kThumb2SbcRRR;
break;
case kOpOr:
opcode = kThumb2OrrRRI8;
- altOpcode = kThumb2OrrRRR;
+ alt_opcode = kThumb2OrrRRR;
break;
case kOpAnd:
opcode = kThumb2AndRRI8;
- altOpcode = kThumb2AndRRR;
+ alt_opcode = kThumb2AndRRR;
break;
case kOpXor:
opcode = kThumb2EorRRI8;
- altOpcode = kThumb2EorRRR;
+ alt_opcode = kThumb2EorRRR;
break;
case kOpMul:
//TUNING: power of 2, shift & add
- modImm = -1;
- altOpcode = kThumb2MulRRR;
+ mod_imm = -1;
+ alt_opcode = kThumb2MulRRR;
break;
case kOpCmp: {
- int modImm = ModifiedImmediate(value);
+ int mod_imm = ModifiedImmediate(value);
LIR* res;
- if (modImm >= 0) {
- res = NewLIR2(cUnit, kThumb2CmpRI8, rSrc1, modImm);
+ if (mod_imm >= 0) {
+ res = NewLIR2(cu, kThumb2CmpRI8, r_src1, mod_imm);
} else {
- int rTmp = AllocTemp(cUnit);
- res = LoadConstant(cUnit, rTmp, value);
- OpRegReg(cUnit, kOpCmp, rSrc1, rTmp);
- FreeTemp(cUnit, rTmp);
+ int r_tmp = AllocTemp(cu);
+ res = LoadConstant(cu, r_tmp, value);
+ OpRegReg(cu, kOpCmp, r_src1, r_tmp);
+ FreeTemp(cu, r_tmp);
}
return res;
}
@@ -503,63 +503,63 @@
LOG(FATAL) << "Bad opcode: " << op;
}
- if (modImm >= 0) {
- return NewLIR3(cUnit, opcode, rDest, rSrc1, modImm);
+ if (mod_imm >= 0) {
+ return NewLIR3(cu, opcode, r_dest, r_src1, mod_imm);
} else {
- int rScratch = AllocTemp(cUnit);
- LoadConstant(cUnit, rScratch, value);
- if (EncodingMap[altOpcode].flags & IS_QUAD_OP)
- res = NewLIR4(cUnit, altOpcode, rDest, rSrc1, rScratch, 0);
+ int r_scratch = AllocTemp(cu);
+ LoadConstant(cu, r_scratch, value);
+ if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
+ res = NewLIR4(cu, alt_opcode, r_dest, r_src1, r_scratch, 0);
else
- res = NewLIR3(cUnit, altOpcode, rDest, rSrc1, rScratch);
- FreeTemp(cUnit, rScratch);
+ res = NewLIR3(cu, alt_opcode, r_dest, r_src1, r_scratch);
+ FreeTemp(cu, r_scratch);
return res;
}
}
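
The add/sub arm above is the subtlest part of OpRegRegImm(): when neither short Thumb form fits but the negated value is a modified immediate, the operation itself is flipped. A minimal classifier sketch of that ladder follows, ignoring the SP/PC-relative ADD shortcuts at the top of the case; enum names are ours, and the simplified RotatedImm8Sketch() from the first sketch stands in for ModifiedImmediate().

#include <cstdint>

int RotatedImm8Sketch(uint32_t value);  // simplified stand-in for ModifiedImmediate()

enum class AddSubImmForm {
  kThumbRRI3,      // low regs, |value| fits in 3 bits
  kThumbRRI12,     // |value| <= 255 (the 0xff mask used above), kThumb2Add/SubRRI12
  kModImmFlipped,  // -value is a modified immediate: swap add <-> sub
  kModImm,         // value is a modified immediate, kThumb2Add/SubRRI8
  kScratchReg      // materialize in a temp, then the register-register form
};

static AddSubImmForm ClassifyAddSubImm(bool all_low_regs, int32_t value) {
  int32_t abs_value = (value < 0) ? -value : value;
  if (all_low_regs && (abs_value & 0x7) == abs_value) return AddSubImmForm::kThumbRRI3;
  if ((abs_value & 0xff) == abs_value) return AddSubImmForm::kThumbRRI12;
  if (RotatedImm8Sketch(static_cast<uint32_t>(-value)) >= 0) return AddSubImmForm::kModImmFlipped;
  if (RotatedImm8Sketch(static_cast<uint32_t>(value)) >= 0) return AddSubImmForm::kModImm;
  return AddSubImmForm::kScratchReg;
}
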
/* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
-LIR* OpRegImm(CompilationUnit* cUnit, OpKind op, int rDestSrc1, int value)
+LIR* OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value)
{
bool neg = (value < 0);
- int absValue = (neg) ? -value : value;
- bool shortForm = (((absValue & 0xff) == absValue) && ARM_LOWREG(rDestSrc1));
+ int abs_value = (neg) ? -value : value;
+ bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1));
ArmOpcode opcode = kThumbBkpt;
switch (op) {
case kOpAdd:
- if ( !neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */
+ if ( !neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
DCHECK_EQ((value & 0x3), 0);
- return NewLIR1(cUnit, kThumbAddSpI7, value >> 2);
- } else if (shortForm) {
+ return NewLIR1(cu, kThumbAddSpI7, value >> 2);
+ } else if (short_form) {
opcode = (neg) ? kThumbSubRI8 : kThumbAddRI8;
}
break;
case kOpSub:
- if (!neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */
+ if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
DCHECK_EQ((value & 0x3), 0);
- return NewLIR1(cUnit, kThumbSubSpI7, value >> 2);
- } else if (shortForm) {
+ return NewLIR1(cu, kThumbSubSpI7, value >> 2);
+ } else if (short_form) {
opcode = (neg) ? kThumbAddRI8 : kThumbSubRI8;
}
break;
case kOpCmp:
- if (ARM_LOWREG(rDestSrc1) && shortForm)
- opcode = (shortForm) ? kThumbCmpRI8 : kThumbCmpRR;
- else if (ARM_LOWREG(rDestSrc1))
+ if (ARM_LOWREG(r_dest_src1) && short_form)
+ opcode = (short_form) ? kThumbCmpRI8 : kThumbCmpRR;
+ else if (ARM_LOWREG(r_dest_src1))
opcode = kThumbCmpRR;
else {
- shortForm = false;
+ short_form = false;
opcode = kThumbCmpHL;
}
break;
default:
/* Punt to OpRegRegImm - if bad case catch it there */
- shortForm = false;
+ short_form = false;
break;
}
- if (shortForm)
- return NewLIR2(cUnit, opcode, rDestSrc1, absValue);
+ if (short_form)
+ return NewLIR2(cu, opcode, r_dest_src1, abs_value);
else {
- return OpRegRegImm(cUnit, op, rDestSrc1, rDestSrc1, value);
+ return OpRegRegImm(cu, op, r_dest_src1, r_dest_src1, value);
}
}
@@ -570,58 +570,58 @@
static int EncodeImmDoubleHigh(int value)
{
int res;
- int bitA = (value & 0x80000000) >> 31;
- int notBitB = (value & 0x40000000) >> 30;
- int bitB = (value & 0x20000000) >> 29;
- int bSmear = (value & 0x3fc00000) >> 22;
+ int bit_a = (value & 0x80000000) >> 31;
+ int not_bit_b = (value & 0x40000000) >> 30;
+ int bit_b = (value & 0x20000000) >> 29;
+ int b_smear = (value & 0x3fc00000) >> 22;
int slice = (value & 0x003f0000) >> 16;
int zeroes = (value & 0x0000ffff);
if (zeroes != 0)
return -1;
- if (bitB) {
- if ((notBitB != 0) || (bSmear != 0xff))
+ if (bit_b) {
+ if ((not_bit_b != 0) || (b_smear != 0xff))
return -1;
} else {
- if ((notBitB != 1) || (bSmear != 0x0))
+ if ((not_bit_b != 1) || (b_smear != 0x0))
return -1;
}
- res = (bitA << 7) | (bitB << 6) | slice;
+ res = (bit_a << 7) | (bit_b << 6) | slice;
return res;
}
-static int EncodeImmDouble(int valLo, int valHi)
+static int EncodeImmDouble(int val_lo, int val_hi)
{
int res = -1;
- if (valLo == 0)
- res = EncodeImmDoubleHigh(valHi);
+ if (val_lo == 0)
+ res = EncodeImmDoubleHigh(val_hi);
return res;
}
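
EncodeImmDoubleHigh() above tests whether the high word of a double matches the VFP vmov.f64 8-bit immediate pattern (sign bit, a NOT(b)/b-smear exponent field, and a 6-bit slice of exponent and fraction bits), and EncodeImmDouble() additionally requires a zero low word. A self-contained sketch with our own names, exercised on a few example values:

#include <cstdint>
#include <cstdio>
#include <cstring>

static int EncodeImmDoubleHighSketch(uint32_t hi) {
  int bit_a = (hi >> 31) & 1;
  int not_bit_b = (hi >> 30) & 1;
  int bit_b = (hi >> 29) & 1;
  int b_smear = (hi >> 22) & 0xff;
  int slice = (hi >> 16) & 0x3f;
  if ((hi & 0xffff) != 0) return -1;
  if (bit_b ? (not_bit_b != 0 || b_smear != 0xff)
            : (not_bit_b != 1 || b_smear != 0x00)) return -1;
  return (bit_a << 7) | (bit_b << 6) | slice;
}

static int EncodeDoubleSketch(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  if ((bits & 0xffffffffu) != 0) return -1;   // low word must be zero
  return EncodeImmDoubleHighSketch(static_cast<uint32_t>(bits >> 32));
}

int main() {
  printf("2.0  -> %d\n", EncodeDoubleSketch(2.0));   // encodable (imm8 == 0)
  printf("-1.5 -> %d\n", EncodeDoubleSketch(-1.5));  // encodable
  printf("0.1  -> %d\n", EncodeDoubleSketch(0.1));   // not encodable
  return 0;
}
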
-LIR* LoadConstantValueWide(CompilationUnit* cUnit, int rDestLo, int rDestHi,
- int valLo, int valHi)
+LIR* LoadConstantValueWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi,
+ int val_lo, int val_hi)
{
- int encodedImm = EncodeImmDouble(valLo, valHi);
+ int encoded_imm = EncodeImmDouble(val_lo, val_hi);
LIR* res;
- if (ARM_FPREG(rDestLo)) {
- if (encodedImm >= 0) {
- res = NewLIR2(cUnit, kThumb2Vmovd_IMM8, S2d(rDestLo, rDestHi),
- encodedImm);
+ if (ARM_FPREG(r_dest_lo)) {
+ if (encoded_imm >= 0) {
+ res = NewLIR2(cu, kThumb2Vmovd_IMM8, S2d(r_dest_lo, r_dest_hi),
+ encoded_imm);
} else {
- LIR* dataTarget = ScanLiteralPoolWide(cUnit->literalList, valLo, valHi);
- if (dataTarget == NULL) {
- dataTarget = AddWideData(cUnit, &cUnit->literalList, valLo, valHi);
+ LIR* data_target = ScanLiteralPoolWide(cu->literal_list, val_lo, val_hi);
+ if (data_target == NULL) {
+ data_target = AddWideData(cu, &cu->literal_list, val_lo, val_hi);
}
- LIR* loadPcRel =
- RawLIR(cUnit, cUnit->currentDalvikOffset, kThumb2Vldrd,
- S2d(rDestLo, rDestHi), r15pc, 0, 0, 0, dataTarget);
- SetMemRefType(loadPcRel, true, kLiteral);
- loadPcRel->aliasInfo = reinterpret_cast<uintptr_t>(dataTarget);
- AppendLIR(cUnit, loadPcRel);
- res = loadPcRel;
+ LIR* load_pc_rel =
+ RawLIR(cu, cu->current_dalvik_offset, kThumb2Vldrd,
+ S2d(r_dest_lo, r_dest_hi), r15pc, 0, 0, 0, data_target);
+ SetMemRefType(load_pc_rel, true, kLiteral);
+ load_pc_rel->alias_info = reinterpret_cast<uintptr_t>(data_target);
+ AppendLIR(cu, load_pc_rel);
+ res = load_pc_rel;
}
} else {
- res = LoadConstantNoClobber(cUnit, rDestLo, valLo);
- LoadConstantNoClobber(cUnit, rDestHi, valHi);
+ res = LoadConstantNoClobber(cu, r_dest_lo, val_lo);
+ LoadConstantNoClobber(cu, r_dest_hi, val_hi);
}
return res;
}
@@ -630,24 +630,24 @@
return ((amount & 0x1f) << 2) | code;
}
-LIR* LoadBaseIndexed(CompilationUnit* cUnit, int rBase, int rIndex, int rDest,
+LIR* LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest,
int scale, OpSize size)
{
- bool allLowRegs = ARM_LOWREG(rBase) && ARM_LOWREG(rIndex) && ARM_LOWREG(rDest);
+ bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_dest);
LIR* load;
ArmOpcode opcode = kThumbBkpt;
- bool thumbForm = (allLowRegs && (scale == 0));
- int regPtr;
+ bool thumb_form = (all_low_regs && (scale == 0));
+ int reg_ptr;
- if (ARM_FPREG(rDest)) {
- if (ARM_SINGLEREG(rDest)) {
+ if (ARM_FPREG(r_dest)) {
+ if (ARM_SINGLEREG(r_dest)) {
DCHECK((size == kWord) || (size == kSingle));
opcode = kThumb2Vldrs;
size = kSingle;
} else {
- DCHECK(ARM_DOUBLEREG(rDest));
+ DCHECK(ARM_DOUBLEREG(r_dest));
DCHECK((size == kLong) || (size == kDouble));
- DCHECK_EQ((rDest & 0x1), 0);
+ DCHECK_EQ((r_dest & 0x1), 0);
opcode = kThumb2Vldrd;
size = kDouble;
}
@@ -659,60 +659,60 @@
switch (size) {
case kDouble: // fall-through
case kSingle:
- regPtr = AllocTemp(cUnit);
+ reg_ptr = AllocTemp(cu);
if (scale) {
- NewLIR4(cUnit, kThumb2AddRRR, regPtr, rBase, rIndex,
+ NewLIR4(cu, kThumb2AddRRR, reg_ptr, rBase, r_index,
EncodeShift(kArmLsl, scale));
} else {
- OpRegRegReg(cUnit, kOpAdd, regPtr, rBase, rIndex);
+ OpRegRegReg(cu, kOpAdd, reg_ptr, rBase, r_index);
}
- load = NewLIR3(cUnit, opcode, rDest, regPtr, 0);
- FreeTemp(cUnit, regPtr);
+ load = NewLIR3(cu, opcode, r_dest, reg_ptr, 0);
+ FreeTemp(cu, reg_ptr);
return load;
case kWord:
- opcode = (thumbForm) ? kThumbLdrRRR : kThumb2LdrRRR;
+ opcode = (thumb_form) ? kThumbLdrRRR : kThumb2LdrRRR;
break;
case kUnsignedHalf:
- opcode = (thumbForm) ? kThumbLdrhRRR : kThumb2LdrhRRR;
+ opcode = (thumb_form) ? kThumbLdrhRRR : kThumb2LdrhRRR;
break;
case kSignedHalf:
- opcode = (thumbForm) ? kThumbLdrshRRR : kThumb2LdrshRRR;
+ opcode = (thumb_form) ? kThumbLdrshRRR : kThumb2LdrshRRR;
break;
case kUnsignedByte:
- opcode = (thumbForm) ? kThumbLdrbRRR : kThumb2LdrbRRR;
+ opcode = (thumb_form) ? kThumbLdrbRRR : kThumb2LdrbRRR;
break;
case kSignedByte:
- opcode = (thumbForm) ? kThumbLdrsbRRR : kThumb2LdrsbRRR;
+ opcode = (thumb_form) ? kThumbLdrsbRRR : kThumb2LdrsbRRR;
break;
default:
LOG(FATAL) << "Bad size: " << size;
}
- if (thumbForm)
- load = NewLIR3(cUnit, opcode, rDest, rBase, rIndex);
+ if (thumb_form)
+ load = NewLIR3(cu, opcode, r_dest, rBase, r_index);
else
- load = NewLIR4(cUnit, opcode, rDest, rBase, rIndex, scale);
+ load = NewLIR4(cu, opcode, r_dest, rBase, r_index, scale);
return load;
}
-LIR* StoreBaseIndexed(CompilationUnit* cUnit, int rBase, int rIndex, int rSrc,
+LIR* StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src,
int scale, OpSize size)
{
- bool allLowRegs = ARM_LOWREG(rBase) && ARM_LOWREG(rIndex) && ARM_LOWREG(rSrc);
+ bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_src);
LIR* store;
ArmOpcode opcode = kThumbBkpt;
- bool thumbForm = (allLowRegs && (scale == 0));
- int regPtr;
+ bool thumb_form = (all_low_regs && (scale == 0));
+ int reg_ptr;
- if (ARM_FPREG(rSrc)) {
- if (ARM_SINGLEREG(rSrc)) {
+ if (ARM_FPREG(r_src)) {
+ if (ARM_SINGLEREG(r_src)) {
DCHECK((size == kWord) || (size == kSingle));
opcode = kThumb2Vstrs;
size = kSingle;
} else {
- DCHECK(ARM_DOUBLEREG(rSrc));
+ DCHECK(ARM_DOUBLEREG(r_src));
DCHECK((size == kLong) || (size == kDouble));
- DCHECK_EQ((rSrc & 0x1), 0);
+ DCHECK_EQ((r_src & 0x1), 0);
opcode = kThumb2Vstrd;
size = kDouble;
}
@@ -724,136 +724,136 @@
switch (size) {
case kDouble: // fall-through
case kSingle:
- regPtr = AllocTemp(cUnit);
+ reg_ptr = AllocTemp(cu);
if (scale) {
- NewLIR4(cUnit, kThumb2AddRRR, regPtr, rBase, rIndex,
+ NewLIR4(cu, kThumb2AddRRR, reg_ptr, rBase, r_index,
EncodeShift(kArmLsl, scale));
} else {
- OpRegRegReg(cUnit, kOpAdd, regPtr, rBase, rIndex);
+ OpRegRegReg(cu, kOpAdd, reg_ptr, rBase, r_index);
}
- store = NewLIR3(cUnit, opcode, rSrc, regPtr, 0);
- FreeTemp(cUnit, regPtr);
+ store = NewLIR3(cu, opcode, r_src, reg_ptr, 0);
+ FreeTemp(cu, reg_ptr);
return store;
case kWord:
- opcode = (thumbForm) ? kThumbStrRRR : kThumb2StrRRR;
+ opcode = (thumb_form) ? kThumbStrRRR : kThumb2StrRRR;
break;
case kUnsignedHalf:
case kSignedHalf:
- opcode = (thumbForm) ? kThumbStrhRRR : kThumb2StrhRRR;
+ opcode = (thumb_form) ? kThumbStrhRRR : kThumb2StrhRRR;
break;
case kUnsignedByte:
case kSignedByte:
- opcode = (thumbForm) ? kThumbStrbRRR : kThumb2StrbRRR;
+ opcode = (thumb_form) ? kThumbStrbRRR : kThumb2StrbRRR;
break;
default:
LOG(FATAL) << "Bad size: " << size;
}
- if (thumbForm)
- store = NewLIR3(cUnit, opcode, rSrc, rBase, rIndex);
+ if (thumb_form)
+ store = NewLIR3(cu, opcode, r_src, rBase, r_index);
else
- store = NewLIR4(cUnit, opcode, rSrc, rBase, rIndex, scale);
+ store = NewLIR4(cu, opcode, r_src, rBase, r_index, scale);
return store;
}
/*
* Load value from base + displacement. Optionally perform null check
- * on base (which must have an associated sReg and MIR). If not
+ * on base (which must have an associated s_reg and MIR). If not
* performing null check, incoming MIR can be null.
*/
-LIR* LoadBaseDispBody(CompilationUnit* cUnit, int rBase,
- int displacement, int rDest, int rDestHi, OpSize size,
- int sReg)
+LIR* LoadBaseDispBody(CompilationUnit* cu, int rBase,
+ int displacement, int r_dest, int r_dest_hi, OpSize size,
+ int s_reg)
{
LIR* res;
LIR* load;
ArmOpcode opcode = kThumbBkpt;
- bool shortForm = false;
+ bool short_form = false;
bool thumb2Form = (displacement < 4092 && displacement >= 0);
- bool allLowRegs = (ARM_LOWREG(rBase) && ARM_LOWREG(rDest));
- int encodedDisp = displacement;
+ bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_dest));
+ int encoded_disp = displacement;
bool is64bit = false;
switch (size) {
case kDouble:
case kLong:
is64bit = true;
- if (ARM_FPREG(rDest)) {
- if (ARM_SINGLEREG(rDest)) {
- DCHECK(ARM_FPREG(rDestHi));
- rDest = S2d(rDest, rDestHi);
+ if (ARM_FPREG(r_dest)) {
+ if (ARM_SINGLEREG(r_dest)) {
+ DCHECK(ARM_FPREG(r_dest_hi));
+ r_dest = S2d(r_dest, r_dest_hi);
}
opcode = kThumb2Vldrd;
if (displacement <= 1020) {
- shortForm = true;
- encodedDisp >>= 2;
+ short_form = true;
+ encoded_disp >>= 2;
}
break;
} else {
- res = LoadBaseDispBody(cUnit, rBase, displacement, rDest,
- -1, kWord, sReg);
- LoadBaseDispBody(cUnit, rBase, displacement + 4, rDestHi,
+ res = LoadBaseDispBody(cu, rBase, displacement, r_dest,
+ -1, kWord, s_reg);
+ LoadBaseDispBody(cu, rBase, displacement + 4, r_dest_hi,
-1, kWord, INVALID_SREG);
return res;
}
case kSingle:
case kWord:
- if (ARM_FPREG(rDest)) {
+ if (ARM_FPREG(r_dest)) {
opcode = kThumb2Vldrs;
if (displacement <= 1020) {
- shortForm = true;
- encodedDisp >>= 2;
+ short_form = true;
+ encoded_disp >>= 2;
}
break;
}
- if (ARM_LOWREG(rDest) && (rBase == r15pc) &&
+ if (ARM_LOWREG(r_dest) && (rBase == r15pc) &&
(displacement <= 1020) && (displacement >= 0)) {
- shortForm = true;
- encodedDisp >>= 2;
+ short_form = true;
+ encoded_disp >>= 2;
opcode = kThumbLdrPcRel;
- } else if (ARM_LOWREG(rDest) && (rBase == r13sp) &&
+ } else if (ARM_LOWREG(r_dest) && (rBase == r13sp) &&
(displacement <= 1020) && (displacement >= 0)) {
- shortForm = true;
- encodedDisp >>= 2;
+ short_form = true;
+ encoded_disp >>= 2;
opcode = kThumbLdrSpRel;
- } else if (allLowRegs && displacement < 128 && displacement >= 0) {
+ } else if (all_low_regs && displacement < 128 && displacement >= 0) {
DCHECK_EQ((displacement & 0x3), 0);
- shortForm = true;
- encodedDisp >>= 2;
+ short_form = true;
+ encoded_disp >>= 2;
opcode = kThumbLdrRRI5;
} else if (thumb2Form) {
- shortForm = true;
+ short_form = true;
opcode = kThumb2LdrRRI12;
}
break;
case kUnsignedHalf:
- if (allLowRegs && displacement < 64 && displacement >= 0) {
+ if (all_low_regs && displacement < 64 && displacement >= 0) {
DCHECK_EQ((displacement & 0x1), 0);
- shortForm = true;
- encodedDisp >>= 1;
+ short_form = true;
+ encoded_disp >>= 1;
opcode = kThumbLdrhRRI5;
} else if (displacement < 4092 && displacement >= 0) {
- shortForm = true;
+ short_form = true;
opcode = kThumb2LdrhRRI12;
}
break;
case kSignedHalf:
if (thumb2Form) {
- shortForm = true;
+ short_form = true;
opcode = kThumb2LdrshRRI12;
}
break;
case kUnsignedByte:
- if (allLowRegs && displacement < 32 && displacement >= 0) {
- shortForm = true;
+ if (all_low_regs && displacement < 32 && displacement >= 0) {
+ short_form = true;
opcode = kThumbLdrbRRI5;
} else if (thumb2Form) {
- shortForm = true;
+ short_form = true;
opcode = kThumb2LdrbRRI12;
}
break;
case kSignedByte:
if (thumb2Form) {
- shortForm = true;
+ short_form = true;
opcode = kThumb2LdrsbRRI12;
}
break;
@@ -861,199 +861,199 @@
LOG(FATAL) << "Bad size: " << size;
}
- if (shortForm) {
- load = res = NewLIR3(cUnit, opcode, rDest, rBase, encodedDisp);
+ if (short_form) {
+ load = res = NewLIR3(cu, opcode, r_dest, rBase, encoded_disp);
} else {
- int regOffset = AllocTemp(cUnit);
- res = LoadConstant(cUnit, regOffset, encodedDisp);
- load = LoadBaseIndexed(cUnit, rBase, regOffset, rDest, 0, size);
- FreeTemp(cUnit, regOffset);
+ int reg_offset = AllocTemp(cu);
+ res = LoadConstant(cu, reg_offset, encoded_disp);
+ load = LoadBaseIndexed(cu, rBase, reg_offset, r_dest, 0, size);
+ FreeTemp(cu, reg_offset);
}
// TODO: in future may need to differentiate Dalvik accesses w/ spills
if (rBase == rARM_SP) {
- AnnotateDalvikRegAccess(load, displacement >> 2, true /* isLoad */, is64bit);
+ AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, is64bit);
}
return load;
}
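
For the common 32-bit core-register case, the selection logic above reduces to a short table of opcode families and displacement scalings. A hedged sketch with our own enum names; the real code also DCHECKs word alignment before the 5-bit form and, for sp-relative accesses, annotates the load with the word index displacement >> 2.

enum class WordLoadForm { kLdrPcRel, kLdrSpRel, kLdrRRI5, kLdrRRI12, kIndexedFallback };

struct WordLoadPlan {
  WordLoadForm form;
  int encoded_disp;   // value placed in the instruction's displacement field
};

static WordLoadPlan PlanWordLoad(bool dest_is_low, bool base_is_low,
                                 bool base_is_pc, bool base_is_sp, int disp) {
  if (dest_is_low && base_is_pc && disp >= 0 && disp <= 1020)
    return { WordLoadForm::kLdrPcRel, disp >> 2 };
  if (dest_is_low && base_is_sp && disp >= 0 && disp <= 1020)
    return { WordLoadForm::kLdrSpRel, disp >> 2 };
  if (dest_is_low && base_is_low && disp >= 0 && disp < 128)
    return { WordLoadForm::kLdrRRI5, disp >> 2 };    // word-aligned, 5-bit scaled offset
  if (disp >= 0 && disp < 4092)
    return { WordLoadForm::kLdrRRI12, disp };        // Thumb-2 unscaled 12-bit offset
  return { WordLoadForm::kIndexedFallback, disp };   // constant into a temp + LoadBaseIndexed
}
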
-LIR* LoadBaseDisp(CompilationUnit* cUnit, int rBase,
- int displacement, int rDest, OpSize size, int sReg)
+LIR* LoadBaseDisp(CompilationUnit* cu, int rBase,
+ int displacement, int r_dest, OpSize size, int s_reg)
{
- return LoadBaseDispBody(cUnit, rBase, displacement, rDest, -1, size,
- sReg);
+ return LoadBaseDispBody(cu, rBase, displacement, r_dest, -1, size,
+ s_reg);
}
- LIR* LoadBaseDispWide(CompilationUnit* cUnit, int rBase,
- int displacement, int rDestLo, int rDestHi, int sReg)
+ LIR* LoadBaseDispWide(CompilationUnit* cu, int rBase,
+ int displacement, int r_dest_lo, int r_dest_hi, int s_reg)
{
- return LoadBaseDispBody(cUnit, rBase, displacement, rDestLo, rDestHi,
- kLong, sReg);
+ return LoadBaseDispBody(cu, rBase, displacement, r_dest_lo, r_dest_hi,
+ kLong, s_reg);
}
-LIR* StoreBaseDispBody(CompilationUnit* cUnit, int rBase, int displacement,
- int rSrc, int rSrcHi, OpSize size)
+LIR* StoreBaseDispBody(CompilationUnit* cu, int rBase, int displacement,
+ int r_src, int r_src_hi, OpSize size)
{
LIR* res, *store;
ArmOpcode opcode = kThumbBkpt;
- bool shortForm = false;
+ bool short_form = false;
bool thumb2Form = (displacement < 4092 && displacement >= 0);
- bool allLowRegs = (ARM_LOWREG(rBase) && ARM_LOWREG(rSrc));
- int encodedDisp = displacement;
+ bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_src));
+ int encoded_disp = displacement;
bool is64bit = false;
switch (size) {
case kLong:
case kDouble:
is64bit = true;
- if (!ARM_FPREG(rSrc)) {
- res = StoreBaseDispBody(cUnit, rBase, displacement, rSrc, -1, kWord);
- StoreBaseDispBody(cUnit, rBase, displacement + 4, rSrcHi, -1, kWord);
+ if (!ARM_FPREG(r_src)) {
+ res = StoreBaseDispBody(cu, rBase, displacement, r_src, -1, kWord);
+ StoreBaseDispBody(cu, rBase, displacement + 4, r_src_hi, -1, kWord);
return res;
}
- if (ARM_SINGLEREG(rSrc)) {
- DCHECK(ARM_FPREG(rSrcHi));
- rSrc = S2d(rSrc, rSrcHi);
+ if (ARM_SINGLEREG(r_src)) {
+ DCHECK(ARM_FPREG(r_src_hi));
+ r_src = S2d(r_src, r_src_hi);
}
opcode = kThumb2Vstrd;
if (displacement <= 1020) {
- shortForm = true;
- encodedDisp >>= 2;
+ short_form = true;
+ encoded_disp >>= 2;
}
break;
case kSingle:
case kWord:
- if (ARM_FPREG(rSrc)) {
- DCHECK(ARM_SINGLEREG(rSrc));
+ if (ARM_FPREG(r_src)) {
+ DCHECK(ARM_SINGLEREG(r_src));
opcode = kThumb2Vstrs;
if (displacement <= 1020) {
- shortForm = true;
- encodedDisp >>= 2;
+ short_form = true;
+ encoded_disp >>= 2;
}
break;
}
- if (allLowRegs && displacement < 128 && displacement >= 0) {
+ if (all_low_regs && displacement < 128 && displacement >= 0) {
DCHECK_EQ((displacement & 0x3), 0);
- shortForm = true;
- encodedDisp >>= 2;
+ short_form = true;
+ encoded_disp >>= 2;
opcode = kThumbStrRRI5;
} else if (thumb2Form) {
- shortForm = true;
+ short_form = true;
opcode = kThumb2StrRRI12;
}
break;
case kUnsignedHalf:
case kSignedHalf:
- if (allLowRegs && displacement < 64 && displacement >= 0) {
+ if (all_low_regs && displacement < 64 && displacement >= 0) {
DCHECK_EQ((displacement & 0x1), 0);
- shortForm = true;
- encodedDisp >>= 1;
+ short_form = true;
+ encoded_disp >>= 1;
opcode = kThumbStrhRRI5;
} else if (thumb2Form) {
- shortForm = true;
+ short_form = true;
opcode = kThumb2StrhRRI12;
}
break;
case kUnsignedByte:
case kSignedByte:
- if (allLowRegs && displacement < 32 && displacement >= 0) {
- shortForm = true;
+ if (all_low_regs && displacement < 32 && displacement >= 0) {
+ short_form = true;
opcode = kThumbStrbRRI5;
} else if (thumb2Form) {
- shortForm = true;
+ short_form = true;
opcode = kThumb2StrbRRI12;
}
break;
default:
LOG(FATAL) << "Bad size: " << size;
}
- if (shortForm) {
- store = res = NewLIR3(cUnit, opcode, rSrc, rBase, encodedDisp);
+ if (short_form) {
+ store = res = NewLIR3(cu, opcode, r_src, rBase, encoded_disp);
} else {
- int rScratch = AllocTemp(cUnit);
- res = LoadConstant(cUnit, rScratch, encodedDisp);
- store = StoreBaseIndexed(cUnit, rBase, rScratch, rSrc, 0, size);
- FreeTemp(cUnit, rScratch);
+ int r_scratch = AllocTemp(cu);
+ res = LoadConstant(cu, r_scratch, encoded_disp);
+ store = StoreBaseIndexed(cu, rBase, r_scratch, r_src, 0, size);
+ FreeTemp(cu, r_scratch);
}
// TODO: In future, may need to differentiate Dalvik & spill accesses
if (rBase == rARM_SP) {
- AnnotateDalvikRegAccess(store, displacement >> 2, false /* isLoad */,
+ AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */,
is64bit);
}
return res;
}
-LIR* StoreBaseDisp(CompilationUnit* cUnit, int rBase, int displacement,
- int rSrc, OpSize size)
+LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement,
+ int r_src, OpSize size)
{
- return StoreBaseDispBody(cUnit, rBase, displacement, rSrc, -1, size);
+ return StoreBaseDispBody(cu, rBase, displacement, r_src, -1, size);
}
-LIR* StoreBaseDispWide(CompilationUnit* cUnit, int rBase, int displacement,
- int rSrcLo, int rSrcHi)
+LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement,
+ int r_src_lo, int r_src_hi)
{
- return StoreBaseDispBody(cUnit, rBase, displacement, rSrcLo, rSrcHi, kLong);
+ return StoreBaseDispBody(cu, rBase, displacement, r_src_lo, r_src_hi, kLong);
}
-void LoadPair(CompilationUnit* cUnit, int base, int lowReg, int highReg)
+void LoadPair(CompilationUnit* cu, int base, int low_reg, int high_reg)
{
- LoadBaseDispWide(cUnit, base, 0, lowReg, highReg, INVALID_SREG);
+ LoadBaseDispWide(cu, base, 0, low_reg, high_reg, INVALID_SREG);
}
-LIR* FpRegCopy(CompilationUnit* cUnit, int rDest, int rSrc)
+LIR* FpRegCopy(CompilationUnit* cu, int r_dest, int r_src)
{
int opcode;
- DCHECK_EQ(ARM_DOUBLEREG(rDest), ARM_DOUBLEREG(rSrc));
- if (ARM_DOUBLEREG(rDest)) {
+ DCHECK_EQ(ARM_DOUBLEREG(r_dest), ARM_DOUBLEREG(r_src));
+ if (ARM_DOUBLEREG(r_dest)) {
opcode = kThumb2Vmovd;
} else {
- if (ARM_SINGLEREG(rDest)) {
- opcode = ARM_SINGLEREG(rSrc) ? kThumb2Vmovs : kThumb2Fmsr;
+ if (ARM_SINGLEREG(r_dest)) {
+ opcode = ARM_SINGLEREG(r_src) ? kThumb2Vmovs : kThumb2Fmsr;
} else {
- DCHECK(ARM_SINGLEREG(rSrc));
+ DCHECK(ARM_SINGLEREG(r_src));
opcode = kThumb2Fmrs;
}
}
- LIR* res = RawLIR(cUnit, cUnit->currentDalvikOffset, opcode, rDest, rSrc);
- if (!(cUnit->disableOpt & (1 << kSafeOptimizations)) && rDest == rSrc) {
- res->flags.isNop = true;
+ LIR* res = RawLIR(cu, cu->current_dalvik_offset, opcode, r_dest, r_src);
+ if (!(cu->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
+ res->flags.is_nop = true;
}
return res;
}
-LIR* OpThreadMem(CompilationUnit* cUnit, OpKind op, int threadOffset)
+LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
{
LOG(FATAL) << "Unexpected use of OpThreadMem for Arm";
return NULL;
}
-LIR* OpMem(CompilationUnit* cUnit, OpKind op, int rBase, int disp)
+LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp)
{
LOG(FATAL) << "Unexpected use of OpMem for Arm";
return NULL;
}
-LIR* StoreBaseIndexedDisp(CompilationUnit *cUnit,
- int rBase, int rIndex, int scale, int displacement,
- int rSrc, int rSrcHi,
- OpSize size, int sReg)
+LIR* StoreBaseIndexedDisp(CompilationUnit *cu,
+ int rBase, int r_index, int scale, int displacement,
+ int r_src, int r_src_hi,
+ OpSize size, int s_reg)
{
LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm";
return NULL;
}
-LIR* OpRegMem(CompilationUnit *cUnit, OpKind op, int rDest, int rBase,
+LIR* OpRegMem(CompilationUnit *cu, OpKind op, int r_dest, int rBase,
int offset)
{
LOG(FATAL) << "Unexpected use of OpRegMem for Arm";
return NULL;
}
-LIR* LoadBaseIndexedDisp(CompilationUnit *cUnit,
- int rBase, int rIndex, int scale, int displacement,
- int rDest, int rDestHi,
- OpSize size, int sReg)
+LIR* LoadBaseIndexedDisp(CompilationUnit *cu,
+ int rBase, int r_index, int scale, int displacement,
+ int r_dest, int r_dest_hi,
+ OpSize size, int s_reg)
{
LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm";
return NULL;