Remove -Wno-unused-parameter and -Wno-sign-promo from base cflags.
Fix associated errors about unused parameters and implicit sign conversions.
For sign conversion this was largely in the area of enums, so add ostream
operators for the affected enums and fix tools/generate-operator-out.py.
Tidy arena allocation code and arena allocated data types, rather than fixing
new and delete operators.
Remove dead code.
Change-Id: I5b433e722d2f75baacfacae4d32aef4a828bfe1b
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 79d5eeb..9616d8f 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -143,25 +143,6 @@
RegStorage rs_rX86_SP;
-X86NativeRegisterPool rX86_ARG0;
-X86NativeRegisterPool rX86_ARG1;
-X86NativeRegisterPool rX86_ARG2;
-X86NativeRegisterPool rX86_ARG3;
-X86NativeRegisterPool rX86_ARG4;
-X86NativeRegisterPool rX86_ARG5;
-X86NativeRegisterPool rX86_FARG0;
-X86NativeRegisterPool rX86_FARG1;
-X86NativeRegisterPool rX86_FARG2;
-X86NativeRegisterPool rX86_FARG3;
-X86NativeRegisterPool rX86_FARG4;
-X86NativeRegisterPool rX86_FARG5;
-X86NativeRegisterPool rX86_FARG6;
-X86NativeRegisterPool rX86_FARG7;
-X86NativeRegisterPool rX86_RET0;
-X86NativeRegisterPool rX86_RET1;
-X86NativeRegisterPool rX86_INVOKE_TGT;
-X86NativeRegisterPool rX86_COUNT;
-
RegStorage rs_rX86_ARG0;
RegStorage rs_rX86_ARG1;
RegStorage rs_rX86_ARG2;
@@ -237,8 +218,9 @@
}
RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
+ UNUSED(reg);
LOG(FATAL) << "Do not use this function!!!";
- return RegStorage::InvalidReg();
+ UNREACHABLE();
}
/*
@@ -795,14 +777,11 @@
class_type_address_insns_.reserve(100);
call_method_insns_.reserve(100);
store_method_addr_used_ = false;
- if (kIsDebugBuild) {
for (int i = 0; i < kX86Last; i++) {
- if (X86Mir2Lir::EncodingMap[i].opcode != i) {
- LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
- }
- }
+ DCHECK_EQ(X86Mir2Lir::EncodingMap[i].opcode, i)
+ << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
}
if (cu_->target64) {
rs_rX86_SP = rs_rX86_SP_64;
@@ -821,20 +800,6 @@
rs_rX86_FARG5 = rs_fr5;
rs_rX86_FARG6 = rs_fr6;
rs_rX86_FARG7 = rs_fr7;
- rX86_ARG0 = rDI;
- rX86_ARG1 = rSI;
- rX86_ARG2 = rDX;
- rX86_ARG3 = rCX;
- rX86_ARG4 = r8;
- rX86_ARG5 = r9;
- rX86_FARG0 = fr0;
- rX86_FARG1 = fr1;
- rX86_FARG2 = fr2;
- rX86_FARG3 = fr3;
- rX86_FARG4 = fr4;
- rX86_FARG5 = fr5;
- rX86_FARG6 = fr6;
- rX86_FARG7 = fr7;
rs_rX86_INVOKE_TGT = rs_rDI;
} else {
rs_rX86_SP = rs_rX86_SP_32;
@@ -853,14 +818,6 @@
rs_rX86_FARG5 = RegStorage::InvalidReg();
rs_rX86_FARG6 = RegStorage::InvalidReg();
rs_rX86_FARG7 = RegStorage::InvalidReg();
- rX86_ARG0 = rAX;
- rX86_ARG1 = rCX;
- rX86_ARG2 = rDX;
- rX86_ARG3 = rBX;
- rX86_FARG0 = rAX;
- rX86_FARG1 = rCX;
- rX86_FARG2 = rDX;
- rX86_FARG3 = rBX;
rs_rX86_INVOKE_TGT = rs_rAX;
// TODO(64): Initialize with invalid reg
// rX86_ARG4 = RegStorage::InvalidReg();
@@ -869,10 +826,6 @@
rs_rX86_RET0 = rs_rAX;
rs_rX86_RET1 = rs_rDX;
rs_rX86_COUNT = rs_rCX;
- rX86_RET0 = rAX;
- rX86_RET1 = rDX;
- rX86_INVOKE_TGT = rAX;
- rX86_COUNT = rCX;
}
Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
@@ -882,8 +835,9 @@
// Not used in x86(-64)
RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
+ UNUSED(trampoline);
LOG(FATAL) << "Unexpected use of LoadHelper in x86";
- return RegStorage::InvalidReg();
+ UNREACHABLE();
}
LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
@@ -1548,46 +1502,46 @@
ReturnVectorRegisters(mir);
break;
case kMirOpConstVector:
- GenConst128(bb, mir);
+ GenConst128(mir);
break;
case kMirOpMoveVector:
- GenMoveVector(bb, mir);
+ GenMoveVector(mir);
break;
case kMirOpPackedMultiply:
- GenMultiplyVector(bb, mir);
+ GenMultiplyVector(mir);
break;
case kMirOpPackedAddition:
- GenAddVector(bb, mir);
+ GenAddVector(mir);
break;
case kMirOpPackedSubtract:
- GenSubtractVector(bb, mir);
+ GenSubtractVector(mir);
break;
case kMirOpPackedShiftLeft:
- GenShiftLeftVector(bb, mir);
+ GenShiftLeftVector(mir);
break;
case kMirOpPackedSignedShiftRight:
- GenSignedShiftRightVector(bb, mir);
+ GenSignedShiftRightVector(mir);
break;
case kMirOpPackedUnsignedShiftRight:
- GenUnsignedShiftRightVector(bb, mir);
+ GenUnsignedShiftRightVector(mir);
break;
case kMirOpPackedAnd:
- GenAndVector(bb, mir);
+ GenAndVector(mir);
break;
case kMirOpPackedOr:
- GenOrVector(bb, mir);
+ GenOrVector(mir);
break;
case kMirOpPackedXor:
- GenXorVector(bb, mir);
+ GenXorVector(mir);
break;
case kMirOpPackedAddReduce:
- GenAddReduceVector(bb, mir);
+ GenAddReduceVector(mir);
break;
case kMirOpPackedReduce:
- GenReduceVector(bb, mir);
+ GenReduceVector(mir);
break;
case kMirOpPackedSet:
- GenSetVector(bb, mir);
+ GenSetVector(mir);
break;
case kMirOpMemBarrier:
GenMemBarrier(static_cast<MemBarrierKind>(mir->dalvikInsn.vA));
@@ -1638,7 +1592,7 @@
}
}
-void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
+void X86Mir2Lir::GenConst128(MIR* mir) {
RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
Clobber(rs_dest);
@@ -1689,7 +1643,7 @@
load->target = data_target;
}
-void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenMoveVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1805,7 +1759,7 @@
NewLIR2(kX86PaddqRR, rs_dest_src1.GetReg(), rs_tmp_vector_1.GetReg());
}
-void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenMultiplyVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1839,7 +1793,7 @@
NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAddVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1874,7 +1828,7 @@
NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSubtractVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1909,7 +1863,7 @@
NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenShiftByteVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenShiftByteVector(MIR* mir) {
// Destination does not need clobbered because it has already been as part
// of the general packed shift handler (caller of this method).
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1953,7 +1907,7 @@
AndMaskVectorRegister(rs_dest_src1, int_mask, int_mask, int_mask, int_mask);
}
-void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenShiftLeftVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1973,7 +1927,7 @@
break;
case kSignedByte:
case kUnsignedByte:
- GenShiftByteVector(bb, mir);
+ GenShiftByteVector(mir);
return;
default:
LOG(FATAL) << "Unsupported vector shift left " << opsize;
@@ -1982,7 +1936,7 @@
NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}
-void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSignedShiftRightVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1999,18 +1953,18 @@
break;
case kSignedByte:
case kUnsignedByte:
- GenShiftByteVector(bb, mir);
+ GenShiftByteVector(mir);
return;
case k64:
// TODO Implement emulated shift algorithm.
default:
LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
- break;
+ UNREACHABLE();
}
NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}
-void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenUnsignedShiftRightVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2030,7 +1984,7 @@
break;
case kSignedByte:
case kUnsignedByte:
- GenShiftByteVector(bb, mir);
+ GenShiftByteVector(mir);
return;
default:
LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
@@ -2039,7 +1993,7 @@
NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}
-void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAndVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2048,7 +2002,7 @@
NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenOrVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2057,7 +2011,7 @@
NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenXorVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2084,7 +2038,7 @@
AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp);
}
-void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAddReduceVector(MIR* mir) {
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage vector_src = RegStorage::Solo128(mir->dalvikInsn.vB);
bool is_wide = opsize == k64 || opsize == kDouble;
@@ -2219,7 +2173,7 @@
// except the rhs is not a VR but a physical register allocated above.
// No load of source VR is done because it assumes that rl_result will
// share physical register / memory location.
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
if (rl_result.location == kLocPhysReg) {
// Ensure res is in a core reg.
rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -2232,7 +2186,7 @@
}
}
-void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenReduceVector(MIR* mir) {
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegLocation rl_dest = mir_graph_->GetDest(mir);
RegStorage vector_src = RegStorage::Solo128(mir->dalvikInsn.vB);
@@ -2286,7 +2240,7 @@
} else {
int extract_index = mir->dalvikInsn.arg[0];
int extr_opcode = 0;
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
// Handle the rest of integral types now.
switch (opsize) {
@@ -2302,7 +2256,7 @@
break;
default:
LOG(FATAL) << "Unsupported vector reduce " << opsize;
- return;
+ UNREACHABLE();
}
if (rl_result.location == kLocPhysReg) {
@@ -2331,7 +2285,7 @@
}
}
-void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSetVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2406,11 +2360,13 @@
}
}
-void X86Mir2Lir::GenPackedArrayGet(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenPackedArrayGet(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayGet not supported.";
}
-void X86Mir2Lir::GenPackedArrayPut(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenPackedArrayPut(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayPut not supported.";
}