Remove -Wno-unused-parameter and -Wno-sign-promo from base cflags.
Fix associated errors about unused parameters and implicit sign conversions.
For sign conversion this was largely in the area of enums, so add ostream
operators for the affected enums and fix tools/generate-operator-out.py.
Tidy arena allocation code and arena allocated data types, rather than fixing
new and delete operators.
Remove dead code.
Change-Id: I5b433e722d2f75baacfacae4d32aef4a828bfe1b
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index bc4d00b..c7449c8 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -473,8 +473,7 @@
cg->MarkPossibleNullPointerException(info->opt_flags);
}
-static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const CallInfo* info,
- const RegStorage* alt_from,
+static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_from,
const CompilationUnit* cu, Mir2Lir* cg) {
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
// Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
@@ -492,9 +491,10 @@
*/
static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t unused,
+ uint32_t,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
+ UNUSED(info);
DCHECK(cu->instruction_set != kX86 && cu->instruction_set != kX86_64 &&
cu->instruction_set != kThumb2 && cu->instruction_set != kArm &&
cu->instruction_set != kArm64);
@@ -547,7 +547,7 @@
break;
case 3: // Grab the code from the method*
if (direct_code == 0) {
- if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, &arg0_ref, cu, cg)) {
+ if (CommonCallCodeLoadCodePointerIntoInvokeTgt(&arg0_ref, cu, cg)) {
break; // kInvokeTgt := arg0_ref->entrypoint
}
} else {
@@ -571,8 +571,9 @@
*/
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
- InvokeType unused3) {
+ uint32_t method_idx, uintptr_t, uintptr_t,
+ InvokeType) {
+ UNUSED(target_method);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
/*
* This is the fast path in which the target virtual method is
@@ -595,7 +596,7 @@
break;
}
case 3:
- if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
+ if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
break; // kInvokeTgt := kArg0->entrypoint
}
DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
@@ -614,8 +615,7 @@
*/
static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t method_idx, uintptr_t unused,
- uintptr_t direct_method, InvokeType unused2) {
+ uint32_t method_idx, uintptr_t, uintptr_t, InvokeType) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
switch (state) {
@@ -641,7 +641,7 @@
break;
}
case 4:
- if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
+ if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
break; // kInvokeTgt := kArg0->entrypoint
}
DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
@@ -655,9 +655,9 @@
static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
QuickEntrypointEnum trampoline, int state,
const MethodReference& target_method, uint32_t method_idx) {
+ UNUSED(info, method_idx);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
-
/*
* This handles the case in which the base method is not fully
* resolved at compile time, we bail to a runtime helper.
@@ -684,32 +684,28 @@
static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeStaticTrampolineWithAccessCheck, state,
target_method, 0);
}
static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeDirectTrampolineWithAccessCheck, state,
target_method, 0);
}
static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeSuperTrampolineWithAccessCheck, state,
target_method, 0);
}
static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeVirtualTrampolineWithAccessCheck, state,
target_method, 0);
}
@@ -717,8 +713,7 @@
static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeInterfaceTrampolineWithAccessCheck, state,
target_method, 0);
}
@@ -1400,28 +1395,34 @@
}
bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
- // Currently implemented only for ARM64
+ // Currently implemented only for ARM64.
+ UNUSED(info, size);
return false;
}
bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
- // Currently implemented only for ARM64
+ // Currently implemented only for ARM64.
+ UNUSED(info, is_min, is_double);
return false;
}
bool Mir2Lir::GenInlinedCeil(CallInfo* info) {
+ UNUSED(info);
return false;
}
bool Mir2Lir::GenInlinedFloor(CallInfo* info) {
+ UNUSED(info);
return false;
}
bool Mir2Lir::GenInlinedRint(CallInfo* info) {
+ UNUSED(info);
return false;
}
bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
+ UNUSED(info, is_double);
return false;
}
@@ -1448,6 +1449,7 @@
}
bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
+ UNUSED(info);
return false;
}
@@ -1690,7 +1692,6 @@
const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
- BeginInvoke(info);
InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
info->type = method_info.GetSharpType();
bool fast_path = method_info.FastPath();
@@ -1734,7 +1735,6 @@
method_info.DirectCode(), method_info.DirectMethod(), original_type);
}
LIR* call_insn = GenCallInsn(method_info);
- EndInvoke(info);
MarkSafepointPC(call_insn);
ClobberCallerSave();
@@ -1755,6 +1755,7 @@
}
LIR* Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info) {
+ UNUSED(method_info);
DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64 &&
cu_->instruction_set != kThumb2 && cu_->instruction_set != kArm &&
cu_->instruction_set != kArm64);