Merge "Generalize codegen and simplification of deopt."
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index b507124..b3d246c 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -34,6 +34,10 @@
ART_BUILD_HOST_NDEBUG ?= true
ART_BUILD_HOST_DEBUG ?= true
+# Set this to change what opt level Art is built at.
+ART_DEBUG_OPT_FLAG ?= -O2
+ART_NDEBUG_OPT_FLAG ?= -O3
+
# Enable the static builds only for checkbuilds.
ifneq (,$(filter checkbuild,$(MAKECMDGOALS)))
ART_BUILD_HOST_STATIC ?= true
@@ -319,11 +323,11 @@
# Cflags for non-debug ART and ART tools.
art_non_debug_cflags := \
- -O3
+ $(ART_NDEBUG_OPT_FLAG)
# Cflags for debug ART and ART tools.
art_debug_cflags := \
- -O2 \
+ $(ART_DEBUG_OPT_FLAG) \
-DDYNAMIC_ANNOTATIONS_ENABLED=1 \
-DVIXL_DEBUG \
-UNDEBUG
diff --git a/compiler/compiler.h b/compiler/compiler.h
index 9b4dbe0..8788dc1 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -77,9 +77,8 @@
* information.
* @note This is used for backtrace information in generated code.
*/
- virtual std::vector<uint8_t>* GetCallFrameInformationInitialization(const CompilerDriver& driver)
- const {
- UNUSED(driver);
+ virtual std::vector<uint8_t>* GetCallFrameInformationInitialization(
+ const CompilerDriver& driver ATTRIBUTE_UNUSED) const {
return nullptr;
}
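
The conversion above is the template for most of this change: the variadic UNUSED(...) call in the body is replaced by tagging the parameter itself with ATTRIBUTE_UNUSED in the signature. A minimal, self-contained sketch of the two idioms follows; the macro bodies here are assumptions modeled on ART's base/macros.h, not quoted from it.

// Sketch of the before/after idioms; macro definitions are assumptions.
#include <cstdint>
#include <vector>

#define ATTRIBUTE_UNUSED __attribute__((__unused__))

// Old style: a variadic no-op silences -Wunused-parameter at the use site.
template <typename... T>
void UNUSED(const T&...) {}

struct CompilerDriver {};

// Before: parameter suppressed inside the body.
std::vector<std::uint8_t>* GetCfiOld(const CompilerDriver& driver) {
  UNUSED(driver);
  return nullptr;
}

// After: the attribute documents the unused parameter in the signature,
// so the body stays empty and the intent is visible to readers and tools.
std::vector<std::uint8_t>* GetCfiNew(const CompilerDriver& driver ATTRIBUTE_UNUSED) {
  return nullptr;
}

int main() {
  CompilerDriver driver;
  return (GetCfiOld(driver) == nullptr && GetCfiNew(driver) == nullptr) ? 0 : 1;
}
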
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 4efe4af..b0972d9 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -511,9 +511,8 @@
/* Process instructions with the kSwitch flag */
BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
- int width, int flags,
+ int width, int flags ATTRIBUTE_UNUSED,
ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
- UNUSED(flags);
const uint16_t* switch_data =
reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset +
static_cast<int32_t>(insn->dalvikInsn.vB));
@@ -592,11 +591,15 @@
}
/* Process instructions with the kThrow flag */
-BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
- int width, int flags, ArenaBitVector* try_block_addr,
- const uint16_t* code_ptr, const uint16_t* code_end,
+BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block,
+ MIR* insn,
+ DexOffset cur_offset,
+ int width,
+ int flags ATTRIBUTE_UNUSED,
+ ArenaBitVector* try_block_addr,
+ const uint16_t* code_ptr,
+ const uint16_t* code_end,
ScopedArenaVector<uint16_t>* dex_pc_to_block_map) {
- UNUSED(flags);
bool in_try_block = try_block_addr->IsBitSet(cur_offset);
bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW);
diff --git a/compiler/dex/pass.h b/compiler/dex/pass.h
index 0def056..16414ef 100644
--- a/compiler/dex/pass.h
+++ b/compiler/dex/pass.h
@@ -53,10 +53,7 @@
* @param data the PassDataHolder.
* @return whether or not to execute the pass.
*/
- virtual bool Gate(const PassDataHolder* data) const {
- // Unused parameter.
- UNUSED(data);
-
+ virtual bool Gate(const PassDataHolder* data ATTRIBUTE_UNUSED) const {
// Base class says yes.
return true;
}
@@ -64,17 +61,13 @@
/**
* @brief Start of the pass: called before the Worker function.
*/
- virtual void Start(PassDataHolder* data) const {
- // Unused parameter.
- UNUSED(data);
+ virtual void Start(PassDataHolder* data ATTRIBUTE_UNUSED) const {
}
/**
* @brief End of the pass: called after the WalkBasicBlocks function.
*/
- virtual void End(PassDataHolder* data) const {
- // Unused parameter.
- UNUSED(data);
+ virtual void End(PassDataHolder* data ATTRIBUTE_UNUSED) const {
}
/**
diff --git a/compiler/dex/pass_driver.h b/compiler/dex/pass_driver.h
index 8762b53..34a6f63 100644
--- a/compiler/dex/pass_driver.h
+++ b/compiler/dex/pass_driver.h
@@ -125,8 +125,7 @@
* @brief Dispatch a pass.
* Gives the ability to add logic when running the pass.
*/
- virtual void DispatchPass(const Pass* pass) {
- UNUSED(pass);
+ virtual void DispatchPass(const Pass* pass ATTRIBUTE_UNUSED) {
}
/** @brief List of passes: provides the order to execute the passes.
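
For the Pass hooks converted above, the attribute keeps the defaulted Gate/Start/End bodies empty, while a subclass that actually uses the data overrides them normally. A hedged sketch with simplified stand-in types, not ART's real pass.h:

// Simplified stand-ins for Pass/PassDataHolder, showing how the defaulted
// hooks read after the conversion.
#define ATTRIBUTE_UNUSED __attribute__((__unused__))

struct PassDataHolder {
  bool has_work = true;
};

class Pass {
 public:
  virtual ~Pass() {}
  // Base class says yes; the parameter exists only for overriders.
  virtual bool Gate(const PassDataHolder* data ATTRIBUTE_UNUSED) const {
    return true;
  }
  virtual void Start(PassDataHolder* data ATTRIBUTE_UNUSED) const {}
  virtual void End(PassDataHolder* data ATTRIBUTE_UNUSED) const {}
};

class SkippablePass : public Pass {
 public:
  // Only this override actually inspects the data, so only it names it.
  bool Gate(const PassDataHolder* data) const override {
    return data->has_work;
  }
};

int main() {
  PassDataHolder data;
  SkippablePass pass;
  return pass.Gate(&data) ? 0 : 1;
}
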
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index db76cc6..b2bd6fa 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -216,8 +216,7 @@
void ArmMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- RegisterClass dest_reg_class) {
- UNUSED(dest_reg_class);
+ RegisterClass dest_reg_class ATTRIBUTE_UNUSED) {
// TODO: Generalize the IT below to accept more than one-instruction loads.
DCHECK(InexpensiveConstantInt(true_val));
DCHECK(InexpensiveConstantInt(false_val));
@@ -239,8 +238,7 @@
OpEndIT(it);
}
-void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
- UNUSED(bb);
+void ArmMir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
RegLocation rl_dest = mir_graph_->GetDest(mir);
@@ -516,9 +514,8 @@
};
// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
-bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
+bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(dalvik_opcode);
if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
return false;
}
@@ -728,16 +725,19 @@
return true;
}
-RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) {
- UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
+RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ RegLocation rl_src2 ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
UNREACHABLE();
}
-RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
- bool is_div) {
- UNUSED(rl_dest, rl_src1, lit, is_div);
+RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
UNREACHABLE();
}
@@ -1160,9 +1160,8 @@
}
void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
- RegLocation rl_result, int lit,
+ RegLocation rl_result, int lit ATTRIBUTE_UNUSED,
int first_bit, int second_bit) {
- UNUSED(lit);
OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
EncodeShift(kArmLsl, second_bit - first_bit));
if (first_bit != 0) {
@@ -1257,9 +1256,8 @@
StoreValueWide(rl_dest, rl_result);
}
-void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
+void ArmMir2Lir::GenMulLong(Instruction::Code opcode ATTRIBUTE_UNUSED, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
- UNUSED(opcode);
/*
* tmp1 = src1.hi * src2.lo; // src1.hi is no longer needed
* dest = src1.lo * src2.lo;
@@ -1564,8 +1562,7 @@
void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift,
- int flags) {
- UNUSED(flags);
+ int flags ATTRIBUTE_UNUSED) {
rl_src = LoadValueWide(rl_src, kCoreReg);
// Per spec, we only care about low 6 bits of shift amount.
int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
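
Several of the converted overrides, like GenDivRem and GenDivRemLit above, are deliberate dead ends: LOG(FATAL) aborts, and UNREACHABLE() tells the compiler control never reaches the end of a value-returning function. A small sketch of the pattern; Fatal() and this UNREACHABLE definition are stand-ins for ART's logging and base/macros.h, assumed here.

// Sketch of the dead-end override pattern used throughout these backends.
#include <cstdio>
#include <cstdlib>

#define UNREACHABLE() __builtin_unreachable()

[[noreturn]] static void Fatal(const char* msg) {
  std::fprintf(stderr, "FATAL: %s\n", msg);
  std::abort();
}

struct LIR;  // opaque, never dereferenced here

// Must never be called on this target: Fatal() aborts, and UNREACHABLE()
// lets a value-returning function end without a bogus return statement.
static LIR* OpMemNotSupported() {
  Fatal("Unexpected use of OpMem for this target");
  UNREACHABLE();
}

int main() {
  (void)OpMemNotSupported;  // referenced but deliberately not called
  return 0;
}
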
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 5f27338..355485e 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -987,8 +987,7 @@
return count;
}
-void ArmMir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
- UNUSED(bb);
+void ArmMir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
DCHECK(MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode));
RegLocation rl_src[3];
RegLocation rl_dest = mir_graph_->GetBadLoc();
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index 062f7af..c31f46b 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -419,20 +419,26 @@
return OpRegRegShift(op, r_dest_src1, r_src2, 0);
}
-LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
- UNUSED(r_dest, r_base, offset, move_type);
+LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_base ATTRIBUTE_UNUSED,
+ int offset ATTRIBUTE_UNUSED,
+ MoveType move_type ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
}
-LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
- UNUSED(r_base, offset, r_src, move_type);
+LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base ATTRIBUTE_UNUSED,
+ int offset ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED,
+ MoveType move_type ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
}
-LIR* ArmMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
- UNUSED(op, cc, r_dest, r_src);
+LIR* ArmMir2Lir::OpCondRegReg(OpKind op ATTRIBUTE_UNUSED,
+ ConditionCode cc ATTRIBUTE_UNUSED,
+ RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm";
UNREACHABLE();
}
@@ -1243,14 +1249,17 @@
return res;
}
-LIR* ArmMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
- UNUSED(op, r_base, disp);
+LIR* ArmMir2Lir::OpMem(OpKind op ATTRIBUTE_UNUSED,
+ RegStorage r_base ATTRIBUTE_UNUSED,
+ int disp ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpMem for Arm";
UNREACHABLE();
}
-LIR* ArmMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
- UNUSED(trampoline); // The address of the trampoline is already loaded into r_tgt.
+LIR* ArmMir2Lir::InvokeTrampoline(OpKind op,
+ RegStorage r_tgt,
+ // The address of the trampoline is already loaded into r_tgt.
+ QuickEntrypointEnum trampoline ATTRIBUTE_UNUSED) {
return OpReg(op, r_tgt);
}
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 31cf667..d92dea2 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -37,14 +37,12 @@
return OpCondBranch(cond, target);
}
-LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
- UNUSED(ccode, guide);
+LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode ATTRIBUTE_UNUSED, const char* guide ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpIT for Arm64";
UNREACHABLE();
}
-void Arm64Mir2Lir::OpEndIT(LIR* it) {
- UNUSED(it);
+void Arm64Mir2Lir::OpEndIT(LIR* it ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}
@@ -188,8 +186,7 @@
GenSelect(true_val, false_val, code, rs_dest, dest_reg_class);
}
-void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
- UNUSED(bb);
+void Arm64Mir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
rl_src = LoadValue(rl_src, rl_src.ref ? kRefReg : kCoreReg);
// rl_src may be aliased with rl_result/rl_dest, so do compare early.
@@ -413,9 +410,11 @@
};
// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
-bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
- RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(dalvik_opcode);
+bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED,
+ bool is_div,
+ RegLocation rl_src,
+ RegLocation rl_dest,
+ int lit) {
if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
return false;
}
@@ -457,9 +456,11 @@
return true;
}
-bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div,
- RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
- UNUSED(dalvik_opcode);
+bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED,
+ bool is_div,
+ RegLocation rl_src,
+ RegLocation rl_dest,
+ int64_t lit) {
if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
return false;
}
@@ -599,15 +600,17 @@
return true;
}
-bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(rl_src, rl_dest, lit);
+bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src ATTRIBUTE_UNUSED,
+ RegLocation rl_dest ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
UNREACHABLE();
}
-RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
- bool is_div) {
- UNUSED(rl_dest, rl_src1, lit, is_div);
+RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
UNREACHABLE();
}
@@ -626,9 +629,11 @@
return rl_result;
}
-RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) {
- UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
+RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ RegLocation rl_src2 ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
UNREACHABLE();
}
@@ -963,14 +968,12 @@
dex_cache_access_insns_.push_back(ldr);
}
-LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
+LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
UNREACHABLE();
}
-LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
+LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
UNREACHABLE();
}
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 6efa11e..691bfd9 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -881,8 +881,7 @@
return count;
}
-void Arm64Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
- UNUSED(bb);
+void Arm64Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
DCHECK(MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode));
RegLocation rl_src[3];
RegLocation rl_dest = mir_graph_->GetBadLoc();
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 483231f..58769ea 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -672,22 +672,26 @@
}
}
-LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
- MoveType move_type) {
- UNUSED(r_dest, r_base, offset, move_type);
+LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_base ATTRIBUTE_UNUSED,
+ int offset ATTRIBUTE_UNUSED,
+ MoveType move_type ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
}
-LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src,
- MoveType move_type) {
- UNUSED(r_base, offset, r_src, move_type);
+LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base ATTRIBUTE_UNUSED,
+ int offset ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED,
+ MoveType move_type ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
return nullptr;
}
-LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
- UNUSED(op, cc, r_dest, r_src);
+LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op ATTRIBUTE_UNUSED,
+ ConditionCode cc ATTRIBUTE_UNUSED,
+ RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm64";
UNREACHABLE();
}
@@ -1381,14 +1385,15 @@
return store;
}
-LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
- UNUSED(r_dest, r_src);
+LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpFpRegCopy for Arm64";
UNREACHABLE();
}
-LIR* Arm64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
- UNUSED(op, r_base, disp);
+LIR* Arm64Mir2Lir::OpMem(OpKind op ATTRIBUTE_UNUSED,
+ RegStorage r_base ATTRIBUTE_UNUSED,
+ int disp ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpMem for Arm64";
UNREACHABLE();
}
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index dbcc868..cde99b3 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -992,8 +992,7 @@
}
/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */
-void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) {
- UNUSED(offset);
+void Mir2Lir::MarkBoundary(DexOffset offset ATTRIBUTE_UNUSED, const char* inst_str) {
// NOTE: only used for debug listings.
NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str)));
}
@@ -1358,8 +1357,8 @@
return loc;
}
-void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
+void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb ATTRIBUTE_UNUSED,
+ MIR* mir ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unknown MIR opcode not supported on this architecture";
UNREACHABLE();
}
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 2a1d644..2b60a51 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -2102,15 +2102,15 @@
}
/* Call out to helper assembly routine that will null check obj and then lock it. */
-void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
- UNUSED(opt_flags); // TODO: avoid null check with specialized non-null helper.
+void Mir2Lir::GenMonitorEnter(int opt_flags ATTRIBUTE_UNUSED, RegLocation rl_src) {
+ // TODO: avoid null check with specialized non-null helper.
FlushAllRegs();
CallRuntimeHelperRegLocation(kQuickLockObject, rl_src, true);
}
/* Call out to helper assembly routine that will null check obj and then unlock it. */
-void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
- UNUSED(opt_flags); // TODO: avoid null check with specialized non-null helper.
+void Mir2Lir::GenMonitorExit(int opt_flags ATTRIBUTE_UNUSED, RegLocation rl_src) {
+ // TODO: avoid null check with specialized non-null helper.
FlushAllRegs();
CallRuntimeHelperRegLocation(kQuickUnlockObject, rl_src, true);
}
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 3c5c2fe..422d82f 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -521,10 +521,9 @@
* kArg1 here rather than the standard GenDalvikArgs.
*/
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
- int state, const MethodReference& target_method,
+ int state, const MethodReference& target_method ATTRIBUTE_UNUSED,
uint32_t method_idx, uintptr_t, uintptr_t,
InvokeType) {
- UNUSED(target_method);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
/*
* This is the fast path in which the target virtual method is
@@ -607,10 +606,12 @@
return state + 1;
}
-static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
- QuickEntrypointEnum trampoline, int state,
- const MethodReference& target_method, uint32_t method_idx) {
- UNUSED(info, method_idx);
+static int NextInvokeInsnSP(CompilationUnit* cu,
+ CallInfo* info ATTRIBUTE_UNUSED,
+ QuickEntrypointEnum trampoline,
+ int state,
+ const MethodReference& target_method,
+ uint32_t method_idx ATTRIBUTE_UNUSED) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
/*
@@ -1266,35 +1267,31 @@
return true;
}
-bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
+bool Mir2Lir::GenInlinedReverseBits(CallInfo* info ATTRIBUTE_UNUSED, OpSize size ATTRIBUTE_UNUSED) {
// Currently implemented only for ARM64.
- UNUSED(info, size);
return false;
}
-bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
+bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info ATTRIBUTE_UNUSED,
+ bool is_min ATTRIBUTE_UNUSED,
+ bool is_double ATTRIBUTE_UNUSED) {
// Currently implemented only for ARM64.
- UNUSED(info, is_min, is_double);
return false;
}
-bool Mir2Lir::GenInlinedCeil(CallInfo* info) {
- UNUSED(info);
+bool Mir2Lir::GenInlinedCeil(CallInfo* info ATTRIBUTE_UNUSED) {
return false;
}
-bool Mir2Lir::GenInlinedFloor(CallInfo* info) {
- UNUSED(info);
+bool Mir2Lir::GenInlinedFloor(CallInfo* info ATTRIBUTE_UNUSED) {
return false;
}
-bool Mir2Lir::GenInlinedRint(CallInfo* info) {
- UNUSED(info);
+bool Mir2Lir::GenInlinedRint(CallInfo* info ATTRIBUTE_UNUSED) {
return false;
}
-bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
- UNUSED(info, is_double);
+bool Mir2Lir::GenInlinedRound(CallInfo* info ATTRIBUTE_UNUSED, bool is_double ATTRIBUTE_UNUSED) {
return false;
}
@@ -1328,8 +1325,7 @@
return true;
}
-bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
- UNUSED(info);
+bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info ATTRIBUTE_UNUSED) {
return false;
}
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 8863c05..4a736f3d 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -32,9 +32,10 @@
namespace art {
-bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
+bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb ATTRIBUTE_UNUSED,
+ MIR* mir ATTRIBUTE_UNUSED,
+ const InlineMethod& special ATTRIBUTE_UNUSED) {
// TODO
- UNUSED(bb, mir, special);
return false;
}
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 45fd1a9..52706df 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -115,17 +115,17 @@
StoreValueWide(rl_dest, rl_result);
}
-void MipsMir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
- int32_t constant) {
+void MipsMir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int32_t constant ATTRIBUTE_UNUSED) {
// TODO: need mips implementation.
- UNUSED(rl_dest, rl_src1, constant);
LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in mips";
}
-void MipsMir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
- int64_t constant) {
+void MipsMir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int64_t constant ATTRIBUTE_UNUSED) {
// TODO: need mips implementation.
- UNUSED(rl_dest, rl_src1, constant);
LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in mips";
}
@@ -254,8 +254,10 @@
StoreValue(rl_dest, rl_result);
}
-void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) {
- UNUSED(bb, mir, gt_bias, is_double);
+void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb ATTRIBUTE_UNUSED,
+ MIR* mir ATTRIBUTE_UNUSED,
+ bool gt_bias ATTRIBUTE_UNUSED,
+ bool is_double ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
}
@@ -288,9 +290,10 @@
StoreValueWide(rl_dest, rl_result);
}
-bool MipsMir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
+bool MipsMir2Lir::GenInlinedMinMax(CallInfo* info ATTRIBUTE_UNUSED,
+ bool is_min ATTRIBUTE_UNUSED,
+ bool is_long ATTRIBUTE_UNUSED) {
// TODO: need Mips implementation.
- UNUSED(info, is_min, is_long);
return false;
}
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 1099303..8ca53ea 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -279,8 +279,7 @@
void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- RegisterClass dest_reg_class) {
- UNUSED(dest_reg_class);
+ RegisterClass dest_reg_class ATTRIBUTE_UNUSED) {
// Implement as a branch-over.
// TODO: Conditional move?
LoadConstant(rs_dest, true_val);
@@ -290,13 +289,12 @@
ne_branchover->target = target_label;
}
-void MipsMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
+void MipsMir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Need codegen for select";
}
-void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
+void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb ATTRIBUTE_UNUSED,
+ MIR* mir ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
}
@@ -327,39 +325,40 @@
return rl_result;
}
-RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
- bool is_div, int flags) {
- UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
+RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ RegLocation rl_src2 ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRem for Mips";
UNREACHABLE();
}
-RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
- bool is_div) {
- UNUSED(rl_dest, rl_src1, lit, is_div);
+RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRemLit for Mips";
UNREACHABLE();
}
-bool MipsMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
- UNUSED(info, is_long, is_object);
+bool MipsMir2Lir::GenInlinedCas(CallInfo* info ATTRIBUTE_UNUSED,
+ bool is_long ATTRIBUTE_UNUSED,
+ bool is_object ATTRIBUTE_UNUSED) {
return false;
}
-bool MipsMir2Lir::GenInlinedAbsFloat(CallInfo* info) {
- UNUSED(info);
+bool MipsMir2Lir::GenInlinedAbsFloat(CallInfo* info ATTRIBUTE_UNUSED) {
// TODO: add Mips implementation.
return false;
}
-bool MipsMir2Lir::GenInlinedAbsDouble(CallInfo* info) {
- UNUSED(info);
+bool MipsMir2Lir::GenInlinedAbsDouble(CallInfo* info ATTRIBUTE_UNUSED) {
// TODO: add Mips implementation.
return false;
}
-bool MipsMir2Lir::GenInlinedSqrt(CallInfo* info) {
- UNUSED(info);
+bool MipsMir2Lir::GenInlinedSqrt(CallInfo* info ATTRIBUTE_UNUSED) {
return false;
}
@@ -408,27 +407,26 @@
return true;
}
-void MipsMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
- UNUSED(reg, target);
+void MipsMir2Lir::OpPcRelLoad(RegStorage reg ATTRIBUTE_UNUSED, LIR* target ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips";
UNREACHABLE();
}
-LIR* MipsMir2Lir::OpVldm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
+LIR* MipsMir2Lir::OpVldm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpVldm for Mips";
UNREACHABLE();
}
-LIR* MipsMir2Lir::OpVstm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
+LIR* MipsMir2Lir::OpVstm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpVstm for Mips";
UNREACHABLE();
}
-void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
- int first_bit, int second_bit) {
- UNUSED(lit);
+void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
+ RegLocation rl_result,
+ int lit ATTRIBUTE_UNUSED,
+ int first_bit,
+ int second_bit) {
RegStorage t_reg = AllocTemp();
OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
@@ -462,27 +460,28 @@
return OpCmpImmBranch(c_code, reg, 0, target);
}
-bool MipsMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
- RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
- LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips";
+bool MipsMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED,
+ RegLocation rl_src ATTRIBUTE_UNUSED,
+ RegLocation rl_dest ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unexpected use of smallLiteralDivRem in Mips";
UNREACHABLE();
}
-bool MipsMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(rl_src, rl_dest, lit);
+bool MipsMir2Lir::EasyMultiply(RegLocation rl_src ATTRIBUTE_UNUSED,
+ RegLocation rl_dest ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of easyMultiply in Mips";
UNREACHABLE();
}
-LIR* MipsMir2Lir::OpIT(ConditionCode cond, const char* guide) {
- UNUSED(cond, guide);
+LIR* MipsMir2Lir::OpIT(ConditionCode cond ATTRIBUTE_UNUSED, const char* guide ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpIT in Mips";
UNREACHABLE();
}
-void MipsMir2Lir::OpEndIT(LIR* it) {
- UNUSED(it);
+void MipsMir2Lir::OpEndIT(LIR* it ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpEndIT in Mips";
}
@@ -621,9 +620,12 @@
StoreValueWide(rl_dest, rl_result);
}
-void MipsMir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) {
- UNUSED(opcode);
+void MipsMir2Lir::GenDivRemLong(Instruction::Code opcode ATTRIBUTE_UNUSED,
+ RegLocation rl_dest,
+ RegLocation rl_src1,
+ RegLocation rl_src2,
+ bool is_div,
+ int flags) {
// TODO: Implement easy div/rem?
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
@@ -855,9 +857,11 @@
StoreValueWide(rl_dest, rl_result);
}
-void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_shift, int flags) {
- UNUSED(flags);
+void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
+ RegLocation rl_dest,
+ RegLocation rl_src1,
+ RegLocation rl_shift,
+ int flags ATTRIBUTE_UNUSED) {
if (!cu_->target64) {
// Default implementation is just to ignore the constant case.
GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index ec2475a..372fe2b 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -103,18 +103,15 @@
return ((value == 0) || IsUint<16>(value) || IsInt<16>(value));
}
-bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) {
- UNUSED(value);
+bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value ATTRIBUTE_UNUSED) {
return false; // TUNING
}
-bool MipsMir2Lir::InexpensiveConstantLong(int64_t value) {
- UNUSED(value);
+bool MipsMir2Lir::InexpensiveConstantLong(int64_t value ATTRIBUTE_UNUSED) {
return false; // TUNING
}
-bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) {
- UNUSED(value);
+bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value ATTRIBUTE_UNUSED) {
return false; // TUNING
}
@@ -520,21 +517,26 @@
return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}
-LIR* MipsMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
- MoveType move_type) {
- UNUSED(r_dest, r_base, offset, move_type);
+LIR* MipsMir2Lir::OpMovRegMem(RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_base ATTRIBUTE_UNUSED,
+ int offset ATTRIBUTE_UNUSED,
+ MoveType move_type ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
}
-LIR* MipsMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
- UNUSED(r_base, offset, r_src, move_type);
+LIR* MipsMir2Lir::OpMovMemReg(RegStorage r_base ATTRIBUTE_UNUSED,
+ int offset ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED,
+ MoveType move_type ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
}
-LIR* MipsMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
- UNUSED(op, cc, r_dest, r_src);
+LIR* MipsMir2Lir::OpCondRegReg(OpKind op ATTRIBUTE_UNUSED,
+ ConditionCode cc ATTRIBUTE_UNUSED,
+ RegStorage r_dest ATTRIBUTE_UNUSED,
+ RegStorage r_src ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS";
UNREACHABLE();
}
@@ -1031,14 +1033,14 @@
return store;
}
-LIR* MipsMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
- UNUSED(op, r_base, disp);
+LIR* MipsMir2Lir::OpMem(OpKind op ATTRIBUTE_UNUSED,
+ RegStorage r_base ATTRIBUTE_UNUSED,
+ int disp ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpMem for MIPS";
UNREACHABLE();
}
-LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
- UNUSED(cc, target);
+LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc ATTRIBUTE_UNUSED, LIR* target ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
UNREACHABLE();
}
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index c50246d..8da3863 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1411,8 +1411,7 @@
rl.ref ? RefCheck::kCheckRef : RefCheck::kCheckNotRef, FPCheck::kIgnoreFP, fail, report);
}
-size_t Mir2Lir::GetInstructionOffset(LIR* lir) {
- UNUSED(lir);
+size_t Mir2Lir::GetInstructionOffset(LIR* lir ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Unsupported GetInstructionOffset()";
UNREACHABLE();
}
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 4e3aab2..a0db1e8 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1463,8 +1463,7 @@
virtual bool InexpensiveConstantFloat(int32_t value) = 0;
virtual bool InexpensiveConstantLong(int64_t value) = 0;
virtual bool InexpensiveConstantDouble(int64_t value) = 0;
- virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
- UNUSED(opcode);
+ virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode ATTRIBUTE_UNUSED) {
return InexpensiveConstantInt(value);
}
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index c2fe553..6673ea8 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -851,8 +851,8 @@
InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
}
-Mir2Lir* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) {
- UNUSED(compilation_unit);
+Mir2Lir* QuickCompiler::GetCodeGenerator(CompilationUnit* cu,
+ void* compilation_unit ATTRIBUTE_UNUSED) {
Mir2Lir* mir_to_lir = nullptr;
switch (cu->instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 8ec86fa..d9d0434 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -320,15 +320,13 @@
}
// TODO: this is Thumb2 only. Remove when DoPromotion refactored.
-RegStorage Mir2Lir::AllocPreservedDouble(int s_reg) {
- UNUSED(s_reg);
+RegStorage Mir2Lir::AllocPreservedDouble(int s_reg ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedDouble";
UNREACHABLE();
}
// TODO: this is Thumb2 only. Remove when DoPromotion refactored.
-RegStorage Mir2Lir::AllocPreservedSingle(int s_reg) {
- UNUSED(s_reg);
+RegStorage Mir2Lir::AllocPreservedSingle(int s_reg ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedSingle";
UNREACHABLE();
}
@@ -1553,8 +1551,7 @@
return (lowSreg == INVALID_SREG) ? INVALID_SREG : lowSreg + 1;
}
-bool Mir2Lir::LiveOut(int s_reg) {
- UNUSED(s_reg);
+bool Mir2Lir::LiveOut(int s_reg ATTRIBUTE_UNUSED) {
// For now.
return true;
}
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 12523ac..64becb9 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -1629,8 +1629,8 @@
* instruction. In those cases we will try to substitute a new code
* sequence or request that the trace be shortened and retried.
*/
-AssemblerStatus X86Mir2Lir::AssembleInstructions(LIR* first_lir_insn, CodeOffset start_addr) {
- UNUSED(start_addr);
+AssemblerStatus X86Mir2Lir::AssembleInstructions(LIR* first_lir_insn,
+ CodeOffset start_addr ATTRIBUTE_UNUSED) {
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 8e81746..b11d41c 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -124,17 +124,17 @@
StoreValueWide(rl_dest, rl_result);
}
-void X86Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
- int32_t constant) {
+void X86Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int32_t constant ATTRIBUTE_UNUSED) {
// TODO: need x86 implementation.
- UNUSED(rl_dest, rl_src1, constant);
LOG(FATAL) << "Unimplemented GenMultiplyByConstantFloat in x86";
}
-void X86Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
- int64_t constant) {
+void X86Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1 ATTRIBUTE_UNUSED,
+ int64_t constant ATTRIBUTE_UNUSED) {
// TODO: need x86 implementation.
- UNUSED(rl_dest, rl_src1, constant);
LOG(FATAL) << "Unimplemented GenMultiplyByConstantDouble in x86";
}
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index ecd23e9..a8706c3 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -270,8 +270,7 @@
}
}
-void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
- UNUSED(bb);
+void X86Mir2Lir::GenSelect(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir) {
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
RegLocation rl_dest = mir_graph_->GetDest(mir);
@@ -597,8 +596,10 @@
shift = (is_long) ? p - 64 : p - 32;
}
-RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) {
- UNUSED(rl_dest, reg_lo, lit, is_div);
+RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegStorage reg_lo ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
UNREACHABLE();
}
@@ -766,16 +767,19 @@
return rl_result;
}
-RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
- bool is_div) {
- UNUSED(rl_dest, reg_lo, reg_hi, is_div);
+RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegStorage reg_lo ATTRIBUTE_UNUSED,
+ RegStorage reg_hi ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivRem for x86";
UNREACHABLE();
}
-RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) {
- UNUSED(rl_dest);
+RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest ATTRIBUTE_UNUSED,
+ RegLocation rl_src1,
+ RegLocation rl_src2,
+ bool is_div,
+ int flags) {
// We have to use fixed registers, so flush all the temps.
// Prepare for explicit register usage.
@@ -1449,22 +1453,21 @@
}
}
-LIR* X86Mir2Lir::OpVldm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
+LIR* X86Mir2Lir::OpVldm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpVldm for x86";
UNREACHABLE();
}
-LIR* X86Mir2Lir::OpVstm(RegStorage r_base, int count) {
- UNUSED(r_base, count);
+LIR* X86Mir2Lir::OpVstm(RegStorage r_base ATTRIBUTE_UNUSED, int count ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpVstm for x86";
UNREACHABLE();
}
void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
- RegLocation rl_result, int lit,
- int first_bit, int second_bit) {
- UNUSED(lit);
+ RegLocation rl_result,
+ int lit ATTRIBUTE_UNUSED,
+ int first_bit,
+ int second_bit) {
RegStorage t_reg = AllocTemp();
OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
@@ -1595,27 +1598,28 @@
return OpCondBranch(c_code, target);
}
-bool X86Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
- RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
- LOG(FATAL) << "Unexpected use of smallLiteralDive in x86";
+bool X86Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode ATTRIBUTE_UNUSED,
+ bool is_div ATTRIBUTE_UNUSED,
+ RegLocation rl_src ATTRIBUTE_UNUSED,
+ RegLocation rl_dest ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Unexpected use of smallLiteralDivRem in x86";
UNREACHABLE();
}
-bool X86Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
- UNUSED(rl_src, rl_dest, lit);
+bool X86Mir2Lir::EasyMultiply(RegLocation rl_src ATTRIBUTE_UNUSED,
+ RegLocation rl_dest ATTRIBUTE_UNUSED,
+ int lit ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of easyMultiply in x86";
UNREACHABLE();
}
-LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide) {
- UNUSED(cond, guide);
+LIR* X86Mir2Lir::OpIT(ConditionCode cond ATTRIBUTE_UNUSED, const char* guide ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpIT in x86";
UNREACHABLE();
}
-void X86Mir2Lir::OpEndIT(LIR* it) {
- UNUSED(it);
+void X86Mir2Lir::OpEndIT(LIR* it ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of OpEndIT in x86";
UNREACHABLE();
}
@@ -1634,8 +1638,10 @@
}
}
-void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int val) {
- UNUSED(sreg);
+void X86Mir2Lir::GenImulMemImm(RegStorage dest,
+ int sreg ATTRIBUTE_UNUSED,
+ int displacement,
+ int val) {
// All memory accesses below reference dalvik regs.
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2548,9 +2554,11 @@
}
}
-RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
- RegLocation rl_src, int shift_amount, int flags) {
- UNUSED(flags);
+RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
+ RegLocation rl_dest,
+ RegLocation rl_src,
+ int shift_amount,
+ int flags ATTRIBUTE_UNUSED) {
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
if (cu_->target64) {
OpKind op = static_cast<OpKind>(0); /* Make gcc happy */
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index c62cd47..25fb886 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -254,8 +254,7 @@
: RegStorage32FromSpecialTargetRegister_Target32[reg];
}
-RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
- UNUSED(reg);
+RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Do not use this function!!!";
UNREACHABLE();
}
@@ -861,8 +860,7 @@
}
// Not used in x86(-64)
-RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
- UNUSED(trampoline);
+RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of LoadHelper in x86";
UNREACHABLE();
}
@@ -2323,13 +2321,11 @@
}
}
-void X86Mir2Lir::GenPackedArrayGet(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
+void X86Mir2Lir::GenPackedArrayGet(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayGet not supported.";
}
-void X86Mir2Lir::GenPackedArrayPut(BasicBlock* bb, MIR* mir) {
- UNUSED(bb, mir);
+void X86Mir2Lir::GenPackedArrayPut(BasicBlock* bb ATTRIBUTE_UNUSED, MIR* mir ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayPut not supported.";
}
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index b16ae98..61354df 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -57,8 +57,7 @@
return res;
}
-bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
- UNUSED(value);
+bool X86Mir2Lir::InexpensiveConstantInt(int32_t value ATTRIBUTE_UNUSED) {
return true;
}
@@ -66,8 +65,7 @@
return value == 0;
}
-bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
- UNUSED(value);
+bool X86Mir2Lir::InexpensiveConstantLong(int64_t value ATTRIBUTE_UNUSED) {
return true;
}
@@ -942,9 +940,14 @@
return store;
}
-LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
- int offset, int check_value, LIR* target, LIR** compare) {
- UNUSED(temp_reg); // Comparison performed directly with memory.
+LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond,
+ // Comparison performed directly with memory.
+ RegStorage temp_reg ATTRIBUTE_UNUSED,
+ RegStorage base_reg,
+ int offset,
+ int check_value,
+ LIR* target,
+ LIR** compare) {
LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
offset, check_value);
if (compare != nullptr) {
@@ -1114,8 +1117,11 @@
return loc;
}
-LIR* X86Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
- UNUSED(r_tgt); // Call to absolute memory location doesn't need a temporary target register.
+LIR* X86Mir2Lir::InvokeTrampoline(OpKind op,
+ // Call to absolute memory location doesn't
+ // need a temporary target register.
+ RegStorage r_tgt ATTRIBUTE_UNUSED,
+ QuickEntrypointEnum trampoline) {
if (cu_->target64) {
return OpThreadMem(op, GetThreadOffset<8>(trampoline));
} else {
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index 6f2b234..65b0ad6 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -34,7 +34,6 @@
verified_methods_(),
rejected_classes_lock_("compiler rejected classes lock"),
rejected_classes_() {
- UNUSED(compiler_options);
}
VerificationResults::~VerificationResults() {
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index b6a40a2..a45df95 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -19,6 +19,7 @@
#include "art_method-inl.h"
#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
+#include "base/stringpiece.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "compiler_callbacks.h"
@@ -86,7 +87,37 @@
/* init_failure_output */ nullptr,
/* abort_on_hard_verifier_failure */ false));
const InstructionSet instruction_set = kRuntimeISA;
- instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
+ for (const StringPiece option : Runtime::Current()->GetCompilerOptions()) {
+ VLOG(compiler) << "JIT compiler option " << option;
+ std::string error_msg;
+ if (option.starts_with("--instruction-set-variant=")) {
+ StringPiece str = option.substr(strlen("--instruction-set-variant=")).data();
+ VLOG(compiler) << "JIT instruction set variant " << str;
+ instruction_set_features_.reset(InstructionSetFeatures::FromVariant(
+ instruction_set, str.as_string(), &error_msg));
+ if (instruction_set_features_ == nullptr) {
+ LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
+ }
+ } else if (option.starts_with("--instruction-set-features=")) {
+ StringPiece str = option.substr(strlen("--instruction-set-features=")).data();
+ VLOG(compiler) << "JIT instruction set features " << str;
+ if (instruction_set_features_.get() == nullptr) {
+ instruction_set_features_.reset(InstructionSetFeatures::FromVariant(
+ instruction_set, "default", &error_msg));
+ if (instruction_set_features_ == nullptr) {
+ LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
+ }
+ }
+ instruction_set_features_.reset(
+ instruction_set_features_->AddFeaturesFromString(str.as_string(), &error_msg));
+ if (instruction_set_features_ == nullptr) {
+ LOG(WARNING) << "Error parsing " << option << " message=" << error_msg;
+ }
+ }
+ }
+ if (instruction_set_features_ == nullptr) {
+ instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
+ }
cumulative_logger_.reset(new CumulativeLogger("jit times"));
verification_results_.reset(new VerificationResults(compiler_options_.get()));
method_inliner_map_.reset(new DexFileToMethodInlinerMap);
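
The new loop above dispatches on option prefixes with StringPiece::starts_with and substr, falling back to the CPP-define defaults when no option matched. A self-contained sketch of the same shape using std::string in place of art::StringPiece; the option values here are illustrative:

// Prefix dispatch over runtime compiler options, mirroring the hunk above.
#include <cstring>
#include <iostream>
#include <string>
#include <vector>

static bool StartsWith(const std::string& s, const char* prefix) {
  return s.compare(0, std::strlen(prefix), prefix) == 0;
}

int main() {
  std::vector<std::string> options = {
      "--instruction-set-variant=generic",
      "--instruction-set-features=default",
  };
  for (const std::string& option : options) {
    if (StartsWith(option, "--instruction-set-variant=")) {
      std::string str = option.substr(std::strlen("--instruction-set-variant="));
      std::cout << "variant: " << str << "\n";
    } else if (StartsWith(option, "--instruction-set-features=")) {
      std::string str = option.substr(std::strlen("--instruction-set-features="));
      std::cout << "features: " << str << "\n";
    }
  }
  return 0;
}
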
diff --git a/compiler/jni/jni_cfi_test_expected.inc b/compiler/jni/jni_cfi_test_expected.inc
index 5af2242..16b4386 100644
--- a/compiler/jni/jni_cfi_test_expected.inc
+++ b/compiler/jni/jni_cfi_test_expected.inc
@@ -394,76 +394,77 @@
// 0x0000006c: .cfi_def_cfa_offset: 64
static constexpr uint8_t expected_asm_kMips64[] = {
- 0xA0, 0xFF, 0xBD, 0x67, 0x58, 0x00, 0xBF, 0xFF, 0x50, 0x00, 0xBE, 0xFF,
- 0x48, 0x00, 0xBC, 0xFF, 0x40, 0x00, 0xB7, 0xFF, 0x38, 0x00, 0xB6, 0xFF,
- 0x30, 0x00, 0xB5, 0xFF, 0x28, 0x00, 0xB4, 0xFF, 0x20, 0x00, 0xB3, 0xFF,
- 0x18, 0x00, 0xB2, 0xFF, 0x00, 0x00, 0xA4, 0xFF, 0x68, 0x00, 0xA5, 0xAF,
- 0x6C, 0x00, 0xAE, 0xE7, 0x70, 0x00, 0xA7, 0xAF, 0x74, 0x00, 0xA8, 0xAF,
- 0xE0, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBD, 0x67, 0x18, 0x00, 0xB2, 0xDF,
- 0x20, 0x00, 0xB3, 0xDF, 0x28, 0x00, 0xB4, 0xDF, 0x30, 0x00, 0xB5, 0xDF,
- 0x38, 0x00, 0xB6, 0xDF, 0x40, 0x00, 0xB7, 0xDF, 0x48, 0x00, 0xBC, 0xDF,
- 0x50, 0x00, 0xBE, 0xDF, 0x58, 0x00, 0xBF, 0xDF, 0x60, 0x00, 0xBD, 0x67,
+ 0x90, 0xFF, 0xBD, 0x67, 0x68, 0x00, 0xBF, 0xFF, 0x60, 0x00, 0xBE, 0xFF,
+ 0x58, 0x00, 0xBC, 0xFF, 0x50, 0x00, 0xB7, 0xFF, 0x48, 0x00, 0xB6, 0xFF,
+ 0x40, 0x00, 0xB5, 0xFF, 0x38, 0x00, 0xB4, 0xFF, 0x30, 0x00, 0xB3, 0xFF,
+ 0x28, 0x00, 0xB2, 0xFF, 0x00, 0x00, 0xA4, 0xFF, 0x78, 0x00, 0xA5, 0xAF,
+ 0x7C, 0x00, 0xAE, 0xE7, 0x80, 0x00, 0xA7, 0xAF, 0x84, 0x00, 0xA8, 0xAF,
+ 0xE0, 0xFF, 0xBD, 0x67, 0x20, 0x00, 0xBD, 0x67, 0x28, 0x00, 0xB2, 0xDF,
+ 0x30, 0x00, 0xB3, 0xDF, 0x38, 0x00, 0xB4, 0xDF, 0x40, 0x00, 0xB5, 0xDF,
+ 0x48, 0x00, 0xB6, 0xDF, 0x50, 0x00, 0xB7, 0xDF, 0x58, 0x00, 0xBC, 0xDF,
+ 0x60, 0x00, 0xBE, 0xDF, 0x68, 0x00, 0xBF, 0xDF, 0x70, 0x00, 0xBD, 0x67,
0x09, 0x00, 0xE0, 0x03, 0x00, 0x00, 0x00, 0x00,
};
static constexpr uint8_t expected_cfi_kMips64[] = {
- 0x44, 0x0E, 0x60, 0x44, 0x9F, 0x02, 0x44, 0x9E, 0x04, 0x44, 0x9C, 0x06,
+ 0x44, 0x0E, 0x70, 0x44, 0x9F, 0x02, 0x44, 0x9E, 0x04, 0x44, 0x9C, 0x06,
0x44, 0x97, 0x08, 0x44, 0x96, 0x0A, 0x44, 0x95, 0x0C, 0x44, 0x94, 0x0E,
- 0x44, 0x93, 0x10, 0x44, 0x92, 0x12, 0x58, 0x0E, 0x80, 0x01, 0x44, 0x0E,
- 0x60, 0x0A, 0x44, 0xD2, 0x44, 0xD3, 0x44, 0xD4, 0x44, 0xD5, 0x44, 0xD6,
+ 0x44, 0x93, 0x10, 0x44, 0x92, 0x12, 0x58, 0x0E, 0x90, 0x01, 0x44, 0x0E,
+ 0x70, 0x0A, 0x44, 0xD2, 0x44, 0xD3, 0x44, 0xD4, 0x44, 0xD5, 0x44, 0xD6,
0x44, 0xD7, 0x44, 0xDC, 0x44, 0xDE, 0x44, 0xDF, 0x44, 0x0E, 0x00, 0x48,
- 0x0B, 0x0E, 0x60,
+ 0x0B, 0x0E, 0x70,
};
-// 0x00000000: daddiu r29, r29, -96
-// 0x00000004: .cfi_def_cfa_offset: 96
-// 0x00000004: sd r31, +88(r29)
+// 0x00000000: daddiu r29, r29, -112
+// 0x00000004: .cfi_def_cfa_offset: 112
+// 0x00000004: sd r31, +104(r29)
// 0x00000008: .cfi_offset: r31 at cfa-8
-// 0x00000008: sd r30, +80(r29)
+// 0x00000008: sd r30, +96(r29)
// 0x0000000c: .cfi_offset: r30 at cfa-16
-// 0x0000000c: sd r28, +72(r29)
+// 0x0000000c: sd r28, +88(r29)
// 0x00000010: .cfi_offset: r28 at cfa-24
-// 0x00000010: sd r23, +64(r29)
+// 0x00000010: sd r23, +80(r29)
// 0x00000014: .cfi_offset: r23 at cfa-32
-// 0x00000014: sd r22, +56(r29)
+// 0x00000014: sd r22, +72(r29)
// 0x00000018: .cfi_offset: r22 at cfa-40
-// 0x00000018: sd r21, +48(r29)
+// 0x00000018: sd r21, +64(r29)
// 0x0000001c: .cfi_offset: r21 at cfa-48
-// 0x0000001c: sd r20, +40(r29)
+// 0x0000001c: sd r20, +56(r29)
// 0x00000020: .cfi_offset: r20 at cfa-56
-// 0x00000020: sd r19, +32(r29)
+// 0x00000020: sd r19, +48(r29)
// 0x00000024: .cfi_offset: r19 at cfa-64
-// 0x00000024: sd r18, +24(r29)
+// 0x00000024: sd r18, +40(r29)
// 0x00000028: .cfi_offset: r18 at cfa-72
// 0x00000028: sd r4, +0(r29)
-// 0x0000002c: sw r5, +104(r29)
-// 0x00000030: swc1 f14, +108(r29)
-// 0x00000034: sw r7, +112(r29)
-// 0x00000038: sw r8, +116(r29)
+// 0x0000002c: sw r5, +120(r29)
+// 0x00000030: swc1 f14, +124(r29)
+// 0x00000034: sw r7, +128(r29)
+// 0x00000038: sw r8, +132(r29)
// 0x0000003c: daddiu r29, r29, -32
-// 0x00000040: .cfi_def_cfa_offset: 128
+// 0x00000040: .cfi_def_cfa_offset: 144
// 0x00000040: daddiu r29, r29, 32
-// 0x00000044: .cfi_def_cfa_offset: 96
+// 0x00000044: .cfi_def_cfa_offset: 112
// 0x00000044: .cfi_remember_state
-// 0x00000044: ld r18, +24(r29)
+// 0x00000044: ld r18, +40(r29)
// 0x00000048: .cfi_restore: r18
-// 0x00000048: ld r19, +32(r29)
+// 0x00000048: ld r19, +48(r29)
// 0x0000004c: .cfi_restore: r19
-// 0x0000004c: ld r20, +40(r29)
+// 0x0000004c: ld r20, +56(r29)
// 0x00000050: .cfi_restore: r20
-// 0x00000050: ld r21, +48(r29)
+// 0x00000050: ld r21, +64(r29)
// 0x00000054: .cfi_restore: r21
-// 0x00000054: ld r22, +56(r29)
+// 0x00000054: ld r22, +72(r29)
// 0x00000058: .cfi_restore: r22
-// 0x00000058: ld r23, +64(r29)
+// 0x00000058: ld r23, +80(r29)
// 0x0000005c: .cfi_restore: r23
-// 0x0000005c: ld r28, +72(r29)
+// 0x0000005c: ld r28, +88(r29)
// 0x00000060: .cfi_restore: r28
-// 0x00000060: ld r30, +80(r29)
+// 0x00000060: ld r30, +96(r29)
// 0x00000064: .cfi_restore: r30
-// 0x00000064: ld r31, +88(r29)
+// 0x00000064: ld r31, +104(r29)
// 0x00000068: .cfi_restore: r31
-// 0x00000068: daddiu r29, r29, 96
+// 0x00000068: daddiu r29, r29, 112
// 0x0000006c: .cfi_def_cfa_offset: 0
// 0x0000006c: jr r31
// 0x00000070: nop
// 0x00000074: .cfi_restore_state
-// 0x00000074: .cfi_def_cfa_offset: 96
+// 0x00000074: .cfi_def_cfa_offset: 112
+
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index ecf143d..2d31a98 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -183,7 +183,7 @@
}
size_t MipsJniCallingConvention::FrameSize() {
- // Method*, LR and callee save area size, local reference segment state
+ // ArtMethod*, RA and callee save area size, local reference segment state
size_t frame_data_size = kMipsPointerSize +
(2 + CalleeSaveRegisters().size()) * kFramePointerSize;
// References plus 2 words for HandleScope header
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
index 3a11bcf..807d740 100644
--- a/compiler/jni/quick/mips64/calling_convention_mips64.cc
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc
@@ -140,6 +140,7 @@
// Compute spill mask to agree with callee saves initialized in the constructor
uint32_t result = 0;
result = 1 << S2 | 1 << S3 | 1 << S4 | 1 << S5 | 1 << S6 | 1 << S7 | 1 << GP | 1 << S8 | 1 << RA;
+ DCHECK_EQ(static_cast<size_t>(POPCOUNT(result)), callee_save_regs_.size() + 1);
return result;
}
@@ -148,9 +149,9 @@
}
size_t Mips64JniCallingConvention::FrameSize() {
- // Mehtod* and callee save area size, local reference segment state
+ // ArtMethod*, RA and callee save area size, local reference segment state
size_t frame_data_size = kFramePointerSize +
- CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t);
+ (CalleeSaveRegisters().size() + 1) * kFramePointerSize + sizeof(uint32_t);
// References plus 2 words for HandleScope header
size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
// Plus return value spill area size
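
The fix above reserves one extra kFramePointerSize slot so RA's spill is counted in the frame, matching the DCHECK added to ComputeSpillMask (nine spill-mask bits, eight callee saves plus RA). A hedged arithmetic sketch with assumed mips64 constants; after 16-byte stack alignment the one-slot growth becomes +16, consistent with the 96 to 112 frame change in the kMips64 CFI expectations earlier in this patch.

// Hedged arithmetic for the FrameSize() change; the constants are
// assumptions (8-byte pointers, 16-byte stack alignment, eight callee
// saves besides RA).
#include <cstddef>
#include <cstdint>
#include <iostream>

static std::size_t RoundUp(std::size_t x, std::size_t n) {
  return (x + n - 1) / n * n;
}

int main() {
  const std::size_t kFramePointerSize = 8;
  const std::size_t kStackAlignment = 16;
  const std::size_t callee_saves = 8;  // S2-S7, GP, S8; RA now gets its own slot

  // Before: ArtMethod* slot + callee saves + 32-bit reference segment state.
  std::size_t before =
      kFramePointerSize + callee_saves * kFramePointerSize + sizeof(std::uint32_t);
  // After: one extra kFramePointerSize slot for RA.
  std::size_t after =
      kFramePointerSize + (callee_saves + 1) * kFramePointerSize + sizeof(std::uint32_t);

  std::cout << "frame data before: " << before << " (aligned "
            << RoundUp(before, kStackAlignment) << ")\n"
            << "frame data after:  " << after << " (aligned "
            << RoundUp(after, kStackAlignment) << ")\n";
  return 0;
}
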
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 9c7eab1..b6b11ca 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -38,8 +38,7 @@
return ManagedRegister::NoRegister(); // No free regs, so assembler uses push/pop
}
-static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
- UNUSED(jni);
+static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni ATTRIBUTE_UNUSED) {
if (shorty[0] == 'F' || shorty[0] == 'D') {
return X86_64ManagedRegister::FromXmmRegister(XMM0);
} else if (shorty[0] == 'J') {
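
ReturnRegisterForShorty above keys off the first character of the method shorty, which encodes the return type ('F'/'D' float or double, 'J' long, 'V' void). A hypothetical sketch of that dispatch with an invented RetReg enum; ART's actual ManagedRegister types are not reproduced here.

// Hypothetical stand-in for the shorty dispatch above; RetReg and the
// mapping for 'J' are invented for illustration.
enum class RetReg { kXmm0, kWideCore, kCore, kNone };

static RetReg ReturnRegisterForShorty(const char* shorty) {
  // The first shorty character encodes the return type.
  if (shorty[0] == 'F' || shorty[0] == 'D') {
    return RetReg::kXmm0;      // float/double come back in XMM0
  } else if (shorty[0] == 'J') {
    return RetReg::kWideCore;  // long: wide core-register return (simplified)
  } else if (shorty[0] == 'V') {
    return RetReg::kNone;      // void: nothing to return
  }
  return RetReg::kCore;        // everything else fits a core register
}

int main() {
  return ReturnRegisterForShorty("D") == RetReg::kXmm0 ? 0 : 1;
}
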
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index a82d09e..d6cb65b 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -281,7 +281,7 @@
// Offsets of the dex cache arrays for each app dex file. For the
// boot image, this information is provided by the ImageWriter.
- SafeMap<const DexFile*, size_t> dex_cache_arrays_offsets_;
+ SafeMap<const DexFile*, size_t> dex_cache_arrays_offsets_; // DexFiles not owned.
// Offset of the oat data from the start of the mmapped region of the elf file.
size_t oat_data_offset_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 358a35c..92a5878 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1141,8 +1141,7 @@
exit->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
- UNUSED(exit);
+void InstructionCodeGeneratorARM::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
void InstructionCodeGeneratorARM::GenerateCompareWithImmediate(Register left, int32_t right) {
@@ -1569,9 +1568,8 @@
load->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
+void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(load);
}
void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
@@ -1598,8 +1596,7 @@
}
}
-void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
- UNUSED(store);
+void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
@@ -1608,9 +1605,8 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
+void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitNullConstant(HNullConstant* constant) {
@@ -1619,9 +1615,8 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant) {
+void InstructionCodeGeneratorARM::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
@@ -1630,9 +1625,8 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
+void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
@@ -1641,9 +1635,8 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
+void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -1652,9 +1645,8 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
+void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
@@ -1669,8 +1661,7 @@
ret->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
- UNUSED(ret);
+void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -1680,8 +1671,7 @@
locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}
-void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
- UNUSED(ret);
+void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3327,8 +3317,7 @@
locations->SetOut(Location::Any());
}
-void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -4289,13 +4278,11 @@
temp->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
+void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(temp);
}
-void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
- UNUSED(instruction);
+void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -5343,15 +5330,13 @@
return DeduplicateMethodLiteral(target_method, &call_patches_);
}
-void LocationsBuilderARM::VisitBoundType(HBoundType* instruction) {
+void LocationsBuilderARM::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
-void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction) {
+void InstructionCodeGeneratorARM::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 37f10a5..f68b11b 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1330,8 +1330,7 @@
};
#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \
- void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) { \
- UNUSED(instr); \
+ void InstructionCodeGeneratorARM64::Visit##name(H##name* instr ATTRIBUTE_UNUSED) { \
__ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \
} \
void LocationsBuilderARM64::Visit##name(H##name* instr) { \
@@ -2183,8 +2182,8 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitDoubleConstant(HDoubleConstant* constant) {
- UNUSED(constant);
+void InstructionCodeGeneratorARM64::VisitDoubleConstant(
+ HDoubleConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
}
@@ -2192,8 +2191,7 @@
exit->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
- UNUSED(exit);
+void InstructionCodeGeneratorARM64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
@@ -2202,8 +2200,7 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant) {
- UNUSED(constant);
+void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
}
@@ -2689,9 +2686,8 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
+void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) {
@@ -2699,9 +2695,8 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitNullConstant(HNullConstant* constant) {
+void InstructionCodeGeneratorARM64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
@@ -3092,9 +3087,8 @@
load->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
+void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(load);
}
void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
@@ -3131,9 +3125,8 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
+void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
@@ -3400,8 +3393,7 @@
locations->SetOut(Location::Any());
}
-void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -3471,8 +3463,7 @@
locations->SetInAt(0, ARM64ReturnLocation(return_type));
}
-void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3480,8 +3471,7 @@
instruction->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3525,8 +3515,7 @@
}
}
-void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
- UNUSED(store);
+void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
void LocationsBuilderARM64::VisitSub(HSub* instruction) {
@@ -3643,9 +3632,8 @@
temp->SetLocations(nullptr);
}
-void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
+void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(temp);
}
void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
@@ -3744,15 +3732,13 @@
HandleBinaryOp(instruction);
}
-void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction) {
+void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
-void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction) {
+void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 8b33f56..963eec2 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1052,8 +1052,7 @@
exit->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86::VisitExit(HExit* exit) {
- UNUSED(exit);
+void InstructionCodeGeneratorX86::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
void InstructionCodeGeneratorX86::GenerateFPJumps(HCondition* cond,
@@ -1332,9 +1331,8 @@
local->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load) {
+void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(load);
}
void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) {
@@ -1361,8 +1359,7 @@
}
}
-void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store) {
- UNUSED(store);
+void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
void LocationsBuilderX86::VisitCondition(HCondition* cond) {
@@ -1544,9 +1541,8 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant) {
+void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitNullConstant(HNullConstant* constant) {
@@ -1555,9 +1551,8 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitNullConstant(HNullConstant* constant) {
+void InstructionCodeGeneratorX86::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
@@ -1566,9 +1561,8 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant) {
+void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
@@ -1577,9 +1571,8 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitFloatConstant(HFloatConstant* constant) {
+void InstructionCodeGeneratorX86::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -1588,9 +1581,8 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86::VisitDoubleConstant(HDoubleConstant* constant) {
+void InstructionCodeGeneratorX86::VisitDoubleConstant(HDoubleConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
@@ -1605,8 +1597,7 @@
ret->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret) {
- UNUSED(ret);
+void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3740,8 +3731,7 @@
locations->SetOut(Location::Any());
}
-void InstructionCodeGeneratorX86::VisitPhi(HPhi* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorX86::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -4739,13 +4729,11 @@
temp->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86::VisitTemporary(HTemporary* temp) {
+void InstructionCodeGeneratorX86::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(temp);
}
-void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction) {
- UNUSED(instruction);
+void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unreachable";
}
@@ -5668,15 +5656,13 @@
}
}
-void LocationsBuilderX86::VisitBoundType(HBoundType* instruction) {
+void LocationsBuilderX86::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
-void InstructionCodeGeneratorX86::VisitBoundType(HBoundType* instruction) {
+void InstructionCodeGeneratorX86::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index a2af4ba..ed2e4ca 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1050,8 +1050,7 @@
exit->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit) {
- UNUSED(exit);
+void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}
void InstructionCodeGeneratorX86_64::GenerateFPJumps(HCondition* cond,
@@ -1278,9 +1277,8 @@
local->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86_64::VisitLoadLocal(HLoadLocal* load) {
+void InstructionCodeGeneratorX86_64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(load);
}
void LocationsBuilderX86_64::VisitStoreLocal(HStoreLocal* store) {
@@ -1307,8 +1305,7 @@
}
}
-void InstructionCodeGeneratorX86_64::VisitStoreLocal(HStoreLocal* store) {
- UNUSED(store);
+void InstructionCodeGeneratorX86_64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}
void LocationsBuilderX86_64::VisitCondition(HCondition* cond) {
@@ -1613,9 +1610,8 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitNullConstant(HNullConstant* constant) {
@@ -1624,9 +1620,8 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitNullConstant(HNullConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
@@ -1635,9 +1630,8 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) {
@@ -1646,9 +1640,8 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitFloatConstant(HFloatConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -1657,9 +1650,9 @@
locations->SetOut(Location::ConstantLocation(constant));
}
-void InstructionCodeGeneratorX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
+void InstructionCodeGeneratorX86_64::VisitDoubleConstant(
+ HDoubleConstant* constant ATTRIBUTE_UNUSED) {
// Will be generated at use site.
- UNUSED(constant);
}
void LocationsBuilderX86_64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
@@ -1674,8 +1667,7 @@
ret->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret) {
- UNUSED(ret);
+void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
codegen_->GenerateFrameExit();
}
@@ -3629,8 +3621,7 @@
locations->SetOut(Location::Any());
}
-void InstructionCodeGeneratorX86_64::VisitPhi(HPhi* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorX86_64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unimplemented";
}
@@ -4450,13 +4441,11 @@
temp->SetLocations(nullptr);
}
-void InstructionCodeGeneratorX86_64::VisitTemporary(HTemporary* temp) {
+void InstructionCodeGeneratorX86_64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
// Nothing to do, this is driven by the code generator.
- UNUSED(temp);
}
-void LocationsBuilderX86_64::VisitParallelMove(HParallelMove* instruction) {
- UNUSED(instruction);
+void LocationsBuilderX86_64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unimplemented";
}
@@ -5331,15 +5320,13 @@
}
}
-void LocationsBuilderX86_64::VisitBoundType(HBoundType* instruction) {
+void LocationsBuilderX86_64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
-void InstructionCodeGeneratorX86_64::VisitBoundType(HBoundType* instruction) {
+void InstructionCodeGeneratorX86_64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, this should be removed during prepare for register allocator.
- UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index f545475..4abe5e9 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -206,7 +206,9 @@
if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() ||
instr->IsCompare() || instr->IsBoundsCheck()) {
// Uses aliases of ADD/SUB instructions.
- return vixl::Assembler::IsImmAddSub(value);
+ // If `value` does not fit but `-value` does, VIXL will automatically use
+ // the 'opposite' instruction.
+ return vixl::Assembler::IsImmAddSub(value) || vixl::Assembler::IsImmAddSub(-value);
} else if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
// Uses logical operations.
return vixl::Assembler::IsImmLogical(value, vixl::kXRegSize);
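The reasoning behind the IsImmAddSub change: an immediate that does not fit the ADD/SUB encoding may still be usable once negated, because VIXL rewrites such instructions into their opposite. A worked example with assumed operands:

    // add w0, w1, #-42 has no valid ADD immediate encoding, but VIXL
    // emits it as sub w0, w1, #42. Accepting IsImmAddSub(-value) lets
    // the constant stay as an instruction operand instead of being
    // materialized into a register first.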
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 764a114..fe16d00 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -43,6 +43,93 @@
return codegen_->GetGraph()->GetArena();
}
+#define __ codegen->GetAssembler()->
+
+static void MoveFromReturnRegister(Location trg,
+ Primitive::Type type,
+ CodeGeneratorMIPS64* codegen) {
+ if (!trg.IsValid()) {
+ DCHECK_EQ(type, Primitive::kPrimVoid);
+ return;
+ }
+
+ DCHECK_NE(type, Primitive::kPrimVoid);
+
+ if (Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) {
+ GpuRegister trg_reg = trg.AsRegister<GpuRegister>();
+ if (trg_reg != V0) {
+ __ Move(trg_reg, V0);
+ }
+ } else {
+ FpuRegister trg_reg = trg.AsFpuRegister<FpuRegister>();
+ if (trg_reg != F0) {
+ if (type == Primitive::kPrimFloat) {
+ __ MovS(trg_reg, F0);
+ } else {
+ __ MovD(trg_reg, F0);
+ }
+ }
+ }
+}
+
+static void MoveArguments(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
+ InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
+ IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
+}
+
+// Slow-path for fallback (calling the managed code to handle the
+// intrinsic) in an intrinsified call. This will copy the arguments
+// into the positions for a regular call.
+//
+// Note: The actual parameters are required to be in the locations
+// given by the invoke's location summary. If an intrinsic
+// modifies those locations before a slowpath call, they must be
+// restored!
+class IntrinsicSlowPathMIPS64 : public SlowPathCodeMIPS64 {
+ public:
+ explicit IntrinsicSlowPathMIPS64(HInvoke* invoke) : invoke_(invoke) { }
+
+ void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
+ CodeGeneratorMIPS64* codegen = down_cast<CodeGeneratorMIPS64*>(codegen_in);
+
+ __ Bind(GetEntryLabel());
+
+ SaveLiveRegisters(codegen, invoke_->GetLocations());
+
+ MoveArguments(invoke_, codegen);
+
+ if (invoke_->IsInvokeStaticOrDirect()) {
+ codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
+ Location::RegisterLocation(A0));
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
+ } else {
+ UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
+ UNREACHABLE();
+ }
+
+ // Copy the result back to the expected output.
+ Location out = invoke_->GetLocations()->Out();
+ if (out.IsValid()) {
+ DCHECK(out.IsRegister()); // TODO: Replace this when we support output in memory.
+ DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
+ MoveFromReturnRegister(out, invoke_->GetType(), codegen);
+ }
+
+ RestoreLiveRegisters(codegen, invoke_->GetLocations());
+ __ B(GetExitLabel());
+ }
+
+ const char* GetDescription() const OVERRIDE { return "IntrinsicSlowPathMIPS64"; }
+
+ private:
+ // The instruction where this slow path is happening.
+ HInvoke* const invoke_;
+
+ DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathMIPS64);
+};
+
+#undef __
+
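For readers new to the `__` convention used in this block: it is a file-local macro that routes assembler mnemonics through the current code generator's assembler, and it is undefined again once it is no longer needed. Roughly:

    #define __ codegen->GetAssembler()->
    // so that, inside EmitNativeCode(),
    //   __ Bind(GetEntryLabel());
    // expands to
    //   codegen->GetAssembler()->Bind(GetEntryLabel());
    #undef __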
bool IntrinsicLocationsBuilderMIPS64::TryDispatch(HInvoke* invoke) {
Dispatch(invoke);
LocationSummary* res = invoke->GetLocations();
@@ -185,7 +272,7 @@
GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
}
-static void GenCountZeroes(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) {
+static void GenNumberOfLeadingZeroes(LocationSummary* locations,
+ bool is64bit,
+ Mips64Assembler* assembler) {
GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
@@ -202,7 +289,7 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfLeadingZeros(HInvoke* invoke) {
- GenCountZeroes(invoke->GetLocations(), false, GetAssembler());
+ GenNumberOfLeadingZeroes(invoke->GetLocations(), false, GetAssembler());
}
// int java.lang.Long.numberOfLeadingZeros(long i)
@@ -211,7 +298,103 @@
}
void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfLeadingZeros(HInvoke* invoke) {
- GenCountZeroes(invoke->GetLocations(), true, GetAssembler());
+ GenNumberOfLeadingZeroes(invoke->GetLocations(), true, GetAssembler());
+}
+
+static void GenNumberOfTrailingZeroes(LocationSummary* locations,
+ bool is64bit,
+ Mips64Assembler* assembler) {
+ Location in = locations->InAt(0);
+ Location out = locations->Out();
+
+ if (is64bit) {
+ __ Dsbh(out.AsRegister<GpuRegister>(), in.AsRegister<GpuRegister>());
+ __ Dshd(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
+ __ Dbitswap(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
+ __ Dclz(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
+ } else {
+ __ Rotr(out.AsRegister<GpuRegister>(), in.AsRegister<GpuRegister>(), 16);
+ __ Wsbh(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
+ __ Bitswap(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
+ __ Clz(out.AsRegister<GpuRegister>(), out.AsRegister<GpuRegister>());
+ }
+}
+
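MIPS64R6 has no count-trailing-zeros instruction, so the helper reverses the bits (byte reordering via ROTR/WSBH or DSBH/DSHD, then BITSWAP to reverse the bits within each byte) and counts leading zeros instead. A minimal sketch of the identity being exploited, written out for 32-bit values:

    #include <cstdint>

    // Illustration only: ctz(x) == clz(reverse_bits(x)) for x != 0.
    static uint32_t ReverseBits32(uint32_t x) {
      uint32_t r = 0;
      for (int i = 0; i < 32; ++i) {
        r = (r << 1) | (x & 1);
        x >>= 1;
      }
      return r;
    }
    // Example: x = 0x00000008 -> reversed = 0x10000000, and
    // clz(0x10000000) == 3 == numberOfTrailingZeros(8).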
+// int java.lang.Integer.numberOfTrailingZeros(int i)
+void IntrinsicLocationsBuilderMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitIntegerNumberOfTrailingZeros(HInvoke* invoke) {
+ GenNumberOfTrailingZeroes(invoke->GetLocations(), false, GetAssembler());
+}
+
+// int java.lang.Long.numberOfTrailingZeros(long i)
+void IntrinsicLocationsBuilderMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitLongNumberOfTrailingZeros(HInvoke* invoke) {
+ GenNumberOfTrailingZeroes(invoke->GetLocations(), true, GetAssembler());
+}
+
+static void GenRotateRight(HInvoke* invoke,
+ Primitive::Type type,
+ Mips64Assembler* assembler) {
+ DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
+
+ LocationSummary* locations = invoke->GetLocations();
+ GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+
+ if (invoke->InputAt(1)->IsIntConstant()) {
+ uint32_t shift = static_cast<uint32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue());
+ if (type == Primitive::kPrimInt) {
+ shift &= 0x1f;
+ __ Rotr(out, in, shift);
+ } else {
+ shift &= 0x3f;
+ if (shift < 32) {
+ __ Drotr(out, in, shift);
+ } else {
+ shift &= 0x1f;
+ __ Drotr32(out, in, shift);
+ }
+ }
+ } else {
+ GpuRegister shamt = locations->InAt(1).AsRegister<GpuRegister>();
+ if (type == Primitive::kPrimInt) {
+ __ Rotrv(out, in, shamt);
+ } else {
+ __ Drotrv(out, in, shamt);
+ }
+ }
+}
+
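DROTR only encodes shift amounts 0-31, so 64-bit rotates by 32-63 go through DROTR32, which adds 32 to the encoded amount. A worked example with an assumed constant:

    // distance = 40 for a long rotate:
    //   shift = 40 & 0x3f = 40; since 40 >= 32, emit
    //   drotr32 out, in, (40 & 0x1f)  // encodes 8; hardware rotates by 8 + 32
    // For 32 <= shift < 64, (shift & 0x1f) == shift - 32, so the mask
    // doubles as the subtraction.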
+// int java.lang.Integer.rotateRight(int i, int distance)
+void IntrinsicLocationsBuilderMIPS64::VisitIntegerRotateRight(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitIntegerRotateRight(HInvoke* invoke) {
+ GenRotateRight(invoke, Primitive::kPrimInt, GetAssembler());
+}
+
+// long java.lang.Long.rotateRight(long i, int distance)
+void IntrinsicLocationsBuilderMIPS64::VisitLongRotateRight(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RegisterOrConstant(invoke->InputAt(1)));
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitLongRotateRight(HInvoke* invoke) {
+ GenRotateRight(invoke, Primitive::kPrimLong, GetAssembler());
}
static void GenReverse(LocationSummary* locations,
@@ -765,6 +948,505 @@
Thread::PeerOffset<kMips64PointerSize>().Int32Value());
}
+static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+static void GenUnsafeGet(HInvoke* invoke,
+ Primitive::Type type,
+ bool is_volatile,
+ CodeGeneratorMIPS64* codegen) {
+ LocationSummary* locations = invoke->GetLocations();
+ DCHECK((type == Primitive::kPrimInt) ||
+ (type == Primitive::kPrimLong) ||
+ (type == Primitive::kPrimNot));
+ Mips64Assembler* assembler = codegen->GetAssembler();
+ // Object pointer.
+ GpuRegister base = locations->InAt(1).AsRegister<GpuRegister>();
+ // Long offset.
+ GpuRegister offset = locations->InAt(2).AsRegister<GpuRegister>();
+ GpuRegister trg = locations->Out().AsRegister<GpuRegister>();
+
+ __ Daddu(TMP, base, offset);
+ if (is_volatile) {
+ __ Sync(0);
+ }
+ switch (type) {
+ case Primitive::kPrimInt:
+ __ Lw(trg, TMP, 0);
+ break;
+
+ case Primitive::kPrimNot:
+ __ Lwu(trg, TMP, 0);
+ break;
+
+ case Primitive::kPrimLong:
+ __ Ld(trg, TMP, 0);
+ break;
+
+ default:
+ LOG(FATAL) << "Unsupported op size " << type;
+ UNREACHABLE();
+ }
+}
+
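The volatile variants differ from the plain ones only by a full SYNC barrier ahead of the load. For a hypothetical getLongVolatile call, the emitted sequence is roughly:

    // daddu  TMP, base, offset   // TMP = o + offset
    // sync                       // full barrier, stype 0
    // ld     trg, 0(TMP)         // 64-bit load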
+// int sun.misc.Unsafe.getInt(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGet(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGet(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimInt, false, codegen_);
+}
+
+// int sun.misc.Unsafe.getIntVolatile(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetVolatile(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimInt, true, codegen_);
+}
+
+// long sun.misc.Unsafe.getLong(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLong(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimLong, false, codegen_);
+}
+
+// long sun.misc.Unsafe.getLongVolatile(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimLong, true, codegen_);
+}
+
+// Object sun.misc.Unsafe.getObject(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObject(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimNot, false, codegen_);
+}
+
+// Object sun.misc.Unsafe.getObjectVolatile(Object o, long offset)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
+ CreateIntIntIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
+ GenUnsafeGet(invoke, Primitive::kPrimNot, true, codegen_);
+}
+
+static void CreateIntIntIntIntToVoid(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::NoLocation()); // Unused receiver.
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetInAt(2, Location::RequiresRegister());
+ locations->SetInAt(3, Location::RequiresRegister());
+}
+
+static void GenUnsafePut(LocationSummary* locations,
+ Primitive::Type type,
+ bool is_volatile,
+ bool is_ordered,
+ CodeGeneratorMIPS64* codegen) {
+ DCHECK((type == Primitive::kPrimInt) ||
+ (type == Primitive::kPrimLong) ||
+ (type == Primitive::kPrimNot));
+ Mips64Assembler* assembler = codegen->GetAssembler();
+ // Object pointer.
+ GpuRegister base = locations->InAt(1).AsRegister<GpuRegister>();
+ // Long offset.
+ GpuRegister offset = locations->InAt(2).AsRegister<GpuRegister>();
+ GpuRegister value = locations->InAt(3).AsRegister<GpuRegister>();
+
+ __ Daddu(TMP, base, offset);
+ if (is_volatile || is_ordered) {
+ __ Sync(0);
+ }
+ switch (type) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimNot:
+ __ Sw(value, TMP, 0);
+ break;
+
+ case Primitive::kPrimLong:
+ __ Sd(value, TMP, 0);
+ break;
+
+ default:
+ LOG(FATAL) << "Unsupported op size " << type;
+ UNREACHABLE();
+ }
+ if (is_volatile) {
+ __ Sync(0);
+ }
+
+ if (type == Primitive::kPrimNot) {
+ codegen->MarkGCCard(base, value);
+ }
+}
+
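For reference stores the helper also emits the card-marking write barrier after the store, so the GC can locate the updated field. The sequence for putObjectVolatile, as emitted above, is roughly:

    // daddu  TMP, base, offset
    // sync                       // barrier before the volatile store
    // sw     value, 0(TMP)       // references are 32-bit heap words
    // sync                       // barrier after the volatile store
    // MarkGCCard(base, value)    // dirty the card covering `base`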
+// void sun.misc.Unsafe.putInt(Object o, long offset, int x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePut(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePut(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, false, false, codegen_);
+}
+
+// void sun.misc.Unsafe.putOrderedInt(Object o, long offset, int x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutOrdered(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, false, true, codegen_);
+}
+
+// void sun.misc.Unsafe.putIntVolatile(Object o, long offset, int x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutVolatile(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, true, false, codegen_);
+}
+
+// void sun.misc.Unsafe.putObject(Object o, long offset, Object x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObject(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, false, false, codegen_);
+}
+
+// void sun.misc.Unsafe.putOrderedObject(Object o, long offset, Object x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, false, true, codegen_);
+}
+
+// void sun.misc.Unsafe.putObjectVolatile(Object o, long offset, Object x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, true, false, codegen_);
+}
+
+// void sun.misc.Unsafe.putLong(Object o, long offset, long x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLong(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, false, false, codegen_);
+}
+
+// void sun.misc.Unsafe.putOrderedLong(Object o, long offset, long x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongOrdered(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, false, true, codegen_);
+}
+
+// void sun.misc.Unsafe.putLongVolatile(Object o, long offset, long x)
+void IntrinsicLocationsBuilderMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
+ CreateIntIntIntIntToVoid(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitUnsafePutLongVolatile(HInvoke* invoke) {
+ GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, true, false, codegen_);
+}
+
+// char java.lang.String.charAt(int index)
+void IntrinsicLocationsBuilderMIPS64::VisitStringCharAt(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringCharAt(HInvoke* invoke) {
+ LocationSummary* locations = invoke->GetLocations();
+ Mips64Assembler* assembler = GetAssembler();
+
+ // Location of reference to data array
+ const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
+ // Location of count
+ const int32_t count_offset = mirror::String::CountOffset().Int32Value();
+
+ GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
+ GpuRegister idx = locations->InAt(1).AsRegister<GpuRegister>();
+ GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+
+ // TODO: Maybe we can support range check elimination. Overall,
+ // though, I think it's not worth the cost.
+ // TODO: For simplicity, the index parameter is requested in a
+ // register, so unlike Quick we will not optimize the
+ // code for constants (which would save a register).
+
+ SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ // Load the string size
+ __ Lw(TMP, obj, count_offset);
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // Take the slow path if idx is too large or negative.
+ __ Bgeuc(idx, TMP, slow_path->GetEntryLabel());
+
+ // out = obj[2*idx].
+ __ Sll(TMP, idx, 1); // idx * 2
+ __ Daddu(TMP, TMP, obj); // Address of char at location idx
+ __ Lhu(out, TMP, value_offset); // Load char at location idx
+
+ __ Bind(slow_path->GetExitLabel());
+}
+
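BGEUC compares unsigned, so the single branch covers both failure modes of the bounds check: a negative index reinterprets as a huge unsigned value and is caught by the same test. For instance:

    // idx = -1  -> 0xffffffff unsigned -> >= length -> slow path
    // idx = len -> equal, unsigned     -> >= length -> slow path
    // 0 <= idx < len falls through to the fast path.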
+// int java.lang.String.compareTo(String anotherString)
+void IntrinsicLocationsBuilderMIPS64::VisitStringCompareTo(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringCompareTo(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+ GpuRegister argument = locations->InAt(1).AsRegister<GpuRegister>();
+ SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ Beqzc(argument, slow_path->GetEntryLabel());
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize,
+ pStringCompareTo).Int32Value());
+ __ Jalr(TMP);
+ __ Nop();
+ __ Bind(slow_path->GetExitLabel());
+}
+
+static void GenerateStringIndexOf(HInvoke* invoke,
+ Mips64Assembler* assembler,
+ CodeGeneratorMIPS64* codegen,
+ ArenaAllocator* allocator,
+ bool start_at_zero) {
+ LocationSummary* locations = invoke->GetLocations();
+ GpuRegister tmp_reg = start_at_zero ? locations->GetTemp(0).AsRegister<GpuRegister>() : TMP;
+
+ // Note that the null check must have been done earlier.
+ DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));
+
+ // Check for code points > 0xFFFF. Either a slow-path check when we
+ // don't know statically, or directly dispatch if we have a constant.
+ SlowPathCodeMIPS64* slow_path = nullptr;
+ if (invoke->InputAt(1)->IsIntConstant()) {
+ if (!IsUint<16>(invoke->InputAt(1)->AsIntConstant()->GetValue())) {
+ // Always needs the slow-path. We could directly dispatch to it,
+ // but this case should be rare, so for simplicity just put the
+ // full slow-path down and branch unconditionally.
+ slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ B(slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ return;
+ }
+ } else {
+ GpuRegister char_reg = locations->InAt(1).AsRegister<GpuRegister>();
+ __ LoadConst32(tmp_reg, std::numeric_limits<uint16_t>::max());
+ slow_path = new (allocator) IntrinsicSlowPathMIPS64(invoke);
+ codegen->AddSlowPath(slow_path);
+ __ Bltuc(tmp_reg, char_reg, slow_path->GetEntryLabel()); // UTF-16 required
+ }
+
+ if (start_at_zero) {
+ DCHECK_EQ(tmp_reg, A2);
+ // Start-index = 0.
+ __ Clear(tmp_reg);
+ } else {
+ __ Slt(TMP, A2, ZERO); // if fromIndex < 0
+ __ Seleqz(A2, A2, TMP); // fromIndex = 0
+ }
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pIndexOf).Int32Value());
+ __ Jalr(TMP);
+ __ Nop();
+
+ if (slow_path != nullptr) {
+ __ Bind(slow_path->GetExitLabel());
+ }
+}
+
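The 0xFFFF check exists because the entrypoint only searches for UTF-16 code units; supplementary code points need the managed fallback. With assumed call sites:

    // s.indexOf(0x61)     constant fits in 16 bits -> fast path.
    // s.indexOf(0x1F600)  constant > 0xFFFF -> branch straight to the
    //                     slow path (rare, so no fast handling).
    // s.indexOf(ch)       runtime check: bltuc tmp(=0xFFFF), ch, slow_path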
+// int java.lang.String.indexOf(int ch)
+void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOf(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ // We have a hand-crafted assembly stub that follows the runtime
+ // calling convention. So it's best to align the inputs accordingly.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+
+ // Need a temp for slow-path codepoint compare, and need to send start-index=0.
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOf(HInvoke* invoke) {
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), true);
+}
+
+// int java.lang.String.indexOf(int ch, int fromIndex)
+void IntrinsicLocationsBuilderMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ // We have a hand-crafted assembly stub that follows the runtime
+ // calling convention. So it's best to align the inputs accordingly.
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringIndexOfAfter(HInvoke* invoke) {
+ GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), false);
+}
+
+// java.lang.String.String(byte[] bytes)
+void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ GpuRegister byte_array = locations->InAt(0).AsRegister<GpuRegister>();
+ SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ Beqzc(byte_array, slow_path->GetEntryLabel());
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pAllocStringFromBytes).Int32Value());
+ __ Jalr(TMP);
+ __ Nop();
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Bind(slow_path->GetExitLabel());
+}
+
+// java.lang.String.String(char[] value)
+void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromChars(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pAllocStringFromChars).Int32Value());
+ __ Jalr(TMP);
+ __ Nop();
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+// java.lang.String.String(String original)
+void IntrinsicLocationsBuilderMIPS64::VisitStringNewStringFromString(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCall,
+ kIntrinsified);
+ InvokeRuntimeCallingConvention calling_convention;
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+ Location outLocation = calling_convention.GetReturnLocation(Primitive::kPrimInt);
+ locations->SetOut(Location::RegisterLocation(outLocation.AsRegister<GpuRegister>()));
+}
+
+void IntrinsicCodeGeneratorMIPS64::VisitStringNewStringFromString(HInvoke* invoke) {
+ Mips64Assembler* assembler = GetAssembler();
+ LocationSummary* locations = invoke->GetLocations();
+
+ GpuRegister string_to_copy = locations->InAt(0).AsRegister<GpuRegister>();
+ SlowPathCodeMIPS64* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS64(invoke);
+ codegen_->AddSlowPath(slow_path);
+ __ Beqzc(string_to_copy, slow_path->GetEntryLabel());
+
+ __ LoadFromOffset(kLoadDoubleword,
+ TMP,
+ TR,
+ QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, pAllocStringFromString).Int32Value());
+ __ Jalr(TMP);
+ __ Nop();
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ __ Bind(slow_path->GetExitLabel());
+}
+
// Unimplemented intrinsics.
#define UNIMPLEMENTED_INTRINSIC(Name) \
@@ -776,38 +1458,12 @@
UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(MathRoundFloat)
-UNIMPLEMENTED_INTRINSIC(UnsafeGet)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetLong)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetLongVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetObject)
-UNIMPLEMENTED_INTRINSIC(UnsafeGetObjectVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafePut)
-UNIMPLEMENTED_INTRINSIC(UnsafePutOrdered)
-UNIMPLEMENTED_INTRINSIC(UnsafePutVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafePutObject)
-UNIMPLEMENTED_INTRINSIC(UnsafePutObjectOrdered)
-UNIMPLEMENTED_INTRINSIC(UnsafePutObjectVolatile)
-UNIMPLEMENTED_INTRINSIC(UnsafePutLong)
-UNIMPLEMENTED_INTRINSIC(UnsafePutLongOrdered)
-UNIMPLEMENTED_INTRINSIC(UnsafePutLongVolatile)
UNIMPLEMENTED_INTRINSIC(UnsafeCASInt)
UNIMPLEMENTED_INTRINSIC(UnsafeCASLong)
UNIMPLEMENTED_INTRINSIC(UnsafeCASObject)
-UNIMPLEMENTED_INTRINSIC(StringCharAt)
-UNIMPLEMENTED_INTRINSIC(StringCompareTo)
UNIMPLEMENTED_INTRINSIC(StringEquals)
-UNIMPLEMENTED_INTRINSIC(StringIndexOf)
-UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
-UNIMPLEMENTED_INTRINSIC(StringNewStringFromBytes)
-UNIMPLEMENTED_INTRINSIC(StringNewStringFromChars)
-UNIMPLEMENTED_INTRINSIC(StringNewStringFromString)
UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
-UNIMPLEMENTED_INTRINSIC(LongRotateRight)
-UNIMPLEMENTED_INTRINSIC(LongNumberOfTrailingZeros)
UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
-UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
-UNIMPLEMENTED_INTRINSIC(IntegerNumberOfTrailingZeros)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 2426f8b..939e62c 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1799,8 +1799,7 @@
return true;
}
- virtual bool CanDoImplicitNullCheckOn(HInstruction* obj) const {
- UNUSED(obj);
+ virtual bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const {
return false;
}
@@ -1917,16 +1916,14 @@
virtual bool CanBeMoved() const { return false; }
// Returns whether the two instructions are of the same kind.
- virtual bool InstructionTypeEquals(HInstruction* other) const {
- UNUSED(other);
+ virtual bool InstructionTypeEquals(HInstruction* other ATTRIBUTE_UNUSED) const {
return false;
}
// Returns whether any data encoded in the two instructions is equal.
// This method does not look at the inputs. Both instructions must be
// of the same type, otherwise the method has undefined behavior.
- virtual bool InstructionDataEquals(HInstruction* other) const {
- UNUSED(other);
+ virtual bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const {
return false;
}
@@ -2489,8 +2486,7 @@
Primitive::Type GetResultType() const { return GetType(); }
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -2560,8 +2556,7 @@
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -3392,8 +3387,7 @@
target_method_(target_method),
dispatch_info_(dispatch_info) {}
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
- UNUSED(obj);
+ bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
// We access the method via the dex cache so we can't do an implicit null check.
// TODO: for intrinsics we can generate implicit null checks.
return false;
@@ -3831,8 +3825,7 @@
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -4102,8 +4095,7 @@
: HUnaryOperation(result_type, input, dex_pc) {}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -4128,8 +4120,7 @@
: HUnaryOperation(Primitive::Type::kPrimBoolean, input, dex_pc) {}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -4295,8 +4286,7 @@
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -4441,12 +4431,10 @@
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
- UNUSED(obj);
+ bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
// TODO: We can be smarter here.
// Currently, the array access is always preceded by an ArrayLength or a NullCheck
// which generates the implicit null check. There are cases when these can be removed
@@ -4494,8 +4482,7 @@
// Can throw ArrayStoreException.
bool CanThrow() const OVERRIDE { return needs_type_check_; }
- bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
- UNUSED(obj);
+ bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
// TODO: Same as for ArrayGet.
return false;
}
@@ -4558,8 +4545,7 @@
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
@@ -4582,8 +4568,7 @@
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -4797,8 +4782,7 @@
}
bool CanBeMoved() const OVERRIDE { return true; }
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- UNUSED(other);
+ bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
}
@@ -5416,7 +5400,7 @@
explicit HGraphVisitor(HGraph* graph) : graph_(graph) {}
virtual ~HGraphVisitor() {}
- virtual void VisitInstruction(HInstruction* instruction) { UNUSED(instruction); }
+ virtual void VisitInstruction(HInstruction* instruction ATTRIBUTE_UNUSED) {}
virtual void VisitBasicBlock(HBasicBlock* block);
// Visit the graph following basic block insertion order.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index c7f0806..17a4743 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -666,7 +666,6 @@
jobject class_loader,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache) const {
- UNUSED(invoke_type);
std::string method_name = PrettyMethod(method_idx, dex_file);
MaybeRecordStat(MethodCompilationStat::kAttemptCompilation);
CompilerDriver* compiler_driver = GetCompilerDriver();
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index f7a7e42..a1feaf7 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -756,7 +756,9 @@
while (!worklist_.empty()) {
HInstruction* instruction = worklist_.back();
worklist_.pop_back();
- if (UpdateNullability(instruction) || UpdateReferenceTypeInfo(instruction)) {
+ bool updated_nullability = UpdateNullability(instruction);
+ bool updated_reference_type = UpdateReferenceTypeInfo(instruction);
+ if (updated_nullability || updated_reference_type) {
AddDependentInstructionsToWorklist(instruction);
}
}
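The rewrite matters because `||` short-circuits: in the old form, UpdateReferenceTypeInfo() never ran on an iteration where UpdateNullability() returned true, so its side effect could be delayed or lost. A minimal illustration:

    // Short-circuit: b() never runs when a() returns true.
    if (a() || b()) { /* ... */ }

    // Both side effects always run; the branch condition is unchanged.
    bool ra = a();
    bool rb = b();
    if (ra || rb) { /* ... */ }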
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index f1c0b92..d97a2a4 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -301,7 +301,7 @@
}
// TODO: Implement with disassembler.
- virtual void Comment(const char* format, ...) { UNUSED(format); }
+ virtual void Comment(const char* format ATTRIBUTE_UNUSED, ...) {}
// Emit code that will create an activation on the stack
virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h
index b30f7d7..f1233ca 100644
--- a/compiler/utils/assembler_test.h
+++ b/compiler/utils/assembler_test.h
@@ -83,6 +83,15 @@
fmt);
}
+ std::string RepeatRRNoDupes(void (Ass::*f)(Reg, Reg), std::string fmt) {
+ return RepeatTemplatedRegistersNoDupes<Reg, Reg>(f,
+ GetRegisters(),
+ GetRegisters(),
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>,
+ fmt);
+ }
+
std::string Repeatrr(void (Ass::*f)(Reg, Reg), std::string fmt) {
return RepeatTemplatedRegisters<Reg, Reg>(f,
GetRegisters(),
@@ -608,6 +617,45 @@
return str;
}
+ template <typename Reg1, typename Reg2>
+ std::string RepeatTemplatedRegistersNoDupes(void (Ass::*f)(Reg1, Reg2),
+ const std::vector<Reg1*> reg1_registers,
+ const std::vector<Reg2*> reg2_registers,
+ std::string (AssemblerTest::*GetName1)(const Reg1&),
+ std::string (AssemblerTest::*GetName2)(const Reg2&),
+ std::string fmt) {
+ WarnOnCombinations(reg1_registers.size() * reg2_registers.size());
+
+ std::string str;
+ for (auto reg1 : reg1_registers) {
+ for (auto reg2 : reg2_registers) {
+ if (reg1 == reg2) continue;
+ (assembler_.get()->*f)(*reg1, *reg2);
+ std::string base = fmt;
+
+ std::string reg1_string = (this->*GetName1)(*reg1);
+ size_t reg1_index;
+ while ((reg1_index = base.find(REG1_TOKEN)) != std::string::npos) {
+ base.replace(reg1_index, ConstexprStrLen(REG1_TOKEN), reg1_string);
+ }
+
+ std::string reg2_string = (this->*GetName2)(*reg2);
+ size_t reg2_index;
+ while ((reg2_index = base.find(REG2_TOKEN)) != std::string::npos) {
+ base.replace(reg2_index, ConstexprStrLen(REG2_TOKEN), reg2_string);
+ }
+
+ if (str.size() > 0) {
+ str += "\n";
+ }
+ str += base;
+ }
+ }
+ // Add a newline at the end.
+ str += "\n";
+ return str;
+ }
+
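A note on why this NoDupes variant exists: some two-register instructions cannot be tested with identical operands. The Jalr test added later in this change is the first user; MIPS requires rs != rd for jalr, since re-executing the instruction after an exception would jump to the freshly written return address. A self-contained sketch of just the duplicate filter (toy types and the hypothetical name AllPairsNoDupes, not the real harness):

    #include <utility>
    #include <vector>

    // Enumerate ordered register pairs, skipping reg1 == reg2 so that
    // cases like "jalr $t0, $t0" are never emitted.
    std::vector<std::pair<int, int>> AllPairsNoDupes(const std::vector<int>& regs) {
      std::vector<std::pair<int, int>> pairs;
      for (int reg1 : regs) {
        for (int reg2 : regs) {
          if (reg1 == reg2) continue;
          pairs.emplace_back(reg1, reg2);
        }
      }
      return pairs;  // n * (n - 1) pairs instead of n * n
    }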
template <typename Reg1, typename Reg2, typename Reg3>
std::string RepeatTemplatedRegisters(void (Ass::*f)(Reg1, Reg2, Reg3),
const std::vector<Reg1*> reg1_registers,
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index b078f3e..00e8995 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -335,6 +335,10 @@
EmitR(0, rs, rt, rd, 0, 0x04);
}
+void Mips64Assembler::Rotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
+ EmitR(0, rs, rt, rd, 1, 0x06);
+}
+
void Mips64Assembler::Srlv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
EmitR(0, rs, rt, rd, 0, 0x06);
}
@@ -351,6 +355,10 @@
EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3a);
}
+void Mips64Assembler::Drotr(GpuRegister rd, GpuRegister rt, int shamt) {
+ EmitR(0, static_cast<GpuRegister>(1), rt, rd, shamt, 0x3a);
+}
+
void Mips64Assembler::Dsra(GpuRegister rd, GpuRegister rt, int shamt) {
EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3b);
}
@@ -363,6 +371,10 @@
EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3e);
}
+void Mips64Assembler::Drotr32(GpuRegister rd, GpuRegister rt, int shamt) {
+ EmitR(0, static_cast<GpuRegister>(1), rt, rd, shamt, 0x3e);
+}
+
void Mips64Assembler::Dsra32(GpuRegister rd, GpuRegister rt, int shamt) {
EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3f);
}
@@ -375,6 +387,10 @@
EmitR(0, rs, rt, rd, 0, 0x16);
}
+void Mips64Assembler::Drotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
+ EmitR(0, rs, rt, rd, 1, 0x16);
+}
+
void Mips64Assembler::Dsrav(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
EmitR(0, rs, rt, rd, 0, 0x17);
}
@@ -773,6 +789,10 @@
EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x21);
}
+void Mips64Assembler::Cvtsl(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x15, static_cast<FpuRegister>(0), fs, fd, 0x20);
+}
+
void Mips64Assembler::Cvtdl(FpuRegister fd, FpuRegister fs) {
EmitFR(0x11, 0x15, static_cast<FpuRegister>(0), fs, fd, 0x21);
}
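For readers decoding the EmitR calls above: the new rotate instructions reuse the corresponding shift function codes and flag rotation in an otherwise-zero field. Rotrv/Drotrv pass 1 in the shamt position where Srlv/Dsrlv pass 0, and Drotr/Drotr32 pass 1 in the rs position where Dsrl/Dsrl32 pass 0. A sketch of the packing, assuming EmitR follows the standard MIPS R-type field order its parameter list suggests (PackRType is an illustrative name):

    #include <cstdint>

    // Standard MIPS R-type layout: op(6) rs(5) rt(5) rd(5) shamt(5) funct(6).
    uint32_t PackRType(uint32_t op, uint32_t rs, uint32_t rt,
                       uint32_t rd, uint32_t shamt, uint32_t funct) {
      return (op << 26) | (rs << 21) | (rt << 16) |
             (rd << 11) | (shamt << 6) | funct;
    }
    // PackRType(0, rs, rt, rd, 0, 0x06) encodes srlv  rd, rt, rs;
    // PackRType(0, rs, rt, rd, 1, 0x06) encodes rotrv rd, rt, rs.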
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index d083eb4..33f22d2 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -123,15 +123,19 @@
void Sra(GpuRegister rd, GpuRegister rt, int shamt);
void Sllv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
void Srlv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
+ void Rotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
void Srav(GpuRegister rd, GpuRegister rt, GpuRegister rs);
void Dsll(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
void Dsrl(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
+ void Drotr(GpuRegister rd, GpuRegister rt, int shamt);
void Dsra(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
void Dsll32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
void Dsrl32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
+ void Drotr32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
void Dsra32(GpuRegister rd, GpuRegister rt, int shamt); // MIPS64
void Dsllv(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
void Dsrlv(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
+ void Drotrv(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
void Dsrav(GpuRegister rd, GpuRegister rt, GpuRegister rs); // MIPS64
void Lb(GpuRegister rt, GpuRegister rs, uint16_t imm16);
@@ -230,6 +234,7 @@
void Cvtdw(FpuRegister fd, FpuRegister fs);
void Cvtsd(FpuRegister fd, FpuRegister fs);
void Cvtds(FpuRegister fd, FpuRegister fs);
+ void Cvtsl(FpuRegister fd, FpuRegister fs);
void Cvtdl(FpuRegister fd, FpuRegister fs);
void Mfc1(GpuRegister rt, FpuRegister fs);
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 2071aca..16f29b0 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -215,6 +215,22 @@
DriverStr(RepeatFF(&mips64::Mips64Assembler::AbsD, "abs.d ${reg1}, ${reg2}"), "abs.d");
}
+TEST_F(AssemblerMIPS64Test, MovS) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::MovS, "mov.s ${reg1}, ${reg2}"), "mov.s");
+}
+
+TEST_F(AssemblerMIPS64Test, MovD) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::MovD, "mov.d ${reg1}, ${reg2}"), "mov.d");
+}
+
+TEST_F(AssemblerMIPS64Test, NegS) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::NegS, "neg.s ${reg1}, ${reg2}"), "neg.s");
+}
+
+TEST_F(AssemblerMIPS64Test, NegD) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::NegD, "neg.d ${reg1}, ${reg2}"), "neg.d");
+}
+
TEST_F(AssemblerMIPS64Test, RoundLS) {
DriverStr(RepeatFF(&mips64::Mips64Assembler::RoundLS, "round.l.s ${reg1}, ${reg2}"), "round.l.s");
}
@@ -307,6 +323,34 @@
DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtdl, "cvt.d.l ${reg1}, ${reg2}"), "cvt.d.l");
}
+TEST_F(AssemblerMIPS64Test, CvtDS) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtds, "cvt.d.s ${reg1}, ${reg2}"), "cvt.d.s");
+}
+
+TEST_F(AssemblerMIPS64Test, CvtDW) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtdw, "cvt.d.w ${reg1}, ${reg2}"), "cvt.d.w");
+}
+
+TEST_F(AssemblerMIPS64Test, CvtSL) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtsl, "cvt.s.l ${reg1}, ${reg2}"), "cvt.s.l");
+}
+
+TEST_F(AssemblerMIPS64Test, CvtSD) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtsd, "cvt.s.d ${reg1}, ${reg2}"), "cvt.s.d");
+}
+
+TEST_F(AssemblerMIPS64Test, CvtSW) {
+ DriverStr(RepeatFF(&mips64::Mips64Assembler::Cvtsw, "cvt.s.w ${reg1}, ${reg2}"), "cvt.s.w");
+}
+
+////////////////
+// CALL / JMP //
+////////////////
+
+TEST_F(AssemblerMIPS64Test, Jalr) {
+ DriverStr(RepeatRRNoDupes(&mips64::Mips64Assembler::Jalr, "jalr ${reg1}, ${reg2}"), "jalr");
+}
+
//////////
// MISC //
//////////
@@ -319,6 +363,14 @@
DriverStr(RepeatRR(&mips64::Mips64Assembler::Dbitswap, "dbitswap ${reg1}, ${reg2}"), "dbitswap");
}
+TEST_F(AssemblerMIPS64Test, Seb) {
+ DriverStr(RepeatRR(&mips64::Mips64Assembler::Seb, "seb ${reg1}, ${reg2}"), "seb");
+}
+
+TEST_F(AssemblerMIPS64Test, Seh) {
+ DriverStr(RepeatRR(&mips64::Mips64Assembler::Seh, "seh ${reg1}, ${reg2}"), "seh");
+}
+
TEST_F(AssemblerMIPS64Test, Dsbh) {
DriverStr(RepeatRR(&mips64::Mips64Assembler::Dsbh, "dsbh ${reg1}, ${reg2}"), "dsbh");
}
@@ -331,6 +383,42 @@
DriverStr(RepeatRR(&mips64::Mips64Assembler::Wsbh, "wsbh ${reg1}, ${reg2}"), "wsbh");
}
+TEST_F(AssemblerMIPS64Test, Sll) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sll, 5, "sll ${reg1}, ${reg2}, {imm}"), "sll");
+}
+
+TEST_F(AssemblerMIPS64Test, Srl) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Srl, 5, "srl ${reg1}, ${reg2}, {imm}"), "srl");
+}
+
+TEST_F(AssemblerMIPS64Test, Sra) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sra, 5, "sra ${reg1}, ${reg2}, {imm}"), "sra");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsll) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll, 5, "dsll ${reg1}, ${reg2}, {imm}"), "dsll");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsrl) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl, 5, "dsrl ${reg1}, ${reg2}, {imm}"), "dsrl");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsra) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra, 5, "dsra ${reg1}, ${reg2}, {imm}"), "dsra");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsll32) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll32, 5, "dsll32 ${reg1}, ${reg2}, {imm}"), "dsll32");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsrl32) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl32, 5, "dsrl32 ${reg1}, ${reg2}, {imm}"), "dsrl32");
+}
+
+TEST_F(AssemblerMIPS64Test, Dsra32) {
+ DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra32, 5, "dsra32 ${reg1}, ${reg2}, {imm}"), "dsra32");
+}
+
TEST_F(AssemblerMIPS64Test, Sc) {
DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sc, -9, "sc ${reg1}, {imm}(${reg2})"), "sc");
}
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index d09631b..631b784 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -891,7 +891,110 @@
ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
// Generate the allocation entrypoints for each allocator.
-GENERATE_ALL_ALLOC_ENTRYPOINTS
+GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc).
+ENTRY art_quick_alloc_object_rosalloc
+ // Fast path rosalloc allocation.
+ // r0: type_idx/return value, r1: ArtMethod*, r9: Thread::Current
+ // r2, r3, r12: free.
+ ldr r2, [r1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_32] // Load dex cache resolved types array
+ // Load the class (r2)
+ ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
+ cbz r2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class
+ // Check class status.
+ ldr r3, [r2, #MIRROR_CLASS_STATUS_OFFSET]
+ cmp r3, #MIRROR_CLASS_STATUS_INITIALIZED
+ bne .Lart_quick_alloc_object_rosalloc_slow_path
+ // Add a fake dependence from the
+ // following access flag and size
+ // loads to the status load.
+ // This is to prevent those loads
+ // from being reordered above the
+ // status load and reading wrong
+ // values (an alternative is to use
+ // a load-acquire for the status).
+ eor r3, r3, r3
+ add r2, r2, r3
+ // Check if the class's access flags
+ // have kAccClassIsFinalizable set.
+ ldr r3, [r2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
+ tst r3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE
+ bne .Lart_quick_alloc_object_rosalloc_slow_path
+
+ ldr r3, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
+ // allocation stack has room.
+ // TODO: consider using ldrd.
+ ldr r12, [r9, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
+ cmp r3, r12
+ bhs .Lart_quick_alloc_object_rosalloc_slow_path
+
+ ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (r3)
+ cmp r3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
+ // local allocation
+ bhs .Lart_quick_alloc_object_rosalloc_slow_path
+ // Compute the rosalloc bracket index
+ // from the size.
+ // Round the size up to the rosalloc
+ // bracket quantum size, divide by
+ // the quantum size and subtract 1.
+ // The code below is a shorter,
+ // equivalent form of that.
+ sub r3, r3, #1
+ lsr r3, r3, #ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT
+ // Load the rosalloc run (r12)
+ add r12, r9, r3, lsl #POINTER_SIZE_SHIFT
+ ldr r12, [r12, #THREAD_ROSALLOC_RUNS_OFFSET]
+ // Load the free list head (r3). This
+ // will be the return val.
+ ldr r3, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
+ cbz r3, .Lart_quick_alloc_object_rosalloc_slow_path
+ // "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
+ ldr r1, [r3, #ROSALLOC_SLOT_NEXT_OFFSET] // Load the next pointer of the head
+ // and update the list head with the
+ // next pointer.
+ str r1, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
+ // Store the class pointer in the
+ // header. This also overwrites the
+ // next pointer. The offsets are
+ // asserted to match.
+#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
+#error "Class pointer needs to overwrite next pointer."
+#endif
+ POISON_HEAP_REF r2
+ str r2, [r3, #MIRROR_OBJECT_CLASS_OFFSET]
+ // Push the new object onto the thread
+ // local allocation stack and
+ // increment the thread local
+ // allocation stack top.
+ ldr r1, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
+ str r3, [r1], #COMPRESSED_REFERENCE_SIZE // (Increment r1 as a side effect.)
+ str r1, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
+ // Decrement the size of the free list
+ ldr r1, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
+ sub r1, #1
+ // TODO: consider combining this store
+ // and the list head store above using
+ // strd.
+ str r1, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
+ // Fence. This is "ish" not "ishst" so
+ // that the code after this allocation
+ // site will see the right values in
+ // the fields of the class.
+ // Alternatively we could use "ishst"
+ // if we use load-acquire for the
+ // class status load.
+ dmb ish
+ mov r0, r3 // Set the return value and return.
+ bx lr
+
+.Lart_quick_alloc_object_rosalloc_slow_path:
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r2, r3 @ save callee saves in case of GC
+ mov r2, r9 @ pass Thread::Current
+ bl artAllocObjectFromCodeRosAlloc @ (uint32_t type_idx, Method* method, Thread*)
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_alloc_object_rosalloc
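To follow the fast path above in one piece: once the class is known to be initialized and non-finalizable (the eor/add pair manufactures a data dependency so the later loads cannot float above the status check), the allocation is a plain pop from a thread-local free list. A simplified C++ rendering, as a sketch only: RosAllocFastPath is a hypothetical name, the structs are toy stand-ins laid out to match the offsets asserted in asm_support.h, and atomicity, heap-reference poisoning, the allocation-stack push, and the fence are all omitted:

    #include <cstddef>
    #include <cstdint>

    struct Slot { Slot* next; };                         // next at offset 0
    struct FreeList { Slot* head; void* tail; uint32_t size; };
    struct Run { uint64_t header; FreeList free_list; }; // free_list at offset 8

    constexpr size_t kMaxThreadLocalBracketSize = 128;   // ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE
    constexpr size_t kBracketQuantumSizeShift = 4;       // ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT

    // Returns the new object's storage, or nullptr to take the slow path.
    void* RosAllocFastPath(Run* const* thread_local_runs,  // Thread::tlsPtr_.rosalloc_runs
                           size_t object_size,
                           bool alloc_stack_has_room) {
      if (!alloc_stack_has_room) return nullptr;
      if (object_size >= kMaxThreadLocalBracketSize) return nullptr;
      // (size - 1) >> shift rounds up to the bracket quantum and converts
      // to a bracket index in one step, e.g. 24 -> 1, the 32-byte bracket.
      size_t bracket = (object_size - 1) >> kBracketQuantumSizeShift;
      Run* run = thread_local_runs[bracket];
      Slot* slot = run->free_list.head;
      if (slot == nullptr) return nullptr;               // current run exhausted
      run->free_list.head = slot->next;                  // pop the list head
      run->free_list.size--;
      // The class-pointer store at offset 0 then overwrites slot->next,
      // and "dmb ish" publishes the initialized object.
      return slot;
    }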
/*
* Called by managed code when the value in rSUSPEND has been decremented to 0.
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 68156ae..66c8aad 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1615,5 +1615,70 @@
move $a0, rSELF # pass Thread::current
END art_quick_deoptimize_from_compiled_code
-UNIMPLEMENTED art_quick_indexof
-UNIMPLEMENTED art_quick_string_compareto
+ .set push
+ .set noat
+/* java.lang.String.compareTo(String anotherString) */
+ENTRY_NO_GP art_quick_string_compareto
+/* $a0 holds address of "this" */
+/* $a1 holds address of "anotherString" */
+ beq $a0,$a1,9f # this and anotherString are the same object
+ move $v0,$zero
+
+ lw $a2,MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
+ lw $a3,MIRROR_STRING_COUNT_OFFSET($a1) # anotherString.length()
+ sltu $at,$a2,$a3
+ seleqz $t2,$a3,$at
+ selnez $at,$a2,$at
+ or $t2,$t2,$at # $t2 now holds min(this.length(),anotherString.length())
+
+ beqz $t2,9f # while min(this.length(),anotherString.length())-i != 0
+ subu $v0,$a2,$a3 # if $t2==0 return
+ # (this.length() - anotherString.length())
+1:
+ lhu $t0,MIRROR_STRING_VALUE_OFFSET($a0) # while this.charAt(i) == anotherString.charAt(i)
+ lhu $t1,MIRROR_STRING_VALUE_OFFSET($a1)
+ bne $t0,$t1,9f # if this.charAt(i) != anotherString.charAt(i)
+ subu $v0,$t0,$t1 # return (this.charAt(i) - anotherString.charAt(i))
+ daddiu $a0,$a0,2 # point at this.charAt(i++)
+ subu $t2,$t2,1 # new value of
+ # min(this.length(),anotherString.length())-i
+ bnez $t2,1b
+ daddiu $a1,$a1,2 # point at anotherString.charAt(i++)
+ subu $v0,$a2,$a3
+
+9:
+ j $ra
+ nop
+END art_quick_string_compareto
+
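The loop above is easier to audit against a straight-line version. A C++ sketch of the same algorithm (StringCompareTo is an illustrative name; it assumes, as the MIRROR_STRING_VALUE_OFFSET loads do, that the character data is inline UTF-16; the sltu/seleqz/selnez/or sequence is a branchless min, rendered here as a plain conditional):

    #include <cstdint>

    int32_t StringCompareTo(const uint16_t* lhs, int32_t lhs_len,
                            const uint16_t* rhs, int32_t rhs_len) {
      if (lhs == rhs) return 0;  // same object ("beq $a0,$a1,9f")
      int32_t min_len = lhs_len < rhs_len ? lhs_len : rhs_len;
      for (int32_t i = 0; i < min_len; ++i) {
        if (lhs[i] != rhs[i]) {
          return lhs[i] - rhs[i];  // first differing char decides
        }
      }
      return lhs_len - rhs_len;    // one string is a prefix of the other
    }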
+/* java.lang.String.indexOf(int ch, int fromIndex=0) */
+ENTRY_NO_GP art_quick_indexof
+/* $a0 holds address of "this" */
+/* $a1 holds "ch", the char value to search for */
+/* $a2 holds "fromIndex", the index to start the search from */
+ lw $t0,MIRROR_STRING_COUNT_OFFSET($a0) # this.length()
+ subu $t0,$t0,$a2 # this.length() - offset
+ blez $t0,6f # if this.length()-offset <= 0
+ li $v0,-1 # return -1;
+
+ sll $v0,$a2,1 # $a0 += $a2 * 2
+ daddu $a0,$a0,$v0 # " " " " "
+ move $v0,$a2 # Set i to offset.
+
+1:
+ lhu $t3,MIRROR_STRING_VALUE_OFFSET($a0) # if this.charAt(i) == ch
+ beq $t3,$a1,6f # return i;
+ daddu $a0,$a0,2 # point at this.charAt(i+1)
+ subu $t0,$t0,1 # this.length() - i
+ bnez $t0,1b # while this.length() - i > 0
+ addu $v0,$v0,1 # i++
+
+ li $v0,-1 # if this.length() - i <= 0
+ # return -1;
+
+6:
+ j $ra
+ nop
+END art_quick_indexof
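And the matching sketch for the indexOf loop, under the same assumptions (StringIndexOf is an illustrative name; like the assembly, it does not clamp a negative fromIndex):

    #include <cstdint>

    int32_t StringIndexOf(const uint16_t* chars, int32_t length,
                          uint16_t ch, int32_t from_index) {
      // "blez $t0,6f" guards the length - fromIndex <= 0 case up front.
      for (int32_t i = from_index; i < length; ++i) {
        if (chars[i] == ch) return i;
      }
      return -1;
    }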
+
+ .set pop
diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S
index ef5edbb..fbacdbc 100644
--- a/runtime/arch/quick_alloc_entrypoints.S
+++ b/runtime/arch/quick_alloc_entrypoints.S
@@ -113,7 +113,8 @@
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_dlmalloc_instrumented, DlMallocInstrumented)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_dlmalloc_instrumented, DlMallocInstrumented)
-GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
+// This is to be separately defined for each architecture to allow a hand-written assembly fast path.
+// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc, RosAlloc)
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 4a106e4..2f485ae 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -788,6 +788,7 @@
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB)
ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 5c413d2..95f0ccb 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -809,6 +809,7 @@
// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_rosalloc, RosAlloc)
+// A hand-written override for GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT(_tlab, TLAB).
DEFINE_FUNCTION art_quick_alloc_object_tlab
// Fast path tlab allocation.
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index d98fc51..69f6fe9 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -19,6 +19,7 @@
#if defined(__cplusplus)
#include "art_method.h"
+#include "gc/allocator/rosalloc.h"
#include "lock_word.h"
#include "mirror/class.h"
#include "mirror/string.h"
@@ -53,6 +54,14 @@
#define ADD_TEST_EQ(x, y)
#endif
+#if defined(__LP64__)
+#define POINTER_SIZE_SHIFT 3
+#else
+#define POINTER_SIZE_SHIFT 2
+#endif
+ADD_TEST_EQ(static_cast<size_t>(1U << POINTER_SIZE_SHIFT),
+ static_cast<size_t>(__SIZEOF_POINTER__))
+
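The reason sizes are also exported as shifts: assembly can then use scaled addressing instead of a multiply, as the ARM fast path above does with "add r12, r9, r3, lsl #POINTER_SIZE_SHIFT". A C++ rendering of that addressing pattern (sketch; LoadPointerSlot is an illustrative name):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kPointerSizeShift = sizeof(void*) == 8 ? 3 : 2;  // POINTER_SIZE_SHIFT

    // base + (index << shift) + field_offset: indexing an array of
    // pointer-sized slots without a multiply instruction.
    uintptr_t LoadPointerSlot(uintptr_t base, size_t index, size_t field_offset) {
      return *reinterpret_cast<const uintptr_t*>(
          base + (index << kPointerSizeShift) + field_offset);
    }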
// Size of references to the heap on the stack.
#define STACK_REFERENCE_SIZE 4
ADD_TEST_EQ(static_cast<size_t>(STACK_REFERENCE_SIZE), sizeof(art::StackReference<art::mirror::Object>))
@@ -62,6 +71,10 @@
ADD_TEST_EQ(static_cast<size_t>(COMPRESSED_REFERENCE_SIZE),
sizeof(art::mirror::CompressedReference<art::mirror::Object>))
+#define COMPRESSED_REFERENCE_SIZE_SHIFT 2
+ADD_TEST_EQ(static_cast<size_t>(1U << COMPRESSED_REFERENCE_SIZE_SHIFT),
+ static_cast<size_t>(COMPRESSED_REFERENCE_SIZE))
+
// Note: these callee save methods loads require read barriers.
// Offset of field Runtime::callee_save_methods_[kSaveAll]
#define RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET 0
@@ -120,6 +133,18 @@
#define THREAD_LOCAL_OBJECTS_OFFSET (THREAD_LOCAL_POS_OFFSET + 2 * __SIZEOF_POINTER__)
ADD_TEST_EQ(THREAD_LOCAL_OBJECTS_OFFSET,
art::Thread::ThreadLocalObjectsOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.rosalloc_runs.
+#define THREAD_ROSALLOC_RUNS_OFFSET (THREAD_LOCAL_POS_OFFSET + 3 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_ROSALLOC_RUNS_OFFSET,
+ art::Thread::RosAllocRunsOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.thread_local_alloc_stack_top.
+#define THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 34 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET,
+ art::Thread::ThreadLocalAllocStackTopOffset<__SIZEOF_POINTER__>().Int32Value())
+// Offset of field Thread::tlsPtr_.thread_local_alloc_stack_end.
+#define THREAD_LOCAL_ALLOC_STACK_END_OFFSET (THREAD_ROSALLOC_RUNS_OFFSET + 35 * __SIZEOF_POINTER__)
+ADD_TEST_EQ(THREAD_LOCAL_ALLOC_STACK_END_OFFSET,
+ art::Thread::ThreadLocalAllocStackEndOffset<__SIZEOF_POINTER__>().Int32Value())
// Offsets within java.lang.Object.
#define MIRROR_OBJECT_CLASS_OFFSET 0
@@ -236,6 +261,44 @@
ADD_TEST_EQ(static_cast<uint32_t>(OBJECT_ALIGNMENT_MASK_TOGGLED),
~static_cast<uint32_t>(art::kObjectAlignment - 1))
+#define ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE 128
+ADD_TEST_EQ(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::kMaxThreadLocalBracketSize))
+
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT 4
+ADD_TEST_EQ(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::kBracketQuantumSizeShift))
+
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK 15
+ADD_TEST_EQ(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::kBracketQuantumSize - 1))
+
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32 0xfffffff0
+ADD_TEST_EQ(static_cast<uint32_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED32),
+ ~static_cast<uint32_t>(art::gc::allocator::RosAlloc::kBracketQuantumSize - 1))
+
+#define ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64 0xfffffffffffffff0
+ADD_TEST_EQ(static_cast<uint64_t>(ROSALLOC_BRACKET_QUANTUM_SIZE_MASK_TOGGLED64),
+ ~static_cast<uint64_t>(art::gc::allocator::RosAlloc::kBracketQuantumSize - 1))
+
+#define ROSALLOC_RUN_FREE_LIST_OFFSET 8
+ADD_TEST_EQ(ROSALLOC_RUN_FREE_LIST_OFFSET,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::RunFreeListOffset()))
+
+#define ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET 0
+ADD_TEST_EQ(ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::RunFreeListHeadOffset()))
+
+#define ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET 16
+ADD_TEST_EQ(ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::RunFreeListSizeOffset()))
+
+#define ROSALLOC_SLOT_NEXT_OFFSET 0
+ADD_TEST_EQ(ROSALLOC_SLOT_NEXT_OFFSET,
+ static_cast<int32_t>(art::gc::allocator::RosAlloc::RunSlotNextOffset()))
+// Assert this so that storing the class pointer overwrites the next field too, making a
+// separate zeroing store unnecessary.
+ADD_TEST_EQ(ROSALLOC_SLOT_NEXT_OFFSET, MIRROR_OBJECT_CLASS_OFFSET)
+
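A compressed illustration of what that last assert buys (toy structs, not ART types): the free-slot link and the object's class field share offset 0, so a single class store both initializes the header and destroys the stale link.

    #include <cstddef>
    #include <cstdint>

    struct FreeSlot { FreeSlot* next; };          // ROSALLOC_SLOT_NEXT_OFFSET == 0
    struct ObjectHeader { uint32_t klass_ref; };  // MIRROR_OBJECT_CLASS_OFFSET == 0
    static_assert(offsetof(FreeSlot, next) == offsetof(ObjectHeader, klass_ref),
                  "the class store must overwrite the free-list link");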
#if defined(__cplusplus)
} // End of CheckAsmSupportOffsets.
#endif
diff --git a/runtime/base/allocator.cc b/runtime/base/allocator.cc
index 4f2fc07..f1d0a5f 100644
--- a/runtime/base/allocator.cc
+++ b/runtime/base/allocator.cc
@@ -49,15 +49,13 @@
explicit NoopAllocator() {}
~NoopAllocator() {}
- void* Alloc(size_t size) {
- UNUSED(size);
+ void* Alloc(size_t size ATTRIBUTE_UNUSED) {
LOG(FATAL) << "NoopAllocator::Alloc should not be called";
UNREACHABLE();
}
- void Free(void* p) {
+ void Free(void* p ATTRIBUTE_UNUSED) {
// Noop.
- UNUSED(p);
}
private:
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index 3422625..f9960ac 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -115,9 +115,7 @@
// Used internally by STL data structures.
template <class U>
- TrackingAllocatorImpl(const TrackingAllocatorImpl<U, kTag>& alloc) noexcept {
- UNUSED(alloc);
- }
+ TrackingAllocatorImpl(const TrackingAllocatorImpl<U, kTag>& alloc ATTRIBUTE_UNUSED) noexcept {}
// Used internally by STL data structures.
TrackingAllocatorImpl() noexcept {
@@ -131,8 +129,7 @@
typedef TrackingAllocatorImpl<U, kTag> other;
};
- pointer allocate(size_type n, const_pointer hint = 0) {
- UNUSED(hint);
+ pointer allocate(size_type n, const_pointer hint ATTRIBUTE_UNUSED = 0) {
const size_t size = n * sizeof(T);
TrackedAllocators::RegisterAllocation(GetTag(), size);
return reinterpret_cast<pointer>(malloc(size));
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 004895a..4e9282f 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -119,13 +119,13 @@
ArenaAllocatorStatsImpl(const ArenaAllocatorStatsImpl& other) = default;
ArenaAllocatorStatsImpl& operator = (const ArenaAllocatorStatsImpl& other) = delete;
- void Copy(const ArenaAllocatorStatsImpl& other) { UNUSED(other); }
- void RecordAlloc(size_t bytes, ArenaAllocKind kind) { UNUSED(bytes, kind); }
+ void Copy(const ArenaAllocatorStatsImpl& other ATTRIBUTE_UNUSED) {}
+ void RecordAlloc(size_t bytes ATTRIBUTE_UNUSED, ArenaAllocKind kind ATTRIBUTE_UNUSED) {}
size_t NumAllocations() const { return 0u; }
size_t BytesAllocated() const { return 0u; }
- void Dump(std::ostream& os, const Arena* first, ssize_t lost_bytes_adjustment) const {
- UNUSED(os); UNUSED(first); UNUSED(lost_bytes_adjustment);
- }
+ void Dump(std::ostream& os ATTRIBUTE_UNUSED,
+ const Arena* first ATTRIBUTE_UNUSED,
+ ssize_t lost_bytes_adjustment ATTRIBUTE_UNUSED) const {}
};
template <bool kCount>
diff --git a/runtime/base/arena_containers.h b/runtime/base/arena_containers.h
index 9174d2d..e2d4c24 100644
--- a/runtime/base/arena_containers.h
+++ b/runtime/base/arena_containers.h
@@ -176,8 +176,8 @@
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
- pointer allocate(size_type n, ArenaAllocatorAdapter<void>::pointer hint = nullptr) {
- UNUSED(hint);
+ pointer allocate(size_type n,
+ ArenaAllocatorAdapter<void>::pointer hint ATTRIBUTE_UNUSED = nullptr) {
DCHECK_LE(n, max_size());
return arena_allocator_->AllocArray<T>(n, ArenaAllocatorAdapterKind::Kind());
}
diff --git a/runtime/base/debug_stack.h b/runtime/base/debug_stack.h
index 03f4575..e19aecb 100644
--- a/runtime/base/debug_stack.h
+++ b/runtime/base/debug_stack.h
@@ -54,7 +54,7 @@
template <>
class DebugStackReferenceImpl<false> {
public:
- explicit DebugStackReferenceImpl(DebugStackRefCounterImpl<false>* counter) { UNUSED(counter); }
+ explicit DebugStackReferenceImpl(DebugStackRefCounterImpl<false>* counter ATTRIBUTE_UNUSED) {}
DebugStackReferenceImpl(const DebugStackReferenceImpl& other) = default;
DebugStackReferenceImpl& operator=(const DebugStackReferenceImpl& other) = default;
void CheckTop() { }
@@ -63,7 +63,7 @@
template <>
class DebugStackIndirectTopRefImpl<false> {
public:
- explicit DebugStackIndirectTopRefImpl(DebugStackReferenceImpl<false>* ref) { UNUSED(ref); }
+ explicit DebugStackIndirectTopRefImpl(DebugStackReferenceImpl<false>* ref ATTRIBUTE_UNUSED) {}
DebugStackIndirectTopRefImpl(const DebugStackIndirectTopRefImpl& other) = default;
DebugStackIndirectTopRefImpl& operator=(const DebugStackIndirectTopRefImpl& other) = default;
void CheckTop() { }
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index f2b1cc0..4819f06 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -127,8 +127,8 @@
using size_type = size_t;
using difference_type = ptrdiff_t;
- static constexpr double kDefaultMinLoadFactor = 0.5;
- static constexpr double kDefaultMaxLoadFactor = 0.9;
+ static constexpr double kDefaultMinLoadFactor = 0.4;
+ static constexpr double kDefaultMaxLoadFactor = 0.7;
static constexpr size_t kMinBuckets = 1000;
// If we don't own the data, this will create a new array which owns the data.
@@ -138,14 +138,18 @@
elements_until_expand_ = 0;
}
- HashSet()
+ HashSet() : HashSet(kDefaultMinLoadFactor, kDefaultMaxLoadFactor) {}
+
+ HashSet(double min_load_factor, double max_load_factor)
: num_elements_(0u),
num_buckets_(0u),
elements_until_expand_(0u),
owns_data_(false),
data_(nullptr),
- min_load_factor_(kDefaultMinLoadFactor),
- max_load_factor_(kDefaultMaxLoadFactor) {
+ min_load_factor_(min_load_factor),
+ max_load_factor_(max_load_factor) {
+ DCHECK_GT(min_load_factor, 0.0);
+ DCHECK_LT(max_load_factor, 1.0);
}
explicit HashSet(const allocator_type& alloc)
@@ -459,6 +463,31 @@
return errors;
}
+ double GetMinLoadFactor() const {
+ return min_load_factor_;
+ }
+
+ double GetMaxLoadFactor() const {
+ return max_load_factor_;
+ }
+
+ // Change the load factor of the hash set. If the current load factor is greater than the max
+ // specified, then we resize the hash table storage.
+ void SetLoadFactor(double min_load_factor, double max_load_factor) {
+ DCHECK_LT(min_load_factor, max_load_factor);
+ DCHECK_GT(min_load_factor, 0.0);
+ DCHECK_LT(max_load_factor, 1.0);
+ min_load_factor_ = min_load_factor;
+ max_load_factor_ = max_load_factor;
+ elements_until_expand_ = NumBuckets() * max_load_factor_;
+ // If the current load factor exceeds the new maximum, resize so that the load factor
+ // becomes the mean of the minimum and maximum load factors.
+ const double load_factor = CalculateLoadFactor();
+ if (load_factor > max_load_factor_) {
+ Resize(Size() / ((min_load_factor_ + max_load_factor_) * 0.5));
+ }
+ }
+
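A worked example of the resize target (the numbers are hypothetical): with 700 elements in 1000 buckets the load factor is 0.7. After SetLoadFactor(0.1, 0.3), the 0.7 > 0.3 check fires and the table resizes so the load factor lands on the midpoint of the requested range:

    // load     = Size() / NumBuckets()        = 700 / 1000 = 0.7
    // 0.7 > max_load_factor_ (0.3), so:
    // Resize(Size() / ((min + max) * 0.5))    = Resize(700 / 0.2) = Resize(3500)
    // new load = 700 / 3500                   = 0.2 = (0.1 + 0.3) / 2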
private:
T& ElementForIndex(size_t index) {
DCHECK_LT(index, NumBuckets());
diff --git a/runtime/base/hash_set_test.cc b/runtime/base/hash_set_test.cc
index 6d2c5e0..743e98e 100644
--- a/runtime/base/hash_set_test.cc
+++ b/runtime/base/hash_set_test.cc
@@ -196,6 +196,24 @@
}
}
+TEST_F(HashSetTest, TestLoadFactor) {
+ HashSet<std::string, IsEmptyFnString> hash_set;
+ static constexpr size_t kStringCount = 1000;
+ static constexpr double kEpsilon = 0.01;
+ for (size_t i = 0; i < kStringCount; ++i) {
+ hash_set.Insert(RandomString(i % 10 + 1));
+ }
+ // Check that changing the load factor resizes the table to be within the target range.
+ EXPECT_GE(hash_set.CalculateLoadFactor() + kEpsilon, hash_set.GetMinLoadFactor());
+ EXPECT_LE(hash_set.CalculateLoadFactor() - kEpsilon, hash_set.GetMaxLoadFactor());
+ hash_set.SetLoadFactor(0.1, 0.3);
+ EXPECT_DOUBLE_EQ(0.1, hash_set.GetMinLoadFactor());
+ EXPECT_DOUBLE_EQ(0.3, hash_set.GetMaxLoadFactor());
+ EXPECT_LE(hash_set.CalculateLoadFactor() - kEpsilon, hash_set.GetMaxLoadFactor());
+ hash_set.SetLoadFactor(0.6, 0.8);
+ EXPECT_LE(hash_set.CalculateLoadFactor() - kEpsilon, hash_set.GetMaxLoadFactor());
+}
+
TEST_F(HashSetTest, TestStress) {
HashSet<std::string, IsEmptyFnString> hash_set;
std::unordered_multiset<std::string> std_set;
diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
index 4f51370..2554fb0 100644
--- a/runtime/base/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -132,7 +132,7 @@
ScopedArenaAllocatorAdapter<void> Adapter(ArenaAllocKind kind = kArenaAllocSTL);
// Allow a delete-expression to destroy but not deallocate allocators created by Create().
- static void operator delete(void* ptr) { UNUSED(ptr); }
+ static void operator delete(void* ptr ATTRIBUTE_UNUSED) {}
private:
ArenaStack* const arena_stack_;
diff --git a/runtime/base/scoped_arena_containers.h b/runtime/base/scoped_arena_containers.h
index 7c64449..562c2bf 100644
--- a/runtime/base/scoped_arena_containers.h
+++ b/runtime/base/scoped_arena_containers.h
@@ -146,8 +146,8 @@
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
- pointer allocate(size_type n, ScopedArenaAllocatorAdapter<void>::pointer hint = nullptr) {
- UNUSED(hint);
+ pointer allocate(size_type n,
+ ScopedArenaAllocatorAdapter<void>::pointer hint ATTRIBUTE_UNUSED = nullptr) {
DCHECK_LE(n, max_size());
DebugStackIndirectTopRef::CheckTop();
return reinterpret_cast<T*>(arena_stack_->Alloc(n * sizeof(T),
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 4ce52f1..73fd091 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1208,18 +1208,13 @@
uint8_t* raw_arrays = nullptr;
if (dex_file.GetOatDexFile() != nullptr &&
dex_file.GetOatDexFile()->GetDexCacheArrays() != nullptr) {
- raw_arrays = const_cast<uint8_t*>(dex_file.GetOatDexFile()->GetDexCacheArrays());
+ raw_arrays = dex_file.GetOatDexFile()->GetDexCacheArrays();
} else if (dex_file.NumStringIds() != 0u || dex_file.NumTypeIds() != 0u ||
dex_file.NumMethodIds() != 0u || dex_file.NumFieldIds() != 0u) {
// NOTE: We "leak" the raw_arrays because we never destroy the dex cache.
DCHECK(image_pointer_size_ == 4u || image_pointer_size_ == 8u);
- // When cross-compiling for a 32-bit target on a 64-bit host, we need these arrays
- // in the low 4GiB address space so that we can store pointers in 32-bit fields.
- // This is conveniently provided by the linear allocator.
- raw_arrays = reinterpret_cast<uint8_t*>(
- (sizeof(void*) == 8u && image_pointer_size_ == 4u)
- ? Runtime::Current()->GetLinearAlloc()->Alloc(self, layout.Size()) // Zero-initialized.
- : linear_alloc->Alloc(self, layout.Size())); // Zero-initialized.
+ // Zero-initialized.
+ raw_arrays = reinterpret_cast<uint8_t*>(linear_alloc->Alloc(self, layout.Size()));
}
GcRoot<mirror::String>* strings = (dex_file.NumStringIds() == 0u) ? nullptr :
reinterpret_cast<GcRoot<mirror::String>*>(raw_arrays + layout.StringsOffset());
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index a70967d4..fd30a46 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -529,6 +529,8 @@
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!Locks::classlinker_classes_lock_);
+ // Unlike GetOrCreateAllocatorForClassLoader, GetAllocatorForClassLoader asserts that the
+ // allocator for this class loader is already created.
static LinearAlloc* GetAllocatorForClassLoader(mirror::ClassLoader* class_loader)
SHARED_REQUIRES(Locks::mutator_lock_);
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index fc8e6c4..4b0cbc8 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -21,7 +21,9 @@
namespace art {
ClassTable::ClassTable() {
- classes_.push_back(ClassSet());
+ Runtime* const runtime = Runtime::Current();
+ classes_.push_back(ClassSet(runtime->GetHashTableMinLoadFactor(),
+ runtime->GetHashTableMaxLoadFactor()));
}
void ClassTable::FreezeSnapshot() {
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index a4f95b6..b17b76e 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1903,8 +1903,7 @@
JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string* name) {
ScopedObjectAccessUnchecked soa(Thread::Current());
JDWP::JdwpError error;
- Thread* thread = DecodeThread(soa, thread_id, &error);
- UNUSED(thread);
+ DecodeThread(soa, thread_id, &error);
if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
return error;
}
@@ -1931,8 +1930,7 @@
}
ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroup");
// Okay, so it's an object, but is it actually a thread?
- Thread* thread = DecodeThread(soa, thread_id, &error);
- UNUSED(thread);
+ DecodeThread(soa, thread_id, &error);
if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
// Zombie threads are in the null group.
expandBufAddObjectId(pReply, JDWP::ObjectId(0));
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 28c62a8..4e4f851 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -56,9 +56,8 @@
return AllocObjectFromCode<false, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
- mirror::Class* klass, ArtMethod* method, Thread* self) \
+ mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
SHARED_REQUIRES(Locks::mutator_lock_) { \
- UNUSED(method); \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
if (LIKELY(klass->IsInitialized())) { \
@@ -83,9 +82,8 @@
return AllocObjectFromCodeResolved<instrumented_bool>(klass, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
- mirror::Class* klass, ArtMethod* method, Thread* self) \
+ mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
SHARED_REQUIRES(Locks::mutator_lock_) { \
- UNUSED(method); \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
size_t byte_count = klass->GetObjectSize(); \
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 377675e..6035dfe 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1524,9 +1524,9 @@
return sp8;
}
- virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
+ virtual void WalkHeader(
+ BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(sm);
}
void Walk(const char* shorty, uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) {
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 30a0983..5b31b3a 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -406,9 +406,8 @@
manager_->AddHandler(this, false);
}
-bool JavaStackTraceHandler::Action(int sig, siginfo_t* siginfo, void* context) {
+bool JavaStackTraceHandler::Action(int sig ATTRIBUTE_UNUSED, siginfo_t* siginfo, void* context) {
// Make sure that we are in the generated code, but we may not have a dex pc.
- UNUSED(sig);
#ifdef TEST_NESTED_SIGNAL
bool in_generated_code = true;
#else
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 277d319..eb0852a 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -41,8 +41,7 @@
explicit RememberedSetCardVisitor(RememberedSet::CardSet* const dirty_cards)
: dirty_cards_(dirty_cards) {}
- void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
- UNUSED(new_value);
+ void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value ATTRIBUTE_UNUSED) const {
if (expected_value == CardTable::kCardDirty) {
dirty_cards_->insert(card);
}
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index 3d85395..e747f00 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -77,7 +77,8 @@
}
extern "C" void DlmallocBytesAllocatedCallback(void* start ATTRIBUTE_UNUSED,
- void* end ATTRIBUTE_UNUSED, size_t used_bytes,
+ void* end ATTRIBUTE_UNUSED,
+ size_t used_bytes,
void* arg) {
if (used_bytes == 0) {
return;
@@ -86,10 +87,10 @@
*bytes_allocated += used_bytes + sizeof(size_t);
}
-extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes,
+extern "C" void DlmallocObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED,
+ void* end ATTRIBUTE_UNUSED,
+ size_t used_bytes,
void* arg) {
- UNUSED(start);
- UNUSED(end);
if (used_bytes == 0) {
return;
}
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 87f1392..3ce3d63 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -131,6 +131,7 @@
private:
Slot* next_; // Next slot in the list.
+ friend class RosAlloc;
};
// We use the tail (kUseTail == true) for the bulk or thread-local free lists to avoid the need to
@@ -302,6 +303,7 @@
// free without traversing the whole free list.
uint32_t size_;
uint32_t padding_ ATTRIBUTE_UNUSED;
+ friend class RosAlloc;
};
// Represents a run of memory slots of the same size.
@@ -482,7 +484,7 @@
static constexpr uint8_t kMagicNumFree = 43;
// The number of size brackets. Sync this with the length of Thread::rosalloc_runs_.
static constexpr size_t kNumOfSizeBrackets = kNumRosAllocThreadLocalSizeBrackets;
- // The number of smaller size brackets that are 16 bytes apart.
+ // The number of smaller size brackets that are the quantum size apart.
static constexpr size_t kNumOfQuantumSizeBrackets = 32;
// The sizes (the slot sizes, in bytes) of the size brackets.
static size_t bracketSizes[kNumOfSizeBrackets];
@@ -520,9 +522,7 @@
}
// Returns true if the given allocation size is for a thread local allocation.
static bool IsSizeForThreadLocal(size_t size) {
- DCHECK_GT(kNumThreadLocalSizeBrackets, 0U);
- size_t max_thread_local_bracket_idx = kNumThreadLocalSizeBrackets - 1;
- bool is_size_for_thread_local = size <= bracketSizes[max_thread_local_bracket_idx];
+ bool is_size_for_thread_local = size <= kMaxThreadLocalBracketSize;
DCHECK(size > kLargeSizeThreshold ||
(is_size_for_thread_local == (SizeToIndex(size) < kNumThreadLocalSizeBrackets)));
return is_size_for_thread_local;
@@ -634,6 +634,16 @@
// are less than this index. We use shared (current) runs for the rest.
static const size_t kNumThreadLocalSizeBrackets = 8;
+ // The size of the largest bracket we use thread-local runs for.
+ // This should be equal to bracketSizes[kNumThreadLocalSizeBrackets - 1].
+ static const size_t kMaxThreadLocalBracketSize = 128;
+
+ // The bracket size increment for the brackets of size <= 512 bytes.
+ static constexpr size_t kBracketQuantumSize = 16;
+
+ // Equal to Log2(kBracketQuantumSize).
+ static constexpr size_t kBracketQuantumSizeShift = 4;
+
private:
// The base address of the memory region that's managed by this allocator.
uint8_t* base_;
@@ -770,6 +780,19 @@
size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
~RosAlloc();
+ static size_t RunFreeListOffset() {
+ return OFFSETOF_MEMBER(Run, free_list_);
+ }
+ static size_t RunFreeListHeadOffset() {
+ return OFFSETOF_MEMBER(SlotFreeList<false>, head_);
+ }
+ static size_t RunFreeListSizeOffset() {
+ return OFFSETOF_MEMBER(SlotFreeList<false>, size_);
+ }
+ static size_t RunSlotNextOffset() {
+ return OFFSETOF_MEMBER(Slot, next_);
+ }
+
// If kThreadUnsafe is true then the allocator may avoid acquiring some locks as an optimization.
// If used, this may cause race conditions if multiple threads are allocating at the same time.
template<bool kThreadSafe = true>
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 6c32658..bb7e854 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -56,8 +56,7 @@
RecursiveMarkDirtyObjects(false, accounting::CardTable::kCardDirty - 1);
}
-void StickyMarkSweep::Sweep(bool swap_bitmaps) {
- UNUSED(swap_bitmaps);
+void StickyMarkSweep::Sweep(bool swap_bitmaps ATTRIBUTE_UNUSED) {
SweepArray(GetHeap()->GetLiveStack(), false);
}
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index a5dbad9..c081011 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -48,9 +48,7 @@
size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
SHARED_REQUIRES(Locks::mutator_lock_);
- void RegisterRecentFree(mirror::Object* ptr) OVERRIDE {
- UNUSED(ptr);
- }
+ void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) OVERRIDE {}
size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE;
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index 9e882a8..bbfcb31 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -31,8 +31,7 @@
explicit CountObjectsAllocated(size_t* objects_allocated)
: objects_allocated_(objects_allocated) {}
- void operator()(mirror::Object* obj) const {
- UNUSED(obj);
+ void operator()(mirror::Object* obj ATTRIBUTE_UNUSED) const {
++*objects_allocated_;
}
diff --git a/runtime/gc/task_processor_test.cc b/runtime/gc/task_processor_test.cc
index 2c44da2..f1d26d9 100644
--- a/runtime/gc/task_processor_test.cc
+++ b/runtime/gc/task_processor_test.cc
@@ -105,8 +105,7 @@
TestOrderTask(uint64_t expected_time, size_t expected_counter, size_t* counter)
: HeapTask(expected_time), expected_counter_(expected_counter), counter_(counter) {
}
- virtual void Run(Thread* thread) OVERRIDE {
- UNUSED(thread); // Fix cppling bug.
+ virtual void Run(Thread* thread ATTRIBUTE_UNUSED) OVERRIDE {
ASSERT_EQ(*counter_, expected_counter_);
++*counter_;
}
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 179353e..f4658d5 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -459,4 +459,12 @@
}
}
+InternTable::Table::Table() {
+ Runtime* const runtime = Runtime::Current();
+ pre_zygote_table_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
+ runtime->GetHashTableMaxLoadFactor());
+ post_zygote_table_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
+ runtime->GetHashTableMaxLoadFactor());
+}
+
} // namespace art
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 24c5af9..3a4e8d8 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -146,6 +146,7 @@
// weak interns and strong interns.
class Table {
public:
+ Table();
mirror::String* Find(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(Locks::intern_table_lock_);
void Insert(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 44eb29e..18fb0d8 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -40,8 +40,9 @@
uint16_t inst_data) {
const bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
const uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* f = FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
- Primitive::ComponentSize(field_type));
+ ArtField* f =
+ FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
+ Primitive::ComponentSize(field_type));
if (UNLIKELY(f == nullptr)) {
CHECK(self->IsExceptionPending());
return false;
@@ -234,8 +235,9 @@
bool do_assignability_check = do_access_check;
bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
- ArtField* f = FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
- Primitive::ComponentSize(field_type));
+ ArtField* f =
+ FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
+ Primitive::ComponentSize(field_type));
if (UNLIKELY(f == nullptr)) {
CHECK(self->IsExceptionPending());
return false;
@@ -775,7 +777,7 @@
template<bool is_range, bool do_assignability_check>
bool DoLambdaCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
- const Instruction* inst, uint16_t inst_data, JValue* result) {
+ const Instruction* inst, uint16_t inst_data ATTRIBUTE_UNUSED, JValue* result) {
const uint4_t num_additional_registers = inst->VRegB_25x();
// Argument word count.
const uint16_t number_of_inputs = num_additional_registers + kLambdaVirtualRegisterWidth;
@@ -790,7 +792,6 @@
vregC = inst->VRegC_3rc();
} else {
// TODO(iam): See if it's possible to remove inst_data dependency from 35x to avoid this path
- UNUSED(inst_data);
inst->GetAllArgs25x(arg);
}
@@ -806,7 +807,8 @@
bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, uint16_t inst_data, JValue* result) {
// Argument word count.
- const uint16_t number_of_inputs = (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
+ const uint16_t number_of_inputs =
+ (is_range) ? inst->VRegA_3rc(inst_data) : inst->VRegA_35c(inst_data);
// TODO: find a cleaner way to separate non-range and range information without duplicating
// code.
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 0a4d6e3..5427a58 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -1072,9 +1072,8 @@
return WriteTaggedObject(reply, contended_monitor);
}
-static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply)
+static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(reply);
ObjectId thread_id = request->ReadThreadId();
return Dbg::Interrupt(thread_id);
}
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index 4104d7a..dab1040 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -93,8 +93,7 @@
monitors.Dump(os);
}
-void JNIEnvExt::PushFrame(int capacity) {
- UNUSED(capacity); // cpplint gets confused with (int) and thinks its a cast.
+void JNIEnvExt::PushFrame(int capacity ATTRIBUTE_UNUSED) {
// TODO: take 'capacity' into account.
stacked_local_ref_cookies.push_back(local_ref_cookie);
local_ref_cookie = locals.GetSegmentState();
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 6bc1829..234a733 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -1743,8 +1743,9 @@
return static_cast<jchar*>(s->GetValue());
}
- static void ReleaseStringCritical(JNIEnv* env, jstring java_string, const jchar* chars) {
- UNUSED(chars);
+ static void ReleaseStringCritical(JNIEnv* env,
+ jstring java_string,
+ const jchar* chars ATTRIBUTE_UNUSED) {
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_string);
ScopedObjectAccess soa(env);
gc::Heap* heap = Runtime::Current()->GetHeap();
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 3d54029..ec7d758 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -100,9 +100,8 @@
explicit SetLengthVisitor(int32_t length) : length_(length) {
}
- void operator()(Object* obj, size_t usable_size) const
+ void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(usable_size);
// Avoid AsArray as object is not yet in live bitmap or allocation stack.
Array* array = down_cast<Array*>(obj);
// DCHECK(array->IsArrayInstance());
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index d931777..a162a4e 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -18,6 +18,7 @@
#include <dlfcn.h>
#include <string.h>
+#include <type_traits>
#include <unistd.h>
#include <cstdlib>
@@ -248,10 +249,7 @@
#ifdef __APPLE__
// The dl_iterate_phdr function is missing. There is a similar API on OSX,
// but let's fall back to the custom loading code for the time being.
- UNUSED(elf_filename);
- UNUSED(requested_base);
- UNUSED(abs_dex_location);
- UNUSED(error_msg);
+ UNUSED(elf_filename, requested_base, abs_dex_location, error_msg);
return false;
#else
{
@@ -391,13 +389,13 @@
// Readjust to be non-inclusive upper bound.
end_ += sizeof(uint32_t);
- bss_begin_ = elf_file_->FindDynamicSymbolAddress("oatbss");
+ bss_begin_ = const_cast<uint8_t*>(elf_file_->FindDynamicSymbolAddress("oatbss"));
if (bss_begin_ == nullptr) {
// No .bss section. Clear dlerror().
bss_end_ = nullptr;
dlerror();
} else {
- bss_end_ = elf_file_->FindDynamicSymbolAddress("oatbsslastword");
+ bss_end_ = const_cast<uint8_t*>(elf_file_->FindDynamicSymbolAddress("oatbsslastword"));
if (bss_end_ == nullptr) {
*error_msg = StringPrintf("Failed to find oatbasslastword symbol in '%s'",
file->GetPath().c_str());
@@ -410,10 +408,31 @@
return Setup(abs_dex_location, error_msg);
}
+// Read an unaligned entry from the OatDexFile data in OatFile and advance the read
+// position by the number of bytes read, i.e. sizeof(T).
+// Return true on success, false if the read would go beyond the end of the OatFile.
+template <typename T>
+inline static bool ReadOatDexFileData(const OatFile& oat_file,
+ /*inout*/const uint8_t** oat,
+ /*out*/T* value) {
+ DCHECK(oat != nullptr);
+ DCHECK(value != nullptr);
+ DCHECK_LE(*oat, oat_file.End());
+ if (UNLIKELY(static_cast<size_t>(oat_file.End() - *oat) < sizeof(T))) {
+ return false;
+ }
+ static_assert(std::is_trivial<T>::value, "T must be a trivial type");
+ typedef __attribute__((__aligned__(1))) T unaligned_type;
+ *value = *reinterpret_cast<const unaligned_type*>(*oat);
+ *oat += sizeof(T);
+ return true;
+}
+
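The __aligned__(1) typedef is the load-bearing part of this helper: it tells the compiler the pointer may be misaligned, so strict-alignment targets get a safe access sequence instead of a faulting word load. A portable sketch of the same idea using memcpy, which compilers collapse to a single load where the ISA allows it (ReadUnaligned is an illustrative name):

    #include <cstdint>
    #include <cstring>

    template <typename T>
    bool ReadUnaligned(const uint8_t* end, /*inout*/ const uint8_t** pos,
                       /*out*/ T* value) {
      if (static_cast<size_t>(end - *pos) < sizeof(T)) {
        return false;  // would read past the end of the mapped file
      }
      std::memcpy(value, *pos, sizeof(T));  // alignment-safe on every target
      *pos += sizeof(T);
      return true;
    }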
bool OatFile::Setup(const char* abs_dex_location, std::string* error_msg) {
if (!GetOatHeader().IsValid()) {
std::string cause = GetOatHeader().GetValidationErrorMessage();
- *error_msg = StringPrintf("Invalid oat header for '%s': %s", GetLocation().c_str(),
+ *error_msg = StringPrintf("Invalid oat header for '%s': %s",
+ GetLocation().c_str(),
cause.c_str());
return false;
}
@@ -427,35 +446,42 @@
oat += GetOatHeader().GetKeyValueStoreSize();
if (oat > End()) {
*error_msg = StringPrintf("In oat file '%s' found truncated variable-size data: "
- "%p + %zd + %ud <= %p", GetLocation().c_str(),
- Begin(), sizeof(OatHeader), GetOatHeader().GetKeyValueStoreSize(),
+ "%p + %zu + %u <= %p",
+ GetLocation().c_str(),
+ Begin(),
+ sizeof(OatHeader),
+ GetOatHeader().GetKeyValueStoreSize(),
End());
return false;
}
size_t pointer_size = GetInstructionSetPointerSize(GetOatHeader().GetInstructionSet());
- const uint8_t* dex_cache_arrays = bss_begin_;
+ uint8_t* dex_cache_arrays = bss_begin_;
uint32_t dex_file_count = GetOatHeader().GetDexFileCount();
oat_dex_files_storage_.reserve(dex_file_count);
for (size_t i = 0; i < dex_file_count; i++) {
- uint32_t dex_file_location_size = *reinterpret_cast<const uint32_t*>(oat);
- if (UNLIKELY(dex_file_location_size == 0U)) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd with empty location name",
- GetLocation().c_str(), i);
+ uint32_t dex_file_location_size;
+ if (UNLIKELY(!ReadOatDexFileData(*this, &oat, &dex_file_location_size))) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu truncated after dex file "
+ "location size",
+ GetLocation().c_str(),
+ i);
return false;
}
- oat += sizeof(dex_file_location_size);
- if (UNLIKELY(oat > End())) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd truncated after dex file "
- "location size", GetLocation().c_str(), i);
+ if (UNLIKELY(dex_file_location_size == 0U)) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu with empty location name",
+ GetLocation().c_str(),
+ i);
return false;
}
const char* dex_file_location_data = reinterpret_cast<const char*>(oat);
oat += dex_file_location_size;
if (UNLIKELY(oat > End())) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd with truncated dex file "
- "location", GetLocation().c_str(), i);
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu with truncated dex file "
+ "location",
+ GetLocation().c_str(),
+ i);
return false;
}
@@ -463,46 +489,61 @@
abs_dex_location,
std::string(dex_file_location_data, dex_file_location_size));
- uint32_t dex_file_checksum = *reinterpret_cast<const uint32_t*>(oat);
- oat += sizeof(dex_file_checksum);
- if (UNLIKELY(oat > End())) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' truncated after "
- "dex file checksum", GetLocation().c_str(), i,
+ uint32_t dex_file_checksum;
+ if (UNLIKELY(!ReadOatDexFileData(*this, &oat, &dex_file_checksum))) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' truncated after "
+ "dex file checksum",
+ GetLocation().c_str(),
+ i,
dex_file_location.c_str());
return false;
}
- uint32_t dex_file_offset = *reinterpret_cast<const uint32_t*>(oat);
+ uint32_t dex_file_offset;
+ if (UNLIKELY(!ReadOatDexFileData(*this, &oat, &dex_file_offset))) {
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' truncated "
+ "after dex file offsets",
+ GetLocation().c_str(),
+ i,
+ dex_file_location.c_str());
+ return false;
+ }
if (UNLIKELY(dex_file_offset == 0U)) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with zero dex "
- "file offset", GetLocation().c_str(), i, dex_file_location.c_str());
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with zero dex "
+ "file offset",
+ GetLocation().c_str(),
+ i,
+ dex_file_location.c_str());
return false;
}
if (UNLIKELY(dex_file_offset > Size())) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with dex file "
- "offset %ud > %zd", GetLocation().c_str(), i,
- dex_file_location.c_str(), dex_file_offset, Size());
- return false;
- }
- oat += sizeof(dex_file_offset);
- if (UNLIKELY(oat > End())) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' truncated "
- "after dex file offsets", GetLocation().c_str(), i,
- dex_file_location.c_str());
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with dex file "
+ "offset %u > %zu",
+ GetLocation().c_str(),
+ i,
+ dex_file_location.c_str(),
+ dex_file_offset,
+ Size());
return false;
}
const uint8_t* dex_file_pointer = Begin() + dex_file_offset;
if (UNLIKELY(!DexFile::IsMagicValid(dex_file_pointer))) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with invalid "
- "dex file magic '%s'", GetLocation().c_str(), i,
- dex_file_location.c_str(), dex_file_pointer);
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with invalid "
+ "dex file magic '%s'",
+ GetLocation().c_str(),
+ i,
+ dex_file_location.c_str(),
+ dex_file_pointer);
return false;
}
if (UNLIKELY(!DexFile::IsVersionValid(dex_file_pointer))) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with invalid "
- "dex file version '%s'", GetLocation().c_str(), i,
- dex_file_location.c_str(), dex_file_pointer);
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with invalid "
+ "dex file version '%s'",
+ GetLocation().c_str(),
+ i,
+ dex_file_location.c_str(),
+ dex_file_pointer);
return false;
}
const DexFile::Header* header = reinterpret_cast<const DexFile::Header*>(dex_file_pointer);
@@ -510,21 +551,26 @@
oat += (sizeof(*methods_offsets_pointer) * header->class_defs_size_);
if (UNLIKELY(oat > End())) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with truncated "
- "method offsets", GetLocation().c_str(), i,
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with truncated "
+ "method offsets",
+ GetLocation().c_str(),
+ i,
dex_file_location.c_str());
return false;
}
- const uint8_t* current_dex_cache_arrays = nullptr;
+ uint8_t* current_dex_cache_arrays = nullptr;
if (dex_cache_arrays != nullptr) {
DexCacheArraysLayout layout(pointer_size, *header);
if (layout.Size() != 0u) {
if (static_cast<size_t>(bss_end_ - dex_cache_arrays) < layout.Size()) {
- *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zd for '%s' with "
- "truncated dex cache arrays, %zd < %zd.",
- GetLocation().c_str(), i, dex_file_location.c_str(),
- static_cast<size_t>(bss_end_ - dex_cache_arrays), layout.Size());
+ *error_msg = StringPrintf("In oat file '%s' found OatDexFile #%zu for '%s' with "
+ "truncated dex cache arrays, %zu < %zu.",
+ GetLocation().c_str(),
+ i,
+ dex_file_location.c_str(),
+ static_cast<size_t>(bss_end_ - dex_cache_arrays),
+ layout.Size());
return false;
}
current_dex_cache_arrays = dex_cache_arrays;
@@ -556,7 +602,7 @@
if (dex_cache_arrays != bss_end_) {
// We expect the bss section either to be empty (dex_cache_arrays and bss_end_
// both null) or to contain just the dex cache arrays and nothing else.
- *error_msg = StringPrintf("In oat file '%s' found unexpected bss size bigger by %zd bytes.",
+ *error_msg = StringPrintf("In oat file '%s' found unexpected bss size bigger by %zu bytes.",
GetLocation().c_str(),
static_cast<size_t>(bss_end_ - dex_cache_arrays));
return false;
@@ -664,7 +710,7 @@
uint32_t dex_file_location_checksum,
const uint8_t* dex_file_pointer,
const uint32_t* oat_class_offsets_pointer,
- const uint8_t* dex_cache_arrays)
+ uint8_t* dex_cache_arrays)
: oat_file_(oat_file),
dex_file_location_(dex_file_location),
canonical_dex_file_location_(canonical_dex_file_location),
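Note: the hunks above funnel every per-dex-file header read through ReadOatDexFileData, whose definition lies outside this excerpt. A minimal sketch of the bounds-checked read it is assumed to perform (the template form, the memcpy, and the exact signature are assumptions, not the committed code; UNLIKELY is ART's branch-hint macro and <cstring> is assumed included):

    // Sketch: read sizeof(T) bytes at *oat and advance, failing if the read
    // would run past the mapped oat data. Assumes *oat <= oat_file.End(),
    // which the callers' earlier checks establish.
    template <typename T>
    static bool ReadOatDexFileData(const OatFile& oat_file, const uint8_t** oat, T* value) {
      if (UNLIKELY(static_cast<size_t>(oat_file.End() - *oat) < sizeof(T))) {
        return false;  // Truncated; the caller formats the error message.
      }
      memcpy(value, *oat, sizeof(T));  // memcpy tolerates unaligned oat data.
      *oat += sizeof(T);
      return true;
    }

Centralizing the check also fixes the old ordering bug visible above, where the value was dereferenced before the end-of-file check ran.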
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 34f0141..6acdf86 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -301,10 +301,10 @@
const uint8_t* end_;
// Pointer to the .bss section, if present, otherwise null.
- const uint8_t* bss_begin_;
+ uint8_t* bss_begin_;
// Pointer to the end of the .bss section, if present, otherwise null.
- const uint8_t* bss_end_;
+ uint8_t* bss_end_;
// Was this oat_file loaded executable?
const bool is_executable_;
@@ -396,7 +396,7 @@
// Returns the offset to the OatClass information. Most callers should use GetOatClass.
uint32_t GetOatClassOffset(uint16_t class_def_index) const;
- const uint8_t* GetDexCacheArrays() const {
+ uint8_t* GetDexCacheArrays() const {
return dex_cache_arrays_;
}
@@ -409,7 +409,7 @@
uint32_t dex_file_checksum,
const uint8_t* dex_file_pointer,
const uint32_t* oat_class_offsets_pointer,
- const uint8_t* dex_cache_arrays);
+ uint8_t* dex_cache_arrays);
const OatFile* const oat_file_;
const std::string dex_file_location_;
@@ -417,7 +417,7 @@
const uint32_t dex_file_location_checksum_;
const uint8_t* const dex_file_pointer_;
const uint32_t* const oat_class_offsets_pointer_;
- const uint8_t* const dex_cache_arrays_;
+ uint8_t* const dex_cache_arrays_;
friend class OatFile;
DISALLOW_COPY_AND_ASSIGN(OatDexFile);
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index cef8702..2c81edd 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -954,9 +954,7 @@
loaded_oat_file_(nullptr)
{}
- void Run(Thread* self) {
- UNUSED(self);
-
+ void Run(Thread* self ATTRIBUTE_UNUSED) {
// Load the dex files, and save a pointer to the loaded oat file, so that
// we can verify only one oat file was loaded for the dex location.
std::vector<std::unique_ptr<const DexFile>> dex_files;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index cd09bee..6c459a3 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -140,6 +140,12 @@
// If a signal isn't handled properly, enable a handler that attempts to dump the Java stack.
static constexpr bool kEnableJavaStackTraceHandler = false;
+// Tuned by compiling GmsCore under perf and measuring time spent in DescriptorEquals for class
+// linking.
+static constexpr double kLowMemoryMinLoadFactor = 0.5;
+static constexpr double kLowMemoryMaxLoadFactor = 0.8;
+static constexpr double kNormalMinLoadFactor = 0.4;
+static constexpr double kNormalMaxLoadFactor = 0.7;
Runtime* Runtime::instance_ = nullptr;
struct TraceConfig {
@@ -200,7 +206,9 @@
no_sig_chain_(false),
is_native_bridge_loaded_(false),
zygote_max_failed_boots_(0),
- experimental_flags_(ExperimentalFlags::kNone) {
+ experimental_flags_(ExperimentalFlags::kNone),
+ oat_file_manager_(nullptr),
+ is_low_memory_mode_(false) {
CheckAsmSupportOffsetsAndSizes();
std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u);
}
@@ -886,6 +894,7 @@
zygote_max_failed_boots_ = runtime_options.GetOrDefault(Opt::ZygoteMaxFailedBoots);
experimental_flags_ = runtime_options.GetOrDefault(Opt::Experimental);
+ is_low_memory_mode_ = runtime_options.Exists(Opt::LowMemoryMode);
XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
ATRACE_BEGIN("CreateHeap");
@@ -1804,4 +1813,12 @@
: new LinearAlloc(arena_pool_.get());
}
+double Runtime::GetHashTableMinLoadFactor() const {
+ return is_low_memory_mode_ ? kLowMemoryMinLoadFactor : kNormalMinLoadFactor;
+}
+
+double Runtime::GetHashTableMaxLoadFactor() const {
+ return is_low_memory_mode_ ? kLowMemoryMaxLoadFactor : kNormalMaxLoadFactor;
+}
+
} // namespace art
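The intent of the two getters is that hash containers built by the runtime trade lookup speed for footprint on low-RAM devices by running at higher load factors. A hypothetical call site (the hash-set type and its load-factor constructor are assumed for illustration, not shown in this patch):

    // Hypothetical consumer: size a string hash table from the runtime policy.
    HashSet<std::string> MakeInternTable() {
      Runtime* runtime = Runtime::Current();
      return HashSet<std::string>(runtime->GetHashTableMinLoadFactor(),
                                  runtime->GetHashTableMaxLoadFactor());
    }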
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 458f08a..7b1fdb2 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -580,6 +580,9 @@
return *oat_file_manager_;
}
+ double GetHashTableMinLoadFactor() const;
+ double GetHashTableMaxLoadFactor() const;
+
private:
static void InitPlatformSignalHandlers();
@@ -780,6 +783,9 @@
// Oat file manager, keeps track of what oat files are open.
OatFileManager* oat_file_manager_;
+ // Whether or not we are on a low RAM device.
+ bool is_low_memory_mode_;
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);
diff --git a/runtime/thread.h b/runtime/thread.h
index 8cea10c..8f3461a 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -626,6 +626,24 @@
return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_objects));
}
+ template<size_t pointer_size>
+ static ThreadOffset<pointer_size> RosAllocRunsOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
+ rosalloc_runs));
+ }
+
+ template<size_t pointer_size>
+ static ThreadOffset<pointer_size> ThreadLocalAllocStackTopOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
+ thread_local_alloc_stack_top));
+ }
+
+ template<size_t pointer_size>
+ static ThreadOffset<pointer_size> ThreadLocalAllocStackEndOffset() {
+ return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
+ thread_local_alloc_stack_end));
+ }
+
// Size of stack less any space reserved for stack overflow
size_t GetStackSize() const {
return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
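These new offset accessors exist so generated code can address thread-local allocation state directly off the thread register instead of making a runtime call. An illustrative sketch of how the offset would be consumed (the helper name is made up; Int32Value() on the returned ThreadOffset is assumed, following its use elsewhere in ART):

    // Illustrative only: generated code computes thread_reg + offset to reach
    // the thread-local allocation stack top on a 64-bit target.
    uintptr_t AllocStackTopAddress(Thread* self) {
      constexpr size_t kPointerSize = 8;  // 64-bit target assumed.
      ThreadOffset<kPointerSize> off =
          Thread::ThreadLocalAllocStackTopOffset<kPointerSize>();
      return reinterpret_cast<uintptr_t>(self) + off.Int32Value();
    }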
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 745aa63..ab342aa 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -752,26 +752,31 @@
}
}
-void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t new_dex_pc) {
- UNUSED(thread, this_object, method, new_dex_pc);
+void Trace::DexPcMoved(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ ArtMethod* method,
+ uint32_t new_dex_pc) {
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected dex PC event in tracing " << PrettyMethod(method) << " " << new_dex_pc;
}
-void Trace::FieldRead(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc, ArtField* field)
+void Trace::FieldRead(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(thread, this_object, method, dex_pc, field);
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
-void Trace::FieldWritten(Thread* thread, mirror::Object* this_object,
- ArtMethod* method, uint32_t dex_pc, ArtField* field,
- const JValue& field_value)
+void Trace::FieldWritten(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Object* this_object ATTRIBUTE_UNUSED,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ ArtField* field ATTRIBUTE_UNUSED,
+ const JValue& field_value ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(thread, this_object, method, dex_pc, field, field_value);
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
@@ -804,9 +809,9 @@
thread_clock_diff, wall_clock_diff);
}
-void Trace::ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
+void Trace::ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED,
+ mirror::Throwable* exception_object ATTRIBUTE_UNUSED)
SHARED_REQUIRES(Locks::mutator_lock_) {
- UNUSED(thread, exception_object);
LOG(ERROR) << "Unexpected exception caught event in tracing";
}
diff --git a/runtime/utils.h b/runtime/utils.h
index b67f273..457d43f 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -272,18 +272,15 @@
class VoidFunctor {
public:
template <typename A>
- inline void operator() (A a) const {
- UNUSED(a);
+ inline void operator() (A a ATTRIBUTE_UNUSED) const {
}
template <typename A, typename B>
- inline void operator() (A a, B b) const {
- UNUSED(a, b);
+ inline void operator() (A a ATTRIBUTE_UNUSED, B b ATTRIBUTE_UNUSED) const {
}
template <typename A, typename B, typename C>
- inline void operator() (A a, B b, C c) const {
- UNUSED(a, b, c);
+ inline void operator() (A a ATTRIBUTE_UNUSED, B b ATTRIBUTE_UNUSED, C c ATTRIBUTE_UNUSED) const {
}
};
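VoidFunctor is the usual "do nothing" default for visitor template parameters: with the empty inline call operator above, an instantiation that keeps the default compiles the hook away entirely. A minimal self-contained sketch of the pattern (ForEachRoot is a made-up name; assumes <vector> is included):

    // Sketch: a traversal with a per-element hook defaulting to VoidFunctor.
    template <typename Visitor = VoidFunctor>
    void ForEachRoot(const std::vector<void*>& roots,
                     const Visitor& visitor = Visitor()) {
      for (void* root : roots) {
        visitor(root);  // An empty inlined body when the default is used.
      }
    }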
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 02c93cf..f48b1e1 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -338,6 +338,8 @@
}
}
+static constexpr uint32_t kVirtualNullRegister = std::numeric_limits<uint32_t>::max();
+
void RegisterLine::PushMonitor(MethodVerifier* verifier, uint32_t reg_idx, int32_t insn_idx) {
const RegType& reg_type = GetRegisterType(verifier, reg_idx);
if (!reg_type.IsReferenceTypes()) {
@@ -352,6 +354,12 @@
}
} else {
if (SetRegToLockDepth(reg_idx, monitors_.size())) {
+ // Null literals can establish aliases that we can't easily track. As such, handle the zero
+ // case as the 2^32-1 register (which isn't available in dex bytecode).
+ if (reg_type.IsZero()) {
+ SetRegToLockDepth(kVirtualNullRegister, monitors_.size());
+ }
+
monitors_.push_back(insn_idx);
} else {
verifier->Fail(VERIFY_ERROR_LOCKING);
@@ -377,7 +385,19 @@
}
} else {
monitors_.pop_back();
- if (!IsSetLockDepth(reg_idx, monitors_.size())) {
+
+ bool success = IsSetLockDepth(reg_idx, monitors_.size());
+
+ if (!success && reg_type.IsZero()) {
+ // Null literals can establish aliases that we can't easily track. As such, handle the zero
+ // case as the 2^32-1 register (which isn't available in dex bytecode).
+ success = IsSetLockDepth(kVirtualNullRegister, monitors_.size());
+ if (success) {
+ reg_idx = kVirtualNullRegister;
+ }
+ }
+
+ if (!success) {
verifier->Fail(VERIFY_ERROR_LOCKING);
if (kDumpLockFailures) {
LOG(WARNING) << "monitor-exit not unlocking the top of the monitor stack while verifying "
@@ -385,7 +405,8 @@
*verifier->GetMethodReference().dex_file);
}
} else {
- // Record the register was unlocked
+ // Record that the register was unlocked. This clears all aliases, so it also clears
+ // the null lock, if necessary.
ClearRegToLockDepth(reg_idx, monitors_.size());
}
}
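The alias problem the hunks above work around: two different dex registers can both hold the null literal, and monitor-enter/monitor-exit may name different ones, so a per-register lock-depth check alone would spuriously fail. Folding every null into one out-of-range pseudo-register makes the pair match. A toy model of the bookkeeping (simplified and self-contained; the real code uses SetRegToLockDepth/IsSetLockDepth on the register line, and depths above 31 are ignored here):

    #include <cstdint>
    #include <map>

    static constexpr uint32_t kVirtualNullRegister = 0xFFFFFFFFu;
    static std::map<uint32_t, uint32_t> gLockDepths;  // reg -> bitmask of held depths

    void Enter(uint32_t reg, bool is_null, uint32_t depth) {
      gLockDepths[reg] |= 1u << depth;
      if (is_null) {
        gLockDepths[kVirtualNullRegister] |= 1u << depth;  // alias every null literal
      }
    }

    bool Exit(uint32_t reg, bool is_null, uint32_t depth) {
      if ((gLockDepths[reg] >> depth) & 1u) {
        return true;
      }
      // monitor-exit named a different register: accept if both held the null alias.
      return is_null && ((gLockDepths[kVirtualNullRegister] >> depth) & 1u);
    }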
diff --git a/test/079-phantom/src/Bitmap.java b/test/079-phantom/src/Bitmap.java
index 85eb3cc..ff43749 100644
--- a/test/079-phantom/src/Bitmap.java
+++ b/test/079-phantom/src/Bitmap.java
@@ -125,7 +125,6 @@
*/
class BitmapWatcher extends Thread {
ReferenceQueue<PhantomWrapper> mQueue;
- volatile boolean mQuit = false;
BitmapWatcher(ReferenceQueue<PhantomWrapper> queue) {
mQueue = queue;
@@ -133,7 +132,7 @@
}
public void run() {
- while (!mQuit) {
+ while (true) {
try {
PhantomWrapper ref = (PhantomWrapper) mQueue.remove();
//System.out.println("dequeued ref " + ref.mNativeData +
@@ -142,12 +141,12 @@
//ref.clear();
} catch (InterruptedException ie) {
System.out.println("intr");
+ break;
}
}
}
public void shutDown() {
- mQuit = true;
interrupt();
}
}
diff --git a/test/088-monitor-verification/smali/NullLocks.smali b/test/088-monitor-verification/smali/NullLocks.smali
new file mode 100644
index 0000000..8262f19
--- /dev/null
+++ b/test/088-monitor-verification/smali/NullLocks.smali
@@ -0,0 +1,28 @@
+.class public LNullLocks;
+
+.super Ljava/lang/Object;
+
+.method public static run(Z)V
+ .registers 3
+
+ invoke-static {}, LMain;->assertIsManaged()V
+
+ if-eqz v2, :Lfalse
+
+ const v0, 0 # Null.
+ monitor-enter v0
+ const v1, 0 # Another null. This should be detected as an alias, such that the exit
+ # will not fail verification.
+ monitor-exit v1
+
+ monitor-enter v0
+ monitor-exit v1
+
+ monitor-enter v1
+ monitor-exit v0
+
+:Lfalse
+
+ return-void
+
+.end method
diff --git a/test/088-monitor-verification/src/Main.java b/test/088-monitor-verification/src/Main.java
index d742b14..212c894 100644
--- a/test/088-monitor-verification/src/Main.java
+++ b/test/088-monitor-verification/src/Main.java
@@ -221,6 +221,8 @@
IllegalMonitorStateException.class);
runTest("UnbalancedJoin", new Object[] { new Object(), new Object() }, null);
runTest("UnbalancedStraight", new Object[] { new Object(), new Object() }, null);
+ runTest("NullLocks", new Object[] { false }, null);
+ runTest("NullLocks", new Object[] { true }, NullPointerException.class);
}
private static void runTest(String className, Object[] parameters, Class<?> excType) {
diff --git a/test/539-checker-arm64-encodable-immediates/expected.txt b/test/539-checker-arm64-encodable-immediates/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/539-checker-arm64-encodable-immediates/expected.txt
diff --git a/test/539-checker-arm64-encodable-immediates/info.txt b/test/539-checker-arm64-encodable-immediates/info.txt
new file mode 100644
index 0000000..efeef33
--- /dev/null
+++ b/test/539-checker-arm64-encodable-immediates/info.txt
@@ -0,0 +1,2 @@
+Basic tests that check the compiler recognizes when constant values can be
+encoded in the immediate field of instructions.
diff --git a/test/539-checker-arm64-encodable-immediates/src/Main.java b/test/539-checker-arm64-encodable-immediates/src/Main.java
new file mode 100644
index 0000000..7e3ff9f
--- /dev/null
+++ b/test/539-checker-arm64-encodable-immediates/src/Main.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+public class Main {
+
+ public static void assertLongEquals(long expected, long result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+
+ /**
+ * Test that the `-1` constant is not synthesized in a register and that we
+ * instead simply switch between `add` and `sub` instructions with the
+ * constant embedded.
+ * We need two or more uses of the constant because the compiler always
+ * delegates immediate-value handling to VIXL when there is only one use.
+ */
+
+ /// CHECK-START-ARM64: long Main.addM1(long) register (after)
+ /// CHECK: <<Arg:j\d+>> ParameterValue
+ /// CHECK: <<ConstM1:j\d+>> LongConstant -1
+ /// CHECK-NOT: ParallelMove
+ /// CHECK: Add [<<Arg>>,<<ConstM1>>]
+ /// CHECK: Sub [<<Arg>>,<<ConstM1>>]
+
+ /// CHECK-START-ARM64: long Main.addM1(long) disassembly (after)
+ /// CHECK: sub x{{\d+}}, x{{\d+}}, #0x1
+ /// CHECK: add x{{\d+}}, x{{\d+}}, #0x1
+
+ public static long addM1(long arg) {
+ return (arg + (-1)) | (arg - (-1));
+ }
+
+ public static void main(String[] args) {
+ assertLongEquals(14, addM1(7));
+ }
+}
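Worked check of the expected value: addM1(7) evaluates (7 + (-1)) | (7 - (-1)) = 6 | 8 = 0b0110 | 0b1000 = 0b1110 = 14, matching the assertLongEquals(14, addM1(7)) call above.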
diff --git a/test/540-checker-rtp-bug/expected.txt b/test/540-checker-rtp-bug/expected.txt
new file mode 100644
index 0000000..2cf2842
--- /dev/null
+++ b/test/540-checker-rtp-bug/expected.txt
@@ -0,0 +1 @@
+instanceof failed
diff --git a/test/540-checker-rtp-bug/info.txt b/test/540-checker-rtp-bug/info.txt
new file mode 100644
index 0000000..852cd7c
--- /dev/null
+++ b/test/540-checker-rtp-bug/info.txt
@@ -0,0 +1 @@
+Test that we set the proper types for objects (b/25008765).
diff --git a/test/540-checker-rtp-bug/src/Main.java b/test/540-checker-rtp-bug/src/Main.java
new file mode 100644
index 0000000..e9f16c0
--- /dev/null
+++ b/test/540-checker-rtp-bug/src/Main.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+final class Final {
+ public String toString() {
+ return "final";
+ }
+}
+
+public class Main {
+ /// CHECK-START: Final Main.testKeepCheckCast(java.lang.Object, boolean) reference_type_propagation (after)
+ /// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
+ /// CHECK: <<Class:l\d+>> LoadClass
+ /// CHECK: CheckCast [<<Phi>>,<<Class>>]
+ /// CHECK: <<Ret:l\d+>> BoundType [<<Phi>>] klass:Final
+ /// CHECK: Return [<<Ret>>]
+
+ /// CHECK-START: Final Main.testKeepCheckCast(java.lang.Object, boolean) instruction_simplifier_after_types (after)
+ /// CHECK: <<Phi:l\d+>> Phi
+ /// CHECK: <<Class:l\d+>> LoadClass
+ /// CHECK: CheckCast [<<Phi>>,<<Class>>]
+ /// CHECK: <<Ret:l\d+>> BoundType [<<Phi>>]
+ /// CHECK: Return [<<Ret>>]
+ public static Final testKeepCheckCast(Object o, boolean cond) {
+ Object x = new Final();
+ while (cond) {
+ x = o;
+ cond = false;
+ }
+ return (Final) x;
+ }
+
+ /// CHECK-START: void Main.testKeepInstanceOf(java.lang.Object, boolean) reference_type_propagation (after)
+ /// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
+ /// CHECK: <<Class:l\d+>> LoadClass
+ /// CHECK: InstanceOf [<<Phi>>,<<Class>>]
+
+ /// CHECK-START: void Main.testKeepInstanceOf(java.lang.Object, boolean) dead_code_elimination (after)
+ /// CHECK: <<Phi:l\d+>> Phi
+ /// CHECK: <<Class:l\d+>> LoadClass
+ /// CHECK: InstanceOf [<<Phi>>,<<Class>>]
+ public static void testKeepInstanceOf(Object o, boolean cond) {
+ Object x = new Final();
+ while (cond) {
+ x = o;
+ cond = false;
+ }
+ if (x instanceof Final) {
+ System.out.println("instanceof succeed");
+ } else {
+ System.out.println("instanceof failed");
+ }
+ }
+
+ /// CHECK-START: java.lang.String Main.testNoInline(java.lang.Object, boolean) reference_type_propagation (after)
+ /// CHECK: <<Phi:l\d+>> Phi klass:java.lang.Object
+ /// CHECK: <<NC:l\d+>> NullCheck [<<Phi>>]
+ /// CHECK: <<Ret:l\d+>> InvokeVirtual [<<NC>>] method_name:java.lang.Object.toString
+ /// CHECK: Return [<<Ret>>]
+
+ /// CHECK-START: java.lang.String Main.testNoInline(java.lang.Object, boolean) inliner (after)
+ /// CHECK: <<Phi:l\d+>> Phi
+ /// CHECK: <<NC:l\d+>> NullCheck [<<Phi>>]
+ /// CHECK: <<Ret:l\d+>> InvokeVirtual [<<NC>>] method_name:java.lang.Object.toString
+ /// CHECK: Return [<<Ret>>]
+ public static String testNoInline(Object o, boolean cond) {
+ Object x = new Final();
+ while (cond) {
+ x = o;
+ cond = false;
+ }
+ return x.toString();
+ }
+
+ public static void main(String[] args) {
+ try {
+ testKeepCheckCast(new Object(), true);
+ throw new Error("Expected check cast exception");
+ } catch (ClassCastException e) {
+ // expected
+ }
+
+ testKeepInstanceOf(new Object(), true);
+
+ if ("final".equals(testNoInline(new Object(), true))) {
+ throw new Error("Bad inlining");
+ }
+ }
+}
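A note on the final check: with cond == true the loop in testNoInline runs once, so x ends up as the caller's plain Object and x.toString() returns the default "java.lang.Object@..." form, never "final". Only if reference type propagation wrongly narrowed the Phi to Final (the b/25008765 bug the CHECK lines guard against) could the inliner substitute Final.toString() and trip the "Bad inlining" error.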