Merge "Implement polymorphic inlining."
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
index a19b36f..8fd20aa 100644
--- a/compiler/debug/elf_debug_loc_writer.h
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_DEBUG_ELF_DEBUG_LOC_WRITER_H_
#define ART_COMPILER_DEBUG_ELF_DEBUG_LOC_WRITER_H_
+#include <cstring>
#include <map>
#include "arch/instruction_set.h"
@@ -172,11 +173,6 @@
return;
}
- dwarf::Writer<> debug_loc(debug_loc_buffer);
- dwarf::Writer<> debug_ranges(debug_ranges_buffer);
- debug_info->WriteSecOffset(dwarf::DW_AT_location, debug_loc.size());
- debug_info->WriteSecOffset(dwarf::DW_AT_start_scope, debug_ranges.size());
-
std::vector<VariableLocation> variable_locations = GetVariableLocations(
method_info,
vreg,
@@ -185,6 +181,8 @@
dex_pc_high);
// Write .debug_loc entries.
+ dwarf::Writer<> debug_loc(debug_loc_buffer);
+ const size_t debug_loc_offset = debug_loc.size();
const bool is64bit = Is64BitInstructionSet(isa);
std::vector<uint8_t> expr_buffer;
for (const VariableLocation& variable_location : variable_locations) {
@@ -271,6 +269,8 @@
// Write .debug_ranges entries.
// This includes ranges where the variable is in scope but the location is not known.
+ dwarf::Writer<> debug_ranges(debug_ranges_buffer);
+ size_t debug_ranges_offset = debug_ranges.size();
for (size_t i = 0; i < variable_locations.size(); i++) {
uint32_t low_pc = variable_locations[i].low_pc;
uint32_t high_pc = variable_locations[i].high_pc;
@@ -294,6 +294,23 @@
debug_ranges.PushUint32(0);
debug_ranges.PushUint32(0);
}
+
+ // Simple de-duplication - check whether this entry is the same as the last one (or tail of it).
+ size_t debug_ranges_entry_size = debug_ranges.size() - debug_ranges_offset;
+ if (debug_ranges_offset >= debug_ranges_entry_size) {
+ size_t previous_offset = debug_ranges_offset - debug_ranges_entry_size;
+ if (memcmp(debug_ranges_buffer->data() + previous_offset,
+ debug_ranges_buffer->data() + debug_ranges_offset,
+ debug_ranges_entry_size) == 0) {
+ // Remove what we have just written and use the last entry instead.
+ debug_ranges_buffer->resize(debug_ranges_offset);
+ debug_ranges_offset = previous_offset;
+ }
+ }
+
+ // Write attributes to .debug_info.
+ debug_info->WriteSecOffset(dwarf::DW_AT_location, debug_loc_offset);
+ debug_info->WriteSecOffset(dwarf::DW_AT_start_scope, debug_ranges_offset);
}
} // namespace debug
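
The de-duplication added above only compares the freshly written .debug_ranges entry against the bytes immediately preceding it in the buffer. A minimal standalone sketch of the same idea, using a hypothetical AppendDeduplicated helper over a plain byte buffer rather than the ART writer types:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Append `entry` to `buffer`; if an identical run of bytes already ends
    // exactly at the write position, drop the new copy and reuse the offset
    // of the previous one.
    size_t AppendDeduplicated(std::vector<uint8_t>* buffer,
                              const std::vector<uint8_t>& entry) {
      size_t offset = buffer->size();
      buffer->insert(buffer->end(), entry.begin(), entry.end());
      if (offset >= entry.size() &&
          std::memcmp(buffer->data() + offset - entry.size(),
                      buffer->data() + offset,
                      entry.size()) == 0) {
        buffer->resize(offset);        // Remove what we have just written.
        return offset - entry.size();  // Point at the earlier, identical entry.
      }
      return offset;
    }

As in the patch, only the directly preceding entry (or its tail) is considered, so the check stays O(entry size) per write.
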
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index eb4915b..6f9dd6d 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -1679,9 +1679,7 @@
if (opcode == Instruction::NEW_INSTANCE) {
uint32_t type_idx = mir->dalvikInsn.vB;
if (cu_->compiler_driver->IsStringTypeIndex(type_idx, cu_->dex_file)) {
- // Change NEW_INSTANCE into CONST_4 of 0
- mir->dalvikInsn.opcode = Instruction::CONST_4;
- mir->dalvikInsn.vB = 0;
+ LOG(FATAL) << "Quick cannot compile String allocations";
}
} else if ((opcode == Instruction::INVOKE_DIRECT) ||
(opcode == Instruction::INVOKE_DIRECT_RANGE)) {
@@ -1689,52 +1687,13 @@
DexFileMethodInliner* inliner =
cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file);
if (inliner->IsStringInitMethodIndex(method_idx)) {
- bool is_range = (opcode == Instruction::INVOKE_DIRECT_RANGE);
- uint32_t orig_this_reg = is_range ? mir->dalvikInsn.vC : mir->dalvikInsn.arg[0];
- // Remove this pointer from string init and change to static call.
- mir->dalvikInsn.vA--;
- if (!is_range) {
- mir->dalvikInsn.opcode = Instruction::INVOKE_STATIC;
- for (uint32_t i = 0; i < mir->dalvikInsn.vA; i++) {
- mir->dalvikInsn.arg[i] = mir->dalvikInsn.arg[i + 1];
- }
- } else {
- mir->dalvikInsn.opcode = Instruction::INVOKE_STATIC_RANGE;
- mir->dalvikInsn.vC++;
- }
- // Insert a move-result instruction to the original this pointer reg.
- MIR* move_result_mir = static_cast<MIR *>(arena_->Alloc(sizeof(MIR), kArenaAllocMIR));
- move_result_mir->dalvikInsn.opcode = Instruction::MOVE_RESULT_OBJECT;
- move_result_mir->dalvikInsn.vA = orig_this_reg;
- move_result_mir->offset = mir->offset;
- move_result_mir->m_unit_index = mir->m_unit_index;
- bb->InsertMIRAfter(mir, move_result_mir);
- // Add additional moves if this pointer was copied to other registers.
- const VerifiedMethod* verified_method =
- cu_->compiler_driver->GetVerifiedMethod(cu_->dex_file, cu_->method_idx);
- DCHECK(verified_method != nullptr);
- const SafeMap<uint32_t, std::set<uint32_t>>& string_init_map =
- verified_method->GetStringInitPcRegMap();
- auto map_it = string_init_map.find(mir->offset);
- if (map_it != string_init_map.end()) {
- const std::set<uint32_t>& reg_set = map_it->second;
- for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
- MIR* move_mir = static_cast<MIR *>(arena_->Alloc(sizeof(MIR), kArenaAllocMIR));
- move_mir->dalvikInsn.opcode = Instruction::MOVE_OBJECT;
- move_mir->dalvikInsn.vA = *set_it;
- move_mir->dalvikInsn.vB = orig_this_reg;
- move_mir->offset = mir->offset;
- move_mir->m_unit_index = mir->m_unit_index;
- bb->InsertMIRAfter(move_result_mir, move_mir);
- }
- }
+ LOG(FATAL) << "Quick cannot compile String allocations";
}
}
}
}
}
-
bool MIRGraph::EliminateSuspendChecksGate() {
if (kLeafOptimization || // Incompatible (could create loops without suspend checks).
(cu_->disable_opt & (1 << kSuspendCheckElimination)) != 0 || // Disabled.
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 027290f..49768de 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -509,7 +509,8 @@
}
bool QuickCompiler::CanCompileInstruction(const MIR* mir,
- const DexFile& dex_file) const {
+ const DexFile& dex_file,
+ CompilationUnit* cu) const {
switch (mir->dalvikInsn.opcode) {
// Quick compiler won't support new instruction semantics to invoke-super into an interface
// method
@@ -522,6 +523,13 @@
// False if we are an interface i.e. !(java_access_flags & kAccInterface)
return class_def != nullptr && ((class_def->GetJavaAccessFlags() & kAccInterface) == 0);
}
+ case Instruction::NEW_INSTANCE: {
+ uint32_t type_idx = mir->dalvikInsn.vB;
+ if (cu->compiler_driver->IsStringTypeIndex(type_idx, cu->dex_file)) {
+ return false;
+ }
+ return true;
+ }
default:
return true;
}
@@ -567,7 +575,7 @@
<< MIRGraph::extended_mir_op_names_[opcode - kMirOpFirst];
}
return false;
- } else if (!CanCompileInstruction(mir, dex_file)) {
+ } else if (!CanCompileInstruction(mir, dex_file, cu)) {
VLOG(compiler) << "Cannot compile dalvik opcode : " << mir->dalvikInsn.opcode;
return false;
}
diff --git a/compiler/dex/quick/quick_compiler.h b/compiler/dex/quick/quick_compiler.h
index 55f45f1..f32cf86 100644
--- a/compiler/dex/quick/quick_compiler.h
+++ b/compiler/dex/quick/quick_compiler.h
@@ -75,7 +75,7 @@
explicit QuickCompiler(CompilerDriver* driver);
private:
- bool CanCompileInstruction(const MIR* mir, const DexFile& dex_file) const;
+ bool CanCompileInstruction(const MIR* mir, const DexFile& dex_file, CompilationUnit* cu) const;
std::unique_ptr<PassManager> pre_opt_pass_manager_;
std::unique_ptr<PassManager> post_opt_pass_manager_;
diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc
index 0355f11..9ae2164 100644
--- a/compiler/dex/verified_method.cc
+++ b/compiler/dex/verified_method.cc
@@ -37,20 +37,16 @@
namespace art {
-VerifiedMethod::VerifiedMethod(uint32_t encountered_error_types,
- bool has_runtime_throw,
- const SafeMap<uint32_t, std::set<uint32_t>>& string_init_pc_reg_map)
+VerifiedMethod::VerifiedMethod(uint32_t encountered_error_types, bool has_runtime_throw)
: encountered_error_types_(encountered_error_types),
- has_runtime_throw_(has_runtime_throw),
- string_init_pc_reg_map_(string_init_pc_reg_map) {
+ has_runtime_throw_(has_runtime_throw) {
}
const VerifiedMethod* VerifiedMethod::Create(verifier::MethodVerifier* method_verifier,
bool compile) {
std::unique_ptr<VerifiedMethod> verified_method(
new VerifiedMethod(method_verifier->GetEncounteredFailureTypes(),
- method_verifier->HasInstructionThatWillThrow(),
- method_verifier->GetStringInitPcRegMap()));
+ method_verifier->HasInstructionThatWillThrow()));
if (compile) {
/* Generate a register map. */
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index 74fcb07..12d0219 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -83,14 +83,8 @@
return has_runtime_throw_;
}
- const SafeMap<uint32_t, std::set<uint32_t>>& GetStringInitPcRegMap() const {
- return string_init_pc_reg_map_;
- }
-
private:
- VerifiedMethod(uint32_t encountered_error_types,
- bool has_runtime_throw,
- const SafeMap<uint32_t, std::set<uint32_t>>& string_init_pc_reg_map);
+ VerifiedMethod(uint32_t encountered_error_types, bool has_runtime_throw);
/*
* Generate the GC map for a method that has just been verified (i.e. we're doing this as part of
@@ -129,10 +123,6 @@
const uint32_t encountered_error_types_;
const bool has_runtime_throw_;
-
- // Copy of mapping generated by verifier of dex PCs of string init invocations
- // to the set of other registers that the receiver has been copied into.
- const SafeMap<uint32_t, std::set<uint32_t>> string_init_pc_reg_map_;
};
} // namespace art
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 3eda863..c500ea4 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -2107,7 +2107,6 @@
LocationSummary* locations = instruction->GetLocations();
Register res = locations->Out().AsRegister<Register>();
Primitive::Type in_type = instruction->InputAt(0)->GetType();
- bool gt_bias = instruction->IsGtBias();
bool isR6 = codegen_->GetInstructionSetFeatures().IsR6();
// 0 if: left == right
@@ -2141,6 +2140,7 @@
}
case Primitive::kPrimFloat: {
+ bool gt_bias = instruction->IsGtBias();
FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
MipsLabel done;
@@ -2180,6 +2180,7 @@
break;
}
case Primitive::kPrimDouble: {
+ bool gt_bias = instruction->IsGtBias();
FRegister lhs = locations->InAt(0).AsFpuRegister<FRegister>();
FRegister rhs = locations->InAt(1).AsFpuRegister<FRegister>();
MipsLabel done;
@@ -3953,28 +3954,19 @@
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
-void InstructionCodeGeneratorMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- if (TryGenerateIntrinsicCode(invoke, codegen_)) {
- return;
- }
-
+void CodeGeneratorMIPS::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_location) {
LocationSummary* locations = invoke->GetLocations();
Location receiver = locations->InAt(0);
- Register temp = invoke->GetLocations()->GetTemp(0).AsRegister<Register>();
+ Register temp = temp_location.AsRegister<Register>();
size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
invoke->GetVTableIndex(), kMipsPointerSize).SizeValue();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMipsWordSize);
// temp = object->GetClass();
- if (receiver.IsStackSlot()) {
- __ LoadFromOffset(kLoadWord, temp, SP, receiver.GetStackIndex());
- __ LoadFromOffset(kLoadWord, temp, temp, class_offset);
- } else {
- DCHECK(receiver.IsRegister());
- __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
- }
- codegen_->MaybeRecordImplicitNullCheck(invoke);
+ DCHECK(receiver.IsRegister());
+ __ LoadFromOffset(kLoadWord, temp, receiver.AsRegister<Register>(), class_offset);
+ MaybeRecordImplicitNullCheck(invoke);
// temp = temp->GetMethodAt(method_offset);
__ LoadFromOffset(kLoadWord, temp, temp, method_offset);
// T9 = temp->GetEntryPoint();
@@ -3982,6 +3974,14 @@
// T9();
__ Jalr(T9);
__ Nop();
+}
+
+void InstructionCodeGeneratorMIPS::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ if (TryGenerateIntrinsicCode(invoke, codegen_)) {
+ return;
+ }
+
+ codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
DCHECK(!codegen_->IsLeafMethod());
codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 12964b0..dd0641c 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -353,10 +353,7 @@
MethodReference target_method) OVERRIDE;
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
- void GenerateVirtualCall(HInvokeVirtual* invoke ATTRIBUTE_UNUSED,
- Location temp ATTRIBUTE_UNUSED) OVERRIDE {
- UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
- }
+ void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
Primitive::Type type ATTRIBUTE_UNUSED) OVERRIDE {
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 119084e..e3a44f1 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -1727,7 +1727,6 @@
LocationSummary* locations = instruction->GetLocations();
GpuRegister res = locations->Out().AsRegister<GpuRegister>();
Primitive::Type in_type = instruction->InputAt(0)->GetType();
- bool gt_bias = instruction->IsGtBias();
// 0 if: left == right
// 1 if: left > right
@@ -1769,7 +1768,7 @@
__ CmpEqS(FTMP, lhs, rhs);
__ LoadConst32(res, 0);
__ Bc1nez(FTMP, &done);
- if (gt_bias) {
+ if (instruction->IsGtBias()) {
__ CmpLtS(FTMP, lhs, rhs);
__ LoadConst32(res, -1);
__ Bc1nez(FTMP, &done);
@@ -1791,7 +1790,7 @@
__ CmpEqD(FTMP, lhs, rhs);
__ LoadConst32(res, 0);
__ Bc1nez(FTMP, &done);
- if (gt_bias) {
+ if (instruction->IsGtBias()) {
__ CmpLtD(FTMP, lhs, rhs);
__ LoadConst32(res, -1);
__ Bc1nez(FTMP, &done);
@@ -4258,4 +4257,3 @@
} // namespace mips64
} // namespace art
-
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 1161253..eb7315a 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -349,7 +349,7 @@
void MoveFromReturnRegister(Location trg ATTRIBUTE_UNUSED,
Primitive::Type type ATTRIBUTE_UNUSED) OVERRIDE {
- UNIMPLEMENTED(FATAL);
+ UNIMPLEMENTED(FATAL) << "Not implemented on MIPS64";
}
private:
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index 57452cc..7ddabde 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -18,8 +18,28 @@
namespace art {
-// This visitor tries to simplify operations that yield a constant. For example
-// `input * 0` is replaced by a null constant.
+// This visitor tries to simplify instructions that can be evaluated
+// as constants.
+class HConstantFoldingVisitor : public HGraphDelegateVisitor {
+ public:
+ explicit HConstantFoldingVisitor(HGraph* graph)
+ : HGraphDelegateVisitor(graph) {}
+
+ private:
+ void VisitBasicBlock(HBasicBlock* block) OVERRIDE;
+
+ void VisitUnaryOperation(HUnaryOperation* inst) OVERRIDE;
+ void VisitBinaryOperation(HBinaryOperation* inst) OVERRIDE;
+
+ void VisitTypeConversion(HTypeConversion* inst) OVERRIDE;
+ void VisitDivZeroCheck(HDivZeroCheck* inst) OVERRIDE;
+
+ DISALLOW_COPY_AND_ASSIGN(HConstantFoldingVisitor);
+};
+
+// This visitor tries to simplify operations with an absorbing input,
+// yielding a constant. For example `input * 0` is replaced by a
+// null constant.
class InstructionWithAbsorbingInputSimplifier : public HGraphVisitor {
public:
explicit InstructionWithAbsorbingInputSimplifier(HGraph* graph) : HGraphVisitor(graph) {}
@@ -44,59 +64,69 @@
void VisitXor(HXor* instruction) OVERRIDE;
};
+
void HConstantFolding::Run() {
- InstructionWithAbsorbingInputSimplifier simplifier(graph_);
+ HConstantFoldingVisitor visitor(graph_);
// Process basic blocks in reverse post-order in the dominator tree,
// so that an instruction turned into a constant, used as input of
// another instruction, may possibly be used to turn that second
// instruction into a constant as well.
- for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
- // Traverse this block's instructions in (forward) order and
- // replace the ones that can be statically evaluated by a
- // compile-time counterpart.
- for (HInstructionIterator inst_it(block->GetInstructions());
- !inst_it.Done(); inst_it.Advance()) {
- HInstruction* inst = inst_it.Current();
- if (inst->IsBinaryOperation()) {
- // Constant folding: replace `op(a, b)' with a constant at
- // compile time if `a' and `b' are both constants.
- HConstant* constant = inst->AsBinaryOperation()->TryStaticEvaluation();
- if (constant != nullptr) {
- inst->ReplaceWith(constant);
- inst->GetBlock()->RemoveInstruction(inst);
- } else {
- inst->Accept(&simplifier);
- }
- } else if (inst->IsUnaryOperation()) {
- // Constant folding: replace `op(a)' with a constant at compile
- // time if `a' is a constant.
- HConstant* constant = inst->AsUnaryOperation()->TryStaticEvaluation();
- if (constant != nullptr) {
- inst->ReplaceWith(constant);
- inst->GetBlock()->RemoveInstruction(inst);
- }
- } else if (inst->IsTypeConversion()) {
- // Constant folding: replace `TypeConversion(a)' with a constant at
- // compile time if `a' is a constant.
- HConstant* constant = inst->AsTypeConversion()->TryStaticEvaluation();
- if (constant != nullptr) {
- inst->ReplaceWith(constant);
- inst->GetBlock()->RemoveInstruction(inst);
- }
- } else if (inst->IsDivZeroCheck()) {
- // We can safely remove the check if the input is a non-null constant.
- HDivZeroCheck* check = inst->AsDivZeroCheck();
- HInstruction* check_input = check->InputAt(0);
- if (check_input->IsConstant() && !check_input->AsConstant()->IsZero()) {
- check->ReplaceWith(check_input);
- check->GetBlock()->RemoveInstruction(check);
- }
- }
- }
+ visitor.VisitReversePostOrder();
+}
+
+
+void HConstantFoldingVisitor::VisitBasicBlock(HBasicBlock* block) {
+ // Traverse this block's instructions (phis don't need to be
+ // processed) in (forward) order and replace the ones that can be
+ // statically evaluated by a compile-time counterpart.
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ it.Current()->Accept(this);
}
}
+void HConstantFoldingVisitor::VisitUnaryOperation(HUnaryOperation* inst) {
+ // Constant folding: replace `op(a)' with a constant at compile
+ // time if `a' is a constant.
+ HConstant* constant = inst->TryStaticEvaluation();
+ if (constant != nullptr) {
+ inst->ReplaceWith(constant);
+ inst->GetBlock()->RemoveInstruction(inst);
+ }
+}
+
+void HConstantFoldingVisitor::VisitBinaryOperation(HBinaryOperation* inst) {
+ // Constant folding: replace `op(a, b)' with a constant at
+ // compile time if `a' and `b' are both constants.
+ HConstant* constant = inst->TryStaticEvaluation();
+ if (constant != nullptr) {
+ inst->ReplaceWith(constant);
+ inst->GetBlock()->RemoveInstruction(inst);
+ } else {
+ InstructionWithAbsorbingInputSimplifier simplifier(GetGraph());
+ inst->Accept(&simplifier);
+ }
+}
+
+void HConstantFoldingVisitor::VisitTypeConversion(HTypeConversion* inst) {
+ // Constant folding: replace `TypeConversion(a)' with a constant at
+ // compile time if `a' is a constant.
+ HConstant* constant = inst->AsTypeConversion()->TryStaticEvaluation();
+ if (constant != nullptr) {
+ inst->ReplaceWith(constant);
+ inst->GetBlock()->RemoveInstruction(inst);
+ }
+}
+
+void HConstantFoldingVisitor::VisitDivZeroCheck(HDivZeroCheck* inst) {
+ // We can safely remove the check if the input is a non-null constant.
+ HInstruction* check_input = inst->InputAt(0);
+ if (check_input->IsConstant() && !check_input->AsConstant()->IsZero()) {
+ inst->ReplaceWith(check_input);
+ inst->GetBlock()->RemoveInstruction(inst);
+ }
+}
+
+
void InstructionWithAbsorbingInputSimplifier::VisitShift(HBinaryOperation* instruction) {
DCHECK(instruction->IsShl() || instruction->IsShr() || instruction->IsUShr());
HInstruction* left = instruction->GetLeft();
@@ -178,7 +208,7 @@
((input_cst->IsFloatConstant() && input_cst->AsFloatConstant()->IsNaN()) ||
(input_cst->IsDoubleConstant() && input_cst->AsDoubleConstant()->IsNaN()))) {
// Replace code looking like
- // CMP{G,L} dst, src, NaN
+ // CMP{G,L}-{FLOAT,DOUBLE} dst, src, NaN
// with
// CONSTANT +1 (gt bias)
// or
diff --git a/compiler/optimizing/constant_folding.h b/compiler/optimizing/constant_folding.h
index 2698b2d..e0bc6f7 100644
--- a/compiler/optimizing/constant_folding.h
+++ b/compiler/optimizing/constant_folding.h
@@ -26,6 +26,13 @@
* Optimization pass performing a simple constant-expression
* evaluation on the SSA form.
*
+ * Note that graph simplifications producing a constant should be
+ * implemented in art::HConstantFolding, while graph simplifications
+ * not producing constants should be implemented in
+ * art::InstructionSimplifier. (This convention is a choice that was
+ * made during the development of these parts of the compiler and is
+ * not bound by any technical requirement.)
+ *
* This class is named art::HConstantFolding to avoid name
* clashes with the art::ConstantPropagation class defined in
* compiler/dex/post_opt_passes.h.
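
To make the convention concrete, a few illustrative HIR rewrites (schematic, not actual pass output): the first group produces constants and belongs to the folding pass, the second keeps a non-constant operand and belongs to the simplifier.

    // art::HConstantFolding (including InstructionWithAbsorbingInputSimplifier):
    //   HAdd(HIntConstant(2), HIntConstant(3))  ->  HIntConstant(5)
    //   HNeg(HIntConstant(7))                   ->  HIntConstant(-7)
    //   HMul(x, HIntConstant(0))                ->  HIntConstant(0)  // absorbing input
    //
    // art::InstructionSimplifier:
    //   HMul(x, HIntConstant(1))                ->  x
    //   HAdd(x, HIntConstant(0))                ->  x
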
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 4cf0eb1..c0263e4 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -384,6 +384,13 @@
<< array_set->GetValueCanBeNull() << std::noboolalpha;
}
+ void VisitCompare(HCompare* compare) OVERRIDE {
+ ComparisonBias bias = compare->GetBias();
+ StartAttributeStream("bias") << (bias == ComparisonBias::kGtBias
+ ? "gt"
+ : (bias == ComparisonBias::kLtBias ? "lt" : "none"));
+ }
+
void VisitInvoke(HInvoke* invoke) OVERRIDE {
StartAttributeStream("dex_file_index") << invoke->GetDexMethodIndex();
StartAttributeStream("method_name") << PrettyMethod(
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index a48d06f..13d3f75 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -92,6 +92,7 @@
void SimplifySystemArrayCopy(HInvoke* invoke);
void SimplifyStringEquals(HInvoke* invoke);
void SimplifyCompare(HInvoke* invoke, bool has_zero_op);
+ void SimplifyIsNaN(HInvoke* invoke);
OptimizingCompilerStats* stats_;
bool simplification_occurred_ = false;
@@ -1551,6 +1552,16 @@
invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, compare);
}
+void InstructionSimplifierVisitor::SimplifyIsNaN(HInvoke* invoke) {
+ DCHECK(invoke->IsInvokeStaticOrDirect());
+ uint32_t dex_pc = invoke->GetDexPc();
+ // IsNaN(x) is the same as x != x.
+ HInstruction* x = invoke->InputAt(0);
+ HCondition* condition = new (GetGraph()->GetArena()) HNotEqual(x, x, dex_pc);
+ condition->SetBias(ComparisonBias::kLtBias);
+ invoke->GetBlock()->ReplaceAndRemoveInstructionWith(invoke, condition);
+}
+
void InstructionSimplifierVisitor::VisitInvoke(HInvoke* instruction) {
if (instruction->GetIntrinsic() == Intrinsics::kStringEquals) {
SimplifyStringEquals(instruction);
@@ -1568,6 +1579,9 @@
} else if (instruction->GetIntrinsic() == Intrinsics::kIntegerSignum ||
instruction->GetIntrinsic() == Intrinsics::kLongSignum) {
SimplifyCompare(instruction, /* is_signum */ true);
+ } else if (instruction->GetIntrinsic() == Intrinsics::kFloatIsNaN ||
+ instruction->GetIntrinsic() == Intrinsics::kDoubleIsNaN) {
+ SimplifyIsNaN(instruction);
}
}
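
The IsNaN rewrite above leans on NaN being the only value that is unordered with (and therefore unequal to) itself; the kLtBias only matters once the resulting HNotEqual is materialized against a floating-point compare, where it makes the unordered case take the not-equal outcome. A tiny standalone check of the underlying identity:

    #include <cassert>
    #include <cmath>

    int main() {
      double nan = std::nan("");
      double one = 1.0;
      // Only NaN compares unequal to itself, so isNaN(x) == (x != x).
      assert(std::isnan(nan) == (nan != nan));
      assert(std::isnan(one) == (one != one));
      return 0;
    }
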
diff --git a/compiler/optimizing/instruction_simplifier.h b/compiler/optimizing/instruction_simplifier.h
index cc4b6f6..7905104 100644
--- a/compiler/optimizing/instruction_simplifier.h
+++ b/compiler/optimizing/instruction_simplifier.h
@@ -25,6 +25,13 @@
/**
* Implements optimizations specific to each instruction.
+ *
+ * Note that graph simplifications producing a constant should be
+ * implemented in art::HConstantFolding, while graph simplifications
+ * not producing constants should be implemented in
+ * art::InstructionSimplifier. (This convention is a choice that was
+ * made during the development of these parts of the compiler and is
+ * not bound by any technical requirement.)
*/
class InstructionSimplifier : public HOptimization {
public:
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 00a158b..ea8669f 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -1858,8 +1858,6 @@
UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
-UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
-UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
@@ -1867,6 +1865,8 @@
UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
// Handled as HIR instructions.
+UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
+UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 4140d94..8741fd2 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1618,8 +1618,6 @@
UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
-UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
-UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
@@ -1627,6 +1625,8 @@
UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
// Handled as HIR instructions.
+UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
+UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 2294713..c862964 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -113,11 +113,10 @@
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(),
Location::RegisterLocation(A0));
- codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
} else {
- UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
- UNREACHABLE();
+ codegen->GenerateVirtualCall(invoke_->AsInvokeVirtual(), Location::RegisterLocation(A0));
}
+ codegen->RecordPcInfo(invoke_, invoke_->GetDexPc(), this);
// Copy the result back to the expected output.
Location out = invoke_->GetLocations()->Out();
@@ -825,6 +824,220 @@
GetAssembler());
}
+// byte libcore.io.Memory.peekByte(long address)
+void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekByte(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekByte(HInvoke* invoke) {
+ MipsAssembler* assembler = GetAssembler();
+ Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
+ Register out = invoke->GetLocations()->Out().AsRegister<Register>();
+
+ __ Lb(out, adr, 0);
+}
+
+// short libcore.io.Memory.peekShort(long address)
+void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekShortNative(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekShortNative(HInvoke* invoke) {
+ MipsAssembler* assembler = GetAssembler();
+ Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
+ Register out = invoke->GetLocations()->Out().AsRegister<Register>();
+
+ if (IsR6()) {
+ __ Lh(out, adr, 0);
+ } else if (IsR2OrNewer()) {
+ // Unlike for words, there are no lhl/lhr instructions to load
+ // unaligned halfwords so the code loads individual bytes, in case
+ // the address isn't halfword-aligned, and assembles them into a
+ // signed halfword.
+ __ Lb(AT, adr, 1); // This byte must be sign-extended.
+ __ Lb(out, adr, 0); // This byte can be either sign-extended, or
+ // zero-extended because the following
+ // instruction overwrites the sign bits.
+ __ Ins(out, AT, 8, 24);
+ } else {
+ __ Lbu(AT, adr, 0); // This byte must be zero-extended. If it's not
+ // the "or" instruction below will destroy the upper
+ // 24 bits of the final result.
+ __ Lb(out, adr, 1); // This byte must be sign-extended.
+ __ Sll(out, out, 8);
+ __ Or(out, out, AT);
+ }
+}
+
+// int libcore.io.Memory.peekInt(long address)
+void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekIntNative(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekIntNative(HInvoke* invoke) {
+ MipsAssembler* assembler = GetAssembler();
+ Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
+ Register out = invoke->GetLocations()->Out().AsRegister<Register>();
+
+ if (IsR6()) {
+ __ Lw(out, adr, 0);
+ } else {
+ __ Lwr(out, adr, 0);
+ __ Lwl(out, adr, 3);
+ }
+}
+
+// long libcore.io.Memory.peekLong(long address)
+void IntrinsicLocationsBuilderMIPS::VisitMemoryPeekLongNative(HInvoke* invoke) {
+ CreateIntToIntLocations(arena_, invoke, Location::kOutputOverlap);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMemoryPeekLongNative(HInvoke* invoke) {
+ MipsAssembler* assembler = GetAssembler();
+ Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
+ Register out_lo = invoke->GetLocations()->Out().AsRegisterPairLow<Register>();
+ Register out_hi = invoke->GetLocations()->Out().AsRegisterPairHigh<Register>();
+
+ if (IsR6()) {
+ __ Lw(out_lo, adr, 0);
+ __ Lw(out_hi, adr, 4);
+ } else {
+ __ Lwr(out_lo, adr, 0);
+ __ Lwl(out_lo, adr, 3);
+ __ Lwr(out_hi, adr, 4);
+ __ Lwl(out_hi, adr, 7);
+ }
+}
+
+static void CreateIntIntToVoidLocations(ArenaAllocator* arena, HInvoke* invoke) {
+ LocationSummary* locations = new (arena) LocationSummary(invoke,
+ LocationSummary::kNoCall,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+}
+
+// void libcore.io.Memory.pokeByte(long address, byte value)
+void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeByte(HInvoke* invoke) {
+ CreateIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeByte(HInvoke* invoke) {
+ MipsAssembler* assembler = GetAssembler();
+ Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
+ Register val = invoke->GetLocations()->InAt(1).AsRegister<Register>();
+
+ __ Sb(val, adr, 0);
+}
+
+// void libcore.io.Memory.pokeShort(long address, short value)
+void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeShortNative(HInvoke* invoke) {
+ CreateIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeShortNative(HInvoke* invoke) {
+ MipsAssembler* assembler = GetAssembler();
+ Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
+ Register val = invoke->GetLocations()->InAt(1).AsRegister<Register>();
+
+ if (IsR6()) {
+ __ Sh(val, adr, 0);
+ } else {
+ // Unlike for words, there are no shl/shr instructions to store
+ // unaligned halfwords so the code stores individual bytes, in case
+ // the address isn't halfword-aligned.
+ __ Sb(val, adr, 0);
+ __ Srl(AT, val, 8);
+ __ Sb(AT, adr, 1);
+ }
+}
+
+// void libcore.io.Memory.pokeInt(long address, int value)
+void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeIntNative(HInvoke* invoke) {
+ CreateIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeIntNative(HInvoke* invoke) {
+ MipsAssembler* assembler = GetAssembler();
+ Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
+ Register val = invoke->GetLocations()->InAt(1).AsRegister<Register>();
+
+ if (IsR6()) {
+ __ Sw(val, adr, 0);
+ } else {
+ __ Swr(val, adr, 0);
+ __ Swl(val, adr, 3);
+ }
+}
+
+// void libcore.io.Memory.pokeLong(long address, long value)
+void IntrinsicLocationsBuilderMIPS::VisitMemoryPokeLongNative(HInvoke* invoke) {
+ CreateIntIntToVoidLocations(arena_, invoke);
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitMemoryPokeLongNative(HInvoke* invoke) {
+ MipsAssembler* assembler = GetAssembler();
+ Register adr = invoke->GetLocations()->InAt(0).AsRegisterPairLow<Register>();
+ Register val_lo = invoke->GetLocations()->InAt(1).AsRegisterPairLow<Register>();
+ Register val_hi = invoke->GetLocations()->InAt(1).AsRegisterPairHigh<Register>();
+
+ if (IsR6()) {
+ __ Sw(val_lo, adr, 0);
+ __ Sw(val_hi, adr, 4);
+ } else {
+ __ Swr(val_lo, adr, 0);
+ __ Swl(val_lo, adr, 3);
+ __ Swr(val_hi, adr, 4);
+ __ Swl(val_hi, adr, 7);
+ }
+}
+
+// char java.lang.String.charAt(int index)
+void IntrinsicLocationsBuilderMIPS::VisitStringCharAt(HInvoke* invoke) {
+ LocationSummary* locations = new (arena_) LocationSummary(invoke,
+ LocationSummary::kCallOnSlowPath,
+ kIntrinsified);
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::SameAsFirstInput());
+}
+
+void IntrinsicCodeGeneratorMIPS::VisitStringCharAt(HInvoke* invoke) {
+ LocationSummary* locations = invoke->GetLocations();
+ MipsAssembler* assembler = GetAssembler();
+
+ // Location of reference to data array
+ const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
+ // Location of count
+ const int32_t count_offset = mirror::String::CountOffset().Int32Value();
+
+ Register obj = locations->InAt(0).AsRegister<Register>();
+ Register idx = locations->InAt(1).AsRegister<Register>();
+ Register out = locations->Out().AsRegister<Register>();
+
+ // TODO: Maybe we can support range check elimination. Overall,
+ // though, I think it's not worth the cost.
+ // TODO: For simplicity, the index parameter is requested in a
+ // register, so different from Quick we will not optimize the
+ // code for constants (which would save a register).
+
+ SlowPathCodeMIPS* slow_path = new (GetAllocator()) IntrinsicSlowPathMIPS(invoke);
+ codegen_->AddSlowPath(slow_path);
+
+ // Load the string size
+ __ Lw(TMP, obj, count_offset);
+ codegen_->MaybeRecordImplicitNullCheck(invoke);
+ // Revert to slow path if idx is too large, or negative
+ __ Bgeu(idx, TMP, slow_path->GetEntryLabel());
+
+ // out = obj[2*idx].
+ __ Sll(TMP, idx, 1); // idx * 2
+ __ Addu(TMP, TMP, obj); // Address of char at location idx
+ __ Lhu(out, TMP, value_offset); // Load char at location idx
+
+ __ Bind(slow_path->GetExitLabel());
+}
+
// boolean java.lang.String.equals(Object anObject)
void IntrinsicLocationsBuilderMIPS::VisitStringEquals(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
@@ -956,14 +1169,6 @@
UNIMPLEMENTED_INTRINSIC(MathRint)
UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(MathRoundFloat)
-UNIMPLEMENTED_INTRINSIC(MemoryPeekByte)
-UNIMPLEMENTED_INTRINSIC(MemoryPeekIntNative)
-UNIMPLEMENTED_INTRINSIC(MemoryPeekLongNative)
-UNIMPLEMENTED_INTRINSIC(MemoryPeekShortNative)
-UNIMPLEMENTED_INTRINSIC(MemoryPokeByte)
-UNIMPLEMENTED_INTRINSIC(MemoryPokeIntNative)
-UNIMPLEMENTED_INTRINSIC(MemoryPokeLongNative)
-UNIMPLEMENTED_INTRINSIC(MemoryPokeShortNative)
UNIMPLEMENTED_INTRINSIC(ThreadCurrentThread)
UNIMPLEMENTED_INTRINSIC(UnsafeGet)
UNIMPLEMENTED_INTRINSIC(UnsafeGetVolatile)
@@ -983,7 +1188,6 @@
UNIMPLEMENTED_INTRINSIC(UnsafeCASInt)
UNIMPLEMENTED_INTRINSIC(UnsafeCASLong)
UNIMPLEMENTED_INTRINSIC(UnsafeCASObject)
-UNIMPLEMENTED_INTRINSIC(StringCharAt)
UNIMPLEMENTED_INTRINSIC(StringCompareTo)
UNIMPLEMENTED_INTRINSIC(StringIndexOf)
UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
@@ -1016,8 +1220,6 @@
UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
-UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
-UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
@@ -1025,6 +1227,8 @@
UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
// Handled as HIR instructions.
+UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
+UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
UNIMPLEMENTED_INTRINSIC(IntegerCompare)
UNIMPLEMENTED_INTRINSIC(LongCompare)
UNIMPLEMENTED_INTRINSIC(IntegerSignum)
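
In VisitStringCharAt above, the single Bgeu against the string count handles both bounds violations at once: a negative index reinterpreted as unsigned becomes a very large value and fails the same check as an index greater than or equal to the length. A small C++ sketch of that trick (hypothetical helper name):

    #include <cassert>
    #include <cstdint>

    // One unsigned comparison covers both "index negative" and "index >= length".
    bool IndexOutOfBounds(int32_t idx, int32_t length) {
      return static_cast<uint32_t>(idx) >= static_cast<uint32_t>(length);
    }

    int main() {
      assert(IndexOutOfBounds(-1, 10));    // negative index
      assert(IndexOutOfBounds(10, 10));    // idx == length
      assert(!IndexOutOfBounds(9, 10));    // last valid index
      return 0;
    }
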
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index ac28503..cf3a365 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1764,8 +1764,6 @@
UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
-UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
-UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
@@ -1773,6 +1771,8 @@
UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
// Handled as HIR instructions.
+UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
+UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
UNIMPLEMENTED_INTRINSIC(IntegerCompare)
UNIMPLEMENTED_INTRINSIC(LongCompare)
UNIMPLEMENTED_INTRINSIC(IntegerSignum)
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 22cefe8..260a877 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -2635,8 +2635,6 @@
UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
-UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
-UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
UNIMPLEMENTED_INTRINSIC(IntegerHighestOneBit)
UNIMPLEMENTED_INTRINSIC(LongHighestOneBit)
@@ -2644,6 +2642,8 @@
UNIMPLEMENTED_INTRINSIC(LongLowestOneBit)
// Handled as HIR instructions.
+UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
+UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index c9a4344..93e8c00 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -2717,10 +2717,10 @@
UNIMPLEMENTED_INTRINSIC(FloatIsInfinite)
UNIMPLEMENTED_INTRINSIC(DoubleIsInfinite)
-UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
-UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
// Handled as HIR instructions.
+UNIMPLEMENTED_INTRINSIC(FloatIsNaN)
+UNIMPLEMENTED_INTRINSIC(DoubleIsNaN)
UNIMPLEMENTED_INTRINSIC(IntegerRotateLeft)
UNIMPLEMENTED_INTRINSIC(LongRotateLeft)
UNIMPLEMENTED_INTRINSIC(IntegerRotateRight)
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 95d0af5..f36dc6e 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -15,6 +15,8 @@
*/
#include "nodes.h"
+#include <cfloat>
+
#include "code_generator.h"
#include "common_dominator.h"
#include "ssa_builder.h"
@@ -27,6 +29,12 @@
namespace art {
+// Enable floating-point static evaluation during constant folding
+// only if all floating-point operations and constants evaluate in the
+// range and precision of the type used (i.e., 32-bit float, 64-bit
+// double).
+static constexpr bool kEnableFloatingPointStaticEvaluation = (FLT_EVAL_METHOD == 0);
+
void HGraph::InitializeInexactObjectRTI(StackHandleScopeCollection* handles) {
ScopedObjectAccess soa(Thread::Current());
// Create the inexact Object reference type and store it in the HGraph.
@@ -1159,6 +1167,12 @@
return Evaluate(GetInput()->AsIntConstant());
} else if (GetInput()->IsLongConstant()) {
return Evaluate(GetInput()->AsLongConstant());
+ } else if (kEnableFloatingPointStaticEvaluation) {
+ if (GetInput()->IsFloatConstant()) {
+ return Evaluate(GetInput()->AsFloatConstant());
+ } else if (GetInput()->IsDoubleConstant()) {
+ return Evaluate(GetInput()->AsDoubleConstant());
+ }
}
return nullptr;
}
@@ -1178,6 +1192,12 @@
}
} else if (GetLeft()->IsNullConstant() && GetRight()->IsNullConstant()) {
return Evaluate(GetLeft()->AsNullConstant(), GetRight()->AsNullConstant());
+ } else if (kEnableFloatingPointStaticEvaluation) {
+ if (GetLeft()->IsFloatConstant() && GetRight()->IsFloatConstant()) {
+ return Evaluate(GetLeft()->AsFloatConstant(), GetRight()->AsFloatConstant());
+ } else if (GetLeft()->IsDoubleConstant() && GetRight()->IsDoubleConstant()) {
+ return Evaluate(GetLeft()->AsDoubleConstant(), GetRight()->AsDoubleConstant());
+ }
}
return nullptr;
}
@@ -1205,6 +1225,20 @@
}
}
+std::ostream& operator<<(std::ostream& os, const ComparisonBias& rhs) {
+ switch (rhs) {
+ case ComparisonBias::kNoBias:
+ return os << "no_bias";
+ case ComparisonBias::kGtBias:
+ return os << "gt_bias";
+ case ComparisonBias::kLtBias:
+ return os << "lt_bias";
+ default:
+ LOG(FATAL) << "Unknown ComparisonBias: " << static_cast<int>(rhs);
+ UNREACHABLE();
+ }
+}
+
bool HCondition::IsBeforeWhenDisregardMoves(HInstruction* instruction) const {
return this == instruction->GetPreviousDisregardingMoves();
}
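
The kEnableFloatingPointStaticEvaluation guard above relies on FLT_EVAL_METHOD from <cfloat>: a value of 0 means the host compiler evaluates float and double expressions in the range and precision of their declared types, so host-side folding matches what the target would compute; other values (for example 2 on x87, where intermediates are kept in long double) could yield different bits. A quick standalone probe:

    #include <cfloat>
    #include <cstdio>

    int main() {
      // 0: evaluate in the declared type; 1: float promoted to double;
      // 2: everything evaluated as long double; negative: indeterminable.
      std::printf("FLT_EVAL_METHOD = %d\n", static_cast<int>(FLT_EVAL_METHOD));
      return 0;
    }
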
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 7567510..f7307ad 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2399,7 +2399,7 @@
}
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- DCHECK(other->IsIntConstant());
+ DCHECK(other->IsIntConstant()) << other->DebugName();
return other->AsIntConstant()->value_ == value_;
}
@@ -2432,7 +2432,7 @@
uint64_t GetValueAsUint64() const OVERRIDE { return value_; }
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- DCHECK(other->IsLongConstant());
+ DCHECK(other->IsLongConstant()) << other->DebugName();
return other->AsLongConstant()->value_ == value_;
}
@@ -2454,6 +2454,92 @@
DISALLOW_COPY_AND_ASSIGN(HLongConstant);
};
+class HFloatConstant : public HConstant {
+ public:
+ float GetValue() const { return value_; }
+
+ uint64_t GetValueAsUint64() const OVERRIDE {
+ return static_cast<uint64_t>(bit_cast<uint32_t, float>(value_));
+ }
+
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ DCHECK(other->IsFloatConstant()) << other->DebugName();
+ return other->AsFloatConstant()->GetValueAsUint64() == GetValueAsUint64();
+ }
+
+ size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+
+ bool IsMinusOne() const OVERRIDE {
+ return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>((-1.0f));
+ }
+ bool IsZero() const OVERRIDE {
+ return value_ == 0.0f;
+ }
+ bool IsOne() const OVERRIDE {
+ return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>(1.0f);
+ }
+ bool IsNaN() const {
+ return std::isnan(value_);
+ }
+
+ DECLARE_INSTRUCTION(FloatConstant);
+
+ private:
+ explicit HFloatConstant(float value, uint32_t dex_pc = kNoDexPc)
+ : HConstant(Primitive::kPrimFloat, dex_pc), value_(value) {}
+ explicit HFloatConstant(int32_t value, uint32_t dex_pc = kNoDexPc)
+ : HConstant(Primitive::kPrimFloat, dex_pc), value_(bit_cast<float, int32_t>(value)) {}
+
+ const float value_;
+
+ // Only the SsaBuilder and HGraph can create floating-point constants.
+ friend class SsaBuilder;
+ friend class HGraph;
+ DISALLOW_COPY_AND_ASSIGN(HFloatConstant);
+};
+
+class HDoubleConstant : public HConstant {
+ public:
+ double GetValue() const { return value_; }
+
+ uint64_t GetValueAsUint64() const OVERRIDE { return bit_cast<uint64_t, double>(value_); }
+
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ DCHECK(other->IsDoubleConstant()) << other->DebugName();
+ return other->AsDoubleConstant()->GetValueAsUint64() == GetValueAsUint64();
+ }
+
+ size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
+
+ bool IsMinusOne() const OVERRIDE {
+ return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>((-1.0));
+ }
+ bool IsZero() const OVERRIDE {
+ return value_ == 0.0;
+ }
+ bool IsOne() const OVERRIDE {
+ return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>(1.0);
+ }
+ bool IsNaN() const {
+ return std::isnan(value_);
+ }
+
+ DECLARE_INSTRUCTION(DoubleConstant);
+
+ private:
+ explicit HDoubleConstant(double value, uint32_t dex_pc = kNoDexPc)
+ : HConstant(Primitive::kPrimDouble, dex_pc), value_(value) {}
+ explicit HDoubleConstant(int64_t value, uint32_t dex_pc = kNoDexPc)
+ : HConstant(Primitive::kPrimDouble, dex_pc), value_(bit_cast<double, int64_t>(value)) {}
+
+ const double value_;
+
+ // Only the SsaBuilder and HGraph can create floating-point constants.
+ friend class SsaBuilder;
+ friend class HGraph;
+ DISALLOW_COPY_AND_ASSIGN(HDoubleConstant);
+};
+
// Conditional branch. A block ending with an HIf instruction must have
// two successors.
class HIf : public HTemplateInstruction<1> {
@@ -2655,14 +2741,16 @@
return true;
}
- // Try to statically evaluate `operation` and return a HConstant
- // containing the result of this evaluation. If `operation` cannot
+ // Try to statically evaluate `this` and return a HConstant
+ // containing the result of this evaluation. If `this` cannot
// be evaluated as a constant, return null.
HConstant* TryStaticEvaluation() const;
// Apply this operation to `x`.
virtual HConstant* Evaluate(HIntConstant* x) const = 0;
virtual HConstant* Evaluate(HLongConstant* x) const = 0;
+ virtual HConstant* Evaluate(HFloatConstant* x) const = 0;
+ virtual HConstant* Evaluate(HDoubleConstant* x) const = 0;
DECLARE_ABSTRACT_INSTRUCTION(UnaryOperation);
@@ -2725,12 +2813,17 @@
return true;
}
- // Try to statically evaluate `operation` and return a HConstant
- // containing the result of this evaluation. If `operation` cannot
+ // Try to statically evaluate `this` and return a HConstant
+ // containing the result of this evaluation. If `this` cannot
// be evaluated as a constant, return null.
HConstant* TryStaticEvaluation() const;
// Apply this operation to `x` and `y`.
+ virtual HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
+ HNullConstant* y ATTRIBUTE_UNUSED) const {
+ VLOG(compiler) << DebugName() << " is not defined for the (null, null) case.";
+ return nullptr;
+ }
virtual HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const = 0;
virtual HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const = 0;
virtual HConstant* Evaluate(HIntConstant* x ATTRIBUTE_UNUSED,
@@ -2743,11 +2836,8 @@
VLOG(compiler) << DebugName() << " is not defined for the (long, int) case.";
return nullptr;
}
- virtual HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
- HNullConstant* y ATTRIBUTE_UNUSED) const {
- VLOG(compiler) << DebugName() << " is not defined for the (null, null) case.";
- return nullptr;
- }
+ virtual HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const = 0;
+ virtual HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const = 0;
// Returns an input that can legally be used as the right input and is
// constant, or null.
@@ -2771,6 +2861,8 @@
kLtBias, // return -1 for NaN comparisons
};
+std::ostream& operator<<(std::ostream& os, const ComparisonBias& rhs);
+
class HCondition : public HBinaryOperation {
public:
HCondition(HInstruction* first, HInstruction* second, uint32_t dex_pc = kNoDexPc)
@@ -2788,7 +2880,7 @@
virtual IfCondition GetOppositeCondition() const = 0;
bool IsGtBias() const { return bias_ == ComparisonBias::kGtBias; }
-
+ ComparisonBias GetBias() const { return bias_; }
void SetBias(ComparisonBias bias) { bias_ = bias; }
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
@@ -2796,17 +2888,34 @@
}
bool IsFPConditionTrueIfNaN() const {
- DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType()));
+ DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType();
IfCondition if_cond = GetCondition();
return IsGtBias() ? ((if_cond == kCondGT) || (if_cond == kCondGE)) : (if_cond == kCondNE);
}
bool IsFPConditionFalseIfNaN() const {
- DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType()));
+ DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType();
IfCondition if_cond = GetCondition();
return IsGtBias() ? ((if_cond == kCondLT) || (if_cond == kCondLE)) : (if_cond == kCondEQ);
}
+ protected:
+ template <typename T>
+ int32_t Compare(T x, T y) const { return x > y ? 1 : (x < y ? -1 : 0); }
+
+ template <typename T>
+ int32_t CompareFP(T x, T y) const {
+ DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType();
+ DCHECK_NE(GetBias(), ComparisonBias::kNoBias);
+ // Handle the bias.
+ return std::isunordered(x, y) ? (IsGtBias() ? 1 : -1) : Compare(x, y);
+ }
+
+ // Return an integer constant containing the result of a condition evaluated at compile time.
+ HIntConstant* MakeConstantCondition(bool value, uint32_t dex_pc) const {
+ return GetBlock()->GetGraph()->GetIntConstant(value, dex_pc);
+ }
+
private:
// Needed if we merge a HCompare into a HCondition.
ComparisonBias bias_;
@@ -2822,17 +2931,25 @@
bool IsCommutative() const OVERRIDE { return true; }
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue()), GetDexPc());
- }
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue()), GetDexPc());
- }
HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
HNullConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(1);
+ return MakeConstantCondition(true, GetDexPc());
+ }
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ }
+ // In the following Evaluate methods, a HCompare instruction has
+ // been merged into this HEqual instruction; evaluate it as
+ // `Compare(x, y) == 0`.
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0),
+ GetDexPc());
+ }
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
+ }
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
DECLARE_INSTRUCTION(Equal);
@@ -2858,17 +2975,24 @@
bool IsCommutative() const OVERRIDE { return true; }
- HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue()), GetDexPc());
- }
- HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue()), GetDexPc());
- }
HConstant* Evaluate(HNullConstant* x ATTRIBUTE_UNUSED,
HNullConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(0);
+ return MakeConstantCondition(false, GetDexPc());
+ }
+ HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
+ return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ }
+ // In the following Evaluate methods, a HCompare instruction has
+ // been merged into this HNotEqual instruction; evaluate it as
+ // `Compare(x, y) != 0`.
+ HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
+ return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
+ }
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
+ }
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
DECLARE_INSTRUCTION(NotEqual);
@@ -2893,12 +3017,19 @@
: HCondition(first, second, dex_pc) {}
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
+ // In the following Evaluate methods, a HCompare instruction has
+ // been merged into this HLessThan instruction; evaluate it as
+ // `Compare(x, y) < 0`.
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
+ }
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
+ }
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
DECLARE_INSTRUCTION(LessThan);
@@ -2923,12 +3054,19 @@
: HCondition(first, second, dex_pc) {}
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
+ // In the following Evaluate methods, a HCompare instruction has
+ // been merged into this HLessThanOrEqual instruction; evaluate it as
+ // `Compare(x, y) <= 0`.
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
+ }
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
+ }
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
DECLARE_INSTRUCTION(LessThanOrEqual);
@@ -2953,12 +3091,19 @@
: HCondition(first, second, dex_pc) {}
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
+ // In the following Evaluate methods, a HCompare instruction has
+ // been merged into this HGreaterThan instruction; evaluate it as
+ // `Compare(x, y) > 0`.
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
+ }
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
+ }
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
DECLARE_INSTRUCTION(GreaterThan);
@@ -2983,12 +3128,19 @@
: HCondition(first, second, dex_pc) {}
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
+ // In the following Evaluate methods, a HCompare instruction has
+ // been merged into this HGreaterThanOrEqual instruction; evaluate it as
+ // `Compare(x, y) >= 0`.
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ return MakeConstantCondition(Compute(Compare(x->GetValue(), y->GetValue()), 0), GetDexPc());
+ }
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
+ }
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ return MakeConstantCondition(Compute(CompareFP(x->GetValue(), y->GetValue()), 0), GetDexPc());
}
DECLARE_INSTRUCTION(GreaterThanOrEqual);
@@ -3013,14 +3165,20 @@
: HCondition(first, second, dex_pc) {}
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(static_cast<uint32_t>(x->GetValue()),
- static_cast<uint32_t>(y->GetValue())), GetDexPc());
+ return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(static_cast<uint64_t>(x->GetValue()),
- static_cast<uint64_t>(y->GetValue())), GetDexPc());
+ return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+ HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for float values";
+ UNREACHABLE();
+ }
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for double values";
+ UNREACHABLE();
}
DECLARE_INSTRUCTION(Below);
@@ -3034,7 +3192,9 @@
}
private:
- template <typename T> bool Compute(T x, T y) const { return x < y; }
+ template <typename T> bool Compute(T x, T y) const {
+ return MakeUnsigned(x) < MakeUnsigned(y);
+ }
DISALLOW_COPY_AND_ASSIGN(HBelow);
};
@@ -3045,14 +3205,20 @@
: HCondition(first, second, dex_pc) {}
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(static_cast<uint32_t>(x->GetValue()),
- static_cast<uint32_t>(y->GetValue())), GetDexPc());
+ return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(static_cast<uint64_t>(x->GetValue()),
- static_cast<uint64_t>(y->GetValue())), GetDexPc());
+ return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+ HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for float values";
+ UNREACHABLE();
+ }
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for double values";
+ UNREACHABLE();
}
DECLARE_INSTRUCTION(BelowOrEqual);
@@ -3066,7 +3232,9 @@
}
private:
- template <typename T> bool Compute(T x, T y) const { return x <= y; }
+ template <typename T> bool Compute(T x, T y) const {
+ return MakeUnsigned(x) <= MakeUnsigned(y);
+ }
DISALLOW_COPY_AND_ASSIGN(HBelowOrEqual);
};
@@ -3077,14 +3245,20 @@
: HCondition(first, second, dex_pc) {}
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(static_cast<uint32_t>(x->GetValue()),
- static_cast<uint32_t>(y->GetValue())), GetDexPc());
+ return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(static_cast<uint64_t>(x->GetValue()),
- static_cast<uint64_t>(y->GetValue())), GetDexPc());
+ return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+ HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for float values";
+ UNREACHABLE();
+ }
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for double values";
+ UNREACHABLE();
}
DECLARE_INSTRUCTION(Above);
@@ -3098,7 +3272,9 @@
}
private:
- template <typename T> bool Compute(T x, T y) const { return x > y; }
+ template <typename T> bool Compute(T x, T y) const {
+ return MakeUnsigned(x) > MakeUnsigned(y);
+ }
DISALLOW_COPY_AND_ASSIGN(HAbove);
};
@@ -3109,14 +3285,20 @@
: HCondition(first, second, dex_pc) {}
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(static_cast<uint32_t>(x->GetValue()),
- static_cast<uint32_t>(y->GetValue())), GetDexPc());
+ return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(static_cast<uint64_t>(x->GetValue()),
- static_cast<uint64_t>(y->GetValue())), GetDexPc());
+ return MakeConstantCondition(Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+ HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for float values";
+ UNREACHABLE();
+ }
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for double values";
+ UNREACHABLE();
}
DECLARE_INSTRUCTION(AboveOrEqual);
@@ -3130,7 +3312,9 @@
}
private:
- template <typename T> bool Compute(T x, T y) const { return x >= y; }
+ template <typename T> bool Compute(T x, T y) const {
+ return MakeUnsigned(x) >= MakeUnsigned(y);
+ }
DISALLOW_COPY_AND_ASSIGN(HAboveOrEqual);
};
@@ -3155,15 +3339,32 @@
}
template <typename T>
- int32_t Compute(T x, T y) const { return x == y ? 0 : x > y ? 1 : -1; }
+ int32_t Compute(T x, T y) const { return x > y ? 1 : (x < y ? -1 : 0); }
+
+ template <typename T>
+ int32_t ComputeFP(T x, T y) const {
+ DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType();
+ DCHECK_NE(GetBias(), ComparisonBias::kNoBias);
+ // Handle the bias.
+ return std::isunordered(x, y) ? (IsGtBias() ? 1 : -1) : Compute(x, y);
+ }
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ // Note that there is no "cmp-int" Dex instruction so we shouldn't
+ // reach this code path when processing a freshly built HIR
+ // graph. However, HCompare integer instructions can be synthesized
+ // by the instruction simplifier to implement IntegerCompare and
+ // IntegerSignum intrinsics, so we have to handle this case.
+ return MakeConstantComparison(Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
- return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ return MakeConstantComparison(Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ return MakeConstantComparison(ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ return MakeConstantComparison(ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
}
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
@@ -3172,8 +3373,12 @@
ComparisonBias GetBias() const { return bias_; }
- bool IsGtBias() { return bias_ == ComparisonBias::kGtBias; }
-
+ // Does this compare instruction have a "gt bias" (vs an "lt bias")?
+ // Only meaningful for floating-point comparisons.
+ bool IsGtBias() const {
+ DCHECK(Primitive::IsFloatingPointType(InputAt(0)->GetType())) << InputAt(0)->GetType();
+ return bias_ == ComparisonBias::kGtBias;
+ }
static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type type) {
// MIPS64 uses a runtime call for FP comparisons.
@@ -3182,6 +3387,13 @@
DECLARE_INSTRUCTION(Compare);
+ protected:
+ // Return an integer constant containing the result of a comparison evaluated at compile time.
+ HIntConstant* MakeConstantComparison(int32_t value, uint32_t dex_pc) const {
+ DCHECK(value == -1 || value == 0 || value == 1) << value;
+ return GetBlock()->GetGraph()->GetIntConstant(value, dex_pc);
+ }
+
private:
const ComparisonBias bias_;
@@ -3239,92 +3451,6 @@
DISALLOW_COPY_AND_ASSIGN(HStoreLocal);
};
-class HFloatConstant : public HConstant {
- public:
- float GetValue() const { return value_; }
-
- uint64_t GetValueAsUint64() const OVERRIDE {
- return static_cast<uint64_t>(bit_cast<uint32_t, float>(value_));
- }
-
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- DCHECK(other->IsFloatConstant());
- return other->AsFloatConstant()->GetValueAsUint64() == GetValueAsUint64();
- }
-
- size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
-
- bool IsMinusOne() const OVERRIDE {
- return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>((-1.0f));
- }
- bool IsZero() const OVERRIDE {
- return value_ == 0.0f;
- }
- bool IsOne() const OVERRIDE {
- return bit_cast<uint32_t, float>(value_) == bit_cast<uint32_t, float>(1.0f);
- }
- bool IsNaN() const {
- return std::isnan(value_);
- }
-
- DECLARE_INSTRUCTION(FloatConstant);
-
- private:
- explicit HFloatConstant(float value, uint32_t dex_pc = kNoDexPc)
- : HConstant(Primitive::kPrimFloat, dex_pc), value_(value) {}
- explicit HFloatConstant(int32_t value, uint32_t dex_pc = kNoDexPc)
- : HConstant(Primitive::kPrimFloat, dex_pc), value_(bit_cast<float, int32_t>(value)) {}
-
- const float value_;
-
- // Only the SsaBuilder and HGraph can create floating-point constants.
- friend class SsaBuilder;
- friend class HGraph;
- DISALLOW_COPY_AND_ASSIGN(HFloatConstant);
-};
-
-class HDoubleConstant : public HConstant {
- public:
- double GetValue() const { return value_; }
-
- uint64_t GetValueAsUint64() const OVERRIDE { return bit_cast<uint64_t, double>(value_); }
-
- bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
- DCHECK(other->IsDoubleConstant());
- return other->AsDoubleConstant()->GetValueAsUint64() == GetValueAsUint64();
- }
-
- size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
-
- bool IsMinusOne() const OVERRIDE {
- return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>((-1.0));
- }
- bool IsZero() const OVERRIDE {
- return value_ == 0.0;
- }
- bool IsOne() const OVERRIDE {
- return bit_cast<uint64_t, double>(value_) == bit_cast<uint64_t, double>(1.0);
- }
- bool IsNaN() const {
- return std::isnan(value_);
- }
-
- DECLARE_INSTRUCTION(DoubleConstant);
-
- private:
- explicit HDoubleConstant(double value, uint32_t dex_pc = kNoDexPc)
- : HConstant(Primitive::kPrimDouble, dex_pc), value_(value) {}
- explicit HDoubleConstant(int64_t value, uint32_t dex_pc = kNoDexPc)
- : HConstant(Primitive::kPrimDouble, dex_pc), value_(bit_cast<double, int64_t>(value)) {}
-
- const double value_;
-
- // Only the SsaBuilder and HGraph can create floating-point constants.
- friend class SsaBuilder;
- friend class HGraph;
- DISALLOW_COPY_AND_ASSIGN(HDoubleConstant);
-};
-
class HNewInstance : public HExpression<2> {
public:
HNewInstance(HInstruction* cls,
@@ -3876,6 +4002,12 @@
HConstant* Evaluate(HLongConstant* x) const OVERRIDE {
return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()), GetDexPc());
}
+ HConstant* Evaluate(HFloatConstant* x) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetFloatConstant(Compute(x->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HDoubleConstant* x) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetDoubleConstant(Compute(x->GetValue()), GetDexPc());
+ }
DECLARE_INSTRUCTION(Neg);
@@ -3942,6 +4074,14 @@
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetFloatConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetDoubleConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ }
DECLARE_INSTRUCTION(Add);
@@ -3967,6 +4107,14 @@
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetFloatConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetDoubleConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ }
DECLARE_INSTRUCTION(Sub);
@@ -3994,6 +4142,14 @@
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetFloatConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetDoubleConstant(
+ Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ }
DECLARE_INSTRUCTION(Mul);
@@ -4010,7 +4166,8 @@
: HBinaryOperation(result_type, left, right, SideEffectsForArchRuntimeCalls(), dex_pc) {}
template <typename T>
- T Compute(T x, T y) const {
+ T ComputeIntegral(T x, T y) const {
+ DCHECK(!Primitive::IsFloatingPointType(GetType())) << GetType();
// Our graph structure ensures we never have 0 for `y` during
// constant folding.
DCHECK_NE(y, 0);
@@ -4018,13 +4175,27 @@
return (y == -1) ? -x : x / y;
}
+ template <typename T>
+ T ComputeFP(T x, T y) const {
+ DCHECK(Primitive::IsFloatingPointType(GetType())) << GetType();
+ return x / y;
+ }
+
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetLongConstant(
- Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetFloatConstant(
+ ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetDoubleConstant(
+ ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
}
static SideEffects SideEffectsForArchRuntimeCalls() {
@@ -4047,7 +4218,8 @@
: HBinaryOperation(result_type, left, right, SideEffectsForArchRuntimeCalls(), dex_pc) {}
template <typename T>
- T Compute(T x, T y) const {
+ T ComputeIntegral(T x, T y) const {
+ DCHECK(!Primitive::IsFloatingPointType(GetType())) << GetType();
// Our graph structure ensures we never have 0 for `y` during
// constant folding.
DCHECK_NE(y, 0);
@@ -4055,15 +4227,28 @@
return (y == -1) ? 0 : x % y;
}
+ template <typename T>
+ T ComputeFP(T x, T y) const {
+ DCHECK(Primitive::IsFloatingPointType(GetType())) << GetType();
+ return std::fmod(x, y);
+ }
+
HConstant* Evaluate(HIntConstant* x, HIntConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetIntConstant(
- Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
HConstant* Evaluate(HLongConstant* x, HLongConstant* y) const OVERRIDE {
return GetBlock()->GetGraph()->GetLongConstant(
- Compute(x->GetValue(), y->GetValue()), GetDexPc());
+ ComputeIntegral(x->GetValue(), y->GetValue()), GetDexPc());
}
-
+ HConstant* Evaluate(HFloatConstant* x, HFloatConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetFloatConstant(
+ ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
+ }
+ HConstant* Evaluate(HDoubleConstant* x, HDoubleConstant* y) const OVERRIDE {
+ return GetBlock()->GetGraph()->GetDoubleConstant(
+ ComputeFP(x->GetValue(), y->GetValue()), GetDexPc());
+ }
static SideEffects SideEffectsForArchRuntimeCalls() {
return SideEffects::CanTriggerGC();
@@ -4130,6 +4315,16 @@
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue), GetDexPc());
}
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+ HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for float values";
+ UNREACHABLE();
+ }
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for double values";
+ UNREACHABLE();
+ }
DECLARE_INSTRUCTION(Shl);
@@ -4166,6 +4361,16 @@
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue), GetDexPc());
}
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+ HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for float values";
+ UNREACHABLE();
+ }
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for double values";
+ UNREACHABLE();
+ }
DECLARE_INSTRUCTION(Shr);
@@ -4203,6 +4408,16 @@
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue), GetDexPc());
}
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+ HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for float values";
+ UNREACHABLE();
+ }
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for double values";
+ UNREACHABLE();
+ }
DECLARE_INSTRUCTION(UShr);
@@ -4239,6 +4454,16 @@
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+ HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for float values";
+ UNREACHABLE();
+ }
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for double values";
+ UNREACHABLE();
+ }
DECLARE_INSTRUCTION(And);
@@ -4275,6 +4500,16 @@
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+ HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for float values";
+ UNREACHABLE();
+ }
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for double values";
+ UNREACHABLE();
+ }
DECLARE_INSTRUCTION(Or);
@@ -4311,6 +4546,16 @@
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue()), GetDexPc());
}
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+ HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for float values";
+ UNREACHABLE();
+ }
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for double values";
+ UNREACHABLE();
+ }
DECLARE_INSTRUCTION(Xor);
@@ -4349,6 +4594,16 @@
return GetBlock()->GetGraph()->GetLongConstant(
Compute(x->GetValue(), y->GetValue(), kMaxLongShiftValue), GetDexPc());
}
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED,
+ HFloatConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for float values";
+ UNREACHABLE();
+ }
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED,
+ HDoubleConstant* y ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for double values";
+ UNREACHABLE();
+ }
DECLARE_INSTRUCTION(Ror);
@@ -4415,6 +4670,14 @@
HConstant* Evaluate(HLongConstant* x) const OVERRIDE {
return GetBlock()->GetGraph()->GetLongConstant(Compute(x->GetValue()), GetDexPc());
}
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for float values";
+ UNREACHABLE();
+ }
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for double values";
+ UNREACHABLE();
+ }
DECLARE_INSTRUCTION(Not);
@@ -4433,7 +4696,7 @@
}
template <typename T> bool Compute(T x) const {
- DCHECK(IsUint<1>(x));
+ DCHECK(IsUint<1>(x)) << x;
return !x;
}
@@ -4444,6 +4707,14 @@
LOG(FATAL) << DebugName() << " is not defined for long values";
UNREACHABLE();
}
+ HConstant* Evaluate(HFloatConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for float values";
+ UNREACHABLE();
+ }
+ HConstant* Evaluate(HDoubleConstant* x ATTRIBUTE_UNUSED) const OVERRIDE {
+ LOG(FATAL) << DebugName() << " is not defined for double values";
+ UNREACHABLE();
+ }
DECLARE_INSTRUCTION(BooleanNot);
@@ -4791,10 +5062,10 @@
DCHECK_EQ(GetArray(), other->GetArray());
DCHECK_EQ(GetIndex(), other->GetIndex());
if (Primitive::IsIntOrLongType(GetType())) {
- DCHECK(Primitive::IsFloatingPointType(other->GetType()));
+ DCHECK(Primitive::IsFloatingPointType(other->GetType())) << other->GetType();
} else {
- DCHECK(Primitive::IsFloatingPointType(GetType()));
- DCHECK(Primitive::IsIntOrLongType(other->GetType()));
+ DCHECK(Primitive::IsFloatingPointType(GetType())) << GetType();
+ DCHECK(Primitive::IsIntOrLongType(other->GetType())) << other->GetType();
}
}
return result;
@@ -6009,7 +6280,7 @@
} else if (constant->IsLongConstant()) {
return constant->AsLongConstant()->GetValue();
} else {
- DCHECK(constant->IsNullConstant());
+ DCHECK(constant->IsNullConstant()) << constant->DebugName();
return 0;
}
}
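Reviewer note: the merged compare/condition folding introduced above can be illustrated outside the HIR classes. This is a minimal standalone sketch (the names below are mine, not ART's API): a condition such as `x <= y` that absorbed an HCompare folds as `Compute(Compare(x, y), 0)`, and floating-point inputs go through the gt/lt bias before the three-way compare.

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Three-way compare as in HCompare::Compute: returns 1, -1, or 0.
    template <typename T>
    int32_t Compare(T x, T y) { return x > y ? 1 : (x < y ? -1 : 0); }

    // Floating-point three-way compare with an explicit bias for unordered
    // (NaN) inputs, mirroring HCompare::ComputeFP.
    template <typename T>
    int32_t CompareFP(T x, T y, bool gt_bias) {
      return std::isunordered(x, y) ? (gt_bias ? 1 : -1) : Compare(x, y);
    }

    int main() {
      // A condition merged with a compare folds as Compare(x, y) <op> 0.
      assert((Compare(int64_t{7}, int64_t{13}) <= 0) == true);
      // With gt bias a NaN operand makes "<=" fold to false...
      assert((CompareFP(std::nanf(""), 1.0f, /*gt_bias=*/true) <= 0) == false);
      // ...and with lt bias it folds to true.
      assert((CompareFP(std::nanf(""), 1.0f, /*gt_bias=*/false) <= 0) == true);
      return 0;
    }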
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 43f2499..09ca8b7 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -422,6 +422,34 @@
return true;
}
+static bool HasAliasInEnvironments(HInstruction* instruction) {
+ for (HUseIterator<HEnvironment*> use_it(instruction->GetEnvUses());
+ !use_it.Done();
+ use_it.Advance()) {
+ HEnvironment* use = use_it.Current()->GetUser();
+ HUseListNode<HEnvironment*>* next = use_it.Current()->GetNext();
+ if (next != nullptr && next->GetUser() == use) {
+ return true;
+ }
+ }
+
+ if (kIsDebugBuild) {
+ // Do a quadratic search to ensure same environment uses are next
+ // to each other.
+ for (HUseIterator<HEnvironment*> use_it(instruction->GetEnvUses());
+ !use_it.Done();
+ use_it.Advance()) {
+ HUseListNode<HEnvironment*>* current = use_it.Current();
+ HUseListNode<HEnvironment*>* next = current->GetNext();
+ while (next != nullptr) {
+ DCHECK(next->GetUser() != current->GetUser());
+ next = next->GetNext();
+ }
+ }
+ }
+ return false;
+}
+
void SsaBuilder::RemoveRedundantUninitializedStrings() {
if (GetGraph()->IsDebuggable()) {
// Do not perform the optimization for consistency with the interpreter
@@ -433,7 +461,7 @@
// Replace NewInstance of String with NullConstant if not used prior to
// calling StringFactory. In case of deoptimization, the interpreter is
// expected to skip null check on the `this` argument of the StringFactory call.
- if (!new_instance->HasNonEnvironmentUses()) {
+ if (!new_instance->HasNonEnvironmentUses() && !HasAliasInEnvironments(new_instance)) {
new_instance->ReplaceWith(GetGraph()->GetNullConstant());
new_instance->GetBlock()->RemoveInstruction(new_instance);
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index ac9c097..6fd65ee 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -426,6 +426,16 @@
EmitI(0x23, rs, rt, imm16);
}
+void MipsAssembler::Lwl(Register rt, Register rs, uint16_t imm16) {
+ CHECK(!IsR6());
+ EmitI(0x22, rs, rt, imm16);
+}
+
+void MipsAssembler::Lwr(Register rt, Register rs, uint16_t imm16) {
+ CHECK(!IsR6());
+ EmitI(0x26, rs, rt, imm16);
+}
+
void MipsAssembler::Lbu(Register rt, Register rs, uint16_t imm16) {
EmitI(0x24, rs, rt, imm16);
}
@@ -465,6 +475,16 @@
EmitI(0x2b, rs, rt, imm16);
}
+void MipsAssembler::Swl(Register rt, Register rs, uint16_t imm16) {
+ CHECK(!IsR6());
+ EmitI(0x2a, rs, rt, imm16);
+}
+
+void MipsAssembler::Swr(Register rt, Register rs, uint16_t imm16) {
+ CHECK(!IsR6());
+ EmitI(0x2e, rs, rt, imm16);
+}
+
void MipsAssembler::Slt(Register rd, Register rs, Register rt) {
EmitR(0, rs, rt, rd, 0, 0x2a);
}
diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
index 01c6490..2262af4 100644
--- a/compiler/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -162,6 +162,8 @@
void Lb(Register rt, Register rs, uint16_t imm16);
void Lh(Register rt, Register rs, uint16_t imm16);
void Lw(Register rt, Register rs, uint16_t imm16);
+ void Lwl(Register rt, Register rs, uint16_t imm16);
+ void Lwr(Register rt, Register rs, uint16_t imm16);
void Lbu(Register rt, Register rs, uint16_t imm16);
void Lhu(Register rt, Register rs, uint16_t imm16);
void Lui(Register rt, uint16_t imm16);
@@ -172,6 +174,8 @@
void Sb(Register rt, Register rs, uint16_t imm16);
void Sh(Register rt, Register rs, uint16_t imm16);
void Sw(Register rt, Register rs, uint16_t imm16);
+ void Swl(Register rt, Register rs, uint16_t imm16);
+ void Swr(Register rt, Register rs, uint16_t imm16);
void Slt(Register rd, Register rs, Register rt);
void Sltu(Register rd, Register rs, Register rt);
diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc
index 5fc3dee..9e27f07 100644
--- a/compiler/utils/mips/assembler_mips_test.cc
+++ b/compiler/utils/mips/assembler_mips_test.cc
@@ -335,6 +335,18 @@
DriverStr(RepeatRRR(&mips::MipsAssembler::Nor, "nor ${reg1}, ${reg2}, ${reg3}"), "Nor");
}
+//////////
+// MISC //
+//////////
+
+TEST_F(AssemblerMIPSTest, Movz) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Movz, "movz ${reg1}, ${reg2}, ${reg3}"), "Movz");
+}
+
+TEST_F(AssemblerMIPSTest, Movn) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Movn, "movn ${reg1}, ${reg2}, ${reg3}"), "Movn");
+}
+
TEST_F(AssemblerMIPSTest, Seb) {
DriverStr(RepeatRR(&mips::MipsAssembler::Seb, "seb ${reg1}, ${reg2}"), "Seb");
}
@@ -363,6 +375,10 @@
DriverStr(RepeatRRR(&mips::MipsAssembler::Srlv, "srlv ${reg1}, ${reg2}, ${reg3}"), "Srlv");
}
+TEST_F(AssemblerMIPSTest, Rotrv) {
+ DriverStr(RepeatRRR(&mips::MipsAssembler::Rotrv, "rotrv ${reg1}, ${reg2}, ${reg3}"), "rotrv");
+}
+
TEST_F(AssemblerMIPSTest, Srav) {
DriverStr(RepeatRRR(&mips::MipsAssembler::Srav, "srav ${reg1}, ${reg2}, ${reg3}"), "Srav");
}
@@ -405,6 +421,14 @@
DriverStr(expected, "Ext");
}
+TEST_F(AssemblerMIPSTest, ClzR2) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::ClzR2, "clz ${reg1}, ${reg2}"), "clzR2");
+}
+
+TEST_F(AssemblerMIPSTest, CloR2) {
+ DriverStr(RepeatRR(&mips::MipsAssembler::CloR2, "clo ${reg1}, ${reg2}"), "cloR2");
+}
+
TEST_F(AssemblerMIPSTest, Lb) {
DriverStr(RepeatRRIb(&mips::MipsAssembler::Lb, -16, "lb ${reg1}, {imm}(${reg2})"), "Lb");
}
@@ -413,10 +437,18 @@
DriverStr(RepeatRRIb(&mips::MipsAssembler::Lh, -16, "lh ${reg1}, {imm}(${reg2})"), "Lh");
}
+TEST_F(AssemblerMIPSTest, Lwl) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Lwl, -16, "lwl ${reg1}, {imm}(${reg2})"), "Lwl");
+}
+
TEST_F(AssemblerMIPSTest, Lw) {
DriverStr(RepeatRRIb(&mips::MipsAssembler::Lw, -16, "lw ${reg1}, {imm}(${reg2})"), "Lw");
}
+TEST_F(AssemblerMIPSTest, Lwr) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Lwr, -16, "lwr ${reg1}, {imm}(${reg2})"), "Lwr");
+}
+
TEST_F(AssemblerMIPSTest, Lbu) {
DriverStr(RepeatRRIb(&mips::MipsAssembler::Lbu, -16, "lbu ${reg1}, {imm}(${reg2})"), "Lbu");
}
@@ -445,10 +477,18 @@
DriverStr(RepeatRRIb(&mips::MipsAssembler::Sh, -16, "sh ${reg1}, {imm}(${reg2})"), "Sh");
}
+TEST_F(AssemblerMIPSTest, Swl) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Swl, -16, "swl ${reg1}, {imm}(${reg2})"), "Swl");
+}
+
TEST_F(AssemblerMIPSTest, Sw) {
DriverStr(RepeatRRIb(&mips::MipsAssembler::Sw, -16, "sw ${reg1}, {imm}(${reg2})"), "Sw");
}
+TEST_F(AssemblerMIPSTest, Swr) {
+ DriverStr(RepeatRRIb(&mips::MipsAssembler::Swr, -16, "swr ${reg1}, {imm}(${reg2})"), "Swr");
+}
+
TEST_F(AssemblerMIPSTest, Slt) {
DriverStr(RepeatRRR(&mips::MipsAssembler::Slt, "slt ${reg1}, ${reg2}, ${reg3}"), "Slt");
}
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index f922687..428266f 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -306,13 +306,17 @@
{ kITypeMask, 32u << kOpcodeShift, "lb", "TO", },
{ kITypeMask, 33u << kOpcodeShift, "lh", "TO", },
+ { kITypeMask, 34u << kOpcodeShift, "lwl", "TO", },
{ kITypeMask, 35u << kOpcodeShift, "lw", "TO", },
{ kITypeMask, 36u << kOpcodeShift, "lbu", "TO", },
{ kITypeMask, 37u << kOpcodeShift, "lhu", "TO", },
+ { kITypeMask, 38u << kOpcodeShift, "lwr", "TO", },
{ kITypeMask, 39u << kOpcodeShift, "lwu", "TO", },
{ kITypeMask, 40u << kOpcodeShift, "sb", "TO", },
{ kITypeMask, 41u << kOpcodeShift, "sh", "TO", },
+ { kITypeMask, 42u << kOpcodeShift, "swl", "TO", },
{ kITypeMask, 43u << kOpcodeShift, "sw", "TO", },
+ { kITypeMask, 46u << kOpcodeShift, "swr", "TO", },
{ kITypeMask, 49u << kOpcodeShift, "lwc1", "tO", },
{ kJTypeMask, 50u << kOpcodeShift, "bc", "P" },
{ kITypeMask, 53u << kOpcodeShift, "ldc1", "tO", },
diff --git a/runtime/Android.mk b/runtime/Android.mk
index e9f7add..947de8a 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -238,6 +238,7 @@
# (empty) body is called.
JIT_DEBUG_REGISTER_CODE_LDFLAGS := -Wl,--keep-unique,__jit_debug_register_code
LIBART_TARGET_LDFLAGS_arm := $(JIT_DEBUG_REGISTER_CODE_LDFLAGS)
+LIBART_TARGET_LDFLAGS_arm64 := $(JIT_DEBUG_REGISTER_CODE_LDFLAGS)
LIBART_TARGET_LDFLAGS_x86 := $(JIT_DEBUG_REGISTER_CODE_LDFLAGS)
LIBART_TARGET_LDFLAGS_x86_64 := $(JIT_DEBUG_REGISTER_CODE_LDFLAGS)
JIT_DEBUG_REGISTER_CODE_LDFLAGS :=
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 82a5f96..6972b3e 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -1009,10 +1009,6 @@
DCHECK(alloc_tracker_lock_ == nullptr);
alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);
- UPDATE_CURRENT_LOCK_LEVEL(kInterpreterStringInitMapLock);
- DCHECK(interpreter_string_init_map_lock_ == nullptr);
- interpreter_string_init_map_lock_ = new Mutex("Interpreter String initializer reference map lock", current_lock_level);
-
UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
DCHECK(thread_list_lock_ == nullptr);
thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index f674a6f..e72f2a2 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -102,7 +102,6 @@
kMonitorListLock,
kJniLoadLibraryLock,
kThreadListLock,
- kInterpreterStringInitMapLock,
kAllocTrackerLock,
kDeoptimizationLock,
kProfilerLock,
diff --git a/runtime/base/scoped_arena_containers.h b/runtime/base/scoped_arena_containers.h
index 9b56856..bd19d00 100644
--- a/runtime/base/scoped_arena_containers.h
+++ b/runtime/base/scoped_arena_containers.h
@@ -201,11 +201,12 @@
template <typename T>
class ArenaDelete {
static constexpr uint8_t kMagicFill = 0xCE;
+
protected:
// Used for variable sized objects such as RegisterLine.
ALWAYS_INLINE void ProtectMemory(T* ptr, size_t size) const {
if (RUNNING_ON_MEMORY_TOOL > 0) {
- // Writing to the memory will fail if it we already destroyed the pointer with
+ // Writing to the memory will fail if we already destroyed the pointer with
// DestroyOnlyDelete since we make it no access.
memset(ptr, kMagicFill, size);
MEMORY_TOOL_MAKE_NOACCESS(ptr, size);
@@ -220,8 +221,10 @@
public:
void operator()(T* ptr) const {
- ptr->~T();
- ProtectMemory(ptr, sizeof(T));
+ if (ptr != nullptr) {
+ ptr->~T();
+ ProtectMemory(ptr, sizeof(T));
+ }
}
};
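Reviewer note: the same null guard is applied to RegisterLineArenaDelete later in this patch. A small sketch of the pattern with my own stand-in names (not the ART types): arena memory is reclaimed in bulk, so the deleter only runs the destructor, and it must tolerate a null pointer.

    // Destroy-only deleter for arena-allocated objects.
    template <typename T>
    struct DestroyOnlyDelete {
      void operator()(T* ptr) const {
        if (ptr != nullptr) {
          ptr->~T();  // Destroy in place; the arena owns the storage.
        }
      }
    };

    struct Line { int regs[4]; };  // Trivially destructible example payload.

    int main() {
      DestroyOnlyDelete<Line> deleter;
      deleter(nullptr);    // Must be a harmless no-op after the fix.
      Line storage{};      // Stand-in for arena-provided storage.
      deleter(&storage);   // Runs ~Line(); harmless for a trivial destructor.
      return 0;
    }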
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 0c06c38..894ce9a 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -676,13 +676,17 @@
dest_(dest),
length_(length) {}
- bool ContainsSource(uintptr_t address) const {
+ bool InSource(uintptr_t address) const {
return address - source_ < length_;
}
+ bool InDest(uintptr_t address) const {
+ return address - dest_ < length_;
+ }
+
// Translate a source address to the destination space.
uintptr_t ToDest(uintptr_t address) const {
- DCHECK(ContainsSource(address));
+ DCHECK(InSource(address));
return address + Delta();
}
@@ -724,24 +728,28 @@
template <typename T>
ALWAYS_INLINE T* ForwardObject(T* src) const {
const uintptr_t uint_src = reinterpret_cast<uintptr_t>(src);
- if (boot_image_.ContainsSource(uint_src)) {
+ if (boot_image_.InSource(uint_src)) {
return reinterpret_cast<T*>(boot_image_.ToDest(uint_src));
}
- if (app_image_.ContainsSource(uint_src)) {
+ if (app_image_.InSource(uint_src)) {
return reinterpret_cast<T*>(app_image_.ToDest(uint_src));
}
+ // Since we are fixing up the app image, there should only be pointers to the app image and
+ // boot image.
+ DCHECK(src == nullptr) << reinterpret_cast<const void*>(src);
return src;
}
// Return the relocated address of a code pointer (contained by an oat file).
ALWAYS_INLINE const void* ForwardCode(const void* src) const {
const uintptr_t uint_src = reinterpret_cast<uintptr_t>(src);
- if (boot_oat_.ContainsSource(uint_src)) {
+ if (boot_oat_.InSource(uint_src)) {
return reinterpret_cast<const void*>(boot_oat_.ToDest(uint_src));
}
- if (app_oat_.ContainsSource(uint_src)) {
+ if (app_oat_.InSource(uint_src)) {
return reinterpret_cast<const void*>(app_oat_.ToDest(uint_src));
}
+ DCHECK(src == nullptr) << src;
return src;
}
@@ -766,6 +774,11 @@
template<typename... Args>
explicit FixupObjectAdapter(Args... args) : FixupVisitor(args...) {}
+ // Must be called on pointers that already have been relocated to the destination relocation.
+ ALWAYS_INLINE bool IsInAppImage(mirror::Object* object) const {
+ return app_image_.InDest(reinterpret_cast<uintptr_t>(object));
+ }
+
template <typename T>
T* operator()(T* obj) const {
return ForwardObject(obj);
@@ -816,7 +829,10 @@
class FixupObjectVisitor : public FixupVisitor {
public:
template<typename... Args>
- explicit FixupObjectVisitor(Args... args) : FixupVisitor(args...) {}
+ explicit FixupObjectVisitor(gc::accounting::ContinuousSpaceBitmap* pointer_array_visited,
+ Args... args)
+ : FixupVisitor(args...),
+ pointer_array_visited_(pointer_array_visited) {}
// Fix up separately since we also need to fix up method entrypoints.
ALWAYS_INLINE void VisitRootIfNonNull(
@@ -841,6 +857,19 @@
}
}
+ // Visit a pointer array and forward corresponding native data. Ignores pointer arrays in the
+ // boot image. Uses the bitmap to ensure the same array is not visited multiple times.
+ template <typename Visitor>
+ void VisitPointerArray(mirror::PointerArray* array, const Visitor& visitor) const
+ NO_THREAD_SAFETY_ANALYSIS {
+ if (array != nullptr &&
+ visitor.IsInAppImage(array) &&
+ !pointer_array_visited_->Test(array)) {
+ array->Fixup<kVerifyNone, kWithoutReadBarrier>(array, sizeof(void*), visitor);
+ pointer_array_visited_->Set(array);
+ }
+ }
+
// java.lang.ref.Reference visitor.
void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
@@ -859,11 +888,9 @@
mirror::Class* klass = obj->AsClass<kVerifyNone, kWithoutReadBarrier>();
FixupObjectAdapter visitor(boot_image_, boot_oat_, app_image_, app_oat_);
klass->FixupNativePointers<kVerifyNone, kWithoutReadBarrier>(klass, sizeof(void*), visitor);
- // Deal with the arrays.
- mirror::PointerArray* vtable = klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
- if (vtable != nullptr) {
- vtable->Fixup<kVerifyNone, kWithoutReadBarrier>(vtable, sizeof(void*), visitor);
- }
+ // Deal with the pointer arrays. Use the helper function since multiple classes can reference
+ // the same arrays.
+ VisitPointerArray(klass->GetVTable<kVerifyNone, kWithoutReadBarrier>(), visitor);
mirror::IfTable* iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
if (iftable != nullptr) {
for (int32_t i = 0, count = iftable->Count(); i < count; ++i) {
@@ -871,12 +898,15 @@
mirror::PointerArray* methods =
iftable->GetMethodArray<kVerifyNone, kWithoutReadBarrier>(i);
DCHECK(methods != nullptr);
- methods->Fixup<kVerifyNone, kWithoutReadBarrier>(methods, sizeof(void*), visitor);
+ VisitPointerArray(methods, visitor);
}
}
}
}
}
+
+ private:
+ gc::accounting::ContinuousSpaceBitmap* const pointer_array_visited_;
};
class ForwardObjectAdapter {
@@ -1010,9 +1040,18 @@
const ImageSection& objects_section = image_header.GetImageSection(ImageHeader::kSectionObjects);
uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
- // Two pass approach, fix up all classes first, then fix up non class-objects.
- FixupObjectVisitor fixup_object_visitor(boot_image, boot_oat, app_image, app_oat);
if (fixup_image) {
+ // Two pass approach, fix up all classes first, then fix up non class-objects.
+ // The visited bitmap is used to ensure that pointer arrays are not forwarded twice.
+ std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> visited_bitmap(
+ gc::accounting::ContinuousSpaceBitmap::Create("Pointer array bitmap",
+ target_base,
+ image_header.GetImageSize()));
+ FixupObjectVisitor fixup_object_visitor(visited_bitmap.get(),
+ boot_image,
+ boot_oat,
+ app_image,
+ app_oat);
TimingLogger::ScopedTiming timing("Fixup classes", &logger);
// Fixup class only touches app image classes, don't need the mutator lock since the space is
// not yet visible to the GC.
@@ -1025,7 +1064,7 @@
bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_object_visitor);
FixupObjectAdapter fixup_adapter(boot_image, boot_oat, app_image, app_oat);
// Fixup image roots.
- CHECK(app_image.ContainsSource(reinterpret_cast<uintptr_t>(
+ CHECK(app_image.InSource(reinterpret_cast<uintptr_t>(
image_header.GetImageRoots<kWithoutReadBarrier>())));
image_header.RelocateImageObjects(app_image.Delta());
CHECK_EQ(image_header.GetImageBegin(), target_base);
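Reviewer note: the ContainsSource → InSource/InDest rename keeps the same one-comparison range test. A standalone sketch of the trick, assuming Delta() is dest minus source: for unsigned uintptr_t, `address - base < length` covers both bounds in one comparison, because an address below `base` wraps around to a huge value.

    #include <cassert>
    #include <cstdint>

    struct RelocationRange {
      uintptr_t source;
      uintptr_t dest;
      uintptr_t length;

      bool InSource(uintptr_t address) const { return address - source < length; }
      bool InDest(uintptr_t address) const { return address - dest < length; }
      uintptr_t Delta() const { return dest - source; }
      // Translate a source address to the destination space.
      uintptr_t ToDest(uintptr_t address) const { return address + Delta(); }
    };

    int main() {
      RelocationRange app_image{/*source=*/0x1000, /*dest=*/0x9000, /*length=*/0x100};
      assert(app_image.InSource(0x1000) && app_image.InSource(0x10ff));
      assert(!app_image.InSource(0x0fff) && !app_image.InSource(0x1100));
      assert(app_image.ToDest(0x1040) == 0x9040);
      assert(app_image.InDest(app_image.ToDest(0x1040)));
      return 0;
    }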
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index cbaa817..3453abc 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -733,39 +733,21 @@
}
if (string_init && !self->IsExceptionPending()) {
- // Set the new string result of the StringFactory.
- shadow_frame.SetVRegReference(string_init_vreg_this, result->GetL());
- // Overwrite all potential copies of the original result of the new-instance of string with the
- // new result of the StringFactory. Use the verifier to find this set of registers.
- ArtMethod* method = shadow_frame.GetMethod();
- MethodReference method_ref = method->ToMethodReference();
- SafeMap<uint32_t, std::set<uint32_t>>* string_init_map_ptr = nullptr;
- MethodRefToStringInitRegMap& method_to_string_init_map = Runtime::Current()->GetStringInitMap();
- {
- MutexLock mu(self, *Locks::interpreter_string_init_map_lock_);
- auto it = method_to_string_init_map.find(method_ref);
- if (it != method_to_string_init_map.end()) {
- string_init_map_ptr = &it->second;
- }
- }
- if (string_init_map_ptr == nullptr) {
- SafeMap<uint32_t, std::set<uint32_t>> string_init_map =
- verifier::MethodVerifier::FindStringInitMap(method);
- MutexLock mu(self, *Locks::interpreter_string_init_map_lock_);
- auto it = method_to_string_init_map.lower_bound(method_ref);
- if (it == method_to_string_init_map.end() ||
- method_to_string_init_map.key_comp()(method_ref, it->first)) {
- it = method_to_string_init_map.PutBefore(it, method_ref, std::move(string_init_map));
- }
- string_init_map_ptr = &it->second;
- }
- if (string_init_map_ptr->size() != 0) {
- uint32_t dex_pc = shadow_frame.GetDexPC();
- auto map_it = string_init_map_ptr->find(dex_pc);
- if (map_it != string_init_map_ptr->end()) {
- const std::set<uint32_t>& reg_set = map_it->second;
- for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
- shadow_frame.SetVRegReference(*set_it, result->GetL());
+ mirror::Object* existing = shadow_frame.GetVRegReference(string_init_vreg_this);
+ if (existing == nullptr) {
+ // If it's null, we come from compiled code that was deoptimized. There are no
+ // aliases to update, as the compiler verified there was none; just set the
+ // new string result of the StringFactory.
+ shadow_frame.SetVRegReference(string_init_vreg_this, result->GetL());
+ } else {
+ // Replace the fake string that was allocated with the StringFactory result.
+ for (uint32_t i = 0; i < shadow_frame.NumberOfVRegs(); ++i) {
+ if (shadow_frame.GetVRegReference(i) == existing) {
+ DCHECK_EQ(shadow_frame.GetVRegReference(i),
+ reinterpret_cast<mirror::Object*>(shadow_frame.GetVReg(i)));
+ shadow_frame.SetVRegReference(i, result->GetL());
+ DCHECK_EQ(shadow_frame.GetVRegReference(i),
+ reinterpret_cast<mirror::Object*>(shadow_frame.GetVReg(i)));
}
}
}
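Reviewer note: a simplified model of the interpreter-side fix above, with plain pointers standing in for mirror::Object references (not the shadow-frame API). After the StringFactory call returns, every vreg that still holds the placeholder allocation is rewritten to the real String, not just the original "this" register.

    #include <cassert>
    #include <vector>

    void ReplaceStringAliases(std::vector<const void*>& vregs,
                              const void* placeholder,
                              const void* result) {
      for (const void*& vreg : vregs) {
        if (vreg == placeholder) {
          vreg = result;  // Rewrite every copy of the fake allocation.
        }
      }
    }

    int main() {
      int placeholder = 0, real_string = 0, other = 0;
      std::vector<const void*> vregs = {&placeholder, &other, &placeholder};
      ReplaceStringAliases(vregs, &placeholder, &real_string);
      assert(vregs[0] == &real_string && vregs[2] == &real_string);
      assert(vregs[1] == &other);
      return 0;
    }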
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 188deb0..8d3da37 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -294,6 +294,14 @@
return false;
}
+ if (UNLIKELY(__builtin_frame_address(0) < thread->GetStackEnd())) {
+ // Don't attempt to do an OSR if we are close to the stack limit. Since
+ // the interpreter frames are still on stack, OSR has the potential
+ // to stack overflow even for a simple loop.
+ // b/27094810.
+ return false;
+ }
+
// Get the actual Java method if this method is from a proxy class. The compiler
// and the JIT code cache do not expect methods from proxy classes.
method = method->GetInterfaceMethodIfProxy(sizeof(void*));
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index fc89bfd..74ff741 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -790,5 +790,24 @@
return mspace_usable_size(reinterpret_cast<const void*>(FromCodeToAllocation(ptr)));
}
+void JitCodeCache::InvalidateCompiledCodeFor(ArtMethod* method,
+ const OatQuickMethodHeader* header) {
+ if (method->GetEntryPointFromQuickCompiledCode() == header->GetEntryPoint()) {
+ // The entrypoint is the one to invalidate, so we just update
+ // it to the interpreter entry point and clear the counter to get the method
+ // Jitted again.
+ Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
+ method, GetQuickToInterpreterBridge());
+ method->ClearCounter();
+ } else {
+ MutexLock mu(Thread::Current(), lock_);
+ auto it = osr_code_map_.find(method);
+ if (it != osr_code_map_.end() && OatQuickMethodHeader::FromCodePointer(it->second) == header) {
+ // Remove the OSR method, to avoid using it again.
+ osr_code_map_.erase(it);
+ }
+ }
+}
+
} // namespace jit
} // namespace art
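Reviewer note: a simplified model of the decision InvalidateCompiledCodeFor makes, with my own stand-in types and no locking: if the invalidated code is the method's current entrypoint, fall back to the interpreter bridge and reset the hotness counter; otherwise it must be an OSR compilation, so drop the matching OSR entry to keep it from being reused.

    #include <cassert>
    #include <map>

    struct Method {
      const void* entry_point;
      int counter;
    };

    void InvalidateCompiledCodeFor(Method* method,
                                   const void* header,
                                   std::map<Method*, const void*>& osr_code_map,
                                   const void* interpreter_bridge) {
      if (method->entry_point == header) {
        method->entry_point = interpreter_bridge;
        method->counter = 0;
      } else {
        auto it = osr_code_map.find(method);
        if (it != osr_code_map.end() && it->second == header) {
          osr_code_map.erase(it);  // Remove the OSR method, to avoid using it again.
        }
      }
    }

    int main() {
      int code, osr_code, bridge;
      Method m{&code, 42};
      std::map<Method*, const void*> osr_map{{&m, &osr_code}};
      InvalidateCompiledCodeFor(&m, &osr_code, osr_map, &bridge);  // OSR entry only.
      assert(m.entry_point == &code && osr_map.empty());
      InvalidateCompiledCodeFor(&m, &code, osr_map, &bridge);      // Main entrypoint.
      assert(m.entry_point == &bridge && m.counter == 0);
      return 0;
    }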
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 048f8d0..71f5cda 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -172,6 +172,10 @@
size_t GetMemorySizeOfCodePointer(const void* ptr) REQUIRES(!lock_);
+ void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
private:
// Take ownership of maps.
JitCodeCache(MemMap* code_map,
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 786cf06..dd384c7 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -23,6 +23,8 @@
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "handle_scope-inl.h"
+#include "jit/jit.h"
+#include "jit/jit_code_cache.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/throwable.h"
@@ -629,13 +631,17 @@
DeoptimizeStackVisitor visitor(self_, context_, this, true);
visitor.WalkStack(true);
- // Compiled code made an explicit deoptimization. Transfer the code
- // to interpreter and clear the counter to JIT the method again.
+ // Compiled code made an explicit deoptimization.
ArtMethod* deopt_method = visitor.GetSingleFrameDeoptMethod();
DCHECK(deopt_method != nullptr);
- deopt_method->ClearCounter();
- Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
- deopt_method, GetQuickToInterpreterBridge());
+ if (Runtime::Current()->UseJit()) {
+ Runtime::Current()->GetJit()->GetCodeCache()->InvalidateCompiledCodeFor(
+ deopt_method, handler_method_header_);
+ } else {
+ // Transfer the code to interpreter.
+ Runtime::Current()->GetInstrumentation()->UpdateMethodsCode(
+ deopt_method, GetQuickToInterpreterBridge());
+ }
// PC needs to be of the quick-to-interpreter bridge.
int32_t offset;
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 1956bae..cbb3e89 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -94,8 +94,6 @@
class Transaction;
typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;
-typedef SafeMap<MethodReference, SafeMap<uint32_t, std::set<uint32_t>>,
- MethodReferenceComparator> MethodRefToStringInitRegMap;
// Not all combinations of flags are valid. You may not visit all roots as well as the new roots
// (no logical reason to do this). You also may not start logging new roots and stop logging new
@@ -574,10 +572,6 @@
return jit_options_.get();
}
- MethodRefToStringInitRegMap& GetStringInitMap() {
- return method_ref_string_init_reg_map_;
- }
-
bool IsDebuggable() const;
// Returns the build fingerprint, if set. Otherwise an empty string is returned.
@@ -803,8 +797,6 @@
// Experimental opcodes should not be used by other production code.
ExperimentalFlags experimental_flags_;
- MethodRefToStringInitRegMap method_ref_string_init_reg_map_;
-
// Contains the build fingerprint, if given as a parameter.
std::string fingerprint_;
diff --git a/runtime/utils.h b/runtime/utils.h
index c00db11..79e4da1 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -111,6 +111,11 @@
: std::abs(value);
}
+template <typename T>
+inline typename std::make_unsigned<T>::type MakeUnsigned(T x) {
+ return static_cast<typename std::make_unsigned<T>::type>(x);
+}
+
std::string PrintableChar(uint16_t ch);
// Returns an ASCII string corresponding to the given UTF-8 string.
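Reviewer note: the MakeUnsigned helper added above is what lets the HBelow/HAbove Compute methods (changed earlier in this patch) drop their explicit casts. A small usage sketch, with Below() written in the style of HBelow::Compute:

    #include <cassert>
    #include <cstdint>
    #include <type_traits>

    // Reinterpret a signed value with the matching unsigned type.
    template <typename T>
    inline typename std::make_unsigned<T>::type MakeUnsigned(T x) {
      return static_cast<typename std::make_unsigned<T>::type>(x);
    }

    template <typename T>
    bool Below(T x, T y) { return MakeUnsigned(x) < MakeUnsigned(y); }

    int main() {
      assert(-1 < 0);                          // Signed comparison.
      assert(!Below(-1, 0));                   // Unsigned: -1 becomes 0xFFFFFFFF.
      assert(Below(0, -1));
      assert(Below(int64_t{1}, int64_t{-1}));  // Works for long constants too.
      return 0;
    }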
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index a6cf9ea..0c6060e 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -617,23 +617,6 @@
return GetQuickInvokedMethod(inst, register_line, is_range, false);
}
-SafeMap<uint32_t, std::set<uint32_t>> MethodVerifier::FindStringInitMap(ArtMethod* m) {
- Thread* self = Thread::Current();
- StackHandleScope<2> hs(self);
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
- MethodVerifier verifier(self, m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
- m->GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(),
- true, true, false, true);
- // Avoid copying: The map is moved out of the verifier before the verifier is destroyed.
- return std::move(verifier.FindStringInitMap());
-}
-
-SafeMap<uint32_t, std::set<uint32_t>>& MethodVerifier::FindStringInitMap() {
- Verify();
- return GetStringInitPcRegMap();
-}
-
bool MethodVerifier::Verify() {
// Some older code doesn't correctly mark constructors as such. Test for this case by looking at
// the name.
@@ -2865,8 +2848,7 @@
* Replace the uninitialized reference with an initialized one. We need to do this for all
* registers that have the same object instance in them, not just the "this" register.
*/
- const uint32_t this_reg = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
- work_line_->MarkRefsAsInitialized(this, this_type, this_reg, work_insn_idx_);
+ work_line_->MarkRefsAsInitialized(this, this_type);
}
if (return_type == nullptr) {
return_type = &reg_types_.FromDescriptor(GetClassLoader(), return_type_descriptor, false);
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index b53a45c..6d8e1ab 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -213,9 +213,6 @@
static ArtMethod* FindInvokedMethodAtDexPc(ArtMethod* m, uint32_t dex_pc)
SHARED_REQUIRES(Locks::mutator_lock_);
- static SafeMap<uint32_t, std::set<uint32_t>> FindStringInitMap(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
static void Init() SHARED_REQUIRES(Locks::mutator_lock_);
static void Shutdown();
@@ -294,10 +291,6 @@
ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
SHARED_REQUIRES(Locks::mutator_lock_);
- SafeMap<uint32_t, std::set<uint32_t>>& GetStringInitPcRegMap() {
- return string_init_pc_reg_map_;
- }
-
uint32_t GetEncounteredFailureTypes() {
return encountered_failure_types_;
}
@@ -875,11 +868,6 @@
friend class art::Thread;
- // Map of dex pcs of invocations of java.lang.String.<init> to the set of other registers that
- // contain the uninitialized this pointer to that invoke. Will contain no entry if there are
- // no other registers.
- SafeMap<uint32_t, std::set<uint32_t>> string_init_pc_reg_map_;
-
DISALLOW_COPY_AND_ASSIGN(MethodVerifier);
};
std::ostream& operator<<(std::ostream& os, const MethodVerifier::FailureKind& rhs);
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index bfbb78f..29d87c4 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -204,9 +204,10 @@
}
inline void RegisterLineArenaDelete::operator()(RegisterLine* ptr) const {
- const size_t size = ptr != nullptr ? RegisterLine::ComputeSize(ptr->NumRegs()) : 0u;
- ptr->~RegisterLine();
- ProtectMemory(ptr, size);
+ if (ptr != nullptr) {
+ ptr->~RegisterLine();
+ ProtectMemory(ptr, RegisterLine::ComputeSize(ptr->NumRegs()));
+ }
}
} // namespace verifier
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index b7cde99..82c371d 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -91,25 +91,14 @@
return true;
}
-void RegisterLine::MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type,
- uint32_t this_reg, uint32_t dex_pc) {
+void RegisterLine::MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type) {
DCHECK(uninit_type.IsUninitializedTypes());
- bool is_string = !uninit_type.IsUnresolvedTypes() && uninit_type.GetClass()->IsStringClass();
const RegType& init_type = verifier->GetRegTypeCache()->FromUninitialized(uninit_type);
size_t changed = 0;
for (uint32_t i = 0; i < num_regs_; i++) {
if (GetRegisterType(verifier, i).Equals(uninit_type)) {
line_[i] = init_type.GetId();
changed++;
- if (is_string && i != this_reg) {
- auto it = verifier->GetStringInitPcRegMap().find(dex_pc);
- if (it != verifier->GetStringInitPcRegMap().end()) {
- it->second.insert(i);
- } else {
- std::set<uint32_t> reg_set = { i };
- verifier->GetStringInitPcRegMap().Put(dex_pc, reg_set);
- }
- }
}
}
// Is this initializing "this"?
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index d508454..15ae202 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -99,11 +99,14 @@
// available now. An example is sharpening types after a check-cast. Note that when given kKeep,
// the new_type is dchecked to be a reference type.
template <LockOp kLockOp>
- ALWAYS_INLINE bool SetRegisterType(MethodVerifier* verifier, uint32_t vdst,
+ ALWAYS_INLINE bool SetRegisterType(MethodVerifier* verifier,
+ uint32_t vdst,
const RegType& new_type)
SHARED_REQUIRES(Locks::mutator_lock_);
- bool SetRegisterTypeWide(MethodVerifier* verifier, uint32_t vdst, const RegType& new_type1,
+ bool SetRegisterTypeWide(MethodVerifier* verifier,
+ uint32_t vdst,
+ const RegType& new_type1,
const RegType& new_type2)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -117,11 +120,14 @@
// Get the type of register vsrc.
const RegType& GetRegisterType(MethodVerifier* verifier, uint32_t vsrc) const;
- ALWAYS_INLINE bool VerifyRegisterType(MethodVerifier* verifier, uint32_t vsrc,
+ ALWAYS_INLINE bool VerifyRegisterType(MethodVerifier* verifier,
+ uint32_t vsrc,
const RegType& check_type)
SHARED_REQUIRES(Locks::mutator_lock_);
- bool VerifyRegisterTypeWide(MethodVerifier* verifier, uint32_t vsrc, const RegType& check_type1,
+ bool VerifyRegisterTypeWide(MethodVerifier* verifier,
+ uint32_t vsrc,
+ const RegType& check_type1,
const RegType& check_type2)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -155,8 +161,7 @@
* reference type. This is called when an appropriate constructor is invoked -- all copies of
* the reference must be marked as initialized.
*/
- void MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type,
- uint32_t this_reg, uint32_t dex_pc)
+ void MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type)
SHARED_REQUIRES(Locks::mutator_lock_);
/*
@@ -210,31 +215,42 @@
* allow_failure will return Conflict() instead of causing a verification failure if there is an
* error.
*/
- const RegType& GetInvocationThis(MethodVerifier* verifier, const Instruction* inst,
- bool is_range, bool allow_failure = false)
+ const RegType& GetInvocationThis(MethodVerifier* verifier,
+ const Instruction* inst,
+ bool is_range,
+ bool allow_failure = false)
SHARED_REQUIRES(Locks::mutator_lock_);
/*
* Verify types for a simple two-register instruction (e.g. "neg-int").
* "dst_type" is stored into vA, and "src_type" is verified against vB.
*/
- void CheckUnaryOp(MethodVerifier* verifier, const Instruction* inst, const RegType& dst_type,
+ void CheckUnaryOp(MethodVerifier* verifier,
+ const Instruction* inst,
+ const RegType& dst_type,
const RegType& src_type)
SHARED_REQUIRES(Locks::mutator_lock_);
- void CheckUnaryOpWide(MethodVerifier* verifier, const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1, const RegType& src_type2)
+ void CheckUnaryOpWide(MethodVerifier* verifier,
+ const Instruction* inst,
+ const RegType& dst_type1,
+ const RegType& dst_type2,
+ const RegType& src_type1,
+ const RegType& src_type2)
SHARED_REQUIRES(Locks::mutator_lock_);
- void CheckUnaryOpToWide(MethodVerifier* verifier, const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
+ void CheckUnaryOpToWide(MethodVerifier* verifier,
+ const Instruction* inst,
+ const RegType& dst_type1,
+ const RegType& dst_type2,
const RegType& src_type)
SHARED_REQUIRES(Locks::mutator_lock_);
- void CheckUnaryOpFromWide(MethodVerifier* verifier, const Instruction* inst,
+ void CheckUnaryOpFromWide(MethodVerifier* verifier,
+ const Instruction* inst,
const RegType& dst_type,
- const RegType& src_type1, const RegType& src_type2)
+ const RegType& src_type1,
+ const RegType& src_type2)
SHARED_REQUIRES(Locks::mutator_lock_);
/*
@@ -242,19 +258,28 @@
* "dst_type" is stored into vA, and "src_type1"/"src_type2" are verified
* against vB/vC.
*/
- void CheckBinaryOp(MethodVerifier* verifier, const Instruction* inst,
- const RegType& dst_type, const RegType& src_type1, const RegType& src_type2,
+ void CheckBinaryOp(MethodVerifier* verifier,
+ const Instruction* inst,
+ const RegType& dst_type,
+ const RegType& src_type1,
+ const RegType& src_type2,
bool check_boolean_op)
SHARED_REQUIRES(Locks::mutator_lock_);
- void CheckBinaryOpWide(MethodVerifier* verifier, const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1_1, const RegType& src_type1_2,
- const RegType& src_type2_1, const RegType& src_type2_2)
+ void CheckBinaryOpWide(MethodVerifier* verifier,
+ const Instruction* inst,
+ const RegType& dst_type1,
+ const RegType& dst_type2,
+ const RegType& src_type1_1,
+ const RegType& src_type1_2,
+ const RegType& src_type2_1,
+ const RegType& src_type2_2)
SHARED_REQUIRES(Locks::mutator_lock_);
- void CheckBinaryOpWideShift(MethodVerifier* verifier, const Instruction* inst,
- const RegType& long_lo_type, const RegType& long_hi_type,
+ void CheckBinaryOpWideShift(MethodVerifier* verifier,
+ const Instruction* inst,
+ const RegType& long_lo_type,
+ const RegType& long_hi_type,
const RegType& int_type)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -262,20 +287,28 @@
* Verify types for a binary "2addr" operation. "src_type1"/"src_type2"
* are verified against vA/vB, then "dst_type" is stored into vA.
*/
- void CheckBinaryOp2addr(MethodVerifier* verifier, const Instruction* inst,
+ void CheckBinaryOp2addr(MethodVerifier* verifier,
+ const Instruction* inst,
const RegType& dst_type,
- const RegType& src_type1, const RegType& src_type2,
+ const RegType& src_type1,
+ const RegType& src_type2,
bool check_boolean_op)
SHARED_REQUIRES(Locks::mutator_lock_);
- void CheckBinaryOp2addrWide(MethodVerifier* verifier, const Instruction* inst,
- const RegType& dst_type1, const RegType& dst_type2,
- const RegType& src_type1_1, const RegType& src_type1_2,
- const RegType& src_type2_1, const RegType& src_type2_2)
+ void CheckBinaryOp2addrWide(MethodVerifier* verifier,
+ const Instruction* inst,
+ const RegType& dst_type1,
+ const RegType& dst_type2,
+ const RegType& src_type1_1,
+ const RegType& src_type1_2,
+ const RegType& src_type2_1,
+ const RegType& src_type2_2)
SHARED_REQUIRES(Locks::mutator_lock_);
- void CheckBinaryOp2addrWideShift(MethodVerifier* verifier, const Instruction* inst,
- const RegType& long_lo_type, const RegType& long_hi_type,
+ void CheckBinaryOp2addrWideShift(MethodVerifier* verifier,
+ const Instruction* inst,
+ const RegType& long_lo_type,
+ const RegType& long_hi_type,
const RegType& int_type)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -285,9 +318,12 @@
*
* If "check_boolean_op" is set, we use the constant value in vC.
*/
- void CheckLiteralOp(MethodVerifier* verifier, const Instruction* inst,
- const RegType& dst_type, const RegType& src_type,
- bool check_boolean_op, bool is_lit16)
+ void CheckLiteralOp(MethodVerifier* verifier,
+ const Instruction* inst,
+ const RegType& dst_type,
+ const RegType& src_type,
+ bool check_boolean_op,
+ bool is_lit16)
SHARED_REQUIRES(Locks::mutator_lock_);
// Verify/push monitor onto the monitor stack, locking the value in reg_idx at location insn_idx.
@@ -409,8 +445,6 @@
void operator()(RegisterLine* ptr) const;
};
-
-
} // namespace verifier
} // namespace art
diff --git a/test/442-checker-constant-folding/smali/TestCmp.smali b/test/442-checker-constant-folding/smali/TestCmp.smali
new file mode 100644
index 0000000..df631bc
--- /dev/null
+++ b/test/442-checker-constant-folding/smali/TestCmp.smali
@@ -0,0 +1,332 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTestCmp;
+
+.super Ljava/lang/Object;
+
+
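+# Each method below compares two constants, so constant folding can evaluate
+# the comparison and replace the Compare instruction with an IntConstant.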
+## CHECK-START: int TestCmp.$opt$CmpLongConstants() constant_folding (before)
+## CHECK-DAG: <<Const13:j\d+>> LongConstant 13
+## CHECK-DAG: <<Const7:j\d+>> LongConstant 7
+## CHECK-DAG: <<Cmp:i\d+>> Compare [<<Const13>>,<<Const7>>]
+## CHECK-DAG: Return [<<Cmp>>]
+
+## CHECK-START: int TestCmp.$opt$CmpLongConstants() constant_folding (after)
+## CHECK-DAG: LongConstant 13
+## CHECK-DAG: LongConstant 7
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+## CHECK-DAG: Return [<<Const1>>]
+
+## CHECK-START: int TestCmp.$opt$CmpLongConstants() constant_folding (after)
+## CHECK-NOT: Compare
+
+.method public static $opt$CmpLongConstants()I
+ .registers 5
+ const-wide v1, 13
+ const-wide v3, 7
+ cmp-long v0, v1, v3
+ return v0
+.end method
+
+## CHECK-START: int TestCmp.$opt$CmpGtFloatConstants() constant_folding (before)
+## CHECK-DAG: <<Const11:f\d+>> FloatConstant 11
+## CHECK-DAG: <<Const22:f\d+>> FloatConstant 22
+## CHECK-DAG: <<Cmp:i\d+>> Compare [<<Const11>>,<<Const22>>] bias:gt
+## CHECK-DAG: Return [<<Cmp>>]
+
+## CHECK-START: int TestCmp.$opt$CmpGtFloatConstants() constant_folding (after)
+## CHECK-DAG: FloatConstant 11
+## CHECK-DAG: FloatConstant 22
+## CHECK-DAG: <<ConstM1:i\d+>> IntConstant -1
+## CHECK-DAG: Return [<<ConstM1>>]
+
+## CHECK-START: int TestCmp.$opt$CmpGtFloatConstants() constant_folding (after)
+## CHECK-NOT: Compare
+
+.method public static $opt$CmpGtFloatConstants()I
+ .registers 3
+ const v1, 11.f
+ const v2, 22.f
+ cmpg-float v0, v1, v2
+ return v0
+.end method
+
+## CHECK-START: int TestCmp.$opt$CmpLtFloatConstants() constant_folding (before)
+## CHECK-DAG: <<Const33:f\d+>> FloatConstant 33
+## CHECK-DAG: <<Const44:f\d+>> FloatConstant 44
+## CHECK-DAG: <<Cmp:i\d+>> Compare [<<Const33>>,<<Const44>>] bias:lt
+## CHECK-DAG: Return [<<Cmp>>]
+
+## CHECK-START: int TestCmp.$opt$CmpLtFloatConstants() constant_folding (after)
+## CHECK-DAG: FloatConstant 33
+## CHECK-DAG: FloatConstant 44
+## CHECK-DAG: <<ConstM1:i\d+>> IntConstant -1
+## CHECK-DAG: Return [<<ConstM1>>]
+
+## CHECK-START: int TestCmp.$opt$CmpLtFloatConstants() constant_folding (after)
+## CHECK-NOT: Compare
+
+.method public static $opt$CmpLtFloatConstants()I
+ .registers 3
+ const v1, 33.f
+ const v2, 44.f
+ cmpl-float v0, v1, v2
+ return v0
+.end method
+
+## CHECK-START: int TestCmp.$opt$CmpGtDoubleConstants() constant_folding (before)
+## CHECK-DAG: <<Const55:d\d+>> DoubleConstant 55
+## CHECK-DAG: <<Const66:d\d+>> DoubleConstant 66
+## CHECK-DAG: <<Cmp:i\d+>> Compare [<<Const55>>,<<Const66>>] bias:gt
+## CHECK-DAG: Return [<<Cmp>>]
+
+## CHECK-START: int TestCmp.$opt$CmpGtDoubleConstants() constant_folding (after)
+## CHECK-DAG: DoubleConstant 55
+## CHECK-DAG: DoubleConstant 66
+## CHECK-DAG: <<ConstM1:i\d+>> IntConstant -1
+## CHECK-DAG: Return [<<ConstM1>>]
+
+## CHECK-START: int TestCmp.$opt$CmpGtDoubleConstants() constant_folding (after)
+## CHECK-NOT: Compare
+
+.method public static $opt$CmpGtDoubleConstants()I
+ .registers 5
+ const-wide v1, 55.
+ const-wide v3, 66.
+ cmpg-double v0, v1, v3
+ return v0
+.end method
+
+## CHECK-START: int TestCmp.$opt$CmpLtDoubleConstants() constant_folding (before)
+## CHECK-DAG: <<Const77:d\d+>> DoubleConstant 77
+## CHECK-DAG: <<Const88:d\d+>> DoubleConstant 88
+## CHECK-DAG: <<Cmp:i\d+>> Compare [<<Const77>>,<<Const88>>] bias:lt
+## CHECK-DAG: Return [<<Cmp>>]
+
+## CHECK-START: int TestCmp.$opt$CmpLtDoubleConstants() constant_folding (after)
+## CHECK-DAG: DoubleConstant 77
+## CHECK-DAG: DoubleConstant 88
+## CHECK-DAG: <<ConstM1:i\d+>> IntConstant -1
+## CHECK-DAG: Return [<<ConstM1>>]
+
+## CHECK-START: int TestCmp.$opt$CmpLtDoubleConstants() constant_folding (after)
+## CHECK-NOT: Compare
+
+.method public static $opt$CmpLtDoubleConstants()I
+ .registers 5
+ const-wide v1, 77.
+ const-wide v3, 88.
+ cmpl-double v0, v1, v3
+ return v0
+.end method
+
+
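+# Comparing a constant against itself folds to 0, regardless of the gt/lt bias.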
+## CHECK-START: int TestCmp.$opt$CmpLongSameConstant() constant_folding (before)
+## CHECK-DAG: <<Const100:j\d+>> LongConstant 100
+## CHECK-DAG: <<Cmp:i\d+>> Compare [<<Const100>>,<<Const100>>]
+## CHECK-DAG: Return [<<Cmp>>]
+
+## CHECK-START: int TestCmp.$opt$CmpLongSameConstant() constant_folding (after)
+## CHECK-DAG: LongConstant 100
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+## CHECK-DAG: Return [<<Const0>>]
+
+## CHECK-START: int TestCmp.$opt$CmpLongSameConstant() constant_folding (after)
+## CHECK-NOT: Compare
+
+.method public static $opt$CmpLongSameConstant()I
+ .registers 5
+ const-wide v1, 100
+ const-wide v3, 100
+ cmp-long v0, v1, v3
+ return v0
+.end method
+
+## CHECK-START: int TestCmp.$opt$CmpGtFloatSameConstant() constant_folding (before)
+## CHECK-DAG: <<Const200:f\d+>> FloatConstant 200
+## CHECK-DAG: <<Cmp:i\d+>> Compare [<<Const200>>,<<Const200>>] bias:gt
+## CHECK-DAG: Return [<<Cmp>>]
+
+## CHECK-START: int TestCmp.$opt$CmpGtFloatSameConstant() constant_folding (after)
+## CHECK-DAG: FloatConstant 200
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+## CHECK-DAG: Return [<<Const0>>]
+
+## CHECK-START: int TestCmp.$opt$CmpGtFloatSameConstant() constant_folding (after)
+## CHECK-NOT: Compare
+
+.method public static $opt$CmpGtFloatSameConstant()I
+ .registers 3
+ const v1, 200.f
+ const v2, 200.f
+ cmpg-float v0, v1, v2
+ return v0
+.end method
+
+## CHECK-START: int TestCmp.$opt$CmpLtFloatSameConstant() constant_folding (before)
+## CHECK-DAG: <<Const300:f\d+>> FloatConstant 300
+## CHECK-DAG: <<Cmp:i\d+>> Compare [<<Const300>>,<<Const300>>] bias:lt
+## CHECK-DAG: Return [<<Cmp>>]
+
+## CHECK-START: int TestCmp.$opt$CmpLtFloatSameConstant() constant_folding (after)
+## CHECK-DAG: FloatConstant 300
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+## CHECK-DAG: Return [<<Const0>>]
+
+## CHECK-START: int TestCmp.$opt$CmpLtFloatSameConstant() constant_folding (after)
+## CHECK-NOT: Compare
+
+.method public static $opt$CmpLtFloatSameConstant()I
+ .registers 3
+ const v1, 300.f
+ const v2, 300.f
+ cmpl-float v0, v1, v2
+ return v0
+.end method
+
+## CHECK-START: int TestCmp.$opt$CmpGtDoubleSameConstant() constant_folding (before)
+## CHECK-DAG: <<Const400:d\d+>> DoubleConstant 400
+## CHECK-DAG: <<Cmp:i\d+>> Compare [<<Const400>>,<<Const400>>] bias:gt
+## CHECK-DAG: Return [<<Cmp>>]
+
+## CHECK-START: int TestCmp.$opt$CmpGtDoubleSameConstant() constant_folding (after)
+## CHECK-DAG: DoubleConstant 400
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+## CHECK-DAG: Return [<<Const0>>]
+
+## CHECK-START: int TestCmp.$opt$CmpGtDoubleSameConstant() constant_folding (after)
+## CHECK-NOT: Compare
+
+.method public static $opt$CmpGtDoubleSameConstant()I
+ .registers 5
+ const-wide v1, 400.
+ const-wide v3, 400.
+ cmpg-double v0, v1, v3
+ return v0
+.end method
+
+## CHECK-START: int TestCmp.$opt$CmpLtDoubleSameConstant() constant_folding (before)
+## CHECK-DAG: <<Const500:d\d+>> DoubleConstant 500
+## CHECK-DAG: <<Cmp:i\d+>> Compare [<<Const500>>,<<Const500>>] bias:lt
+## CHECK-DAG: Return [<<Cmp>>]
+
+## CHECK-START: int TestCmp.$opt$CmpLtDoubleSameConstant() constant_folding (after)
+## CHECK-DAG: DoubleConstant 500
+## CHECK-DAG: <<Const0:i\d+>> IntConstant 0
+## CHECK-DAG: Return [<<Const0>>]
+
+## CHECK-START: int TestCmp.$opt$CmpLtDoubleSameConstant() constant_folding (after)
+## CHECK-NOT: Compare
+
+.method public static $opt$CmpLtDoubleSameConstant()I
+ .registers 5
+ const-wide v1, 500.
+ const-wide v3, 500.
+ cmpl-double v0, v1, v3
+ return v0
+.end method
+
+
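+# When one operand is NaN, cmpg-* instructions produce 1 while cmpl-* produce
+# -1; the folded results below check that the compiler honors the bias.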
+## CHECK-START: int TestCmp.$opt$CmpGtFloatConstantWithNaN() constant_folding (before)
+## CHECK-DAG: <<Const44:f\d+>> FloatConstant 44
+## CHECK-DAG: <<ConstNan:f\d+>> FloatConstant nan
+## CHECK-DAG: <<Cmp:i\d+>> Compare [<<Const44>>,<<ConstNan>>] bias:gt
+## CHECK-DAG: Return [<<Cmp>>]
+
+## CHECK-START: int TestCmp.$opt$CmpGtFloatConstantWithNaN() constant_folding (after)
+## CHECK-DAG: FloatConstant 44
+## CHECK-DAG: FloatConstant nan
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+## CHECK-DAG: Return [<<Const1>>]
+
+## CHECK-START: int TestCmp.$opt$CmpGtFloatConstantWithNaN() constant_folding (after)
+## CHECK-NOT: Compare
+
+.method public static $opt$CmpGtFloatConstantWithNaN()I
+ .registers 3
+ const v1, 44.f
+ const v2, NaNf
+ cmpg-float v0, v1, v2
+ return v0
+.end method
+
+## CHECK-START: int TestCmp.$opt$CmpLtFloatConstantWithNaN() constant_folding (before)
+## CHECK-DAG: <<Const44:f\d+>> FloatConstant 44
+## CHECK-DAG: <<ConstNan:f\d+>> FloatConstant nan
+## CHECK-DAG: <<Cmp:i\d+>> Compare [<<Const44>>,<<ConstNan>>] bias:lt
+## CHECK-DAG: Return [<<Cmp>>]
+
+## CHECK-START: int TestCmp.$opt$CmpLtFloatConstantWithNaN() constant_folding (after)
+## CHECK-DAG: FloatConstant 44
+## CHECK-DAG: FloatConstant nan
+## CHECK-DAG: <<ConstM1:i\d+>> IntConstant -1
+## CHECK-DAG: Return [<<ConstM1>>]
+
+## CHECK-START: int TestCmp.$opt$CmpLtFloatConstantWithNaN() constant_folding (after)
+## CHECK-NOT: Compare
+
+.method public static $opt$CmpLtFloatConstantWithNaN()I
+ .registers 3
+ const v1, 44.f
+ const v2, NaNf
+ cmpl-float v0, v1, v2
+ return v0
+.end method
+
+## CHECK-START: int TestCmp.$opt$CmpGtDoubleConstantWithNaN() constant_folding (before)
+## CHECK-DAG: <<Const45:d\d+>> DoubleConstant 45
+## CHECK-DAG: <<ConstNan:d\d+>> DoubleConstant nan
+## CHECK-DAG: <<Cmp:i\d+>> Compare [<<Const45>>,<<ConstNan>>] bias:gt
+## CHECK-DAG: Return [<<Cmp>>]
+
+## CHECK-START: int TestCmp.$opt$CmpGtDoubleConstantWithNaN() constant_folding (after)
+## CHECK-DAG: DoubleConstant 45
+## CHECK-DAG: DoubleConstant nan
+## CHECK-DAG: <<Const1:i\d+>> IntConstant 1
+## CHECK-DAG: Return [<<Const1>>]
+
+## CHECK-START: int TestCmp.$opt$CmpGtDoubleConstantWithNaN() constant_folding (after)
+## CHECK-NOT: Compare
+
+.method public static $opt$CmpGtDoubleConstantWithNaN()I
+ .registers 5
+ const-wide v1, 45.
+ const-wide v3, NaN
+ cmpg-double v0, v1, v3
+ return v0
+.end method
+
+## CHECK-START: int TestCmp.$opt$CmpLtDoubleConstantWithNaN() constant_folding (before)
+## CHECK-DAG: <<Const46:d\d+>> DoubleConstant 46
+## CHECK-DAG: <<ConstNan:d\d+>> DoubleConstant nan
+## CHECK-DAG: <<Cmp:i\d+>> Compare [<<Const46>>,<<ConstNan>>] bias:lt
+## CHECK-DAG: Return [<<Cmp>>]
+
+## CHECK-START: int TestCmp.$opt$CmpLtDoubleConstantWithNaN() constant_folding (after)
+## CHECK-DAG: DoubleConstant 46
+## CHECK-DAG: DoubleConstant nan
+## CHECK-DAG: <<ConstM1:i\d+>> IntConstant -1
+## CHECK-DAG: Return [<<ConstM1>>]
+
+## CHECK-START: int TestCmp.$opt$CmpLtDoubleConstantWithNaN() constant_folding (after)
+## CHECK-NOT: Compare
+
+.method public static $opt$CmpLtDoubleConstantWithNaN()I
+ .registers 5
+ const-wide v1, 46.
+ const-wide v3, NaN
+ cmpl-double v0, v1, v3
+ return v0
+.end method
diff --git a/test/442-checker-constant-folding/src/Main.java b/test/442-checker-constant-folding/src/Main.java
index 5479818..93fe397 100644
--- a/test/442-checker-constant-folding/src/Main.java
+++ b/test/442-checker-constant-folding/src/Main.java
@@ -14,8 +14,13 @@
* limitations under the License.
*/
+import java.lang.reflect.Method;
+
public class Main {
+ // Workaround for b/18051191.
+ class InnerClass {}
+
public static void assertFalse(boolean condition) {
if (condition) {
throw new Error();
@@ -47,6 +52,68 @@
}
+ // Wrappers around methods located in file TestCmp.smali.
+
+ public int smaliCmpLongConstants() throws Exception {
+ Method m = testCmp.getMethod("$opt$CmpLongConstants");
+ return (Integer)m.invoke(null);
+ }
+ public int smaliCmpGtFloatConstants() throws Exception {
+ Method m = testCmp.getMethod("$opt$CmpGtFloatConstants");
+ return (Integer)m.invoke(null);
+ }
+ public int smaliCmpLtFloatConstants() throws Exception {
+ Method m = testCmp.getMethod("$opt$CmpLtFloatConstants");
+ return (Integer)m.invoke(null);
+ }
+ public int smaliCmpGtDoubleConstants() throws Exception {
+ Method m = testCmp.getMethod("$opt$CmpGtDoubleConstants");
+ return (Integer)m.invoke(null);
+ }
+ public int smaliCmpLtDoubleConstants() throws Exception {
+ Method m = testCmp.getMethod("$opt$CmpLtDoubleConstants");
+ return (Integer)m.invoke(null);
+ }
+
+ public int smaliCmpLongSameConstant() throws Exception {
+ Method m = testCmp.getMethod("$opt$CmpLongSameConstant");
+ return (Integer)m.invoke(null);
+ }
+ public int smaliCmpGtFloatSameConstant() throws Exception {
+ Method m = testCmp.getMethod("$opt$CmpGtFloatSameConstant");
+ return (Integer)m.invoke(null);
+ }
+ public int smaliCmpLtFloatSameConstant() throws Exception {
+ Method m = testCmp.getMethod("$opt$CmpLtFloatSameConstant");
+ return (Integer)m.invoke(null);
+ }
+ public int smaliCmpGtDoubleSameConstant() throws Exception {
+ Method m = testCmp.getMethod("$opt$CmpGtDoubleSameConstant");
+ return (Integer)m.invoke(null);
+ }
+ public int smaliCmpLtDoubleSameConstant() throws Exception {
+ Method m = testCmp.getMethod("$opt$CmpLtDoubleSameConstant");
+ return (Integer)m.invoke(null);
+ }
+
+ public int smaliCmpGtFloatConstantWithNaN() throws Exception {
+ Method m = testCmp.getMethod("$opt$CmpGtFloatConstantWithNaN");
+ return (Integer)m.invoke(null);
+ }
+ public int smaliCmpLtFloatConstantWithNaN() throws Exception {
+ Method m = testCmp.getMethod("$opt$CmpLtFloatConstantWithNaN");
+ return (Integer)m.invoke(null);
+ }
+ public int smaliCmpGtDoubleConstantWithNaN() throws Exception {
+ Method m = testCmp.getMethod("$opt$CmpGtDoubleConstantWithNaN");
+ return (Integer)m.invoke(null);
+ }
+ public int smaliCmpLtDoubleConstantWithNaN() throws Exception {
+ Method m = testCmp.getMethod("$opt$CmpLtDoubleConstantWithNaN");
+ return (Integer)m.invoke(null);
+ }
+
+
/**
* Exercise constant folding on negation.
*/
@@ -89,6 +156,44 @@
return y;
}
+ /// CHECK-START: float Main.FloatNegation() constant_folding (before)
+ /// CHECK-DAG: <<Const42:f\d+>> FloatConstant 42
+ /// CHECK-DAG: <<Neg:f\d+>> Neg [<<Const42>>]
+ /// CHECK-DAG: Return [<<Neg>>]
+
+ /// CHECK-START: float Main.FloatNegation() constant_folding (after)
+ /// CHECK-DAG: <<ConstN42:f\d+>> FloatConstant -42
+ /// CHECK-DAG: Return [<<ConstN42>>]
+
+ /// CHECK-START: float Main.FloatNegation() constant_folding (after)
+ /// CHECK-NOT: Neg
+
+ public static float FloatNegation() {
+ float x, y;
+ x = 42F;
+ y = -x;
+ return y;
+ }
+
+ /// CHECK-START: double Main.DoubleNegation() constant_folding (before)
+ /// CHECK-DAG: <<Const42:d\d+>> DoubleConstant 42
+ /// CHECK-DAG: <<Neg:d\d+>> Neg [<<Const42>>]
+ /// CHECK-DAG: Return [<<Neg>>]
+
+ /// CHECK-START: double Main.DoubleNegation() constant_folding (after)
+ /// CHECK-DAG: <<ConstN42:d\d+>> DoubleConstant -42
+ /// CHECK-DAG: Return [<<ConstN42>>]
+
+ /// CHECK-START: double Main.DoubleNegation() constant_folding (after)
+ /// CHECK-NOT: Neg
+
+ public static double DoubleNegation() {
+ double x, y;
+ x = 42D;
+ y = -x;
+ return y;
+ }
+
/**
* Exercise constant folding on addition.
@@ -166,6 +271,48 @@
return c;
}
+ /// CHECK-START: float Main.FloatAddition() constant_folding (before)
+ /// CHECK-DAG: <<Const1:f\d+>> FloatConstant 1
+ /// CHECK-DAG: <<Const2:f\d+>> FloatConstant 2
+ /// CHECK-DAG: <<Add:f\d+>> Add [<<Const1>>,<<Const2>>]
+ /// CHECK-DAG: Return [<<Add>>]
+
+ /// CHECK-START: float Main.FloatAddition() constant_folding (after)
+ /// CHECK-DAG: <<Const3:f\d+>> FloatConstant 3
+ /// CHECK-DAG: Return [<<Const3>>]
+
+ /// CHECK-START: float Main.FloatAddition() constant_folding (after)
+ /// CHECK-NOT: Add
+
+ public static float FloatAddition() {
+ float a, b, c;
+ a = 1F;
+ b = 2F;
+ c = a + b;
+ return c;
+ }
+
+ /// CHECK-START: double Main.DoubleAddition() constant_folding (before)
+ /// CHECK-DAG: <<Const1:d\d+>> DoubleConstant 1
+ /// CHECK-DAG: <<Const2:d\d+>> DoubleConstant 2
+ /// CHECK-DAG: <<Add:d\d+>> Add [<<Const1>>,<<Const2>>]
+ /// CHECK-DAG: Return [<<Add>>]
+
+ /// CHECK-START: double Main.DoubleAddition() constant_folding (after)
+ /// CHECK-DAG: <<Const3:d\d+>> DoubleConstant 3
+ /// CHECK-DAG: Return [<<Const3>>]
+
+ /// CHECK-START: double Main.DoubleAddition() constant_folding (after)
+ /// CHECK-NOT: Add
+
+ public static double DoubleAddition() {
+ double a, b, c;
+ a = 1D;
+ b = 2D;
+ c = a + b;
+ return c;
+ }
+
/**
* Exercise constant folding on subtraction.
@@ -213,6 +360,48 @@
return c;
}
+ /// CHECK-START: float Main.FloatSubtraction() constant_folding (before)
+ /// CHECK-DAG: <<Const6:f\d+>> FloatConstant 6
+ /// CHECK-DAG: <<Const2:f\d+>> FloatConstant 2
+ /// CHECK-DAG: <<Sub:f\d+>> Sub [<<Const6>>,<<Const2>>]
+ /// CHECK-DAG: Return [<<Sub>>]
+
+ /// CHECK-START: float Main.FloatSubtraction() constant_folding (after)
+ /// CHECK-DAG: <<Const4:f\d+>> FloatConstant 4
+ /// CHECK-DAG: Return [<<Const4>>]
+
+ /// CHECK-START: float Main.FloatSubtraction() constant_folding (after)
+ /// CHECK-NOT: Sub
+
+ public static float FloatSubtraction() {
+ float a, b, c;
+ a = 6F;
+ b = 2F;
+ c = a - b;
+ return c;
+ }
+
+ /// CHECK-START: double Main.DoubleSubtraction() constant_folding (before)
+ /// CHECK-DAG: <<Const6:d\d+>> DoubleConstant 6
+ /// CHECK-DAG: <<Const2:d\d+>> DoubleConstant 2
+ /// CHECK-DAG: <<Sub:d\d+>> Sub [<<Const6>>,<<Const2>>]
+ /// CHECK-DAG: Return [<<Sub>>]
+
+ /// CHECK-START: double Main.DoubleSubtraction() constant_folding (after)
+ /// CHECK-DAG: <<Const4:d\d+>> DoubleConstant 4
+ /// CHECK-DAG: Return [<<Const4>>]
+
+ /// CHECK-START: double Main.DoubleSubtraction() constant_folding (after)
+ /// CHECK-NOT: Sub
+
+ public static double DoubleSubtraction() {
+ double a, b, c;
+ a = 6D;
+ b = 2D;
+ c = a - b;
+ return c;
+ }
+
/**
* Exercise constant folding on multiplication.
@@ -260,6 +449,48 @@
return c;
}
+ /// CHECK-START: float Main.FloatMultiplication() constant_folding (before)
+ /// CHECK-DAG: <<Const7:f\d+>> FloatConstant 7
+ /// CHECK-DAG: <<Const3:f\d+>> FloatConstant 3
+ /// CHECK-DAG: <<Mul:f\d+>> Mul [<<Const7>>,<<Const3>>]
+ /// CHECK-DAG: Return [<<Mul>>]
+
+ /// CHECK-START: float Main.FloatMultiplication() constant_folding (after)
+ /// CHECK-DAG: <<Const21:f\d+>> FloatConstant 21
+ /// CHECK-DAG: Return [<<Const21>>]
+
+ /// CHECK-START: float Main.FloatMultiplication() constant_folding (after)
+ /// CHECK-NOT: Mul
+
+ public static float FloatMultiplication() {
+ float a, b, c;
+ a = 7F;
+ b = 3F;
+ c = a * b;
+ return c;
+ }
+
+ /// CHECK-START: double Main.DoubleMultiplication() constant_folding (before)
+ /// CHECK-DAG: <<Const7:d\d+>> DoubleConstant 7
+ /// CHECK-DAG: <<Const3:d\d+>> DoubleConstant 3
+ /// CHECK-DAG: <<Mul:d\d+>> Mul [<<Const7>>,<<Const3>>]
+ /// CHECK-DAG: Return [<<Mul>>]
+
+ /// CHECK-START: double Main.DoubleMultiplication() constant_folding (after)
+ /// CHECK-DAG: <<Const21:d\d+>> DoubleConstant 21
+ /// CHECK-DAG: Return [<<Const21>>]
+
+ /// CHECK-START: double Main.DoubleMultiplication() constant_folding (after)
+ /// CHECK-NOT: Mul
+
+ public static double DoubleMultiplication() {
+ double a, b, c;
+ a = 7D;
+ b = 3D;
+ c = a * b;
+ return c;
+ }
+
/**
* Exercise constant folding on division.
@@ -311,6 +542,48 @@
return c;
}
+ /// CHECK-START: float Main.FloatDivision() constant_folding (before)
+ /// CHECK-DAG: <<Const8:f\d+>> FloatConstant 8
+ /// CHECK-DAG: <<Const2P5:f\d+>> FloatConstant 2.5
+ /// CHECK-DAG: <<Div:f\d+>> Div [<<Const8>>,<<Const2P5>>]
+ /// CHECK-DAG: Return [<<Div>>]
+
+ /// CHECK-START: float Main.FloatDivision() constant_folding (after)
+ /// CHECK-DAG: <<Const3P2:f\d+>> FloatConstant 3.2
+ /// CHECK-DAG: Return [<<Const3P2>>]
+
+ /// CHECK-START: float Main.FloatDivision() constant_folding (after)
+ /// CHECK-NOT: Div
+
+ public static float FloatDivision() {
+ float a, b, c;
+ a = 8F;
+ b = 2.5F;
+ c = a / b;
+ return c;
+ }
+
+ /// CHECK-START: double Main.DoubleDivision() constant_folding (before)
+ /// CHECK-DAG: <<Const8:d\d+>> DoubleConstant 8
+ /// CHECK-DAG: <<Const2P5:d\d+>> DoubleConstant 2.5
+ /// CHECK-DAG: <<Div:d\d+>> Div [<<Const8>>,<<Const2P5>>]
+ /// CHECK-DAG: Return [<<Div>>]
+
+ /// CHECK-START: double Main.DoubleDivision() constant_folding (after)
+ /// CHECK-DAG: <<Const3P2:d\d+>> DoubleConstant 3.2
+ /// CHECK-DAG: Return [<<Const3P2>>]
+
+ /// CHECK-START: double Main.DoubleDivision() constant_folding (after)
+ /// CHECK-NOT: Div
+
+ public static double DoubleDivision() {
+ double a, b, c;
+ a = 8D;
+ b = 2.5D;
+ c = a / b;
+ return c;
+ }
+
/**
* Exercise constant folding on remainder.
@@ -362,6 +635,48 @@
return c;
}
+ /// CHECK-START: float Main.FloatRemainder() constant_folding (before)
+ /// CHECK-DAG: <<Const8:f\d+>> FloatConstant 8
+ /// CHECK-DAG: <<Const2P5:f\d+>> FloatConstant 2.5
+ /// CHECK-DAG: <<Rem:f\d+>> Rem [<<Const8>>,<<Const2P5>>]
+ /// CHECK-DAG: Return [<<Rem>>]
+
+ /// CHECK-START: float Main.FloatRemainder() constant_folding (after)
+ /// CHECK-DAG: <<Const0P5:f\d+>> FloatConstant 0.5
+ /// CHECK-DAG: Return [<<Const0P5>>]
+
+ /// CHECK-START: float Main.FloatRemainder() constant_folding (after)
+ /// CHECK-NOT: Rem
+
+ public static float FloatRemainder() {
+ float a, b, c;
+ a = 8F;
+ b = 2.5F;
+ c = a % b;
+ return c;
+ }
+
+ /// CHECK-START: double Main.DoubleRemainder() constant_folding (before)
+ /// CHECK-DAG: <<Const8:d\d+>> DoubleConstant 8
+ /// CHECK-DAG: <<Const2P5:d\d+>> DoubleConstant 2.5
+ /// CHECK-DAG: <<Rem:d\d+>> Rem [<<Const8>>,<<Const2P5>>]
+ /// CHECK-DAG: Return [<<Rem>>]
+
+ /// CHECK-START: double Main.DoubleRemainder() constant_folding (after)
+ /// CHECK-DAG: <<Const0P5:d\d+>> DoubleConstant 0.5
+ /// CHECK-DAG: Return [<<Const0P5>>]
+
+ /// CHECK-START: double Main.DoubleRemainder() constant_folding (after)
+ /// CHECK-NOT: Rem
+
+ public static double DoubleRemainder() {
+ double a, b, c;
+ a = 8D;
+ b = 2.5D;
+ c = a % b;
+ return c;
+ }
+
/**
* Exercise constant folding on left shift.
@@ -1197,25 +1512,37 @@
}
- public static void main(String[] args) {
+ public static void main(String[] args) throws Exception {
assertIntEquals(-42, IntNegation());
assertLongEquals(-42L, LongNegation());
+ assertFloatEquals(-42F, FloatNegation());
+ assertDoubleEquals(-42D, DoubleNegation());
assertIntEquals(3, IntAddition1());
assertIntEquals(14, IntAddition2());
assertLongEquals(3L, LongAddition());
+ assertFloatEquals(3F, FloatAddition());
+ assertDoubleEquals(3D, DoubleAddition());
assertIntEquals(4, IntSubtraction());
assertLongEquals(4L, LongSubtraction());
+ assertFloatEquals(4F, FloatSubtraction());
+ assertDoubleEquals(4D, DoubleSubtraction());
assertIntEquals(21, IntMultiplication());
assertLongEquals(21L, LongMultiplication());
+ assertFloatEquals(21F, FloatMultiplication());
+ assertDoubleEquals(21D, DoubleMultiplication());
assertIntEquals(2, IntDivision());
assertLongEquals(2L, LongDivision());
+ assertFloatEquals(3.2F, FloatDivision());
+ assertDoubleEquals(3.2D, DoubleDivision());
assertIntEquals(2, IntRemainder());
assertLongEquals(2L, LongRemainder());
+ assertFloatEquals(0.5F, FloatRemainder());
+ assertDoubleEquals(0.5D, DoubleRemainder());
assertIntEquals(4, ShlIntLong());
assertLongEquals(12L, ShlLongInt());
@@ -1259,6 +1586,24 @@
assertFalse(CmpFloatGreaterThanNaN(arbitrary));
assertFalse(CmpDoubleLessThanNaN(arbitrary));
+ Main main = new Main();
+ assertIntEquals(1, main.smaliCmpLongConstants());
+ assertIntEquals(-1, main.smaliCmpGtFloatConstants());
+ assertIntEquals(-1, main.smaliCmpLtFloatConstants());
+ assertIntEquals(-1, main.smaliCmpGtDoubleConstants());
+ assertIntEquals(-1, main.smaliCmpLtDoubleConstants());
+
+ assertIntEquals(0, main.smaliCmpLongSameConstant());
+ assertIntEquals(0, main.smaliCmpGtFloatSameConstant());
+ assertIntEquals(0, main.smaliCmpLtFloatSameConstant());
+ assertIntEquals(0, main.smaliCmpGtDoubleSameConstant());
+ assertIntEquals(0, main.smaliCmpLtDoubleSameConstant());
+
+ assertIntEquals(1, main.smaliCmpGtFloatConstantWithNaN());
+ assertIntEquals(-1, main.smaliCmpLtFloatConstantWithNaN());
+ assertIntEquals(1, main.smaliCmpGtDoubleConstantWithNaN());
+ assertIntEquals(-1, main.smaliCmpLtDoubleConstantWithNaN());
+
assertIntEquals(33, ReturnInt33());
assertIntEquals(2147483647, ReturnIntMax());
assertIntEquals(0, ReturnInt0());
@@ -1275,4 +1620,10 @@
assertDoubleEquals(34, ReturnDouble34());
assertDoubleEquals(99.25, ReturnDouble99P25());
}
+
+ Main() throws ClassNotFoundException {
+ testCmp = Class.forName("TestCmp");
+ }
+
+ private Class<?> testCmp;
}
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index 4c58b39..09e97ea 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -41,7 +41,8 @@
(m_name.compare("$noinline$returnDouble") == 0) ||
(m_name.compare("$noinline$returnLong") == 0) ||
(m_name.compare("$noinline$deopt") == 0) ||
- (m_name.compare("$noinline$inlineCache") == 0)) {
+ (m_name.compare("$noinline$inlineCache") == 0) ||
+ (m_name.compare("$noinline$stackOverflow") == 0)) {
const OatQuickMethodHeader* header =
Runtime::Current()->GetJit()->GetCodeCache()->LookupOsrMethodHeader(m);
if (header != nullptr && header == GetCurrentOatQuickMethodHeader()) {
@@ -91,7 +92,8 @@
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
- if (m_name.compare("$noinline$inlineCache") == 0) {
+ if ((m_name.compare("$noinline$inlineCache") == 0) ||
+ (m_name.compare("$noinline$stackOverflow") == 0)) {
ProfilingInfo::Create(Thread::Current(), m, /* retry_allocation */ true);
return false;
}
@@ -119,7 +121,8 @@
std::string m_name(m->GetName());
jit::Jit* jit = Runtime::Current()->GetJit();
- if (m_name.compare("$noinline$inlineCache") == 0 && jit != nullptr) {
+ if ((m_name.compare("$noinline$inlineCache") == 0 ||
+ m_name.compare("$noinline$stackOverflow") == 0) && jit != nullptr) {
while (jit->GetCodeCache()->LookupOsrMethodHeader(m) == nullptr) {
// Sleep to yield to the compiler thread.
sleep(0);
diff --git a/test/570-checker-osr/run b/test/570-checker-osr/run
new file mode 100755
index 0000000..24d69b4
--- /dev/null
+++ b/test/570-checker-osr/run
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Ensure this test is not subject to code collection.
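+# A 32MB initial JIT code cache is large enough that compiled code does not
+# get collected while the test runs.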
+exec ${RUN} "$@" --runtime-option -Xjitinitialsize:32M
diff --git a/test/570-checker-osr/src/Main.java b/test/570-checker-osr/src/Main.java
index 828908a..1142d49 100644
--- a/test/570-checker-osr/src/Main.java
+++ b/test/570-checker-osr/src/Main.java
@@ -40,6 +40,9 @@
if ($noinline$inlineCache(new SubMain(), /* isSecondInvocation */ true) != SubMain.class) {
throw new Error("Unexpected return value");
}
+
+ $noinline$stackOverflow(new Main(), /* isSecondInvocation */ false);
+ $noinline$stackOverflow(new SubMain(), /* isSecondInvocation */ true);
}
public static int $noinline$returnInt() {
@@ -129,7 +132,32 @@
return Main.class;
}
- public static int[] array = new int[4];
+ public void otherInlineCache() {
+ return;
+ }
+
+ public static void $noinline$stackOverflow(Main m, boolean isSecondInvocation) {
+ // If we are running in non-JIT mode, or were unlucky enough to get this
+ // method already JITted, just return.
+ if (!ensureInInterpreter()) {
+ return;
+ }
+
+ // We need a ProfilingInfo object to populate the 'otherInlineCache' call.
+ ensureHasProfilingInfo();
+
+ if (isSecondInvocation) {
+ // Ensure we have an OSR code and we jump to it.
+ while (!ensureInOsrCode()) {}
+ }
+
+ for (int i = 0; i < (isSecondInvocation ? 10000000 : 1); ++i) {
+ // The first invocation of $noinline$stackOverflow will populate the inline
+ // cache with Main. The second invocation of the method will see a SubMain
+ // and will therefore trigger deoptimization.
+ m.otherInlineCache();
+ }
+ }
public static native boolean ensureInInterpreter();
public static native boolean ensureInOsrCode();
@@ -147,4 +175,8 @@
public Main inlineCache() {
return new SubMain();
}
+
+ public void otherInlineCache() {
+ return;
+ }
}
diff --git a/test/575-checker-isnan/expected.txt b/test/575-checker-isnan/expected.txt
new file mode 100644
index 0000000..b0aad4d
--- /dev/null
+++ b/test/575-checker-isnan/expected.txt
@@ -0,0 +1 @@
+passed
diff --git a/test/575-checker-isnan/info.txt b/test/575-checker-isnan/info.txt
new file mode 100644
index 0000000..5c48a6a
--- /dev/null
+++ b/test/575-checker-isnan/info.txt
@@ -0,0 +1 @@
+Unit test for float/double isNaN() operation.
diff --git a/test/575-checker-isnan/src/Main.java b/test/575-checker-isnan/src/Main.java
new file mode 100644
index 0000000..cc71e5e
--- /dev/null
+++ b/test/575-checker-isnan/src/Main.java
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
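+ // The instruction_simplifier replaces calls to Float.isNaN(x) and
+ // Double.isNaN(x) with the self-comparison (x != x), which shows up as the
+ // NotEqual instruction expected by the CHECK lines below.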
+ /// CHECK-START: boolean Main.isNaN32(float) instruction_simplifier (before)
+ /// CHECK-DAG: <<Result:z\d+>> InvokeStaticOrDirect
+ /// CHECK-DAG: Return [<<Result>>]
+ //
+ /// CHECK-START: boolean Main.isNaN32(float) instruction_simplifier (after)
+ /// CHECK-DAG: <<Result:z\d+>> NotEqual
+ /// CHECK-DAG: Return [<<Result>>]
+ //
+ /// CHECK-START: boolean Main.isNaN32(float) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ private static boolean isNaN32(float x) {
+ return Float.isNaN(x);
+ }
+
+ /// CHECK-START: boolean Main.isNaN64(double) instruction_simplifier (before)
+ /// CHECK-DAG: <<Result:z\d+>> InvokeStaticOrDirect
+ /// CHECK-DAG: Return [<<Result>>]
+ //
+ /// CHECK-START: boolean Main.isNaN64(double) instruction_simplifier (after)
+ /// CHECK-DAG: <<Result:z\d+>> NotEqual
+ /// CHECK-DAG: Return [<<Result>>]
+ //
+ /// CHECK-START: boolean Main.isNaN64(double) instruction_simplifier (after)
+ /// CHECK-NOT: InvokeStaticOrDirect
+ private static boolean isNaN64(double x) {
+ return Double.isNaN(x);
+ }
+
+ public static void main(String args[]) {
+ // A few distinct numbers.
+ expectFalse(isNaN32(Float.NEGATIVE_INFINITY));
+ expectFalse(isNaN32(-1.0f));
+ expectFalse(isNaN32(-0.0f));
+ expectFalse(isNaN32(0.0f));
+ expectFalse(isNaN32(1.0f));
+ expectFalse(isNaN32(Float.POSITIVE_INFINITY));
+
+ // A few distinct subnormal numbers.
+ expectFalse(isNaN32(Float.intBitsToFloat(0x00400000)));
+ expectFalse(isNaN32(Float.intBitsToFloat(0x80400000)));
+ expectFalse(isNaN32(Float.intBitsToFloat(0x00000001)));
+ expectFalse(isNaN32(Float.intBitsToFloat(0x80000001)));
+
+ // A few NaN numbers.
+ expectTrue(isNaN32(Float.NaN));
+ expectTrue(isNaN32(0.0f / 0.0f));
+ expectTrue(isNaN32((float)Math.sqrt(-1.0f)));
+ float[] fvals = {
+ Float.intBitsToFloat(0x7f800001),
+ Float.intBitsToFloat(0x7fa00000),
+ Float.intBitsToFloat(0x7fc00000),
+ Float.intBitsToFloat(0x7fffffff),
+ Float.intBitsToFloat(0xff800001),
+ Float.intBitsToFloat(0xffa00000),
+ Float.intBitsToFloat(0xffc00000),
+ Float.intBitsToFloat(0xffffffff)
+ };
+ for (int i = 0; i < fvals.length; i++) {
+ expectTrue(isNaN32(fvals[i]));
+ }
+
+ // A few distinct numbers.
+ expectFalse(isNaN64(Double.NEGATIVE_INFINITY));
+ expectFalse(isNaN64(-1.0d));
+ expectFalse(isNaN64(-0.0d));
+ expectFalse(isNaN64(0.0d));
+ expectFalse(isNaN64(1.0d));
+ expectFalse(isNaN64(Double.POSITIVE_INFINITY));
+
+ // A few distinct subnormal numbers.
+ expectFalse(isNaN64(Double.longBitsToDouble(0x0008000000000000l)));
+ expectFalse(isNaN64(Double.longBitsToDouble(0x8008000000000000l)));
+ expectFalse(isNaN64(Double.longBitsToDouble(0x0000000000000001l)));
+ expectFalse(isNaN64(Double.longBitsToDouble(0x8000000000000001l)));
+
+ // A few NaN numbers.
+ expectTrue(isNaN64(Double.NaN));
+ expectTrue(isNaN64(0.0d / 0.0d));
+ expectTrue(isNaN64(Math.sqrt(-1.0d)));
+ double[] dvals = {
+ Double.longBitsToDouble(0x7ff0000000000001L),
+ Double.longBitsToDouble(0x7ff4000000000000L),
+ Double.longBitsToDouble(0x7ff8000000000000L),
+ Double.longBitsToDouble(0x7fffffffffffffffL),
+ Double.longBitsToDouble(0xfff0000000000001L),
+ Double.longBitsToDouble(0xfff4000000000000L),
+ Double.longBitsToDouble(0xfff8000000000000L),
+ Double.longBitsToDouble(0xffffffffffffffffL)
+ };
+ for (int i = 0; i < dvals.length; i++) {
+ expectTrue(isNaN64(dvals[i]));
+ }
+
+ System.out.println("passed");
+ }
+
+ private static void expectTrue(boolean value) {
+ if (!value) {
+ throw new Error("Expected True");
+ }
+ }
+
+ private static void expectFalse(boolean value) {
+ if (value) {
+ throw new Error("Expected False");
+ }
+ }
+}
diff --git a/test/575-checker-string-init-alias/expected.txt b/test/575-checker-string-init-alias/expected.txt
new file mode 100644
index 0000000..6a5618e
--- /dev/null
+++ b/test/575-checker-string-init-alias/expected.txt
@@ -0,0 +1 @@
+JNI_OnLoad called
diff --git a/test/575-checker-string-init-alias/info.txt b/test/575-checker-string-init-alias/info.txt
new file mode 100644
index 0000000..a91ea64
--- /dev/null
+++ b/test/575-checker-string-init-alias/info.txt
@@ -0,0 +1,2 @@
+Test for the String.<init> change and deoptimization: make
+sure the compiler knows how to handle dex register aliases.
diff --git a/test/575-checker-string-init-alias/smali/TestCase.smali b/test/575-checker-string-init-alias/smali/TestCase.smali
new file mode 100644
index 0000000..ff04b27
--- /dev/null
+++ b/test/575-checker-string-init-alias/smali/TestCase.smali
@@ -0,0 +1,72 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTestCase;
+
+.super Ljava/lang/Object;
+
+.field public static staticField:Ljava/lang/String;
+
+## CHECK-START: void TestCase.testNoAlias(int[], java.lang.String) register (after)
+## CHECK: <<Null:l\d+>> NullConstant
+## CHECK: Deoptimize env:[[<<Null>>,{{.*]]}}
+## CHECK: InvokeStaticOrDirect method_name:java.lang.String.<init>
+.method public static testNoAlias([ILjava/lang/String;)V
+ .registers 6
+ const v1, 0
+ const v2, 1
+ new-instance v0, Ljava/lang/String;
+
+ # Will deoptimize.
+ aget v3, p0, v1
+
+ # Check that we're being executed by the interpreter.
+ invoke-static {}, LMain;->assertIsInterpreted()V
+
+ invoke-direct {v0, p1}, Ljava/lang/String;-><init>(Ljava/lang/String;)V
+
+ sput-object v0, LTestCase;->staticField:Ljava/lang/String;
+
+ # Will throw AIOOBE.
+ aget v3, p0, v2
+
+ return-void
+.end method
+
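+# In testAlias the uninitialized String reference is copied into a second
+# register before String.<init> is called, so the Deoptimize environment must
+# record the same NewInstance for both registers (see the CHECK line below).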
+## CHECK-START: void TestCase.testAlias(int[], java.lang.String) register (after)
+## CHECK: <<New:l\d+>> NewInstance
+## CHECK: Deoptimize env:[[<<New>>,<<New>>,{{.*]]}}
+## CHECK: InvokeStaticOrDirect method_name:java.lang.String.<init>
+.method public static testAlias([ILjava/lang/String;)V
+ .registers 7
+ const v2, 0
+ const v3, 1
+ new-instance v0, Ljava/lang/String;
+ move-object v1, v0
+
+ # Will deoptimize.
+ aget v4, p0, v2
+
+ # Check that we're being executed by the interpreter.
+ invoke-static {}, LMain;->assertIsInterpreted()V
+
+ invoke-direct {v1, p1}, Ljava/lang/String;-><init>(Ljava/lang/String;)V
+
+ sput-object v1, LTestCase;->staticField:Ljava/lang/String;
+
+ # Will throw AIOOBE.
+ aget v4, p0, v3
+
+ return-void
+.end method
diff --git a/test/575-checker-string-init-alias/src/Main.java b/test/575-checker-string-init-alias/src/Main.java
new file mode 100644
index 0000000..1ab3207
--- /dev/null
+++ b/test/575-checker-string-init-alias/src/Main.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.lang.reflect.InvocationTargetException;
+
+public class Main {
+ // Workaround for b/18051191.
+ class Inner {}
+
+ public static native void assertIsInterpreted();
+
+ private static void assertEqual(String expected, String actual) {
+ if (!expected.equals(actual)) {
+ throw new Error("Assertion failed: " + expected + " != " + actual);
+ }
+ }
+
+ public static void main(String[] args) throws Throwable {
+ System.loadLibrary(args[0]);
+ Class<?> c = Class.forName("TestCase");
+ int[] array = new int[1];
+
+ {
+ Method m = c.getMethod("testNoAlias", int[].class, String.class);
+ try {
+ m.invoke(null, new Object[] { array, "foo" });
+ throw new Error("Expected AIOOBE");
+ } catch (InvocationTargetException e) {
+ if (!(e.getCause() instanceof ArrayIndexOutOfBoundsException)) {
+ throw new Error("Expected AIOOBE");
+ }
+ // Ignore
+ }
+ Field field = c.getField("staticField");
+ assertEqual("foo", (String)field.get(null));
+ }
+
+ {
+ Method m = c.getMethod("testAlias", int[].class, String.class);
+ try {
+ m.invoke(null, new Object[] { array, "bar" });
+ throw new Error("Expected AIOOBE");
+ } catch (InvocationTargetException e) {
+ if (!(e.getCause() instanceof ArrayIndexOutOfBoundsException)) {
+ throw new Error("Expected AIOOBE");
+ }
+ // Ignore
+ }
+ Field field = c.getField("staticField");
+ assertEqual("bar", (String)field.get(null));
+ }
+ }
+}
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index f29e51f..e4af9fa 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -20,9 +20,9 @@
fi
# Jar containing all the tests.
-test_jar=${OUT_DIR-out}/host/linux-x86/framework/apache-harmony-jdwp-tests-hostdex.jar
+test_jack=${OUT_DIR-out}/host/common/obj/JAVA_LIBRARIES/apache-harmony-jdwp-tests-hostdex_intermediates/classes.jack
-if [ ! -f $test_jar ]; then
+if [ ! -f $test_jack ]; then
echo "Before running, you must build jdwp tests and vogar:" \
"make apache-harmony-jdwp-tests-hostdex vogar vogar.jar"
exit 1
@@ -117,6 +117,9 @@
art_debugee="$art_debugee -verbose:jdwp"
fi
+# Use Jack with "1.8" configuration.
+export JACK_VERSION=`basename prebuilts/sdk/tools/jacks/*ALPHA* | sed 's/^jack-//' | sed 's/.jar$//'`
+
# Run the tests using vogar.
vogar $vm_command \
$vm_args \
@@ -129,7 +132,8 @@
--vm-arg -Djpda.settings.syncPort=34016 \
--vm-arg -Djpda.settings.transportAddress=127.0.0.1:55107 \
--vm-arg -Djpda.settings.debuggeeJavaPath="$art_debugee $image $debuggee_args" \
- --classpath $test_jar \
+ --classpath $test_jack \
+ --toolchain jack --language JN \
--vm-arg -Xcompiler-option --vm-arg --debuggable \
$test