Merge "ART: Public classes never require access checks"
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index b5fd1e0..9594dce 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -204,8 +204,6 @@
2,
true,
true,
- "",
- false,
timer_.get(),
-1,
/* dex_to_oat_map */ nullptr,
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index 12568a4..c5df134 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -69,6 +69,8 @@
false,
nullptr,
nullptr,
+ false,
+ "",
false);
VerificationResults verification_results(&compiler_options);
DexFileToMethodInlinerMap method_inliner_map;
@@ -88,8 +90,6 @@
0,
false,
false,
- "",
- false,
0,
-1,
nullptr,
diff --git a/compiler/dex/quick/x86/quick_assemble_x86_test.cc b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
index b39fe4d..d63878d 100644
--- a/compiler/dex/quick/x86/quick_assemble_x86_test.cc
+++ b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
@@ -52,6 +52,8 @@
false,
nullptr,
nullptr,
+ false,
+ "",
false));
verification_results_.reset(new VerificationResults(compiler_options_.get()));
method_inliner_map_.reset(new DexFileToMethodInlinerMap());
@@ -69,8 +71,6 @@
0,
false,
false,
- "",
- false,
0,
-1,
nullptr,
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
index f18fa67..2e2d1f9 100644
--- a/compiler/driver/compiled_method_storage_test.cc
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -41,8 +41,6 @@
1u,
false,
false,
- "",
- false,
nullptr,
-1,
nullptr,
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 56e5abe..d021525 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -345,7 +345,6 @@
std::unordered_set<std::string>* compiled_classes,
std::unordered_set<std::string>* compiled_methods,
size_t thread_count, bool dump_stats, bool dump_passes,
- const std::string& dump_cfg_file_name, bool dump_cfg_append,
CumulativeLogger* timer, int swap_fd,
const std::unordered_map<const DexFile*, const char*>* dex_to_oat_map,
const ProfileCompilationInfo* profile_compilation_info)
@@ -370,8 +369,6 @@
stats_(new AOTCompilationStats),
dump_stats_(dump_stats),
dump_passes_(dump_passes),
- dump_cfg_file_name_(dump_cfg_file_name),
- dump_cfg_append_(dump_cfg_append),
timings_logger_(timer),
compiler_context_(nullptr),
support_boot_image_fixup_(instruction_set != kMips && instruction_set != kMips64),
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 17b2f5e..6a2f7bf 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -95,7 +95,6 @@
std::unordered_set<std::string>* compiled_classes,
std::unordered_set<std::string>* compiled_methods,
size_t thread_count, bool dump_stats, bool dump_passes,
- const std::string& dump_cfg_file_name, bool dump_cfg_append,
CumulativeLogger* timer, int swap_fd,
const std::unordered_map<const DexFile*, const char*>* dex_to_oat_map,
const ProfileCompilationInfo* profile_compilation_info);
@@ -423,14 +422,6 @@
return dump_passes_;
}
- const std::string& GetDumpCfgFileName() const {
- return dump_cfg_file_name_;
- }
-
- bool GetDumpCfgAppend() const {
- return dump_cfg_append_;
- }
-
CumulativeLogger* GetTimingsLogger() const {
return timings_logger_;
}
@@ -668,8 +659,6 @@
bool dump_stats_;
const bool dump_passes_;
- const std::string dump_cfg_file_name_;
- const bool dump_cfg_append_;
CumulativeLogger* const timings_logger_;
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 385f34a..2644528 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -44,7 +44,9 @@
verbose_methods_(nullptr),
pass_manager_options_(),
abort_on_hard_verifier_failure_(false),
- init_failure_output_(nullptr) {
+ init_failure_output_(nullptr),
+ dump_cfg_file_name_(""),
+ dump_cfg_append_(false) {
}
CompilerOptions::~CompilerOptions() {
@@ -71,7 +73,9 @@
bool compile_pic,
const std::vector<std::string>* verbose_methods,
std::ostream* init_failure_output,
- bool abort_on_hard_verifier_failure
+ bool abort_on_hard_verifier_failure,
+ const std::string& dump_cfg_file_name,
+ bool dump_cfg_append
) : // NOLINT(whitespace/parens)
compiler_filter_(compiler_filter),
huge_method_threshold_(huge_method_threshold),
@@ -94,7 +98,9 @@
verbose_methods_(verbose_methods),
pass_manager_options_(),
abort_on_hard_verifier_failure_(abort_on_hard_verifier_failure),
- init_failure_output_(init_failure_output) {
+ init_failure_output_(init_failure_output),
+ dump_cfg_file_name_(dump_cfg_file_name),
+ dump_cfg_append_(dump_cfg_append) {
}
void CompilerOptions::ParseHugeMethodMax(const StringPiece& option, UsageFn Usage) {
@@ -238,6 +244,10 @@
ParsePassOptions(option, Usage);
} else if (option.starts_with("--dump-init-failures=")) {
ParseDumpInitFailures(option, Usage);
+ } else if (option.starts_with("--dump-cfg=")) {
+ dump_cfg_file_name_ = option.substr(strlen("--dump-cfg=")).data();
+ } else if (option.starts_with("--dump-cfg-append")) {
+ dump_cfg_append_ = true;
} else {
// Option not recognized.
return false;
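
The two new flags now live in CompilerOptions rather than CompilerDriver, so consumers query them through the accessors added in compiler_options.h below. A minimal sketch of the round trip, illustrative only and not part of the patch (the path and the Usage callback are placeholders for whatever the caller already has):

  // Feed dex2oat-style arguments through the existing parser...
  CompilerOptions options;
  options.ParseCompilerOption(StringPiece("--dump-cfg=/tmp/graphs.cfg"), Usage);
  options.ParseCompilerOption(StringPiece("--dump-cfg-append"), Usage);
  // ...and read the settings back where the optimizing compiler needs them.
  const std::string& cfg_file = options.GetDumpCfgFileName();  // "/tmp/graphs.cfg"
  bool append = options.GetDumpCfgAppend();                    // true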
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index f14bdc4..d47fc2a 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -83,7 +83,9 @@
bool compile_pic,
const std::vector<std::string>* verbose_methods,
std::ostream* init_failure_output,
- bool abort_on_hard_verifier_failure);
+ bool abort_on_hard_verifier_failure,
+ const std::string& dump_cfg_file_name,
+ bool dump_cfg_append);
CompilerFilter GetCompilerFilter() const {
return compiler_filter_;
@@ -224,6 +226,14 @@
bool ParseCompilerOption(const StringPiece& option, UsageFn Usage);
+ const std::string& GetDumpCfgFileName() const {
+ return dump_cfg_file_name_;
+ }
+
+ bool GetDumpCfgAppend() const {
+ return dump_cfg_append_;
+ }
+
private:
void ParseDumpInitFailures(const StringPiece& option, UsageFn Usage);
void ParsePassOptions(const StringPiece& option, UsageFn Usage);
@@ -273,6 +283,9 @@
// Log initialization of initialization failures to this stream if not null.
std::unique_ptr<std::ostream> init_failure_output_;
+ std::string dump_cfg_file_name_;
+ bool dump_cfg_append_;
+
friend class Dex2Oat;
DISALLOW_COPY_AND_ASSIGN(CompilerOptions);
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
index dd50f69..3e64762 100644
--- a/compiler/elf_writer_debug.cc
+++ b/compiler/elf_writer_debug.cc
@@ -653,6 +653,21 @@
info_.EndTag(); // DW_TAG_member.
}
+ if (type->IsStringClass()) {
+ // Emit debug info for an artificial class member of java.lang.String which represents
+ // the first element of the data stored in a string instance. Consumers of the debug
+ // info can read the content of java.lang.String based on the count (a real field)
+ // and on the location of this data member.
+ info_.StartTag(DW_TAG_member);
+ WriteName("value");
+ // We do not support fields with C-like array types, so we just say its type is Java char.
+ WriteLazyType("C"); // char.
+ info_.WriteUdata(DW_AT_data_member_location,
+ mirror::String::ValueOffset().Uint32Value());
+ info_.WriteSdata(DW_AT_accessibility, DW_ACCESS_private);
+ info_.EndTag(); // DW_TAG_member.
+ }
+
EndClassTag(desc);
}
}
@@ -883,6 +898,8 @@
info_.EndTag();
} else {
// Primitive types.
+ DCHECK_EQ(desc.size(), 1u);
+
const char* name;
uint32_t encoding;
uint32_t byte_size;
@@ -1226,26 +1243,8 @@
std::vector<uintptr_t> debug_line_patches;
};
-// Get all types loaded by the runtime.
-static std::vector<mirror::Class*> GetLoadedRuntimeTypes() SHARED_REQUIRES(Locks::mutator_lock_) {
- std::vector<mirror::Class*> result;
- class CollectClasses : public ClassVisitor {
- public:
- virtual bool Visit(mirror::Class* klass) {
- classes_->push_back(klass);
- return true;
- }
- std::vector<mirror::Class*>* classes_;
- };
- CollectClasses visitor;
- visitor.classes_ = &result;
- Runtime::Current()->GetClassLinker()->VisitClasses(&visitor);
- return result;
-}
-
template<typename ElfTypes>
static void WriteDebugSections(ElfBuilder<ElfTypes>* builder,
- bool write_loaded_runtime_types,
const ArrayRef<const MethodDebugInfo>& method_infos) {
// Group the methods into compilation units based on source file.
std::vector<CompilationUnit> compilation_units;
@@ -1274,19 +1273,12 @@
}
// Write .debug_info section.
- if (!compilation_units.empty() || write_loaded_runtime_types) {
+ if (!compilation_units.empty()) {
DebugInfoWriter<ElfTypes> info_writer(builder);
info_writer.Start();
for (const auto& compilation_unit : compilation_units) {
info_writer.WriteCompilationUnit(compilation_unit);
}
- if (write_loaded_runtime_types) {
- Thread* self = Thread::Current();
- // The lock prevents the classes being moved by the GC.
- ReaderMutexLock mu(self, *Locks::mutator_lock_);
- std::vector<mirror::Class*> types = GetLoadedRuntimeTypes();
- info_writer.WriteTypes(ArrayRef<mirror::Class*>(types.data(), types.size()));
- }
info_writer.End();
}
}
@@ -1353,7 +1345,6 @@
template <typename ElfTypes>
void WriteDebugInfo(ElfBuilder<ElfTypes>* builder,
- bool write_loaded_runtime_types,
const ArrayRef<const MethodDebugInfo>& method_infos,
CFIFormat cfi_format) {
// Add methods to .symtab.
@@ -1361,7 +1352,7 @@
// Generate CFI (stack unwinding information).
WriteCFISection(builder, method_infos, cfi_format);
// Write DWARF .debug_* sections.
- WriteDebugSections(builder, write_loaded_runtime_types, method_infos);
+ WriteDebugSections(builder, method_infos);
}
template <typename ElfTypes>
@@ -1374,7 +1365,6 @@
std::unique_ptr<ElfBuilder<ElfTypes>> builder(new ElfBuilder<ElfTypes>(isa, &out));
builder->Start();
WriteDebugInfo(builder.get(),
- false,
ArrayRef<const MethodDebugInfo>(&method_info, 1),
DW_DEBUG_FRAME_FORMAT);
builder->End();
@@ -1396,8 +1386,8 @@
}
template <typename ElfTypes>
-static ArrayRef<const uint8_t> WriteDebugElfFileForClassInternal(const InstructionSet isa,
- mirror::Class* type)
+static ArrayRef<const uint8_t> WriteDebugElfFileForClassesInternal(
+ const InstructionSet isa, const ArrayRef<mirror::Class*>& types)
SHARED_REQUIRES(Locks::mutator_lock_) {
std::vector<uint8_t> buffer;
buffer.reserve(KB);
@@ -1407,7 +1397,7 @@
DebugInfoWriter<ElfTypes> info_writer(builder.get());
info_writer.Start();
- info_writer.WriteTypes(ArrayRef<mirror::Class*>(&type, 1));
+ info_writer.WriteTypes(types);
info_writer.End();
builder->End();
@@ -1419,23 +1409,22 @@
return ArrayRef<const uint8_t>(result, buffer.size());
}
-ArrayRef<const uint8_t> WriteDebugElfFileForClass(const InstructionSet isa, mirror::Class* type) {
+ArrayRef<const uint8_t> WriteDebugElfFileForClasses(const InstructionSet isa,
+ const ArrayRef<mirror::Class*>& types) {
if (Is64BitInstructionSet(isa)) {
- return WriteDebugElfFileForClassInternal<ElfTypes64>(isa, type);
+ return WriteDebugElfFileForClassesInternal<ElfTypes64>(isa, types);
} else {
- return WriteDebugElfFileForClassInternal<ElfTypes32>(isa, type);
+ return WriteDebugElfFileForClassesInternal<ElfTypes32>(isa, types);
}
}
// Explicit instantiations
template void WriteDebugInfo<ElfTypes32>(
ElfBuilder<ElfTypes32>* builder,
- bool write_loaded_runtime_types,
const ArrayRef<const MethodDebugInfo>& method_infos,
CFIFormat cfi_format);
template void WriteDebugInfo<ElfTypes64>(
ElfBuilder<ElfTypes64>* builder,
- bool write_loaded_runtime_types,
const ArrayRef<const MethodDebugInfo>& method_infos,
CFIFormat cfi_format);
diff --git a/compiler/elf_writer_debug.h b/compiler/elf_writer_debug.h
index 91da00f..e4bc856 100644
--- a/compiler/elf_writer_debug.h
+++ b/compiler/elf_writer_debug.h
@@ -32,13 +32,13 @@
template <typename ElfTypes>
void WriteDebugInfo(ElfBuilder<ElfTypes>* builder,
- bool write_loaded_runtime_types,
const ArrayRef<const MethodDebugInfo>& method_infos,
CFIFormat cfi_format);
ArrayRef<const uint8_t> WriteDebugElfFileForMethod(const dwarf::MethodDebugInfo& method_info);
-ArrayRef<const uint8_t> WriteDebugElfFileForClass(const InstructionSet isa, mirror::Class* type)
+ArrayRef<const uint8_t> WriteDebugElfFileForClasses(const InstructionSet isa,
+ const ArrayRef<mirror::Class*>& types)
SHARED_REQUIRES(Locks::mutator_lock_);
} // namespace dwarf
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index a67f3bd..7b1bdd7 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -152,7 +152,7 @@
void ElfWriterQuick<ElfTypes>::WriteDebugInfo(
const ArrayRef<const dwarf::MethodDebugInfo>& method_infos) {
if (compiler_options_->GetGenerateDebugInfo()) {
- dwarf::WriteDebugInfo(builder_.get(), /* write_types */ true, method_infos, kCFIFormat);
+ dwarf::WriteDebugInfo(builder_.get(), method_infos, kCFIFormat);
}
}
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index bc51ed6..3a3275a 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -28,6 +28,8 @@
#include "dex/quick_compiler_callbacks.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
+#include "elf_writer_debug.h"
+#include "jit/debugger_interface.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "oat_file-inl.h"
@@ -65,6 +67,17 @@
return jit_compiler->CompileMethod(self, method);
}
+extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
+ DCHECK(jit_compiler != nullptr);
+ if (jit_compiler->GetCompilerOptions()->GetGenerateDebugInfo()) {
+ const ArrayRef<mirror::Class*> types_array(types, count);
+ ArrayRef<const uint8_t> elf_file = dwarf::WriteDebugElfFileForClasses(kRuntimeISA, types_array);
+ CreateJITCodeEntry(std::unique_ptr<const uint8_t[]>(elf_file.data()), elf_file.size());
+ }
+}
+
// Callers of this method assume it has NO_RETURN.
NO_RETURN static void Usage(const char* fmt, ...) {
va_list ap;
@@ -97,7 +110,9 @@
/* pic */ true, // TODO: Support non-PIC in optimizing.
/* verbose_methods */ nullptr,
/* init_failure_output */ nullptr,
- /* abort_on_hard_verifier_failure */ false));
+ /* abort_on_hard_verifier_failure */ false,
+ /* dump_cfg_file_name */ "",
+ /* dump_cfg_append */ false));
for (const std::string& argument : Runtime::Current()->GetCompilerOptions()) {
compiler_options_->ParseCompilerOption(argument, Usage);
}
@@ -153,8 +168,6 @@
/* thread_count */ 1,
/* dump_stats */ false,
/* dump_passes */ false,
- /* dump_cfg_file_name */ "",
- /* dump_cfg_append */ false,
cumulative_logger_.get(),
/* swap_fd */ -1,
/* dex to oat map */ nullptr,
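
jit_types_loaded is another extern "C" entry point exported by the JIT compiler library, alongside jit_compile_method shown above; the runtime half that resolves and calls it is not part of this diff. A hedged sketch of the expected calling convention, with jit_compiler_handle standing in for the pointer returned when the compiler was loaded:

  // Hypothetical runtime-side call: hand a batch of freshly loaded classes to the
  // compiler so a single in-memory ELF file with their DWARF type info can be
  // registered via CreateJITCodeEntry (only when --generate-debug-info is on).
  // The caller holds the mutator lock, as the SHARED_REQUIRES annotation demands.
  mirror::Class* classes[] = { string_class, object_class };
  jit_types_loaded(jit_compiler_handle, classes, arraysize(classes));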
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index b10cc35..bf8e786 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -47,7 +47,7 @@
driver_(&compiler_options_, &verification_results_, &inliner_map_,
Compiler::kQuick, instruction_set, nullptr,
false, nullptr, nullptr, nullptr, 1u,
- false, false, "", false, nullptr, -1, nullptr, nullptr),
+ false, false, nullptr, -1, nullptr, nullptr),
error_msg_(),
instruction_set_(instruction_set),
features_(InstructionSetFeatures::FromVariant(instruction_set, variant, &error_msg_)),
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 9f7ffa5..7a2b74e 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -117,8 +117,6 @@
2,
true,
true,
- "",
- false,
timer_.get(),
-1,
nullptr,
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 26bf1cb..1d604e7 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -56,7 +56,6 @@
return_type_(Primitive::GetType(dex_compilation_unit_->GetShorty()[0])),
code_start_(nullptr),
latest_result_(nullptr),
- can_use_baseline_for_string_init_(true),
compilation_stats_(compiler_stats),
interpreter_metadata_(interpreter_metadata),
dex_cache_(dex_cache) {}
@@ -77,7 +76,6 @@
return_type_(return_type),
code_start_(nullptr),
latest_result_(nullptr),
- can_use_baseline_for_string_init_(true),
compilation_stats_(nullptr),
interpreter_metadata_(nullptr),
null_dex_cache_(),
@@ -85,10 +83,6 @@
bool BuildGraph(const DexFile::CodeItem& code);
- bool CanUseBaselineForStringInit() const {
- return can_use_baseline_for_string_init_;
- }
-
static constexpr const char* kBuilderPassName = "builder";
// The number of entries in a packed switch before we use a jump table or specified
@@ -363,11 +357,6 @@
// used by move-result instructions.
HInstruction* latest_result_;
- // We need to know whether we have built a graph that has calls to StringFactory
- // and hasn't gone through the verifier. If the following flag is `false`, then
- // we cannot compile with baseline.
- bool can_use_baseline_for_string_init_;
-
OptimizingCompilerStats* compilation_stats_;
const uint8_t* interpreter_metadata_;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index ea0b9ec..a3bbfdb 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -142,23 +142,6 @@
return pointer_size * index;
}
-void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
- Initialize();
- if (!is_leaf) {
- MarkNotLeaf();
- }
- const bool is_64_bit = Is64BitInstructionSet(GetInstructionSet());
- InitializeCodeGeneration(GetGraph()->GetNumberOfLocalVRegs()
- + GetGraph()->GetTemporariesVRegSlots()
- + 1 /* filler */,
- 0, /* the baseline compiler does not have live registers at slow path */
- 0, /* the baseline compiler does not have live registers at slow path */
- GetGraph()->GetMaximumNumberOfOutVRegs()
- + (is_64_bit ? 2 : 1) /* current method */,
- GetGraph()->GetBlocks());
- CompileInternal(allocator, /* is_baseline */ true);
-}
-
bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
DCHECK_EQ((*block_order_)[current_block_index_], current);
return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
@@ -220,8 +203,12 @@
current_slow_path_ = nullptr;
}
-void CodeGenerator::CompileInternal(CodeAllocator* allocator, bool is_baseline) {
- is_baseline_ = is_baseline;
+void CodeGenerator::Compile(CodeAllocator* allocator) {
+ // The register allocator already called `InitializeCodeGeneration`,
+ // where the frame size has been computed.
+ DCHECK(block_order_ != nullptr);
+ Initialize();
+
HGraphVisitor* instruction_visitor = GetInstructionVisitor();
DCHECK_EQ(current_block_index_, 0u);
@@ -242,9 +229,6 @@
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
DisassemblyScope disassembly_scope(current, *this);
- if (is_baseline) {
- InitLocationsBaseline(current);
- }
DCHECK(CheckTypeConsistency(current));
current->Accept(instruction_visitor);
}
@@ -254,7 +238,7 @@
// Emit catch stack maps at the end of the stack map stream as expected by the
// runtime exception handler.
- if (!is_baseline && graph_->HasTryCatch()) {
+ if (graph_->HasTryCatch()) {
RecordCatchBlockInfo();
}
@@ -262,14 +246,6 @@
Finalize(allocator);
}
-void CodeGenerator::CompileOptimized(CodeAllocator* allocator) {
- // The register allocator already called `InitializeCodeGeneration`,
- // where the frame size has been computed.
- DCHECK(block_order_ != nullptr);
- Initialize();
- CompileInternal(allocator, /* is_baseline */ false);
-}
-
void CodeGenerator::Finalize(CodeAllocator* allocator) {
size_t code_size = GetAssembler()->CodeSize();
uint8_t* buffer = allocator->Allocate(code_size);
@@ -282,29 +258,6 @@
// No linker patches by default.
}
-size_t CodeGenerator::FindFreeEntry(bool* array, size_t length) {
- for (size_t i = 0; i < length; ++i) {
- if (!array[i]) {
- array[i] = true;
- return i;
- }
- }
- LOG(FATAL) << "Could not find a register in baseline register allocator";
- UNREACHABLE();
-}
-
-size_t CodeGenerator::FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length) {
- for (size_t i = 0; i < length - 1; i += 2) {
- if (!array[i] && !array[i + 1]) {
- array[i] = true;
- array[i + 1] = true;
- return i;
- }
- }
- LOG(FATAL) << "Could not find a register in baseline register allocator";
- UNREACHABLE();
-}
-
void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
size_t maximum_number_of_live_core_registers,
size_t maximum_number_of_live_fpu_registers,
@@ -592,123 +545,6 @@
}
}
-void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
- LocationSummary* locations = instruction->GetLocations();
- if (locations == nullptr) return;
-
- for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
- blocked_core_registers_[i] = false;
- }
-
- for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
- blocked_fpu_registers_[i] = false;
- }
-
- for (size_t i = 0, e = number_of_register_pairs_; i < e; ++i) {
- blocked_register_pairs_[i] = false;
- }
-
- // Mark all fixed input, temp and output registers as used.
- for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
- BlockIfInRegister(locations->InAt(i));
- }
-
- for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
- Location loc = locations->GetTemp(i);
- BlockIfInRegister(loc);
- }
- Location result_location = locations->Out();
- if (locations->OutputCanOverlapWithInputs()) {
- BlockIfInRegister(result_location, /* is_out */ true);
- }
-
- SetupBlockedRegisters(/* is_baseline */ true);
-
- // Allocate all unallocated input locations.
- for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
- Location loc = locations->InAt(i);
- HInstruction* input = instruction->InputAt(i);
- if (loc.IsUnallocated()) {
- if ((loc.GetPolicy() == Location::kRequiresRegister)
- || (loc.GetPolicy() == Location::kRequiresFpuRegister)) {
- loc = AllocateFreeRegister(input->GetType());
- } else {
- DCHECK_EQ(loc.GetPolicy(), Location::kAny);
- HLoadLocal* load = input->AsLoadLocal();
- if (load != nullptr) {
- loc = GetStackLocation(load);
- } else {
- loc = AllocateFreeRegister(input->GetType());
- }
- }
- locations->SetInAt(i, loc);
- }
- }
-
- // Allocate all unallocated temp locations.
- for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
- Location loc = locations->GetTemp(i);
- if (loc.IsUnallocated()) {
- switch (loc.GetPolicy()) {
- case Location::kRequiresRegister:
- // Allocate a core register (large enough to fit a 32-bit integer).
- loc = AllocateFreeRegister(Primitive::kPrimInt);
- break;
-
- case Location::kRequiresFpuRegister:
- // Allocate a core register (large enough to fit a 64-bit double).
- loc = AllocateFreeRegister(Primitive::kPrimDouble);
- break;
-
- default:
- LOG(FATAL) << "Unexpected policy for temporary location "
- << loc.GetPolicy();
- }
- locations->SetTempAt(i, loc);
- }
- }
- if (result_location.IsUnallocated()) {
- switch (result_location.GetPolicy()) {
- case Location::kAny:
- case Location::kRequiresRegister:
- case Location::kRequiresFpuRegister:
- result_location = AllocateFreeRegister(instruction->GetType());
- break;
- case Location::kSameAsFirstInput:
- result_location = locations->InAt(0);
- break;
- }
- locations->UpdateOut(result_location);
- }
-}
-
-void CodeGenerator::InitLocationsBaseline(HInstruction* instruction) {
- AllocateLocations(instruction);
- if (instruction->GetLocations() == nullptr) {
- if (instruction->IsTemporary()) {
- HInstruction* previous = instruction->GetPrevious();
- Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
- Move(previous, temp_location, instruction);
- }
- return;
- }
- AllocateRegistersLocally(instruction);
- for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
- Location location = instruction->GetLocations()->InAt(i);
- HInstruction* input = instruction->InputAt(i);
- if (location.IsValid()) {
- // Move the input to the desired location.
- if (input->GetNext()->IsTemporary()) {
- // If the input was stored in a temporary, use that temporary to
- // perform the move.
- Move(input->GetNext(), location, instruction);
- } else {
- Move(input, location, instruction);
- }
- }
- }
-}
-
void CodeGenerator::AllocateLocations(HInstruction* instruction) {
instruction->Accept(GetLocationBuilder());
DCHECK(CheckTypeConsistency(instruction));
@@ -789,132 +625,6 @@
}
}
-void CodeGenerator::BuildNativeGCMap(
- ArenaVector<uint8_t>* data, const CompilerDriver& compiler_driver) const {
- const std::vector<uint8_t>& gc_map_raw =
- compiler_driver.GetVerifiedMethod(&GetGraph()->GetDexFile(), GetGraph()->GetMethodIdx())
- ->GetDexGcMap();
- verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);
-
- uint32_t max_native_offset = stack_map_stream_.ComputeMaxNativePcOffset();
-
- size_t num_stack_maps = stack_map_stream_.GetNumberOfStackMaps();
- GcMapBuilder builder(data, num_stack_maps, max_native_offset, dex_gc_map.RegWidth());
- for (size_t i = 0; i != num_stack_maps; ++i) {
- const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
- uint32_t native_offset = stack_map_entry.native_pc_offset;
- uint32_t dex_pc = stack_map_entry.dex_pc;
- const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
- CHECK(references != nullptr) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
- builder.AddEntry(native_offset, references);
- }
-}
-
-void CodeGenerator::BuildMappingTable(ArenaVector<uint8_t>* data) const {
- uint32_t pc2dex_data_size = 0u;
- uint32_t pc2dex_entries = stack_map_stream_.GetNumberOfStackMaps();
- uint32_t pc2dex_offset = 0u;
- int32_t pc2dex_dalvik_offset = 0;
- uint32_t dex2pc_data_size = 0u;
- uint32_t dex2pc_entries = 0u;
- uint32_t dex2pc_offset = 0u;
- int32_t dex2pc_dalvik_offset = 0;
-
- for (size_t i = 0; i < pc2dex_entries; i++) {
- const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
- pc2dex_data_size += UnsignedLeb128Size(stack_map_entry.native_pc_offset - pc2dex_offset);
- pc2dex_data_size += SignedLeb128Size(stack_map_entry.dex_pc - pc2dex_dalvik_offset);
- pc2dex_offset = stack_map_entry.native_pc_offset;
- pc2dex_dalvik_offset = stack_map_entry.dex_pc;
- }
-
- // Walk over the blocks and find which ones correspond to catch block entries.
- for (HBasicBlock* block : graph_->GetBlocks()) {
- if (block->IsCatchBlock()) {
- intptr_t native_pc = GetAddressOf(block);
- ++dex2pc_entries;
- dex2pc_data_size += UnsignedLeb128Size(native_pc - dex2pc_offset);
- dex2pc_data_size += SignedLeb128Size(block->GetDexPc() - dex2pc_dalvik_offset);
- dex2pc_offset = native_pc;
- dex2pc_dalvik_offset = block->GetDexPc();
- }
- }
-
- uint32_t total_entries = pc2dex_entries + dex2pc_entries;
- uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
- uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
- data->resize(data_size);
-
- uint8_t* data_ptr = &(*data)[0];
- uint8_t* write_pos = data_ptr;
-
- write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
- write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
- DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size);
- uint8_t* write_pos2 = write_pos + pc2dex_data_size;
-
- pc2dex_offset = 0u;
- pc2dex_dalvik_offset = 0u;
- dex2pc_offset = 0u;
- dex2pc_dalvik_offset = 0u;
-
- for (size_t i = 0; i < pc2dex_entries; i++) {
- const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
- DCHECK(pc2dex_offset <= stack_map_entry.native_pc_offset);
- write_pos = EncodeUnsignedLeb128(write_pos, stack_map_entry.native_pc_offset - pc2dex_offset);
- write_pos = EncodeSignedLeb128(write_pos, stack_map_entry.dex_pc - pc2dex_dalvik_offset);
- pc2dex_offset = stack_map_entry.native_pc_offset;
- pc2dex_dalvik_offset = stack_map_entry.dex_pc;
- }
-
- for (HBasicBlock* block : graph_->GetBlocks()) {
- if (block->IsCatchBlock()) {
- intptr_t native_pc = GetAddressOf(block);
- write_pos2 = EncodeUnsignedLeb128(write_pos2, native_pc - dex2pc_offset);
- write_pos2 = EncodeSignedLeb128(write_pos2, block->GetDexPc() - dex2pc_dalvik_offset);
- dex2pc_offset = native_pc;
- dex2pc_dalvik_offset = block->GetDexPc();
- }
- }
-
-
- DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size + pc2dex_data_size);
- DCHECK_EQ(static_cast<size_t>(write_pos2 - data_ptr), data_size);
-
- if (kIsDebugBuild) {
- // Verify the encoded table holds the expected data.
- MappingTable table(data_ptr);
- CHECK_EQ(table.TotalSize(), total_entries);
- CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
- auto it = table.PcToDexBegin();
- auto it2 = table.DexToPcBegin();
- for (size_t i = 0; i < pc2dex_entries; i++) {
- const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
- CHECK_EQ(stack_map_entry.native_pc_offset, it.NativePcOffset());
- CHECK_EQ(stack_map_entry.dex_pc, it.DexPc());
- ++it;
- }
- for (HBasicBlock* block : graph_->GetBlocks()) {
- if (block->IsCatchBlock()) {
- CHECK_EQ(GetAddressOf(block), it2.NativePcOffset());
- CHECK_EQ(block->GetDexPc(), it2.DexPc());
- ++it2;
- }
- }
- CHECK(it == table.PcToDexEnd());
- CHECK(it2 == table.DexToPcEnd());
- }
-}
-
-void CodeGenerator::BuildVMapTable(ArenaVector<uint8_t>* data) const {
- Leb128Encoder<ArenaVector<uint8_t>> vmap_encoder(data);
- // We currently don't use callee-saved registers.
- size_t size = 0 + 1 /* marker */ + 0;
- vmap_encoder.Reserve(size + 1u); // All values are likely to be one byte in ULEB128 (<128).
- vmap_encoder.PushBackUnsigned(size);
- vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker);
-}
-
size_t CodeGenerator::ComputeStackMapsSize() {
return stack_map_stream_.PrepareForFillIn();
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 5958cd8..4f8f146 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -158,10 +158,8 @@
class CodeGenerator {
public:
- // Compiles the graph to executable instructions. Returns whether the compilation
- // succeeded.
- void CompileBaseline(CodeAllocator* allocator, bool is_leaf = false);
- void CompileOptimized(CodeAllocator* allocator);
+ // Compiles the graph to executable instructions.
+ void Compile(CodeAllocator* allocator);
static CodeGenerator* Create(HGraph* graph,
InstructionSet instruction_set,
const InstructionSetFeatures& isa_features,
@@ -214,7 +212,7 @@
size_t GetNumberOfCoreRegisters() const { return number_of_core_registers_; }
size_t GetNumberOfFloatingPointRegisters() const { return number_of_fpu_registers_; }
- virtual void SetupBlockedRegisters(bool is_baseline) const = 0;
+ virtual void SetupBlockedRegisters() const = 0;
virtual void ComputeSpillMask() {
core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
@@ -290,17 +288,9 @@
slow_paths_.push_back(slow_path);
}
- void BuildMappingTable(ArenaVector<uint8_t>* vector) const;
- void BuildVMapTable(ArenaVector<uint8_t>* vector) const;
- void BuildNativeGCMap(
- ArenaVector<uint8_t>* vector, const CompilerDriver& compiler_driver) const;
void BuildStackMaps(MemoryRegion region);
size_t ComputeStackMapsSize();
- bool IsBaseline() const {
- return is_baseline_;
- }
-
bool IsLeafMethod() const {
return is_leaf_;
}
@@ -489,7 +479,6 @@
fpu_callee_save_mask_(fpu_callee_save_mask),
stack_map_stream_(graph->GetArena()),
block_order_(nullptr),
- is_baseline_(false),
disasm_info_(nullptr),
stats_(stats),
graph_(graph),
@@ -502,15 +491,6 @@
slow_paths_.reserve(8);
}
- // Register allocation logic.
- void AllocateRegistersLocally(HInstruction* instruction) const;
-
- // Backend specific implementation for allocating a register.
- virtual Location AllocateFreeRegister(Primitive::Type type) const = 0;
-
- static size_t FindFreeEntry(bool* array, size_t length);
- static size_t FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length);
-
virtual Location GetStackLocation(HLoadLocal* load) const = 0;
virtual HGraphVisitor* GetLocationBuilder() = 0;
@@ -593,16 +573,11 @@
// The order to use for code generation.
const ArenaVector<HBasicBlock*>* block_order_;
- // Whether we are using baseline.
- bool is_baseline_;
-
DisassemblyInformation* disasm_info_;
private:
- void InitLocationsBaseline(HInstruction* instruction);
size_t GetStackOffsetOfSavedRegister(size_t index);
void GenerateSlowPaths();
- void CompileInternal(CodeAllocator* allocator, bool is_baseline);
void BlockIfInRegister(Location location, bool is_out = false) const;
void EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path);
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index a11ceb9..2bf8404 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -47,9 +47,7 @@
static constexpr int kCurrentMethodStackOffset = 0;
static constexpr Register kMethodRegisterArgument = R0;
-// We unconditionally allocate R5 to ensure we can do long operations
-// with baseline.
-static constexpr Register kCoreSavedRegisterForBaseline = R5;
+static constexpr Register kCoreAlwaysSpillRegister = R5;
static constexpr Register kCoreCalleeSaves[] =
{ R5, R6, R7, R8, R10, R11, LR };
static constexpr SRegister kFpuCalleeSaves[] =
@@ -728,6 +726,24 @@
UNREACHABLE();
}
+inline Condition ARMFPCondition(IfCondition cond, bool gt_bias) {
+ // The ARM condition codes can express all the necessary branches; see the
+ // "Meaning (floating-point)" column in table A8-1 of the ARMv7 reference manual.
+ // There is no dex instruction or HIR that would need the missing conditions
+ // "equal or unordered" or "not equal".
+ switch (cond) {
+ case kCondEQ: return EQ;
+ case kCondNE: return NE /* unordered */;
+ case kCondLT: return gt_bias ? CC : LT /* unordered */;
+ case kCondLE: return gt_bias ? LS : LE /* unordered */;
+ case kCondGT: return gt_bias ? HI /* unordered */ : GT;
+ case kCondGE: return gt_bias ? CS /* unordered */ : GE;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+ }
+}
+
void CodeGeneratorARM::DumpCoreRegister(std::ostream& stream, int reg) const {
stream << Register(reg);
}
@@ -815,58 +831,7 @@
CodeGenerator::Finalize(allocator);
}
-Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
- switch (type) {
- case Primitive::kPrimLong: {
- size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
- ArmManagedRegister pair =
- ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
- DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
- DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);
-
- blocked_core_registers_[pair.AsRegisterPairLow()] = true;
- blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
- UpdateBlockedPairRegisters();
- return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
- }
-
- case Primitive::kPrimByte:
- case Primitive::kPrimBoolean:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimNot: {
- int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
- // Block all register pairs that contain `reg`.
- for (int i = 0; i < kNumberOfRegisterPairs; i++) {
- ArmManagedRegister current =
- ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
- if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
- blocked_register_pairs_[i] = true;
- }
- }
- return Location::RegisterLocation(reg);
- }
-
- case Primitive::kPrimFloat: {
- int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfSRegisters);
- return Location::FpuRegisterLocation(reg);
- }
-
- case Primitive::kPrimDouble: {
- int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
- DCHECK_EQ(reg % 2, 0);
- return Location::FpuRegisterPairLocation(reg, reg + 1);
- }
-
- case Primitive::kPrimVoid:
- LOG(FATAL) << "Unreachable type " << type;
- }
-
- return Location::NoLocation();
-}
-
-void CodeGeneratorARM::SetupBlockedRegisters(bool is_baseline) const {
+void CodeGeneratorARM::SetupBlockedRegisters() const {
// Don't allocate the dalvik style register pair passing.
blocked_register_pairs_[R1_R2] = true;
@@ -881,15 +846,7 @@
// Reserve temp register.
blocked_core_registers_[IP] = true;
- if (is_baseline) {
- for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
- blocked_core_registers_[kCoreCalleeSaves[i]] = true;
- }
-
- blocked_core_registers_[kCoreSavedRegisterForBaseline] = false;
- }
-
- if (is_baseline || GetGraph()->IsDebuggable()) {
+ if (GetGraph()->IsDebuggable()) {
// Stubs do not save callee-save floating point registers. If the graph
// is debuggable, we need to deal with these registers differently. For
// now, just block them.
@@ -919,11 +876,10 @@
void CodeGeneratorARM::ComputeSpillMask() {
core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
- // Save one extra register for baseline. Note that on thumb2, there is no easy
- // instruction to restore just the PC, so this actually helps both baseline
- // and non-baseline to save and restore at least two registers at entry and exit.
- core_spill_mask_ |= (1 << kCoreSavedRegisterForBaseline);
DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
+ // There is no easy instruction to restore just the PC on thumb2. We spill and
+ // restore another arbitrary register.
+ core_spill_mask_ |= (1 << kCoreAlwaysSpillRegister);
fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
// We use vpush and vpop for saving and restoring floating point registers, which take
// a SRegister and the number of registers to save/restore after that SRegister. We
@@ -1416,15 +1372,9 @@
void InstructionCodeGeneratorARM::GenerateFPJumps(HCondition* cond,
Label* true_label,
- Label* false_label) {
+ Label* false_label ATTRIBUTE_UNUSED) {
__ vmstat(); // transfer FP status register to ARM APSR.
- // TODO: merge into a single branch (except "equal or unordered" and "not equal")
- if (cond->IsFPConditionTrueIfNaN()) {
- __ b(true_label, VS); // VS for unordered.
- } else if (cond->IsFPConditionFalseIfNaN()) {
- __ b(false_label, VS); // VS for unordered.
- }
- __ b(true_label, ARMCondition(cond->GetCondition()));
+ __ b(true_label, ARMFPCondition(cond->GetCondition(), cond->IsGtBias()));
}
void InstructionCodeGeneratorARM::GenerateLongComparesAndJumps(HCondition* cond,
@@ -1972,9 +1922,9 @@
}
void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
codegen_->GetAssembler(),
@@ -2004,9 +1954,9 @@
}
void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
return;
@@ -3803,6 +3753,7 @@
Label less, greater, done;
Primitive::Type type = compare->InputAt(0)->GetType();
+ Condition less_cond;
switch (type) {
case Primitive::kPrimLong: {
__ cmp(left.AsRegisterPairHigh<Register>(),
@@ -3813,6 +3764,7 @@
__ LoadImmediate(out, 0);
__ cmp(left.AsRegisterPairLow<Register>(),
ShifterOperand(right.AsRegisterPairLow<Register>())); // Unsigned compare.
+ less_cond = LO;
break;
}
case Primitive::kPrimFloat:
@@ -3825,14 +3777,15 @@
FromLowSToD(right.AsFpuRegisterPairLow<SRegister>()));
}
__ vmstat(); // transfer FP status register to ARM APSR.
- __ b(compare->IsGtBias() ? &greater : &less, VS); // VS for unordered.
+ less_cond = ARMFPCondition(kCondLT, compare->IsGtBias());
break;
}
default:
LOG(FATAL) << "Unexpected compare type " << type;
+ UNREACHABLE();
}
__ b(&done, EQ);
- __ b(&less, LO); // LO is for both: unsigned compare for longs and 'less than' for floats.
+ __ b(&less, less_cond);
__ Bind(&greater);
__ LoadImmediate(out, 1);
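
The ARMFPCondition helper added above replaces the explicit VS branch that GenerateFPJumps used to emit for NaN: the gt/lt bias is folded directly into the chosen condition code. An illustrative check of the mapping, not part of the patch, assuming the usual ARM flag encoding where an unordered VFP compare leaves NZCV = 0011 after vmstat:

  // kCondLT with gt_bias    -> CC: C == 1 when unordered, branch not taken,
  //                            matching cmpg-float, which treats NaN as "greater".
  // kCondGT without gt_bias -> GT: N != V when unordered, branch not taken,
  //                            matching cmpl-float, which treats NaN as "less".
  // kCondNE                 -> NE: Z == 0 when unordered, branch taken,
  //                            correct because NaN compares unequal to everything.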
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 26d6d63..1183dda 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -340,9 +340,7 @@
return GetLabelOf(block)->Position();
}
- void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
-
- Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ void SetupBlockedRegisters() const OVERRIDE;
Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 6ed2c5a..2cb2741 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -93,6 +93,24 @@
UNREACHABLE();
}
+inline Condition ARM64FPCondition(IfCondition cond, bool gt_bias) {
+ // The ARM64 condition codes can express all the necessary branches; see the
+ // "Meaning (floating-point)" column in table C1-1 of the ARMv8 reference manual.
+ // There is no dex instruction or HIR that would need the missing conditions
+ // "equal or unordered" or "not equal".
+ switch (cond) {
+ case kCondEQ: return eq;
+ case kCondNE: return ne /* unordered */;
+ case kCondLT: return gt_bias ? cc : lt /* unordered */;
+ case kCondLE: return gt_bias ? ls : le /* unordered */;
+ case kCondGT: return gt_bias ? hi /* unordered */ : gt;
+ case kCondGE: return gt_bias ? cs /* unordered */ : ge;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ UNREACHABLE();
+ }
+}
+
Location ARM64ReturnLocation(Primitive::Type return_type) {
// Note that in practice, `LocationFrom(x0)` and `LocationFrom(w0)` create the
// same Location object, and so do `LocationFrom(d0)` and `LocationFrom(s0)`,
@@ -1094,7 +1112,7 @@
}
}
-void CodeGeneratorARM64::SetupBlockedRegisters(bool is_baseline) const {
+void CodeGeneratorARM64::SetupBlockedRegisters() const {
// Blocked core registers:
// lr : Runtime reserved.
// tr : Runtime reserved.
@@ -1115,40 +1133,17 @@
blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
}
- if (is_baseline) {
- CPURegList reserved_core_baseline_registers = callee_saved_core_registers;
- while (!reserved_core_baseline_registers.IsEmpty()) {
- blocked_core_registers_[reserved_core_baseline_registers.PopLowestIndex().code()] = true;
- }
- }
-
- if (is_baseline || GetGraph()->IsDebuggable()) {
+ if (GetGraph()->IsDebuggable()) {
// Stubs do not save callee-save floating point registers. If the graph
// is debuggable, we need to deal with these registers differently. For
// now, just block them.
- CPURegList reserved_fp_baseline_registers = callee_saved_fp_registers;
- while (!reserved_fp_baseline_registers.IsEmpty()) {
- blocked_fpu_registers_[reserved_fp_baseline_registers.PopLowestIndex().code()] = true;
+ CPURegList reserved_fp_registers_debuggable = callee_saved_fp_registers;
+ while (!reserved_fp_registers_debuggable.IsEmpty()) {
+ blocked_fpu_registers_[reserved_fp_registers_debuggable.PopLowestIndex().code()] = true;
}
}
}
-Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
- if (type == Primitive::kPrimVoid) {
- LOG(FATAL) << "Unreachable type " << type;
- }
-
- if (Primitive::IsFloatingPointType(type)) {
- ssize_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfAllocatableFPRegisters);
- DCHECK_NE(reg, -1);
- return Location::FpuRegisterLocation(reg);
- } else {
- ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfAllocatableRegisters);
- DCHECK_NE(reg, -1);
- return Location::RegisterLocation(reg);
- }
-}
-
size_t CodeGeneratorARM64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
__ Str(reg, MemOperand(sp, stack_index));
@@ -2381,12 +2376,8 @@
} else {
__ Fcmp(left, InputFPRegisterAt(compare, 1));
}
- if (compare->IsGtBias()) {
- __ Cset(result, ne);
- } else {
- __ Csetm(result, ne);
- }
- __ Cneg(result, result, compare->IsGtBias() ? mi : gt);
+ __ Cset(result, ne);
+ __ Cneg(result, result, ARM64FPCondition(kCondLT, compare->IsGtBias()));
break;
}
default:
@@ -2422,7 +2413,6 @@
LocationSummary* locations = instruction->GetLocations();
Register res = RegisterFrom(locations->Out(), instruction->GetType());
IfCondition if_cond = instruction->GetCondition();
- Condition arm64_cond = ARM64Condition(if_cond);
if (Primitive::IsFloatingPointType(instruction->InputAt(0)->GetType())) {
FPRegister lhs = InputFPRegisterAt(instruction, 0);
@@ -2433,20 +2423,13 @@
} else {
__ Fcmp(lhs, InputFPRegisterAt(instruction, 1));
}
- __ Cset(res, arm64_cond);
- if (instruction->IsFPConditionTrueIfNaN()) {
- // res = IsUnordered(arm64_cond) ? 1 : res <=> res = IsNotUnordered(arm64_cond) ? res : 1
- __ Csel(res, res, Operand(1), vc); // VC for "not unordered".
- } else if (instruction->IsFPConditionFalseIfNaN()) {
- // res = IsUnordered(arm64_cond) ? 0 : res <=> res = IsNotUnordered(arm64_cond) ? res : 0
- __ Csel(res, res, Operand(0), vc); // VC for "not unordered".
- }
+ __ Cset(res, ARM64FPCondition(if_cond, instruction->IsGtBias()));
} else {
// Integer cases.
Register lhs = InputRegisterAt(instruction, 0);
Operand rhs = InputOperandAt(instruction, 1);
__ Cmp(lhs, rhs);
- __ Cset(res, arm64_cond);
+ __ Cset(res, ARM64Condition(if_cond));
}
}
@@ -2816,15 +2799,11 @@
} else {
__ Fcmp(lhs, InputFPRegisterAt(condition, 1));
}
- if (condition->IsFPConditionTrueIfNaN()) {
- __ B(vs, true_target == nullptr ? &fallthrough_target : true_target);
- } else if (condition->IsFPConditionFalseIfNaN()) {
- __ B(vs, false_target == nullptr ? &fallthrough_target : false_target);
- }
if (true_target == nullptr) {
- __ B(ARM64Condition(condition->GetOppositeCondition()), false_target);
+ IfCondition opposite_condition = condition->GetOppositeCondition();
+ __ B(ARM64FPCondition(opposite_condition, condition->IsGtBias()), false_target);
} else {
- __ B(ARM64Condition(condition->GetCondition()), true_target);
+ __ B(ARM64FPCondition(condition->GetCondition(), condition->IsGtBias()), true_target);
}
} else {
// Integer cases.
@@ -3462,9 +3441,9 @@
}
void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
if (intrinsic.TryDispatch(invoke)) {
@@ -3712,9 +3691,9 @@
void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
return;
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index f2ff894..8eb9fcc 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -339,10 +339,7 @@
// Register allocation.
- void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
- // AllocateFreeRegister() is only used when allocating registers locally
- // during CompileBaseline().
- Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ void SetupBlockedRegisters() const OVERRIDE;
Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index e34767c..5bd136a 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1042,7 +1042,7 @@
__ Bind(&done);
}
-void CodeGeneratorMIPS::SetupBlockedRegisters(bool is_baseline) const {
+void CodeGeneratorMIPS::SetupBlockedRegisters() const {
// Don't allocate the dalvik style register pair passing.
blocked_register_pairs_[A1_A2] = true;
@@ -1072,16 +1072,6 @@
blocked_fpu_registers_[i] = true;
}
- if (is_baseline) {
- for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
- blocked_core_registers_[kCoreCalleeSaves[i]] = true;
- }
-
- for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
- blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
- }
- }
-
UpdateBlockedPairRegisters();
}
@@ -1096,52 +1086,6 @@
}
}
-Location CodeGeneratorMIPS::AllocateFreeRegister(Primitive::Type type) const {
- switch (type) {
- case Primitive::kPrimLong: {
- size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
- MipsManagedRegister pair =
- MipsManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
- DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
- DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);
-
- blocked_core_registers_[pair.AsRegisterPairLow()] = true;
- blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
- UpdateBlockedPairRegisters();
- return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
- }
-
- case Primitive::kPrimByte:
- case Primitive::kPrimBoolean:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimNot: {
- int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
- // Block all register pairs that contain `reg`.
- for (int i = 0; i < kNumberOfRegisterPairs; i++) {
- MipsManagedRegister current =
- MipsManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
- if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
- blocked_register_pairs_[i] = true;
- }
- }
- return Location::RegisterLocation(reg);
- }
-
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble: {
- int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFRegisters);
- return Location::FpuRegisterLocation(reg);
- }
-
- case Primitive::kPrimVoid:
- LOG(FATAL) << "Unreachable type " << type;
- }
-
- UNREACHABLE();
-}
-
size_t CodeGeneratorMIPS::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
__ StoreToOffset(kStoreWord, Register(reg_id), SP, stack_index);
return kMipsWordSize;
@@ -3835,9 +3779,9 @@
}
void LocationsBuilderMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
IntrinsicLocationsBuilderMIPS intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
@@ -3973,9 +3917,9 @@
}
void InstructionCodeGeneratorMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
return;
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index c3d4851..2cde0ed 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -290,10 +290,7 @@
// Register allocation.
- void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
- // AllocateFreeRegister() is only used when allocating registers locally
- // during CompileBaseline().
- Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ void SetupBlockedRegisters() const OVERRIDE;
Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 79cd56d..0505486 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -979,7 +979,7 @@
__ Bind(&done);
}
-void CodeGeneratorMIPS64::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const {
+void CodeGeneratorMIPS64::SetupBlockedRegisters() const {
// ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
blocked_core_registers_[ZERO] = true;
blocked_core_registers_[K0] = true;
@@ -1003,8 +1003,7 @@
// TODO: review; anything else?
- // TODO: make these two for's conditional on is_baseline once
- // all the issues with register saving/restoring are sorted out.
+ // TODO: remove once all the issues with register saving/restoring are sorted out.
for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
blocked_core_registers_[kCoreCalleeSaves[i]] = true;
}
@@ -1014,20 +1013,6 @@
}
}
-Location CodeGeneratorMIPS64::AllocateFreeRegister(Primitive::Type type) const {
- if (type == Primitive::kPrimVoid) {
- LOG(FATAL) << "Unreachable type " << type;
- }
-
- if (Primitive::IsFloatingPointType(type)) {
- size_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFpuRegisters);
- return Location::FpuRegisterLocation(reg);
- } else {
- size_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfGpuRegisters);
- return Location::RegisterLocation(reg);
- }
-}
-
size_t CodeGeneratorMIPS64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
__ StoreToOffset(kStoreDoubleword, GpuRegister(reg_id), SP, stack_index);
return kMips64WordSize;
@@ -3031,9 +3016,9 @@
}
void LocationsBuilderMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
IntrinsicLocationsBuilderMIPS64 intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
@@ -3182,9 +3167,9 @@
}
void InstructionCodeGeneratorMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
return;
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 7182e8e..140ff95 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -289,10 +289,7 @@
// Register allocation.
- void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
- // AllocateFreeRegister() is only used when allocating registers locally
- // during CompileBaseline().
- Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ void SetupBlockedRegisters() const OVERRIDE;
Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 6259acd..f95cb30 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -817,65 +817,13 @@
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
-Location CodeGeneratorX86::AllocateFreeRegister(Primitive::Type type) const {
- switch (type) {
- case Primitive::kPrimLong: {
- size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
- X86ManagedRegister pair =
- X86ManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
- DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
- DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);
- blocked_core_registers_[pair.AsRegisterPairLow()] = true;
- blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
- UpdateBlockedPairRegisters();
- return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
- }
-
- case Primitive::kPrimByte:
- case Primitive::kPrimBoolean:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimNot: {
- Register reg = static_cast<Register>(
- FindFreeEntry(blocked_core_registers_, kNumberOfCpuRegisters));
- // Block all register pairs that contain `reg`.
- for (int i = 0; i < kNumberOfRegisterPairs; i++) {
- X86ManagedRegister current =
- X86ManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
- if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
- blocked_register_pairs_[i] = true;
- }
- }
- return Location::RegisterLocation(reg);
- }
-
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble: {
- return Location::FpuRegisterLocation(
- FindFreeEntry(blocked_fpu_registers_, kNumberOfXmmRegisters));
- }
-
- case Primitive::kPrimVoid:
- LOG(FATAL) << "Unreachable type " << type;
- }
-
- return Location::NoLocation();
-}
-
-void CodeGeneratorX86::SetupBlockedRegisters(bool is_baseline) const {
+void CodeGeneratorX86::SetupBlockedRegisters() const {
// Don't allocate the dalvik style register pair passing.
blocked_register_pairs_[ECX_EDX] = true;
// Stack register is always reserved.
blocked_core_registers_[ESP] = true;
- if (is_baseline) {
- blocked_core_registers_[EBP] = true;
- blocked_core_registers_[ESI] = true;
- blocked_core_registers_[EDI] = true;
- }
-
UpdateBlockedPairRegisters();
}
@@ -1981,9 +1929,9 @@
}
void LocationsBuilderX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
IntrinsicLocationsBuilderX86 intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
@@ -1999,17 +1947,6 @@
if (invoke->HasPcRelativeDexCache()) {
invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::RequiresRegister());
}
-
- if (codegen_->IsBaseline()) {
- // Baseline does not have enough registers if the current method also
- // needs a register. We therefore do not require a register for it, and let
- // the code generation of the invoke handle it.
- LocationSummary* locations = invoke->GetLocations();
- Location location = locations->InAt(invoke->GetSpecialInputIndex());
- if (location.IsUnallocated() && location.GetPolicy() == Location::kRequiresRegister) {
- locations->SetInAt(invoke->GetSpecialInputIndex(), Location::NoLocation());
- }
- }
}
static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorX86* codegen) {
@@ -2022,9 +1959,9 @@
}
void InstructionCodeGeneratorX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
return;
@@ -4286,7 +4223,7 @@
if (current_method.IsRegister()) {
method_reg = current_method.AsRegister<Register>();
} else {
- DCHECK(IsBaseline() || invoke->GetLocations()->Intrinsified());
+ DCHECK(invoke->GetLocations()->Intrinsified());
DCHECK(!current_method.IsValid());
method_reg = reg;
__ movl(reg, Address(ESP, kCurrentMethodStackOffset));
@@ -5076,11 +5013,6 @@
}
void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) {
- // This location builder might end up asking to up to four registers, which is
- // not currently possible for baseline. The situation in which we need four
- // registers cannot be met by baseline though, because it has not run any
- // optimization.
-
Primitive::Type value_type = instruction->GetComponentType();
bool needs_write_barrier =
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index c65c423..34f983f 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -359,9 +359,7 @@
return GetLabelOf(block)->Position();
}
- void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
-
- Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ void SetupBlockedRegisters() const OVERRIDE;
Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e024ce2..31f3660 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1002,47 +1002,12 @@
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
-Location CodeGeneratorX86_64::AllocateFreeRegister(Primitive::Type type) const {
- switch (type) {
- case Primitive::kPrimLong:
- case Primitive::kPrimByte:
- case Primitive::kPrimBoolean:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimNot: {
- size_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfCpuRegisters);
- return Location::RegisterLocation(reg);
- }
-
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble: {
- size_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFloatRegisters);
- return Location::FpuRegisterLocation(reg);
- }
-
- case Primitive::kPrimVoid:
- LOG(FATAL) << "Unreachable type " << type;
- }
-
- return Location::NoLocation();
-}
-
-void CodeGeneratorX86_64::SetupBlockedRegisters(bool is_baseline) const {
+void CodeGeneratorX86_64::SetupBlockedRegisters() const {
// Stack register is always reserved.
blocked_core_registers_[RSP] = true;
// Block the register used as TMP.
blocked_core_registers_[TMP] = true;
-
- if (is_baseline) {
- for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
- blocked_core_registers_[kCoreCalleeSaves[i]] = true;
- }
- for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
- blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
- }
- }
}
static dwarf::Reg DWARFReg(Register reg) {
@@ -2161,9 +2126,9 @@
}
void LocationsBuilderX86_64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
IntrinsicLocationsBuilderX86_64 intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
@@ -2183,9 +2148,9 @@
}
void InstructionCodeGeneratorX86_64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
return;
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 505c9dc..9de9d9e 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -347,8 +347,7 @@
Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
- void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
- Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ void SetupBlockedRegisters() const OVERRIDE;
void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
void Finalize(CodeAllocator* allocator) OVERRIDE;
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index d970704..19d63de 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -40,6 +40,7 @@
#include "dex_file.h"
#include "dex_instruction.h"
#include "driver/compiler_options.h"
+#include "graph_checker.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
#include "prepare_for_register_allocation.h"
@@ -70,8 +71,8 @@
AddAllocatedRegister(Location::RegisterLocation(arm::R7));
}
- void SetupBlockedRegisters(bool is_baseline) const OVERRIDE {
- arm::CodeGeneratorARM::SetupBlockedRegisters(is_baseline);
+ void SetupBlockedRegisters() const OVERRIDE {
+ arm::CodeGeneratorARM::SetupBlockedRegisters();
blocked_core_registers_[arm::R4] = true;
blocked_core_registers_[arm::R6] = false;
blocked_core_registers_[arm::R7] = false;
@@ -90,8 +91,8 @@
AddAllocatedRegister(Location::RegisterLocation(x86::EDI));
}
- void SetupBlockedRegisters(bool is_baseline) const OVERRIDE {
- x86::CodeGeneratorX86::SetupBlockedRegisters(is_baseline);
+ void SetupBlockedRegisters() const OVERRIDE {
+ x86::CodeGeneratorX86::SetupBlockedRegisters();
// ebx is a callee-save register in C, but caller-save for ART.
blocked_core_registers_[x86::EBX] = true;
blocked_register_pairs_[x86::EAX_EBX] = true;
@@ -200,259 +201,228 @@
}
template <typename Expected>
-static void RunCodeBaseline(InstructionSet target_isa,
- HGraph* graph,
- bool has_result,
- Expected expected) {
- InternalCodeAllocator allocator;
+static void RunCode(CodeGenerator* codegen,
+ HGraph* graph,
+ std::function<void(HGraph*)> hook_before_codegen,
+ bool has_result,
+ Expected expected) {
+ ASSERT_TRUE(graph->IsInSsaForm());
- CompilerOptions compiler_options;
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- TestCodeGeneratorX86 codegenX86(graph, *features_x86.get(), compiler_options);
- // We avoid doing a stack overflow check that requires the runtime being setup,
- // by making sure the compiler knows the methods we are running are leaf methods.
- codegenX86.CompileBaseline(&allocator, true);
- if (target_isa == kX86) {
- Run(allocator, codegenX86, has_result, expected);
- }
+ SSAChecker graph_checker(graph);
+ graph_checker.Run();
+ ASSERT_TRUE(graph_checker.IsValid());
- std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
- ArmInstructionSetFeatures::FromCppDefines());
- TestCodeGeneratorARM codegenARM(graph, *features_arm.get(), compiler_options);
- codegenARM.CompileBaseline(&allocator, true);
- if (target_isa == kArm || target_isa == kThumb2) {
- Run(allocator, codegenARM, has_result, expected);
- }
-
- std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64(
- X86_64InstructionSetFeatures::FromCppDefines());
- x86_64::CodeGeneratorX86_64 codegenX86_64(graph, *features_x86_64.get(), compiler_options);
- codegenX86_64.CompileBaseline(&allocator, true);
- if (target_isa == kX86_64) {
- Run(allocator, codegenX86_64, has_result, expected);
- }
-
- std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64(
- Arm64InstructionSetFeatures::FromCppDefines());
- arm64::CodeGeneratorARM64 codegenARM64(graph, *features_arm64.get(), compiler_options);
- codegenARM64.CompileBaseline(&allocator, true);
- if (target_isa == kArm64) {
- Run(allocator, codegenARM64, has_result, expected);
- }
-
- std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
- MipsInstructionSetFeatures::FromCppDefines());
- mips::CodeGeneratorMIPS codegenMIPS(graph, *features_mips.get(), compiler_options);
- codegenMIPS.CompileBaseline(&allocator, true);
- if (kRuntimeISA == kMips) {
- Run(allocator, codegenMIPS, has_result, expected);
- }
-
- std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
- Mips64InstructionSetFeatures::FromCppDefines());
- mips64::CodeGeneratorMIPS64 codegenMIPS64(graph, *features_mips64.get(), compiler_options);
- codegenMIPS64.CompileBaseline(&allocator, true);
- if (target_isa == kMips64) {
- Run(allocator, codegenMIPS64, has_result, expected);
- }
-}
-
-template <typename Expected>
-static void RunCodeOptimized(CodeGenerator* codegen,
- HGraph* graph,
- std::function<void(HGraph*)> hook_before_codegen,
- bool has_result,
- Expected expected) {
- // Tests may have already computed it.
- if (graph->GetReversePostOrder().empty()) {
- graph->BuildDominatorTree();
- }
SsaLivenessAnalysis liveness(graph, codegen);
- liveness.Analyze();
- RegisterAllocator register_allocator(graph->GetArena(), codegen, liveness);
- register_allocator.AllocateRegisters();
+ PrepareForRegisterAllocation(graph).Run();
+ liveness.Analyze();
+ RegisterAllocator(graph->GetArena(), codegen, liveness).AllocateRegisters();
hook_before_codegen(graph);
InternalCodeAllocator allocator;
- codegen->CompileOptimized(&allocator);
+ codegen->Compile(&allocator);
Run(allocator, *codegen, has_result, expected);
}
template <typename Expected>
-static void RunCodeOptimized(InstructionSet target_isa,
- HGraph* graph,
- std::function<void(HGraph*)> hook_before_codegen,
- bool has_result,
- Expected expected) {
+static void RunCode(InstructionSet target_isa,
+ HGraph* graph,
+ std::function<void(HGraph*)> hook_before_codegen,
+ bool has_result,
+ Expected expected) {
CompilerOptions compiler_options;
if (target_isa == kArm || target_isa == kThumb2) {
std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
ArmInstructionSetFeatures::FromCppDefines());
TestCodeGeneratorARM codegenARM(graph, *features_arm.get(), compiler_options);
- RunCodeOptimized(&codegenARM, graph, hook_before_codegen, has_result, expected);
+ RunCode(&codegenARM, graph, hook_before_codegen, has_result, expected);
} else if (target_isa == kArm64) {
std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64(
Arm64InstructionSetFeatures::FromCppDefines());
arm64::CodeGeneratorARM64 codegenARM64(graph, *features_arm64.get(), compiler_options);
- RunCodeOptimized(&codegenARM64, graph, hook_before_codegen, has_result, expected);
+ RunCode(&codegenARM64, graph, hook_before_codegen, has_result, expected);
} else if (target_isa == kX86) {
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegenX86(graph, *features_x86.get(), compiler_options);
- RunCodeOptimized(&codegenX86, graph, hook_before_codegen, has_result, expected);
+ RunCode(&codegenX86, graph, hook_before_codegen, has_result, expected);
} else if (target_isa == kX86_64) {
std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64(
X86_64InstructionSetFeatures::FromCppDefines());
x86_64::CodeGeneratorX86_64 codegenX86_64(graph, *features_x86_64.get(), compiler_options);
- RunCodeOptimized(&codegenX86_64, graph, hook_before_codegen, has_result, expected);
+ RunCode(&codegenX86_64, graph, hook_before_codegen, has_result, expected);
} else if (target_isa == kMips) {
std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
MipsInstructionSetFeatures::FromCppDefines());
mips::CodeGeneratorMIPS codegenMIPS(graph, *features_mips.get(), compiler_options);
- RunCodeOptimized(&codegenMIPS, graph, hook_before_codegen, has_result, expected);
+ RunCode(&codegenMIPS, graph, hook_before_codegen, has_result, expected);
} else if (target_isa == kMips64) {
std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
Mips64InstructionSetFeatures::FromCppDefines());
mips64::CodeGeneratorMIPS64 codegenMIPS64(graph, *features_mips64.get(), compiler_options);
- RunCodeOptimized(&codegenMIPS64, graph, hook_before_codegen, has_result, expected);
+ RunCode(&codegenMIPS64, graph, hook_before_codegen, has_result, expected);
}
}
-static void TestCode(InstructionSet target_isa,
- const uint16_t* data,
+static ::std::vector<InstructionSet> GetTargetISAs() {
+ ::std::vector<InstructionSet> v;
+ // Add all ISAs that are executable on hardware or on simulator.
+ const ::std::vector<InstructionSet> executable_isa_candidates = {
+ kArm,
+ kArm64,
+ kThumb2,
+ kX86,
+ kX86_64,
+ kMips,
+ kMips64
+ };
+
+ for (auto target_isa : executable_isa_candidates) {
+ if (CanExecute(target_isa)) {
+ v.push_back(target_isa);
+ }
+ }
+
+ return v;
+}
+
+static void TestCode(const uint16_t* data,
bool has_result = false,
int32_t expected = 0) {
- ArenaPool pool;
- ArenaAllocator arena(&pool);
- HGraph* graph = CreateGraph(&arena);
- HGraphBuilder builder(graph);
- const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
- bool graph_built = builder.BuildGraph(*item);
- ASSERT_TRUE(graph_built);
- // Remove suspend checks, they cannot be executed in this context.
- RemoveSuspendChecks(graph);
- RunCodeBaseline(target_isa, graph, has_result, expected);
+ for (InstructionSet target_isa : GetTargetISAs()) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ HGraph* graph = CreateGraph(&arena);
+ HGraphBuilder builder(graph);
+ const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
+ bool graph_built = builder.BuildGraph(*item);
+ ASSERT_TRUE(graph_built);
+ // Remove suspend checks, they cannot be executed in this context.
+ RemoveSuspendChecks(graph);
+ TransformToSsa(graph);
+ RunCode(target_isa, graph, [](HGraph*) {}, has_result, expected);
+ }
}
-static void TestCodeLong(InstructionSet target_isa,
- const uint16_t* data,
+static void TestCodeLong(const uint16_t* data,
bool has_result,
int64_t expected) {
- ArenaPool pool;
- ArenaAllocator arena(&pool);
- HGraph* graph = CreateGraph(&arena);
- HGraphBuilder builder(graph, Primitive::kPrimLong);
- const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
- bool graph_built = builder.BuildGraph(*item);
- ASSERT_TRUE(graph_built);
- // Remove suspend checks, they cannot be executed in this context.
- RemoveSuspendChecks(graph);
- RunCodeBaseline(target_isa, graph, has_result, expected);
+ for (InstructionSet target_isa : GetTargetISAs()) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ HGraph* graph = CreateGraph(&arena);
+ HGraphBuilder builder(graph, Primitive::kPrimLong);
+ const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
+ bool graph_built = builder.BuildGraph(*item);
+ ASSERT_TRUE(graph_built);
+ // Remove suspend checks, they cannot be executed in this context.
+ RemoveSuspendChecks(graph);
+ TransformToSsa(graph);
+ RunCode(target_isa, graph, [](HGraph*) {}, has_result, expected);
+ }
}
-class CodegenTest: public ::testing::TestWithParam<InstructionSet> {};
+class CodegenTest : public CommonCompilerTest {};
-TEST_P(CodegenTest, ReturnVoid) {
+TEST_F(CodegenTest, ReturnVoid) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(Instruction::RETURN_VOID);
- TestCode(GetParam(), data);
+ TestCode(data);
}
-TEST_P(CodegenTest, CFG1) {
+TEST_F(CodegenTest, CFG1) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
- TestCode(GetParam(), data);
+ TestCode(data);
}
-TEST_P(CodegenTest, CFG2) {
+TEST_F(CodegenTest, CFG2) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
- TestCode(GetParam(), data);
+ TestCode(data);
}
-TEST_P(CodegenTest, CFG3) {
+TEST_F(CodegenTest, CFG3) {
const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x200,
Instruction::RETURN_VOID,
Instruction::GOTO | 0xFF00);
- TestCode(GetParam(), data1);
+ TestCode(data1);
const uint16_t data2[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_16, 3,
Instruction::RETURN_VOID,
Instruction::GOTO_16, 0xFFFF);
- TestCode(GetParam(), data2);
+ TestCode(data2);
const uint16_t data3[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_32, 4, 0,
Instruction::RETURN_VOID,
Instruction::GOTO_32, 0xFFFF, 0xFFFF);
- TestCode(GetParam(), data3);
+ TestCode(data3);
}
-TEST_P(CodegenTest, CFG4) {
+TEST_F(CodegenTest, CFG4) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID,
Instruction::GOTO | 0x100,
Instruction::GOTO | 0xFE00);
- TestCode(GetParam(), data);
+ TestCode(data);
}
-TEST_P(CodegenTest, CFG5) {
+TEST_F(CodegenTest, CFG5) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
- TestCode(GetParam(), data);
+ TestCode(data);
}
-TEST_P(CodegenTest, IntConstant) {
+TEST_F(CodegenTest, IntConstant) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN_VOID);
- TestCode(GetParam(), data);
+ TestCode(data);
}
-TEST_P(CodegenTest, Return1) {
+TEST_F(CodegenTest, Return1) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN | 0);
- TestCode(GetParam(), data, true, 0);
+ TestCode(data, true, 0);
}
-TEST_P(CodegenTest, Return2) {
+TEST_F(CodegenTest, Return2) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 0 | 1 << 8,
Instruction::RETURN | 1 << 8);
- TestCode(GetParam(), data, true, 0);
+ TestCode(data, true, 0);
}
-TEST_P(CodegenTest, Return3) {
+TEST_F(CodegenTest, Return3) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 1 << 8 | 1 << 12,
Instruction::RETURN | 1 << 8);
- TestCode(GetParam(), data, true, 1);
+ TestCode(data, true, 1);
}
-TEST_P(CodegenTest, ReturnIf1) {
+TEST_F(CodegenTest, ReturnIf1) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 1 << 8 | 1 << 12,
@@ -460,10 +430,10 @@
Instruction::RETURN | 0 << 8,
Instruction::RETURN | 1 << 8);
- TestCode(GetParam(), data, true, 1);
+ TestCode(data, true, 1);
}
-TEST_P(CodegenTest, ReturnIf2) {
+TEST_F(CodegenTest, ReturnIf2) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 1 << 8 | 1 << 12,
@@ -471,12 +441,12 @@
Instruction::RETURN | 0 << 8,
Instruction::RETURN | 1 << 8);
- TestCode(GetParam(), data, true, 0);
+ TestCode(data, true, 0);
}
// Exercise bit-wise (one's complement) not-int instruction.
#define NOT_INT_TEST(TEST_NAME, INPUT, EXPECTED_OUTPUT) \
-TEST_P(CodegenTest, TEST_NAME) { \
+TEST_F(CodegenTest, TEST_NAME) { \
const int32_t input = INPUT; \
const uint16_t input_lo = Low16Bits(input); \
const uint16_t input_hi = High16Bits(input); \
@@ -485,7 +455,7 @@
Instruction::NOT_INT | 1 << 8 | 0 << 12 , \
Instruction::RETURN | 1 << 8); \
\
- TestCode(GetParam(), data, true, EXPECTED_OUTPUT); \
+ TestCode(data, true, EXPECTED_OUTPUT); \
}
NOT_INT_TEST(ReturnNotIntMinus2, -2, 1)
@@ -501,7 +471,7 @@
// Exercise bit-wise (one's complement) not-long instruction.
#define NOT_LONG_TEST(TEST_NAME, INPUT, EXPECTED_OUTPUT) \
-TEST_P(CodegenTest, TEST_NAME) { \
+TEST_F(CodegenTest, TEST_NAME) { \
const int64_t input = INPUT; \
const uint16_t word0 = Low16Bits(Low32Bits(input)); /* LSW. */ \
const uint16_t word1 = High16Bits(Low32Bits(input)); \
@@ -512,7 +482,7 @@
Instruction::NOT_LONG | 2 << 8 | 0 << 12, \
Instruction::RETURN_WIDE | 2 << 8); \
\
- TestCodeLong(GetParam(), data, true, EXPECTED_OUTPUT); \
+ TestCodeLong(data, true, EXPECTED_OUTPUT); \
}
NOT_LONG_TEST(ReturnNotLongMinus2, INT64_C(-2), INT64_C(1))
@@ -551,7 +521,7 @@
#undef NOT_LONG_TEST
-TEST_P(CodegenTest, IntToLongOfLongToInt) {
+TEST_F(CodegenTest, IntToLongOfLongToInt) {
const int64_t input = INT64_C(4294967296); // 2^32
const uint16_t word0 = Low16Bits(Low32Bits(input)); // LSW.
const uint16_t word1 = High16Bits(Low32Bits(input));
@@ -565,192 +535,146 @@
Instruction::INT_TO_LONG | 2 << 8 | 4 << 12,
Instruction::RETURN_WIDE | 2 << 8);
- TestCodeLong(GetParam(), data, true, 1);
+ TestCodeLong(data, true, 1);
}
-TEST_P(CodegenTest, ReturnAdd1) {
+TEST_F(CodegenTest, ReturnAdd1) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::ADD_INT, 1 << 8 | 0,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 7);
+ TestCode(data, true, 7);
}
-TEST_P(CodegenTest, ReturnAdd2) {
+TEST_F(CodegenTest, ReturnAdd2) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::ADD_INT_2ADDR | 1 << 12,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 7);
+ TestCode(data, true, 7);
}
-TEST_P(CodegenTest, ReturnAdd3) {
+TEST_F(CodegenTest, ReturnAdd3) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::ADD_INT_LIT8, 3 << 8 | 0,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 7);
+ TestCode(data, true, 7);
}
-TEST_P(CodegenTest, ReturnAdd4) {
+TEST_F(CodegenTest, ReturnAdd4) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::ADD_INT_LIT16, 3,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 7);
+ TestCode(data, true, 7);
}
-TEST_P(CodegenTest, NonMaterializedCondition) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(entry);
- graph->SetEntryBlock(entry);
- entry->AddInstruction(new (&allocator) HGoto());
-
- HBasicBlock* first_block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(first_block);
- entry->AddSuccessor(first_block);
- HIntConstant* constant0 = graph->GetIntConstant(0);
- HIntConstant* constant1 = graph->GetIntConstant(1);
- HEqual* equal = new (&allocator) HEqual(constant0, constant0);
- first_block->AddInstruction(equal);
- first_block->AddInstruction(new (&allocator) HIf(equal));
-
- HBasicBlock* then = new (&allocator) HBasicBlock(graph);
- HBasicBlock* else_ = new (&allocator) HBasicBlock(graph);
- HBasicBlock* exit = new (&allocator) HBasicBlock(graph);
-
- graph->AddBlock(then);
- graph->AddBlock(else_);
- graph->AddBlock(exit);
- first_block->AddSuccessor(then);
- first_block->AddSuccessor(else_);
- then->AddSuccessor(exit);
- else_->AddSuccessor(exit);
-
- exit->AddInstruction(new (&allocator) HExit());
- then->AddInstruction(new (&allocator) HReturn(constant0));
- else_->AddInstruction(new (&allocator) HReturn(constant1));
-
- ASSERT_TRUE(equal->NeedsMaterialization());
- graph->BuildDominatorTree();
- PrepareForRegisterAllocation(graph).Run();
- ASSERT_FALSE(equal->NeedsMaterialization());
-
- auto hook_before_codegen = [](HGraph* graph_in) {
- HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
- HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
- block->InsertInstructionBefore(move, block->GetLastInstruction());
- };
-
- RunCodeOptimized(GetParam(), graph, hook_before_codegen, true, 0);
-}
-
-TEST_P(CodegenTest, ReturnMulInt) {
+TEST_F(CodegenTest, ReturnMulInt) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::MUL_INT, 1 << 8 | 0,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 12);
+ TestCode(data, true, 12);
}
-TEST_P(CodegenTest, ReturnMulInt2addr) {
+TEST_F(CodegenTest, ReturnMulInt2addr) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::MUL_INT_2ADDR | 1 << 12,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 12);
+ TestCode(data, true, 12);
}
-TEST_P(CodegenTest, ReturnMulLong) {
+TEST_F(CodegenTest, ReturnMulLong) {
const uint16_t data[] = FOUR_REGISTERS_CODE_ITEM(
- Instruction::CONST_4 | 3 << 12 | 0,
- Instruction::CONST_4 | 0 << 12 | 1 << 8,
- Instruction::CONST_4 | 4 << 12 | 2 << 8,
- Instruction::CONST_4 | 0 << 12 | 3 << 8,
+ Instruction::CONST_WIDE | 0 << 8, 3, 0, 0, 0,
+ Instruction::CONST_WIDE | 2 << 8, 4, 0, 0, 0,
Instruction::MUL_LONG, 2 << 8 | 0,
Instruction::RETURN_WIDE);
- TestCodeLong(GetParam(), data, true, 12);
+ TestCodeLong(data, true, 12);
}
-TEST_P(CodegenTest, ReturnMulLong2addr) {
+TEST_F(CodegenTest, ReturnMulLong2addr) {
const uint16_t data[] = FOUR_REGISTERS_CODE_ITEM(
- Instruction::CONST_4 | 3 << 12 | 0 << 8,
- Instruction::CONST_4 | 0 << 12 | 1 << 8,
- Instruction::CONST_4 | 4 << 12 | 2 << 8,
- Instruction::CONST_4 | 0 << 12 | 3 << 8,
+ Instruction::CONST_WIDE | 0 << 8, 3, 0, 0, 0,
+ Instruction::CONST_WIDE | 2 << 8, 4, 0, 0, 0,
Instruction::MUL_LONG_2ADDR | 2 << 12,
Instruction::RETURN_WIDE);
- TestCodeLong(GetParam(), data, true, 12);
+ TestCodeLong(data, true, 12);
}
-TEST_P(CodegenTest, ReturnMulIntLit8) {
+TEST_F(CodegenTest, ReturnMulIntLit8) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT8, 3 << 8 | 0,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 12);
+ TestCode(data, true, 12);
}
-TEST_P(CodegenTest, ReturnMulIntLit16) {
+TEST_F(CodegenTest, ReturnMulIntLit16) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT16, 3,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 12);
+ TestCode(data, true, 12);
}
-TEST_P(CodegenTest, MaterializedCondition1) {
- // Check that condition are materialized correctly. A materialized condition
- // should yield `1` if it evaluated to true, and `0` otherwise.
- // We force the materialization of comparisons for different combinations of
- // inputs and check the results.
-
- int lhs[] = {1, 2, -1, 2, 0xabc};
- int rhs[] = {2, 1, 2, -1, 0xabc};
-
- for (size_t i = 0; i < arraysize(lhs); i++) {
+TEST_F(CodegenTest, NonMaterializedCondition) {
+ for (InstructionSet target_isa : GetTargetISAs()) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
+
HGraph* graph = CreateGraph(&allocator);
+ HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry);
+ graph->SetEntryBlock(entry);
+ entry->AddInstruction(new (&allocator) HGoto());
- HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(entry_block);
- graph->SetEntryBlock(entry_block);
- entry_block->AddInstruction(new (&allocator) HGoto());
- HBasicBlock* code_block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(code_block);
+ HBasicBlock* first_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(first_block);
+ entry->AddSuccessor(first_block);
+ HIntConstant* constant0 = graph->GetIntConstant(0);
+ HIntConstant* constant1 = graph->GetIntConstant(1);
+ HEqual* equal = new (&allocator) HEqual(constant0, constant0);
+ first_block->AddInstruction(equal);
+ first_block->AddInstruction(new (&allocator) HIf(equal));
+
+ HBasicBlock* then_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* else_block = new (&allocator) HBasicBlock(graph);
HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
-
- entry_block->AddSuccessor(code_block);
- code_block->AddSuccessor(exit_block);
graph->SetExitBlock(exit_block);
- HIntConstant* cst_lhs = graph->GetIntConstant(lhs[i]);
- HIntConstant* cst_rhs = graph->GetIntConstant(rhs[i]);
- HLessThan cmp_lt(cst_lhs, cst_rhs);
- code_block->AddInstruction(&cmp_lt);
- HReturn ret(&cmp_lt);
- code_block->AddInstruction(&ret);
+ graph->AddBlock(then_block);
+ graph->AddBlock(else_block);
+ graph->AddBlock(exit_block);
+ first_block->AddSuccessor(then_block);
+ first_block->AddSuccessor(else_block);
+ then_block->AddSuccessor(exit_block);
+ else_block->AddSuccessor(exit_block);
+
+ exit_block->AddInstruction(new (&allocator) HExit());
+ then_block->AddInstruction(new (&allocator) HReturn(constant0));
+ else_block->AddInstruction(new (&allocator) HReturn(constant1));
+
+ ASSERT_TRUE(equal->NeedsMaterialization());
+ TransformToSsa(graph);
+ PrepareForRegisterAllocation(graph).Run();
+ ASSERT_FALSE(equal->NeedsMaterialization());
auto hook_before_codegen = [](HGraph* graph_in) {
HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
@@ -758,93 +682,143 @@
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
- RunCodeOptimized(GetParam(), graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+ RunCode(target_isa, graph, hook_before_codegen, true, 0);
}
}
-TEST_P(CodegenTest, MaterializedCondition2) {
- // Check that HIf correctly interprets a materialized condition.
- // We force the materialization of comparisons for different combinations of
- // inputs. An HIf takes the materialized combination as input and returns a
- // value that we verify.
+TEST_F(CodegenTest, MaterializedCondition1) {
+ for (InstructionSet target_isa : GetTargetISAs()) {
+ // Check that conditions are materialized correctly. A materialized condition
+ // should yield `1` if it evaluates to true, and `0` otherwise.
+ // We force the materialization of comparisons for different combinations of
- int lhs[] = {1, 2, -1, 2, 0xabc};
- int rhs[] = {2, 1, 2, -1, 0xabc};
+ // inputs and check the results.
+ int lhs[] = {1, 2, -1, 2, 0xabc};
+ int rhs[] = {2, 1, 2, -1, 0xabc};
- for (size_t i = 0; i < arraysize(lhs); i++) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ for (size_t i = 0; i < arraysize(lhs); i++) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(entry_block);
- graph->SetEntryBlock(entry_block);
- entry_block->AddInstruction(new (&allocator) HGoto());
+ HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry_block);
+ graph->SetEntryBlock(entry_block);
+ entry_block->AddInstruction(new (&allocator) HGoto());
+ HBasicBlock* code_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(code_block);
+ HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(exit_block);
+ exit_block->AddInstruction(new (&allocator) HExit());
- HBasicBlock* if_block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(if_block);
- HBasicBlock* if_true_block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(if_true_block);
- HBasicBlock* if_false_block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(if_false_block);
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
+ entry_block->AddSuccessor(code_block);
+ code_block->AddSuccessor(exit_block);
+ graph->SetExitBlock(exit_block);
- graph->SetEntryBlock(entry_block);
- entry_block->AddSuccessor(if_block);
- if_block->AddSuccessor(if_true_block);
- if_block->AddSuccessor(if_false_block);
- if_true_block->AddSuccessor(exit_block);
- if_false_block->AddSuccessor(exit_block);
- graph->SetExitBlock(exit_block);
+ HIntConstant* cst_lhs = graph->GetIntConstant(lhs[i]);
+ HIntConstant* cst_rhs = graph->GetIntConstant(rhs[i]);
+ HLessThan cmp_lt(cst_lhs, cst_rhs);
+ code_block->AddInstruction(&cmp_lt);
+ HReturn ret(&cmp_lt);
+ code_block->AddInstruction(&ret);
- HIntConstant* cst_lhs = graph->GetIntConstant(lhs[i]);
- HIntConstant* cst_rhs = graph->GetIntConstant(rhs[i]);
- HLessThan cmp_lt(cst_lhs, cst_rhs);
- if_block->AddInstruction(&cmp_lt);
- // We insert a temporary to separate the HIf from the HLessThan and force
- // the materialization of the condition.
- HTemporary force_materialization(0);
- if_block->AddInstruction(&force_materialization);
- HIf if_lt(&cmp_lt);
- if_block->AddInstruction(&if_lt);
-
- HIntConstant* cst_lt = graph->GetIntConstant(1);
- HReturn ret_lt(cst_lt);
- if_true_block->AddInstruction(&ret_lt);
- HIntConstant* cst_ge = graph->GetIntConstant(0);
- HReturn ret_ge(cst_ge);
- if_false_block->AddInstruction(&ret_ge);
-
- auto hook_before_codegen = [](HGraph* graph_in) {
- HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
- HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
- block->InsertInstructionBefore(move, block->GetLastInstruction());
- };
-
- RunCodeOptimized(GetParam(), graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+ TransformToSsa(graph);
+ auto hook_before_codegen = [](HGraph* graph_in) {
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
+ HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
+ block->InsertInstructionBefore(move, block->GetLastInstruction());
+ };
+ RunCode(target_isa, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+ }
}
}
-TEST_P(CodegenTest, ReturnDivIntLit8) {
+TEST_F(CodegenTest, MaterializedCondition2) {
+ for (InstructionSet target_isa : GetTargetISAs()) {
+ // Check that HIf correctly interprets a materialized condition.
+ // We force the materialization of comparisons for different combinations of
+ // inputs. An HIf takes the materialized combination as input and returns a
+ // value that we verify.
+
+ int lhs[] = {1, 2, -1, 2, 0xabc};
+ int rhs[] = {2, 1, 2, -1, 0xabc};
+
+
+ for (size_t i = 0; i < arraysize(lhs); i++) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = CreateGraph(&allocator);
+
+ HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry_block);
+ graph->SetEntryBlock(entry_block);
+ entry_block->AddInstruction(new (&allocator) HGoto());
+
+ HBasicBlock* if_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(if_block);
+ HBasicBlock* if_true_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(if_true_block);
+ HBasicBlock* if_false_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(if_false_block);
+ HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(exit_block);
+ exit_block->AddInstruction(new (&allocator) HExit());
+
+ graph->SetEntryBlock(entry_block);
+ entry_block->AddSuccessor(if_block);
+ if_block->AddSuccessor(if_true_block);
+ if_block->AddSuccessor(if_false_block);
+ if_true_block->AddSuccessor(exit_block);
+ if_false_block->AddSuccessor(exit_block);
+ graph->SetExitBlock(exit_block);
+
+ HIntConstant* cst_lhs = graph->GetIntConstant(lhs[i]);
+ HIntConstant* cst_rhs = graph->GetIntConstant(rhs[i]);
+ HLessThan cmp_lt(cst_lhs, cst_rhs);
+ if_block->AddInstruction(&cmp_lt);
+ // We insert a temporary to separate the HIf from the HLessThan and force
+ // the materialization of the condition.
+ HTemporary force_materialization(0);
+ if_block->AddInstruction(&force_materialization);
+ HIf if_lt(&cmp_lt);
+ if_block->AddInstruction(&if_lt);
+
+ HIntConstant* cst_lt = graph->GetIntConstant(1);
+ HReturn ret_lt(cst_lt);
+ if_true_block->AddInstruction(&ret_lt);
+ HIntConstant* cst_ge = graph->GetIntConstant(0);
+ HReturn ret_ge(cst_ge);
+ if_false_block->AddInstruction(&ret_ge);
+
+ TransformToSsa(graph);
+ auto hook_before_codegen = [](HGraph* graph_in) {
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
+ HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
+ block->InsertInstructionBefore(move, block->GetLastInstruction());
+ };
+ RunCode(target_isa, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+ }
+ }
+}
+
+TEST_F(CodegenTest, ReturnDivIntLit8) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::DIV_INT_LIT8, 3 << 8 | 0,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 1);
+ TestCode(data, true, 1);
}
-TEST_P(CodegenTest, ReturnDivInt2Addr) {
+TEST_F(CodegenTest, ReturnDivInt2Addr) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0,
Instruction::CONST_4 | 2 << 12 | 1 << 8,
Instruction::DIV_INT_2ADDR | 1 << 12,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 2);
+ TestCode(data, true, 2);
}
// Helper method.
@@ -933,80 +907,55 @@
block->AddInstruction(comparison);
block->AddInstruction(new (&allocator) HReturn(comparison));
- auto hook_before_codegen = [](HGraph*) {
- };
- RunCodeOptimized(target_isa, graph, hook_before_codegen, true, expected_result);
+ TransformToSsa(graph);
+ RunCode(target_isa, graph, [](HGraph*) {}, true, expected_result);
}
-TEST_P(CodegenTest, ComparisonsInt) {
- const InstructionSet target_isa = GetParam();
- for (int64_t i = -1; i <= 1; i++) {
- for (int64_t j = -1; j <= 1; j++) {
- TestComparison(kCondEQ, i, j, Primitive::kPrimInt, target_isa);
- TestComparison(kCondNE, i, j, Primitive::kPrimInt, target_isa);
- TestComparison(kCondLT, i, j, Primitive::kPrimInt, target_isa);
- TestComparison(kCondLE, i, j, Primitive::kPrimInt, target_isa);
- TestComparison(kCondGT, i, j, Primitive::kPrimInt, target_isa);
- TestComparison(kCondGE, i, j, Primitive::kPrimInt, target_isa);
- TestComparison(kCondB, i, j, Primitive::kPrimInt, target_isa);
- TestComparison(kCondBE, i, j, Primitive::kPrimInt, target_isa);
- TestComparison(kCondA, i, j, Primitive::kPrimInt, target_isa);
- TestComparison(kCondAE, i, j, Primitive::kPrimInt, target_isa);
+TEST_F(CodegenTest, ComparisonsInt) {
+ for (InstructionSet target_isa : GetTargetISAs()) {
+ for (int64_t i = -1; i <= 1; i++) {
+ for (int64_t j = -1; j <= 1; j++) {
+ TestComparison(kCondEQ, i, j, Primitive::kPrimInt, target_isa);
+ TestComparison(kCondNE, i, j, Primitive::kPrimInt, target_isa);
+ TestComparison(kCondLT, i, j, Primitive::kPrimInt, target_isa);
+ TestComparison(kCondLE, i, j, Primitive::kPrimInt, target_isa);
+ TestComparison(kCondGT, i, j, Primitive::kPrimInt, target_isa);
+ TestComparison(kCondGE, i, j, Primitive::kPrimInt, target_isa);
+ TestComparison(kCondB, i, j, Primitive::kPrimInt, target_isa);
+ TestComparison(kCondBE, i, j, Primitive::kPrimInt, target_isa);
+ TestComparison(kCondA, i, j, Primitive::kPrimInt, target_isa);
+ TestComparison(kCondAE, i, j, Primitive::kPrimInt, target_isa);
+ }
}
}
}
-TEST_P(CodegenTest, ComparisonsLong) {
+TEST_F(CodegenTest, ComparisonsLong) {
// TODO: make MIPS work for long
if (kRuntimeISA == kMips || kRuntimeISA == kMips64) {
return;
}
- const InstructionSet target_isa = GetParam();
- if (target_isa == kMips || target_isa == kMips64) {
- return;
- }
+ for (InstructionSet target_isa : GetTargetISAs()) {
+ if (target_isa == kMips || target_isa == kMips64) {
+ continue;
+ }
- for (int64_t i = -1; i <= 1; i++) {
- for (int64_t j = -1; j <= 1; j++) {
- TestComparison(kCondEQ, i, j, Primitive::kPrimLong, target_isa);
- TestComparison(kCondNE, i, j, Primitive::kPrimLong, target_isa);
- TestComparison(kCondLT, i, j, Primitive::kPrimLong, target_isa);
- TestComparison(kCondLE, i, j, Primitive::kPrimLong, target_isa);
- TestComparison(kCondGT, i, j, Primitive::kPrimLong, target_isa);
- TestComparison(kCondGE, i, j, Primitive::kPrimLong, target_isa);
- TestComparison(kCondB, i, j, Primitive::kPrimLong, target_isa);
- TestComparison(kCondBE, i, j, Primitive::kPrimLong, target_isa);
- TestComparison(kCondA, i, j, Primitive::kPrimLong, target_isa);
- TestComparison(kCondAE, i, j, Primitive::kPrimLong, target_isa);
+ for (int64_t i = -1; i <= 1; i++) {
+ for (int64_t j = -1; j <= 1; j++) {
+ TestComparison(kCondEQ, i, j, Primitive::kPrimLong, target_isa);
+ TestComparison(kCondNE, i, j, Primitive::kPrimLong, target_isa);
+ TestComparison(kCondLT, i, j, Primitive::kPrimLong, target_isa);
+ TestComparison(kCondLE, i, j, Primitive::kPrimLong, target_isa);
+ TestComparison(kCondGT, i, j, Primitive::kPrimLong, target_isa);
+ TestComparison(kCondGE, i, j, Primitive::kPrimLong, target_isa);
+ TestComparison(kCondB, i, j, Primitive::kPrimLong, target_isa);
+ TestComparison(kCondBE, i, j, Primitive::kPrimLong, target_isa);
+ TestComparison(kCondA, i, j, Primitive::kPrimLong, target_isa);
+ TestComparison(kCondAE, i, j, Primitive::kPrimLong, target_isa);
+ }
}
}
}
-static ::std::vector<InstructionSet> GetTargetISAs() {
- ::std::vector<InstructionSet> v;
- // Add all ISAs that are executable on hardware or on simulator.
- const ::std::vector<InstructionSet> executable_isa_candidates = {
- kArm,
- kArm64,
- kThumb2,
- kX86,
- kX86_64,
- kMips,
- kMips64
- };
-
- for (auto target_isa : executable_isa_candidates) {
- if (CanExecute(target_isa)) {
- v.push_back(target_isa);
- }
- }
-
- return v;
-}
-
-INSTANTIATE_TEST_CASE_P(MultipleTargets,
- CodegenTest,
- ::testing::ValuesIn(GetTargetISAs()));
-
} // namespace art
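With the TEST_P fixture and its INSTANTIATE_TEST_CASE_P instantiation gone, per-ISA coverage is now handled inside TestCode()/TestCodeLong(), which loop over GetTargetISAs(). A new test therefore only supplies a DEX code item and the expected value. A minimal sketch of such a test after this change (the test name and the SUB_INT payload are illustrative, modeled on the ReturnAdd1 test above, and are not part of the change):

TEST_F(CodegenTest, ReturnSub1) {
  const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
      Instruction::CONST_4 | 3 << 12 | 0,       // v0 = 3
      Instruction::CONST_4 | 4 << 12 | 1 << 8,  // v1 = 4
      Instruction::SUB_INT, 1 << 8 | 0,         // v0 = v0 - v1
      Instruction::RETURN);                     // return v0
  TestCode(data, true, -1);                     // expects 3 - 4 == -1 on every executable ISA
}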
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 293282e..20c4f1f 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -356,12 +356,12 @@
compare, invoke_instruction->GetDexPc());
// TODO: Extend reference type propagation to understand the guard.
if (cursor != nullptr) {
- bb_cursor->InsertInstructionAfter(load_class, cursor);
+ bb_cursor->InsertInstructionAfter(field_get, cursor);
} else {
- bb_cursor->InsertInstructionBefore(load_class, bb_cursor->GetFirstInstruction());
+ bb_cursor->InsertInstructionBefore(field_get, bb_cursor->GetFirstInstruction());
}
- bb_cursor->InsertInstructionAfter(field_get, load_class);
- bb_cursor->InsertInstructionAfter(compare, field_get);
+ bb_cursor->InsertInstructionAfter(load_class, field_get);
+ bb_cursor->InsertInstructionAfter(compare, load_class);
bb_cursor->InsertInstructionAfter(deoptimize, compare);
deoptimize->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 9f50d18..3bf3f7f 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -85,9 +85,9 @@
InvokeDexCallingConventionVisitor* calling_convention_visitor) {
if (kIsDebugBuild && invoke->IsInvokeStaticOrDirect()) {
HInvokeStaticOrDirect* invoke_static_or_direct = invoke->AsInvokeStaticOrDirect();
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen->IsBaseline() || !invoke_static_or_direct->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been
+ // pruned by art::PrepareForRegisterAllocation.
+ DCHECK(!invoke_static_or_direct->IsStaticWithExplicitClinitCheck());
}
if (invoke->GetNumberOfArguments() == 0) {
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index bb840ea..fffd005 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -127,7 +127,7 @@
timing_logger_enabled_(compiler_driver->GetDumpPasses()),
timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true),
disasm_info_(graph->GetArena()),
- visualizer_enabled_(!compiler_driver->GetDumpCfgFileName().empty()),
+ visualizer_enabled_(!compiler_driver->GetCompilerOptions().GetDumpCfgFileName().empty()),
visualizer_(visualizer_output, graph, *codegen),
graph_in_bad_state_(false) {
if (timing_logger_enabled_ || visualizer_enabled_) {
@@ -305,30 +305,19 @@
SHARED_REQUIRES(Locks::mutator_lock_);
private:
- // Whether we should run any optimization or register allocation. If false, will
- // just run the code generation after the graph was built.
- const bool run_optimizations_;
-
// Create a 'CompiledMethod' for an optimized graph.
- CompiledMethod* EmitOptimized(ArenaAllocator* arena,
- CodeVectorAllocator* code_allocator,
- CodeGenerator* codegen,
- CompilerDriver* driver) const;
-
- // Create a 'CompiledMethod' for a non-optimized graph.
- CompiledMethod* EmitBaseline(ArenaAllocator* arena,
- CodeVectorAllocator* code_allocator,
- CodeGenerator* codegen,
- CompilerDriver* driver) const;
+ CompiledMethod* Emit(ArenaAllocator* arena,
+ CodeVectorAllocator* code_allocator,
+ CodeGenerator* codegen,
+ CompilerDriver* driver) const;
// Try compiling a method and return the code generator used for
// compiling it.
// This method:
// 1) Builds the graph. Returns null if it failed to build it.
- // 2) If `run_optimizations_` is set:
- // 2.1) Transform the graph to SSA. Returns null if it failed.
- // 2.2) Run optimizations on the graph, including register allocator.
- // 3) Generate code with the `code_allocator` provided.
+ // 2) Transforms the graph to SSA. Returns null if it failed.
+ // 3) Runs optimizations on the graph, including register allocator.
+ // 4) Generates code with the `code_allocator` provided.
CodeGenerator* TryCompile(ArenaAllocator* arena,
CodeVectorAllocator* code_allocator,
const DexFile::CodeItem* code_item,
@@ -350,21 +339,19 @@
static const int kMaximumCompilationTimeBeforeWarning = 100; /* ms */
OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
- : Compiler(driver, kMaximumCompilationTimeBeforeWarning),
- run_optimizations_(
- driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime) {}
+ : Compiler(driver, kMaximumCompilationTimeBeforeWarning) {}
void OptimizingCompiler::Init() {
// Enable C1visualizer output. Must be done in Init() because the compiler
// driver is not fully initialized when passed to the compiler's constructor.
CompilerDriver* driver = GetCompilerDriver();
- const std::string cfg_file_name = driver->GetDumpCfgFileName();
+ const std::string cfg_file_name = driver->GetCompilerOptions().GetDumpCfgFileName();
if (!cfg_file_name.empty()) {
CHECK_EQ(driver->GetThreadCount(), 1U)
<< "Graph visualizer requires the compiler to run single-threaded. "
<< "Invoke the compiler with '-j1'.";
std::ios_base::openmode cfg_file_mode =
- driver->GetDumpCfgAppend() ? std::ofstream::app : std::ofstream::out;
+ driver->GetCompilerOptions().GetDumpCfgAppend() ? std::ofstream::app : std::ofstream::out;
visualizer_output_.reset(new std::ofstream(cfg_file_name, cfg_file_mode));
}
if (driver->GetDumpStats()) {
@@ -577,17 +564,6 @@
AllocateRegisters(graph, codegen, pass_observer);
}
-// The stack map we generate must be 4-byte aligned on ARM. Since existing
-// maps are generated alongside these stack maps, we must also align them.
-static ArrayRef<const uint8_t> AlignVectorSize(ArenaVector<uint8_t>& vector) {
- size_t size = vector.size();
- size_t aligned_size = RoundUp(size, 4);
- for (; size < aligned_size; ++size) {
- vector.push_back(0);
- }
- return ArrayRef<const uint8_t>(vector);
-}
-
static ArenaVector<LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
ArenaVector<LinkerPatch> linker_patches(codegen->GetGraph()->GetArena()->Adapter());
codegen->EmitLinkerPatches(&linker_patches);
@@ -601,10 +577,10 @@
return linker_patches;
}
-CompiledMethod* OptimizingCompiler::EmitOptimized(ArenaAllocator* arena,
- CodeVectorAllocator* code_allocator,
- CodeGenerator* codegen,
- CompilerDriver* compiler_driver) const {
+CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena,
+ CodeVectorAllocator* code_allocator,
+ CodeGenerator* codegen,
+ CompilerDriver* compiler_driver) const {
ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps));
stack_map.resize(codegen->ComputeStackMapsSize());
@@ -630,39 +606,6 @@
return compiled_method;
}
-CompiledMethod* OptimizingCompiler::EmitBaseline(
- ArenaAllocator* arena,
- CodeVectorAllocator* code_allocator,
- CodeGenerator* codegen,
- CompilerDriver* compiler_driver) const {
- ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
-
- ArenaVector<uint8_t> mapping_table(arena->Adapter(kArenaAllocBaselineMaps));
- codegen->BuildMappingTable(&mapping_table);
- ArenaVector<uint8_t> vmap_table(arena->Adapter(kArenaAllocBaselineMaps));
- codegen->BuildVMapTable(&vmap_table);
- ArenaVector<uint8_t> gc_map(arena->Adapter(kArenaAllocBaselineMaps));
- codegen->BuildNativeGCMap(&gc_map, *compiler_driver);
-
- CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
- compiler_driver,
- codegen->GetInstructionSet(),
- ArrayRef<const uint8_t>(code_allocator->GetMemory()),
- // Follow Quick's behavior and set the frame size to zero if it is
- // considered "empty" (see the definition of
- // art::CodeGenerator::HasEmptyFrame).
- codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
- codegen->GetCoreSpillMask(),
- codegen->GetFpuSpillMask(),
- ArrayRef<const SrcMapElem>(),
- AlignVectorSize(mapping_table),
- AlignVectorSize(vmap_table),
- AlignVectorSize(gc_map),
- ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
- ArrayRef<const LinkerPatch>(linker_patches));
- return compiled_method;
-}
-
CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
CodeVectorAllocator* code_allocator,
const DexFile::CodeItem* code_item,
@@ -775,41 +718,37 @@
VLOG(compiler) << "Optimizing " << pass_observer.GetMethodName();
- if (run_optimizations_) {
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScopeCollection handles(soa.Self());
- ScopedThreadSuspension sts(soa.Self(), kNative);
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScopeCollection handles(soa.Self());
+ ScopedThreadSuspension sts(soa.Self(), kNative);
- {
- PassScope scope(SsaBuilder::kSsaBuilderPassName, &pass_observer);
- GraphAnalysisResult result = graph->TryBuildingSsa(&handles);
- if (result != kAnalysisSuccess) {
- switch (result) {
- case kAnalysisFailThrowCatchLoop:
- MaybeRecordStat(MethodCompilationStat::kNotCompiledThrowCatchLoop);
- break;
- case kAnalysisFailAmbiguousArrayOp:
- MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
- break;
- case kAnalysisSuccess:
- UNREACHABLE();
- }
- pass_observer.SetGraphInBadState();
- return nullptr;
+ {
+ PassScope scope(SsaBuilder::kSsaBuilderPassName, &pass_observer);
+ GraphAnalysisResult result = graph->TryBuildingSsa(&handles);
+ if (result != kAnalysisSuccess) {
+ switch (result) {
+ case kAnalysisFailThrowCatchLoop:
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledThrowCatchLoop);
+ break;
+ case kAnalysisFailAmbiguousArrayOp:
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
+ break;
+ case kAnalysisSuccess:
+ UNREACHABLE();
}
+ pass_observer.SetGraphInBadState();
+ return nullptr;
}
-
- RunOptimizations(graph,
- codegen.get(),
- compiler_driver,
- compilation_stats_.get(),
- dex_compilation_unit,
- &pass_observer,
- &handles);
- codegen->CompileOptimized(code_allocator);
- } else {
- codegen->CompileBaseline(code_allocator);
}
+
+ RunOptimizations(graph,
+ codegen.get(),
+ compiler_driver,
+ compilation_stats_.get(),
+ dex_compilation_unit,
+ &pass_observer,
+ &handles);
+ codegen->Compile(code_allocator);
pass_observer.DumpDisassembly();
if (kArenaAllocatorCountAllocations) {
@@ -861,11 +800,7 @@
dex_cache));
if (codegen.get() != nullptr) {
MaybeRecordStat(MethodCompilationStat::kCompiled);
- if (run_optimizations_) {
- method = EmitOptimized(&arena, &code_allocator, codegen.get(), compiler_driver);
- } else {
- method = EmitBaseline(&arena, &code_allocator, codegen.get(), compiler_driver);
- }
+ method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver);
}
} else {
if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
@@ -928,8 +863,6 @@
{
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(self, kNative);
-
- DCHECK(run_optimizations_);
codegen.reset(
TryCompile(&arena,
&code_allocator,
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 2bae4bc..a966b62 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -72,8 +72,7 @@
float_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
double_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
- static constexpr bool kIsBaseline = false;
- codegen->SetupBlockedRegisters(kIsBaseline);
+ codegen->SetupBlockedRegisters();
physical_core_register_intervals_.resize(codegen->GetNumberOfCoreRegisters(), nullptr);
physical_fp_register_intervals_.resize(codegen->GetNumberOfFloatingPointRegisters(), nullptr);
// Always reserve for the current method and the graph's max out registers.
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index ea54239..dbb7341 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -528,7 +528,6 @@
dump_passes_(false),
dump_timing_(false),
dump_slow_timing_(kIsDebugBuild),
- dump_cfg_append_(false),
swap_fd_(-1),
app_image_fd_(kInvalidImageFd),
timings_(timings) {}
@@ -1093,10 +1092,6 @@
dump_timing_ = true;
} else if (option == "--dump-passes") {
dump_passes_ = true;
- } else if (option.starts_with("--dump-cfg=")) {
- dump_cfg_file_name_ = option.substr(strlen("--dump-cfg=")).data();
- } else if (option.starts_with("--dump-cfg-append")) {
- dump_cfg_append_ = true;
} else if (option == "--dump-stats") {
dump_stats_ = true;
} else if (option.starts_with("--swap-file=")) {
@@ -1509,8 +1504,6 @@
thread_count_,
dump_stats_,
dump_passes_,
- dump_cfg_file_name_,
- dump_cfg_append_,
compiler_phases_timings_.get(),
swap_fd_,
&dex_file_oat_filename_map_,
@@ -2312,8 +2305,6 @@
bool dump_passes_;
bool dump_timing_;
bool dump_slow_timing_;
- std::string dump_cfg_file_name_;
- bool dump_cfg_append_;
std::string swap_file_name_;
int swap_fd_;
std::string app_image_file_name_;
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index ddd285a..ed833c4 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1880,6 +1880,9 @@
*/
Dbg::PostClassPrepare(h_new_class.Get());
+ // Notify native debugger of the new class and its layout.
+ jit::Jit::NewTypeLoadedIfUsingJit(h_new_class.Get());
+
return h_new_class.Get();
}
@@ -2766,6 +2769,7 @@
mirror::Class* existing = InsertClass(descriptor, new_class.Get(), hash);
if (existing == nullptr) {
+ jit::Jit::NewTypeLoadedIfUsingJit(new_class.Get());
return new_class.Get();
}
// Another thread must have loaded the class after we
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 05668a9..5c9dab2 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -127,6 +127,13 @@
*error_msg = "JIT couldn't find jit_compile_method entry point";
return false;
}
+ jit_types_loaded_ = reinterpret_cast<void (*)(void*, mirror::Class**, size_t)>(
+ dlsym(jit_library_handle_, "jit_types_loaded"));
+ if (jit_types_loaded_ == nullptr) {
+ dlclose(jit_library_handle_);
+ *error_msg = "JIT couldn't find jit_types_loaded entry point";
+ return false;
+ }
CompilerCallbacks* callbacks = nullptr;
bool will_generate_debug_symbols = false;
VLOG(jit) << "Calling JitLoad interpreter_only="
@@ -214,5 +221,31 @@
new jit::JitInstrumentationCache(compile_threshold, warmup_threshold));
}
+void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) {
+ jit::Jit* jit = Runtime::Current()->GetJit();
+ if (jit != nullptr && jit->generate_debug_info_) {
+ DCHECK(jit->jit_types_loaded_ != nullptr);
+ jit->jit_types_loaded_(jit->jit_compiler_handle_, &type, 1);
+ }
+}
+
+void Jit::DumpTypeInfoForLoadedTypes(ClassLinker* linker) {
+ struct CollectClasses : public ClassVisitor {
+ bool Visit(mirror::Class* klass) override {
+ classes_.push_back(klass);
+ return true;
+ }
+ std::vector<mirror::Class*> classes_;
+ };
+
+ if (generate_debug_info_) {
+ ScopedObjectAccess so(Thread::Current());
+
+ CollectClasses visitor;
+ linker->VisitClasses(&visitor);
+ jit_types_loaded_(jit_compiler_handle_, visitor.classes_.data(), visitor.classes_.size());
+ }
+}
+
} // namespace jit
} // namespace art
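The jit.cc hunk adds a third entry point resolved from the JIT plugin next to jit_compile_method. Only the symbol name "jit_types_loaded" and its signature come from the hunk; the library name and the stand-in mirror::Class forward declaration below are assumptions made so the sketch compiles on its own (link with -ldl):

#include <dlfcn.h>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

namespace mirror { class Class; }             // stand-in forward declaration

using JitTypesLoadedFn = void (*)(void* handle, mirror::Class** types, size_t count);

int main() {
  void* jit_library_handle = dlopen("libart-compiler.so", RTLD_NOW);  // illustrative name
  if (jit_library_handle == nullptr) {
    std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
    return EXIT_FAILURE;
  }
  auto jit_types_loaded =
      reinterpret_cast<JitTypesLoadedFn>(dlsym(jit_library_handle, "jit_types_loaded"));
  if (jit_types_loaded == nullptr) {
    // Mirrors the new error path above: a missing entry point fails the JIT load.
    std::fprintf(stderr, "JIT couldn't find jit_types_loaded entry point\n");
    dlclose(jit_library_handle);
    return EXIT_FAILURE;
  }
  std::puts("jit_types_loaded entry point resolved");
  dlclose(jit_library_handle);
  return EXIT_SUCCESS;
}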
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 42bbbe7..429edf6 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -79,6 +79,13 @@
DumpInfo(os);
}
+ static void NewTypeLoadedIfUsingJit(mirror::Class* type)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // If debug info generation is turned on, write the type information for the types already
+ // loaded into the specified class linker to the jit debug interface.
+ void DumpTypeInfoForLoadedTypes(ClassLinker* linker);
+
private:
Jit();
bool LoadCompiler(std::string* error_msg);
@@ -89,6 +96,7 @@
void* (*jit_load_)(CompilerCallbacks**, bool*);
void (*jit_unload_)(void*);
bool (*jit_compile_method_)(void*, ArtMethod*, Thread*);
+ void (*jit_types_loaded_)(void*, mirror::Class**, size_t count);
// Performance monitoring.
bool dump_info_on_shutdown_;
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index c4694ee..1101acd 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -1881,6 +1881,9 @@
jit_->CreateInstrumentationCache(jit_options_->GetCompileThreshold(),
jit_options_->GetWarmupThreshold());
jit_->CreateThreadPool();
+
+ // Notify native debugger about the classes already loaded before the creation of the jit.
+ jit_->DumpTypeInfoForLoadedTypes(GetClassLinker());
} else {
LOG(WARNING) << "Failed to create JIT " << error_msg;
}
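The runtime.cc hunk covers classes that were loaded before the JIT existed: once the JIT and its thread pool are up, DumpTypeInfoForLoadedTypes walks every class known to the class linker and reports them to the debug interface in one batch. A stand-alone sketch of that collect-then-notify pattern, with simplified stand-ins for ART's ClassVisitor/ClassLinker and with NotifyDebugger standing in for the jit_types_loaded callback:

#include <cstddef>
#include <cstdio>
#include <vector>

struct Class { const char* descriptor; };

struct ClassVisitor {
  virtual ~ClassVisitor() = default;
  virtual bool Visit(Class* klass) = 0;       // return false to stop the walk
};

struct ClassLinker {
  std::vector<Class> loaded{{"Ljava/lang/Object;"}, {"Ljava/lang/String;"}};
  void VisitClasses(ClassVisitor* visitor) {
    for (Class& klass : loaded) {
      if (!visitor->Visit(&klass)) {
        return;
      }
    }
  }
};

void NotifyDebugger(Class** types, size_t count) {
  for (size_t i = 0; i < count; ++i) {
    std::printf("already-loaded type reported: %s\n", types[i]->descriptor);
  }
}

int main() {
  struct CollectClasses : public ClassVisitor {
    bool Visit(Class* klass) override {
      classes_.push_back(klass);
      return true;                            // keep visiting every class
    }
    std::vector<Class*> classes_;
  };

  ClassLinker linker;
  CollectClasses visitor;
  linker.VisitClasses(&visitor);              // classes loaded before the JIT existed
  NotifyDebugger(visitor.classes_.data(), visitor.classes_.size());
}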
diff --git a/test/971-iface-super-partial-compile-generated/build b/test/971-iface-super/build
similarity index 100%
rename from test/971-iface-super-partial-compile-generated/build
rename to test/971-iface-super/build
diff --git a/test/971-iface-super-partial-compile-generated/expected.txt b/test/971-iface-super/expected.txt
similarity index 100%
rename from test/971-iface-super-partial-compile-generated/expected.txt
rename to test/971-iface-super/expected.txt
diff --git a/test/971-iface-super-partial-compile-generated/info.txt b/test/971-iface-super/info.txt
similarity index 100%
rename from test/971-iface-super-partial-compile-generated/info.txt
rename to test/971-iface-super/info.txt
diff --git a/test/971-iface-super-partial-compile-generated/run b/test/971-iface-super/run
similarity index 100%
rename from test/971-iface-super-partial-compile-generated/run
rename to test/971-iface-super/run
diff --git a/test/971-iface-super-partial-compile-generated/util-src/generate_java.py b/test/971-iface-super/util-src/generate_java.py
similarity index 100%
rename from test/971-iface-super-partial-compile-generated/util-src/generate_java.py
rename to test/971-iface-super/util-src/generate_java.py
diff --git a/test/971-iface-super-partial-compile-generated/util-src/generate_smali.py b/test/971-iface-super/util-src/generate_smali.py
similarity index 100%
rename from test/971-iface-super-partial-compile-generated/util-src/generate_smali.py
rename to test/971-iface-super/util-src/generate_smali.py
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index f62d0e9..586c805 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -242,7 +242,7 @@
968-default-partial-compile-generated \
969-iface-super \
970-iface-super-resolution-generated \
- 971-iface-super-partial-compile-generated
+ 971-iface-super
# Check if we have python3 to run our tests.
ifeq ($(wildcard /usr/bin/python3),)
diff --git a/tools/art b/tools/art
index 304a9d0..d91b451 100644
--- a/tools/art
+++ b/tools/art
@@ -75,6 +75,7 @@
ANDROID_ROOT=$PROG_DIR/..
LIBDIR=$(find_libdir)
LD_LIBRARY_PATH=$ANDROID_ROOT/$LIBDIR
+DEBUG_OPTION=""
DELETE_ANDROID_DATA=false
# If ANDROID_DATA is the system ANDROID_DATA or is not set, use our own,
@@ -87,6 +88,7 @@
if [ z"$PERF" != z ]; then
invoke_with="perf record -o $ANDROID_DATA/perf.data -e cycles:u $invoke_with"
+ DEBUG_OPTION="-Xcompiler-option --generate-debug-info"
fi
# We use the PIC core image to work with perf.
@@ -99,7 +101,7 @@
-XXlib:$LIBART \
-Xnorelocate \
-Ximage:$ANDROID_ROOT/framework/core-optimizing-pic.art \
- -Xcompiler-option --generate-debug-info \
+ $DEBUG_OPTION \
"$@"
EXIT_STATUS=$?
diff --git a/tools/libcore_failures_concurrent_collector.txt b/tools/libcore_failures_concurrent_collector.txt
index 2cb2c50..6ea83d2 100644
--- a/tools/libcore_failures_concurrent_collector.txt
+++ b/tools/libcore_failures_concurrent_collector.txt
@@ -27,7 +27,10 @@
description: "TimeoutException on host-{x86,x86-64}-concurrent-collector",
result: EXEC_FAILED,
modes: [host],
- names: ["libcore.java.util.zip.GZIPOutputStreamTest#testSyncFlushEnabled",
+ names: ["libcore.java.util.zip.DeflaterOutputStreamTest#testSyncFlushDisabled",
+ "libcore.java.util.zip.GZIPOutputStreamTest#testSyncFlushEnabled",
+ "libcore.java.util.zip.OldAndroidGZIPStreamTest#testGZIPStream",
+ "libcore.java.util.zip.OldAndroidZipStreamTest#testZipStream",
"libcore.java.util.zip.ZipFileTest#testZipFileWithLotsOfEntries",
"libcore.java.util.zip.ZipInputStreamTest#testLongMessage"],
bug: 26507762