Merge "Add allocation tracking allocators to ROSAlloc"
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index b265ee7..9fa5fac 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -1094,6 +1094,7 @@
default_cutoff = compiler_options.GetSmallMethodThreshold();
break;
case CompilerOptions::kSpeed:
+ case CompilerOptions::kTime:
small_cutoff = compiler_options.GetHugeMethodThreshold();
default_cutoff = compiler_options.GetHugeMethodThreshold();
break;
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 3c76098..e7bd357 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -260,7 +260,7 @@
uint16_t* declaring_class_idx, uint16_t* declaring_method_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Get declaration location of a resolved field.
+ // Get the vtable index of the given resolved method.
uint16_t GetResolvedMethodVTableIndex(
mirror::ArtMethod* resolved_method, InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index c0f91d1..eb3de97 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -27,7 +27,8 @@
kSpace, // Maximize space savings.
kBalanced, // Try to get the best performance return on compilation investment.
kSpeed, // Maximize runtime performance.
- kEverything // Force compilation (Note: excludes compilaton of class initializers).
+ kEverything, // Force compilation (Note: excludes compilation of class initializers).
+ kTime // Compile methods, but minimize compilation time.
};
// Guide heuristics to determine whether to compile method if profile data not available.
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index a03588f..33b00d2 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -331,18 +331,61 @@
bool is_range,
uint32_t* args,
uint32_t register_index) {
+ Instruction::Code opcode = instruction.Opcode();
+ InvokeType invoke_type;
+ switch (opcode) {
+ case Instruction::INVOKE_STATIC:
+ case Instruction::INVOKE_STATIC_RANGE:
+ invoke_type = kStatic;
+ break;
+ case Instruction::INVOKE_DIRECT:
+ case Instruction::INVOKE_DIRECT_RANGE:
+ invoke_type = kDirect;
+ break;
+ case Instruction::INVOKE_VIRTUAL:
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ invoke_type = kVirtual;
+ break;
+ case Instruction::INVOKE_INTERFACE:
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ invoke_type = kInterface;
+ break;
+ case Instruction::INVOKE_SUPER_RANGE:
+ case Instruction::INVOKE_SUPER:
+ invoke_type = kSuper;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected invoke op: " << opcode;
+ return false;
+ }
+
const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
const DexFile::ProtoId& proto_id = dex_file_->GetProtoId(method_id.proto_idx_);
const char* descriptor = dex_file_->StringDataByIdx(proto_id.shorty_idx_);
Primitive::Type return_type = Primitive::GetType(descriptor[0]);
- bool is_instance_call =
- instruction.Opcode() != Instruction::INVOKE_STATIC
- && instruction.Opcode() != Instruction::INVOKE_STATIC_RANGE;
+ bool is_instance_call = invoke_type != kStatic;
const size_t number_of_arguments = strlen(descriptor) - (is_instance_call ? 0 : 1);
- // Treat invoke-direct like static calls for now.
- HInvoke* invoke = new (arena_) HInvokeStatic(
- arena_, number_of_arguments, return_type, dex_offset, method_idx);
+ HInvoke* invoke = nullptr;
+ if (invoke_type == kVirtual) {
+ MethodReference target_method(dex_file_, method_idx);
+ uintptr_t direct_code;
+ uintptr_t direct_method;
+ int vtable_index;
+ // TODO: Add devirtualization support.
+ compiler_driver_->ComputeInvokeInfo(dex_compilation_unit_, dex_offset, true, true,
+ &invoke_type, &target_method, &vtable_index,
+ &direct_code, &direct_method);
+ if (vtable_index == -1) {
+ return false;
+ }
+ invoke = new (arena_) HInvokeVirtual(
+ arena_, number_of_arguments, return_type, dex_offset, vtable_index);
+ } else {
+ // Treat invoke-direct like static calls for now.
+ invoke = new (arena_) HInvokeStatic(
+ arena_, number_of_arguments, return_type, dex_offset, method_idx);
+ }
size_t start_index = 0;
Temporaries temps(graph_, is_instance_call ? 1 : 0);
@@ -620,7 +663,8 @@
}
case Instruction::INVOKE_STATIC:
- case Instruction::INVOKE_DIRECT: {
+ case Instruction::INVOKE_DIRECT:
+ case Instruction::INVOKE_VIRTUAL: {
uint32_t method_idx = instruction.VRegB_35c();
uint32_t number_of_vreg_arguments = instruction.VRegA_35c();
uint32_t args[5];
@@ -632,7 +676,8 @@
}
case Instruction::INVOKE_STATIC_RANGE:
- case Instruction::INVOKE_DIRECT_RANGE: {
+ case Instruction::INVOKE_DIRECT_RANGE:
+ case Instruction::INVOKE_VIRTUAL_RANGE: {
uint32_t method_idx = instruction.VRegB_3rc();
uint32_t number_of_vreg_arguments = instruction.VRegA_3rc();
uint32_t register_index = instruction.VRegC();
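
The switch added above pins down the InvokeType from the dex opcode before any HIR node is built; the /range variants only change the argument encoding, never the dispatch kind, which is also why `is_instance_call` collapses to a single comparison against kStatic. A standalone sketch of that mapping (mock enums standing in for the real Instruction::Code and InvokeType):

    #include <cassert>

    // Mock opcodes and invoke kinds, for illustration only.
    enum class Opcode { kInvokeStatic, kInvokeStaticRange, kInvokeDirect, kInvokeDirectRange,
                        kInvokeVirtual, kInvokeVirtualRange, kInvokeInterface, kInvokeInterfaceRange,
                        kInvokeSuper, kInvokeSuperRange };
    enum class InvokeType { kStatic, kDirect, kVirtual, kInterface, kSuper };

    // The /range variant never changes the dispatch kind, only how arguments are encoded.
    static InvokeType TypeOf(Opcode op) {
      switch (op) {
        case Opcode::kInvokeStatic:    case Opcode::kInvokeStaticRange:    return InvokeType::kStatic;
        case Opcode::kInvokeDirect:    case Opcode::kInvokeDirectRange:    return InvokeType::kDirect;
        case Opcode::kInvokeVirtual:   case Opcode::kInvokeVirtualRange:   return InvokeType::kVirtual;
        case Opcode::kInvokeInterface: case Opcode::kInvokeInterfaceRange: return InvokeType::kInterface;
        case Opcode::kInvokeSuper:     case Opcode::kInvokeSuperRange:     return InvokeType::kSuper;
      }
      __builtin_unreachable();
    }

    int main() {
      // Only static calls have no receiver, mirroring is_instance_call in the builder.
      assert(TypeOf(Opcode::kInvokeVirtualRange) == InvokeType::kVirtual);
      assert(TypeOf(Opcode::kInvokeStatic) == InvokeType::kStatic);
    }
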
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 9903092..ad62279 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -20,6 +20,7 @@
#include "gc/accounting/card_table.h"
#include "mirror/array.h"
#include "mirror/art_method.h"
+#include "mirror/class.h"
#include "thread.h"
#include "utils/assembler.h"
#include "utils/arm/assembler_arm.h"
@@ -818,6 +819,47 @@
}
void LocationsBuilderARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorARM::LoadCurrentMethod(Register reg) {
+ __ ldr(reg, Address(SP, kCurrentMethodStackOffset));
+}
+
+void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+ Register temp = invoke->GetLocations()->GetTemp(0).AsArm().AsCoreRegister();
+ uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
+ size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
+ invoke->GetIndexInDexCache() * kArmWordSize;
+
+ // TODO: Implement all kinds of calls:
+ // 1) boot -> boot
+ // 2) app -> boot
+ // 3) app -> app
+ //
+ // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+ // temp = method;
+ LoadCurrentMethod(temp);
+ // temp = temp->dex_cache_resolved_methods_;
+ __ ldr(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ // temp = temp[index_in_cache]
+ __ ldr(temp, Address(temp, index_in_cache));
+ // LR = temp[offset_of_quick_compiled_code]
+ __ ldr(LR, Address(temp,
+ mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+ // LR()
+ __ blx(LR);
+
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+ DCHECK(!codegen_->IsLeafMethod());
+}
+
+void LocationsBuilderARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
locations->AddTemp(ArmCoreLocation(R0));
@@ -852,37 +894,30 @@
}
}
-void InstructionCodeGeneratorARM::LoadCurrentMethod(Register reg) {
- __ ldr(reg, Address(SP, kCurrentMethodStackOffset));
-}
-void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
Register temp = invoke->GetLocations()->GetTemp(0).AsArm().AsCoreRegister();
- uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
- size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
- invoke->GetIndexInDexCache() * kArmWordSize;
-
- // TODO: Implement all kinds of calls:
- // 1) boot -> boot
- // 2) app -> boot
- // 3) app -> app
- //
- // Currently we implement the app -> app logic, which looks up in the resolve cache.
-
- // temp = method;
- LoadCurrentMethod(temp);
- // temp = temp->dex_cache_resolved_methods_;
- __ ldr(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
- // temp = temp[index_in_cache]
- __ ldr(temp, Address(temp, index_in_cache));
- // LR = temp[offset_of_quick_compiled_code]
- __ ldr(LR, Address(temp,
- mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
- // LR()
+ uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
+ invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ ldr(temp, Address(SP, receiver.GetStackIndex()));
+ __ ldr(temp, Address(temp, class_offset));
+ } else {
+ __ ldr(temp, Address(receiver.AsArm().AsCoreRegister(), class_offset));
+ }
+ // temp = temp->GetMethodAt(method_offset);
+ uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value();
+ __ ldr(temp, Address(temp, method_offset));
+ // LR = temp->GetEntryPoint();
+ __ ldr(LR, Address(temp, entry_point));
+ // LR();
__ blx(LR);
-
- codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
void LocationsBuilderARM::VisitAdd(HAdd* add) {
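
For readers following the generated sequences: the static path goes through the current method's dex cache, while the new virtual path goes through the receiver's class. A minimal C-level sketch of the virtual sequence, with hypothetical struct layouts (the real offsets come from mirror::Object::ClassOffset(), mirror::Class::EmbeddedVTableOffset(), and the quick entry point offset used above):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical object model, for illustration only.
    struct Method { void (*entry_point)(); };   // EntryPointFromQuickCompiledCodeOffset()
    struct Class  { Method* vtable[8]; };       // embedded vtable
    struct Object { Class* klass; };            // Object::ClassOffset()

    // The sequence VisitInvokeVirtual emits: two dependent loads, one indirect call.
    void DispatchVirtual(Object* receiver, uint32_t vtable_index) {
      Class* klass = receiver->klass;                 // temp = object->GetClass()
      Method* method = klass->vtable[vtable_index];   // temp = temp->GetMethodAt(offset)
      method->entry_point();                          // ARM: ldr LR, ...; blx LR
    }

    static void SayHi() { std::puts("virtualMethod"); }

    int main() {
      Method m{&SayHi};
      Class c{{&m}};
      Object o{&c};
      DispatchVirtual(&o, 0);  // prints "virtualMethod"
    }
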
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 660294b..2480960 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -93,6 +93,8 @@
#undef DECLARE_VISIT_INSTRUCTION
+ void HandleInvoke(HInvoke* invoke);
+
private:
CodeGeneratorARM* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 3dd9b37..3383cb2 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -20,6 +20,7 @@
#include "gc/accounting/card_table.h"
#include "mirror/array.h"
#include "mirror/art_method.h"
+#include "mirror/class.h"
#include "thread.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"
@@ -763,6 +764,40 @@
}
void LocationsBuilderX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+ Register temp = invoke->GetLocations()->GetTemp(0).AsX86().AsCpuRegister();
+ uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
+ size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
+ invoke->GetIndexInDexCache() * kX86WordSize;
+
+ // TODO: Implement all kinds of calls:
+ // 1) boot -> boot
+ // 2) app -> boot
+ // 3) app -> app
+ //
+ // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+ // temp = method;
+ LoadCurrentMethod(temp);
+ // temp = temp->dex_cache_resolved_methods_;
+ __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+ // temp = temp[index_in_cache]
+ __ movl(temp, Address(temp, index_in_cache));
+ // (temp + offset_of_quick_compiled_code)()
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderX86::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
locations->AddTemp(X86CpuLocation(EAX));
@@ -799,26 +834,23 @@
invoke->SetLocations(locations);
}
-void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
Register temp = invoke->GetLocations()->GetTemp(0).AsX86().AsCpuRegister();
- uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
- size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
- invoke->GetIndexInDexCache() * kX86WordSize;
-
- // TODO: Implement all kinds of calls:
- // 1) boot -> boot
- // 2) app -> boot
- // 3) app -> app
- //
- // Currently we implement the app -> app logic, which looks up in the resolve cache.
-
- // temp = method;
- LoadCurrentMethod(temp);
- // temp = temp->dex_cache_resolved_methods_;
- __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
- // temp = temp[index_in_cache]
- __ movl(temp, Address(temp, index_in_cache));
- // (temp + offset_of_quick_compiled_code)()
+ uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
+ invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ movl(temp, Address(ESP, receiver.GetStackIndex()));
+ __ movl(temp, Address(temp, class_offset));
+ } else {
+ __ movl(temp, Address(receiver.AsX86().AsCpuRegister(), class_offset));
+ }
+ // temp = temp->GetMethodAt(method_offset);
+ __ movl(temp, Address(temp, method_offset));
+ // call temp->GetEntryPoint();
__ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
DCHECK(!codegen_->IsLeafMethod());
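
The static-call path computes its dex-cache slot entirely at compile time: the array payload offset plus one word-sized slot per method index. A self-contained restatement of that arithmetic (the constants are illustrative stand-ins for Array::DataOffset(sizeof(HeapReference<Object>)) and kX86WordSize/kArmWordSize):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kHeapReferenceSize = 4;  // sizeof(mirror::HeapReference<mirror::Object>)
    constexpr size_t kWordSize = 4;           // 32-bit targets
    constexpr size_t kArrayDataOffset = 12;   // illustrative value of Array::DataOffset(4)

    constexpr size_t IndexInCache(uint32_t method_idx) {
      return kArrayDataOffset + method_idx * kWordSize;
    }

    static_assert(IndexInCache(0) == 12, "first slot starts at the array payload");
    static_assert(IndexInCache(3) == 24, "slots are one word apart");

    int main() {}
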
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 7c50204..f1be0ad 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -94,6 +94,8 @@
#undef DECLARE_VISIT_INSTRUCTION
+ void HandleInvoke(HInvoke* invoke);
+
private:
CodeGeneratorX86* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 2f352e0..ca03af8 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -20,6 +20,7 @@
#include "gc/accounting/card_table.h"
#include "mirror/array.h"
#include "mirror/art_method.h"
+#include "mirror/class.h"
#include "mirror/object_reference.h"
#include "thread.h"
#include "utils/assembler.h"
@@ -709,12 +710,46 @@
}
void LocationsBuilderX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
+ HandleInvoke(invoke);
+}
+
+void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
+ CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsX86_64().AsCpuRegister();
+ uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
+ size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).SizeValue() +
+ invoke->GetIndexInDexCache() * heap_reference_size;
+
+ // TODO: Implement all kinds of calls:
+ // 1) boot -> boot
+ // 2) app -> boot
+ // 3) app -> app
+ //
+ // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+ // temp = method;
+ LoadCurrentMethod(temp);
+ // temp = temp->dex_cache_resolved_methods_;
+ __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+ // temp = temp[index_in_cache]
+ __ movl(temp, Address(temp, index_in_cache));
+ // (temp + offset_of_quick_compiled_code)()
+ __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+
+ DCHECK(!codegen_->IsLeafMethod());
+ codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void LocationsBuilderX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
+ HandleInvoke(invoke);
+}
+
+void LocationsBuilderX86_64::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
locations->AddTemp(X86_64CpuLocation(RDI));
InvokeDexCallingConventionVisitor calling_convention_visitor;
- for (size_t i = 0; i < invoke->InputCount(); ++i) {
+ for (size_t i = 0; i < invoke->InputCount(); i++) {
HInstruction* input = invoke->InputAt(i);
locations->SetInAt(i, calling_convention_visitor.GetNextLocation(input->GetType()));
}
@@ -740,26 +775,23 @@
}
}
-void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
+void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsX86_64().AsCpuRegister();
- uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
- size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).SizeValue() +
- invoke->GetIndexInDexCache() * heap_reference_size;
-
- // TODO: Implement all kinds of calls:
- // 1) boot -> boot
- // 2) app -> boot
- // 3) app -> app
- //
- // Currently we implement the app -> app logic, which looks up in the resolve cache.
-
- // temp = method;
- LoadCurrentMethod(temp);
- // temp = temp->dex_cache_resolved_methods_;
- __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
- // temp = temp[index_in_cache]
- __ movl(temp, Address(temp, index_in_cache));
- // (temp + offset_of_quick_compiled_code)()
+ size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
+ invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
+ LocationSummary* locations = invoke->GetLocations();
+ Location receiver = locations->InAt(0);
+ size_t class_offset = mirror::Object::ClassOffset().SizeValue();
+ // temp = object->GetClass();
+ if (receiver.IsStackSlot()) {
+ __ movq(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
+ __ movq(temp, Address(temp, class_offset));
+ } else {
+ __ movq(temp, Address(receiver.AsX86_64().AsCpuRegister(), class_offset));
+ }
+ // temp = temp->GetMethodAt(method_offset);
+ __ movl(temp, Address(temp, method_offset));
+ // call temp->GetEntryPoint();
__ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
DCHECK(!codegen_->IsLeafMethod());
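
A note on the x86-64 sequence: the method loads use movl even on a 64-bit target because the values being chased are heap references, which ART stores as 32-bit values. A sketch of that assumption (illustrative, not the real mirror header):

    #include <cstdint>

    // mirror::HeapReference<T> holds a 32-bit value even in a 64-bit runtime,
    // which is why the code above reads it with movl rather than movq.
    template <typename T>
    struct HeapReference {
      uint32_t reference_;  // compressed pointer into the managed heap
    };

    static_assert(sizeof(HeapReference<void>) == 4, "heap references stay 4 bytes on 64-bit");

    int main() {}
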
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 44552ea..78b60fe 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -91,6 +91,8 @@
#undef DECLARE_VISIT_INSTRUCTION
+ void HandleInvoke(HInvoke* invoke);
+
private:
CodeGeneratorX86_64* const codegen_;
InvokeDexCallingConventionVisitor parameter_visitor_;
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index ed6dd93..d6dfeae 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -422,6 +422,7 @@
M(If) \
M(IntConstant) \
M(InvokeStatic) \
+ M(InvokeVirtual) \
M(LoadLocal) \
M(Local) \
M(LongConstant) \
@@ -1272,6 +1273,26 @@
DISALLOW_COPY_AND_ASSIGN(HInvokeStatic);
};
+class HInvokeVirtual : public HInvoke {
+ public:
+ HInvokeVirtual(ArenaAllocator* arena,
+ uint32_t number_of_arguments,
+ Primitive::Type return_type,
+ uint32_t dex_pc,
+ uint32_t vtable_index)
+ : HInvoke(arena, number_of_arguments, return_type, dex_pc),
+ vtable_index_(vtable_index) {}
+
+ uint32_t GetVTableIndex() const { return vtable_index_; }
+
+ DECLARE_INSTRUCTION(InvokeVirtual);
+
+ private:
+ const uint32_t vtable_index_;
+
+ DISALLOW_COPY_AND_ASSIGN(HInvokeVirtual);
+};
+
class HNewInstance : public HExpression<0> {
public:
HNewInstance(uint32_t dex_pc, uint16_t type_index)
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 75f4155..a539192 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -38,7 +38,7 @@
*/
class CodeVectorAllocator FINAL : public CodeAllocator {
public:
- CodeVectorAllocator() { }
+ CodeVectorAllocator() {}
virtual uint8_t* Allocate(size_t size) {
size_ = size;
@@ -70,6 +70,7 @@
class OptimizingCompiler FINAL : public Compiler {
public:
explicit OptimizingCompiler(CompilerDriver* driver);
+ ~OptimizingCompiler();
bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file, CompilationUnit* cu) const
OVERRIDE;
@@ -113,6 +114,13 @@
void UnInit() const OVERRIDE;
private:
+ // Whether we should run any optimization or register allocation. If false, will
+ // just run the code generation after the graph was built.
+ const bool run_optimizations_;
+ mutable AtomicInteger total_compiled_methods_;
+ mutable AtomicInteger unoptimized_compiled_methods_;
+ mutable AtomicInteger optimized_compiled_methods_;
+
std::unique_ptr<std::ostream> visualizer_output_;
// Delegate to another compiler in case the optimizing compiler cannot compile a method.
@@ -122,8 +130,16 @@
DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
};
-OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver) : Compiler(driver, 100),
- delegate_(Create(driver, Compiler::Kind::kQuick)) {
+static const int kMaximumCompilationTimeBeforeWarning = 100; /* ms */
+
+OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
+ : Compiler(driver, kMaximumCompilationTimeBeforeWarning),
+ run_optimizations_(
+ driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime),
+ total_compiled_methods_(0),
+ unoptimized_compiled_methods_(0),
+ optimized_compiled_methods_(0),
+ delegate_(Create(driver, Compiler::Kind::kQuick)) {
if (kIsVisualizerEnabled) {
visualizer_output_.reset(new std::ofstream("art.cfg"));
}
@@ -137,6 +153,14 @@
delegate_->UnInit();
}
+OptimizingCompiler::~OptimizingCompiler() {
+ if (total_compiled_methods_ == 0) {
+ LOG(INFO) << "Did not compile any method.";
+ } else {
+ size_t unoptimized_percent = (unoptimized_compiled_methods_ * 100 / total_compiled_methods_);
+ size_t optimized_percent = (optimized_compiled_methods_ * 100 / total_compiled_methods_);
+ LOG(INFO) << "Compiled " << total_compiled_methods_ << " methods: "
+ << unoptimized_percent << "% (" << unoptimized_compiled_methods_ << ") unoptimized, "
+ << optimized_percent << "% (" << optimized_compiled_methods_ << ") optimized.";
+ }
+}
+
bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx, const DexFile& dex_file,
CompilationUnit* cu) const {
return delegate_->CanCompileMethod(method_idx, dex_file, cu);
@@ -173,6 +197,7 @@
uint32_t method_idx,
jobject class_loader,
const DexFile& dex_file) const {
+ total_compiled_methods_++;
InstructionSet instruction_set = GetCompilerDriver()->GetInstructionSet();
// Always use the thumb2 assembler: some runtime functionality (like implicit stack
// overflow checks) assume thumb2.
@@ -222,7 +247,8 @@
CodeVectorAllocator allocator;
- if (RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set)) {
+ if (run_optimizations_ && RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set)) {
+ optimized_compiled_methods_++;
graph->BuildDominatorTree();
graph->TransformToSSA();
visualizer.DumpGraph("ssa");
@@ -262,6 +288,7 @@
LOG(FATAL) << "Could not allocate registers in optimizing compiler";
return nullptr;
} else {
+ unoptimized_compiled_methods_++;
codegen->CompileBaseline(&allocator);
// Run these phases to get some test coverage.
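
The three new counters are AtomicIntegers (and mutable, since Compile() is const) because the compiler driver invokes Compile() concurrently from its thread pool; plain size_t increments would race. A reduced model of the bookkeeping, including the divide-by-zero guard the destructor needs:

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    std::atomic<int> total_compiled_methods{0};
    std::atomic<int> optimized_compiled_methods{0};

    void CompileOne(bool can_allocate_registers) {
      total_compiled_methods++;
      if (can_allocate_registers) optimized_compiled_methods++;
    }

    int main() {
      std::vector<std::thread> workers;
      for (int t = 0; t < 4; ++t) {
        workers.emplace_back([] { for (int i = 0; i < 1000; ++i) CompileOne(i % 2 == 0); });
      }
      for (auto& w : workers) w.join();
      int total = total_compiled_methods.load();
      // No methods compiled means no percentages to report.
      if (total != 0) {
        std::printf("%d%% optimized\n", optimized_compiled_methods.load() * 100 / total);
      }
    }
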
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index be6f097..b64390b 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -160,7 +160,14 @@
UsageError(" Example: --compiler-backend=Portable");
UsageError(" Default: Quick");
UsageError("");
- UsageError(" --compiler-filter=(verify-none|interpret-only|space|balanced|speed|everything):");
+ UsageError(" --compiler-filter="
+ "(verify-none"
+ "|interpret-only"
+ "|space"
+ "|balanced"
+ "|speed"
+ "|everything"
+ "|time):");
UsageError(" select compiler filter.");
UsageError(" Example: --compiler-filter=everything");
#if ART_SMALL_MODE
@@ -1181,6 +1188,8 @@
compiler_filter = CompilerOptions::kSpeed;
} else if (strcmp(compiler_filter_string, "everything") == 0) {
compiler_filter = CompilerOptions::kEverything;
+ } else if (strcmp(compiler_filter_string, "time") == 0) {
+ compiler_filter = CompilerOptions::kTime;
} else {
Usage("Unknown --compiler-filter value %s", compiler_filter_string);
}
@@ -1376,7 +1385,7 @@
* If we're not in interpret-only or verify-none mode, go ahead and compile small applications.
* Don't bother to check if we're doing the image.
*/
- if (!image && compiler_options->IsCompilationEnabled()) {
+ if (!image && compiler_options->IsCompilationEnabled() && compiler_kind == Compiler::kQuick) {
size_t num_methods = 0;
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
new file mode 100644
index 0000000..1a78d72
--- /dev/null
+++ b/runtime/check_reference_map_visitor.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
+#define ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
+
+#include "gc_map.h"
+#include "mirror/art_method-inl.h"
+#include "scoped_thread_state_change.h"
+#include "stack_map.h"
+
+namespace art {
+
+// Helper class for tests checking that the compiler keeps track of dex registers
+// holding references.
+class CheckReferenceMapVisitor : public StackVisitor {
+ public:
+ explicit CheckReferenceMapVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : StackVisitor(thread, nullptr) {}
+
+ bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* m = GetMethod();
+ if (m != nullptr && (m->IsCalleeSaveMethod() || m->IsNative())) {
+ CHECK_EQ(GetDexPc(), DexFile::kDexNoIndex);
+ }
+
+ if (m == nullptr || m->IsNative() || m->IsRuntimeMethod() || IsShadowFrame()) {
+ return true;
+ }
+
+ LOG(INFO) << "At " << PrettyMethod(m, false);
+
+ if (m->IsCalleeSaveMethod()) {
+ LOG(WARNING) << "no PC for " << PrettyMethod(m);
+ return true;
+ }
+
+ return false;
+ }
+
+ void CheckReferences(int* registers, int number_of_references, uint32_t native_pc_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (GetMethod()->IsOptimized()) {
+ CheckOptimizedMethod(registers, number_of_references, native_pc_offset);
+ } else {
+ CheckQuickMethod(registers, number_of_references, native_pc_offset);
+ }
+ }
+
+ private:
+ void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* m = GetMethod();
+ CodeInfo code_info = m->GetOptimizedCodeInfo();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map, m->GetCodeItem()->registers_size_);
+ MemoryRegion stack_mask = stack_map.GetStackMask();
+ uint32_t register_mask = stack_map.GetRegisterMask();
+ for (int i = 0; i < number_of_references; ++i) {
+ int reg = registers[i];
+ CHECK(reg < m->GetCodeItem()->registers_size_);
+ DexRegisterMap::LocationKind location = dex_register_map.GetLocationKind(reg);
+ switch (location) {
+ case DexRegisterMap::kNone:
+ // Not set, should not be a reference.
+ CHECK(false);
+ break;
+ case DexRegisterMap::kInStack:
+ CHECK(stack_mask.LoadBit(dex_register_map.GetValue(reg) >> 2));
+ break;
+ case DexRegisterMap::kInRegister:
+ CHECK_NE(register_mask & dex_register_map.GetValue(reg), 0u);
+ break;
+ case DexRegisterMap::kConstant:
+ CHECK_EQ(dex_register_map.GetValue(reg), 0);
+ break;
+ }
+ }
+ }
+
+ void CheckQuickMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* m = GetMethod();
+ NativePcOffsetToReferenceMap map(m->GetNativeGcMap());
+ const uint8_t* ref_bitmap = map.FindBitMap(native_pc_offset);
+ CHECK(ref_bitmap);
+ for (int i = 0; i < number_of_references; ++i) {
+ int reg = registers[i];
+ CHECK(reg < m->GetCodeItem()->registers_size_);
+ CHECK((*((ref_bitmap) + reg / 8) >> (reg % 8)) & 0x01)
+ << "Error: Reg @" << i << " is not in GC map";
+ }
+ }
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_CHECK_REFERENCE_MAP_VISITOR_H_
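
The quick path's bit test packs one dex register per bit, LSB-first within each byte; CheckQuickMethod's expression `(*(ref_bitmap + reg / 8) >> (reg % 8)) & 0x01` is the standard bitmap probe. A standalone illustration:

    #include <cassert>
    #include <cstdint>

    // Byte reg/8, bit reg%8, least-significant bit first.
    static bool IsBitSet(const uint8_t* bitmap, int reg) {
      return ((bitmap[reg / 8] >> (reg % 8)) & 0x01) != 0;
    }

    int main() {
      uint8_t bitmap[2] = {0b00000101, 0b00000010};  // regs 0, 2, and 9 set
      assert(IsBitSet(bitmap, 0));
      assert(IsBitSet(bitmap, 2));
      assert(IsBitSet(bitmap, 9));
      assert(!IsBitSet(bitmap, 1));
    }
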
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index cc1e0b9..df51973 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -1886,11 +1886,25 @@
return SetFieldValueImpl(0, field_id, value, width, true);
}
-std::string Dbg::StringToUtf8(JDWP::ObjectId string_id) {
+JDWP::JdwpError Dbg::StringToUtf8(JDWP::ObjectId string_id, std::string* str) {
JDWP::JdwpError error;
- mirror::String* s = gRegistry->Get<mirror::String*>(string_id, &error);
- CHECK(s != nullptr) << error;
- return s->ToModifiedUtf8();
+ mirror::Object* obj = gRegistry->Get<mirror::Object*>(string_id, &error);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
+ if (obj == nullptr) {
+ return JDWP::ERR_INVALID_OBJECT;
+ }
+ {
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ mirror::Class* java_lang_String = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_String);
+ if (!java_lang_String->IsAssignableFrom(obj->GetClass())) {
+ // This isn't a string.
+ return JDWP::ERR_INVALID_STRING;
+ }
+ }
+ *str = obj->AsString()->ToModifiedUtf8();
+ return JDWP::ERR_NONE;
}
void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
@@ -1939,7 +1953,7 @@
}
JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
- ScopedObjectAccess soa(Thread::Current());
+ ScopedObjectAccessUnchecked soa(Thread::Current());
JDWP::JdwpError error;
mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
if (error != JDWP::ERR_NONE) {
@@ -1970,26 +1984,54 @@
return error;
}
-std::string Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id) {
- ScopedObjectAccess soa(Thread::Current());
- JDWP::JdwpError error;
- mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id, &error);
- CHECK(thread_group != nullptr) << error;
- const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupName");
+static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa,
+ JDWP::ObjectId thread_group_id, JDWP::JdwpError* error)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id, error);
+ if (*error != JDWP::ERR_NONE) {
+ return nullptr;
+ }
+ if (thread_group == nullptr) {
+ *error = JDWP::ERR_INVALID_OBJECT;
+ return nullptr;
+ }
mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
CHECK(c != nullptr);
+ if (!c->IsAssignableFrom(thread_group->GetClass())) {
+ // This is not a java.lang.ThreadGroup.
+ *error = JDWP::ERR_INVALID_THREAD_GROUP;
+ return nullptr;
+ }
+ *error = JDWP::ERR_NONE;
+ return thread_group;
+}
+
+JDWP::JdwpError Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ JDWP::JdwpError error;
+ mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
+ const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupName");
+ mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
mirror::ArtField* f = c->FindInstanceField("name", "Ljava/lang/String;");
CHECK(f != nullptr);
mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
soa.Self()->EndAssertNoThreadSuspension(old_cause);
- return s->ToModifiedUtf8();
+
+ std::string thread_group_name(s->ToModifiedUtf8());
+ expandBufAddUtf8String(pReply, thread_group_name);
+ return JDWP::ERR_NONE;
}
-JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id) {
+JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
ScopedObjectAccessUnchecked soa(Thread::Current());
JDWP::JdwpError error;
- mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id, &error);
- CHECK(thread_group != nullptr) << error;
+ mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupParent");
mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
CHECK(c != nullptr);
@@ -1997,7 +2039,64 @@
CHECK(f != nullptr);
mirror::Object* parent = f->GetObject(thread_group);
soa.Self()->EndAssertNoThreadSuspension(old_cause);
- return gRegistry->Add(parent);
+
+ JDWP::ObjectId parent_group_id = gRegistry->Add(parent);
+ expandBufAddObjectId(pReply, parent_group_id);
+ return JDWP::ERR_NONE;
+}
+
+static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group,
+ std::vector<JDWP::ObjectId>* child_thread_group_ids)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CHECK(thread_group != nullptr);
+
+ // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
+ mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
+ mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
+
+ // Get the array and size out of the ArrayList<ThreadGroup>...
+ mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
+ mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
+ mirror::ObjectArray<mirror::Object>* groups_array =
+ array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
+ const int32_t size = size_field->GetInt(groups_array_list);
+
+ // Copy the first 'size' elements out of the array into the result.
+ for (int32_t i = 0; i < size; ++i) {
+ child_thread_group_ids->push_back(gRegistry->Add(groups_array->Get(i)));
+ }
+}
+
+JDWP::JdwpError Dbg::GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
+ JDWP::ExpandBuf* pReply) {
+ ScopedObjectAccessUnchecked soa(Thread::Current());
+ JDWP::JdwpError error;
+ mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
+
+ // Add child threads.
+ {
+ std::vector<JDWP::ObjectId> child_thread_ids;
+ GetThreads(thread_group, &child_thread_ids);
+ expandBufAdd4BE(pReply, child_thread_ids.size());
+ for (JDWP::ObjectId child_thread_id : child_thread_ids) {
+ expandBufAddObjectId(pReply, child_thread_id);
+ }
+ }
+
+ // Add child thread groups.
+ {
+ std::vector<JDWP::ObjectId> child_thread_groups_ids;
+ GetChildThreadGroups(soa, thread_group, &child_thread_groups_ids);
+ expandBufAdd4BE(pReply, child_thread_groups_ids.size());
+ for (JDWP::ObjectId child_thread_group_id : child_thread_groups_ids) {
+ expandBufAddObjectId(pReply, child_thread_group_id);
+ }
+ }
+
+ return JDWP::ERR_NONE;
}
JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
@@ -2007,13 +2106,6 @@
return gRegistry->Add(group);
}
-JDWP::ObjectId Dbg::GetMainThreadGroupId() {
- ScopedObjectAccess soa(Thread::Current());
- mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup);
- mirror::Object* group = f->GetObject(f->GetDeclaringClass());
- return gRegistry->Add(group);
-}
-
JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
switch (state) {
case kBlocked:
@@ -2111,11 +2203,8 @@
return (group == desired_thread_group);
}
-void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>* thread_ids) {
+void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids) {
ScopedObjectAccessUnchecked soa(Thread::Current());
- JDWP::JdwpError error;
- mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id, &error);
- CHECK_EQ(error, JDWP::ERR_NONE);
std::list<Thread*> all_threads_list;
{
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
@@ -2147,30 +2236,6 @@
}
}
-void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id,
- std::vector<JDWP::ObjectId>* child_thread_group_ids) {
- ScopedObjectAccess soa(Thread::Current());
- JDWP::JdwpError error;
- mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id, &error);
- CHECK(thread_group != nullptr) << error;
-
- // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
- mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
- mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
-
- // Get the array and size out of the ArrayList<ThreadGroup>...
- mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
- mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
- mirror::ObjectArray<mirror::Object>* groups_array =
- array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
- const int32_t size = size_field->GetInt(groups_array_list);
-
- // Copy the first 'size' elements out of the array into the result.
- for (int32_t i = 0; i < size; ++i) {
- child_thread_group_ids->push_back(gRegistry->Add(groups_array->Get(i)));
- }
-}
-
static int GetStackDepth(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
struct CountStackDepthVisitor : public StackVisitor {
explicit CountStackDepthVisitor(Thread* thread)
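
All of the debugger changes above converge on one pattern: stop CHECK-crashing on ids supplied over the wire, and instead decode, type-check, and surface a JDWP error code. A mock of that shape (the types and error constants are stand-ins, not the real JDWP API):

    #include <cassert>

    enum JdwpError { ERR_NONE, ERR_INVALID_OBJECT, ERR_INVALID_THREAD_GROUP };

    struct Object { bool is_thread_group; };

    // Registry stub: id 0 is unknown, id 1 a plain object, id 2 a thread group.
    static Object* Lookup(unsigned id, JdwpError* error) {
      static Object plain{false};
      static Object group{true};
      *error = ERR_NONE;
      if (id == 0) { *error = ERR_INVALID_OBJECT; return nullptr; }
      return (id == 1) ? &plain : &group;
    }

    // Mirrors DecodeThreadGroup: propagate lookup errors, reject wrong types
    // with a specific code, and never abort on bad debugger input.
    static Object* DecodeThreadGroup(unsigned id, JdwpError* error) {
      Object* obj = Lookup(id, error);
      if (*error != ERR_NONE) return nullptr;
      if (!obj->is_thread_group) { *error = ERR_INVALID_THREAD_GROUP; return nullptr; }
      return obj;
    }

    int main() {
      JdwpError error;
      assert(DecodeThreadGroup(0, &error) == nullptr && error == ERR_INVALID_OBJECT);
      assert(DecodeThreadGroup(1, &error) == nullptr && error == ERR_INVALID_THREAD_GROUP);
      assert(DecodeThreadGroup(2, &error) != nullptr && error == ERR_NONE);
    }
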
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 219210e..e171d78 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -381,7 +381,7 @@
static JDWP::JdwpError SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static std::string StringToUtf8(JDWP::ObjectId string_id)
+ static JDWP::JdwpError StringToUtf8(JDWP::ObjectId string_id, std::string* str)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -393,13 +393,19 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::thread_list_lock_);
static JDWP::JdwpError GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::thread_list_lock_);
- static std::string GetThreadGroupName(JDWP::ObjectId thread_group_id);
- static JDWP::ObjectId GetThreadGroupParent(JDWP::ObjectId thread_group_id)
+ static JDWP::JdwpError GetThreadGroupName(JDWP::ObjectId thread_group_id,
+ JDWP::ExpandBuf* pReply)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static JDWP::JdwpError GetThreadGroupParent(JDWP::ObjectId thread_group_id,
+ JDWP::ExpandBuf* pReply)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ static JDWP::JdwpError GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
+ JDWP::ExpandBuf* pReply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static JDWP::ObjectId GetSystemThreadGroupId()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static JDWP::ObjectId GetMainThreadGroupId();
static JDWP::JdwpThreadStatus ToJdwpThreadStatus(ThreadState state);
static JDWP::JdwpError GetThreadStatus(JDWP::ObjectId thread_id,
@@ -414,11 +420,9 @@
// Fills 'thread_ids' with the threads in the given thread group. If thread_group_id == 0,
// returns all threads.
- static void GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>* thread_ids)
+ static void GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids)
LOCKS_EXCLUDED(Locks::thread_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void GetChildThreadGroups(JDWP::ObjectId thread_group_id,
- std::vector<JDWP::ObjectId>* child_thread_group_ids);
static JDWP::JdwpError GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result)
LOCKS_EXCLUDED(Locks::thread_list_lock_);
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 35095f9..e0a83f6 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -151,7 +151,12 @@
/* show detailed debug output */
if (resultTag == JT_STRING && exceptObjId == 0) {
if (resultValue != 0) {
- VLOG(jdwp) << " string '" << Dbg::StringToUtf8(resultValue) << "'";
+ if (VLOG_IS_ON(jdwp)) {
+ std::string result_string;
+ JDWP::JdwpError error = Dbg::StringToUtf8(resultValue, &result_string);
+ CHECK_EQ(error, JDWP::ERR_NONE);
+ VLOG(jdwp) << " string '" << result_string << "'";
+ }
} else {
VLOG(jdwp) << " string (null)";
}
@@ -220,7 +225,7 @@
static JdwpError VM_AllThreads(JdwpState*, Request*, ExpandBuf* pReply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::vector<ObjectId> thread_ids;
- Dbg::GetThreads(0, &thread_ids);
+ Dbg::GetThreads(nullptr /* all thread groups */, &thread_ids);
expandBufAdd4BE(pReply, thread_ids.size());
for (uint32_t i = 0; i < thread_ids.size(); ++i) {
@@ -919,7 +924,11 @@
static JdwpError SR_Value(JdwpState*, Request* request, ExpandBuf* pReply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectId stringObject = request->ReadObjectId();
- std::string str(Dbg::StringToUtf8(stringObject));
+ std::string str;
+ JDWP::JdwpError error = Dbg::StringToUtf8(stringObject, &str);
+ if (error != JDWP::ERR_NONE) {
+ return error;
+ }
VLOG(jdwp) << StringPrintf(" --> %s", PrintableString(str.c_str()).c_str());
@@ -1141,10 +1150,7 @@
static JdwpError TGR_Name(JdwpState*, Request* request, ExpandBuf* pReply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectId thread_group_id = request->ReadThreadGroupId();
-
- expandBufAddUtf8String(pReply, Dbg::GetThreadGroupName(thread_group_id));
-
- return ERR_NONE;
+ return Dbg::GetThreadGroupName(thread_group_id, pReply);
}
/*
@@ -1154,11 +1160,7 @@
static JdwpError TGR_Parent(JdwpState*, Request* request, ExpandBuf* pReply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectId thread_group_id = request->ReadThreadGroupId();
-
- ObjectId parentGroup = Dbg::GetThreadGroupParent(thread_group_id);
- expandBufAddObjectId(pReply, parentGroup);
-
- return ERR_NONE;
+ return Dbg::GetThreadGroupParent(thread_group_id, pReply);
}
/*
@@ -1168,22 +1170,7 @@
static JdwpError TGR_Children(JdwpState*, Request* request, ExpandBuf* pReply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectId thread_group_id = request->ReadThreadGroupId();
-
- std::vector<ObjectId> thread_ids;
- Dbg::GetThreads(thread_group_id, &thread_ids);
- expandBufAdd4BE(pReply, thread_ids.size());
- for (uint32_t i = 0; i < thread_ids.size(); ++i) {
- expandBufAddObjectId(pReply, thread_ids[i]);
- }
-
- std::vector<ObjectId> child_thread_groups_ids;
- Dbg::GetChildThreadGroups(thread_group_id, &child_thread_groups_ids);
- expandBufAdd4BE(pReply, child_thread_groups_ids.size());
- for (uint32_t i = 0; i < child_thread_groups_ids.size(); ++i) {
- expandBufAddObjectId(pReply, child_thread_groups_ids[i]);
- }
-
- return ERR_NONE;
+ return Dbg::GetThreadGroupChildren(thread_group_id, pReply);
}
/*
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index ae17070..8447616 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -285,14 +285,17 @@
}
inline StackMap ArtMethod::GetStackMap(uint32_t native_pc_offset) {
+ return GetOptimizedCodeInfo().GetStackMapForNativePcOffset(native_pc_offset);
+}
+
+inline CodeInfo ArtMethod::GetOptimizedCodeInfo() {
DCHECK(IsOptimized());
const void* code_pointer = GetQuickOatCodePointer();
DCHECK(code_pointer != nullptr);
uint32_t offset =
reinterpret_cast<const OatQuickMethodHeader*>(code_pointer)[-1].vmap_table_offset_;
const void* data = reinterpret_cast<const void*>(reinterpret_cast<const uint8_t*>(code_pointer) - offset);
- CodeInfo code_info(data);
- return code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ return CodeInfo(data);
}
inline void ArtMethod::SetOatNativeGcMapOffset(uint32_t gc_map_offset) {
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index d37aa57..de6ec05 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -155,7 +155,9 @@
// Temporary solution for detecting if a method has been optimized: the compiler
// does not create a GC map. Instead, the vmap table contains the stack map
// (as in stack_map.h).
- return (GetEntryPointFromQuickCompiledCode() != nullptr) && (GetNativeGcMap() == nullptr);
+ return (GetEntryPointFromQuickCompiledCode() != nullptr)
+ && (GetQuickOatCodePointer() != nullptr)
+ && (GetNativeGcMap() == nullptr);
}
bool IsPortableCompiled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -349,6 +351,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
StackMap GetStackMap(uint32_t native_pc_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ CodeInfo GetOptimizedCodeInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const uint8_t* GetNativeGcMap() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetFieldPtr<uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_));
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 521a2dd..f28d488 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -311,9 +311,15 @@
verifier->Verify();
verifier->DumpFailures(os);
os << verifier->info_messages_.str();
- verifier->Dump(os);
-
- return verifier;
+ // Only dump and return if no hard failures. Otherwise the verifier may not be fully initialized
+ // and querying any info is dangerous/can abort.
+ if (verifier->have_pending_hard_failure_) {
+ delete verifier;
+ return nullptr;
+ } else {
+ verifier->Dump(os);
+ return verifier;
+ }
}
MethodVerifier::MethodVerifier(Thread* self,
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 7929554..e914bd9 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -14,138 +14,57 @@
* limitations under the License.
*/
-#include <stdio.h>
-#include <memory>
-
-#include "class_linker.h"
-#include "dex_file-inl.h"
-#include "gc_map.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/object-inl.h"
-#include "scoped_thread_state_change.h"
-#include "thread.h"
+#include "check_reference_map_visitor.h"
#include "jni.h"
-#include "verifier/method_verifier.h"
namespace art {
-#define IS_IN_REF_BITMAP(ref_bitmap, reg) \
- (((reg) < m->GetCodeItem()->registers_size_) && \
- ((*((ref_bitmap) + (reg)/8) >> ((reg) % 8) ) & 0x01))
+#define CHECK_REGS_CONTAIN_REFS(native_pc_offset, ...) do { \
+ int t[] = {__VA_ARGS__}; \
+ int t_size = sizeof(t) / sizeof(*t); \
+ CheckReferences(t, t_size, m->NativePcOffset(m->ToNativePc(native_pc_offset))); \
+} while (false)
-#define CHECK_REGS_CONTAIN_REFS(...) \
- do { \
- int t[] = {__VA_ARGS__}; \
- int t_size = sizeof(t) / sizeof(*t); \
- for (int i = 0; i < t_size; ++i) \
- CHECK(IS_IN_REF_BITMAP(ref_bitmap, t[i])) \
- << "Error: Reg @ " << i << "-th argument is not in GC map"; \
- } while (false)
-
-struct ReferenceMap2Visitor : public StackVisitor {
- explicit ReferenceMap2Visitor(Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, NULL) {
- }
+struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
+ explicit ReferenceMap2Visitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : CheckReferenceMapVisitor(thread) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (CheckReferenceMapVisitor::VisitFrame()) {
+ return true;
+ }
mirror::ArtMethod* m = GetMethod();
- if (!m || m->IsNative() || m->IsRuntimeMethod() || IsShadowFrame()) {
- return true;
- }
- LOG(INFO) << "At " << PrettyMethod(m, false);
-
- NativePcOffsetToReferenceMap map(m->GetNativeGcMap());
-
- if (m->IsCalleeSaveMethod()) {
- LOG(WARNING) << "no PC for " << PrettyMethod(m);
- return true;
- }
-
- const uint8_t* ref_bitmap = NULL;
std::string m_name(m->GetName());
// Given the method name and the number of times the method has been called,
// we know the Dex registers with live reference values. Assert that what we
// find is what is expected.
if (m_name.compare("f") == 0) {
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x03U)));
- CHECK(ref_bitmap);
- CHECK_REGS_CONTAIN_REFS(8); // v8: this
-
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x06U)));
- CHECK(ref_bitmap);
- CHECK_REGS_CONTAIN_REFS(8, 1); // v8: this, v1: x
-
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x08U)));
- CHECK(ref_bitmap);
- CHECK_REGS_CONTAIN_REFS(8, 3, 1); // v8: this, v3: y, v1: x
-
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x0cU)));
- CHECK(ref_bitmap);
- CHECK_REGS_CONTAIN_REFS(8, 3, 1); // v8: this, v3: y, v1: x
-
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x0eU)));
- CHECK(ref_bitmap);
- CHECK_REGS_CONTAIN_REFS(8, 3, 1); // v8: this, v3: y, v1: x
-
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x10U)));
- CHECK(ref_bitmap);
- CHECK_REGS_CONTAIN_REFS(8, 3, 1); // v8: this, v3: y, v1: x
-
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x13U)));
- CHECK(ref_bitmap);
+ CHECK_REGS_CONTAIN_REFS(0x03U, 8); // v8: this
+ CHECK_REGS_CONTAIN_REFS(0x06U, 8, 1); // v8: this, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x08U, 8, 3, 1); // v8: this, v3: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x0cU, 8, 3, 1); // v8: this, v3: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x0eU, 8, 3, 1); // v8: this, v3: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x10U, 8, 3, 1); // v8: this, v3: y, v1: x
// v2 is added because of the instruction at DexPC 0024. Object merges with 0 is Object. See:
// 0024: move-object v3, v2
// 0025: goto 0013
// Detailed dex instructions for ReferenceMap.java are at the end of this function.
// CHECK_REGS_CONTAIN_REFS(8, 3, 2, 1); // v8: this, v3: y, v2: y, v1: x
// We eliminate the non-live registers at a return, so only v3 is live:
- CHECK_REGS_CONTAIN_REFS(3); // v3: y
-
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x18U)));
- CHECK(ref_bitmap);
- CHECK_REGS_CONTAIN_REFS(8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
-
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x1aU)));
- CHECK(ref_bitmap);
- CHECK_REGS_CONTAIN_REFS(8, 5, 2, 1, 0); // v8: this, v5: x[1], v2: y, v1: x, v0: ex
-
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x1dU)));
- CHECK(ref_bitmap);
- CHECK_REGS_CONTAIN_REFS(8, 5, 2, 1, 0); // v8: this, v5: x[1], v2: y, v1: x, v0: ex
-
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x1fU)));
- CHECK(ref_bitmap);
+ CHECK_REGS_CONTAIN_REFS(0x13U); // v3: y
+ CHECK_REGS_CONTAIN_REFS(0x18U, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
+ CHECK_REGS_CONTAIN_REFS(0x1aU, 8, 5, 2, 1, 0); // v8: this, v5: x[1], v2: y, v1: x, v0: ex
+ CHECK_REGS_CONTAIN_REFS(0x1dU, 8, 5, 2, 1, 0); // v8: this, v5: x[1], v2: y, v1: x, v0: ex
// v5 is removed from the root set because there is a "merge" operation.
// See 0015: if-nez v2, 001f.
- CHECK_REGS_CONTAIN_REFS(8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
-
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x21U)));
- CHECK(ref_bitmap);
- CHECK_REGS_CONTAIN_REFS(8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
-
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x27U)));
- CHECK(ref_bitmap);
- CHECK_REGS_CONTAIN_REFS(8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
-
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x29U)));
- CHECK(ref_bitmap);
- CHECK_REGS_CONTAIN_REFS(8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
-
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x2cU)));
- CHECK(ref_bitmap);
- CHECK_REGS_CONTAIN_REFS(8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
-
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x2fU)));
- CHECK(ref_bitmap);
- CHECK_REGS_CONTAIN_REFS(8, 4, 3, 2, 1); // v8: this, v4: ex, v3: y, v2: y, v1: x
-
- ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToNativePc(0x32U)));
- CHECK(ref_bitmap);
- CHECK_REGS_CONTAIN_REFS(8, 3, 2, 1, 0); // v8: this, v3: y, v2: y, v1: x, v0: ex
+ CHECK_REGS_CONTAIN_REFS(0x1fU, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
+ CHECK_REGS_CONTAIN_REFS(0x21U, 8, 2, 1, 0); // v8: this, v2: y, v1: x, v0: ex
+ CHECK_REGS_CONTAIN_REFS(0x27U, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x29U, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x2cU, 8, 4, 2, 1); // v8: this, v4: ex, v2: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x2fU, 8, 4, 3, 2, 1); // v8: this, v4: ex, v3: y, v2: y, v1: x
+ CHECK_REGS_CONTAIN_REFS(0x32U, 8, 3, 2, 1, 0); // v8: this, v3: y, v2: y, v1: x, v0: ex
}
return true;
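
On the macro rewrite: wrapping the body in do { ... } while (false) turns the expansion into a single statement, so call sites compose with if/else, and it is also why the define itself should not end in a semicolon (the caller supplies it). An illustration of the idiom:

    #include <cstdio>

    // Single-statement macro: safe inside an unbraced if/else.
    #define CHECK_PAIR(a, b) do { \
      if ((a) != (b)) std::printf("mismatch %d vs %d\n", (a), (b)); \
    } while (false)

    int main(int argc, char**) {
      if (argc > 1)
        CHECK_PAIR(argc, 2);   // expands to one statement; the trailing ';' is this one
      else
        std::printf("no args\n");
    }
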
diff --git a/test/004-StackWalk/stack_walk_jni.cc b/test/004-StackWalk/stack_walk_jni.cc
index 30a0d59..c40de7e 100644
--- a/test/004-StackWalk/stack_walk_jni.cc
+++ b/test/004-StackWalk/stack_walk_jni.cc
@@ -14,54 +14,29 @@
* limitations under the License.
*/
-#include <stdio.h>
-#include <memory>
-
-#include "class_linker.h"
-#include "gc_map.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/object-inl.h"
+#include "check_reference_map_visitor.h"
#include "jni.h"
-#include "scoped_thread_state_change.h"
namespace art {
-#define REG(reg_bitmap, reg) \
- (((reg) < m->GetCodeItem()->registers_size_) && \
- ((*((reg_bitmap) + (reg)/8) >> ((reg) % 8) ) & 0x01))
-
-#define CHECK_REGS(...) if (!IsShadowFrame()) { \
- int t[] = {__VA_ARGS__}; \
- int t_size = sizeof(t) / sizeof(*t); \
- for (int i = 0; i < t_size; ++i) \
- CHECK(REG(reg_bitmap, t[i])) << "Error: Reg " << i << " is not in RegisterMap"; \
- }
+#define CHECK_REGS(...) do { \
+ int t[] = {__VA_ARGS__}; \
+ int t_size = sizeof(t) / sizeof(*t); \
+ CheckReferences(t, t_size, GetNativePcOffset()); \
+} while (false)
static int gJava_StackWalk_refmap_calls = 0;
-struct TestReferenceMapVisitor : public StackVisitor {
- explicit TestReferenceMapVisitor(Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(thread, NULL) {
- }
+class TestReferenceMapVisitor : public CheckReferenceMapVisitor {
+ public:
+ explicit TestReferenceMapVisitor(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : CheckReferenceMapVisitor(thread) {}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* m = GetMethod();
- CHECK(m != NULL);
- LOG(INFO) << "At " << PrettyMethod(m, false);
-
- if (m->IsCalleeSaveMethod() || m->IsNative()) {
- LOG(WARNING) << "no PC for " << PrettyMethod(m);
- CHECK_EQ(GetDexPc(), DexFile::kDexNoIndex);
+ if (CheckReferenceMapVisitor::VisitFrame()) {
return true;
}
- const uint8_t* reg_bitmap = NULL;
- if (!IsShadowFrame()) {
- NativePcOffsetToReferenceMap map(m->GetNativeGcMap());
- reg_bitmap = map.FindBitMap(GetNativePcOffset());
- }
+ mirror::ArtMethod* m = GetMethod();
StringPiece m_name(m->GetName());
// Given the method name and the number of times the method has been called,
diff --git a/test/401-optimizing-compiler/src/Main.java b/test/401-optimizing-compiler/src/Main.java
index 2c6d1c2..07c407b 100644
--- a/test/401-optimizing-compiler/src/Main.java
+++ b/test/401-optimizing-compiler/src/Main.java
@@ -97,6 +97,11 @@
if (exception == null) {
throw new Error("Missing NullPointerException");
}
+
+ result = $opt$InvokeVirtualMethod();
+ if (result != 42) {
+ throw new Error("Unexpected result: " + result);
+ }
}
public static void invokePrivate() {
@@ -205,5 +210,13 @@
m.o = new Main();
}
+ public static int $opt$InvokeVirtualMethod() {
+ return new Main().virtualMethod();
+ }
+
+ public int virtualMethod() {
+ return 42;
+ }
+
Object o;
}