Merge "Don't do a null test in MarkGCCard if the value cannot be null."
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 03165ed..e02fe4b 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -579,6 +579,8 @@
log_verbosity.class_linker = true;
} else if (verbose_options[j] == "compiler") {
log_verbosity.compiler = true;
+ } else if (verbose_options[j] == "deopt") {
+ log_verbosity.deopt = true;
} else if (verbose_options[j] == "gc") {
log_verbosity.gc = true;
} else if (verbose_options[j] == "heap") {
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 502ef2f..740beab 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -82,8 +82,8 @@
virtual void EmitNativeCode(CodeGenerator* codegen) = 0;
- void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
- void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
+ virtual void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
+ virtual void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
void RecordPcInfo(CodeGenerator* codegen, HInstruction* instruction, uint32_t dex_pc);
bool IsCoreRegisterSaved(int reg) const {
@@ -102,11 +102,13 @@
return saved_fpu_stack_offsets_[reg];
}
- private:
+ protected:
static constexpr size_t kMaximumNumberOfExpectedRegisters = 32;
static constexpr uint32_t kRegisterNotSaved = -1;
uint32_t saved_core_stack_offsets_[kMaximumNumberOfExpectedRegisters];
uint32_t saved_fpu_stack_offsets_[kMaximumNumberOfExpectedRegisters];
+
+ private:
DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 89d809d..b6d99ab 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -65,6 +65,7 @@
using helpers::WRegisterFrom;
using helpers::XRegisterFrom;
using helpers::ARM64EncodableConstantOrRegister;
+using helpers::ArtVixlRegCodeCoherentForRegSet;
static constexpr size_t kHeapRefSize = sizeof(mirror::HeapReference<mirror::Object>);
static constexpr int kCurrentMethodStackOffset = 0;
@@ -106,6 +107,88 @@
#define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()
+// Calculate the memory operand used to save/restore the live registers.
+static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
+ RegisterSet* register_set,
+ int64_t spill_offset,
+ bool is_save) {
+ DCHECK(ArtVixlRegCodeCoherentForRegSet(register_set->GetCoreRegisters(),
+ codegen->GetNumberOfCoreRegisters(),
+ register_set->GetFloatingPointRegisters(),
+ codegen->GetNumberOfFloatingPointRegisters()));
+
+ CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize,
+ register_set->GetCoreRegisters() & (~callee_saved_core_registers.list()));
+ CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize,
+ register_set->GetFloatingPointRegisters() & (~callee_saved_fp_registers.list()));
+
+ MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler();
+ UseScratchRegisterScope temps(masm);
+
+ Register base = masm->StackPointer();
+ int64_t core_spill_size = core_list.TotalSizeInBytes();
+ int64_t fp_spill_size = fp_list.TotalSizeInBytes();
+ int64_t reg_size = kXRegSizeInBytes;
+ int64_t max_ls_pair_offset = spill_offset + core_spill_size + fp_spill_size - 2 * reg_size;
+ uint32_t ls_access_size = WhichPowerOf2(reg_size);
+ if (((core_list.Count() > 1) || (fp_list.Count() > 1)) &&
+ !masm->IsImmLSPair(max_ls_pair_offset, ls_access_size)) {
+    // If the offset does not fit in the instruction's immediate field, use an alternate register
+    // to compute the base address (the floating point registers' spill base address).
+ Register new_base = temps.AcquireSameSizeAs(base);
+ __ Add(new_base, base, Operand(spill_offset + core_spill_size));
+ base = new_base;
+ spill_offset = -core_spill_size;
+ int64_t new_max_ls_pair_offset = fp_spill_size - 2 * reg_size;
+ DCHECK(masm->IsImmLSPair(spill_offset, ls_access_size));
+ DCHECK(masm->IsImmLSPair(new_max_ls_pair_offset, ls_access_size));
+ }
+
+ if (is_save) {
+ __ StoreCPURegList(core_list, MemOperand(base, spill_offset));
+ __ StoreCPURegList(fp_list, MemOperand(base, spill_offset + core_spill_size));
+ } else {
+ __ LoadCPURegList(core_list, MemOperand(base, spill_offset));
+ __ LoadCPURegList(fp_list, MemOperand(base, spill_offset + core_spill_size));
+ }
+}
+
+void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
+ RegisterSet* register_set = locations->GetLiveRegisters();
+ size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
+ for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
+ if (!codegen->IsCoreCalleeSaveRegister(i) && register_set->ContainsCoreRegister(i)) {
+ // If the register holds an object, update the stack mask.
+ if (locations->RegisterContainsObject(i)) {
+ locations->SetStackBit(stack_offset / kVRegSize);
+ }
+ DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
+ saved_core_stack_offsets_[i] = stack_offset;
+ stack_offset += kXRegSizeInBytes;
+ }
+ }
+
+ for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
+ if (!codegen->IsFloatingPointCalleeSaveRegister(i) &&
+ register_set->ContainsFloatingPointRegister(i)) {
+ DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
+ DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
+ saved_fpu_stack_offsets_[i] = stack_offset;
+ stack_offset += kDRegSizeInBytes;
+ }
+ }
+
+ SaveRestoreLiveRegistersHelper(codegen, register_set,
+ codegen->GetFirstRegisterSlotInSlowPath(), true /* is_save */);
+}
+
+void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
+ RegisterSet* register_set = locations->GetLiveRegisters();
+ SaveRestoreLiveRegistersHelper(codegen, register_set,
+ codegen->GetFirstRegisterSlotInSlowPath(), false /* is_save */);
+}
+
class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
public:
BoundsCheckSlowPathARM64(HBoundsCheck* instruction,
@@ -530,6 +613,19 @@
GetAssembler()->cfi().DefCFAOffset(GetFrameSize());
}
+vixl::CPURegList CodeGeneratorARM64::GetFramePreservedCoreRegisters() const {
+ DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spill_mask_, GetNumberOfCoreRegisters(), 0, 0));
+ return vixl::CPURegList(vixl::CPURegister::kRegister, vixl::kXRegSize,
+ core_spill_mask_);
+}
+
+vixl::CPURegList CodeGeneratorARM64::GetFramePreservedFPRegisters() const {
+ DCHECK(ArtVixlRegCodeCoherentForRegSet(0, 0, fpu_spill_mask_,
+ GetNumberOfFloatingPointRegisters()));
+ return vixl::CPURegList(vixl::CPURegister::kFPRegister, vixl::kDRegSize,
+ fpu_spill_mask_);
+}
+
void CodeGeneratorARM64::Bind(HBasicBlock* block) {
__ Bind(GetLabelOf(block));
}
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index e87b888..b56ca10 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -70,6 +70,9 @@
vixl::Label* GetEntryLabel() { return &entry_label_; }
vixl::Label* GetExitLabel() { return &exit_label_; }
+ void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
+ void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) OVERRIDE;
+
private:
vixl::Label entry_label_;
vixl::Label exit_label_;
@@ -234,15 +237,8 @@
void GenerateFrameEntry() OVERRIDE;
void GenerateFrameExit() OVERRIDE;
- vixl::CPURegList GetFramePreservedCoreRegisters() const {
- return vixl::CPURegList(vixl::CPURegister::kRegister, vixl::kXRegSize,
- core_spill_mask_);
- }
-
- vixl::CPURegList GetFramePreservedFPRegisters() const {
- return vixl::CPURegList(vixl::CPURegister::kFPRegister, vixl::kDRegSize,
- fpu_spill_mask_);
- }
+ vixl::CPURegList GetFramePreservedCoreRegisters() const;
+ vixl::CPURegList GetFramePreservedFPRegisters() const;
void Bind(HBasicBlock* block) OVERRIDE;
@@ -284,10 +280,10 @@
Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
- size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id);
- size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id);
- size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id);
- size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id);
+ size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
+ size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) OVERRIDE;
// The number of registers that can be allocated. The register allocator may
// decide to reserve and not use a few of them.
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 53f1f3c..246fff9 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -218,6 +218,28 @@
return Location::RequiresRegister();
}
+// Check if registers in art register set have the same register code in vixl. If the register
+// codes are the same, we can initialize vixl register list simply by the register masks. Currently,
+// only SP/WSP and XZR/WZR codes are different between art and vixl.
+// Note: This function is only used for debug checks.
+static inline bool ArtVixlRegCodeCoherentForRegSet(uint32_t art_core_registers,
+ size_t num_core,
+ uint32_t art_fpu_registers,
+ size_t num_fpu) {
+  // The register masks won't work if the number of registers is larger than 32.
+ DCHECK_GE(sizeof(art_core_registers) * 8, num_core);
+ DCHECK_GE(sizeof(art_fpu_registers) * 8, num_fpu);
+ for (size_t art_reg_code = 0; art_reg_code < num_core; ++art_reg_code) {
+ if (RegisterSet::Contains(art_core_registers, art_reg_code)) {
+ if (art_reg_code != static_cast<size_t>(VIXLRegCodeFromART(art_reg_code))) {
+ return false;
+ }
+ }
+ }
+ // There is no register code translation for float registers.
+ return true;
+}
+
} // namespace helpers
} // namespace arm64
} // namespace art
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 7ea1240..be28755 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -42,13 +42,18 @@
class StringList {
public:
+ enum Format {
+ kArrayBrackets,
+ kSetBrackets,
+ };
+
// Create an empty list
- StringList() : is_empty_(true) {}
+ explicit StringList(Format format = kArrayBrackets) : format_(format), is_empty_(true) {}
// Construct StringList from a linked list. List element class T
// must provide methods `GetNext` and `Dump`.
template<class T>
- explicit StringList(T* first_entry) : StringList() {
+ explicit StringList(T* first_entry, Format format = kArrayBrackets) : StringList(format) {
for (T* current = first_entry; current != nullptr; current = current->GetNext()) {
current->Dump(NewEntryStream());
}
@@ -64,6 +69,7 @@
}
private:
+ Format format_;
bool is_empty_;
std::ostringstream sstream_;
@@ -71,7 +77,13 @@
};
std::ostream& operator<<(std::ostream& os, const StringList& list) {
- return os << "[" << list.sstream_.str() << "]";
+ switch (list.format_) {
+ case StringList::kArrayBrackets: return os << "[" << list.sstream_.str() << "]";
+ case StringList::kSetBrackets: return os << "{" << list.sstream_.str() << "}";
+ default:
+ LOG(FATAL) << "Invalid StringList format";
+ UNREACHABLE();
+ }
}
/**
@@ -291,7 +303,8 @@
StartAttributeStream("liveness") << instruction->GetLifetimePosition();
if (instruction->HasLiveInterval()) {
LiveInterval* interval = instruction->GetLiveInterval();
- StartAttributeStream("ranges") << StringList(interval->GetFirstRange());
+ StartAttributeStream("ranges")
+ << StringList(interval->GetFirstRange(), StringList::kSetBrackets);
StartAttributeStream("uses") << StringList(interval->GetFirstUse());
StartAttributeStream("env_uses") << StringList(interval->GetFirstEnvironmentUse());
StartAttributeStream("is_fixed") << interval->IsFixed();
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index ce4bbd4..4b19c5b 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -76,7 +76,7 @@
}
void Dump(std::ostream& stream) const {
- stream << start_ << "-" << end_;
+ stream << "[" << start_ << "," << end_ << ")";
}
LiveRange* Dup(ArenaAllocator* allocator) const {
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 8b34374..35b50d1 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -39,6 +39,7 @@
struct LogVerbosity {
bool class_linker; // Enabled with "-verbose:class".
bool compiler;
+ bool deopt;
bool gc;
bool heap;
bool jdwp;
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index 6a8aaf2..1b1ef66 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/logging.h"
#include "callee_save_frame.h"
#include "dex_file-inl.h"
#include "interpreter/interpreter.h"
@@ -29,6 +30,12 @@
extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
+
+ if (VLOG_IS_ON(deopt)) {
+ LOG(INFO) << "Deopting:";
+ self->Dump(LOG(INFO));
+ }
+
self->SetException(Thread::GetDeoptimizationException());
self->QuickDeliverException();
}
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 53e56da..5401b56 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -373,8 +373,7 @@
: mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {
}
- void operator()(const Object* obj) const ALWAYS_INLINE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ void operator()(const Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
if (kProfileLargeObjects) {
// TODO: Differentiate between marking and testing somehow.
++mark_sweep_->large_object_test_;
@@ -385,17 +384,51 @@
(kIsDebugBuild && large_object_space != nullptr &&
!large_object_space->Contains(obj)))) {
LOG(INTERNAL_FATAL) << "Tried to mark " << obj << " not contained by any spaces";
- LOG(INTERNAL_FATAL) << "Attempting see if it's a bad root";
- mark_sweep_->VerifyRoots();
if (holder_ != nullptr) {
+ size_t holder_size = holder_->SizeOf();
ArtField* field = holder_->FindFieldByOffset(offset_);
- LOG(INTERNAL_FATAL) << "Field info: holder=" << holder_
+ LOG(INTERNAL_FATAL) << "Field info: "
+ << " holder=" << holder_
+ << " holder_size=" << holder_size
<< " holder_type=" << PrettyTypeOf(holder_)
<< " offset=" << offset_.Uint32Value()
- << " field=" << (field != nullptr ? field->GetName() : "nullptr");
+ << " field=" << (field != nullptr ? field->GetName() : "nullptr")
+ << " field_type="
+ << (field != nullptr ? field->GetTypeDescriptor() : "")
+ << " first_ref_field_offset="
+ << (holder_->IsClass()
+ ? holder_->AsClass()->GetFirstReferenceStaticFieldOffset()
+ : holder_->GetClass()->GetFirstReferenceInstanceFieldOffset())
+ << " num_of_ref_fields="
+ << (holder_->IsClass()
+ ? holder_->AsClass()->NumReferenceStaticFields()
+ : holder_->GetClass()->NumReferenceInstanceFields())
+ << "\n";
}
PrintFileToLog("/proc/self/maps", LogSeverity::INTERNAL_FATAL);
MemMap::DumpMaps(LOG(INTERNAL_FATAL), true);
+ {
+ LOG(INTERNAL_FATAL) << "Attempting see if it's a bad root";
+ Thread* self = Thread::Current();
+ if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
+ mark_sweep_->VerifyRoots();
+ } else {
+ const bool heap_bitmap_exclusive_locked =
+ Locks::heap_bitmap_lock_->IsExclusiveHeld(self);
+ if (heap_bitmap_exclusive_locked) {
+ Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
+ }
+ Locks::mutator_lock_->SharedUnlock(self);
+ ThreadList* tl = Runtime::Current()->GetThreadList();
+ tl->SuspendAll(__FUNCTION__);
+ mark_sweep_->VerifyRoots();
+ tl->ResumeAll();
+ Locks::mutator_lock_->SharedLock(self);
+ if (heap_bitmap_exclusive_locked) {
+ Locks::heap_bitmap_lock_->ExclusiveLock(self);
+ }
+ }
+ }
LOG(FATAL) << "Can't mark invalid object";
}
}
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 8b504c1..922334e 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -62,7 +62,7 @@
RUNTIME_OPTIONS_KEY (Unit, DumpJITInfoOnShutdown)
RUNTIME_OPTIONS_KEY (Unit, IgnoreMaxFootprint)
RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode)
-RUNTIME_OPTIONS_KEY (bool, UseTLAB, kUseTlab)
+RUNTIME_OPTIONS_KEY (bool, UseTLAB, (kUseTlab || kUseReadBarrier))
RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true)
RUNTIME_OPTIONS_KEY (bool, UseJIT, false)
RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold, jit::Jit::kDefaultCompileThreshold)
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 800acaa..6795516 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -99,15 +99,45 @@
cur_quick_frame_pc_(0),
num_frames_(num_frames),
cur_depth_(0),
+ current_inlining_depth_(0),
context_(context) {
DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
}
+InlineInfo StackVisitor::GetCurrentInlineInfo() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* outer_method = GetCurrentQuickFrame()->AsMirrorPtr();
+ uint32_t native_pc_offset = outer_method->NativeQuickPcOffset(cur_quick_frame_pc_);
+ CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ return code_info.GetInlineInfoOf(stack_map);
+}
+
+mirror::ArtMethod* StackVisitor::GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (cur_shadow_frame_ != nullptr) {
+ return cur_shadow_frame_->GetMethod();
+ } else if (cur_quick_frame_ != nullptr) {
+ if (IsInInlinedFrame()) {
+ size_t depth_in_stack_map = current_inlining_depth_ - 1;
+ return GetCurrentQuickFrame()->AsMirrorPtr()->GetDexCacheResolvedMethod(
+ GetCurrentInlineInfo().GetMethodIndexAtDepth(depth_in_stack_map));
+ } else {
+ return cur_quick_frame_->AsMirrorPtr();
+ }
+ } else {
+ return nullptr;
+ }
+}
+
uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
if (cur_shadow_frame_ != nullptr) {
return cur_shadow_frame_->GetDexPC();
} else if (cur_quick_frame_ != nullptr) {
- return GetMethod()->ToDexPc(cur_quick_frame_pc_, abort_on_failure);
+ if (IsInInlinedFrame()) {
+ size_t depth_in_stack_map = current_inlining_depth_ - 1;
+ return GetCurrentInlineInfo().GetDexPcAtDepth(depth_in_stack_map);
+ } else {
+ return GetMethod()->ToDexPc(cur_quick_frame_pc_, abort_on_failure);
+ }
} else {
return 0;
}
@@ -225,18 +255,27 @@
bool StackVisitor::GetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const {
- const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
- DCHECK(code_pointer != nullptr);
- uint32_t native_pc_offset = m->NativeQuickPcOffset(cur_quick_frame_pc_);
- CodeInfo code_info = m->GetOptimizedCodeInfo();
- StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ DCHECK_EQ(m, GetMethod());
const DexFile::CodeItem* code_item = m->GetCodeItem();
DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be null or how would we compile
// its instructions?
- DCHECK_LT(vreg, code_item->registers_size_);
uint16_t number_of_dex_registers = code_item->registers_size_;
- DexRegisterMap dex_register_map =
- code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ DCHECK_LT(vreg, code_item->registers_size_);
+
+ mirror::ArtMethod* outer_method = GetCurrentQuickFrame()->AsMirrorPtr();
+ const void* code_pointer = outer_method->GetQuickOatCodePointer(sizeof(void*));
+ DCHECK(code_pointer != nullptr);
+ CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
+
+ uint32_t native_pc_offset = outer_method->NativeQuickPcOffset(cur_quick_frame_pc_);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ size_t depth_in_stack_map = current_inlining_depth_ - 1;
+
+ DexRegisterMap dex_register_map = IsInInlinedFrame()
+ ? code_info.GetDexRegisterMapAtDepth(
+ depth_in_stack_map, code_info.GetInlineInfoOf(stack_map), number_of_dex_registers)
+ : code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+
DexRegisterLocation::Kind location_kind =
dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info);
switch (location_kind) {
@@ -704,6 +743,26 @@
mirror::ArtMethod* method = cur_quick_frame_->AsMirrorPtr();
while (method != nullptr) {
SanityCheckFrame();
+
+ if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
+ && method->IsOptimized(sizeof(void*))) {
+ CodeInfo code_info = method->GetOptimizedCodeInfo();
+ uint32_t native_pc_offset = method->NativeQuickPcOffset(cur_quick_frame_pc_);
+ StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
+ if (stack_map.HasInlineInfo(code_info)) {
+ InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
+ DCHECK_EQ(current_inlining_depth_, 0u);
+ for (current_inlining_depth_ = inline_info.GetDepth();
+ current_inlining_depth_ != 0;
+ --current_inlining_depth_) {
+ bool should_continue = VisitFrame();
+ if (UNLIKELY(!should_continue)) {
+ return;
+ }
+ }
+ }
+ }
+
bool should_continue = VisitFrame();
if (UNLIKELY(!should_continue)) {
return;
diff --git a/runtime/stack.h b/runtime/stack.h
index bf61016..5b43848 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -36,9 +36,10 @@
} // namespace mirror
class Context;
-class ShadowFrame;
class HandleScope;
+class InlineInfo;
class ScopedObjectAccess;
+class ShadowFrame;
class StackVisitor;
class Thread;
@@ -430,15 +431,7 @@
void WalkStack(bool include_transitions = false)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (cur_shadow_frame_ != nullptr) {
- return cur_shadow_frame_->GetMethod();
- } else if (cur_quick_frame_ != nullptr) {
- return cur_quick_frame_->AsMirrorPtr();
- } else {
- return nullptr;
- }
- }
+ mirror::ArtMethod* GetMethod() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsShadowFrame() const {
return cur_shadow_frame_ != nullptr;
@@ -611,7 +604,7 @@
}
bool IsInInlinedFrame() const {
- return false;
+ return current_inlining_depth_ != 0;
}
uintptr_t GetCurrentQuickFramePc() const {
@@ -703,6 +696,8 @@
void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ InlineInfo GetCurrentInlineInfo() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
Thread* const thread_;
const StackWalkKind walk_kind_;
ShadowFrame* cur_shadow_frame_;
@@ -712,6 +707,9 @@
size_t num_frames_;
// Depth of the frame we're currently at.
size_t cur_depth_;
+ // Current inlining depth of the method we are currently at.
+ // 0 if there is no inlined frame.
+ size_t current_inlining_depth_;
protected:
Context* const context_;
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 475fe8b..1b1bc54 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -286,6 +286,13 @@
}
}
+static bool IsLargeMethod(const DexFile::CodeItem* const code_item) {
+ uint16_t registers_size = code_item->registers_size_;
+ uint32_t insns_size = code_item->insns_size_in_code_units_;
+
+ return registers_size * insns_size > 4*1024*1024;
+}
+
MethodVerifier::FailureKind MethodVerifier::VerifyMethod(Thread* self, uint32_t method_idx,
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
@@ -329,7 +336,8 @@
uint64_t duration_ns = NanoTime() - start_ns;
if (duration_ns > MsToNs(100)) {
LOG(WARNING) << "Verification of " << PrettyMethod(method_idx, *dex_file)
- << " took " << PrettyDuration(duration_ns);
+ << " took " << PrettyDuration(duration_ns)
+ << (IsLargeMethod(code_item) ? " (large method)" : "");
}
}
return result;
@@ -1211,10 +1219,6 @@
uint16_t registers_size = code_item_->registers_size_;
uint32_t insns_size = code_item_->insns_size_in_code_units_;
- if (registers_size * insns_size > 4*1024*1024) {
- LOG(WARNING) << "warning: method is huge (regs=" << registers_size
- << " insns_size=" << insns_size << ")";
- }
/* Create and initialize table holding register status */
reg_table_.Init(kTrackCompilerInterestPoints,
insn_flags_.get(),
diff --git a/test/462-checker-inlining-across-dex-files/src/Main.java b/test/462-checker-inlining-across-dex-files/src/Main.java
index 3d583b4..218c7ce 100644
--- a/test/462-checker-inlining-across-dex-files/src/Main.java
+++ b/test/462-checker-inlining-across-dex-files/src/Main.java
@@ -120,7 +120,8 @@
// CHECK-START: java.lang.Class Main.inlineMainClass() inliner (after)
// CHECK-DAG: Return [<<Class:l\d+>>]
// CHECK-DAG: <<Class>> LoadClass
- // Note: Verify backwards because there are two LoadClass instructions
+ // Note: There are two LoadClass instructions. We obtain the correct
+ // instruction id by matching the Return's input list first.
public static Class inlineMainClass() {
return OtherDex.returnMainClass();
@@ -148,7 +149,8 @@
// CHECK-START: java.lang.Class Main.inlineOtherDexCallingMain() inliner (after)
// CHECK-DAG: Return [<<Class:l\d+>>]
// CHECK-DAG: <<Class>> LoadClass
- // Note: Verify backwards because there are two LoadClass instructions
+ // Note: There are two LoadClass instructions. We obtain the correct
+ // instruction id by matching the Return's input list first.
public static Class inlineOtherDexCallingMain() {
return OtherDex.returnOtherDexCallingMain();
diff --git a/test/482-checker-loop-back-edge-use/src/Main.java b/test/482-checker-loop-back-edge-use/src/Main.java
index 9e1076a..0ed9267 100644
--- a/test/482-checker-loop-back-edge-use/src/Main.java
+++ b/test/482-checker-loop-back-edge-use/src/Main.java
@@ -18,14 +18,14 @@
public class Main {
// CHECK-START: void Main.loop1(boolean) liveness (after)
- // CHECK: ParameterValue liveness:2 ranges:[2-22] uses:[17,22]
+ // CHECK: ParameterValue liveness:2 ranges:{[2,22)} uses:[17,22]
// CHECK: Goto liveness:20
public static void loop1(boolean incoming) {
while (incoming) {}
}
// CHECK-START: void Main.loop2(boolean) liveness (after)
- // CHECK: ParameterValue liveness:2 ranges:[2-42] uses:[33,38,42]
+ // CHECK: ParameterValue liveness:2 ranges:{[2,42)} uses:[33,38,42]
// CHECK: Goto liveness:36
// CHECK: Goto liveness:40
public static void loop2(boolean incoming) {
@@ -36,7 +36,7 @@
}
// CHECK-START: void Main.loop3(boolean) liveness (after)
- // CHECK: ParameterValue liveness:2 ranges:[2-60] uses:[56,60]
+ // CHECK: ParameterValue liveness:2 ranges:{[2,60)} uses:[56,60]
// CHECK: Goto liveness:58
// CHECK-START: void Main.loop3(boolean) liveness (after)
@@ -50,7 +50,7 @@
}
// CHECK-START: void Main.loop4(boolean) liveness (after)
- // CHECK: ParameterValue liveness:2 ranges:[2-22] uses:[22]
+ // CHECK: ParameterValue liveness:2 ranges:{[2,22)} uses:[22]
// CHECK-START: void Main.loop4(boolean) liveness (after)
// CHECK-NOT: Goto liveness:20
@@ -63,7 +63,7 @@
}
// CHECK-START: void Main.loop5(boolean) liveness (after)
- // CHECK: ParameterValue liveness:2 ranges:[2-50] uses:[33,42,46,50]
+ // CHECK: ParameterValue liveness:2 ranges:{[2,50)} uses:[33,42,46,50]
// CHECK: Goto liveness:44
// CHECK: Goto liveness:48
public static void loop5(boolean incoming) {
@@ -76,7 +76,7 @@
}
// CHECK-START: void Main.loop6(boolean) liveness (after)
- // CHECK ParameterValue liveness:2 ranges:[2-46] uses:[24,46]
+ // CHECK ParameterValue liveness:2 ranges:{[2,46)} uses:[24,46]
// CHECK: Goto liveness:44
// CHECK-START: void Main.loop6(boolean) liveness (after)
@@ -90,7 +90,7 @@
}
// CHECK-START: void Main.loop7(boolean) liveness (after)
- // CHECK: ParameterValue liveness:2 ranges:[2-50] uses:[32,41,46,50]
+ // CHECK: ParameterValue liveness:2 ranges:{[2,50)} uses:[32,41,46,50]
// CHECK: Goto liveness:44
// CHECK: Goto liveness:48
public static void loop7(boolean incoming) {
@@ -102,7 +102,7 @@
}
// CHECK-START: void Main.loop8() liveness (after)
- // CHECK: StaticFieldGet liveness:12 ranges:[12-44] uses:[35,40,44]
+ // CHECK: StaticFieldGet liveness:12 ranges:{[12,44)} uses:[35,40,44]
// CHECK: Goto liveness:38
// CHECK: Goto liveness:42
public static void loop8() {
@@ -114,7 +114,7 @@
}
// CHECK-START: void Main.loop9() liveness (after)
- // CHECK: StaticFieldGet liveness:22 ranges:[22-36] uses:[31,36]
+ // CHECK: StaticFieldGet liveness:22 ranges:{[22,36)} uses:[31,36]
// CHECK: Goto liveness:38
public static void loop9() {
while (Runtime.getRuntime() != null) {
diff --git a/tools/checker/README b/tools/checker/README
index 2763948..858a773 100644
--- a/tools/checker/README
+++ b/tools/checker/README
@@ -47,7 +47,7 @@
// CHECK-START: int MyClass.MyMethod() constant_folding (after)
// CHECK: <<ID:i\d+>> IntConstant {{11|22}}
- // CHECK: Return [ <<ID>> ]
+ // CHECK: Return [<<ID>>]
The engine will attempt to match the check lines against the output of the
group named on the first line. Together they verify that the CFG after