Merge "Remove HNativeDebugInfo from start of basic blocks."
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 05e1356..35ec7d4 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -368,7 +368,6 @@
if (native_debuggable) {
const uint32_t num_instructions = code_item.insns_size_in_code_units_;
native_debug_info_locations = new (arena_) ArenaBitVector(arena_, num_instructions, false);
- native_debug_info_locations->ClearAllBits();
FindNativeDebugInfoLocations(code_item, native_debug_info_locations);
}
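[Editor's note] For orientation, a minimal sketch of the bookkeeping used here, with std::vector<bool> standing in for ArenaBitVector and made-up pcs; the explicit ClearAllBits() is dropped above, presumably because the arena-allocated bit vector already comes back zeroed:

    #include <cstdint>
    #include <vector>

    int main() {
      // One bit per dex code unit; a set bit means "emit native debug info
      // for the instruction that starts at this dex pc".
      const uint32_t num_instructions = 16;  // code_item.insns_size_in_code_units_
      std::vector<bool> locations(num_instructions, false);  // starts all-clear
      locations[0] = true;  // e.g. a line-table entry at dex pc 0
      locations[5] = true;  // e.g. the next statement starts at dex pc 5
      return 0;
    }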
@@ -443,23 +442,15 @@
}
};
dex_file_->DecodeDebugPositionInfo(&code_item, Callback::Position, locations);
- // Add native debug info at the start of every basic block.
- for (uint32_t pc = 0; pc < code_item.insns_size_in_code_units_; pc++) {
- if (FindBlockStartingAt(pc) != nullptr) {
- locations->SetBit(pc);
- }
- }
// Instruction-specific tweaks.
const Instruction* const begin = Instruction::At(code_item.insns_);
const Instruction* const end = begin->RelativeAt(code_item.insns_size_in_code_units_);
for (const Instruction* inst = begin; inst < end; inst = inst->Next()) {
switch (inst->Opcode()) {
- case Instruction::MOVE_EXCEPTION:
- case Instruction::MOVE_RESULT:
- case Instruction::MOVE_RESULT_WIDE:
- case Instruction::MOVE_RESULT_OBJECT: {
- // The compiler checks that there are no instructions before those.
- // So generate HNativeDebugInfo after them instead.
+ case Instruction::MOVE_EXCEPTION: {
+ // Stop in native debugger after the exception has been moved.
+ // The compiler also expects the move to be at the start of the basic
+ // block, so we do not want to interfere by inserting native-debug-info
+ // before it.
locations->ClearBit(inst->GetDexPc(code_item.insns_));
const Instruction* next = inst->Next();
if (next < end) {
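[Editor's note] The hunk above ends mid-statement; its intent, sketched below under the same bit-vector model (ShiftPastMoveException is a hypothetical helper, not in the patch), is to move the set bit from the MOVE_EXCEPTION's pc to the instruction right after it:

    #include <cstdint>
    #include <vector>

    // Hypothetical helper: shift a debug-info location past a MOVE_EXCEPTION
    // so the debugger stops only after the exception object has been moved.
    void ShiftPastMoveException(std::vector<bool>& locations,
                                uint32_t pc, uint32_t insn_size_in_code_units) {
      locations[pc] = false;  // locations->ClearBit(inst->GetDexPc(...))
      const uint32_t next_pc = pc + insn_size_in_code_units;
      if (next_pc < locations.size()) {  // next < end
        locations[next_pc] = true;  // locations->SetBit(next->GetDexPc(...))
      }
    }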
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index c2c8ccf..c67efc0 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -226,6 +226,10 @@
// errors where we reference that label.
if (block->IsSingleJump()) continue;
Bind(block);
+ // This ensures that we have correct native line mapping for all native instructions.
+ // It is necessary to make stepping over a statement work. Otherwise, any initial
+ // instructions (e.g. moves) would be assumed to be the start of the next statement.
+ MaybeRecordNativeDebugInfo(nullptr /* instruction */, block->GetDexPc());
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
DisassemblyScope disassembly_scope(current, *this);
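[Editor's note] A sketch of the per-block emission order this hunk establishes (Entry and the pcs are illustrative; the real code talks to StackMapStream and the assembler):

    #include <cstdint>
    #include <vector>

    struct Entry { uint32_t dex_pc; uint32_t native_pc; };

    int main() {
      std::vector<Entry> stack_maps;  // stands in for stack_map_stream_
      uint32_t native_pc = 0;         // stands in for GetAssembler()->CodeSize()

      // Bind(block); then record a map at the block's dex pc *before* any code:
      stack_maps.push_back({/* block->GetDexPc() */ 4, native_pc});
      native_pc += 4;  // first native instruction (e.g. an input move)
      native_pc += 4;  // second native instruction
      // With the block-entry map in place, the move above is attributed to dex
      // pc 4 rather than to whatever statement ended the previous block.
      return 0;
    }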
@@ -733,7 +737,8 @@
uint32_t native_pc = GetAssembler()->CodeSize();
if (instruction == nullptr) {
- // For stack overflow checks.
+ // For stack overflow checks and native-debug-info entries without dex register
+ // mapping (i.e. at the start of a basic block or the start of a slow path).
stack_map_stream_.BeginStackMapEntry(outer_dex_pc, native_pc, 0, 0, 0, 0);
stack_map_stream_.EndStackMapEntry();
return;
@@ -808,6 +813,16 @@
return count > 0 && stack_map_stream_.GetStackMap(count - 1).native_pc_offset == pc;
}
+void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction, uint32_t dex_pc) {
+ if (GetCompilerOptions().GetNativeDebuggable() && dex_pc != kNoDexPc) {
+ if (HasStackMapAtCurrentPc()) {
+ // Ensure that we do not collide with the stack map of the previous instruction.
+ GenerateNop();
+ }
+ RecordPcInfo(instruction, dex_pc);
+ }
+}
+
void CodeGenerator::RecordCatchBlockInfo() {
ArenaAllocator* arena = graph_->GetArena();
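[Editor's note] The new helper in one self-contained sketch (all names below are stand-ins; the real method consults CompilerOptions, StackMapStream and the backend's GenerateNop()):

    #include <cstdint>
    #include <vector>

    struct Entry { uint32_t dex_pc; uint32_t native_pc; };
    constexpr uint32_t kNoDexPc = UINT32_MAX;

    // Sketch: only emit in native-debuggable compiles, and never let two stack
    // maps share a native pc -- pad with a nop first if one is already there.
    void MaybeRecordNativeDebugInfoSketch(std::vector<Entry>& maps,
                                          uint32_t& code_size,  // assembler size
                                          bool native_debuggable,
                                          uint32_t dex_pc) {
      if (!native_debuggable || dex_pc == kNoDexPc) {
        return;
      }
      if (!maps.empty() && maps.back().native_pc == code_size) {
        code_size += 4;  // GenerateNop(): move past the colliding pc
      }
      maps.push_back({dex_pc, code_size});  // RecordPcInfo(instruction, dex_pc)
    }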
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 49c193e..789bf40 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -267,6 +267,8 @@
void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path = nullptr);
// Check whether we have already recorded mapping at this PC.
bool HasStackMapAtCurrentPc();
+ // Record extra stack maps if we support native debugging.
+ void MaybeRecordNativeDebugInfo(HInstruction* instruction, uint32_t dex_pc);
bool CanMoveNullCheckToUser(HNullCheck* null_check);
void MaybeRecordImplicitNullCheck(HInstruction* instruction);
@@ -440,6 +442,8 @@
// Copy the result of a call into the given target.
virtual void MoveFromReturnRegister(Location trg, Primitive::Type type) = 0;
+ virtual void GenerateNop() = 0;
+
protected:
// Method patch info used for recording locations of required linker patches and
// target methods. The target method can be used for various purposes, whether for
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 87f52c6..f60c5e9 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1557,11 +1557,11 @@
}
void InstructionCodeGeneratorARM::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- if (codegen_->HasStackMapAtCurrentPc()) {
- // Ensure that we do not collide with the stack map of the previous instruction.
- __ nop();
- }
- codegen_->RecordPcInfo(info, info->GetDexPc());
+ codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
+}
+
+void CodeGeneratorARM::GenerateNop() {
+ __ nop();
}
void LocationsBuilderARM::HandleCondition(HCondition* cond) {
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index cfd7a3b..2e4dc1e 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -510,6 +510,8 @@
// artReadBarrierForRootSlow.
void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
+ void GenerateNop();
+
private:
// Factored implementation of GenerateFieldLoadWithBakerReadBarrier
// and GenerateArrayLoadWithBakerReadBarrier.
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 435ae5e..0c2e9cf 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -3057,11 +3057,11 @@
}
void InstructionCodeGeneratorARM64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- if (codegen_->HasStackMapAtCurrentPc()) {
- // Ensure that we do not collide with the stack map of the previous instruction.
- __ Nop();
- }
- codegen_->RecordPcInfo(info, info->GetDexPc());
+ codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
+}
+
+void CodeGeneratorARM64::GenerateNop() {
+ __ Nop();
}
void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 360488e..fea87ab 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -532,6 +532,8 @@
// artReadBarrierForRootSlow.
void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root);
+ void GenerateNop();
+
private:
// Factored implementation of GenerateFieldLoadWithBakerReadBarrier
// and GenerateArrayLoadWithBakerReadBarrier.
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index c500ea4..9dd7c51 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3407,11 +3407,11 @@
}
void InstructionCodeGeneratorMIPS::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- if (codegen_->HasStackMapAtCurrentPc()) {
- // Ensure that we do not collide with the stack map of the previous instruction.
- __ Nop();
- }
- codegen_->RecordPcInfo(info, info->GetDexPc());
+ codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
+}
+
+void CodeGeneratorMIPS::GenerateNop() {
+ __ Nop();
}
void LocationsBuilderMIPS::HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info) {
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index dd0641c..49c9583 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -360,6 +360,8 @@
UNIMPLEMENTED(FATAL) << "Not implemented on MIPS";
}
+ void GenerateNop();
+
private:
// Labels for each block that will be compiled.
MipsLabel* block_labels_;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index e3a44f1..2c0ae9b 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2732,11 +2732,11 @@
}
void InstructionCodeGeneratorMIPS64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- if (codegen_->HasStackMapAtCurrentPc()) {
- // Ensure that we do not collide with the stack map of the previous instruction.
- __ Nop();
- }
- codegen_->RecordPcInfo(info, info->GetDexPc());
+ codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
+}
+
+void CodeGeneratorMIPS64::GenerateNop() {
+ __ Nop();
}
void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index eb7315a..c298097 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -352,6 +352,8 @@
UNIMPLEMENTED(FATAL) << "Not implemented on MIPS64";
}
+ void GenerateNop();
+
private:
// Labels for each block that will be compiled.
Mips64Label* block_labels_; // Indexed by block id.
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f032f51..236dea1 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1649,11 +1649,11 @@
}
void InstructionCodeGeneratorX86::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- if (codegen_->HasStackMapAtCurrentPc()) {
- // Ensure that we do not collide with the stack map of the previous instruction.
- __ nop();
- }
- codegen_->RecordPcInfo(info, info->GetDexPc());
+ codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
+}
+
+void CodeGeneratorX86::GenerateNop() {
+ __ nop();
}
void LocationsBuilderX86::VisitLocal(HLocal* local) {
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 63e9b2f..0795f3b 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -540,6 +540,7 @@
}
}
+ void GenerateNop();
private:
// Factored implementation of GenerateFieldLoadWithBakerReadBarrier
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index f3c40b1..8def1de 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1632,11 +1632,11 @@
}
void InstructionCodeGeneratorX86_64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
- if (codegen_->HasStackMapAtCurrentPc()) {
- // Ensure that we do not collide with the stack map of the previous instruction.
- __ nop();
- }
- codegen_->RecordPcInfo(info, info->GetDexPc());
+ codegen_->MaybeRecordNativeDebugInfo(info, info->GetDexPc());
+}
+
+void CodeGeneratorX86_64::GenerateNop() {
+ __ nop();
}
void LocationsBuilderX86_64::VisitLocal(HLocal* local) {
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 97f6f84..b3d27e1 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -513,6 +513,8 @@
}
}
+ void GenerateNop();
+
private:
// Factored implementation of GenerateFieldLoadWithBakerReadBarrier
// and GenerateArrayLoadWithBakerReadBarrier.