Write the .debug_line section using the new DWARF library.
Also simplify the dex-to-Java mapping and handle the mapping
of prologue and epilogue code.
Change-Id: I410f06024580f2a8788f2c93fe9bca132805029a
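
For context: .debug_line holds a DWARF line-number program, and the
prologue/epilogue markers added below correspond to its prologue_end
and epilogue_begin row flags. The sketch below shows the generic DWARF
opcode encoding involved; it is not the new library's API (which does
not appear in this diff), and the helper names are illustrative.

#include <cstdint>
#include <vector>

namespace {

// Standard line-program opcodes from the DWARF spec (v3 and later).
constexpr uint8_t DW_LNS_copy = 0x01;
constexpr uint8_t DW_LNS_advance_pc = 0x02;
constexpr uint8_t DW_LNS_advance_line = 0x03;
constexpr uint8_t DW_LNS_set_prologue_end = 0x0a;

// Unsigned LEB128, used for the advance_pc operand.
void PushUleb128(std::vector<uint8_t>* out, uint32_t value) {
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if (value != 0) {
      byte |= 0x80;  // more bytes follow
    }
    out->push_back(byte);
  } while (value != 0);
}

// Signed LEB128, used for the advance_line operand.
// Assumes arithmetic right shift for negative values.
void PushSleb128(std::vector<uint8_t>* out, int32_t value) {
  while (true) {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    bool done = (value == 0 && (byte & 0x40) == 0) ||
                (value == -1 && (byte & 0x40) != 0);
    if (!done) {
      byte |= 0x80;
    }
    out->push_back(byte);
    if (done) break;
  }
}

}  // namespace

// Append one line-table row: advance pc and line, optionally flag the
// row as the end of the prologue, then DW_LNS_copy to emit it.
void AppendLineRow(std::vector<uint8_t>* program, uint32_t pc_delta,
                   int32_t line_delta, bool prologue_end) {
  program->push_back(DW_LNS_advance_pc);
  PushUleb128(program, pc_delta);  // operand in code units
  program->push_back(DW_LNS_advance_line);
  PushSleb128(program, line_delta);
  if (prologue_end) {
    program->push_back(DW_LNS_set_prologue_end);
  }
  program->push_back(DW_LNS_copy);
}

A debugger placing a method breakpoint conventionally skips to the
first row flagged with prologue_end, which is why prologue code should
not contribute ordinary line entries.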
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index 3e69878..c5ac4c1 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1083,7 +1083,9 @@
#define PADDING_MOV_R5_R5 0x1C2D
uint8_t* ArmMir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
+ uint8_t* const write_buffer = write_pos;
for (; lir != NULL; lir = NEXT_LIR(lir)) {
+ lir->offset = (write_pos - write_buffer);
if (!lir->flags.is_nop) {
int opcode = lir->opcode;
if (IsPseudoLirOp(opcode)) {
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 3081c9e..e6158c3 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -372,7 +372,6 @@
* a leaf *and* our frame size < fudge factor.
*/
bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, kArm);
- NewLIR0(kPseudoMethodEntry);
const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm);
bool large_frame = (static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes);
bool generate_explicit_stack_overflow_check = large_frame ||
@@ -507,7 +506,6 @@
LockTemp(rs_r0);
LockTemp(rs_r1);
- NewLIR0(kPseudoMethodExit);
OpRegImm(kOpAdd, rs_rARM_SP, frame_size_ - (spill_count * 4));
/* Need to restore any FP callee saves? */
if (num_fp_spills_) {
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index a59deb5..2f1ae66 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -686,7 +686,9 @@
#define PADDING_NOP (UINT32_C(0xd503201f))
uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
+ uint8_t* const write_buffer = write_pos;
for (; lir != nullptr; lir = NEXT_LIR(lir)) {
+ lir->offset = (write_pos - write_buffer);
bool opcode_is_wide = IS_WIDE(lir->opcode);
A64Opcode opcode = UNWIDE(lir->opcode);
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 3316945..6b47bba 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -312,8 +312,6 @@
bool skip_overflow_check = mir_graph_->MethodIsLeaf() &&
!FrameNeedsStackCheck(frame_size_, kArm64);
- NewLIR0(kPseudoMethodEntry);
-
const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm64);
const bool large_frame = static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes;
bool generate_explicit_stack_overflow_check = large_frame ||
@@ -401,9 +399,6 @@
*/
LockTemp(rs_x0);
LockTemp(rs_x1);
-
- NewLIR0(kPseudoMethodExit);
-
UnspillRegs(rs_sp, core_spill_mask_, fp_spill_mask_, frame_size_);
// Finally return.
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 509d448..483a5d0 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -203,12 +203,17 @@
/* Handle pseudo-ops individually, and all regular insns as a group */
switch (lir->opcode) {
- case kPseudoMethodEntry:
- LOG(INFO) << "-------- method entry "
- << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+ case kPseudoPrologueBegin:
+ LOG(INFO) << "-------- PrologueBegin";
break;
- case kPseudoMethodExit:
- LOG(INFO) << "-------- Method_Exit";
+ case kPseudoPrologueEnd:
+ LOG(INFO) << "-------- PrologueEnd";
+ break;
+ case kPseudoEpilogueBegin:
+ LOG(INFO) << "-------- EpilogueBegin";
+ break;
+ case kPseudoEpilogueEnd:
+ LOG(INFO) << "-------- EpilogueEnd";
break;
case kPseudoBarrier:
LOG(INFO) << "-------- BARRIER";
@@ -267,8 +272,9 @@
lir, base_addr));
std::string op_operands(BuildInsnString(GetTargetInstFmt(lir->opcode),
lir, base_addr));
- LOG(INFO) << StringPrintf("%5p: %-9s%s%s",
+ LOG(INFO) << StringPrintf("%5p|0x%02x: %-9s%s%s",
base_addr + offset,
+ lir->dalvik_offset,
op_name.c_str(), op_operands.c_str(),
lir->flags.is_nop ? "(nop)" : "");
}
@@ -713,14 +719,17 @@
DCHECK_EQ(static_cast<size_t>(write_pos - &encoded_mapping_table_[0]), hdr_data_size);
uint8_t* write_pos2 = write_pos + pc2dex_data_size;
+ bool is_in_prologue_or_epilogue = false;
pc2dex_offset = 0u;
pc2dex_dalvik_offset = 0u;
dex2pc_offset = 0u;
dex2pc_dalvik_offset = 0u;
for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
- if (generate_src_map && !tgt_lir->flags.is_nop) {
- src_mapping_table_.push_back(SrcMapElem({tgt_lir->offset,
- static_cast<int32_t>(tgt_lir->dalvik_offset)}));
+ if (generate_src_map && !tgt_lir->flags.is_nop && tgt_lir->opcode >= 0) {
+ if (!is_in_prologue_or_epilogue) {
+ src_mapping_table_.push_back(SrcMapElem({tgt_lir->offset,
+ static_cast<int32_t>(tgt_lir->dalvik_offset)}));
+ }
}
if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
DCHECK(pc2dex_offset <= tgt_lir->offset);
@@ -738,6 +747,12 @@
dex2pc_offset = tgt_lir->offset;
dex2pc_dalvik_offset = tgt_lir->dalvik_offset;
}
+ if (tgt_lir->opcode == kPseudoPrologueBegin || tgt_lir->opcode == kPseudoEpilogueBegin) {
+ is_in_prologue_or_epilogue = true;
+ }
+ if (tgt_lir->opcode == kPseudoPrologueEnd || tgt_lir->opcode == kPseudoEpilogueEnd) {
+ is_in_prologue_or_epilogue = false;
+ }
}
DCHECK_EQ(static_cast<size_t>(write_pos - &encoded_mapping_table_[0]),
hdr_data_size + pc2dex_data_size);
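
The net effect of the codegen_util.cc hunk above: LIRs between a
*Begin and *End marker no longer produce src_mapping_table_ entries.
A minimal standalone sketch of that filter (FakeLir, BuildSrcMap, and
the enum values are illustrative stand-ins, not ART's real types):

#include <cstdint>
#include <vector>

// Illustrative stand-ins; ART's pseudo-opcodes are negative LIR values.
enum : int {
  kFakePrologueBegin = -1,
  kFakePrologueEnd   = -2,
  kFakeEpilogueBegin = -3,
  kFakeEpilogueEnd   = -4,
};

struct FakeLir {
  int opcode;              // >= 0 for real instructions, < 0 for pseudo-ops
  uint32_t offset;         // native code offset
  uint32_t dalvik_offset;  // dex pc
  bool is_nop;
};

struct SrcMapElem {
  uint32_t from;  // native offset
  int32_t to;     // dex pc
};

std::vector<SrcMapElem> BuildSrcMap(const std::vector<FakeLir>& lirs) {
  std::vector<SrcMapElem> map;
  bool in_prologue_or_epilogue = false;
  for (const FakeLir& lir : lirs) {
    if (lir.opcode == kFakePrologueBegin || lir.opcode == kFakeEpilogueBegin) {
      in_prologue_or_epilogue = true;
    } else if (lir.opcode == kFakePrologueEnd || lir.opcode == kFakeEpilogueEnd) {
      in_prologue_or_epilogue = false;
    } else if (lir.opcode >= 0 && !lir.is_nop && !in_prologue_or_epilogue) {
      // Only real, non-nop instructions outside the bracketed regions
      // contribute dex-to-native mapping entries.
      map.push_back({lir.offset, static_cast<int32_t>(lir.dalvik_offset)});
    }
  }
  return map;
}

Only real instructions (opcode >= 0) outside the bracketed regions are
mapped, matching the opcode >= 0 check added in the hunk above.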
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index de66b35..c932df6 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -275,7 +275,6 @@
*/
skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, target);
- NewLIR0(kPseudoMethodEntry);
RegStorage check_reg = AllocPtrSizeTemp();
RegStorage new_sp = AllocPtrSizeTemp();
const RegStorage rs_sp = TargetPtrReg(kSp);
@@ -345,7 +344,6 @@
LockTemp(TargetPtrReg(kRet0));
LockTemp(TargetPtrReg(kRet1));
- NewLIR0(kPseudoMethodExit);
UnSpillCoreRegs();
OpReg(kOpBx, TargetPtrReg(kLr));
}
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 0b480a0..ed8e21e 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1250,10 +1250,14 @@
if (bb->block_type == kEntryBlock) {
ResetRegPool();
int start_vreg = mir_graph_->GetFirstInVR();
+ AppendLIR(NewLIR0(kPseudoPrologueBegin));
GenEntrySequence(&mir_graph_->reg_location_[start_vreg], mir_graph_->GetMethodLoc());
+ AppendLIR(NewLIR0(kPseudoPrologueEnd));
} else if (bb->block_type == kExitBlock) {
ResetRegPool();
+ AppendLIR(NewLIR0(kPseudoEpilogueBegin));
GenExitSequence();
+ AppendLIR(NewLIR0(kPseudoEpilogueEnd));
}
for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index e81228a..89f6c6e 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -186,7 +186,6 @@
stack_decrement_ = OpRegImm(kOpSub, rs_rSP, frame_size_ -
GetInstructionSetPointerSize(cu_->instruction_set));
- NewLIR0(kPseudoMethodEntry);
/* Spill core callee saves */
SpillCoreRegs();
SpillFPRegs();
@@ -259,7 +258,6 @@
LockTemp(rs_rX86_RET0);
LockTemp(rs_rX86_RET1);
- NewLIR0(kPseudoMethodExit);
UnSpillCoreRegs();
UnSpillFPRegs();
/* Remove frame except for return address */