Add suspend checks to special methods.

Generate suspend checks at the beginning of special methods.
If we need to call into the runtime, go to the slow path where
we create a simplified but valid frame, spill all arguments,
call art_quick_test_suspend, restore the necessary arguments
and return to the fast path. This keeps the fast-path overhead
to a minimum.
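
For illustration only (not part of the patch itself): a minimal standalone
C++ sketch of the simplified frame described above, under the assumption
(taken from the x86 code below) that 32-bit pushes EDI, ESI and EAX
(ArtMethod*) on top of the return address, while 64-bit only pushes RAX
(ArtMethod*). All types and names in the sketch are invented for this
example; it is not ART code.

  #include <cassert>
  #include <cstdio>
  #include <string>
  #include <vector>

  // Hypothetical stand-in for the frame built by GenSpecialEntryForSuspend
  // and torn down by GenSpecialExitForSuspend around art_quick_test_suspend.
  struct SuspendFrameSketch {
    std::vector<std::string> entry_pushes;
    std::vector<std::string> exit_pops;
    unsigned frame_size;  // Includes the return address already on the stack.
  };

  SuspendFrameSketch BuildSuspendFrameSketch(bool target64) {
    SuspendFrameSketch frame;
    if (!target64) {
      // 32-bit: return address (4) + EDI (4) + ESI (4) + EAX/ArtMethod* (4).
      frame.entry_pushes = {"push edi", "push esi", "push eax  ; ArtMethod*"};
      frame.exit_pops = {"pop eax   ; ArtMethod*", "pop esi", "pop edi"};
    } else {
      // 64-bit: return address (8) + RAX/ArtMethod* (8).
      frame.entry_pushes = {"push rax  ; ArtMethod*"};
      frame.exit_pops = {"pop rax   ; ArtMethod*"};
    }
    frame.frame_size = 16u;
    assert(frame.frame_size % 16u == 0u);  // 16-byte stack alignment is kept.
    return frame;
  }

  int main() {
    const bool targets[] = {false, true};
    for (bool target64 : targets) {
      SuspendFrameSketch frame = BuildSuspendFrameSketch(target64);
      std::printf("%s: %u-byte frame\n", target64 ? "x86-64" : "x86",
                  frame.frame_size);
      for (const std::string& s : frame.entry_pushes)
        std::printf("  entry: %s\n", s.c_str());
      std::printf("  ... call art_quick_test_suspend ...\n");
      for (const std::string& s : frame.exit_pops)
        std::printf("  exit:  %s\n", s.c_str());
    }
    return 0;
  }
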
Bug: 19245639
Change-Id: I3de5aee783943941322a49c4cf2c4c94411dbaa2
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 284e8f6..f964691 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -272,6 +272,41 @@
NewLIR0(kX86Ret);
}
+void X86Mir2Lir::GenSpecialEntryForSuspend() {
+ // Keep 16-byte stack alignment, there's already the return address, so
+ // - for 32-bit push EAX, i.e. ArtMethod*, ESI, EDI,
+ // - for 64-bit push RAX, i.e. ArtMethod*.
+ if (!cu_->target64) {
+ DCHECK(!IsTemp(rs_rSI));
+ DCHECK(!IsTemp(rs_rDI));
+ core_spill_mask_ =
+ (1u << rs_rSI.GetRegNum()) | (1u << rs_rDI.GetRegNum()) | (1u << rs_rRET.GetRegNum());
+ num_core_spills_ = 3u;
+ } else {
+ core_spill_mask_ = (1u << rs_rRET.GetRegNum());
+ num_core_spills_ = 1u;
+ }
+ fp_spill_mask_ = 0u;
+ num_fp_spills_ = 0u;
+ frame_size_ = 16u;
+ core_vmap_table_.clear();
+ fp_vmap_table_.clear();
+ if (!cu_->target64) {
+ NewLIR1(kX86Push32R, rs_rDI.GetReg());
+ NewLIR1(kX86Push32R, rs_rSI.GetReg());
+ }
+ NewLIR1(kX86Push32R, TargetReg(kArg0, kRef).GetReg()); // ArtMethod*
+}
+
+void X86Mir2Lir::GenSpecialExitForSuspend() {
+ // Pop the frame. (ArtMethod* no longer needed but restore it anyway.)
+ NewLIR1(kX86Pop32R, TargetReg(kArg0, kRef).GetReg()); // ArtMethod*
+ if (!cu_->target64) {
+ NewLIR1(kX86Pop32R, rs_rSI.GetReg());
+ NewLIR1(kX86Pop32R, rs_rDI.GetReg());
+ }
+}
+
void X86Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
return;
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index ca60400..20163b4 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -259,6 +259,8 @@
void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
void GenExitSequence() OVERRIDE;
void GenSpecialExitSequence() OVERRIDE;
+ void GenSpecialEntryForSuspend() OVERRIDE;
+ void GenSpecialExitForSuspend() OVERRIDE;
void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
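
As a further illustration only (again not part of the patch; the bit
positions below are invented and only the bit counts matter): a small
compile-time sketch checking that the masks and sizes set in
GenSpecialEntryForSuspend are mutually consistent, i.e. the core spill mask
has num_core_spills_ bits set, and the counted spills plus the ArtMethod*
slot add up to the 16-byte, 16-byte-aligned frame on both targets.

  #include <cstdint>

  // Invented stand-ins for the register numbers behind rs_rRET, rs_rSI and
  // rs_rDI; the real values come from the x86 backend.
  constexpr uint32_t kRetBit = 0u, kSiBit = 6u, kDiBit = 7u;

  constexpr int PopCount(uint32_t mask) {
    return mask == 0u ? 0 : static_cast<int>(mask & 1u) + PopCount(mask >> 1);
  }

  // 32-bit: the counted spills are the return address, ESI and EDI; the
  // ArtMethod* (EAX) slot is part of the frame but not of the spill count.
  constexpr uint32_t kCoreSpillMask32 =
      (1u << kSiBit) | (1u << kDiBit) | (1u << kRetBit);
  static_assert(PopCount(kCoreSpillMask32) == 3, "num_core_spills_ = 3u");
  static_assert(3 * 4 + 4 == 16, "frame_size_ = 16u on 32-bit");

  // 64-bit: only the return address is a counted spill; RAX (ArtMethod*)
  // fills the remaining slot.
  constexpr uint32_t kCoreSpillMask64 = (1u << kRetBit);
  static_assert(PopCount(kCoreSpillMask64) == 1, "num_core_spills_ = 1u");
  static_assert(1 * 8 + 8 == 16, "frame_size_ = 16u on 64-bit");

  static_assert(16 % 16 == 0, "both frames keep 16-byte stack alignment");

  int main() { return 0; }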