MIPS: Improve InstructionCodeGeneratorMIPS*::GenerateSuspendCheck().

Relax the restriction that a suspend check may only be generated on
a back edge. Implement for MIPS32/MIPS64 the optimization that has
already been done for the ARM & x86 architectures in
https://android-review.googlesource.com/#/c/platform/art/+/149370/.
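
The core of the change is a caching pattern: each HSuspendCheck owns
at most one slow path, created on the first GenerateSuspendCheck()
call and reused by every later call. A minimal standalone sketch of
the pattern (hypothetical stand-in types, not the real ART classes;
only the caching logic mirrors the diff below):

  #include <cassert>
  #include <memory>
  #include <vector>

  struct BasicBlock {};

  // Out-of-line code that performs the suspension call and then
  // returns either to the inline code or to `successor`.
  struct SlowPath {
    BasicBlock* const successor;
    explicit SlowPath(BasicBlock* s) : successor(s) {}
  };

  struct SuspendCheck {
    SlowPath* slow_path = nullptr;  // Cached on first code generation.
  };

  struct CodeGenerator {
    std::vector<std::unique_ptr<SlowPath>> slow_paths;

    // Create and register the slow path on the first request; later
    // requests for the same check reuse the cached object.
    SlowPath* GetOrCreateSlowPath(SuspendCheck* check,
                                  BasicBlock* successor) {
      if (check->slow_path == nullptr) {
        slow_paths.push_back(std::make_unique<SlowPath>(successor));
        check->slow_path = slow_paths.back().get();
      } else {
        // Every emission of the same check must agree on where the
        // slow path returns to.
        assert(check->slow_path->successor == successor);
      }
      return check->slow_path;
    }
  };

The payoff is that code generation may reach the same HSuspendCheck
more than once (e.g. via several back edges into one loop header)
without emitting a duplicate slow path each time.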

Test: Boot & run tests on 32- & 64-bit versions of QEMU.
Test: test/testrunner/testrunner.py --target --optimizing
Test: test-art-host-gtest
Test: test-art-target-gtest

Change-Id: Ie0a4c19ee50ad532fe53933d5808f9d7a4f89b8e
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 6cbfa14..73bb642 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -415,6 +415,10 @@
 
   const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }
 
+  HBasicBlock* GetSuccessor() const {
+    return successor_;
+  }
+
  private:
   // If not null, the block to branch to after the suspend check.
   HBasicBlock* const successor_;
@@ -1831,8 +1835,19 @@
 void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                           HBasicBlock* successor) {
   SuspendCheckSlowPathMIPS64* slow_path =
-    new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS64(instruction, successor);
-  codegen_->AddSlowPath(slow_path);
+      down_cast<SuspendCheckSlowPathMIPS64*>(instruction->GetSlowPath());
+
+  if (slow_path == nullptr) {
+    slow_path =
+        new (codegen_->GetScopedAllocator()) SuspendCheckSlowPathMIPS64(instruction, successor);
+    instruction->SetSlowPath(slow_path);
+    codegen_->AddSlowPath(slow_path);
+    if (successor != nullptr) {
+      DCHECK(successor->IsLoopHeader());
+    }
+  } else {
+    DCHECK_EQ(slow_path->GetSuccessor(), successor);
+  }
 
   __ LoadFromOffset(kLoadUnsignedHalfword,
                     TMP,