Merge "Default to 64-bit for tests of methods with varying ISAs for valgrind."
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 24d96ba..5966951 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -154,6 +154,7 @@
   runtime/jni_internal_test.cc \
   runtime/proxy_test.cc \
   runtime/reflection_test.cc \
+  compiler/dex/gvn_dead_code_elimination_test.cc \
   compiler/dex/global_value_numbering_test.cc \
   compiler/dex/local_value_numbering_test.cc \
   compiler/dex/mir_graph_test.cc \
diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h
index 45f9b56..180baec 100644
--- a/cmdline/cmdline_types.h
+++ b/cmdline/cmdline_types.h
@@ -88,7 +88,6 @@
     Split(options, ',', &pairs);
 
     JDWP::JdwpOptions jdwp_options;
-    std::stringstream error_stream;
 
     for (const std::string& jdwp_option : pairs) {
       std::string::size_type equals_pos = jdwp_option.find('=');
@@ -97,11 +96,12 @@
             "Can't parse JDWP option '" + jdwp_option + "' in '" + options + "'");
       }
 
-      if (!ParseJdwpOption(jdwp_option.substr(0, equals_pos),
-                           jdwp_option.substr(equals_pos + 1),
-                           error_stream,
-                           &jdwp_options)) {
-        return Result::Failure(error_stream.str());
+      Result parse_attempt = ParseJdwpOption(jdwp_option.substr(0, equals_pos),
+                                             jdwp_option.substr(equals_pos + 1),
+                                             &jdwp_options);
+      if (parse_attempt.IsError()) {
+        // We failed to parse this JDWP option.
+        return parse_attempt;
       }
     }
 
@@ -115,17 +115,15 @@
     return Result::Success(std::move(jdwp_options));
   }
 
-  bool ParseJdwpOption(const std::string& name, const std::string& value,
-                       std::ostream& error_stream,
-                       JDWP::JdwpOptions* jdwp_options) {
+  Result ParseJdwpOption(const std::string& name, const std::string& value,
+                         JDWP::JdwpOptions* jdwp_options) {
     if (name == "transport") {
       if (value == "dt_socket") {
         jdwp_options->transport = JDWP::kJdwpTransportSocket;
       } else if (value == "dt_android_adb") {
         jdwp_options->transport = JDWP::kJdwpTransportAndroidAdb;
       } else {
-        error_stream << "JDWP transport not supported: " << value;
-        return false;
+        return Result::Failure("JDWP transport not supported: " + value);
       }
     } else if (name == "server") {
       if (value == "n") {
@@ -133,8 +131,7 @@
       } else if (value == "y") {
         jdwp_options->server = true;
       } else {
-        error_stream << "JDWP option 'server' must be 'y' or 'n'";
-        return false;
+        return Result::Failure("JDWP option 'server' must be 'y' or 'n'");
       }
     } else if (name == "suspend") {
       if (value == "n") {
@@ -142,8 +139,7 @@
       } else if (value == "y") {
         jdwp_options->suspend = true;
       } else {
-        error_stream << "JDWP option 'suspend' must be 'y' or 'n'";
-        return false;
+        return Result::Failure("JDWP option 'suspend' must be 'y' or 'n'");
       }
     } else if (name == "address") {
       /* this is either <port> or <host>:<port> */
@@ -157,14 +153,12 @@
         port_string = value;
       }
       if (port_string.empty()) {
-        error_stream << "JDWP address missing port: " << value;
-        return false;
+        return Result::Failure("JDWP address missing port: " + value);
       }
       char* end;
       uint64_t port = strtoul(port_string.c_str(), &end, 10);
       if (*end != '\0' || port > 0xffff) {
-        error_stream << "JDWP address has junk in port field: " << value;
-        return false;
+        return Result::Failure("JDWP address has junk in port field: " + value);
       }
       jdwp_options->port = port;
     } else if (name == "launch" || name == "onthrow" || name == "oncaught" || name == "timeout") {
@@ -174,7 +168,7 @@
       LOG(INFO) << "Ignoring unrecognized JDWP option '" << name << "'='" << value << "'";
     }
 
-    return true;
+    return Result::SuccessNoValue();
   }
 
   static const char* Name() { return "JdwpOptions"; }
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 61379fb..69b4295 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -21,6 +21,7 @@
 LIBART_COMPILER_SRC_FILES := \
 	compiled_method.cc \
 	dex/global_value_numbering.cc \
+	dex/gvn_dead_code_elimination.cc \
 	dex/local_value_numbering.cc \
 	dex/quick/arm/assemble_arm.cc \
 	dex/quick/arm/call_arm.cc \
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index 7685200..93d83c6 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -240,6 +240,41 @@
 };
 
 /**
+ * @class DeadCodeEliminationPass
+ * @brief Performs the GVN-based dead code elimination pass.
+ */
+class DeadCodeEliminationPass : public PassME {
+ public:
+  DeadCodeEliminationPass() : PassME("DCE", kPreOrderDFSTraversal, "4_post_dce_cfg") {
+  }
+
+  bool Gate(const PassDataHolder* data) const OVERRIDE {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    return c_unit->mir_graph->EliminateDeadCodeGate();
+  }
+
+  bool Worker(PassDataHolder* data) const {
+    DCHECK(data != nullptr);
+    PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
+    CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+    DCHECK(c_unit != nullptr);
+    BasicBlock* bb = pass_me_data_holder->bb;
+    DCHECK(bb != nullptr);
+    return c_unit->mir_graph->EliminateDeadCode(bb);
+  }
+
+  void End(PassDataHolder* data) const OVERRIDE {
+    DCHECK(data != nullptr);
+    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+    DCHECK(c_unit != nullptr);
+    c_unit->mir_graph->EliminateDeadCodeEnd();
+    down_cast<PassMEDataHolder*>(data)->dirty = !c_unit->mir_graph->MirSsaRepUpToDate();
+  }
+};
+
+/**
  * @class BBCombine
  * @brief Perform the basic block combination pass.
  */
diff --git a/compiler/dex/dex_flags.h b/compiler/dex/dex_flags.h
index eaf272b..e8eb40c 100644
--- a/compiler/dex/dex_flags.h
+++ b/compiler/dex/dex_flags.h
@@ -27,6 +27,7 @@
   kNullCheckElimination,
   kClassInitCheckElimination,
   kGlobalValueNumbering,
+  kGvnDeadCodeElimination,
   kLocalValueNumbering,
   kPromoteRegs,
   kTrackLiveTemps,
diff --git a/compiler/dex/global_value_numbering.cc b/compiler/dex/global_value_numbering.cc
index a8fd812..ab3c946 100644
--- a/compiler/dex/global_value_numbering.cc
+++ b/compiler/dex/global_value_numbering.cc
@@ -28,7 +28,7 @@
       allocator_(allocator),
       bbs_processed_(0u),
       max_bbs_to_process_(kMaxBbsToProcessMultiplyFactor * mir_graph_->GetNumReachableBlocks()),
-      last_value_(0u),
+      last_value_(kNullValue),
       modifications_allowed_(true),
       mode_(mode),
       global_value_map_(std::less<uint64_t>(), allocator->Adapter()),
@@ -128,7 +128,11 @@
   merge_lvns_.clear();
 
   bool change = (lvns_[bb->id] == nullptr) || !lvns_[bb->id]->Equals(*work_lvn_);
-  if (change) {
+  if (mode_ == kModeGvn) {
+    // In GVN mode, keep the latest LVN even if Equals() indicates no change. This is
+    // to keep the correct values of fields that do not contribute to Equals() as long
+    // as they depend only on predecessor LVNs' fields that do contribute to Equals().
+    // Currently, that's LVN::merge_map_ used by LVN::GetStartingVregValueNumberImpl().
     std::unique_ptr<const LocalValueNumbering> old_lvn(lvns_[bb->id]);
     lvns_[bb->id] = work_lvn_.release();
   } else {
@@ -178,7 +182,7 @@
       }
       // IF_EQZ/IF_NEZ checks some sreg, see if that sreg contains the value_name.
       int s_reg = pred_bb->last_mir_insn->ssa_rep->uses[0];
-      if (!pred_lvn->IsSregValue(s_reg, value_name)) {
+      if (pred_lvn->GetSregValue(s_reg) != value_name) {
         return false;
       }
     }
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
index 023dbdb..c7bca85 100644
--- a/compiler/dex/global_value_numbering.h
+++ b/compiler/dex/global_value_numbering.h
@@ -31,6 +31,9 @@
 
 class GlobalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
  public:
+  static constexpr uint16_t kNoValue = 0xffffu;
+  static constexpr uint16_t kNullValue = 1u;
+
   enum Mode {
     kModeGvn,
     kModeGvnPostProcessing,
@@ -51,6 +54,14 @@
   GlobalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator, Mode mode);
   ~GlobalValueNumbering();
 
+  CompilationUnit* GetCompilationUnit() const {
+    return cu_;
+  }
+
+  MIRGraph* GetMirGraph() const {
+    return mir_graph_;
+  }
+
   // Prepare LVN for the basic block.
   LocalValueNumbering* PrepareBasicBlock(BasicBlock* bb,
                                          ScopedArenaAllocator* allocator = nullptr);
@@ -70,9 +81,10 @@
     return modifications_allowed_ && Good();
   }
 
- private:
-  static constexpr uint16_t kNoValue = 0xffffu;
+  // Retrieve the LVN with GVN results for a given BasicBlock.
+  const LocalValueNumbering* GetLvn(BasicBlockId bb_id) const;
 
+ private:
   // Allocate a new value name.
   uint16_t NewValueName();
 
@@ -88,7 +100,7 @@
   uint16_t LookupValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
     uint16_t res;
     uint64_t key = BuildKey(op, operand1, operand2, modifier);
-    ValueMap::iterator lb = global_value_map_.lower_bound(key);
+    auto lb = global_value_map_.lower_bound(key);
     if (lb != global_value_map_.end() && lb->first == key) {
       res = lb->second;
     } else {
@@ -99,10 +111,10 @@
   }
 
   // Look up a value in the global value map, don't add a new entry if there was none before.
-  uint16_t FindValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
+  uint16_t FindValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) const {
     uint16_t res;
     uint64_t key = BuildKey(op, operand1, operand2, modifier);
-    ValueMap::iterator lb = global_value_map_.lower_bound(key);
+    auto lb = global_value_map_.lower_bound(key);
     if (lb != global_value_map_.end() && lb->first == key) {
       res = lb->second;
     } else {
@@ -111,18 +123,6 @@
     return res;
   }
 
-  // Check if the exact value is stored in the global value map.
-  bool HasValue(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier,
-                uint16_t value) const {
-    DCHECK(value != 0u || !Good());
-    DCHECK_LE(value, last_value_);
-    // This is equivalent to value == LookupValue(op, operand1, operand2, modifier)
-    // except that it doesn't add an entry to the global value map if it's not there.
-    uint64_t key = BuildKey(op, operand1, operand2, modifier);
-    ValueMap::const_iterator it = global_value_map_.find(key);
-    return (it != global_value_map_.end() && it->second == value);
-  }
-
   // Get an instance field id.
   uint16_t GetIFieldId(MIR* mir) {
     return GetMirGraph()->GetGvnIFieldId(mir);
@@ -200,14 +200,6 @@
 
   bool DivZeroCheckedInAllPredecessors(const ScopedArenaVector<uint16_t>& merge_names) const;
 
-  CompilationUnit* GetCompilationUnit() const {
-    return cu_;
-  }
-
-  MIRGraph* GetMirGraph() const {
-    return mir_graph_;
-  }
-
   ScopedArenaAllocator* Allocator() const {
     return allocator_;
   }
@@ -255,6 +247,13 @@
 };
 std::ostream& operator<<(std::ostream& os, const GlobalValueNumbering::Mode& rhs);
 
+inline const LocalValueNumbering* GlobalValueNumbering::GetLvn(BasicBlockId bb_id) const {
+  DCHECK_EQ(mode_, kModeGvnPostProcessing);
+  DCHECK_LT(bb_id, lvns_.size());
+  DCHECK(lvns_[bb_id] != nullptr);
+  return lvns_[bb_id];
+}
+
 inline void GlobalValueNumbering::StartPostProcessing() {
   DCHECK(Good());
   DCHECK_EQ(mode_, kModeGvn);
diff --git a/compiler/dex/global_value_numbering_test.cc b/compiler/dex/global_value_numbering_test.cc
index cfa6388..54e34ea 100644
--- a/compiler/dex/global_value_numbering_test.cc
+++ b/compiler/dex/global_value_numbering_test.cc
@@ -134,8 +134,8 @@
     { bb, opcode, 0u, 0u, 2, { src, src + 1 }, 2, { reg, reg + 1 } }
 #define DEF_PHI2(bb, reg, src1, src2) \
     { bb, static_cast<Instruction::Code>(kMirOpPhi), 0, 0u, 2u, { src1, src2 }, 1, { reg } }
-#define DEF_DIV_REM(bb, opcode, result, dividend, divisor) \
-    { bb, opcode, 0u, 0u, 2, { dividend, divisor }, 1, { result } }
+#define DEF_BINOP(bb, opcode, result, src1, src2) \
+    { bb, opcode, 0u, 0u, 2, { src1, src2 }, 1, { result } }
 
   void DoPrepareIFields(const IFieldDef* defs, size_t count) {
     cu_.mir_graph->ifield_lowering_infos_.clear();
@@ -267,7 +267,6 @@
       mir->offset = i;  // LVN uses offset only for debug output
       mir->optimization_flags = 0u;
     }
-    mirs_[count - 1u].next = nullptr;
     DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(
         cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
     code_item->insns_size_in_code_units_ = 2u * count;
@@ -279,6 +278,20 @@
     DoPrepareMIRs(defs, count);
   }
 
+  void DoPrepareVregToSsaMapExit(BasicBlockId bb_id, const int32_t* map, size_t count) {
+    BasicBlock* bb = cu_.mir_graph->GetBasicBlock(bb_id);
+    ASSERT_TRUE(bb != nullptr);
+    ASSERT_TRUE(bb->data_flow_info != nullptr);
+    bb->data_flow_info->vreg_to_ssa_map_exit =
+        cu_.arena.AllocArray<int32_t>(count, kArenaAllocDFInfo);
+    std::copy_n(map, count, bb->data_flow_info->vreg_to_ssa_map_exit);
+  }
+
+  template <size_t count>
+  void PrepareVregToSsaMapExit(BasicBlockId bb_id, const int32_t (&map)[count]) {
+    DoPrepareVregToSsaMapExit(bb_id, map, count);
+  }
+
   void PerformGVN() {
     DoPerformGVN<LoopRepeatingTopologicalSortIterator>();
   }
@@ -294,9 +307,9 @@
     cu_.mir_graph->ComputeDominators();
     cu_.mir_graph->ComputeTopologicalSortOrder();
     cu_.mir_graph->SSATransformationEnd();
-    cu_.mir_graph->temp_.gvn.ifield_ids_ =  GlobalValueNumbering::PrepareGvnFieldIds(
+    cu_.mir_graph->temp_.gvn.ifield_ids =  GlobalValueNumbering::PrepareGvnFieldIds(
         allocator_.get(), cu_.mir_graph->ifield_lowering_infos_);
-    cu_.mir_graph->temp_.gvn.sfield_ids_ =  GlobalValueNumbering::PrepareGvnFieldIds(
+    cu_.mir_graph->temp_.gvn.sfield_ids =  GlobalValueNumbering::PrepareGvnFieldIds(
         allocator_.get(), cu_.mir_graph->sfield_lowering_infos_);
     ASSERT_TRUE(gvn_ == nullptr);
     gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get(),
@@ -348,6 +361,10 @@
     cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
     cu_.access_flags = kAccStatic;  // Don't let "this" interfere with this test.
     allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
+    // By default, the zero-initialized reg_location_[.] with ref == false tells LVN that
+    // 0 constants are integral, not references. Nothing else is used by LVN/GVN.
+    cu_.mir_graph->reg_location_ =
+        cu_.arena.AllocArray<RegLocation>(kMaxSsaRegs, kArenaAllocRegAlloc);
     // Bind all possible sregs to live vregs for test purposes.
     live_in_v_->SetInitialBits(kMaxSsaRegs);
     cu_.mir_graph->ssa_base_vregs_.reserve(kMaxSsaRegs);
@@ -1570,6 +1587,40 @@
   EXPECT_NE(value_names_[4], value_names_[3]);
 }
 
+TEST_F(GlobalValueNumberingTestLoop, IFieldLoopVariable) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_CONST(3, Instruction::CONST, 0u, 0),
+      DEF_IPUT(3, Instruction::IPUT, 0u, 100u, 0u),
+      DEF_IGET(4, Instruction::IGET, 2u, 100u, 0u),
+      DEF_BINOP(4, Instruction::ADD_INT, 3u, 2u, 101u),
+      DEF_IPUT(4, Instruction::IPUT, 3u, 100u, 0u),
+  };
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN();
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  EXPECT_NE(value_names_[2], value_names_[0]);
+  EXPECT_NE(value_names_[3], value_names_[0]);
+  EXPECT_NE(value_names_[3], value_names_[2]);
+
+
+  // Set up vreg_to_ssa_map_exit for prologue and loop and set post-processing mode
+  // as needed for GetStartingVregValueNumber().
+  const int32_t prologue_vreg_to_ssa_map_exit[] = { 0 };
+  const int32_t loop_vreg_to_ssa_map_exit[] = { 3 };
+  PrepareVregToSsaMapExit(3, prologue_vreg_to_ssa_map_exit);
+  PrepareVregToSsaMapExit(4, loop_vreg_to_ssa_map_exit);
+  gvn_->StartPostProcessing();
+
+  // Check that vreg 0 has the same value number as the result of IGET 2u.
+  const LocalValueNumbering* loop = gvn_->GetLvn(4);
+  EXPECT_EQ(value_names_[2], loop->GetStartingVregValueNumber(0));
+}
+
 TEST_F(GlobalValueNumberingTestCatch, IFields) {
   static const IFieldDef ifields[] = {
       { 0u, 1u, 0u, false, kDexMemAccessWord },
@@ -2225,18 +2276,18 @@
 
 TEST_F(GlobalValueNumberingTestDiamond, DivZeroCheckDiamond) {
   static const MIRDef mirs[] = {
-      DEF_DIV_REM(3u, Instruction::DIV_INT, 1u, 20u, 21u),
-      DEF_DIV_REM(3u, Instruction::DIV_INT, 2u, 24u, 21u),
-      DEF_DIV_REM(3u, Instruction::DIV_INT, 3u, 20u, 23u),
-      DEF_DIV_REM(4u, Instruction::DIV_INT, 4u, 24u, 22u),
-      DEF_DIV_REM(4u, Instruction::DIV_INT, 9u, 24u, 25u),
-      DEF_DIV_REM(5u, Instruction::DIV_INT, 5u, 24u, 21u),
-      DEF_DIV_REM(5u, Instruction::DIV_INT, 10u, 24u, 26u),
+      DEF_BINOP(3u, Instruction::DIV_INT, 1u, 20u, 21u),
+      DEF_BINOP(3u, Instruction::DIV_INT, 2u, 24u, 21u),
+      DEF_BINOP(3u, Instruction::DIV_INT, 3u, 20u, 23u),
+      DEF_BINOP(4u, Instruction::DIV_INT, 4u, 24u, 22u),
+      DEF_BINOP(4u, Instruction::DIV_INT, 9u, 24u, 25u),
+      DEF_BINOP(5u, Instruction::DIV_INT, 5u, 24u, 21u),
+      DEF_BINOP(5u, Instruction::DIV_INT, 10u, 24u, 26u),
       DEF_PHI2(6u, 27u, 25u, 26u),
-      DEF_DIV_REM(6u, Instruction::DIV_INT, 12u, 20u, 27u),
-      DEF_DIV_REM(6u, Instruction::DIV_INT, 6u, 24u, 21u),
-      DEF_DIV_REM(6u, Instruction::DIV_INT, 7u, 20u, 23u),
-      DEF_DIV_REM(6u, Instruction::DIV_INT, 8u, 20u, 22u),
+      DEF_BINOP(6u, Instruction::DIV_INT, 12u, 20u, 27u),
+      DEF_BINOP(6u, Instruction::DIV_INT, 6u, 24u, 21u),
+      DEF_BINOP(6u, Instruction::DIV_INT, 7u, 20u, 23u),
+      DEF_BINOP(6u, Instruction::DIV_INT, 8u, 20u, 22u),
   };
 
   static const bool expected_ignore_div_zero_check[] = {
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
new file mode 100644
index 0000000..2e7f032
--- /dev/null
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -0,0 +1,1391 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <sstream>
+
+#include "gvn_dead_code_elimination.h"
+
+#include "base/bit_vector-inl.h"
+#include "base/macros.h"
+#include "compiler_enums.h"
+#include "dataflow_iterator-inl.h"
+#include "dex_instruction.h"
+#include "dex/mir_graph.h"
+#include "local_value_numbering.h"
+#include "utils/arena_bit_vector.h"
+
+namespace art {
+
+constexpr uint16_t GvnDeadCodeElimination::kNoValue;
+constexpr uint16_t GvnDeadCodeElimination::kNPos;
+
+inline uint16_t GvnDeadCodeElimination::MIRData::PrevChange(int v_reg) const {
+  DCHECK(has_def);
+  DCHECK(v_reg == vreg_def || v_reg == vreg_def + 1);
+  return (v_reg == vreg_def) ? prev_value.change : prev_value_high.change;
+}
+
+inline void GvnDeadCodeElimination::MIRData::SetPrevChange(int v_reg, uint16_t change) {
+  DCHECK(has_def);
+  DCHECK(v_reg == vreg_def || v_reg == vreg_def + 1);
+  if (v_reg == vreg_def) {
+    prev_value.change = change;
+  } else {
+    prev_value_high.change = change;
+  }
+}
+
+inline void GvnDeadCodeElimination::MIRData::RemovePrevChange(int v_reg, MIRData* prev_data) {
+  DCHECK_NE(PrevChange(v_reg), kNPos);
+  DCHECK(v_reg == prev_data->vreg_def || v_reg == prev_data->vreg_def + 1);
+  if (vreg_def == v_reg) {
+    if (prev_data->vreg_def == v_reg) {
+      prev_value = prev_data->prev_value;
+      low_def_over_high_word = prev_data->low_def_over_high_word;
+    } else {
+      prev_value = prev_data->prev_value_high;
+      low_def_over_high_word =
+          prev_data->prev_value_high.value != kNPos && !prev_data->high_def_over_low_word;
+    }
+  } else {
+    if (prev_data->vreg_def == v_reg) {
+      prev_value_high = prev_data->prev_value;
+      high_def_over_low_word =
+          prev_data->prev_value.value != kNPos && !prev_data->low_def_over_high_word;
+    } else {
+      prev_value_high = prev_data->prev_value_high;
+      high_def_over_low_word = prev_data->high_def_over_low_word;
+    }
+  }
+}
+
+GvnDeadCodeElimination::VRegChains::VRegChains(uint32_t num_vregs, ScopedArenaAllocator* alloc)
+    : num_vregs_(num_vregs),
+      vreg_data_(alloc->AllocArray<VRegValue>(num_vregs, kArenaAllocMisc)),
+      mir_data_(alloc->Adapter()) {
+  mir_data_.reserve(100);
+}
+
+inline void GvnDeadCodeElimination::VRegChains::Reset() {
+  DCHECK(mir_data_.empty());
+  std::fill_n(vreg_data_, num_vregs_, VRegValue());
+}
+
+void GvnDeadCodeElimination::VRegChains::AddMIRWithDef(MIR* mir, int v_reg, bool wide,
+                                                       uint16_t new_value) {
+  uint16_t pos = mir_data_.size();
+  mir_data_.emplace_back(mir);
+  MIRData* data = &mir_data_.back();
+  data->has_def = true;
+  data->wide_def = wide;
+  data->vreg_def = v_reg;
+
+  if (vreg_data_[v_reg].change != kNPos &&
+      mir_data_[vreg_data_[v_reg].change].vreg_def + 1 == v_reg) {
+    data->low_def_over_high_word = true;
+  }
+  data->prev_value = vreg_data_[v_reg];
+  DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
+  vreg_data_[v_reg].value = new_value;
+  vreg_data_[v_reg].change = pos;
+
+  if (wide) {
+    if (vreg_data_[v_reg + 1].change != kNPos &&
+        mir_data_[vreg_data_[v_reg + 1].change].vreg_def == v_reg + 1) {
+      data->high_def_over_low_word = true;
+    }
+    data->prev_value_high = vreg_data_[v_reg + 1];
+    DCHECK_LT(static_cast<size_t>(v_reg + 1), num_vregs_);
+    vreg_data_[v_reg + 1].value = new_value;
+    vreg_data_[v_reg + 1].change = pos;
+  }
+}
+
+inline void GvnDeadCodeElimination::VRegChains::AddMIRWithoutDef(MIR* mir) {
+  mir_data_.emplace_back(mir);
+}
+
+void GvnDeadCodeElimination::VRegChains::RemoveLastMIRData() {
+  MIRData* data = LastMIRData();
+  if (data->has_def) {
+    DCHECK_EQ(vreg_data_[data->vreg_def].change, NumMIRs() - 1u);
+    vreg_data_[data->vreg_def] = data->prev_value;
+    if (data->wide_def) {
+      DCHECK_EQ(vreg_data_[data->vreg_def + 1].change, NumMIRs() - 1u);
+      vreg_data_[data->vreg_def + 1] = data->prev_value_high;
+    }
+  }
+  mir_data_.pop_back();
+}
+
+void GvnDeadCodeElimination::VRegChains::RemoveTrailingNops() {
+  // There's at least one NOP to drop. There may be more.
+  MIRData* last_data = LastMIRData();
+  DCHECK(!last_data->must_keep && !last_data->has_def);
+  do {
+    DCHECK_EQ(static_cast<int>(last_data->mir->dalvikInsn.opcode), static_cast<int>(kMirOpNop));
+    mir_data_.pop_back();
+    if (mir_data_.empty()) {
+      break;
+    }
+    last_data = LastMIRData();
+  } while (!last_data->must_keep && !last_data->has_def);
+}
+
+inline size_t GvnDeadCodeElimination::VRegChains::NumMIRs() const {
+  return mir_data_.size();
+}
+
+inline GvnDeadCodeElimination::MIRData* GvnDeadCodeElimination::VRegChains::GetMIRData(size_t pos) {
+  DCHECK_LT(pos, mir_data_.size());
+  return &mir_data_[pos];
+}
+
+inline GvnDeadCodeElimination::MIRData* GvnDeadCodeElimination::VRegChains::LastMIRData() {
+  DCHECK(!mir_data_.empty());
+  return &mir_data_.back();
+}
+
+uint32_t GvnDeadCodeElimination::VRegChains::NumVRegs() const {
+  return num_vregs_;
+}
+
+void GvnDeadCodeElimination::VRegChains::InsertInitialValueHigh(int v_reg, uint16_t value) {
+  DCHECK_NE(value, kNoValue);
+  DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
+  uint16_t change = vreg_data_[v_reg].change;
+  if (change == kNPos) {
+    vreg_data_[v_reg].value = value;
+  } else {
+    while (true) {
+      MIRData* data = &mir_data_[change];
+      DCHECK(data->vreg_def == v_reg || data->vreg_def + 1 == v_reg);
+      if (data->vreg_def == v_reg) {  // Low word, use prev_value.
+        if (data->prev_value.change == kNPos) {
+          DCHECK_EQ(data->prev_value.value, kNoValue);
+          data->prev_value.value = value;
+          data->low_def_over_high_word = true;
+          break;
+        }
+        change = data->prev_value.change;
+      } else {  // High word, use prev_value_high.
+        if (data->prev_value_high.change == kNPos) {
+          DCHECK_EQ(data->prev_value_high.value, kNoValue);
+          data->prev_value_high.value = value;
+          break;
+        }
+        change = data->prev_value_high.change;
+      }
+    }
+  }
+}
+
+void GvnDeadCodeElimination::VRegChains::UpdateInitialVRegValue(int v_reg, bool wide,
+                                                                const LocalValueNumbering* lvn) {
+  DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
+  if (!wide) {
+    if (vreg_data_[v_reg].value == kNoValue) {
+      uint16_t old_value = lvn->GetStartingVregValueNumber(v_reg);
+      if (old_value == kNoValue) {
+        // Maybe there was a wide value in v_reg before. Do not check for wide value in v_reg-1;
+        // that will be done only if we see a definition of v_reg-1, otherwise it's unnecessary.
+        old_value = lvn->GetStartingVregValueNumberWide(v_reg);
+        if (old_value != kNoValue) {
+          InsertInitialValueHigh(v_reg + 1, old_value);
+        }
+      }
+      vreg_data_[v_reg].value = old_value;
+    }
+  } else {
+    DCHECK_LT(static_cast<size_t>(v_reg + 1), num_vregs_);
+    bool check_high = true;
+    if (vreg_data_[v_reg].value == kNoValue) {
+      uint16_t old_value = lvn->GetStartingVregValueNumberWide(v_reg);
+      if (old_value != kNoValue) {
+        InsertInitialValueHigh(v_reg + 1, old_value);
+        check_high = false;  // High word has been processed.
+      } else {
+        // Maybe there was a narrow value before. Do not check for wide value in v_reg-1;
+        // that will be done only if we see a definition of v_reg-1, otherwise it's unnecessary.
+        old_value = lvn->GetStartingVregValueNumber(v_reg);
+      }
+      vreg_data_[v_reg].value = old_value;
+    }
+    if (check_high && vreg_data_[v_reg + 1].value == kNoValue) {
+      uint16_t old_value = lvn->GetStartingVregValueNumber(v_reg + 1);
+      if (old_value == kNoValue && static_cast<size_t>(v_reg + 2) < num_vregs_) {
+        // Maybe there was a wide value before.
+        old_value = lvn->GetStartingVregValueNumberWide(v_reg + 1);
+        if (old_value != kNoValue) {
+          InsertInitialValueHigh(v_reg + 2, old_value);
+        }
+      }
+      vreg_data_[v_reg + 1].value = old_value;
+    }
+  }
+}
+
+inline uint16_t GvnDeadCodeElimination::VRegChains::LastChange(int v_reg) {
+  DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
+  return vreg_data_[v_reg].change;
+}
+
+inline uint16_t GvnDeadCodeElimination::VRegChains::CurrentValue(int v_reg) {
+  DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
+  return vreg_data_[v_reg].value;
+}
+
+uint16_t GvnDeadCodeElimination::VRegChains::FindKillHead(int v_reg, uint16_t cutoff) {
+  uint16_t current_value = this->CurrentValue(v_reg);
+  DCHECK_NE(current_value, kNoValue);
+  uint16_t change = LastChange(v_reg);
+  DCHECK_LT(change, mir_data_.size());
+  DCHECK_GE(change, cutoff);
+  bool match_high_word = (mir_data_[change].vreg_def != v_reg);
+  do {
+    MIRData* data = &mir_data_[change];
+    DCHECK(data->vreg_def == v_reg || data->vreg_def + 1 == v_reg);
+    if (data->vreg_def == v_reg) {  // Low word, use prev_value.
+      if (data->prev_value.value == current_value &&
+          match_high_word == data->low_def_over_high_word) {
+        break;
+      }
+      change = data->prev_value.change;
+    } else {  // High word, use prev_value_high.
+      if (data->prev_value_high.value == current_value &&
+          match_high_word != data->high_def_over_low_word) {
+        break;
+      }
+      change = data->prev_value_high.change;
+    }
+    if (change < cutoff) {
+      change = kNPos;
+    }
+  } while (change != kNPos);
+  return change;
+}
+
+uint16_t GvnDeadCodeElimination::VRegChains::FindFirstChangeAfter(int v_reg,
+                                                                  uint16_t change) const {
+  DCHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
+  DCHECK_LT(change, mir_data_.size());
+  uint16_t result = kNPos;
+  uint16_t search_change = vreg_data_[v_reg].change;
+  while (search_change != kNPos && search_change > change) {
+    result = search_change;
+    search_change = mir_data_[search_change].PrevChange(v_reg);
+  }
+  return result;
+}
+
+void GvnDeadCodeElimination::VRegChains::ReplaceChange(uint16_t old_change, uint16_t new_change) {
+  const MIRData* old_data = GetMIRData(old_change);
+  DCHECK(old_data->has_def);
+  int count = old_data->wide_def ? 2 : 1;
+  for (int v_reg = old_data->vreg_def, end = old_data->vreg_def + count; v_reg != end; ++v_reg) {
+    uint16_t next_change = FindFirstChangeAfter(v_reg, old_change);
+    if (next_change == kNPos) {
+      DCHECK_EQ(vreg_data_[v_reg].change, old_change);
+      vreg_data_[v_reg].change = new_change;
+    } else {
+      DCHECK_EQ(mir_data_[next_change].PrevChange(v_reg), old_change);
+      mir_data_[next_change].SetPrevChange(v_reg, new_change);
+    }
+  }
+}
+
+void GvnDeadCodeElimination::VRegChains::RemoveChange(uint16_t change) {
+  MIRData* data = &mir_data_[change];
+  DCHECK(data->has_def);
+  int count = data->wide_def ? 2 : 1;
+  for (int v_reg = data->vreg_def, end = data->vreg_def + count; v_reg != end; ++v_reg) {
+    uint16_t next_change = FindFirstChangeAfter(v_reg, change);
+    if (next_change == kNPos) {
+      DCHECK_EQ(vreg_data_[v_reg].change, change);
+      vreg_data_[v_reg] = (data->vreg_def == v_reg) ? data->prev_value : data->prev_value_high;
+    } else {
+      DCHECK_EQ(mir_data_[next_change].PrevChange(v_reg), change);
+      mir_data_[next_change].RemovePrevChange(v_reg, data);
+    }
+  }
+}
+
+inline bool GvnDeadCodeElimination::VRegChains::IsTopChange(uint16_t change) const {
+  DCHECK_LT(change, mir_data_.size());
+  const MIRData* data = &mir_data_[change];
+  DCHECK(data->has_def);
+  DCHECK_LT(data->wide_def ? data->vreg_def + 1u : data->vreg_def, num_vregs_);
+  return vreg_data_[data->vreg_def].change == change &&
+      (!data->wide_def || vreg_data_[data->vreg_def + 1u].change == change);
+}
+
+bool GvnDeadCodeElimination::VRegChains::IsSRegUsed(uint16_t first_change, uint16_t last_change,
+                                                    int s_reg) const {
+  DCHECK_LE(first_change, last_change);
+  DCHECK_LE(last_change, mir_data_.size());
+  for (size_t c = first_change; c != last_change; ++c) {
+    SSARepresentation* ssa_rep = mir_data_[c].mir->ssa_rep;
+    for (int i = 0; i != ssa_rep->num_uses; ++i) {
+      if (ssa_rep->uses[i] == s_reg) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
// Replace all uses of `old_s_reg` with `new_s_reg` in the MIRs recorded for changes
// [first_change, last_change). For wide values, the following use slot holds the
// high word and is renamed to new_s_reg + 1 in the same step.
void GvnDeadCodeElimination::VRegChains::RenameSRegUses(uint16_t first_change, uint16_t last_change,
                                                        int old_s_reg, int new_s_reg, bool wide) {
  for (size_t c = first_change; c != last_change; ++c) {
    SSARepresentation* ssa_rep = mir_data_[c].mir->ssa_rep;
    for (int i = 0; i != ssa_rep->num_uses; ++i) {
      if (ssa_rep->uses[i] == old_s_reg) {
        ssa_rep->uses[i] = new_s_reg;
        if (wide) {
          // Also rename the high word use and skip over it.
          ++i;
          DCHECK_LT(i, ssa_rep->num_uses);
          ssa_rep->uses[i] = new_s_reg + 1;
        }
      }
    }
  }
}
+
// Replace uses of `old_s_reg`/`old_v_reg` with `new_s_reg`/`new_v_reg` in the MIRs
// recorded for changes [first_change, last_change), updating both the SSA uses and
// the encoded dalvik operands (vA/vB/vC). A binop_2ADDR whose destination-tied source
// matches is first rewritten as a plain binop so the source can be renamed freely.
void GvnDeadCodeElimination::VRegChains::RenameVRegUses(uint16_t first_change, uint16_t last_change,
                                                    int old_s_reg, int old_v_reg,
                                                    int new_s_reg, int new_v_reg) {
  for (size_t c = first_change; c != last_change; ++c) {
    MIR* mir = mir_data_[c].mir;
    if (IsInstructionBinOp2Addr(mir->dalvikInsn.opcode) &&
        mir->ssa_rep->uses[0] == old_s_reg && old_v_reg != new_v_reg) {
      // Rewrite binop_2ADDR with plain binop before doing the register rename.
      ChangeBinOp2AddrToPlainBinOp(mir);
    }
    uint64_t df_attr = MIRGraph::GetDataFlowAttributes(mir);
    size_t use = 0u;
    // For each vreg operand (A/B/C) the insn uses, rename it if it matches the old
    // SSA reg; a wide operand occupies two consecutive use slots.
#define REPLACE_VREG(REG) \
    if ((df_attr & DF_U##REG) != 0) {                                         \
      if (mir->ssa_rep->uses[use] == old_s_reg) {                             \
        DCHECK_EQ(mir->dalvikInsn.v##REG, static_cast<uint32_t>(old_v_reg));  \
        mir->dalvikInsn.v##REG = new_v_reg;                                   \
        mir->ssa_rep->uses[use] = new_s_reg;                                  \
        if ((df_attr & DF_##REG##_WIDE) != 0) {                               \
          DCHECK_EQ(mir->ssa_rep->uses[use + 1], old_s_reg + 1);              \
          mir->ssa_rep->uses[use + 1] = new_s_reg + 1;                        \
        }                                                                     \
      }                                                                       \
      use += ((df_attr & DF_##REG##_WIDE) != 0) ? 2 : 1;                      \
    }
    REPLACE_VREG(A)
    REPLACE_VREG(B)
    REPLACE_VREG(C)
#undef REPLACE_VREG
    // We may encounter an out-of-order Phi which we need to ignore, otherwise we should
    // only be asked to rename registers specified by DF_UA, DF_UB and DF_UC.
    DCHECK_EQ(use,
              static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi
              ? 0u
              : static_cast<size_t>(mir->ssa_rep->num_uses));
  }
}
+
// All scratch structures (bit vectors, kill-head array, change list) are allocated
// from the supplied scoped arena and sized by the number of code/temp vregs.
GvnDeadCodeElimination::GvnDeadCodeElimination(const GlobalValueNumbering* gvn,
                                         ScopedArenaAllocator* alloc)
    : gvn_(gvn),
      mir_graph_(gvn_->GetMirGraph()),
      vreg_chains_(mir_graph_->GetNumOfCodeAndTempVRs(), alloc),
      bb_(nullptr),
      lvn_(nullptr),
      no_uses_all_since_(0u),
      unused_vregs_(new (alloc) ArenaBitVector(alloc, vreg_chains_.NumVRegs(), false)),
      vregs_to_kill_(new (alloc) ArenaBitVector(alloc, vreg_chains_.NumVRegs(), false)),
      kill_heads_(alloc->AllocArray<uint16_t>(vreg_chains_.NumVRegs(), kArenaAllocMisc)),
      changes_to_kill_(alloc->Adapter()),
      dependent_vregs_(new (alloc) ArenaBitVector(alloc, vreg_chains_.NumVRegs(), false)) {
  changes_to_kill_.reserve(16u);  // Avoid the first few reallocations in typical cases.
}
+
+void GvnDeadCodeElimination::Apply(BasicBlock* bb) {
+  bb_ = bb;
+  lvn_ = gvn_->GetLvn(bb->id);
+
+  RecordPass();
+  BackwardPass();
+
+  DCHECK_EQ(no_uses_all_since_, 0u);
+  lvn_ = nullptr;
+  bb_ = nullptr;
+}
+
+void GvnDeadCodeElimination::RecordPass() {
+  // Record MIRs with vreg definition data, eliminate single instructions.
+  vreg_chains_.Reset();
+  DCHECK_EQ(no_uses_all_since_, 0u);
+  for (MIR* mir = bb_->first_mir_insn; mir != nullptr; mir = mir->next) {
+    if (RecordMIR(mir)) {
+      RecordPassTryToKillOverwrittenMoveOrMoveSrc();
+      RecordPassTryToKillLastMIR();
+    }
+  }
+}
+
+void GvnDeadCodeElimination::BackwardPass() {
+  // Now process MIRs in reverse order, trying to eliminate them.
+  unused_vregs_->ClearAllBits();  // Implicitly depend on all vregs at the end of BB.
+  while (vreg_chains_.NumMIRs() != 0u) {
+    if (BackwardPassTryToKillLastMIR()) {
+      continue;
+    }
+    BackwardPassProcessLastMIR();
+  }
+}
+
// Kill the instruction tracked by `data` and clear its tracking flags.
// The caller is responsible for first unlinking the change from the vreg chains.
void GvnDeadCodeElimination::KillMIR(MIRData* data) {
  DCHECK(!data->must_keep);
  DCHECK(!data->uses_all_vregs);
  DCHECK(data->has_def);
  DCHECK(data->mir->ssa_rep->num_defs == 1 || data->mir->ssa_rep->num_defs == 2);

  KillMIR(data->mir);
  // The MIR is now a NOP; it no longer defines or moves anything.
  data->has_def = false;
  data->is_move = false;
  data->is_move_src = false;
}
+
+void GvnDeadCodeElimination::KillMIR(MIR* mir) {
+  mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+  mir->ssa_rep->num_uses = 0;
+  mir->ssa_rep->num_defs = 0;
+}
+
// Rewrite a binop_2ADDR (vA op= vB) as the equivalent plain binop (vA = vA op vB)
// so that its operands are no longer tied to the destination vreg.
void GvnDeadCodeElimination::ChangeBinOp2AddrToPlainBinOp(MIR* mir) {
  // Order matters: save vB into vC before overwriting vB with vA.
  mir->dalvikInsn.vC = mir->dalvikInsn.vB;
  mir->dalvikInsn.vB = mir->dalvikInsn.vA;
  // The 2ADDR opcodes are laid out parallel to the plain binops, so a constant
  // offset converts one range into the other.
  mir->dalvikInsn.opcode = static_cast<Instruction::Code>(
      mir->dalvikInsn.opcode - Instruction::ADD_INT_2ADDR +  Instruction::ADD_INT);
}
+
// Insert a new Phi defining `s_reg` at the beginning of the current BB, with one
// incoming use per predecessor taken from that predecessor's vreg-to-SSA exit map.
// Returns the newly created Phi MIR.
MIR* GvnDeadCodeElimination::CreatePhi(int s_reg, bool fp) {
  int v_reg = mir_graph_->SRegToVReg(s_reg);
  MIR* phi = mir_graph_->NewMIR();
  phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
  phi->dalvikInsn.vA = v_reg;
  phi->offset = bb_->start_offset;
  phi->m_unit_index = 0;  // Arbitrarily assign all Phi nodes to outermost method.

  phi->ssa_rep = static_cast<struct SSARepresentation *>(mir_graph_->GetArena()->Alloc(
      sizeof(SSARepresentation), kArenaAllocDFInfo));

  mir_graph_->AllocateSSADefData(phi, 1);
  phi->ssa_rep->defs[0] = s_reg;
  phi->ssa_rep->fp_def[0] = fp;

  // One use per predecessor: the SSA reg that holds v_reg at that predecessor's exit.
  size_t num_uses = bb_->predecessors.size();
  mir_graph_->AllocateSSAUseData(phi, num_uses);
  std::fill_n(phi->ssa_rep->fp_use, num_uses, fp);
  size_t idx = 0u;
  for (BasicBlockId pred_id : bb_->predecessors) {
    BasicBlock* pred_bb = mir_graph_->GetBasicBlock(pred_id);
    DCHECK(pred_bb != nullptr);
    phi->ssa_rep->uses[idx] = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
    DCHECK_NE(phi->ssa_rep->uses[idx], INVALID_SREG);
    idx++;
  }

  // Record which predecessor block each incoming use comes from.
  phi->meta.phi_incoming = static_cast<BasicBlockId*>(mir_graph_->GetArena()->Alloc(
      sizeof(BasicBlockId) * num_uses, kArenaAllocDFInfo));
  std::copy(bb_->predecessors.begin(), bb_->predecessors.end(), phi->meta.phi_incoming);
  bb_->PrependMIR(phi);
  return phi;
}
+
// Before mir_to_kill is removed, arrange for the SSA reg(s) it defines to come from
// somewhere else: if an earlier change (def_change) defines the same vreg, rename
// that def (and its uses up to last_change); otherwise create a new Phi — plus a
// high-word Phi for wide values. Returns the new low-word Phi, or nullptr if an
// existing def was renamed instead.
MIR* GvnDeadCodeElimination::RenameSRegDefOrCreatePhi(uint16_t def_change, uint16_t last_change,
                                                      MIR* mir_to_kill) {
  DCHECK(mir_to_kill->ssa_rep->num_defs == 1 || mir_to_kill->ssa_rep->num_defs == 2);
  bool wide = (mir_to_kill->ssa_rep->num_defs != 1);
  int new_s_reg = mir_to_kill->ssa_rep->defs[0];

  // Just before we kill mir_to_kill, we need to replace the previous SSA reg assigned to the
  // same dalvik reg to keep consistency with subsequent instructions. However, if there's no
  // defining MIR for that dalvik reg, the preserved values must come from its predecessors
  // and we need to create a new Phi (a degenerate Phi if there's only a single predecessor).
  if (def_change == kNPos) {
    bool fp = mir_to_kill->ssa_rep->fp_def[0];
    if (wide) {
      DCHECK_EQ(new_s_reg + 1, mir_to_kill->ssa_rep->defs[1]);
      DCHECK_EQ(fp, mir_to_kill->ssa_rep->fp_def[1]);
      DCHECK_EQ(mir_graph_->SRegToVReg(new_s_reg) + 1, mir_graph_->SRegToVReg(new_s_reg + 1));
      CreatePhi(new_s_reg + 1, fp);  // High word Phi.
    }
    return CreatePhi(new_s_reg, fp);
  } else {
    DCHECK_LT(def_change, last_change);
    DCHECK_LE(last_change, vreg_chains_.NumMIRs());
    MIRData* def_data = vreg_chains_.GetMIRData(def_change);
    DCHECK(def_data->has_def);
    int old_s_reg = def_data->mir->ssa_rep->defs[0];
    DCHECK_NE(old_s_reg, new_s_reg);
    DCHECK_EQ(mir_graph_->SRegToVReg(old_s_reg), mir_graph_->SRegToVReg(new_s_reg));
    def_data->mir->ssa_rep->defs[0] = new_s_reg;
    if (wide) {
      if (static_cast<int>(def_data->mir->dalvikInsn.opcode) == kMirOpPhi) {
        // Currently the high word Phi is always located after the low word Phi.
        MIR* phi_high = def_data->mir->next;
        DCHECK(phi_high != nullptr && static_cast<int>(phi_high->dalvikInsn.opcode) == kMirOpPhi);
        DCHECK_EQ(phi_high->ssa_rep->defs[0], old_s_reg + 1);
        phi_high->ssa_rep->defs[0] = new_s_reg + 1;
      } else {
        DCHECK_EQ(def_data->mir->ssa_rep->defs[1], old_s_reg + 1);
        def_data->mir->ssa_rep->defs[1] = new_s_reg + 1;
      }
    }
    // Make all intermediate uses refer to the renamed def.
    vreg_chains_.RenameSRegUses(def_change + 1u, last_change, old_s_reg, new_s_reg, wide);
    return nullptr;
  }
}
+
+
// Pop the last recorded MIR without killing it, updating the unused-vregs tracking
// used by subsequent backward-pass elimination attempts.
void GvnDeadCodeElimination::BackwardPassProcessLastMIR() {
  MIRData* data = vreg_chains_.LastMIRData();
  if (data->uses_all_vregs) {
    DCHECK(data->must_keep);
    // Everything is live right before an instruction that implicitly uses all vregs.
    unused_vregs_->ClearAllBits();
    DCHECK_EQ(no_uses_all_since_, vreg_chains_.NumMIRs());
    --no_uses_all_since_;
    // Move the marker back to just after the previous uses-all-vregs instruction.
    while (no_uses_all_since_ != 0u &&
        !vreg_chains_.GetMIRData(no_uses_all_since_ - 1u)->uses_all_vregs) {
      --no_uses_all_since_;
    }
  } else {
    if (data->has_def) {
      // Above this point the defined vreg(s) hold a value that gets overwritten here.
      unused_vregs_->SetBit(data->vreg_def);
      if (data->wide_def) {
        unused_vregs_->SetBit(data->vreg_def + 1);
      }
    }
    // Anything this MIR reads is live above it.
    for (int i = 0, num_uses = data->mir->ssa_rep->num_uses; i != num_uses; ++i) {
      int v_reg = mir_graph_->SRegToVReg(data->mir->ssa_rep->uses[i]);
      unused_vregs_->ClearBit(v_reg);
    }
  }
  vreg_chains_.RemoveLastMIRData();
}
+
// Eliminate the move at `move_change` by renaming the def of its source (recorded at
// `src_change`) to define the move's destination directly, then rewriting all uses in
// between and killing the move itself.
void GvnDeadCodeElimination::RecordPassKillMoveByRenamingSrcDef(uint16_t src_change,
                                                                uint16_t move_change) {
  DCHECK_LT(src_change, move_change);
  MIRData* src_data = vreg_chains_.GetMIRData(src_change);
  MIRData* move_data = vreg_chains_.GetMIRData(move_change);
  DCHECK(src_data->is_move_src);
  DCHECK_EQ(src_data->wide_def, move_data->wide_def);
  DCHECK(move_data->prev_value.change == kNPos || move_data->prev_value.change <= src_change);
  DCHECK(!move_data->wide_def || move_data->prev_value_high.change == kNPos ||
         move_data->prev_value_high.change <= src_change);

  int old_s_reg = src_data->mir->ssa_rep->defs[0];
  // NOTE: old_s_reg may differ from move_data->mir->ssa_rep->uses[0]; value names must match.
  int new_s_reg = move_data->mir->ssa_rep->defs[0];
  DCHECK_NE(old_s_reg, new_s_reg);

  if (IsInstructionBinOp2Addr(src_data->mir->dalvikInsn.opcode) &&
      src_data->vreg_def != move_data->vreg_def) {
    // Rewrite binop_2ADDR with plain binop before doing the register rename.
    ChangeBinOp2AddrToPlainBinOp(src_data->mir);
  }
  // Remove src_change from the vreg chain(s).
  vreg_chains_.RemoveChange(src_change);
  // Replace the move_change with the src_change, copying all necessary data.
  src_data->is_move_src = move_data->is_move_src;
  src_data->low_def_over_high_word = move_data->low_def_over_high_word;
  src_data->high_def_over_low_word = move_data->high_def_over_low_word;
  src_data->vreg_def = move_data->vreg_def;
  src_data->prev_value = move_data->prev_value;
  src_data->prev_value_high = move_data->prev_value_high;
  src_data->mir->dalvikInsn.vA = move_data->vreg_def;
  src_data->mir->ssa_rep->defs[0] = new_s_reg;
  if (move_data->wide_def) {
    DCHECK_EQ(src_data->mir->ssa_rep->defs[1], old_s_reg + 1);
    src_data->mir->ssa_rep->defs[1] = new_s_reg + 1;
  }
  vreg_chains_.ReplaceChange(move_change, src_change);

  // Rename uses and kill the move.
  vreg_chains_.RenameVRegUses(src_change + 1u, vreg_chains_.NumMIRs(),
                              old_s_reg, mir_graph_->SRegToVReg(old_s_reg),
                              new_s_reg, mir_graph_->SRegToVReg(new_s_reg));
  KillMIR(move_data);
}
+
// The destination vreg(s) of the move or move-source recorded at `check_change` have
// just been overwritten; try to eliminate a related move. For a move, rename its dest
// uses back to the source and kill it; for a move source, look for a later matching
// move (same value name) whose def can absorb this one.
void GvnDeadCodeElimination::RecordPassTryToKillOverwrittenMoveOrMoveSrc(uint16_t check_change) {
  MIRData* data = vreg_chains_.GetMIRData(check_change);
  DCHECK(data->is_move || data->is_move_src);
  int32_t dest_s_reg = data->mir->ssa_rep->defs[0];

  if (data->is_move) {
    // Check if source vreg has changed since the MOVE.
    int32_t src_s_reg = data->mir->ssa_rep->uses[0];
    uint32_t src_v_reg = mir_graph_->SRegToVReg(src_s_reg);
    uint16_t src_change = vreg_chains_.FindFirstChangeAfter(src_v_reg, check_change);
    bool wide = data->wide_def;
    if (wide) {
      // For a wide move, the earliest change to either word counts.
      uint16_t src_change_high = vreg_chains_.FindFirstChangeAfter(src_v_reg + 1, check_change);
      if (src_change_high != kNPos && (src_change == kNPos || src_change_high < src_change)) {
        src_change = src_change_high;
      }
    }
    if (src_change == kNPos ||
        !vreg_chains_.IsSRegUsed(src_change + 1u, vreg_chains_.NumMIRs(), dest_s_reg)) {
      // We can simply change all uses of dest to src.
      size_t rename_end = (src_change != kNPos) ? src_change + 1u : vreg_chains_.NumMIRs();
      vreg_chains_.RenameVRegUses(check_change + 1u, rename_end,
                                  dest_s_reg, mir_graph_->SRegToVReg(dest_s_reg),
                                  src_s_reg,  mir_graph_->SRegToVReg(src_s_reg));

      // Now, remove the MOVE from the vreg chain(s) and kill it.
      vreg_chains_.RemoveChange(check_change);
      KillMIR(data);
      return;
    }
  }

  if (data->is_move_src) {
    // Try to find a MOVE to a vreg that wasn't changed since check_change.
    uint16_t value_name =
        data->wide_def ? lvn_->GetSregValueWide(dest_s_reg) : lvn_->GetSregValue(dest_s_reg);
    for (size_t c = check_change + 1u, size = vreg_chains_.NumMIRs(); c != size; ++c) {
      MIRData* d = vreg_chains_.GetMIRData(c);
      if (d->is_move && d->wide_def == data->wide_def &&
          (d->prev_value.change == kNPos || d->prev_value.change <= check_change) &&
          (!d->wide_def ||
           d->prev_value_high.change == kNPos || d->prev_value_high.change <= check_change)) {
        // Compare value names to find move to move.
        int32_t src_s_reg = d->mir->ssa_rep->uses[0];
        uint16_t src_name =
            (d->wide_def ? lvn_->GetSregValueWide(src_s_reg) : lvn_->GetSregValue(src_s_reg));
        if (value_name == src_name) {
          RecordPassKillMoveByRenamingSrcDef(check_change, c);
          return;
        }
      }
    }
  }
}
+
void GvnDeadCodeElimination::RecordPassTryToKillOverwrittenMoveOrMoveSrc() {
  // Check if we're overwriting the result of a move or the definition of a source of a move.
  // For MOVE_WIDE, we may be overwriting partially; if that's the case, check that the other
  // word wasn't previously overwritten - we would have tried to rename back then.
  MIRData* data = vreg_chains_.LastMIRData();
  if (!data->has_def) {
    return;
  }
  // NOTE: Instructions such as new-array implicitly use all vregs (if they throw) but they can
  // define a move source which can be renamed. Therefore we allow the checked change to be the
  // change before no_uses_all_since_. This has no effect on moves as they never use all vregs.
  if (data->prev_value.change != kNPos && data->prev_value.change + 1u >= no_uses_all_since_) {
    MIRData* check_data = vreg_chains_.GetMIRData(data->prev_value.change);
    bool try_to_kill = false;
    if (!check_data->is_move && !check_data->is_move_src) {
      DCHECK(!try_to_kill);
    } else if (!check_data->wide_def) {
      // Narrow move; always fully overwritten by the last MIR.
      try_to_kill = true;
    } else if (data->low_def_over_high_word) {
      // Overwriting only the high word; is the low word still valid?
      DCHECK_EQ(check_data->vreg_def + 1u, data->vreg_def);
      if (vreg_chains_.LastChange(check_data->vreg_def) == data->prev_value.change) {
        try_to_kill = true;
      }
    } else if (!data->wide_def) {
      // Overwriting only the low word, is the high word still valid?
      if (vreg_chains_.LastChange(data->vreg_def + 1) == data->prev_value.change) {
        try_to_kill = true;
      }
    } else {
      // Overwriting both words; was the high word still from the same move?
      if (data->prev_value_high.change == data->prev_value.change) {
        try_to_kill = true;
      }
    }
    if (try_to_kill) {
      RecordPassTryToKillOverwrittenMoveOrMoveSrc(data->prev_value.change);
    }
  }
  // Also check the change overwritten by the high word of a wide def.
  if (data->wide_def && data->high_def_over_low_word &&
      data->prev_value_high.change != kNPos &&
      data->prev_value_high.change + 1u >= no_uses_all_since_) {
    MIRData* check_data = vreg_chains_.GetMIRData(data->prev_value_high.change);
    bool try_to_kill = false;
    if (!check_data->is_move && !check_data->is_move_src) {
      DCHECK(!try_to_kill);
    } else if (!check_data->wide_def) {
      // Narrow move; always fully overwritten by the last MIR.
      try_to_kill = true;
    } else if (vreg_chains_.LastChange(check_data->vreg_def + 1) ==
        data->prev_value_high.change) {
      // High word is still valid.
      try_to_kill = true;
    }
    if (try_to_kill) {
      RecordPassTryToKillOverwrittenMoveOrMoveSrc(data->prev_value_high.change);
    }
  }
}
+
// After recording the last MIR, try to kill it together with any immediately
// preceding redefinitions of the same vreg that produced the same value name —
// replacing the whole sequence with a renamed def or a Phi if needed.
void GvnDeadCodeElimination::RecordPassTryToKillLastMIR() {
  MIRData* last_data = vreg_chains_.LastMIRData();
  if (last_data->must_keep) {
    return;
  }
  if (UNLIKELY(!last_data->has_def)) {
    // Must be an eliminated MOVE. Drop its data and data of all eliminated MIRs before it.
    vreg_chains_.RemoveTrailingNops();
    return;
  }

  // Try to kill a sequence of consecutive definitions of the same vreg. Allow mixing
  // wide and non-wide defs; consider high word dead if low word has been overwritten.
  uint16_t current_value = vreg_chains_.CurrentValue(last_data->vreg_def);
  uint16_t change = vreg_chains_.NumMIRs() - 1u;
  MIRData* data = last_data;
  // Walk back through the chain while the previous value differs from current_value;
  // every step must be the immediately preceding change of the same vreg.
  while (data->prev_value.value != current_value) {
    --change;
    if (data->prev_value.change == kNPos || data->prev_value.change != change) {
      return;
    }
    data = vreg_chains_.GetMIRData(data->prev_value.change);
    if (data->must_keep || !data->has_def || data->vreg_def != last_data->vreg_def) {
      return;
    }
  }

  bool wide = last_data->wide_def;
  if (wide) {
    // Check that the low word is valid.
    if (data->low_def_over_high_word) {
      return;
    }
    // Check that the high word is valid.
    MIRData* high_data = data;
    if (!high_data->wide_def) {
      uint16_t high_change = vreg_chains_.FindFirstChangeAfter(data->vreg_def + 1, change);
      DCHECK_NE(high_change, kNPos);
      high_data = vreg_chains_.GetMIRData(high_change);
      DCHECK_EQ(high_data->vreg_def, data->vreg_def);
    }
    if (high_data->prev_value_high.value != current_value || high_data->high_def_over_low_word) {
      return;
    }
  }

  // Kill the whole sequence [change, NumMIRs()), fixing up the def first.
  MIR* phi = RenameSRegDefOrCreatePhi(data->prev_value.change, change, last_data->mir);
  for (size_t i = 0, count = vreg_chains_.NumMIRs() - change; i != count; ++i) {
    KillMIR(vreg_chains_.LastMIRData()->mir);
    vreg_chains_.RemoveLastMIRData();
  }
  if (phi != nullptr) {
    // Though the Phi has been added to the beginning, we can put the MIRData at the end.
    vreg_chains_.AddMIRWithDef(phi, phi->dalvikInsn.vA, wide, current_value);
    // Reset the previous value to avoid eventually eliminating the Phi itself (unless unused).
    last_data = vreg_chains_.LastMIRData();
    last_data->prev_value.value = kNoValue;
    last_data->prev_value_high.value = kNoValue;
  }
}
+
// Collect into changes_to_kill_ the changes in [first_change, last_change) that
// transitively depend on the vregs being killed (vregs_to_kill_ / kill_heads_).
uint16_t GvnDeadCodeElimination::FindChangesToKill(uint16_t first_change, uint16_t last_change) {
  // Process dependencies for changes in range [first_change, last_change) and record all
  // changes that we need to kill. Return kNPos if there's a dependent change that must be
  // kept unconditionally; otherwise the end of the range processed before encountering
  // a change that defines a dalvik reg that we need to keep (last_change on full success).
  changes_to_kill_.clear();
  dependent_vregs_->ClearAllBits();
  for (size_t change = first_change; change != last_change; ++change) {
    MIRData* data = vreg_chains_.GetMIRData(change);
    DCHECK(!data->uses_all_vregs);
    bool must_not_depend = data->must_keep;
    bool depends = false;
    // Check if the MIR defines a vreg we're trying to eliminate.
    if (data->has_def && vregs_to_kill_->IsBitSet(data->vreg_def)) {
      if (change < kill_heads_[data->vreg_def]) {
        must_not_depend = true;
      } else {
        depends = true;
      }
    }
    if (data->has_def && data->wide_def && vregs_to_kill_->IsBitSet(data->vreg_def + 1)) {
      if (change < kill_heads_[data->vreg_def + 1]) {
        must_not_depend = true;
      } else {
        depends = true;
      }
    }
    if (!depends) {
      // Check for dependency through SSA reg uses.
      SSARepresentation* ssa_rep = data->mir->ssa_rep;
      for (int i = 0; i != ssa_rep->num_uses; ++i) {
        if (dependent_vregs_->IsBitSet(mir_graph_->SRegToVReg(ssa_rep->uses[i]))) {
          depends = true;
          break;
        }
      }
    }
    // Now check if we can eliminate the insn if we need to.
    if (depends && must_not_depend) {
      return kNPos;
    }
    if (depends && data->has_def &&
        vreg_chains_.IsTopChange(change) && !vregs_to_kill_->IsBitSet(data->vreg_def) &&
        !unused_vregs_->IsBitSet(data->vreg_def) &&
        (!data->wide_def || !unused_vregs_->IsBitSet(data->vreg_def + 1))) {
      // This is a top change but neither unnecessary nor one of the top kill changes.
      return change;
    }
    // Finally, update the data.
    if (depends) {
      changes_to_kill_.push_back(change);
      if (data->has_def) {
        // The vregs defined here now carry a dependent (to-be-killed) value.
        dependent_vregs_->SetBit(data->vreg_def);
        if (data->wide_def) {
          dependent_vregs_->SetBit(data->vreg_def + 1);
        }
      }
    } else {
      if (data->has_def) {
        // An independent def overwrites any dependent value in these vregs.
        dependent_vregs_->ClearBit(data->vreg_def);
        if (data->wide_def) {
          dependent_vregs_->ClearBit(data->vreg_def + 1);
        }
      }
    }
  }
  return last_change;
}
+
// No-op: this hook currently has an empty implementation.
void GvnDeadCodeElimination::BackwardPassTryToKillRevertVRegs() {
}
+
// Try to eliminate the last recorded MIR (and everything that depends on it).
// Returns true if one or more MIRs were removed, false if the last MIR must stay.
bool GvnDeadCodeElimination::BackwardPassTryToKillLastMIR() {
  MIRData* last_data = vreg_chains_.LastMIRData();
  if (last_data->must_keep) {
    return false;
  }
  DCHECK(!last_data->uses_all_vregs);
  if (!last_data->has_def) {
    // Previously eliminated.
    DCHECK_EQ(static_cast<int>(last_data->mir->dalvikInsn.opcode), static_cast<int>(kMirOpNop));
    vreg_chains_.RemoveTrailingNops();
    return true;
  }
  // Easy case: the defined value is never read again.
  if (unused_vregs_->IsBitSet(last_data->vreg_def) ||
      (last_data->wide_def && unused_vregs_->IsBitSet(last_data->vreg_def + 1))) {
    if (last_data->wide_def) {
      // For wide defs, one of the vregs may still be considered needed, fix that.
      unused_vregs_->SetBit(last_data->vreg_def);
      unused_vregs_->SetBit(last_data->vreg_def + 1);
    }
    KillMIR(last_data->mir);
    vreg_chains_.RemoveLastMIRData();
    return true;
  }

  // Harder case: try to kill the whole dependency closure of the last change.
  vregs_to_kill_->ClearAllBits();
  size_t num_mirs = vreg_chains_.NumMIRs();
  DCHECK_NE(num_mirs, 0u);
  uint16_t kill_change = num_mirs - 1u;
  uint16_t start = num_mirs;
  size_t num_killed_top_changes = 0u;
  // Iteratively add top changes to the kill set until the dependency scan either
  // succeeds (kill_change == num_mirs), fails (kNPos), or we hit the limit.
  while (num_killed_top_changes != kMaxNumTopChangesToKill &&
      kill_change != kNPos && kill_change != num_mirs) {
    ++num_killed_top_changes;

    DCHECK(vreg_chains_.IsTopChange(kill_change));
    MIRData* data = vreg_chains_.GetMIRData(kill_change);
    int count = data->wide_def ? 2 : 1;
    for (int v_reg = data->vreg_def, end = data->vreg_def + count; v_reg != end; ++v_reg) {
      uint16_t kill_head = vreg_chains_.FindKillHead(v_reg, no_uses_all_since_);
      if (kill_head == kNPos) {
        return false;
      }
      kill_heads_[v_reg] = kill_head;
      vregs_to_kill_->SetBit(v_reg);
      start = std::min(start, kill_head);
    }
    DCHECK_LT(start, vreg_chains_.NumMIRs());

    kill_change = FindChangesToKill(start, num_mirs);
  }

  if (kill_change != num_mirs) {
    return false;
  }

  // Kill all MIRs marked as dependent.
  for (uint32_t v_reg : vregs_to_kill_->Indexes()) {
    // Rename s_regs or create Phi only once for each MIR (only for low word).
    MIRData* data = vreg_chains_.GetMIRData(vreg_chains_.LastChange(v_reg));
    DCHECK(data->has_def);
    if (data->vreg_def == v_reg) {
      MIRData* kill_head_data = vreg_chains_.GetMIRData(kill_heads_[v_reg]);
      RenameSRegDefOrCreatePhi(kill_head_data->PrevChange(v_reg), num_mirs, data->mir);
    } else {
      DCHECK_EQ(data->vreg_def + 1u, v_reg);
      DCHECK_EQ(vreg_chains_.GetMIRData(kill_heads_[v_reg - 1u])->PrevChange(v_reg - 1u),
                vreg_chains_.GetMIRData(kill_heads_[v_reg])->PrevChange(v_reg));
    }
  }
  unused_vregs_->Union(vregs_to_kill_);
  // Remove the collected changes in reverse order so chain indices stay valid.
  for (auto it = changes_to_kill_.rbegin(), end = changes_to_kill_.rend(); it != end; ++it) {
    MIRData* data = vreg_chains_.GetMIRData(*it);
    DCHECK(!data->must_keep);
    DCHECK(data->has_def);
    vreg_chains_.RemoveChange(*it);
    KillMIR(data);
  }

  vreg_chains_.RemoveTrailingNops();
  return true;
}
+
+bool GvnDeadCodeElimination::RecordMIR(MIR* mir) {
+  bool must_keep = false;
+  bool uses_all_vregs = false;
+  bool is_move = false;
+  uint16_t opcode = mir->dalvikInsn.opcode;
+  switch (opcode) {
+    case kMirOpPhi: {
+      // We can't recognize wide variables in Phi from num_defs == 2 as we've got two Phis instead.
+      DCHECK_EQ(mir->ssa_rep->num_defs, 1);
+      int s_reg = mir->ssa_rep->defs[0];
+      bool wide = false;
+      uint16_t new_value = lvn_->GetSregValue(s_reg);
+      if (new_value == kNoValue) {
+        wide = true;
+        new_value = lvn_->GetSregValueWide(s_reg);
+        if (new_value == kNoValue) {
+          return false;  // Ignore the high word Phi.
+        }
+      }
+
+      int v_reg = mir_graph_->SRegToVReg(s_reg);
+      DCHECK_EQ(vreg_chains_.CurrentValue(v_reg), kNoValue);  // No previous def for v_reg.
+      if (wide) {
+        DCHECK_EQ(vreg_chains_.CurrentValue(v_reg + 1), kNoValue);
+      }
+      vreg_chains_.AddMIRWithDef(mir, v_reg, wide, new_value);
+      return true;  // Avoid the common processing.
+    }
+
+    case kMirOpNop:
+    case Instruction::NOP:
+      // Don't record NOPs.
+      return false;
+
+    case kMirOpCheck:
+      must_keep = true;
+      uses_all_vregs = true;
+      break;
+
+    case Instruction::RETURN_VOID:
+    case Instruction::RETURN:
+    case Instruction::RETURN_OBJECT:
+    case Instruction::RETURN_WIDE:
+    case Instruction::GOTO:
+    case Instruction::GOTO_16:
+    case Instruction::GOTO_32:
+    case Instruction::PACKED_SWITCH:
+    case Instruction::SPARSE_SWITCH:
+    case Instruction::IF_EQ:
+    case Instruction::IF_NE:
+    case Instruction::IF_LT:
+    case Instruction::IF_GE:
+    case Instruction::IF_GT:
+    case Instruction::IF_LE:
+    case Instruction::IF_EQZ:
+    case Instruction::IF_NEZ:
+    case Instruction::IF_LTZ:
+    case Instruction::IF_GEZ:
+    case Instruction::IF_GTZ:
+    case Instruction::IF_LEZ:
+    case kMirOpFusedCmplFloat:
+    case kMirOpFusedCmpgFloat:
+    case kMirOpFusedCmplDouble:
+    case kMirOpFusedCmpgDouble:
+    case kMirOpFusedCmpLong:
+      must_keep = true;
+      uses_all_vregs = true;  // Keep the implicit dependencies on all vregs.
+      break;
+
+    case Instruction::CONST_CLASS:
+    case Instruction::CONST_STRING:
+    case Instruction::CONST_STRING_JUMBO:
+      // NOTE: While we're currently treating CONST_CLASS, CONST_STRING and CONST_STRING_JUMBO
+      // as throwing but we could conceivably try and eliminate those exceptions if we're
+      // retrieving the class/string repeatedly.
+      must_keep = true;
+      uses_all_vregs = true;
+      break;
+
+    case Instruction::MONITOR_ENTER:
+    case Instruction::MONITOR_EXIT:
+      // We can actually try and optimize across the acquire operation of MONITOR_ENTER,
+      // the value names provided by GVN reflect the possible changes to memory visibility.
+      // NOTE: In ART, MONITOR_ENTER and MONITOR_EXIT can throw only NPE.
+      must_keep = true;
+      uses_all_vregs = (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0;
+      break;
+
+    case Instruction::INVOKE_DIRECT:
+    case Instruction::INVOKE_DIRECT_RANGE:
+    case Instruction::INVOKE_VIRTUAL:
+    case Instruction::INVOKE_VIRTUAL_RANGE:
+    case Instruction::INVOKE_SUPER:
+    case Instruction::INVOKE_SUPER_RANGE:
+    case Instruction::INVOKE_INTERFACE:
+    case Instruction::INVOKE_INTERFACE_RANGE:
+    case Instruction::INVOKE_STATIC:
+    case Instruction::INVOKE_STATIC_RANGE:
+    case Instruction::CHECK_CAST:
+    case Instruction::THROW:
+    case Instruction::FILLED_NEW_ARRAY:
+    case Instruction::FILLED_NEW_ARRAY_RANGE:
+    case Instruction::FILL_ARRAY_DATA:
+      must_keep = true;
+      uses_all_vregs = true;
+      break;
+
+    case Instruction::NEW_INSTANCE:
+    case Instruction::NEW_ARRAY:
+      must_keep = true;
+      uses_all_vregs = true;
+      break;
+
+    case kMirOpNullCheck:
+      DCHECK_EQ(mir->ssa_rep->num_uses, 1);
+      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) {
+        mir->ssa_rep->num_uses = 0;
+        mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+        return false;
+      }
+      must_keep = true;
+      uses_all_vregs = true;
+      break;
+
+    case Instruction::MOVE_RESULT:
+    case Instruction::MOVE_RESULT_OBJECT:
+    case Instruction::MOVE_RESULT_WIDE:
+      break;
+
+    case Instruction::INSTANCE_OF:
+      break;
+
+    case Instruction::MOVE_EXCEPTION:
+      must_keep = true;
+      break;
+
+    case kMirOpCopy:
+    case Instruction::MOVE:
+    case Instruction::MOVE_FROM16:
+    case Instruction::MOVE_16:
+    case Instruction::MOVE_WIDE:
+    case Instruction::MOVE_WIDE_FROM16:
+    case Instruction::MOVE_WIDE_16:
+    case Instruction::MOVE_OBJECT:
+    case Instruction::MOVE_OBJECT_FROM16:
+    case Instruction::MOVE_OBJECT_16: {
+      is_move = true;
+      // If the MIR defining src vreg is known, allow renaming all uses of src vreg to dest vreg
+      // while updating the defining MIR to directly define dest vreg. However, changing Phi's
+      // def this way doesn't work without changing MIRs in other BBs.
+      int src_v_reg = mir_graph_->SRegToVReg(mir->ssa_rep->uses[0]);
+      int src_change = vreg_chains_.LastChange(src_v_reg);
+      if (src_change != kNPos) {
+        MIRData* src_data = vreg_chains_.GetMIRData(src_change);
+        if (static_cast<int>(src_data->mir->dalvikInsn.opcode) != kMirOpPhi) {
+          src_data->is_move_src = true;
+        }
+      }
+      break;
+    }
+
+    case Instruction::CONST_4:
+    case Instruction::CONST_16:
+    case Instruction::CONST:
+    case Instruction::CONST_HIGH16:
+    case Instruction::CONST_WIDE_16:
+    case Instruction::CONST_WIDE_32:
+    case Instruction::CONST_WIDE:
+    case Instruction::CONST_WIDE_HIGH16:
+    case Instruction::ARRAY_LENGTH:
+    case Instruction::CMPL_FLOAT:
+    case Instruction::CMPG_FLOAT:
+    case Instruction::CMPL_DOUBLE:
+    case Instruction::CMPG_DOUBLE:
+    case Instruction::CMP_LONG:
+    case Instruction::NEG_INT:
+    case Instruction::NOT_INT:
+    case Instruction::NEG_LONG:
+    case Instruction::NOT_LONG:
+    case Instruction::NEG_FLOAT:
+    case Instruction::NEG_DOUBLE:
+    case Instruction::INT_TO_LONG:
+    case Instruction::INT_TO_FLOAT:
+    case Instruction::INT_TO_DOUBLE:
+    case Instruction::LONG_TO_INT:
+    case Instruction::LONG_TO_FLOAT:
+    case Instruction::LONG_TO_DOUBLE:
+    case Instruction::FLOAT_TO_INT:
+    case Instruction::FLOAT_TO_LONG:
+    case Instruction::FLOAT_TO_DOUBLE:
+    case Instruction::DOUBLE_TO_INT:
+    case Instruction::DOUBLE_TO_LONG:
+    case Instruction::DOUBLE_TO_FLOAT:
+    case Instruction::INT_TO_BYTE:
+    case Instruction::INT_TO_CHAR:
+    case Instruction::INT_TO_SHORT:
+    case Instruction::ADD_INT:
+    case Instruction::SUB_INT:
+    case Instruction::MUL_INT:
+    case Instruction::AND_INT:
+    case Instruction::OR_INT:
+    case Instruction::XOR_INT:
+    case Instruction::SHL_INT:
+    case Instruction::SHR_INT:
+    case Instruction::USHR_INT:
+    case Instruction::ADD_LONG:
+    case Instruction::SUB_LONG:
+    case Instruction::MUL_LONG:
+    case Instruction::AND_LONG:
+    case Instruction::OR_LONG:
+    case Instruction::XOR_LONG:
+    case Instruction::SHL_LONG:
+    case Instruction::SHR_LONG:
+    case Instruction::USHR_LONG:
+    case Instruction::ADD_FLOAT:
+    case Instruction::SUB_FLOAT:
+    case Instruction::MUL_FLOAT:
+    case Instruction::DIV_FLOAT:
+    case Instruction::REM_FLOAT:
+    case Instruction::ADD_DOUBLE:
+    case Instruction::SUB_DOUBLE:
+    case Instruction::MUL_DOUBLE:
+    case Instruction::DIV_DOUBLE:
+    case Instruction::REM_DOUBLE:
+    case Instruction::ADD_INT_2ADDR:
+    case Instruction::SUB_INT_2ADDR:
+    case Instruction::MUL_INT_2ADDR:
+    case Instruction::AND_INT_2ADDR:
+    case Instruction::OR_INT_2ADDR:
+    case Instruction::XOR_INT_2ADDR:
+    case Instruction::SHL_INT_2ADDR:
+    case Instruction::SHR_INT_2ADDR:
+    case Instruction::USHR_INT_2ADDR:
+    case Instruction::ADD_LONG_2ADDR:
+    case Instruction::SUB_LONG_2ADDR:
+    case Instruction::MUL_LONG_2ADDR:
+    case Instruction::AND_LONG_2ADDR:
+    case Instruction::OR_LONG_2ADDR:
+    case Instruction::XOR_LONG_2ADDR:
+    case Instruction::SHL_LONG_2ADDR:
+    case Instruction::SHR_LONG_2ADDR:
+    case Instruction::USHR_LONG_2ADDR:
+    case Instruction::ADD_FLOAT_2ADDR:
+    case Instruction::SUB_FLOAT_2ADDR:
+    case Instruction::MUL_FLOAT_2ADDR:
+    case Instruction::DIV_FLOAT_2ADDR:
+    case Instruction::REM_FLOAT_2ADDR:
+    case Instruction::ADD_DOUBLE_2ADDR:
+    case Instruction::SUB_DOUBLE_2ADDR:
+    case Instruction::MUL_DOUBLE_2ADDR:
+    case Instruction::DIV_DOUBLE_2ADDR:
+    case Instruction::REM_DOUBLE_2ADDR:
+    case Instruction::ADD_INT_LIT16:
+    case Instruction::RSUB_INT:
+    case Instruction::MUL_INT_LIT16:
+    case Instruction::AND_INT_LIT16:
+    case Instruction::OR_INT_LIT16:
+    case Instruction::XOR_INT_LIT16:
+    case Instruction::ADD_INT_LIT8:
+    case Instruction::RSUB_INT_LIT8:
+    case Instruction::MUL_INT_LIT8:
+    case Instruction::AND_INT_LIT8:
+    case Instruction::OR_INT_LIT8:
+    case Instruction::XOR_INT_LIT8:
+    case Instruction::SHL_INT_LIT8:
+    case Instruction::SHR_INT_LIT8:
+    case Instruction::USHR_INT_LIT8:
+      break;
+
+    case Instruction::DIV_INT:
+    case Instruction::REM_INT:
+    case Instruction::DIV_LONG:
+    case Instruction::REM_LONG:
+    case Instruction::DIV_INT_2ADDR:
+    case Instruction::REM_INT_2ADDR:
+    case Instruction::DIV_LONG_2ADDR:
+    case Instruction::REM_LONG_2ADDR:
+      if ((mir->optimization_flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
+        must_keep = true;
+        uses_all_vregs = true;
+      }
+      break;
+
+    case Instruction::DIV_INT_LIT16:
+    case Instruction::REM_INT_LIT16:
+    case Instruction::DIV_INT_LIT8:
+    case Instruction::REM_INT_LIT8:
+      if (mir->dalvikInsn.vC == 0) {  // Explicit division by 0?
+        must_keep = true;
+        uses_all_vregs = true;
+      }
+      break;
+
+    case Instruction::AGET_OBJECT:
+    case Instruction::AGET:
+    case Instruction::AGET_WIDE:
+    case Instruction::AGET_BOOLEAN:
+    case Instruction::AGET_BYTE:
+    case Instruction::AGET_CHAR:
+    case Instruction::AGET_SHORT:
+      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 ||
+          (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) == 0) {
+        must_keep = true;
+        uses_all_vregs = true;
+      }
+      break;
+
+    case Instruction::APUT_OBJECT:
+    case Instruction::APUT:
+    case Instruction::APUT_WIDE:
+    case Instruction::APUT_BYTE:
+    case Instruction::APUT_BOOLEAN:
+    case Instruction::APUT_SHORT:
+    case Instruction::APUT_CHAR:
+      must_keep = true;
+      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 ||
+          (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) == 0) {
+        uses_all_vregs = true;
+      }
+      break;
+
+    case Instruction::IGET_OBJECT:
+    case Instruction::IGET:
+    case Instruction::IGET_WIDE:
+    case Instruction::IGET_BOOLEAN:
+    case Instruction::IGET_BYTE:
+    case Instruction::IGET_CHAR:
+    case Instruction::IGET_SHORT: {
+      const MirIFieldLoweringInfo& info = mir_graph_->GetIFieldLoweringInfo(mir);
+      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 ||
+          !info.IsResolved() || !info.FastGet()) {
+        must_keep = true;
+        uses_all_vregs = true;
+      } else if (info.IsVolatile()) {
+        must_keep = true;
+      }
+      break;
+    }
+
+    case Instruction::IPUT_OBJECT:
+    case Instruction::IPUT:
+    case Instruction::IPUT_WIDE:
+    case Instruction::IPUT_BOOLEAN:
+    case Instruction::IPUT_BYTE:
+    case Instruction::IPUT_CHAR:
+    case Instruction::IPUT_SHORT: {
+      must_keep = true;
+      const MirIFieldLoweringInfo& info = mir_graph_->GetIFieldLoweringInfo(mir);
+      if ((mir->optimization_flags & MIR_IGNORE_NULL_CHECK) == 0 ||
+          !info.IsResolved() || !info.FastPut()) {
+        uses_all_vregs = true;
+      }
+      break;
+    }
+
+    case Instruction::SGET_OBJECT:
+    case Instruction::SGET:
+    case Instruction::SGET_WIDE:
+    case Instruction::SGET_BOOLEAN:
+    case Instruction::SGET_BYTE:
+    case Instruction::SGET_CHAR:
+    case Instruction::SGET_SHORT: {
+      const MirSFieldLoweringInfo& info = mir_graph_->GetSFieldLoweringInfo(mir);
+      if ((mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0 ||
+          !info.IsResolved() || !info.FastGet()) {
+        must_keep = true;
+        uses_all_vregs = true;
+      } else if (info.IsVolatile()) {
+        must_keep = true;
+      }
+      break;
+    }
+
+    case Instruction::SPUT_OBJECT:
+    case Instruction::SPUT:
+    case Instruction::SPUT_WIDE:
+    case Instruction::SPUT_BOOLEAN:
+    case Instruction::SPUT_BYTE:
+    case Instruction::SPUT_CHAR:
+    case Instruction::SPUT_SHORT: {
+      must_keep = true;
+      const MirSFieldLoweringInfo& info = mir_graph_->GetSFieldLoweringInfo(mir);
+      if ((mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0 ||
+          !info.IsResolved() || !info.FastPut()) {
+        uses_all_vregs = true;
+      }
+      break;
+    }
+
+    default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+      UNREACHABLE();
+      break;
+  }
+
+  if (mir->ssa_rep->num_defs != 0) {
+    DCHECK(mir->ssa_rep->num_defs == 1 || mir->ssa_rep->num_defs == 2);
+    bool wide = (mir->ssa_rep->num_defs == 2);
+    int s_reg = mir->ssa_rep->defs[0];
+    int v_reg = mir_graph_->SRegToVReg(s_reg);
+    uint16_t new_value = wide ? lvn_->GetSregValueWide(s_reg) : lvn_->GetSregValue(s_reg);
+    DCHECK_NE(new_value, kNoValue);
+
+    vreg_chains_.UpdateInitialVRegValue(v_reg, wide, lvn_);
+    vreg_chains_.AddMIRWithDef(mir, v_reg, wide, new_value);
+    if (is_move) {
+      // Allow renaming all uses of dest vreg to src vreg.
+      vreg_chains_.LastMIRData()->is_move = true;
+    }
+  } else {
+    vreg_chains_.AddMIRWithoutDef(mir);
+    DCHECK(!is_move) << opcode;
+  }
+
+  if (must_keep) {
+    MIRData* last_data = vreg_chains_.LastMIRData();
+    last_data->must_keep = true;
+    if (uses_all_vregs) {
+      last_data->uses_all_vregs = true;
+      no_uses_all_since_ = vreg_chains_.NumMIRs();
+    }
+  } else {
+    DCHECK_NE(mir->ssa_rep->num_defs, 0) << opcode;
+    DCHECK(!uses_all_vregs) << opcode;
+  }
+  return true;
+}
+
+}  // namespace art
diff --git a/compiler/dex/gvn_dead_code_elimination.h b/compiler/dex/gvn_dead_code_elimination.h
new file mode 100644
index 0000000..ea28039
--- /dev/null
+++ b/compiler/dex/gvn_dead_code_elimination.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_GVN_DEAD_CODE_ELIMINATION_H_
+#define ART_COMPILER_DEX_GVN_DEAD_CODE_ELIMINATION_H_
+
+#include "global_value_numbering.h"
+#include "utils/arena_object.h"
+#include "utils/scoped_arena_containers.h"
+
+namespace art {
+
+class ArenaBitVector;
+class BasicBlock;
+class LocalValueNumbering;
+class MIR;
+class MIRGraph;
+
+/**
+ * @class GvnDeadCodeElimination
+ * @details Eliminate dead code based on the results of global value numbering.
+ * Also get rid of MOVE insns when we can use the source instead of destination
+ * without affecting the vreg values at safepoints; this is useful in methods
+ * with a large number of vregs that frequently move values to and from low vregs
+ * to accommodate insns that can work only with the low 16 or 256 vregs.
+ */
+class GvnDeadCodeElimination : public DeletableArenaObject<kArenaAllocMisc> {
+ public:
+  GvnDeadCodeElimination(const GlobalValueNumbering* gvn, ScopedArenaAllocator* alloc);
+
+  // Apply the DCE to a basic block.
+  void Apply(BasicBlock* bb);
+
+ private:
+  static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
+  // Sentinel change index meaning "no change"; see VRegValue::change and LastChange().
+  static constexpr uint16_t kNPos = 0xffffu;
+  // NOTE(review): bound used by the change-kill search in the .cc file; exact role
+  // is not visible from this header - confirm against the implementation.
+  static constexpr size_t kMaxNumTopChangesToKill = 2;
+
+  // Snapshot of a vreg's GVN value and the change that produced it.
+  struct VRegValue {
+    VRegValue() : value(kNoValue), change(kNPos) { }
+
+    // Value name as reported by GVN, kNoValue if not available.
+    uint16_t value;
+    // Index of the change in mir_data_ that defined the value, kNPos if initial value for the BB.
+    uint16_t change;
+  };
+
+  // Per-MIR bookkeeping for one recorded instruction ("change") in the current BB.
+  struct MIRData {
+    explicit MIRData(MIR* m)
+        : mir(m), uses_all_vregs(false), must_keep(false), is_move(false), is_move_src(false),
+          has_def(false), wide_def(false),
+          low_def_over_high_word(false), high_def_over_low_word(false), vreg_def(0u),
+          prev_value(), prev_value_high() {
+    }
+
+    uint16_t PrevChange(int v_reg) const;
+    void SetPrevChange(int v_reg, uint16_t change);
+    void RemovePrevChange(int v_reg, MIRData* prev_data);
+
+    MIR* mir;
+    bool uses_all_vregs : 1;  // If mir uses all vregs, uses in mir->ssa_rep are irrelevant.
+    bool must_keep : 1;       // The MIR has side effects or may throw; never eliminate it.
+    bool is_move : 1;         // The MIR is a MOVE/MOVE_WIDE/MOVE_OBJECT variant.
+    bool is_move_src : 1;     // The MIR defines the source vreg of a later move.
+    bool has_def : 1;
+    bool wide_def : 1;
+    // NOTE(review): the two flags below appear to track a def overwriting one half
+    // of a wide (two-vreg) value; usage is in the .cc file - confirm.
+    bool low_def_over_high_word : 1;
+    bool high_def_over_low_word : 1;
+    uint16_t vreg_def;
+    VRegValue prev_value;
+    VRegValue prev_value_high;   // For wide defs.
+  };
+
+  // Tracks, for each vreg, the chain of recorded changes (defs) in the current BB.
+  class VRegChains {
+   public:
+    VRegChains(uint32_t num_vregs, ScopedArenaAllocator* alloc);
+
+    void Reset();
+
+    void AddMIRWithDef(MIR* mir, int v_reg, bool wide, uint16_t new_value);
+    void AddMIRWithoutDef(MIR* mir);
+    void RemoveLastMIRData();
+    void RemoveTrailingNops();
+
+    size_t NumMIRs() const;
+    MIRData* GetMIRData(size_t pos);
+    MIRData* LastMIRData();
+
+    uint32_t NumVRegs() const;
+    void InsertInitialValueHigh(int v_reg, uint16_t value);
+    void UpdateInitialVRegValue(int v_reg, bool wide, const LocalValueNumbering* lvn);
+    uint16_t LastChange(int v_reg);
+    uint16_t CurrentValue(int v_reg);
+
+    uint16_t FindKillHead(int v_reg, uint16_t cutoff);
+    uint16_t FindFirstChangeAfter(int v_reg, uint16_t change) const;
+    void ReplaceChange(uint16_t old_change, uint16_t new_change);
+    void RemoveChange(uint16_t change);
+    bool IsTopChange(uint16_t change) const;
+    bool IsSRegUsed(uint16_t first_change, uint16_t last_change, int s_reg) const;
+    void RenameSRegUses(uint16_t first_change, uint16_t last_change,
+                        int old_s_reg, int new_s_reg, bool wide);
+    void RenameVRegUses(uint16_t first_change, uint16_t last_change,
+                        int old_s_reg, int old_v_reg, int new_s_reg, int new_v_reg);
+
+   private:
+    const uint32_t num_vregs_;
+    VRegValue* const vreg_data_;
+    ScopedArenaVector<MIRData> mir_data_;
+  };
+
+  // The two DCE phases; presumably invoked from Apply() - see the .cc file.
+  void RecordPass();
+  void BackwardPass();
+
+  void KillMIR(MIRData* data);
+  static void KillMIR(MIR* mir);
+  static void ChangeBinOp2AddrToPlainBinOp(MIR* mir);
+  MIR* CreatePhi(int s_reg, bool fp);
+  MIR* RenameSRegDefOrCreatePhi(uint16_t def_change, uint16_t last_change, MIR* mir_to_kill);
+
+  // Update state variables going backwards through a MIR.
+  void BackwardPassProcessLastMIR();
+
+  uint16_t FindChangesToKill(uint16_t first_change, uint16_t last_change);
+  void BackwardPassTryToKillRevertVRegs();
+  bool BackwardPassTryToKillLastMIR();
+
+  void RecordPassKillMoveByRenamingSrcDef(uint16_t src_change, uint16_t move_change);
+  void RecordPassTryToKillOverwrittenMoveOrMoveSrc(uint16_t check_change);
+  void RecordPassTryToKillOverwrittenMoveOrMoveSrc();
+  void RecordPassTryToKillLastMIR();
+
+  bool RecordMIR(MIR* mir);
+
+  const GlobalValueNumbering* const gvn_;
+  MIRGraph* const mir_graph_;
+
+  VRegChains vreg_chains_;
+  BasicBlock* bb_;                  // Basic block currently being processed.
+  const LocalValueNumbering* lvn_;  // LVN of the current basic block.
+  size_t no_uses_all_since_;  // The change index after the last change with uses_all_vregs set.
+
+  // Data used when processing MIRs in reverse order.
+  ArenaBitVector* unused_vregs_;              // vregs that are not needed later.
+  ArenaBitVector* vregs_to_kill_;             // vregs that revert to a previous value.
+  uint16_t* kill_heads_;  // For each vreg in vregs_to_kill_, the first change to kill.
+  ScopedArenaVector<uint16_t> changes_to_kill_;
+  ArenaBitVector* dependent_vregs_;
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_GVN_DEAD_CODE_ELIMINATION_H_
diff --git a/compiler/dex/gvn_dead_code_elimination_test.cc b/compiler/dex/gvn_dead_code_elimination_test.cc
new file mode 100644
index 0000000..954e9f1
--- /dev/null
+++ b/compiler/dex/gvn_dead_code_elimination_test.cc
@@ -0,0 +1,1800 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dataflow_iterator-inl.h"
+#include "dex/mir_field_info.h"
+#include "global_value_numbering.h"
+#include "gvn_dead_code_elimination.h"
+#include "local_value_numbering.h"
+#include "gtest/gtest.h"
+
+namespace art {
+
+class GvnDeadCodeEliminationTest : public testing::Test {
+ protected:
+  static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
+
+  // Instance field description for a test; declaring_dex_file == 0u means "unresolved"
+  // (see DoPrepareIFields()).
+  struct IFieldDef {
+    uint16_t field_idx;
+    uintptr_t declaring_dex_file;
+    uint16_t declaring_field_idx;
+    bool is_volatile;
+    DexMemAccessType type;
+  };
+
+  // Static field description for a test; declaring_dex_file == 0u means "unresolved"
+  // (see DoPrepareSFields()).
+  struct SFieldDef {
+    uint16_t field_idx;
+    uintptr_t declaring_dex_file;
+    uint16_t declaring_field_idx;
+    bool is_volatile;
+    DexMemAccessType type;
+  };
+
+  // Basic block description for a test; see DoPrepareBasicBlocks().
+  struct BBDef {
+    static constexpr size_t kMaxSuccessors = 4;
+    static constexpr size_t kMaxPredecessors = 4;
+
+    BBType type;
+    size_t num_successors;
+    BasicBlockId successors[kMaxSuccessors];  // Fixed: was sized by kMaxPredecessors.
+    size_t num_predecessors;
+    BasicBlockId predecessors[kMaxPredecessors];
+  };
+
+  // Compact MIR description for test arrays; typically built via the DEF_* macros below.
+  struct MIRDef {
+    static constexpr size_t kMaxSsaDefs = 2;
+    static constexpr size_t kMaxSsaUses = 4;
+
+    BasicBlockId bbid;
+    Instruction::Code opcode;
+    int64_t value;        // Opcode-dependent payload (literal value, string/field index, ...).
+    uint32_t field_info;  // Index into the ifield/sfield lowering info tables.
+    size_t num_uses;
+    int32_t uses[kMaxSsaUses];
+    size_t num_defs;
+    int32_t defs[kMaxSsaDefs];
+  };
+
+  // Helper macros for building BBDef/MIRDef aggregate initializers in test arrays.
+  // DEF_SUCCn/DEF_PREDn expand to a "count, { list }" pair; the DEF_* MIR macros
+  // expand to full MIRDef initializers for the corresponding instruction class.
+#define DEF_SUCC0() \
+    0u, { }
+#define DEF_SUCC1(s1) \
+    1u, { s1 }
+#define DEF_SUCC2(s1, s2) \
+    2u, { s1, s2 }
+#define DEF_SUCC3(s1, s2, s3) \
+    3u, { s1, s2, s3 }
+#define DEF_SUCC4(s1, s2, s3, s4) \
+    4u, { s1, s2, s3, s4 }
+#define DEF_PRED0() \
+    0u, { }
+#define DEF_PRED1(p1) \
+    1u, { p1 }
+#define DEF_PRED2(p1, p2) \
+    2u, { p1, p2 }
+#define DEF_PRED3(p1, p2, p3) \
+    3u, { p1, p2, p3 }
+#define DEF_PRED4(p1, p2, p3, p4) \
+    4u, { p1, p2, p3, p4 }
+#define DEF_BB(type, succ, pred) \
+    { type, succ, pred }
+
+#define DEF_CONST(bb, opcode, reg, value) \
+    { bb, opcode, value, 0u, 0, { }, 1, { reg } }
+#define DEF_CONST_WIDE(bb, opcode, reg, value) \
+    { bb, opcode, value, 0u, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_CONST_STRING(bb, opcode, reg, index) \
+    { bb, opcode, index, 0u, 0, { }, 1, { reg } }
+#define DEF_IGET(bb, opcode, reg, obj, field_info) \
+    { bb, opcode, 0u, field_info, 1, { obj }, 1, { reg } }
+#define DEF_IGET_WIDE(bb, opcode, reg, obj, field_info) \
+    { bb, opcode, 0u, field_info, 1, { obj }, 2, { reg, reg + 1 } }
+#define DEF_IPUT(bb, opcode, reg, obj, field_info) \
+    { bb, opcode, 0u, field_info, 2, { reg, obj }, 0, { } }
+#define DEF_IPUT_WIDE(bb, opcode, reg, obj, field_info) \
+    { bb, opcode, 0u, field_info, 3, { reg, reg + 1, obj }, 0, { } }
+#define DEF_SGET(bb, opcode, reg, field_info) \
+    { bb, opcode, 0u, field_info, 0, { }, 1, { reg } }
+#define DEF_SGET_WIDE(bb, opcode, reg, field_info) \
+    { bb, opcode, 0u, field_info, 0, { }, 2, { reg, reg + 1 } }
+#define DEF_SPUT(bb, opcode, reg, field_info) \
+    { bb, opcode, 0u, field_info, 1, { reg }, 0, { } }
+#define DEF_SPUT_WIDE(bb, opcode, reg, field_info) \
+    { bb, opcode, 0u, field_info, 2, { reg, reg + 1 }, 0, { } }
+#define DEF_AGET(bb, opcode, reg, obj, idx) \
+    { bb, opcode, 0u, 0u, 2, { obj, idx }, 1, { reg } }
+#define DEF_AGET_WIDE(bb, opcode, reg, obj, idx) \
+    { bb, opcode, 0u, 0u, 2, { obj, idx }, 2, { reg, reg + 1 } }
+#define DEF_APUT(bb, opcode, reg, obj, idx) \
+    { bb, opcode, 0u, 0u, 3, { reg, obj, idx }, 0, { } }
+#define DEF_APUT_WIDE(bb, opcode, reg, obj, idx) \
+    { bb, opcode, 0u, 0u, 4, { reg, reg + 1, obj, idx }, 0, { } }
+#define DEF_INVOKE1(bb, opcode, reg) \
+    { bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
+#define DEF_UNIQUE_REF(bb, opcode, reg) \
+    { bb, opcode, 0u, 0u, 0, { }, 1, { reg } }  // CONST_CLASS, CONST_STRING, NEW_ARRAY, ...
+#define DEF_IFZ(bb, opcode, reg) \
+    { bb, opcode, 0u, 0u, 1, { reg }, 0, { } }
+#define DEF_MOVE(bb, opcode, reg, src) \
+    { bb, opcode, 0u, 0u, 1, { src }, 1, { reg } }
+#define DEF_MOVE_WIDE(bb, opcode, reg, src) \
+    { bb, opcode, 0u, 0u, 2, { src, src + 1 }, 2, { reg, reg + 1 } }
+#define DEF_PHI2(bb, reg, src1, src2) \
+    { bb, static_cast<Instruction::Code>(kMirOpPhi), 0, 0u, 2u, { src1, src2 }, 1, { reg } }
+#define DEF_UNOP(bb, opcode, result, src1) \
+    { bb, opcode, 0u, 0u, 1, { src1 }, 1, { result } }
+#define DEF_BINOP(bb, opcode, result, src1, src2) \
+    { bb, opcode, 0u, 0u, 2, { src1, src2 }, 1, { result } }
+
+  // Replace the graph's instance-field lowering info with entries built from defs.
+  // Entries with declaring_dex_file == 0u stay unresolved.
+  void DoPrepareIFields(const IFieldDef* defs, size_t count) {
+    cu_.mir_graph->ifield_lowering_infos_.clear();
+    cu_.mir_graph->ifield_lowering_infos_.reserve(count);
+    for (size_t i = 0u; i != count; ++i) {
+      const IFieldDef* def = &defs[i];
+      MirIFieldLoweringInfo field_info(def->field_idx, def->type);
+      if (def->declaring_dex_file != 0u) {
+        field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
+        field_info.declaring_field_idx_ = def->declaring_field_idx;
+        // Mark resolved fields fast-get/fast-put; clear the volatile flag unless
+        // the test declared the field volatile.
+        field_info.flags_ =
+            MirIFieldLoweringInfo::kFlagFastGet | MirIFieldLoweringInfo::kFlagFastPut |
+            (field_info.flags_ & ~(def->is_volatile ? 0u : MirIFieldLoweringInfo::kFlagIsVolatile));
+      }
+      cu_.mir_graph->ifield_lowering_infos_.push_back(field_info);
+    }
+  }
+
+  // Array-sized convenience wrapper for DoPrepareIFields().
+  template <size_t count>
+  void PrepareIFields(const IFieldDef (&defs)[count]) {
+    DoPrepareIFields(defs, count);
+  }
+
+  // Replace the graph's static-field lowering info with entries built from defs.
+  // Entries with declaring_dex_file == 0u stay unresolved.
+  void DoPrepareSFields(const SFieldDef* defs, size_t count) {
+    cu_.mir_graph->sfield_lowering_infos_.clear();
+    cu_.mir_graph->sfield_lowering_infos_.reserve(count);
+    for (size_t i = 0u; i != count; ++i) {
+      const SFieldDef* def = &defs[i];
+      MirSFieldLoweringInfo field_info(def->field_idx, def->type);
+      // Mark even unresolved fields as initialized.
+      field_info.flags_ |= MirSFieldLoweringInfo::kFlagClassIsInitialized;
+      // NOTE: MirSFieldLoweringInfo::kFlagClassIsInDexCache isn't used by GVN.
+      if (def->declaring_dex_file != 0u) {
+        field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
+        field_info.declaring_field_idx_ = def->declaring_field_idx;
+        // Mark resolved fields fast-get/fast-put; clear the volatile flag unless
+        // the test declared the field volatile.
+        field_info.flags_ =
+            MirSFieldLoweringInfo::kFlagFastGet | MirSFieldLoweringInfo::kFlagFastPut |
+            (field_info.flags_ & ~(def->is_volatile ? 0u : MirSFieldLoweringInfo::kFlagIsVolatile));
+      }
+      cu_.mir_graph->sfield_lowering_infos_.push_back(field_info);
+    }
+  }
+
+  // Array-sized convenience wrapper for DoPrepareSFields().
+  template <size_t count>
+  void PrepareSFields(const SFieldDef (&defs)[count]) {
+    DoPrepareSFields(defs, count);
+  }
+
+  // Build the MIRGraph's block list from defs. Blocks with more than two successors
+  // are modeled as a packed switch; bytecode/entry/exit blocks get data_flow_info.
+  void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
+    cu_.mir_graph->block_id_map_.clear();
+    cu_.mir_graph->block_list_.clear();
+    ASSERT_LT(3u, count);  // null, entry, exit and at least one bytecode block.
+    ASSERT_EQ(kNullBlock, defs[0].type);
+    ASSERT_EQ(kEntryBlock, defs[1].type);
+    ASSERT_EQ(kExitBlock, defs[2].type);
+    for (size_t i = 0u; i != count; ++i) {
+      const BBDef* def = &defs[i];
+      BasicBlock* bb = cu_.mir_graph->CreateNewBB(def->type);
+      if (def->num_successors <= 2) {
+        bb->successor_block_list_type = kNotUsed;
+        bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
+        bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
+      } else {
+        bb->successor_block_list_type = kPackedSwitch;
+        bb->fall_through = 0u;
+        bb->taken = 0u;
+        bb->successor_blocks.reserve(def->num_successors);
+        for (size_t j = 0u; j != def->num_successors; ++j) {
+          SuccessorBlockInfo* successor_block_info =
+              static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
+                                                               kArenaAllocSuccessor));
+          // NOTE(review): stores the loop index rather than def->successors[j];
+          // presumably the successor ids are irrelevant to these tests - confirm.
+          successor_block_info->block = j;
+          successor_block_info->key = 0u;  // Not used by the DCE tests.
+          bb->successor_blocks.push_back(successor_block_info);
+        }
+      }
+      bb->predecessors.assign(def->predecessors, def->predecessors + def->num_predecessors);
+      if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
+        bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
+            cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
+        bb->data_flow_info->live_in_v = live_in_v_;
+        bb->data_flow_info->vreg_to_ssa_map_exit = nullptr;
+      }
+    }
+    ASSERT_EQ(count, cu_.mir_graph->block_list_.size());
+    cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_[1];
+    ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
+    cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_[2];
+    ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
+  }
+
+  // Array-sized convenience wrapper for DoPrepareBasicBlocks().
+  template <size_t count>
+  void PrepareBasicBlocks(const BBDef (&defs)[count]) {
+    DoPrepareBasicBlocks(defs, count);
+  }
+
+  // Map an SSA reg to its Dalvik vreg, checking that it (and its high word, if
+  // wide) is within the test's vreg count.
+  int SRegToVReg(int32_t s_reg, bool wide) {
+    int v_reg = cu_.mir_graph->SRegToVReg(s_reg);
+    CHECK_LT(static_cast<size_t>(v_reg), num_vregs_);
+    if (wide) {
+      CHECK_LT(static_cast<size_t>(v_reg + 1), num_vregs_);
+    }
+    return v_reg;
+  }
+
+  // Consume one SSA use (two consecutive uses for wide) starting at *use and
+  // return the corresponding vreg; advances *use past the consumed entries.
+  int SRegToVReg(int32_t* uses, size_t* use, bool wide) {
+    int v_reg = SRegToVReg(uses[*use], wide);
+    if (wide) {
+      CHECK_EQ(uses[*use] + 1, uses[*use + 1]);
+      *use += 2u;
+    } else {
+      *use += 1u;
+    }
+    return v_reg;
+  }
+
+  // Allocate and populate the MIRs described by defs: append each to its BB, fill
+  // in SSA uses/defs, derive vA/vB/vC from the data-flow attributes, and install
+  // a fake code item sized for the test.
+  void DoPrepareMIRs(const MIRDef* defs, size_t count) {
+    mir_count_ = count;
+    mirs_ = reinterpret_cast<MIR*>(cu_.arena.Alloc(sizeof(MIR) * count, kArenaAllocMIR));
+    ssa_reps_.resize(count);
+    for (size_t i = 0u; i != count; ++i) {
+      const MIRDef* def = &defs[i];
+      MIR* mir = &mirs_[i];
+      ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.size());
+      BasicBlock* bb = cu_.mir_graph->block_list_[def->bbid];
+      bb->AppendMIR(mir);
+      mir->dalvikInsn.opcode = def->opcode;
+      mir->dalvikInsn.vB = static_cast<int32_t>(def->value);
+      mir->dalvikInsn.vB_wide = def->value;
+      if (IsInstructionIGetOrIPut(def->opcode)) {
+        ASSERT_LT(def->field_info, cu_.mir_graph->ifield_lowering_infos_.size());
+        mir->meta.ifield_lowering_info = def->field_info;
+        ASSERT_EQ(cu_.mir_graph->ifield_lowering_infos_[def->field_info].MemAccessType(),
+                  IGetOrIPutMemAccessType(def->opcode));
+      } else if (IsInstructionSGetOrSPut(def->opcode)) {
+        ASSERT_LT(def->field_info, cu_.mir_graph->sfield_lowering_infos_.size());
+        mir->meta.sfield_lowering_info = def->field_info;
+        ASSERT_EQ(cu_.mir_graph->sfield_lowering_infos_[def->field_info].MemAccessType(),
+                  SGetOrSPutMemAccessType(def->opcode));
+      } else if (def->opcode == static_cast<Instruction::Code>(kMirOpPhi)) {
+        mir->meta.phi_incoming =
+            allocator_->AllocArray<BasicBlockId>(def->num_uses, kArenaAllocDFInfo);
+        ASSERT_EQ(def->num_uses, bb->predecessors.size());
+        std::copy(bb->predecessors.begin(), bb->predecessors.end(), mir->meta.phi_incoming);
+      }
+      mir->ssa_rep = &ssa_reps_[i];
+      cu_.mir_graph->AllocateSSAUseData(mir, def->num_uses);
+      std::copy_n(def->uses, def->num_uses, mir->ssa_rep->uses);
+      // Keep mir->ssa_rep->fp_use[.] zero-initialized (false). Not used by DCE, only copied.
+      cu_.mir_graph->AllocateSSADefData(mir, def->num_defs);
+      std::copy_n(def->defs, def->num_defs, mir->ssa_rep->defs);
+      // Keep mir->ssa_rep->fp_def[.] zero-initialized (false). Not used by DCE, only copied.
+      mir->dalvikInsn.opcode = def->opcode;  // Redundant: already set above.
+      mir->offset = i;  // LVN uses the offset only for debug output.
+      mir->optimization_flags = 0u;
+      uint64_t df_attrs = MIRGraph::GetDataFlowAttributes(mir);
+      if ((df_attrs & DF_DA) != 0) {
+        CHECK_NE(def->num_defs, 0u);
+        mir->dalvikInsn.vA = SRegToVReg(def->defs[0], (df_attrs & DF_A_WIDE) != 0);
+        bb->data_flow_info->vreg_to_ssa_map_exit[mir->dalvikInsn.vA] = def->defs[0];
+        if ((df_attrs & DF_A_WIDE) != 0) {
+          CHECK_EQ(def->defs[0] + 1, def->defs[1]);
+          bb->data_flow_info->vreg_to_ssa_map_exit[mir->dalvikInsn.vA + 1u] = def->defs[0] + 1;
+        }
+      }
+      if ((df_attrs & (DF_UA | DF_UB | DF_UC)) != 0) {
+        size_t use = 0;
+        if ((df_attrs & DF_UA) != 0) {
+          mir->dalvikInsn.vA = SRegToVReg(mir->ssa_rep->uses, &use, (df_attrs & DF_A_WIDE) != 0);
+        }
+        if ((df_attrs & DF_UB) != 0) {
+          mir->dalvikInsn.vB = SRegToVReg(mir->ssa_rep->uses, &use, (df_attrs & DF_B_WIDE) != 0);
+        }
+        if ((df_attrs & DF_UC) != 0) {
+          mir->dalvikInsn.vC = SRegToVReg(mir->ssa_rep->uses, &use, (df_attrs & DF_C_WIDE) != 0);
+        }
+        DCHECK_EQ(def->num_uses, use);
+      }
+    }
+    DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(
+        cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
+    code_item->insns_size_in_code_units_ = 2u * count;
+    code_item->registers_size_ = kMaxVRegs;
+    cu_.mir_graph->current_code_item_ = code_item;
+  }
+
+  // Array-sized convenience wrapper for DoPrepareMIRs().
+  template <size_t count>
+  void PrepareMIRs(const MIRDef (&defs)[count]) {
+    DoPrepareMIRs(defs, count);
+  }
+
+  // Install the SSA-reg -> vreg map, derive the vreg count from its maximum, and
+  // allocate each BB's vreg_to_ssa_map_exit array filled with INVALID_SREG.
+  template <size_t count>
+  void PrepareSRegToVRegMap(const int (&map)[count]) {
+    cu_.mir_graph->ssa_base_vregs_.assign(map, map + count);
+    num_vregs_ = *std::max_element(map, map + count) + 1u;
+    AllNodesIterator iterator(cu_.mir_graph.get());
+    for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
+      if (bb->data_flow_info != nullptr) {
+        bb->data_flow_info->vreg_to_ssa_map_exit = static_cast<int32_t*>(
+            cu_.arena.Alloc(sizeof(int32_t) * num_vregs_, kArenaAllocDFInfo));
+        std::fill_n(bb->data_flow_info->vreg_to_ssa_map_exit, num_vregs_, INVALID_SREG);
+      }
+    }
+  }
+
+  // Run the SSA-transformation prerequisites and then GVN to convergence over the
+  // loop-repeating topological order, recording each MIR's value name.
+  void PerformGVN() {
+    cu_.mir_graph->SSATransformationStart();
+    cu_.mir_graph->ComputeDFSOrders();
+    cu_.mir_graph->ComputeDominators();
+    cu_.mir_graph->ComputeTopologicalSortOrder();
+    cu_.mir_graph->SSATransformationEnd();
+    cu_.mir_graph->temp_.gvn.ifield_ids =  GlobalValueNumbering::PrepareGvnFieldIds(
+        allocator_.get(), cu_.mir_graph->ifield_lowering_infos_);
+    cu_.mir_graph->temp_.gvn.sfield_ids =  GlobalValueNumbering::PrepareGvnFieldIds(
+        allocator_.get(), cu_.mir_graph->sfield_lowering_infos_);
+    ASSERT_TRUE(gvn_ == nullptr);
+    gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get(),
+                                                           GlobalValueNumbering::kModeGvn));
+    value_names_.resize(mir_count_, 0xffffu);
+    LoopRepeatingTopologicalSortIterator iterator(cu_.mir_graph.get());
+    bool change = false;
+    for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) {
+      LocalValueNumbering* lvn = gvn_->PrepareBasicBlock(bb);
+      if (lvn != nullptr) {
+        for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+          value_names_[mir - mirs_] = lvn->GetValueNumber(mir);
+        }
+      }
+      change = (lvn != nullptr) && gvn_->FinishBasicBlock(bb);
+      ASSERT_TRUE(gvn_->Good());
+    }
+  }
+
+  // Re-run value numbering in post-processing mode (which applies code
+  // modifications), checking that value names are unchanged and GVN stays stable.
+  void PerformGVNCodeModifications() {
+    ASSERT_TRUE(gvn_ != nullptr);
+    ASSERT_TRUE(gvn_->Good());
+    gvn_->StartPostProcessing();
+    TopologicalSortIterator iterator(cu_.mir_graph.get());
+    for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
+      LocalValueNumbering* lvn = gvn_->PrepareBasicBlock(bb);
+      if (lvn != nullptr) {
+        for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+          uint16_t value_name = lvn->GetValueNumber(mir);
+          ASSERT_EQ(value_name, value_names_[mir - mirs_]);
+        }
+      }
+      bool change = (lvn != nullptr) && gvn_->FinishBasicBlock(bb);
+      ASSERT_FALSE(change);
+      ASSERT_TRUE(gvn_->Good());
+    }
+  }
+
+  // Complete each bytecode BB's vreg_to_ssa_map_exit: entries the BB did not
+  // define are inherited from its first predecessor.
+  void FillVregToSsaRegExitMaps() {
+    // Fill in vreg_to_ssa_map_exit for each BB.
+    PreOrderDfsIterator iterator(cu_.mir_graph.get());
+    for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
+      if (bb->block_type == kDalvikByteCode) {
+        CHECK(!bb->predecessors.empty());
+        BasicBlock* pred_bb = cu_.mir_graph->GetBasicBlock(bb->predecessors[0]);
+        for (size_t v_reg = 0; v_reg != num_vregs_; ++v_reg) {
+          if (bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] == INVALID_SREG) {
+            bb->data_flow_info->vreg_to_ssa_map_exit[v_reg] =
+                pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
+          }
+        }
+      }
+    }
+  }
+
+  // Runs the GVN-based dead code elimination pass over all code blocks,
+  // using the value names computed by the preceding GVN passes.
+  void PerformDCE() {
+    FillVregToSsaRegExitMaps();
+    // NOTE(review): the result of this call is discarded and no side effect is
+    // visible here — confirm whether this line is leftover and can be removed.
+    cu_.mir_graph->GetNumOfCodeAndTempVRs();
+    dce_.reset(new (allocator_.get()) GvnDeadCodeElimination(gvn_.get(), allocator_.get()));
+    PreOrderDfsIterator iterator(cu_.mir_graph.get());
+    for (BasicBlock* bb = iterator.Next(); bb != nullptr; bb = iterator.Next()) {
+      if (bb->block_type == kDalvikByteCode) {
+        dce_->Apply(bb);
+      }
+    }
+  }
+
+  // Convenience driver: full GVN analysis, post-processing, then DCE —
+  // the sequence every test in this file exercises.
+  void PerformGVN_DCE() {
+    PerformGVN();
+    PerformGVNCodeModifications();  // Eliminate null/range checks.
+    PerformDCE();
+  }
+
+  // Checks that the GVN value names recorded for the MIRs at the given
+  // indexes are pairwise distinct.
+  template <size_t count>
+  void ExpectValueNamesNE(const size_t (&indexes)[count]) {
+    for (size_t i1 = 0; i1 != count; ++i1) {
+      size_t idx1 = indexes[i1];
+      for (size_t i2 = i1 + 1; i2 != count; ++i2) {
+        size_t idx2 = indexes[i2];
+        EXPECT_NE(value_names_[idx1], value_names_[idx2]) << idx1 << " " << idx2;
+      }
+    }
+  }
+
+  // Checks that exactly the MIRs at the given indexes had their null check
+  // eliminated (MIR_IGNORE_NULL_CHECK set) — and, via the total count,
+  // that no other MIR has the flag.
+  template <size_t count>
+  void ExpectNoNullCheck(const size_t (&indexes)[count]) {
+    for (size_t i = 0; i != count; ++i) {
+      size_t idx = indexes[i];
+      EXPECT_EQ(MIR_IGNORE_NULL_CHECK, mirs_[idx].optimization_flags & MIR_IGNORE_NULL_CHECK)
+          << idx;
+    }
+    size_t num_no_null_ck = 0u;
+    for (size_t i = 0; i != mir_count_; ++i) {
+      if ((mirs_[i].optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) {
+        ++num_no_null_ck;
+      }
+    }
+    EXPECT_EQ(count, num_no_null_ck);
+  }
+
+  // Sets up a compilation unit for a static, no-argument, void-returning
+  // method and pre-binds the maximum number of SSA registers so that tests
+  // can freely map s_regs to v_regs via PrepareSRegToVRegMap().
+  GvnDeadCodeEliminationTest()
+      : pool_(),
+        cu_(&pool_, kRuntimeISA, nullptr, nullptr),
+        num_vregs_(0u),
+        mir_count_(0u),
+        mirs_(nullptr),
+        ssa_reps_(),
+        allocator_(),
+        gvn_(),
+        dce_(),
+        value_names_(),
+        live_in_v_(new (&cu_.arena) ArenaBitVector(&cu_.arena, kMaxSsaRegs, false, kBitMapMisc)) {
+    cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
+    cu_.access_flags = kAccStatic;  // Don't let "this" interfere with this test.
+    allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
+    // By default, the zero-initialized reg_location_[.] with ref == false tells LVN that
+    // 0 constants are integral, not references. Nothing else is used by LVN/GVN.
+    cu_.mir_graph->reg_location_ = static_cast<RegLocation*>(cu_.arena.Alloc(
+        kMaxSsaRegs * sizeof(cu_.mir_graph->reg_location_[0]), kArenaAllocRegAlloc));
+    // Bind all possible sregs to live vregs for test purposes.
+    live_in_v_->SetInitialBits(kMaxSsaRegs);
+    cu_.mir_graph->ssa_base_vregs_.reserve(kMaxSsaRegs);
+    cu_.mir_graph->ssa_subscripts_.reserve(kMaxSsaRegs);
+    for (unsigned int i = 0; i < kMaxSsaRegs; i++) {
+      // Identity mapping with subscript 0; tests overwrite this as needed.
+      cu_.mir_graph->ssa_base_vregs_.push_back(i);
+      cu_.mir_graph->ssa_subscripts_.push_back(0);
+    }
+    // Set shorty for a void-returning method without arguments.
+    cu_.shorty = "V";
+  }
+
+  // Upper bounds for the test graphs; individual tests use far fewer.
+  static constexpr size_t kMaxSsaRegs = 16384u;
+  static constexpr size_t kMaxVRegs = 256u;
+
+  ArenaPool pool_;
+  CompilationUnit cu_;
+  size_t num_vregs_;   // Number of virtual registers used by the current test.
+  size_t mir_count_;   // Number of entries in mirs_.
+  MIR* mirs_;          // MIR array built from the test's MIRDef table.
+  std::vector<SSARepresentation> ssa_reps_;
+  std::unique_ptr<ScopedArenaAllocator> allocator_;
+  std::unique_ptr<GlobalValueNumbering> gvn_;
+  std::unique_ptr<GvnDeadCodeElimination> dce_;
+  std::vector<uint16_t> value_names_;  // GVN value name per MIR, indexed like mirs_.
+  ArenaBitVector* live_in_v_;
+};
+
+// Out-of-class storage definition for the static constexpr member
+// (required when it is odr-used, prior to C++17 inline variables).
+constexpr uint16_t GvnDeadCodeEliminationTest::kNoValue;
+
+// Fixture with a single straight-line code block: entry -> #3 -> exit.
+class GvnDeadCodeEliminationTestSimple : public GvnDeadCodeEliminationTest {
+ public:
+  GvnDeadCodeEliminationTestSimple();
+
+ private:
+  static const BBDef kSimpleBbs[];
+};
+
+const GvnDeadCodeEliminationTest::BBDef GvnDeadCodeEliminationTestSimple::kSimpleBbs[] = {
+    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(3)),
+    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(1)),  // Block #3, the only code block.
+};
+
+GvnDeadCodeEliminationTestSimple::GvnDeadCodeEliminationTestSimple()
+    : GvnDeadCodeEliminationTest() {
+  PrepareBasicBlocks(kSimpleBbs);
+}
+
+// Fixture with a diamond CFG: #3 branches to #4/#5 which merge in #6.
+class GvnDeadCodeEliminationTestDiamond : public GvnDeadCodeEliminationTest {
+ public:
+  GvnDeadCodeEliminationTestDiamond();
+
+ private:
+  static const BBDef kDiamondBbs[];
+};
+
+const GvnDeadCodeEliminationTest::BBDef GvnDeadCodeEliminationTestDiamond::kDiamondBbs[] = {
+    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
+    DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),  // Block #3, top of the diamond.
+    DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),     // Block #4, left side.
+    DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),     // Block #5, right side.
+    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)),  // Block #6, bottom.
+};
+
+GvnDeadCodeEliminationTestDiamond::GvnDeadCodeEliminationTestDiamond()
+    : GvnDeadCodeEliminationTest() {
+  PrepareBasicBlocks(kDiamondBbs);
+}
+
+// Fixture with a single-block loop: #4 loops back to itself.
+class GvnDeadCodeEliminationTestLoop : public GvnDeadCodeEliminationTest {
+ public:
+  GvnDeadCodeEliminationTestLoop();
+
+ private:
+  static const BBDef kLoopBbs[];
+};
+
+const GvnDeadCodeEliminationTest::BBDef GvnDeadCodeEliminationTestLoop::kLoopBbs[] = {
+    DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+    DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+    DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
+    DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),        // Block #3, loop pre-header.
+    DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED2(3, 4)),  // "taken" loops to self.
+    DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),        // Block #5, loop exit.
+};
+
+GvnDeadCodeEliminationTestLoop::GvnDeadCodeEliminationTestLoop()
+    : GvnDeadCodeEliminationTest() {
+  PrepareBasicBlocks(kLoopBbs);
+}
+
+// A MOVE_OBJECT copy of a NEW_INSTANCE reference is eliminated and the
+// dependent IGET is renamed to use the original def's s_reg 0 / v_reg 0.
+TEST_F(GvnDeadCodeEliminationTestSimple, Rename1) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessWord },
+      { 1u, 1u, 1u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
+      DEF_MOVE(3, Instruction::MOVE_OBJECT, 2u, 0u),
+      DEF_IGET(3, Instruction::IGET, 3u, 2u, 1u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 2 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 3 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[0], value_names_[2]);  // The move copies the reference.
+
+  const size_t no_null_ck_indexes[] = { 1, 3 };
+  ExpectNoNullCheck(no_null_ck_indexes);
+
+  static const bool eliminated[] = {
+      false, false, true, false
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the IGET uses the s_reg 0, v_reg 0, defined by mirs_[0].
+  ASSERT_EQ(1, mirs_[3].ssa_rep->num_uses);
+  EXPECT_EQ(0, mirs_[3].ssa_rep->uses[0]);
+  EXPECT_EQ(0u, mirs_[3].dalvikInsn.vB);
+}
+
+// As Rename1, with an extra CONST overwriting the move's destination v_reg
+// afterwards; the move is still eliminated and the IGET renamed.
+TEST_F(GvnDeadCodeEliminationTestSimple, Rename2) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessWord },
+      { 1u, 1u, 1u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
+      DEF_MOVE(3, Instruction::MOVE_OBJECT, 2u, 0u),
+      DEF_IGET(3, Instruction::IGET, 3u, 2u, 1u),
+      DEF_CONST(3, Instruction::CONST, 4u, 1000),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 2 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 3, 4 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[0], value_names_[2]);  // The move copies the reference.
+
+  const size_t no_null_ck_indexes[] = { 1, 3 };
+  ExpectNoNullCheck(no_null_ck_indexes);
+
+  static const bool eliminated[] = {
+      false, false, true, false, false
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the IGET uses the s_reg 0, v_reg 0, defined by mirs_[0].
+  ASSERT_EQ(1, mirs_[3].ssa_rep->num_uses);
+  EXPECT_EQ(0, mirs_[3].ssa_rep->uses[0]);
+  EXPECT_EQ(0u, mirs_[3].dalvikInsn.vB);
+}
+
+// Here v_reg 0 is overwritten by the last IGET's result, so the move is
+// eliminated the other way around: the NEW_INSTANCE def (and its first use)
+// is renamed to the move's destination, s_reg 2 / v_reg 2.
+TEST_F(GvnDeadCodeEliminationTestSimple, Rename3) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessWord },
+      { 1u, 1u, 1u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
+      DEF_MOVE(3, Instruction::MOVE_OBJECT, 2u, 0u),
+      DEF_IGET(3, Instruction::IGET, 3u, 2u, 1u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 0 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 3 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[0], value_names_[2]);  // The move copies the reference.
+
+  const size_t no_null_ck_indexes[] = { 1, 3 };
+  ExpectNoNullCheck(no_null_ck_indexes);
+
+  static const bool eliminated[] = {
+      false, false, true, false
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the NEW_INSTANCE defines the s_reg 2, v_reg 2, originally defined by the move.
+  ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
+  EXPECT_EQ(2, mirs_[0].ssa_rep->defs[0]);
+  EXPECT_EQ(2u, mirs_[0].dalvikInsn.vA);
+  // Check that the first IGET is using the s_reg 2, v_reg 2.
+  ASSERT_EQ(1, mirs_[1].ssa_rep->num_uses);
+  EXPECT_EQ(2, mirs_[1].ssa_rep->uses[0]);
+  EXPECT_EQ(2u, mirs_[1].dalvikInsn.vB);
+}
+
+// A chain of two MOVE_OBJECTs is eliminated; with v_regs 0 and 1 clobbered
+// by the CONST_WIDE, the NEW_INSTANCE def is renamed to the final v_reg 2.
+TEST_F(GvnDeadCodeEliminationTestSimple, Rename4) {
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_MOVE(3, Instruction::MOVE_OBJECT, 1u, 0u),
+      DEF_MOVE(3, Instruction::MOVE_OBJECT, 2u, 1u),
+      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 3u, 1000u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 0, 1 /* high word */ };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 3 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[0], value_names_[1]);
+  EXPECT_EQ(value_names_[0], value_names_[2]);
+
+  static const bool eliminated[] = {
+      false, true, true, false
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the NEW_INSTANCE defines the s_reg 2, v_reg 2, originally defined by the move 2u.
+  ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
+  EXPECT_EQ(2, mirs_[0].ssa_rep->defs[0]);
+  EXPECT_EQ(2u, mirs_[0].dalvikInsn.vA);
+}
+
+// Move chain elimination with intervening uses and clobbers; the
+// NEW_INSTANCE def is renamed to the last copy's s_reg 4 / v_reg 3.
+TEST_F(GvnDeadCodeEliminationTestSimple, Rename5) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
+      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 2u, 1u),
+      DEF_MOVE(3, Instruction::MOVE_OBJECT, 3u, 0u),
+      DEF_MOVE(3, Instruction::MOVE_OBJECT, 4u, 3u),
+      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 5u, 1000u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 3, 0, 1 /* high word */ };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 2, 5 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[0], value_names_[3]);
+  EXPECT_EQ(value_names_[0], value_names_[4]);
+
+  static const bool eliminated[] = {
+      false, false, false, true, true, false
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the NEW_INSTANCE defines the s_reg 4, v_reg 3, originally defined by the move 4u.
+  ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
+  EXPECT_EQ(4, mirs_[0].ssa_rep->defs[0]);
+  EXPECT_EQ(3u, mirs_[0].dalvikInsn.vA);
+}
+
+// A wide MOVE_WIDE into an overlapping v_reg pair is eliminated; the
+// CONST_WIDE's def pair is renamed to the move's destination s_regs/v_regs.
+TEST_F(GvnDeadCodeEliminationTestSimple, Rename6) {
+  static const MIRDef mirs[] = {
+      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 0u, 1000u),
+      DEF_MOVE_WIDE(3, Instruction::MOVE_WIDE, 2u, 0u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1 /* high word */, 1, 2 /* high word */ };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  EXPECT_EQ(value_names_[0], value_names_[1]);
+
+  static const bool eliminated[] = {
+      false, true
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the CONST_WIDE defines the s_reg 2, v_reg 1, originally defined by the move 2u.
+  ASSERT_EQ(2, mirs_[0].ssa_rep->num_defs);
+  EXPECT_EQ(2, mirs_[0].ssa_rep->defs[0]);
+  EXPECT_EQ(3, mirs_[0].ssa_rep->defs[1]);
+  EXPECT_EQ(1u, mirs_[0].dalvikInsn.vA);
+}
+
+// The MOVE is eliminated by renaming the CONST's def to v_reg 1 (v_reg 0
+// is reused for the ADD_INT result); both ADD_INT uses are renamed too.
+TEST_F(GvnDeadCodeEliminationTestSimple, Rename7) {
+  static const MIRDef mirs[] = {
+      DEF_CONST(3, Instruction::CONST, 0u, 1000u),
+      DEF_MOVE(3, Instruction::MOVE, 1u, 0u),
+      DEF_BINOP(3, Instruction::ADD_INT, 2u, 0u, 1u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 0 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  EXPECT_NE(value_names_[0], value_names_[2]);
+  EXPECT_EQ(value_names_[0], value_names_[1]);
+
+  static const bool eliminated[] = {
+      false, true, false
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the CONST defines the s_reg 1, v_reg 1, originally defined by the move 1u.
+  ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
+  EXPECT_EQ(1, mirs_[0].ssa_rep->defs[0]);
+  EXPECT_EQ(1u, mirs_[0].dalvikInsn.vA);
+  // Check that the ADD_INT inputs are both s_reg1, vreg 1.
+  ASSERT_EQ(2, mirs_[2].ssa_rep->num_uses);
+  EXPECT_EQ(1, mirs_[2].ssa_rep->uses[0]);
+  EXPECT_EQ(1, mirs_[2].ssa_rep->uses[1]);
+  EXPECT_EQ(1u, mirs_[2].dalvikInsn.vB);
+  EXPECT_EQ(1u, mirs_[2].dalvikInsn.vC);
+}
+
+// As Rename7, but with ADD_INT_2ADDR: after renaming, the destination
+// v_reg no longer matches the first source, so the instruction must be
+// rewritten to the non-2addr ADD_INT form.
+TEST_F(GvnDeadCodeEliminationTestSimple, Rename8) {
+  static const MIRDef mirs[] = {
+      DEF_CONST(3, Instruction::CONST, 0u, 1000u),
+      DEF_MOVE(3, Instruction::MOVE, 1u, 0u),
+      DEF_BINOP(3, Instruction::ADD_INT_2ADDR, 2u, 0u, 1u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 0 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  EXPECT_NE(value_names_[0], value_names_[2]);
+  EXPECT_EQ(value_names_[0], value_names_[1]);
+
+  static const bool eliminated[] = {
+      false, true, false
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the CONST defines the s_reg 1, v_reg 1, originally defined by the move 1u.
+  ASSERT_EQ(1, mirs_[0].ssa_rep->num_defs);
+  EXPECT_EQ(1, mirs_[0].ssa_rep->defs[0]);
+  EXPECT_EQ(1u, mirs_[0].dalvikInsn.vA);
+  // Check that the ADD_INT_2ADDR was replaced by ADD_INT and inputs are both s_reg 1, vreg 1.
+  EXPECT_EQ(Instruction::ADD_INT, mirs_[2].dalvikInsn.opcode);
+  ASSERT_EQ(2, mirs_[2].ssa_rep->num_uses);
+  EXPECT_EQ(1, mirs_[2].ssa_rep->uses[0]);
+  EXPECT_EQ(1, mirs_[2].ssa_rep->uses[1]);
+  EXPECT_EQ(1u, mirs_[2].dalvikInsn.vB);
+  EXPECT_EQ(1u, mirs_[2].dalvikInsn.vC);
+}
+
+// The MOVE of the ADD_INT_2ADDR result is eliminated by renaming the add's
+// def to v_reg 1, which again forces the 2addr form to become ADD_INT.
+TEST_F(GvnDeadCodeEliminationTestSimple, Rename9) {
+  static const MIRDef mirs[] = {
+      DEF_CONST(3, Instruction::CONST, 0u, 1000u),
+      DEF_BINOP(3, Instruction::ADD_INT_2ADDR, 1u, 0u, 0u),
+      DEF_MOVE(3, Instruction::MOVE, 2u, 1u),
+      DEF_CONST(3, Instruction::CONST, 3u, 3000u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 0, 1, 0 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 3 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[1], value_names_[2]);
+
+  static const bool eliminated[] = {
+      false, false, true, false
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the ADD_INT_2ADDR was replaced by ADD_INT and output is in s_reg 2, vreg 1.
+  EXPECT_EQ(Instruction::ADD_INT, mirs_[1].dalvikInsn.opcode);
+  ASSERT_EQ(2, mirs_[1].ssa_rep->num_uses);
+  EXPECT_EQ(0, mirs_[1].ssa_rep->uses[0]);
+  EXPECT_EQ(0, mirs_[1].ssa_rep->uses[1]);
+  EXPECT_EQ(0u, mirs_[1].dalvikInsn.vB);
+  EXPECT_EQ(0u, mirs_[1].dalvikInsn.vC);
+  ASSERT_EQ(1, mirs_[1].ssa_rep->num_defs);
+  EXPECT_EQ(2, mirs_[1].ssa_rep->defs[0]);
+  EXPECT_EQ(1u, mirs_[1].dalvikInsn.vA);
+}
+
+// The MOVE_OBJECT cannot be eliminated: renaming the NEW_INSTANCE def to
+// v_reg 1 would clash with the IGET result still live there, and renaming
+// the final use back to v_reg 0 is blocked by the CONST clobbering it.
+TEST_F(GvnDeadCodeEliminationTestSimple, NoRename1) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessWord },
+      { 1u, 1u, 1u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
+      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 2u, 1u),
+      DEF_MOVE(3, Instruction::MOVE_OBJECT, 3u, 0u),
+      DEF_CONST(3, Instruction::CONST, 4u, 1000),
+      DEF_IGET(3, Instruction::IGET, 5u, 3u, 1u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 0, 1 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 2, 4, 5 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[0], value_names_[3]);
+
+  const size_t no_null_ck_indexes[] = { 1, 5 };
+  ExpectNoNullCheck(no_null_ck_indexes);
+
+  // Nothing is eliminated.
+  static const bool eliminated[] = {
+      false, false, false, false, false, false
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+}
+
+// The MOVE_OBJECT cannot be eliminated: its destination v_reg 2 is occupied
+// by the second NEW_INSTANCE reference before the move, and v_reg 0 is
+// clobbered by the CONST before the last use.
+TEST_F(GvnDeadCodeEliminationTestSimple, NoRename2) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessWord },
+      { 1u, 1u, 1u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 2u),
+      DEF_MOVE(3, Instruction::MOVE_OBJECT, 3u, 0u),
+      DEF_CONST(3, Instruction::CONST, 4u, 1000),
+      DEF_IGET(3, Instruction::IGET, 5u, 3u, 1u),
+      DEF_CONST(3, Instruction::CONST, 6u, 2000),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 2, 0, 3, 2 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 2, 4, 5, 6 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[0], value_names_[3]);
+
+  const size_t no_null_ck_indexes[] = { 1, 5 };
+  ExpectNoNullCheck(no_null_ck_indexes);
+
+  // Nothing is eliminated.
+  static const bool eliminated[] = {
+      false, false, false, false, false, false, false
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+}
+
+// The MOVE_OBJECT cannot be eliminated: v_reg 2 holds the second IGET's
+// result until the move, and v_reg 0 is clobbered by the final IGET before
+// the copied reference would be dead.
+TEST_F(GvnDeadCodeEliminationTestSimple, NoRename3) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessWord },
+      { 1u, 1u, 1u, false, kDexMemAccessWord },
+      { 2u, 1u, 2u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_IGET(3, Instruction::IGET, 1u, 0u, 0u),
+      DEF_IGET(3, Instruction::IGET, 2u, 0u, 2u),
+      DEF_BINOP(3, Instruction::ADD_INT, 3u, 1u, 2u),
+      DEF_MOVE(3, Instruction::MOVE_OBJECT, 4u, 0u),
+      DEF_IGET(3, Instruction::IGET, 5u, 4u, 1u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 2, 0 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 2, 3, 5 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[0], value_names_[4]);
+
+  const size_t no_null_ck_indexes[] = { 1, 2, 5 };
+  ExpectNoNullCheck(no_null_ck_indexes);
+
+  // Nothing is eliminated.
+  static const bool eliminated[] = {
+      false, false, false, false, false, false
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+}
+
+// The second pair of IGET_OBJECTs reloads the same field values into the
+// same v_regs; both are eliminated and the first pair's defs are renamed
+// to the later s_regs (4 and 5).
+TEST_F(GvnDeadCodeEliminationTestSimple, Simple1) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessObject },
+      { 1u, 1u, 1u, false, kDexMemAccessObject },
+      { 2u, 1u, 2u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_IGET(3, Instruction::IGET_OBJECT, 1u, 0u, 0u),
+      DEF_IGET(3, Instruction::IGET_OBJECT, 2u, 1u, 1u),
+      DEF_IGET(3, Instruction::IGET, 3u, 2u, 2u),
+      DEF_IGET(3, Instruction::IGET_OBJECT, 4u, 0u, 0u),
+      DEF_IGET(3, Instruction::IGET_OBJECT, 5u, 4u, 1u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 1, 2 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  EXPECT_NE(value_names_[0], value_names_[1]);
+  EXPECT_NE(value_names_[0], value_names_[2]);
+  EXPECT_NE(value_names_[0], value_names_[3]);
+  EXPECT_NE(value_names_[1], value_names_[2]);
+  EXPECT_NE(value_names_[1], value_names_[3]);
+  EXPECT_NE(value_names_[2], value_names_[3]);
+  EXPECT_EQ(value_names_[1], value_names_[4]);
+  EXPECT_EQ(value_names_[2], value_names_[5]);
+
+  EXPECT_EQ(MIR_IGNORE_NULL_CHECK, mirs_[4].optimization_flags & MIR_IGNORE_NULL_CHECK);
+  EXPECT_EQ(MIR_IGNORE_NULL_CHECK, mirs_[5].optimization_flags & MIR_IGNORE_NULL_CHECK);
+
+  static const bool eliminated[] = {
+      false, false, false, false, true, true
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the sregs have been renamed correctly.
+  ASSERT_EQ(1, mirs_[1].ssa_rep->num_defs);
+  EXPECT_EQ(4, mirs_[1].ssa_rep->defs[0]);
+  ASSERT_EQ(1, mirs_[1].ssa_rep->num_uses);
+  EXPECT_EQ(0, mirs_[1].ssa_rep->uses[0]);
+  ASSERT_EQ(1, mirs_[2].ssa_rep->num_defs);
+  EXPECT_EQ(5, mirs_[2].ssa_rep->defs[0]);
+  ASSERT_EQ(1, mirs_[2].ssa_rep->num_uses);
+  EXPECT_EQ(4, mirs_[2].ssa_rep->uses[0]);
+  ASSERT_EQ(1, mirs_[3].ssa_rep->num_defs);
+  EXPECT_EQ(3, mirs_[3].ssa_rep->defs[0]);
+  ASSERT_EQ(1, mirs_[3].ssa_rep->num_uses);
+  EXPECT_EQ(5, mirs_[3].ssa_rep->uses[0]);
+}
+
+// The repeated IGET + ADD_INT_2ADDR recomputation is eliminated; the first
+// computation's defs are renamed to the later s_regs so the eliminated
+// instructions' values remain available.
+TEST_F(GvnDeadCodeEliminationTestSimple, Simple2) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_CONST(3, Instruction::CONST, 1u, 1000),
+      DEF_IGET(3, Instruction::IGET, 2u, 0u, 0u),
+      DEF_BINOP(3, Instruction::ADD_INT_2ADDR, 3u, 2u, 1u),
+      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 4u, 3u),
+      DEF_IGET(3, Instruction::IGET, 5u, 0u, 0u),
+      DEF_BINOP(3, Instruction::ADD_INT_2ADDR, 6u, 5u, 1u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 2, 3, 2, 2 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 2, 3 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[2], value_names_[5]);
+  EXPECT_EQ(value_names_[3], value_names_[6]);
+
+  const size_t no_null_ck_indexes[] = { 2, 5 };
+  ExpectNoNullCheck(no_null_ck_indexes);
+
+  static const bool eliminated[] = {
+      false, false, false, false, false, true, true
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the sregs have been renamed correctly.
+  ASSERT_EQ(1, mirs_[3].ssa_rep->num_defs);
+  EXPECT_EQ(6, mirs_[3].ssa_rep->defs[0]);
+  ASSERT_EQ(2, mirs_[3].ssa_rep->num_uses);
+  EXPECT_EQ(2, mirs_[3].ssa_rep->uses[0]);
+  EXPECT_EQ(1, mirs_[3].ssa_rep->uses[1]);
+  ASSERT_EQ(1, mirs_[4].ssa_rep->num_defs);
+  EXPECT_EQ(4, mirs_[4].ssa_rep->defs[0]);
+  ASSERT_EQ(1, mirs_[4].ssa_rep->num_uses);
+  EXPECT_EQ(6, mirs_[4].ssa_rep->uses[0]);
+}
+
+// Cascading elimination: removing the recomputed ADD+MUL makes the
+// recomputed IGET+SUB eligible for simple elimination too; defs 6 and 7
+// are renamed to 11 and 12 respectively.
+TEST_F(GvnDeadCodeEliminationTestSimple, Simple3) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_CONST(3, Instruction::CONST, 1u, 1000),
+      DEF_CONST(3, Instruction::CONST, 2u, 2000),
+      DEF_CONST(3, Instruction::CONST, 3u, 3000),
+      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
+      DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
+      DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
+      DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
+      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
+      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
+      DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
+      DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),  // Simple elimination of ADD+MUL
+      DEF_BINOP(3, Instruction::SUB_INT, 12u, 11u, 3u),  // allows simple elimination of IGET+SUB.
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 5, 4, 6, 4, 5, 5, 4 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[4], value_names_[9]);
+  EXPECT_EQ(value_names_[5], value_names_[10]);
+  EXPECT_EQ(value_names_[6], value_names_[11]);
+  EXPECT_EQ(value_names_[7], value_names_[12]);
+
+  const size_t no_null_ck_indexes[] = { 4, 9 };
+  ExpectNoNullCheck(no_null_ck_indexes);
+
+  static const bool eliminated[] = {
+      false, false, false, false, false, false, false, false, false, true, true, true, true
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the sregs have been renamed correctly.
+  ASSERT_EQ(1, mirs_[6].ssa_rep->num_defs);
+  EXPECT_EQ(11, mirs_[6].ssa_rep->defs[0]);  // 6 -> 11
+  ASSERT_EQ(2, mirs_[6].ssa_rep->num_uses);
+  EXPECT_EQ(5, mirs_[6].ssa_rep->uses[0]);
+  EXPECT_EQ(2, mirs_[6].ssa_rep->uses[1]);
+  ASSERT_EQ(1, mirs_[7].ssa_rep->num_defs);
+  EXPECT_EQ(12, mirs_[7].ssa_rep->defs[0]);  // 7 -> 12
+  ASSERT_EQ(2, mirs_[7].ssa_rep->num_uses);
+  EXPECT_EQ(11, mirs_[7].ssa_rep->uses[0]);  // 6 -> 11
+  EXPECT_EQ(3, mirs_[7].ssa_rep->uses[1]);
+  ASSERT_EQ(1, mirs_[8].ssa_rep->num_defs);
+  EXPECT_EQ(8, mirs_[8].ssa_rep->defs[0]);
+  ASSERT_EQ(1, mirs_[8].ssa_rep->num_uses);
+  EXPECT_EQ(12, mirs_[8].ssa_rep->uses[0]);  // 7 -> 12
+}
+
+// Cascading elimination with wide values: removing the recomputed
+// CONST_WIDE + LONG_TO_FLOAT makes the recomputed IGET eligible too;
+// the surviving defs are renamed to the later s_regs (3->8, 4->9).
+TEST_F(GvnDeadCodeEliminationTestSimple, Simple4) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 1u, INT64_C(1)),
+      DEF_BINOP(3, Instruction::LONG_TO_FLOAT, 3u, 1u, 2u),
+      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
+      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 5u, 4u),
+      DEF_CONST_WIDE(3, Instruction::CONST_WIDE, 6u, INT64_C(1)),
+      DEF_BINOP(3, Instruction::LONG_TO_FLOAT, 8u, 6u, 7u),
+      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 2, 3, 1, 2, 1, 2 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[1], value_names_[5]);
+  EXPECT_EQ(value_names_[2], value_names_[6]);
+  EXPECT_EQ(value_names_[3], value_names_[7]);
+
+  const size_t no_null_ck_indexes[] = { 3, 7 };
+  ExpectNoNullCheck(no_null_ck_indexes);
+
+  static const bool eliminated[] = {
+      // Simple elimination of CONST_WIDE+LONG_TO_FLOAT allows simple elimination of IGET.
+      false, false, false, false, false, true, true, true
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the sregs have been renamed correctly.
+  ASSERT_EQ(1, mirs_[2].ssa_rep->num_defs);
+  EXPECT_EQ(8, mirs_[2].ssa_rep->defs[0]);   // 3 -> 8
+  ASSERT_EQ(2, mirs_[2].ssa_rep->num_uses);
+  EXPECT_EQ(1, mirs_[2].ssa_rep->uses[0]);
+  EXPECT_EQ(2, mirs_[2].ssa_rep->uses[1]);
+  ASSERT_EQ(1, mirs_[3].ssa_rep->num_defs);
+  EXPECT_EQ(9, mirs_[3].ssa_rep->defs[0]);   // 4 -> 9
+  ASSERT_EQ(1, mirs_[3].ssa_rep->num_uses);
+  EXPECT_EQ(0, mirs_[3].ssa_rep->uses[0]);
+  ASSERT_EQ(1, mirs_[4].ssa_rep->num_defs);
+  EXPECT_EQ(5, mirs_[4].ssa_rep->defs[0]);
+  ASSERT_EQ(1, mirs_[4].ssa_rep->num_uses);
+  EXPECT_EQ(9, mirs_[4].ssa_rep->uses[0]);   // 4 -> 9
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, KillChain1) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_CONST(3, Instruction::CONST, 1u, 1000),
+      DEF_CONST(3, Instruction::CONST, 2u, 2000),
+      DEF_CONST(3, Instruction::CONST, 3u, 3000),
+      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
+      DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
+      DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
+      DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
+      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
+      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
+      DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
+      DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),
+      DEF_BINOP(3, Instruction::SUB_INT, 12u, 11u, 3u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 4, 5, 6, 4, 5, 4, 5 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[4], value_names_[9]);
+  EXPECT_EQ(value_names_[5], value_names_[10]);
+  EXPECT_EQ(value_names_[6], value_names_[11]);
+  EXPECT_EQ(value_names_[7], value_names_[12]);
+
+  const size_t no_null_ck_indexes[] = { 4, 9 };
+  ExpectNoNullCheck(no_null_ck_indexes);
+
+  static const bool eliminated[] = {
+      false, false, false, false, false, false, false, false, false, true, true, true, true
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the sregs have been renamed correctly.
+  ASSERT_EQ(1, mirs_[6].ssa_rep->num_defs);
+  EXPECT_EQ(11, mirs_[6].ssa_rep->defs[0]);  // 6 -> 11
+  ASSERT_EQ(2, mirs_[6].ssa_rep->num_uses);
+  EXPECT_EQ(5, mirs_[6].ssa_rep->uses[0]);
+  EXPECT_EQ(2, mirs_[6].ssa_rep->uses[1]);
+  ASSERT_EQ(1, mirs_[7].ssa_rep->num_defs);
+  EXPECT_EQ(12, mirs_[7].ssa_rep->defs[0]);  // 7 -> 12
+  ASSERT_EQ(2, mirs_[7].ssa_rep->num_uses);
+  EXPECT_EQ(11, mirs_[7].ssa_rep->uses[0]);  // 6 -> 11
+  EXPECT_EQ(3, mirs_[7].ssa_rep->uses[1]);
+  ASSERT_EQ(1, mirs_[8].ssa_rep->num_defs);
+  EXPECT_EQ(8, mirs_[8].ssa_rep->defs[0]);
+  ASSERT_EQ(1, mirs_[8].ssa_rep->num_uses);
+  EXPECT_EQ(12, mirs_[8].ssa_rep->uses[0]);   // 7 -> 12
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, KillChain2) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_CONST(3, Instruction::CONST, 1u, 1000),
+      DEF_CONST(3, Instruction::CONST, 2u, 2000),
+      DEF_CONST(3, Instruction::CONST, 3u, 3000),
+      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
+      DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
+      DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
+      DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
+      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
+      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
+      DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
+      DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),
+      DEF_BINOP(3, Instruction::SUB_INT, 12u, 11u, 3u),
+      DEF_CONST(3, Instruction::CONST, 13u, 4000),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 5, 4, 6, 4, 7, 7, 4, 7 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 13 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[4], value_names_[9]);
+  EXPECT_EQ(value_names_[5], value_names_[10]);
+  EXPECT_EQ(value_names_[6], value_names_[11]);
+  EXPECT_EQ(value_names_[7], value_names_[12]);
+
+  const size_t no_null_ck_indexes[] = { 4, 9 };
+  ExpectNoNullCheck(no_null_ck_indexes);
+
+  static const bool eliminated[] = {
+      false, false, false, false, false, false, false, false, false, true, true, true, true, false
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the sregs have been renamed correctly.
+  ASSERT_EQ(1, mirs_[7].ssa_rep->num_defs);
+  EXPECT_EQ(12, mirs_[7].ssa_rep->defs[0]);  // 7 -> 12
+  ASSERT_EQ(2, mirs_[7].ssa_rep->num_uses);
+  EXPECT_EQ(6, mirs_[7].ssa_rep->uses[0]);
+  EXPECT_EQ(3, mirs_[7].ssa_rep->uses[1]);
+  ASSERT_EQ(1, mirs_[8].ssa_rep->num_defs);
+  EXPECT_EQ(8, mirs_[8].ssa_rep->defs[0]);
+  ASSERT_EQ(1, mirs_[8].ssa_rep->num_uses);
+  EXPECT_EQ(12, mirs_[8].ssa_rep->uses[0]);   // 7 -> 12
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, KillChain3) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_CONST(3, Instruction::CONST, 1u, 1000),
+      DEF_CONST(3, Instruction::CONST, 2u, 2000),
+      DEF_CONST(3, Instruction::CONST, 3u, 3000),
+      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
+      DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
+      DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
+      DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
+      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
+      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
+      DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
+      DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),
+      DEF_CONST(3, Instruction::CONST, 12u, 4000),
+      DEF_BINOP(3, Instruction::SUB_INT, 13u, 11u, 3u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 5, 4, 6, 4, 7, 4, 7, 4 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 12 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[4], value_names_[9]);
+  EXPECT_EQ(value_names_[5], value_names_[10]);
+  EXPECT_EQ(value_names_[6], value_names_[11]);
+  EXPECT_EQ(value_names_[7], value_names_[13]);
+
+  const size_t no_null_ck_indexes[] = { 4, 9 };
+  ExpectNoNullCheck(no_null_ck_indexes);
+
+  static const bool eliminated[] = {
+      false, false, false, false, false, false, false, false, false, true, true, true, false, true
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that the sregs have been renamed correctly.
+  ASSERT_EQ(1, mirs_[7].ssa_rep->num_defs);
+  EXPECT_EQ(13, mirs_[7].ssa_rep->defs[0]);  // 7 -> 13
+  ASSERT_EQ(2, mirs_[7].ssa_rep->num_uses);
+  EXPECT_EQ(6, mirs_[7].ssa_rep->uses[0]);
+  EXPECT_EQ(3, mirs_[7].ssa_rep->uses[1]);
+  ASSERT_EQ(1, mirs_[8].ssa_rep->num_defs);
+  EXPECT_EQ(8, mirs_[8].ssa_rep->defs[0]);
+  ASSERT_EQ(1, mirs_[8].ssa_rep->num_uses);
+  EXPECT_EQ(13, mirs_[8].ssa_rep->uses[0]);   // 7 -> 13
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, KeepChain1) {
+  // KillChain2 without the final CONST.
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_CONST(3, Instruction::CONST, 1u, 1000),
+      DEF_CONST(3, Instruction::CONST, 2u, 2000),
+      DEF_CONST(3, Instruction::CONST, 3u, 3000),
+      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
+      DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
+      DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
+      DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
+      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
+      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
+      DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
+      DEF_BINOP(3, Instruction::MUL_INT, 11u, 10u, 2u),
+      DEF_BINOP(3, Instruction::SUB_INT, 12u, 11u, 3u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 5, 4, 6, 4, 7, 7, 4 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[4], value_names_[9]);
+  EXPECT_EQ(value_names_[5], value_names_[10]);
+  EXPECT_EQ(value_names_[6], value_names_[11]);
+  EXPECT_EQ(value_names_[7], value_names_[12]);
+
+  const size_t no_null_ck_indexes[] = { 4, 9 };
+  ExpectNoNullCheck(no_null_ck_indexes);
+
+  static const bool eliminated[] = {
+      false, false, false, false, false, false, false, false, false, false, false, false, false
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+}
+
+TEST_F(GvnDeadCodeEliminationTestSimple, KeepChain2) {
+  // KillChain1 with MIRs in the middle of the chain.
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_CONST(3, Instruction::CONST, 1u, 1000),
+      DEF_CONST(3, Instruction::CONST, 2u, 2000),
+      DEF_CONST(3, Instruction::CONST, 3u, 3000),
+      DEF_IGET(3, Instruction::IGET, 4u, 0u, 0u),
+      DEF_BINOP(3, Instruction::ADD_INT, 5u, 4u, 1u),
+      DEF_BINOP(3, Instruction::MUL_INT, 6u, 5u, 2u),
+      DEF_BINOP(3, Instruction::SUB_INT, 7u, 6u, 3u),
+      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 8u, 7u),
+      DEF_IGET(3, Instruction::IGET, 9u, 0u, 0u),
+      DEF_BINOP(3, Instruction::ADD_INT, 10u, 9u, 1u),
+      DEF_CONST(3, Instruction::CONST, 11u, 4000),
+      DEF_UNOP(3, Instruction::INT_TO_FLOAT, 12u, 11u),
+      DEF_BINOP(3, Instruction::MUL_INT, 13u, 10u, 2u),
+      DEF_BINOP(3, Instruction::SUB_INT, 14u, 13u, 3u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3, 4, 5, 4, 5, 6, 4, 5, 4, 7, 4, 5 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[4], value_names_[9]);
+  EXPECT_EQ(value_names_[5], value_names_[10]);
+  EXPECT_EQ(value_names_[6], value_names_[13]);
+  EXPECT_EQ(value_names_[7], value_names_[14]);
+
+  const size_t no_null_ck_indexes[] = { 4, 9 };
+  ExpectNoNullCheck(no_null_ck_indexes);
+
+  static const bool eliminated[] = {
+      false, false, false, false, false, false, false, false, false,
+      false, false, false, false, false, false
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+}
+
+TEST_F(GvnDeadCodeEliminationTestDiamond, CreatePhi1) {
+  static const MIRDef mirs[] = {
+      DEF_CONST(3, Instruction::CONST, 0u, 1000),
+      DEF_CONST(4, Instruction::CONST, 1u, 1000),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 0 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  EXPECT_EQ(value_names_[0], value_names_[1]);
+
+  static const bool eliminated[] = {
+      false, true,
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that we've created a single-input Phi to replace the CONST 1u.
+  BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
+  MIR* phi = bb4->first_mir_insn;
+  ASSERT_TRUE(phi != nullptr);
+  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
+  ASSERT_EQ(1, phi->ssa_rep->num_uses);
+  EXPECT_EQ(0, phi->ssa_rep->uses[0]);
+  ASSERT_EQ(1, phi->ssa_rep->num_defs);
+  EXPECT_EQ(1, phi->ssa_rep->defs[0]);
+  EXPECT_EQ(0u, phi->dalvikInsn.vA);
+}
+
+TEST_F(GvnDeadCodeEliminationTestDiamond, CreatePhi2) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_CONST(4, Instruction::CONST, 1u, 1000),
+      DEF_IPUT(4, Instruction::IPUT, 1u, 0u, 0u),
+      DEF_CONST(5, Instruction::CONST, 3u, 2000),
+      DEF_IPUT(5, Instruction::IPUT, 3u, 0u, 0u),
+      DEF_IGET(6, Instruction::IGET, 5u, 0u, 0u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2 /* dummy */, 1, 2 /* dummy */, 1 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 3, 5 };
+  ExpectValueNamesNE(diff_indexes);
+
+  const size_t no_null_ck_indexes[] = { 2, 4, 5 };
+  ExpectNoNullCheck(no_null_ck_indexes);
+
+  static const bool eliminated[] = {
+      false, false, false, false, false, true,
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that we've created a two-input Phi to replace the IGET 5u.
+  BasicBlock* bb6 = cu_.mir_graph->GetBasicBlock(6);
+  MIR* phi = bb6->first_mir_insn;
+  ASSERT_TRUE(phi != nullptr);
+  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
+  ASSERT_EQ(2, phi->ssa_rep->num_uses);
+  EXPECT_EQ(1, phi->ssa_rep->uses[0]);
+  EXPECT_EQ(3, phi->ssa_rep->uses[1]);
+  ASSERT_EQ(1, phi->ssa_rep->num_defs);
+  EXPECT_EQ(5, phi->ssa_rep->defs[0]);
+  EXPECT_EQ(1u, phi->dalvikInsn.vA);
+}
+
+TEST_F(GvnDeadCodeEliminationTestDiamond, KillChainInAnotherBlock1) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessObject },  // linked list
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_IGET(3, Instruction::IGET_OBJECT, 1u, 0u, 0u),
+      DEF_IGET(3, Instruction::IGET_OBJECT, 2u, 1u, 0u),
+      DEF_IGET(3, Instruction::IGET_OBJECT, 3u, 2u, 0u),
+      DEF_IGET(3, Instruction::IGET_OBJECT, 4u, 3u, 0u),
+      DEF_IFZ(3, Instruction::IF_NEZ, 4u),
+      DEF_IGET(4, Instruction::IGET_OBJECT, 6u, 0u, 0u),
+      DEF_IGET(4, Instruction::IGET_OBJECT, 7u, 6u, 0u),
+      DEF_IGET(4, Instruction::IGET_OBJECT, 8u, 7u, 0u),
+      DEF_IGET(4, Instruction::IGET_OBJECT, 9u, 8u, 0u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 2, 3 /* dummy */, 1, 2, 1, 2 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[1], value_names_[6]);
+  EXPECT_EQ(value_names_[2], value_names_[7]);
+  EXPECT_EQ(value_names_[3], value_names_[8]);
+  EXPECT_EQ(value_names_[4], value_names_[9]);
+
+  const size_t no_null_ck_indexes[] = { 1, 6, 7, 8, 9 };
+  ExpectNoNullCheck(no_null_ck_indexes);
+
+  static const bool eliminated[] = {
+      false, false, false, false, false, false, true, true, true, true,
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that we've created two single-input Phis to replace the IGET 8u and IGET 9u;
+  // the IGET 6u and IGET 7u were killed without a replacement.
+  BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
+  MIR* phi1 = bb4->first_mir_insn;
+  ASSERT_TRUE(phi1 != nullptr);
+  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi1->dalvikInsn.opcode));
+  MIR* phi2 = phi1->next;
+  ASSERT_TRUE(phi2 != nullptr);
+  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi2->dalvikInsn.opcode));
+  ASSERT_TRUE(phi2->next == &mirs_[6]);
+  if (phi1->dalvikInsn.vA == 2u) {
+    std::swap(phi1, phi2);
+  }
+  ASSERT_EQ(1, phi1->ssa_rep->num_uses);
+  EXPECT_EQ(3, phi1->ssa_rep->uses[0]);
+  ASSERT_EQ(1, phi1->ssa_rep->num_defs);
+  EXPECT_EQ(8, phi1->ssa_rep->defs[0]);
+  EXPECT_EQ(1u, phi1->dalvikInsn.vA);
+  ASSERT_EQ(1, phi2->ssa_rep->num_uses);
+  EXPECT_EQ(4, phi2->ssa_rep->uses[0]);
+  ASSERT_EQ(1, phi2->ssa_rep->num_defs);
+  EXPECT_EQ(9, phi2->ssa_rep->defs[0]);
+  EXPECT_EQ(2u, phi2->dalvikInsn.vA);
+}
+
+TEST_F(GvnDeadCodeEliminationTestDiamond, KillChainInAnotherBlock2) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessObject },  // linked list
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_IGET(3, Instruction::IGET_OBJECT, 1u, 0u, 0u),
+      DEF_IGET(3, Instruction::IGET_OBJECT, 2u, 1u, 0u),
+      DEF_IGET(3, Instruction::IGET_OBJECT, 3u, 2u, 0u),
+      DEF_IGET(3, Instruction::IGET_OBJECT, 4u, 3u, 0u),
+      DEF_IFZ(3, Instruction::IF_NEZ, 4u),
+      DEF_IGET(4, Instruction::IGET_OBJECT, 6u, 0u, 0u),
+      DEF_IGET(4, Instruction::IGET_OBJECT, 7u, 6u, 0u),
+      DEF_IGET(4, Instruction::IGET_OBJECT, 8u, 7u, 0u),
+      DEF_CONST(4, Instruction::CONST, 9u, 1000),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 1, 2, 3 /* dummy */, 1, 2, 1, 2 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 2, 3, 4, 9 };
+  ExpectValueNamesNE(diff_indexes);
+  EXPECT_EQ(value_names_[1], value_names_[6]);
+  EXPECT_EQ(value_names_[2], value_names_[7]);
+  EXPECT_EQ(value_names_[3], value_names_[8]);
+
+  const size_t no_null_ck_indexes[] = { 1, 6, 7, 8 };
+  ExpectNoNullCheck(no_null_ck_indexes);
+
+  static const bool eliminated[] = {
+      false, false, false, false, false, false, true, true, true, false,
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that we've created a single-input Phi to replace the IGET 8u;
+  // the IGET 6u and IGET 7u were killed without a replacement.
+  BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
+  MIR* phi = bb4->first_mir_insn;
+  ASSERT_TRUE(phi != nullptr);
+  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
+  ASSERT_TRUE(phi->next == &mirs_[6]);
+  ASSERT_EQ(1, phi->ssa_rep->num_uses);
+  EXPECT_EQ(3, phi->ssa_rep->uses[0]);
+  ASSERT_EQ(1, phi->ssa_rep->num_defs);
+  EXPECT_EQ(8, phi->ssa_rep->defs[0]);
+  EXPECT_EQ(1u, phi->dalvikInsn.vA);
+}
+
+TEST_F(GvnDeadCodeEliminationTestLoop, IFieldLoopVariable) {
+  static const IFieldDef ifields[] = {
+      { 0u, 1u, 0u, false, kDexMemAccessWord },
+  };
+  static const MIRDef mirs[] = {
+      DEF_UNIQUE_REF(3, Instruction::NEW_INSTANCE, 0u),
+      DEF_CONST(3, Instruction::CONST, 1u, 1),
+      DEF_CONST(3, Instruction::CONST, 2u, 0),
+      DEF_IPUT(3, Instruction::IPUT, 2u, 0u, 0u),
+      DEF_IGET(4, Instruction::IGET, 4u, 0u, 0u),
+      DEF_BINOP(4, Instruction::ADD_INT, 5u, 4u, 1u),
+      DEF_IPUT(4, Instruction::IPUT, 5u, 0u, 0u),
+  };
+
+  static const int32_t sreg_to_vreg_map[] = { 0, 1, 2, 3 /* dummy */, 2, 2 };
+  PrepareSRegToVRegMap(sreg_to_vreg_map);
+
+  PrepareIFields(ifields);
+  PrepareMIRs(mirs);
+  PerformGVN_DCE();
+
+  ASSERT_EQ(arraysize(mirs), value_names_.size());
+  static const size_t diff_indexes[] = { 0, 1, 2, 4, 5 };
+  ExpectValueNamesNE(diff_indexes);
+
+  const size_t no_null_ck_indexes[] = { 3, 4, 6 };
+  ExpectNoNullCheck(no_null_ck_indexes);
+
+  static const bool eliminated[] = {
+      false, false, false, false, true, false, false,
+  };
+  static_assert(arraysize(eliminated) == arraysize(mirs), "array size mismatch");
+  for (size_t i = 0; i != arraysize(eliminated); ++i) {
+    bool actually_eliminated = (static_cast<int>(mirs_[i].dalvikInsn.opcode) == kMirOpNop);
+    EXPECT_EQ(eliminated[i], actually_eliminated) << i;
+  }
+  // Check that we've created a two-input Phi to replace the IGET 4u.
+  BasicBlock* bb4 = cu_.mir_graph->GetBasicBlock(4);
+  MIR* phi = bb4->first_mir_insn;
+  ASSERT_TRUE(phi != nullptr);
+  ASSERT_EQ(kMirOpPhi, static_cast<int>(phi->dalvikInsn.opcode));
+  ASSERT_TRUE(phi->next == &mirs_[4]);
+  ASSERT_EQ(2, phi->ssa_rep->num_uses);
+  EXPECT_EQ(2, phi->ssa_rep->uses[0]);
+  EXPECT_EQ(5, phi->ssa_rep->uses[1]);
+  ASSERT_EQ(1, phi->ssa_rep->num_defs);
+  EXPECT_EQ(4, phi->ssa_rep->defs[0]);
+  EXPECT_EQ(2u, phi->dalvikInsn.vA);
+}
+
+}  // namespace art
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index aecde51..99b6683 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -901,9 +901,9 @@
     // Calculate merged values for the intersection.
     for (auto& load_value_entry : my_values->load_value_map) {
       uint16_t location = load_value_entry.first;
-      bool same_values = true;
-      uint16_t value_name = kNoValue;
       merge_names_.clear();
+      uint16_t value_name = kNoValue;
+      bool same_values = true;
       for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
         value_name = Versions::LookupMergeValue(gvn_, lvn, key, location);
         same_values = same_values && (merge_names_.empty() || value_name == merge_names_.back());
@@ -937,6 +937,10 @@
 void LocalValueNumbering::Merge(MergeType merge_type) {
   DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
 
+  // Always reserve space in merge_names_. Even if we don't use it in Merge() we may need it
+  // in GetStartingVregValueNumberImpl() when the merge_names_'s allocator is not the top.
+  merge_names_.reserve(gvn_->merge_lvns_.size());
+
   IntersectSregValueMaps<&LocalValueNumbering::sreg_value_map_>();
   IntersectSregValueMaps<&LocalValueNumbering::sreg_wide_value_map_>();
   if (merge_type == kReturnMerge) {
@@ -1169,8 +1173,8 @@
   int first_s_reg = uses[pos];
   bool wide = (first_lvn->sreg_wide_value_map_.count(first_s_reg) != 0u);
   // Iterate over *merge_lvns_ and skip incoming sregs for BBs without associated LVN.
-  uint16_t value_name = kNoValue;
   merge_names_.clear();
+  uint16_t value_name = kNoValue;
   bool same_values = true;
   for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
     DCHECK_LT(pos, mir->ssa_rep->num_uses);
@@ -1210,6 +1214,31 @@
   return value_name;
 }
 
+uint16_t LocalValueNumbering::HandleConst(MIR* mir, uint32_t value) {
+  RegLocation raw_dest = gvn_->GetMirGraph()->GetRawDest(mir);
+  uint16_t res;
+  if (value == 0u && raw_dest.ref) {
+    res = GlobalValueNumbering::kNullValue;
+  } else {
+    Instruction::Code op = raw_dest.fp ? Instruction::CONST_HIGH16 : Instruction::CONST;
+    res = gvn_->LookupValue(op, Low16Bits(value), High16Bits(value), 0);
+  }
+  SetOperandValue(mir->ssa_rep->defs[0], res);
+  return res;
+}
+
+uint16_t LocalValueNumbering::HandleConstWide(MIR* mir, uint64_t value) {
+  RegLocation raw_dest = gvn_->GetMirGraph()->GetRawDest(mir);
+  Instruction::Code op = raw_dest.fp ? Instruction::CONST_HIGH16 : Instruction::CONST;
+  uint32_t low_word = Low32Bits(value);
+  uint32_t high_word = High32Bits(value);
+  uint16_t low_res = gvn_->LookupValue(op, Low16Bits(low_word), High16Bits(low_word), 1);
+  uint16_t high_res = gvn_->LookupValue(op, Low16Bits(high_word), High16Bits(high_word), 2);
+  uint16_t res = gvn_->LookupValue(op, low_res, high_res, 3);
+  SetOperandValueWide(mir->ssa_rep->defs[0], res);
+  return res;
+}
+
 uint16_t LocalValueNumbering::HandleAGet(MIR* mir, uint16_t opcode) {
   uint16_t array = GetOperandValue(mir->ssa_rep->uses[0]);
   HandleNullCheck(mir, array);
@@ -1592,12 +1621,18 @@
       break;
     case Instruction::MOVE_EXCEPTION:
     case Instruction::NEW_INSTANCE:
-    case Instruction::CONST_CLASS:
     case Instruction::NEW_ARRAY:
       // 1 result, treat as unique each time, use result s_reg - will be unique.
       res = MarkNonAliasingNonNull(mir);
       SetOperandValue(mir->ssa_rep->defs[0], res);
       break;
+    case Instruction::CONST_CLASS:
+      DCHECK_EQ(Low16Bits(mir->dalvikInsn.vB), mir->dalvikInsn.vB);
+      res = gvn_->LookupValue(Instruction::CONST_CLASS, mir->dalvikInsn.vB, 0, 0);
+      SetOperandValue(mir->ssa_rep->defs[0], res);
+      null_checked_.insert(res);
+      non_aliasing_refs_.insert(res);
+      break;
     case Instruction::CONST_STRING:
     case Instruction::CONST_STRING_JUMBO:
       // These strings are internalized, so assign value based on the string pool index.
@@ -1641,53 +1676,29 @@
       SetOperandValueWide(mir->ssa_rep->defs[0], res);
       break;
 
+    case Instruction::CONST_HIGH16:
+      res = HandleConst(mir, mir->dalvikInsn.vB << 16);
+      break;
     case Instruction::CONST:
     case Instruction::CONST_4:
     case Instruction::CONST_16:
-      res = gvn_->LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
-                              High16Bits(mir->dalvikInsn.vB), 0);
-      SetOperandValue(mir->ssa_rep->defs[0], res);
-      break;
-
-    case Instruction::CONST_HIGH16:
-      res = gvn_->LookupValue(Instruction::CONST, 0, mir->dalvikInsn.vB, 0);
-      SetOperandValue(mir->ssa_rep->defs[0], res);
+      res = HandleConst(mir, mir->dalvikInsn.vB);
       break;
 
     case Instruction::CONST_WIDE_16:
-    case Instruction::CONST_WIDE_32: {
-        uint16_t low_res = gvn_->LookupValue(Instruction::CONST, Low16Bits(mir->dalvikInsn.vB),
-                                             High16Bits(mir->dalvikInsn.vB), 1);
-        uint16_t high_res;
-        if (mir->dalvikInsn.vB & 0x80000000) {
-          high_res = gvn_->LookupValue(Instruction::CONST, 0xffff, 0xffff, 2);
-        } else {
-          high_res = gvn_->LookupValue(Instruction::CONST, 0, 0, 2);
-        }
-        res = gvn_->LookupValue(Instruction::CONST, low_res, high_res, 3);
-        SetOperandValueWide(mir->ssa_rep->defs[0], res);
-      }
+    case Instruction::CONST_WIDE_32:
+      res = HandleConstWide(
+          mir,
+          mir->dalvikInsn.vB +
+              ((mir->dalvikInsn.vB & 0x80000000) != 0 ? UINT64_C(0xffffffff00000000) : 0u));
       break;
 
-    case Instruction::CONST_WIDE: {
-        uint32_t low_word = Low32Bits(mir->dalvikInsn.vB_wide);
-        uint32_t high_word = High32Bits(mir->dalvikInsn.vB_wide);
-        uint16_t low_res = gvn_->LookupValue(Instruction::CONST, Low16Bits(low_word),
-                                             High16Bits(low_word), 1);
-        uint16_t high_res = gvn_->LookupValue(Instruction::CONST, Low16Bits(high_word),
-                                              High16Bits(high_word), 2);
-        res = gvn_->LookupValue(Instruction::CONST, low_res, high_res, 3);
-        SetOperandValueWide(mir->ssa_rep->defs[0], res);
-      }
+    case Instruction::CONST_WIDE:
+      res = HandleConstWide(mir, mir->dalvikInsn.vB_wide);
       break;
 
-    case Instruction::CONST_WIDE_HIGH16: {
-        uint16_t low_res = gvn_->LookupValue(Instruction::CONST, 0, 0, 1);
-        uint16_t high_res = gvn_->LookupValue(Instruction::CONST, 0,
-                                              Low16Bits(mir->dalvikInsn.vB), 2);
-        res = gvn_->LookupValue(Instruction::CONST, low_res, high_res, 3);
-        SetOperandValueWide(mir->ssa_rep->defs[0], res);
-      }
+    case Instruction::CONST_WIDE_HIGH16:
+      res = HandleConstWide(mir, static_cast<uint64_t>(mir->dalvikInsn.vB) << 48);
       break;
 
     case Instruction::ARRAY_LENGTH: {
@@ -1956,4 +1967,55 @@
   return res;
 }
 
+uint16_t LocalValueNumbering::GetEndingVregValueNumberImpl(int v_reg, bool wide) const {
+  const BasicBlock* bb = gvn_->GetBasicBlock(Id());
+  DCHECK(bb != nullptr);
+  int s_reg = bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
+  if (s_reg == INVALID_SREG) {
+    return kNoValue;
+  }
+  if (wide) {
+    int high_s_reg = bb->data_flow_info->vreg_to_ssa_map_exit[v_reg + 1];
+    if (high_s_reg != s_reg + 1) {
+      return kNoValue;  // High word has been overwritten.
+    }
+    return GetSregValueWide(s_reg);
+  } else {
+    return GetSregValue(s_reg);
+  }
+}
+
+uint16_t LocalValueNumbering::GetStartingVregValueNumberImpl(int v_reg, bool wide) const {
+  DCHECK_EQ(gvn_->mode_, GlobalValueNumbering::kModeGvnPostProcessing);
+  DCHECK(gvn_->CanModify());
+  const BasicBlock* bb = gvn_->GetBasicBlock(Id());
+  DCHECK(bb != nullptr);
+  DCHECK_NE(bb->predecessors.size(), 0u);
+  if (bb->predecessors.size() == 1u) {
+    return gvn_->GetLvn(bb->predecessors[0])->GetEndingVregValueNumberImpl(v_reg, wide);
+  }
+  merge_names_.clear();
+  uint16_t value_name = kNoValue;
+  bool same_values = true;
+  for (BasicBlockId pred_id : bb->predecessors) {
+    value_name = gvn_->GetLvn(pred_id)->GetEndingVregValueNumberImpl(v_reg, wide);
+    if (value_name == kNoValue) {
+      return kNoValue;
+    }
+    same_values = same_values && (merge_names_.empty() || value_name == merge_names_.back());
+    merge_names_.push_back(value_name);
+  }
+  if (same_values) {
+    // value_name already contains the result.
+  } else {
+    auto lb = merge_map_.lower_bound(merge_names_);
+    if (lb != merge_map_.end() && !merge_map_.key_comp()(merge_names_, lb->first)) {
+      value_name = lb->second;
+    } else {
+      value_name = kNoValue;  // We never assigned a value name to this set of merged names.
+    }
+  }
+  return value_name;
+}
+
 }    // namespace art
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index aef8c6d..bfacf8e 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -52,13 +52,22 @@
     return div_zero_checked_.find(value_name) != div_zero_checked_.end();
   }
 
-  bool IsSregValue(uint16_t s_reg, uint16_t value_name) const {
-    auto it = sreg_value_map_.find(s_reg);
-    if (it != sreg_value_map_.end()) {
-      return it->second == value_name;
-    } else {
-      return gvn_->HasValue(kNoValue, s_reg, kNoValue, kNoValue, value_name);
-    }
+  uint16_t GetSregValue(uint16_t s_reg) const {
+    return GetSregValueImpl(s_reg, &sreg_value_map_);
+  }
+
+  uint16_t GetSregValueWide(uint16_t s_reg) const {
+    return GetSregValueImpl(s_reg, &sreg_wide_value_map_);
+  }
+
+  // Get the starting value number for a given dalvik register.
+  uint16_t GetStartingVregValueNumber(int v_reg) const {
+    return GetStartingVregValueNumberImpl(v_reg, false);
+  }
+
+  // Get the starting value number for a given wide dalvik register.
+  uint16_t GetStartingVregValueNumberWide(int v_reg) const {
+    return GetStartingVregValueNumberImpl(v_reg, true);
   }
 
   enum MergeType {
@@ -80,6 +89,20 @@
   // Key is s_reg, value is value name.
   typedef ScopedArenaSafeMap<uint16_t, uint16_t> SregValueMap;
 
+  uint16_t GetEndingVregValueNumberImpl(int v_reg, bool wide) const;
+  uint16_t GetStartingVregValueNumberImpl(int v_reg, bool wide) const;
+
+  uint16_t GetSregValueImpl(int s_reg, const SregValueMap* map) const {
+    uint16_t res = kNoValue;
+    auto lb = map->find(s_reg);
+    if (lb != map->end()) {
+      res = lb->second;
+    } else {
+      res = gvn_->FindValue(kNoValue, s_reg, kNoValue, kNoValue);
+    }
+    return res;
+  }
+
   void SetOperandValueImpl(uint16_t s_reg, uint16_t value, SregValueMap* map) {
     DCHECK_EQ(map->count(s_reg), 0u) << PrettyMethod(gvn_->cu_->method_idx, *gvn_->cu_->dex_file)
         << " LVN id: " << id_ << ", s_reg: " << s_reg;
@@ -285,6 +308,8 @@
   void HandleEscapingRef(uint16_t base);
   void HandleInvokeArgs(const MIR* mir, const LocalValueNumbering* mir_lvn);
   uint16_t HandlePhi(MIR* mir);
+  uint16_t HandleConst(MIR* mir, uint32_t value);
+  uint16_t HandleConstWide(MIR* mir, uint64_t value);
   uint16_t HandleAGet(MIR* mir, uint16_t opcode);
   void HandleAPut(MIR* mir, uint16_t opcode);
   uint16_t HandleIGet(MIR* mir, uint16_t opcode);
@@ -370,9 +395,9 @@
   ValueNameSet div_zero_checked_;
 
   // Reuse one vector for all merges to avoid leaking too much memory on the ArenaStack.
-  ScopedArenaVector<BasicBlockId> merge_names_;
+  mutable ScopedArenaVector<uint16_t> merge_names_;
   // Map to identify when different locations merge the same values.
-  ScopedArenaSafeMap<ScopedArenaVector<BasicBlockId>, uint16_t> merge_map_;
+  ScopedArenaSafeMap<ScopedArenaVector<uint16_t>, uint16_t> merge_map_;
   // New memory version for merge, kNoValue if all memory versions matched.
   uint16_t merge_new_memory_version_;
 
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index a5cbb5f..d1c3a6b 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -185,9 +185,9 @@
   }
 
   void PerformLVN() {
-    cu_.mir_graph->temp_.gvn.ifield_ids_ =  GlobalValueNumbering::PrepareGvnFieldIds(
+    cu_.mir_graph->temp_.gvn.ifield_ids =  GlobalValueNumbering::PrepareGvnFieldIds(
         allocator_.get(), cu_.mir_graph->ifield_lowering_infos_);
-    cu_.mir_graph->temp_.gvn.sfield_ids_ =  GlobalValueNumbering::PrepareGvnFieldIds(
+    cu_.mir_graph->temp_.gvn.sfield_ids =  GlobalValueNumbering::PrepareGvnFieldIds(
         allocator_.get(), cu_.mir_graph->sfield_lowering_infos_);
     gvn_.reset(new (allocator_.get()) GlobalValueNumbering(&cu_, allocator_.get(),
                                                            GlobalValueNumbering::kModeLvn));
@@ -211,8 +211,14 @@
         value_names_() {
     cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
     allocator_.reset(ScopedArenaAllocator::Create(&cu_.arena_stack));
+    // By default, the zero-initialized reg_location_[.] with ref == false tells LVN that
+    // 0 constants are integral, not references. Nothing else is used by LVN/GVN.
+    cu_.mir_graph->reg_location_ = static_cast<RegLocation*>(cu_.arena.Alloc(
+        kMaxSsaRegs * sizeof(cu_.mir_graph->reg_location_[0]), kArenaAllocRegAlloc));
   }
 
+  static constexpr size_t kMaxSsaRegs = 16384u;
+
   ArenaPool pool_;
   CompilationUnit cu_;
   size_t mir_count_;
@@ -774,6 +780,7 @@
 
 TEST_F(LocalValueNumberingTest, ConstWide) {
   static const MIRDef mirs[] = {
+      // Core reg constants.
       DEF_CONST(Instruction::CONST_WIDE_16, 0u, 0),
       DEF_CONST(Instruction::CONST_WIDE_16, 1u, 1),
       DEF_CONST(Instruction::CONST_WIDE_16, 2u, -1),
@@ -795,9 +802,86 @@
       DEF_CONST(Instruction::CONST_WIDE, 18u, (INT64_C(1) << 48) - 1),
       DEF_CONST(Instruction::CONST_WIDE, 19u, (INT64_C(-1) << 48) + 1),
       DEF_CONST(Instruction::CONST_WIDE, 20u, (INT64_C(-1) << 48) - 1),
+      // FP reg constants.
+      DEF_CONST(Instruction::CONST_WIDE_16, 21u, 0),
+      DEF_CONST(Instruction::CONST_WIDE_16, 22u, 1),
+      DEF_CONST(Instruction::CONST_WIDE_16, 23u, -1),
+      DEF_CONST(Instruction::CONST_WIDE_32, 24u, 1 << 16),
+      DEF_CONST(Instruction::CONST_WIDE_32, 25u, -1 << 16),
+      DEF_CONST(Instruction::CONST_WIDE_32, 26u, (1 << 16) + 1),
+      DEF_CONST(Instruction::CONST_WIDE_32, 27u, (1 << 16) - 1),
+      DEF_CONST(Instruction::CONST_WIDE_32, 28u, -(1 << 16) + 1),
+      DEF_CONST(Instruction::CONST_WIDE_32, 29u, -(1 << 16) - 1),
+      DEF_CONST(Instruction::CONST_WIDE, 30u, INT64_C(1) << 32),
+      DEF_CONST(Instruction::CONST_WIDE, 31u, INT64_C(-1) << 32),
+      DEF_CONST(Instruction::CONST_WIDE, 32u, (INT64_C(1) << 32) + 1),
+      DEF_CONST(Instruction::CONST_WIDE, 33u, (INT64_C(1) << 32) - 1),
+      DEF_CONST(Instruction::CONST_WIDE, 34u, (INT64_C(-1) << 32) + 1),
+      DEF_CONST(Instruction::CONST_WIDE, 35u, (INT64_C(-1) << 32) - 1),
+      DEF_CONST(Instruction::CONST_WIDE_HIGH16, 36u, 1),       // Effectively 1 << 48.
+      DEF_CONST(Instruction::CONST_WIDE_HIGH16, 37u, 0xffff),  // Effectively -1 << 48.
+      DEF_CONST(Instruction::CONST_WIDE, 38u, (INT64_C(1) << 48) + 1),
+      DEF_CONST(Instruction::CONST_WIDE, 39u, (INT64_C(1) << 48) - 1),
+      DEF_CONST(Instruction::CONST_WIDE, 40u, (INT64_C(-1) << 48) + 1),
+      DEF_CONST(Instruction::CONST_WIDE, 41u, (INT64_C(-1) << 48) - 1),
   };
 
   PrepareMIRs(mirs);
+  for (size_t i = arraysize(mirs) / 2u; i != arraysize(mirs); ++i) {
+    cu_.mir_graph->reg_location_[mirs_[i].ssa_rep->defs[0]].fp = true;
+  }
+  PerformLVN();
+  for (size_t i = 0u; i != mir_count_; ++i) {
+    for (size_t j = i + 1u; j != mir_count_; ++j) {
+      EXPECT_NE(value_names_[i], value_names_[j]) << i << " " << j;
+    }
+  }
+}
+
+TEST_F(LocalValueNumberingTest, Const) {
+  static const MIRDef mirs[] = {
+      // Core reg constants.
+      DEF_CONST(Instruction::CONST_4, 0u, 0),
+      DEF_CONST(Instruction::CONST_4, 1u, 1),
+      DEF_CONST(Instruction::CONST_4, 2u, -1),
+      DEF_CONST(Instruction::CONST_16, 3u, 1 << 4),
+      DEF_CONST(Instruction::CONST_16, 4u, -1 << 4),
+      DEF_CONST(Instruction::CONST_16, 5u, (1 << 4) + 1),
+      DEF_CONST(Instruction::CONST_16, 6u, (1 << 4) - 1),
+      DEF_CONST(Instruction::CONST_16, 7u, -(1 << 4) + 1),
+      DEF_CONST(Instruction::CONST_16, 8u, -(1 << 4) - 1),
+      DEF_CONST(Instruction::CONST_HIGH16, 9u, 1),       // Effectively 1 << 16.
+      DEF_CONST(Instruction::CONST_HIGH16, 10u, 0xffff),  // Effectively -1 << 16.
+      DEF_CONST(Instruction::CONST, 11u, (1 << 16) + 1),
+      DEF_CONST(Instruction::CONST, 12u, (1 << 16) - 1),
+      DEF_CONST(Instruction::CONST, 13u, (-1 << 16) + 1),
+      DEF_CONST(Instruction::CONST, 14u, (-1 << 16) - 1),
+      // FP reg constants.
+      DEF_CONST(Instruction::CONST_4, 15u, 0),
+      DEF_CONST(Instruction::CONST_4, 16u, 1),
+      DEF_CONST(Instruction::CONST_4, 17u, -1),
+      DEF_CONST(Instruction::CONST_16, 18u, 1 << 4),
+      DEF_CONST(Instruction::CONST_16, 19u, -1 << 4),
+      DEF_CONST(Instruction::CONST_16, 20u, (1 << 4) + 1),
+      DEF_CONST(Instruction::CONST_16, 21u, (1 << 4) - 1),
+      DEF_CONST(Instruction::CONST_16, 22u, -(1 << 4) + 1),
+      DEF_CONST(Instruction::CONST_16, 23u, -(1 << 4) - 1),
+      DEF_CONST(Instruction::CONST_HIGH16, 24u, 1),       // Effectively 1 << 16.
+      DEF_CONST(Instruction::CONST_HIGH16, 25u, 0xffff),  // Effectively -1 << 16.
+      DEF_CONST(Instruction::CONST, 26u, (1 << 16) + 1),
+      DEF_CONST(Instruction::CONST, 27u, (1 << 16) - 1),
+      DEF_CONST(Instruction::CONST, 28u, (-1 << 16) + 1),
+      DEF_CONST(Instruction::CONST, 29u, (-1 << 16) - 1),
+      // null reference constant.
+      DEF_CONST(Instruction::CONST_4, 30u, 0),
+  };
+
+  PrepareMIRs(mirs);
+  static_assert((arraysize(mirs) & 1) != 0, "missing null or unmatched fp/core");
+  cu_.mir_graph->reg_location_[arraysize(mirs) - 1].ref = true;
+  for (size_t i = arraysize(mirs) / 2u; i != arraysize(mirs) - 1; ++i) {
+    cu_.mir_graph->reg_location_[mirs_[i].ssa_rep->defs[0]].fp = true;
+  }
   PerformLVN();
   for (size_t i = 0u; i != mir_count_; ++i) {
     for (size_t j = i + 1u; j != mir_count_; ++j) {
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 1f56276..f9f7e22 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -910,11 +910,6 @@
   DF_FORMAT_EXTENDED,
 };
 
-/* Return the base virtual register for a SSA name */
-int MIRGraph::SRegToVReg(int ssa_reg) const {
-  return ssa_base_vregs_[ssa_reg];
-}
-
 /* Any register that is used before being defined is considered live-in */
 void MIRGraph::HandleLiveInUse(ArenaBitVector* use_v, ArenaBitVector* def_v,
                                ArenaBitVector* live_in_v, int dalvik_reg_id) {
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index ff427f8..98b2da8 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -149,6 +149,7 @@
 
   friend class NullCheckEliminationTest;
   friend class GlobalValueNumberingTest;
+  friend class GvnDeadCodeEliminationTest;
   friend class LocalValueNumberingTest;
 };
 
@@ -223,6 +224,7 @@
 
   friend class ClassInitCheckEliminationTest;
   friend class GlobalValueNumberingTest;
+  friend class GvnDeadCodeEliminationTest;
   friend class LocalValueNumberingTest;
 };
 
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 9f98589..08ca1b2 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -2559,4 +2559,13 @@
   return m_units_[m_unit_index]->GetCodeItem()->insns_;
 }
 
+void MIRGraph::SetPuntToInterpreter(bool val) {
+  punt_to_interpreter_ = val;
+  if (val) {
+    // Disable all subsequent optimizations. They may not be safe to run. (For example,
+    // LVN/GVN assumes there are no conflicts found by the type inference pass.)
+    cu_->disable_opt = ~static_cast<decltype(cu_->disable_opt)>(0);
+  }
+}
+
 }  // namespace art
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index c33825b..020136c 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -37,6 +37,7 @@
 class DexCompilationUnit;
 class DexFileMethodInliner;
 class GlobalValueNumbering;
+class GvnDeadCodeElimination;
 
 // Forward declaration.
 class MIRGraph;
@@ -1052,7 +1053,12 @@
 
   void DumpCheckStats();
   MIR* FindMoveResult(BasicBlock* bb, MIR* mir);
-  int SRegToVReg(int ssa_reg) const;
+
+  /* Return the base virtual register for a SSA name */
+  int SRegToVReg(int ssa_reg) const {
+    return ssa_base_vregs_[ssa_reg];
+  }
+
   void VerifyDataflow();
   void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb);
   bool EliminateNullChecksGate();
@@ -1065,6 +1071,9 @@
   bool ApplyGlobalValueNumberingGate();
   bool ApplyGlobalValueNumbering(BasicBlock* bb);
   void ApplyGlobalValueNumberingEnd();
+  bool EliminateDeadCodeGate();
+  bool EliminateDeadCode(BasicBlock* bb);
+  void EliminateDeadCodeEnd();
   bool EliminateSuspendChecksGate();
   bool EliminateSuspendChecks(BasicBlock* bb);
   void EliminateSuspendChecksEnd();
@@ -1072,15 +1081,15 @@
   uint16_t GetGvnIFieldId(MIR* mir) const {
     DCHECK(IsInstructionIGetOrIPut(mir->dalvikInsn.opcode));
     DCHECK_LT(mir->meta.ifield_lowering_info, ifield_lowering_infos_.size());
-    DCHECK(temp_.gvn.ifield_ids_ != nullptr);
-    return temp_.gvn.ifield_ids_[mir->meta.ifield_lowering_info];
+    DCHECK(temp_.gvn.ifield_ids != nullptr);
+    return temp_.gvn.ifield_ids[mir->meta.ifield_lowering_info];
   }
 
   uint16_t GetGvnSFieldId(MIR* mir) const {
     DCHECK(IsInstructionSGetOrSPut(mir->dalvikInsn.opcode));
     DCHECK_LT(mir->meta.sfield_lowering_info, sfield_lowering_infos_.size());
-    DCHECK(temp_.gvn.sfield_ids_ != nullptr);
-    return temp_.gvn.sfield_ids_[mir->meta.sfield_lowering_info];
+    DCHECK(temp_.gvn.sfield_ids != nullptr);
+    return temp_.gvn.sfield_ids[mir->meta.sfield_lowering_info];
   }
 
   /*
@@ -1115,9 +1124,7 @@
     return punt_to_interpreter_;
   }
 
-  void SetPuntToInterpreter(bool val) {
-    punt_to_interpreter_ = val;
-  }
+  void SetPuntToInterpreter(bool val);
 
   void DisassembleExtendedInstr(const MIR* mir, std::string* decoded_mir);
   char* GetDalvikDisassembly(const MIR* mir);
@@ -1284,7 +1291,8 @@
    * @param mir The mir to check.
    * @return Returns 'true' if the given MIR might throw an exception.
    */
-  bool CanThrow(MIR* mir);
+  bool CanThrow(MIR* mir) const;
+
   /**
    * @brief Combine multiply and add/sub MIRs into corresponding extended MAC MIR.
    * @param mul_mir The multiply MIR to be combined.
@@ -1382,8 +1390,9 @@
     // Global value numbering.
     struct {
       GlobalValueNumbering* gvn;
-      uint16_t* ifield_ids_;  // Part of GVN/LVN but cached here for LVN to avoid recalculation.
-      uint16_t* sfield_ids_;  // Ditto.
+      uint16_t* ifield_ids;  // Part of GVN/LVN but cached here for LVN to avoid recalculation.
+      uint16_t* sfield_ids;  // Ditto.
+      GvnDeadCodeElimination* dce;
     } gvn;
     // Suspend check elimination.
     struct {
@@ -1437,6 +1446,7 @@
   friend class SuspendCheckEliminationTest;
   friend class NullCheckEliminationTest;
   friend class GlobalValueNumberingTest;
+  friend class GvnDeadCodeEliminationTest;
   friend class LocalValueNumberingTest;
   friend class TopologicalSortOrderTest;
 };
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index dac0210..2f547ea 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -21,6 +21,7 @@
 #include "driver/compiler_driver.h"
 #include "driver/dex_compilation_unit.h"
 #include "global_value_numbering.h"
+#include "gvn_dead_code_elimination.h"
 #include "local_value_numbering.h"
 #include "mir_field_info.h"
 #include "quick/dex_file_method_inliner.h"
@@ -1333,9 +1334,9 @@
 
   DCHECK(temp_scoped_alloc_ == nullptr);
   temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
-  temp_.gvn.ifield_ids_ =
+  temp_.gvn.ifield_ids =
       GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), ifield_lowering_infos_);
-  temp_.gvn.sfield_ids_ =
+  temp_.gvn.sfield_ids =
       GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), sfield_lowering_infos_);
   DCHECK(temp_.gvn.gvn == nullptr);
   temp_.gvn.gvn = new (temp_scoped_alloc_.get()) GlobalValueNumbering(
@@ -1359,8 +1360,8 @@
   // Perform modifications.
   DCHECK(temp_.gvn.gvn != nullptr);
   if (temp_.gvn.gvn->Good()) {
+    temp_.gvn.gvn->StartPostProcessing();
     if (max_nested_loops_ != 0u) {
-      temp_.gvn.gvn->StartPostProcessing();
       TopologicalSortIterator iter(this);
       for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
         ScopedArenaAllocator allocator(&cu_->arena_stack);  // Reclaim memory after each LVN.
@@ -1378,12 +1379,45 @@
     cu_->disable_opt |= (1u << kLocalValueNumbering);
   } else {
     LOG(WARNING) << "GVN failed for " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+    cu_->disable_opt |= (1u << kGvnDeadCodeElimination);
   }
 
+  if ((cu_->disable_opt & (1 << kGvnDeadCodeElimination)) != 0) {
+    EliminateDeadCodeEnd();
+  }  // else preserve GVN data for CSE.
+}
+
+bool MIRGraph::EliminateDeadCodeGate() {
+  if ((cu_->disable_opt & (1 << kGvnDeadCodeElimination)) != 0) {
+    return false;
+  }
+  DCHECK(temp_scoped_alloc_ != nullptr);
+  temp_.gvn.dce = new (temp_scoped_alloc_.get()) GvnDeadCodeElimination(temp_.gvn.gvn,
+                                                                        temp_scoped_alloc_.get());
+  return true;
+}
+
+bool MIRGraph::EliminateDeadCode(BasicBlock* bb) {
+  DCHECK(temp_scoped_alloc_ != nullptr);
+  DCHECK(temp_.gvn.gvn != nullptr);
+  if (bb->block_type != kDalvikByteCode) {
+    return false;
+  }
+  DCHECK(temp_.gvn.dce != nullptr);
+  temp_.gvn.dce->Apply(bb);
+  return false;  // No need to repeat.
+}
+
+void MIRGraph::EliminateDeadCodeEnd() {
+  DCHECK_EQ(temp_.gvn.dce != nullptr, (cu_->disable_opt & (1 << kGvnDeadCodeElimination)) == 0);
+  if (temp_.gvn.dce != nullptr) {
+    delete temp_.gvn.dce;
+    temp_.gvn.dce = nullptr;
+  }
   delete temp_.gvn.gvn;
   temp_.gvn.gvn = nullptr;
-  temp_.gvn.ifield_ids_ = nullptr;
-  temp_.gvn.sfield_ids_ = nullptr;
+  temp_.gvn.ifield_ids = nullptr;
+  temp_.gvn.sfield_ids = nullptr;
   DCHECK(temp_scoped_alloc_ != nullptr);
   temp_scoped_alloc_.reset();
 }
@@ -1553,9 +1587,9 @@
 void MIRGraph::BasicBlockOptimizationStart() {
   if ((cu_->disable_opt & (1 << kLocalValueNumbering)) == 0) {
     temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
-    temp_.gvn.ifield_ids_ =
+    temp_.gvn.ifield_ids =
         GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), ifield_lowering_infos_);
-    temp_.gvn.sfield_ids_ =
+    temp_.gvn.sfield_ids =
         GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), sfield_lowering_infos_);
   }
 }
@@ -1581,8 +1615,8 @@
 
 void MIRGraph::BasicBlockOptimizationEnd() {
   // Clean up after LVN.
-  temp_.gvn.ifield_ids_ = nullptr;
-  temp_.gvn.sfield_ids_ = nullptr;
+  temp_.gvn.ifield_ids = nullptr;
+  temp_.gvn.sfield_ids = nullptr;
   temp_scoped_alloc_.reset();
 }
 
@@ -1684,7 +1718,7 @@
   temp_.sce.inliner = nullptr;
 }
 
-bool MIRGraph::CanThrow(MIR* mir) {
+bool MIRGraph::CanThrow(MIR* mir) const {
   if ((mir->dalvikInsn.FlagsOf() & Instruction::kThrow) == 0) {
     return false;
   }
@@ -1718,7 +1752,6 @@
     // Non-throwing only if range check has been eliminated.
     return ((opt_flags & MIR_IGNORE_RANGE_CHECK) == 0);
   } else if (mir->dalvikInsn.opcode == Instruction::ARRAY_LENGTH ||
-      mir->dalvikInsn.opcode == Instruction::FILL_ARRAY_DATA ||
       static_cast<int>(mir->dalvikInsn.opcode) == kMirOpNullCheck) {
     // No more checks for these (null check was processed above).
     return false;
diff --git a/compiler/dex/pass_driver_me_opts.cc b/compiler/dex/pass_driver_me_opts.cc
index 8c8bde6..320d06a 100644
--- a/compiler/dex/pass_driver_me_opts.cc
+++ b/compiler/dex/pass_driver_me_opts.cc
@@ -45,6 +45,7 @@
   pass_manager->AddPass(new BBCombine);
   pass_manager->AddPass(new CodeLayout);
   pass_manager->AddPass(new GlobalValueNumberingPass);
+  pass_manager->AddPass(new DeadCodeEliminationPass);
   pass_manager->AddPass(new ConstantPropagation);
   pass_manager->AddPass(new MethodUseCount);
   pass_manager->AddPass(new BBOptimizations);
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 88a4605..055c39f 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -865,7 +865,6 @@
     DCHECK(!new_label->flags.use_def_invalid);
     new_label->u.m.def_mask = &kEncodeAll;
     InsertLIRAfter(boundary_lir, new_label);
-    res = new_label;
   }
   return res;
 }
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 9f36e35..db844bc 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -44,7 +44,9 @@
 void Mir2Lir::Workaround7250540(RegLocation rl_dest, RegStorage zero_reg) {
   if (rl_dest.fp) {
     int pmap_index = SRegToPMap(rl_dest.s_reg_low);
-    if (promotion_map_[pmap_index].fp_location == kLocPhysReg) {
+    const bool is_fp_promoted = promotion_map_[pmap_index].fp_location == kLocPhysReg;
+    const bool is_core_promoted = promotion_map_[pmap_index].core_location == kLocPhysReg;
+    if (is_fp_promoted || is_core_promoted) {
       // Now, determine if this vreg is ever used as a reference.  If not, we're done.
       bool used_as_reference = false;
       int base_vreg = mir_graph_->SRegToVReg(rl_dest.s_reg_low);
@@ -61,7 +63,7 @@
         temp_reg = AllocTemp();
         LoadConstant(temp_reg, 0);
       }
-      if (promotion_map_[pmap_index].core_location == kLocPhysReg) {
+      if (is_core_promoted) {
         // Promoted - just copy in a zero
         OpRegCopy(RegStorage::Solo32(promotion_map_[pmap_index].core_reg), temp_reg);
       } else {
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 909077e..19c2a5a 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -560,6 +560,7 @@
   // (1 << kNullCheckElimination) |
   // (1 << kClassInitCheckElimination) |
   // (1 << kGlobalValueNumbering) |
+  (1 << kGvnDeadCodeElimination) |
   // (1 << kLocalValueNumbering) |
   // (1 << kPromoteRegs) |
   // (1 << kTrackLiveTemps) |
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index d0739a6..bf3ed14 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -40,6 +40,16 @@
   return mirror::ObjectArray<mirror::Object>::OffsetOfElement(index).SizeValue();
 }
 
+static bool IsSingleGoto(HBasicBlock* block) {
+  HLoopInformation* loop_info = block->GetLoopInformation();
+  // TODO: Remove the null check b/19084197.
+  return (block->GetFirstInstruction() != nullptr)
+      && (block->GetFirstInstruction() == block->GetLastInstruction())
+      && block->GetLastInstruction()->IsGoto()
+      // Back edges generate the suspend check.
+      && (loop_info == nullptr || !loop_info->IsBackEdge(block));
+}
+
 void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
   Initialize();
   if (!is_leaf) {
@@ -56,12 +66,38 @@
   CompileInternal(allocator, /* is_baseline */ true);
 }
 
+bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
+  DCHECK_EQ(block_order_->Get(current_block_index_), current);
+  return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
+}
+
+HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
+  for (size_t i = current_block_index_ + 1; i < block_order_->Size(); ++i) {
+    HBasicBlock* block = block_order_->Get(i);
+    if (!IsSingleGoto(block)) {
+      return block;
+    }
+  }
+  return nullptr;
+}
+
+HBasicBlock* CodeGenerator::FirstNonEmptyBlock(HBasicBlock* block) const {
+  while (IsSingleGoto(block)) {
+    block = block->GetSuccessors().Get(0);
+  }
+  return block;
+}
+
 void CodeGenerator::CompileInternal(CodeAllocator* allocator, bool is_baseline) {
   HGraphVisitor* instruction_visitor = GetInstructionVisitor();
   DCHECK_EQ(current_block_index_, 0u);
   GenerateFrameEntry();
   for (size_t e = block_order_->Size(); current_block_index_ < e; ++current_block_index_) {
     HBasicBlock* block = block_order_->Get(current_block_index_);
+    // Don't generate code for an empty block. Its predecessors will branch to its successor
+    // directly. Also, the label of that block will not be emitted, so this helps catch
+    // errors where we reference that label.
+    if (IsSingleGoto(block)) continue;
     Bind(block);
     for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
       HInstruction* current = it.Current();
@@ -338,12 +374,6 @@
   }
 }
 
-bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
-  DCHECK_EQ(block_order_->Get(current_block_index_), current);
-  return (current_block_index_ < block_order_->Size() - 1)
-      && (block_order_->Get(current_block_index_ + 1) == next);
-}
-
 CodeGenerator* CodeGenerator::Create(HGraph* graph,
                                      InstructionSet instruction_set,
                                      const InstructionSetFeatures& isa_features,
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index efd0c84..6c78f10 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -91,6 +91,8 @@
 
   HGraph* GetGraph() const { return graph_; }
 
+  HBasicBlock* GetNextBlockToEmit() const;
+  HBasicBlock* FirstNonEmptyBlock(HBasicBlock* block) const;
   bool GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const;
 
   size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
@@ -314,6 +316,14 @@
     return GetFrameSize() == (CallPushesPC() ? GetWordSize() : 0);
   }
 
+  // Arm64 has its own type for a label, so we need to templatize this method
+  // to share the logic.
+  template <typename T>
+  T* CommonGetLabelOf(T* raw_pointer_to_labels_array, HBasicBlock* block) const {
+    block = FirstNonEmptyBlock(block);
+    return raw_pointer_to_labels_array + block->GetBlockId();
+  }
+
   // Frame size required for this method.
   uint32_t frame_size_;
   uint32_t core_spill_mask_;
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 47d81ff..f1a3729 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -252,7 +252,7 @@
   void MarkGCCard(Register temp, Register card, Register object, Register value);
 
   Label* GetLabelOf(HBasicBlock* block) const {
-    return block_labels_.GetRawStorage() + block->GetBlockId();
+    return CommonGetLabelOf<Label>(block_labels_.GetRawStorage(), block);
   }
 
   void Initialize() OVERRIDE {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 2e937e2..afb7fc3 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -214,7 +214,7 @@
   void Bind(HBasicBlock* block) OVERRIDE;
 
   vixl::Label* GetLabelOf(HBasicBlock* block) const {
-    return block_labels_ + block->GetBlockId();
+    return CommonGetLabelOf<vixl::Label>(block_labels_, block);
   }
 
   void Move(HInstruction* instruction, Location location, HInstruction* move_for) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 107ddaf..f5a9b7d 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -234,7 +234,7 @@
   void LoadCurrentMethod(Register reg);
 
   Label* GetLabelOf(HBasicBlock* block) const {
-    return block_labels_.GetRawStorage() + block->GetBlockId();
+    return CommonGetLabelOf<Label>(block_labels_.GetRawStorage(), block);
   }
 
   void Initialize() OVERRIDE {
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index dbdbf86..707c999 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -232,7 +232,7 @@
   void LoadCurrentMethod(CpuRegister reg);
 
   Label* GetLabelOf(HBasicBlock* block) const {
-    return block_labels_.GetRawStorage() + block->GetBlockId();
+    return CommonGetLabelOf<Label>(block_labels_.GetRawStorage(), block);
   }
 
   void Initialize() OVERRIDE {
diff --git a/compiler/utils/dex_instruction_utils.h b/compiler/utils/dex_instruction_utils.h
index 2c6e525..bb2c592 100644
--- a/compiler/utils/dex_instruction_utils.h
+++ b/compiler/utils/dex_instruction_utils.h
@@ -110,6 +110,10 @@
   return Instruction::AGET <= code && code <= Instruction::APUT_SHORT;
 }
 
+constexpr bool IsInstructionBinOp2Addr(Instruction::Code code) {
+  return Instruction::ADD_INT_2ADDR <= code && code <= Instruction::REM_DOUBLE_2ADDR;
+}
+
 // TODO: Remove the #if guards below when we fully migrate to C++14.
 
 constexpr bool IsInvokeInstructionRange(Instruction::Code opcode) {
diff --git a/runtime/base/variant_map.h b/runtime/base/variant_map.h
index cf7977e..c9718fc 100644
--- a/runtime/base/variant_map.h
+++ b/runtime/base/variant_map.h
@@ -271,8 +271,11 @@
   // Set a value for a given key, overwriting the previous value if any.
   template <typename TValue>
   void Set(const TKey<TValue>& key, const TValue& value) {
+    // Clone the value first, to protect against &value == GetValuePtr(key).
+    auto* new_value = new TValue(value);
+
     Remove(key);
-    storage_map_.insert({{key.Clone(), new TValue(value)}});
+    storage_map_.insert({{key.Clone(), new_value}});
   }
 
   // Set a value for a given key, only if there was no previous value before.
diff --git a/runtime/base/variant_map_test.cc b/runtime/base/variant_map_test.cc
index 827de46..f306a48 100644
--- a/runtime/base/variant_map_test.cc
+++ b/runtime/base/variant_map_test.cc
@@ -38,10 +38,12 @@
 
     static const Key<int> Apple;
     static const Key<double> Orange;
+    static const Key<std::string> Label;
   };
 
   const FruitMap::Key<int> FruitMap::Apple;
   const FruitMap::Key<double> FruitMap::Orange;
+  const FruitMap::Key<std::string> FruitMap::Label;
 }  // namespace
 
 TEST(VariantMaps, BasicReadWrite) {
@@ -67,6 +69,7 @@
   EXPECT_DOUBLE_EQ(555.0, *fm.Get(FruitMap::Orange));
   EXPECT_EQ(size_t(2), fm.Size());
 
+  // Simple remove
   fm.Remove(FruitMap::Apple);
   EXPECT_FALSE(fm.Exists(FruitMap::Apple));
 
@@ -75,6 +78,24 @@
   EXPECT_FALSE(fm.Exists(FruitMap::Orange));
 }
 
+TEST(VariantMaps, SetPreviousValue) {
+  FruitMap fm;
+
+  // Indirect remove by setting yourself again
+  fm.Set(FruitMap::Label, std::string("hello_world"));
+  auto* ptr = fm.Get(FruitMap::Label);
+  ASSERT_TRUE(ptr != nullptr);
+  *ptr = "foobar";
+
+  // Set the value to the same exact pointer which we got out of the map.
+  // This should cleanly 'just work' and not try to delete the value too early.
+  fm.Set(FruitMap::Label, *ptr);
+
+  auto* new_ptr = fm.Get(FruitMap::Label);
+  ASSERT_TRUE(ptr != nullptr);
+  EXPECT_EQ(std::string("foobar"), *new_ptr);
+}
+
 TEST(VariantMaps, RuleOfFive) {
   // Test empty constructor
   FruitMap fmEmpty;
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 3592d2c..8609807 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -3511,7 +3511,7 @@
           StringPrintf("Rejecting class %s that attempts to sub-class erroneous class %s",
                        PrettyDescriptor(klass.Get()).c_str(),
                        PrettyDescriptor(super.Get()).c_str()));
-      LOG(ERROR) << error_msg  << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8();
+      LOG(WARNING) << error_msg  << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8();
       Handle<mirror::Throwable> cause(hs.NewHandle(self->GetException(nullptr)));
       if (cause.Get() != nullptr) {
         self->ClearException();
@@ -3584,7 +3584,7 @@
       }
     }
   } else {
-    LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(klass.Get())
+    LOG(WARNING) << "Verification failed on class " << PrettyDescriptor(klass.Get())
         << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8()
         << " because: " << error_msg;
     self->AssertNoPendingException();
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 678b29a..a3d3b47 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -297,6 +297,9 @@
 // Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
 static bool gJdwpConfigured = false;
 
+// JDWP options for debugging. Only valid if IsJdwpConfigured() is true.
+static JDWP::JdwpOptions gJdwpOptions;
+
 // Runtime JDWP state.
 static JDWP::JdwpState* gJdwpState = nullptr;
 static bool gDebuggerConnected;  // debugger or DDMS is connected.
@@ -529,13 +532,11 @@
   }
 }
 
-void Dbg::StartJdwp(const JDWP::JdwpOptions* jdwp_options) {
-  gJdwpConfigured = (jdwp_options != nullptr);
+void Dbg::StartJdwp() {
   if (!gJdwpAllowed || !IsJdwpConfigured()) {
     // No JDWP for you!
     return;
   }
-  DCHECK_NE(jdwp_options->transport, JDWP::kJdwpTransportUnknown);
 
   CHECK(gRegistry == nullptr);
   gRegistry = new ObjectRegistry;
@@ -543,7 +544,7 @@
   // Init JDWP if the debugger is enabled. This may connect out to a
   // debugger, passively listen for a debugger, or block waiting for a
   // debugger.
-  gJdwpState = JDWP::JdwpState::Create(jdwp_options);
+  gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
   if (gJdwpState == nullptr) {
     // We probably failed because some other process has the port already, which means that
     // if we don't abort the user is likely to think they're talking to us when they're actually
@@ -720,6 +721,12 @@
   return gDebuggerActive;
 }
 
+void Dbg::ConfigureJdwp(const JDWP::JdwpOptions& jdwp_options) {
+  CHECK_NE(jdwp_options.transport, JDWP::kJdwpTransportUnknown);
+  gJdwpOptions = jdwp_options;
+  gJdwpConfigured = true;
+}
+
 bool Dbg::IsJdwpConfigured() {
   return gJdwpConfigured;
 }
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 6762608..0c22148 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -228,7 +228,7 @@
 
   static void SetJdwpAllowed(bool allowed);
 
-  static void StartJdwp(const JDWP::JdwpOptions* jdwp_options);
+  static void StartJdwp();
   static void StopJdwp();
 
   // Invoked by the GC in case we need to keep DDMS informed.
@@ -254,6 +254,9 @@
   // just DDMS (or nothing at all).
   static bool IsDebuggerActive();
 
+  // Configures JDWP with parsed command-line options.
+  static void ConfigureJdwp(const JDWP::JdwpOptions& jdwp_options);
+
   // Returns true if we had -Xrunjdwp or -agentlib:jdwp= on the command line.
   static bool IsJdwpConfigured();
 
diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc
index dc0cb7b..97b17bf 100644
--- a/runtime/native/java_lang_Runtime.cc
+++ b/runtime/native/java_lang_Runtime.cc
@@ -65,7 +65,7 @@
       Fn android_update_LD_LIBRARY_PATH = reinterpret_cast<Fn>(sym);
       (*android_update_LD_LIBRARY_PATH)(ldLibraryPath.c_str());
     } else {
-      LOG(ERROR) << "android_update_LD_LIBRARY_PATH not found; .so dependencies will not work!";
+      LOG(WARNING) << "android_update_LD_LIBRARY_PATH not found; .so dependencies will not work!";
     }
   }
 
diff --git a/runtime/parsed_options_test.cc b/runtime/parsed_options_test.cc
index 79dc2fa..658b656 100644
--- a/runtime/parsed_options_test.cc
+++ b/runtime/parsed_options_test.cc
@@ -98,4 +98,20 @@
   EXPECT_EQ("baz=qux", properties_list[1]);
 }
 
+TEST_F(ParsedOptionsTest, ParsedOptionsGc) {
+  RuntimeOptions options;
+  options.push_back(std::make_pair("-Xgc:MC", nullptr));
+
+  RuntimeArgumentMap map;
+  std::unique_ptr<ParsedOptions> parsed(ParsedOptions::Create(options, false, &map));
+  ASSERT_TRUE(parsed.get() != NULL);
+  ASSERT_NE(0u, map.Size());
+
+  using Opt = RuntimeArgumentMap;
+
+  EXPECT_TRUE(map.Exists(Opt::GcOption));
+
+  XGcOption xgc = map.GetOrDefault(Opt::GcOption);
+  EXPECT_EQ(gc::kCollectorTypeMC, xgc.collector_type_);
+}
 }  // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 6704963..c6e858b 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -173,8 +173,7 @@
       implicit_null_checks_(false),
       implicit_so_checks_(false),
       implicit_suspend_checks_(false),
-      is_native_bridge_loaded_(false),
-      jdwp_options_(nullptr) {
+      is_native_bridge_loaded_(false) {
   CheckAsmSupportOffsetsAndSizes();
 }
 
@@ -228,10 +227,6 @@
 
   // Make sure our internal threads are dead before we start tearing down things they're using.
   Dbg::StopJdwp();
-  if (jdwp_options_ != nullptr) {
-    delete jdwp_options_;
-  }
-
   delete signal_catcher_;
 
   // Make sure all other non-daemon threads have terminated, and all daemon threads are suspended.
@@ -595,7 +590,7 @@
 
   // Start the JDWP thread. If the command-line debugger flags specified "suspend=y",
   // this will pause the runtime, so we probably want this to come last.
-  Dbg::StartJdwp(jdwp_options_);
+  Dbg::StartJdwp();
 }
 
 void Runtime::StartSignalCatcher() {
@@ -805,8 +800,7 @@
   dump_gc_performance_on_shutdown_ = runtime_options.Exists(Opt::DumpGCPerformanceOnShutdown);
 
   if (runtime_options.Exists(Opt::JdwpOptions)) {
-    JDWP::JdwpOptions options = runtime_options.GetOrDefault(Opt::JdwpOptions);
-    jdwp_options_ = new JDWP::JdwpOptions(options);
+    Dbg::ConfigureJdwp(runtime_options.GetOrDefault(Opt::JdwpOptions));
   }
 
   BlockSignals();
diff --git a/runtime/runtime.h b/runtime/runtime.h
index d98eb8d..118c838 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -47,9 +47,6 @@
     class GarbageCollector;
   }  // namespace collector
 }  // namespace gc
-namespace JDWP {
-  struct JdwpOptions;
-}  // namespace JDWP
 namespace mirror {
   class ArtMethod;
   class ClassLoader;
@@ -685,9 +682,6 @@
   // that there's no native bridge.
   bool is_native_bridge_loaded_;
 
-  // JDWP options for debugging.
-  const JDWP::JdwpOptions* jdwp_options_;
-
   DISALLOW_COPY_AND_ASSIGN(Runtime);
 };
 std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 16edab3..cb6ed64 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1854,7 +1854,7 @@
 }
 
 void Thread::ThrowOutOfMemoryError(const char* msg) {
-  LOG(ERROR) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
+  LOG(WARNING) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
       msg, (tls32_.throwing_OutOfMemoryError ? " (recursive case)" : ""));
   ThrowLocation throw_location = GetCurrentLocationForThrow();
   if (!tls32_.throwing_OutOfMemoryError) {
@@ -1862,7 +1862,7 @@
     ThrowNewException(throw_location, "Ljava/lang/OutOfMemoryError;", msg);
     tls32_.throwing_OutOfMemoryError = false;
   } else {
-    Dump(LOG(ERROR));  // The pre-allocated OOME has no stack, so help out and log one.
+    Dump(LOG(WARNING));  // The pre-allocated OOME has no stack, so help out and log one.
     SetException(throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
   }
 }
diff --git a/test/030-bad-finalizer/check b/test/030-bad-finalizer/check
new file mode 100755
index 0000000..e5d5c4e
--- /dev/null
+++ b/test/030-bad-finalizer/check
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Strip timeout logging. These are "E/System" messages.
+sed -e '/^E\/System/d' "$2" > "$2.tmp"
+
+diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/059-finalizer-throw/check b/test/059-finalizer-throw/check
new file mode 100755
index 0000000..8bc59c6
--- /dev/null
+++ b/test/059-finalizer-throw/check
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Strip uncaught exception logging. These are "E/System" messages.
+sed -e '/^E\/System/d' "$2" > "$2.tmp"
+
+diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/099-vmdebug/check b/test/099-vmdebug/check
new file mode 100755
index 0000000..7b47ac1
--- /dev/null
+++ b/test/099-vmdebug/check
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Strip the process pids and line numbers from exact error messages.
+sed -e 's/^art E.*\] //' "$2" > "$2.tmp"
+
+diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null
diff --git a/test/099-vmdebug/expected.txt b/test/099-vmdebug/expected.txt
index 579f98f..392efe5 100644
--- a/test/099-vmdebug/expected.txt
+++ b/test/099-vmdebug/expected.txt
@@ -7,13 +7,17 @@
 status=0
 Test starting when already started
 status=1
+Trace already in progress, ignoring this request
 status=1
 Test stopping when already stopped
 status=0
+Trace stop requested, but no trace currently running
 status=0
 Test tracing with empty filename
+Unable to open trace file '': No such file or directory
 Got expected exception
 Test tracing with bogus (< 1024 && != 0) filesize
 Got expected exception
 Test sampling with bogus (<= 0) interval
+Invalid sampling interval: 0
 Got expected exception
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 04eea4e..0c49674 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -370,7 +370,7 @@
     if [ "$DEV_MODE" = "y" ]; then
         export ANDROID_LOG_TAGS='*:d'
     else
-        export ANDROID_LOG_TAGS='*:s'
+        export ANDROID_LOG_TAGS='*:e'
     fi
     export ANDROID_DATA="$DEX_LOCATION"
     export ANDROID_ROOT="${ANDROID_ROOT}"