Merge "Optimizing: Determine invoke-static/-direct dispatch early."
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 4abd191..c53479c 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -89,9 +89,11 @@
 HOST_CORE_DEX_FILES   := $(foreach jar,$(HOST_CORE_JARS),  $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
 TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
 
+ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
 # Classpath for Jack compilation: we only need core-libart.
 HOST_JACK_CLASSPATH_DEPENDENCIES   := $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack
 HOST_JACK_CLASSPATH                := $(foreach dep,$(HOST_JACK_CLASSPATH_DEPENDENCIES),$(abspath $(dep)))
 TARGET_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack
 TARGET_JACK_CLASSPATH              := $(foreach dep,$(TARGET_JACK_CLASSPATH_DEPENDENCIES),$(abspath $(dep)))
+endif
 endif # ART_ANDROID_COMMON_PATH_MK
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 1b3fcc6..b65ed77 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -78,6 +78,7 @@
 	optimizing/instruction_simplifier.cc \
 	optimizing/intrinsics.cc \
 	optimizing/licm.cc \
+	optimizing/load_store_elimination.cc \
 	optimizing/locations.cc \
 	optimizing/nodes.cc \
 	optimizing/optimization.cc \
@@ -94,7 +95,6 @@
 	optimizing/ssa_phi_elimination.cc \
 	optimizing/stack_map_stream.cc \
 	trampolines/trampoline_compiler.cc \
-	utils/arena_bit_vector.cc \
 	utils/assembler.cc \
 	utils/swap_space.cc \
 	buffered_output_stream.cc \
diff --git a/compiler/dex/gvn_dead_code_elimination.cc b/compiler/dex/gvn_dead_code_elimination.cc
index 4de3410..445859c 100644
--- a/compiler/dex/gvn_dead_code_elimination.cc
+++ b/compiler/dex/gvn_dead_code_elimination.cc
@@ -18,6 +18,7 @@
 
 #include "gvn_dead_code_elimination.h"
 
+#include "base/arena_bit_vector.h"
 #include "base/bit_vector-inl.h"
 #include "base/macros.h"
 #include "base/allocator.h"
@@ -26,7 +27,6 @@
 #include "dex_instruction.h"
 #include "dex/mir_graph.h"
 #include "local_value_numbering.h"
-#include "utils/arena_bit_vector.h"
 
 namespace art {
 
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 097abdc..2da8a98 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -19,6 +19,7 @@
 
 #include <stdint.h>
 
+#include "base/arena_bit_vector.h"
 #include "base/arena_containers.h"
 #include "base/bit_utils.h"
 #include "base/scoped_arena_containers.h"
@@ -30,7 +31,6 @@
 #include "mir_method_info.h"
 #include "reg_location.h"
 #include "reg_storage.h"
-#include "utils/arena_bit_vector.h"
 
 namespace art {
 
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 6f61954..ed193c7 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -1172,12 +1172,14 @@
                                                            field_index,
                                                            dex_pc);
     } else {
+      uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
       field_set = new (arena_) HInstanceFieldSet(null_check,
                                                  value,
                                                  field_type,
                                                  resolved_field->GetOffset(),
                                                  resolved_field->IsVolatile(),
                                                  field_index,
+                                                 class_def_index,
                                                  *dex_file_,
                                                  dex_compilation_unit_->GetDexCache(),
                                                  dex_pc);
@@ -1192,11 +1194,13 @@
                                                            field_index,
                                                            dex_pc);
     } else {
+      uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
       field_get = new (arena_) HInstanceFieldGet(null_check,
                                                  field_type,
                                                  resolved_field->GetOffset(),
                                                  resolved_field->IsVolatile(),
                                                  field_index,
+                                                 class_def_index,
                                                  *dex_file_,
                                                  dex_compilation_unit_->GetDexCache(),
                                                  dex_pc);
@@ -1338,6 +1342,8 @@
     cls = new (arena_) HClinitCheck(constant, dex_pc);
     current_block_->AddInstruction(cls);
   }
+
+  uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
   if (is_put) {
     // We need to keep the class alive before loading the value.
     Temporaries temps(graph_);
@@ -1350,6 +1356,7 @@
                                                                 resolved_field->GetOffset(),
                                                                 resolved_field->IsVolatile(),
                                                                 field_index,
+                                                                class_def_index,
                                                                 *dex_file_,
                                                                 dex_cache_,
                                                                 dex_pc));
@@ -1359,6 +1366,7 @@
                                                                 resolved_field->GetOffset(),
                                                                 resolved_field->IsVolatile(),
                                                                 field_index,
+                                                                class_def_index,
                                                                 *dex_file_,
                                                                 dex_cache_,
                                                                 dex_pc));
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 602d568..b0be446 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1580,6 +1580,21 @@
   HandleBinaryOp(instruction);
 }
 
+void LocationsBuilderARM64::VisitArm64IntermediateAddress(HArm64IntermediateAddress* instruction) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->GetOffset(), instruction));
+  locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM64::VisitArm64IntermediateAddress(
+    HArm64IntermediateAddress* instruction) {
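+  // Materialize `array + data_offset` into the output register (input 0 is the array,
+  // input 1 the offset constant). Array accesses that use this instruction as their base
+  // then skip the offset addition; see
+  // `InstructionSimplifierArm64::TryExtractArrayAccessAddress()`.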
+  __ Add(OutputRegister(instruction),
+         InputRegisterAt(instruction, 0),
+         Operand(InputOperandAt(instruction, 1)));
+}
+
 void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
@@ -1593,14 +1608,16 @@
 }
 
 void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
   Primitive::Type type = instruction->GetType();
   Register obj = InputRegisterAt(instruction, 0);
-  Location index = locations->InAt(1);
+  Location index = instruction->GetLocations()->InAt(1);
   size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
   MemOperand source = HeapOperand(obj);
+  CPURegister dest = OutputCPURegister(instruction);
+
   MacroAssembler* masm = GetVIXLAssembler();
   UseScratchRegisterScope temps(masm);
+  // Block pools between `Load` and `MaybeRecordImplicitNullCheck`.
   BlockPoolsScope block_pools(masm);
 
   if (index.IsConstant()) {
@@ -1608,15 +1625,26 @@
     source = HeapOperand(obj, offset);
   } else {
     Register temp = temps.AcquireSameSizeAs(obj);
-    __ Add(temp, obj, offset);
+    if (instruction->GetArray()->IsArm64IntermediateAddress()) {
+      // We do not need to compute the intermediate address from the array: the
+      // input instruction has done it already. See the comment in
+      // `InstructionSimplifierArm64::TryExtractArrayAccessAddress()`.
+      if (kIsDebugBuild) {
+        HArm64IntermediateAddress* tmp = instruction->GetArray()->AsArm64IntermediateAddress();
+        DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
+      }
+      temp = obj;
+    } else {
+      __ Add(temp, obj, offset);
+    }
     source = HeapOperand(temp, XRegisterFrom(index), LSL, Primitive::ComponentSizeShift(type));
   }
 
-  codegen_->Load(type, OutputCPURegister(instruction), source);
+  codegen_->Load(type, dest, source);
   codegen_->MaybeRecordImplicitNullCheck(instruction);
 
-  if (type == Primitive::kPrimNot) {
-    GetAssembler()->MaybeUnpoisonHeapReference(OutputCPURegister(instruction).W());
+  if (instruction->GetType() == Primitive::kPrimNot) {
+    GetAssembler()->MaybeUnpoisonHeapReference(dest.W());
   }
 }
 
@@ -1670,7 +1698,18 @@
     } else {
       UseScratchRegisterScope temps(masm);
       Register temp = temps.AcquireSameSizeAs(array);
-      __ Add(temp, array, offset);
+      if (instruction->GetArray()->IsArm64IntermediateAddress()) {
+        // We do not need to compute the intermediate address from the array: the
+        // input instruction has done it already. See the comment in
+        // `InstructionSimplifierArm64::TryExtractArrayAccessAddress()`.
+        if (kIsDebugBuild) {
+          HArm64IntermediateAddress* tmp = instruction->GetArray()->AsArm64IntermediateAddress();
+          DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
+        }
+        temp = array;
+      } else {
+        __ Add(temp, array, offset);
+      }
       destination = HeapOperand(temp,
                                 XRegisterFrom(index),
                                 LSL,
@@ -1680,6 +1719,7 @@
     codegen_->MaybeRecordImplicitNullCheck(instruction);
   } else {
     DCHECK(needs_write_barrier);
+    DCHECK(!instruction->GetArray()->IsArm64IntermediateAddress());
     vixl::Label done;
     SlowPathCodeARM64* slow_path = nullptr;
     {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index f509358..ab684ea 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -382,7 +382,7 @@
                      uint32_t dex_pc,
                      SlowPathCode* slow_path);
 
-  ParallelMoveResolverARM64* GetMoveResolver() { return &move_resolver_; }
+  ParallelMoveResolverARM64* GetMoveResolver() OVERRIDE { return &move_resolver_; }
 
   bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
     return false;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 0101574..6aed444 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1000,6 +1000,9 @@
 void CodeGeneratorMIPS::AddLocationAsTemp(Location location, LocationSummary* locations) {
   if (location.IsRegister()) {
     locations->AddTemp(location);
+  } else if (location.IsRegisterPair()) {
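+    // A 64-bit value held in a register pair occupies two core registers, so reserve
+    // both halves as temporaries.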
+    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
+    locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
   } else {
     UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
   }
@@ -3054,7 +3057,7 @@
       UNREACHABLE();
     case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
       // T9 = callee_method->entry_point_from_quick_compiled_code_;
-      __ LoadFromOffset(kLoadDoubleword,
+      __ LoadFromOffset(kLoadWord,
                         T9,
                         callee_method.AsRegister<Register>(),
                         ArtMethod::EntryPointFromQuickCompiledCodeOffset(
diff --git a/compiler/optimizing/common_arm64.h b/compiler/optimizing/common_arm64.h
index 4abe5e9..e1a8c9c 100644
--- a/compiler/optimizing/common_arm64.h
+++ b/compiler/optimizing/common_arm64.h
@@ -203,19 +203,23 @@
 
   int64_t value = CodeGenerator::GetInt64ValueOf(constant);
 
-  if (instr->IsAdd() || instr->IsSub() || instr->IsCondition() ||
-      instr->IsCompare() || instr->IsBoundsCheck()) {
+  if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
+    // Uses logical operations.
+    return vixl::Assembler::IsImmLogical(value, vixl::kXRegSize);
+  } else if (instr->IsNeg()) {
+    // Uses mov -immediate.
+    return vixl::Assembler::IsImmMovn(value, vixl::kXRegSize);
+  } else {
+    DCHECK(instr->IsAdd() ||
+           instr->IsArm64IntermediateAddress() ||
+           instr->IsBoundsCheck() ||
+           instr->IsCompare() ||
+           instr->IsCondition() ||
+           instr->IsSub());
     // Uses aliases of ADD/SUB instructions.
     // If `value` does not fit but `-value` does, VIXL will automatically use
     // the 'opposite' instruction.
     return vixl::Assembler::IsImmAddSub(value) || vixl::Assembler::IsImmAddSub(-value);
-  } else if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
-    // Uses logical operations.
-    return vixl::Assembler::IsImmLogical(value, vixl::kXRegSize);
-  } else {
-    DCHECK(instr->IsNeg());
-    // Uses mov -immediate.
-    return vixl::Assembler::IsImmMovn(value, vixl::kXRegSize);
   }
 }
 
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 0a1758a..c36de84 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -16,11 +16,11 @@
 
 #include "gvn.h"
 
+#include "base/arena_bit_vector.h"
 #include "base/arena_containers.h"
 #include "base/bit_vector-inl.h"
 #include "side_effects_analysis.h"
 #include "utils.h"
-#include "utils/arena_bit_vector.h"
 
 namespace art {
 
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index aa375f6..de60cf2 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -49,6 +49,7 @@
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
+                                                           kUnknownClassDefIndex,
                                                            graph->GetDexFile(),
                                                            dex_cache,
                                                            0));
@@ -57,6 +58,7 @@
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
+                                                           kUnknownClassDefIndex,
                                                            graph->GetDexFile(),
                                                            dex_cache,
                                                            0));
@@ -66,6 +68,7 @@
                                                            MemberOffset(43),
                                                            false,
                                                            kUnknownFieldIndex,
+                                                           kUnknownClassDefIndex,
                                                            graph->GetDexFile(),
                                                            dex_cache,
                                                            0));
@@ -77,6 +80,7 @@
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
+                                                           kUnknownClassDefIndex,
                                                            graph->GetDexFile(),
                                                            dex_cache,
                                                            0));
@@ -85,6 +89,7 @@
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
+                                                           kUnknownClassDefIndex,
                                                            graph->GetDexFile(),
                                                            dex_cache,
                                                            0));
@@ -128,6 +133,7 @@
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
+                                                           kUnknownClassDefIndex,
                                                            graph->GetDexFile(),
                                                            dex_cache,
                                                            0));
@@ -150,6 +156,7 @@
                                                           MemberOffset(42),
                                                           false,
                                                           kUnknownFieldIndex,
+                                                          kUnknownClassDefIndex,
                                                           graph->GetDexFile(),
                                                           dex_cache,
                                                           0));
@@ -159,6 +166,7 @@
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
+                                                           kUnknownClassDefIndex,
                                                            graph->GetDexFile(),
                                                            dex_cache,
                                                            0));
@@ -168,6 +176,7 @@
                                                           MemberOffset(42),
                                                           false,
                                                           kUnknownFieldIndex,
+                                                          kUnknownClassDefIndex,
                                                           graph->GetDexFile(),
                                                           dex_cache,
                                                           0));
@@ -208,6 +217,7 @@
                                                            MemberOffset(42),
                                                            false,
                                                            kUnknownFieldIndex,
+                                                           kUnknownClassDefIndex,
                                                            graph->GetDexFile(),
                                                            dex_cache,
                                                            0));
@@ -230,6 +240,7 @@
                                                                  MemberOffset(42),
                                                                  false,
                                                                  kUnknownFieldIndex,
+                                                                 kUnknownClassDefIndex,
                                                                  graph->GetDexFile(),
                                                                  dex_cache,
                                                                  0));
@@ -244,6 +255,7 @@
                                                                MemberOffset(42),
                                                                false,
                                                                kUnknownFieldIndex,
+                                                               kUnknownClassDefIndex,
                                                                graph->GetDexFile(),
                                                                dex_cache,
                                                                0));
@@ -253,6 +265,7 @@
                                                                MemberOffset(42),
                                                                false,
                                                                kUnknownFieldIndex,
+                                                               kUnknownClassDefIndex,
                                                                graph->GetDexFile(),
                                                                dex_cache,
                                                                0));
@@ -264,6 +277,7 @@
                                                           MemberOffset(42),
                                                           false,
                                                           kUnknownFieldIndex,
+                                                          kUnknownClassDefIndex,
                                                           graph->GetDexFile(),
                                                           dex_cache,
                                                           0));
@@ -364,6 +378,7 @@
                                                              MemberOffset(42),
                                                              false,
                                                              kUnknownFieldIndex,
+                                                             kUnknownClassDefIndex,
                                                              graph->GetDexFile(),
                                                              dex_cache,
                                                              0));
@@ -388,6 +403,7 @@
                                            MemberOffset(42),
                                            false,
                                            kUnknownFieldIndex,
+                                           kUnknownClassDefIndex,
                                            graph->GetDexFile(),
                                            dex_cache,
                                            0),
@@ -413,6 +429,7 @@
                                            MemberOffset(42),
                                            false,
                                            kUnknownFieldIndex,
+                                           kUnknownClassDefIndex,
                                            graph->GetDexFile(),
                                            dex_cache,
                                            0),
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index 4b2d36f..eb79f46 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -16,8 +16,65 @@
 
 #include "instruction_simplifier_arm64.h"
 
+#include "mirror/array-inl.h"
+
 namespace art {
 namespace arm64 {
 
+void InstructionSimplifierArm64Visitor::TryExtractArrayAccessAddress(HInstruction* access,
+                                                                     HInstruction* array,
+                                                                     HInstruction* index,
+                                                                     int access_size) {
+  if (index->IsConstant() ||
+      (index->IsBoundsCheck() && index->AsBoundsCheck()->GetIndex()->IsConstant())) {
+    // When the index is a constant all the addressing can be fitted in the
+    // memory access instruction, so do not split the access.
+    return;
+  }
+  if (access->IsArraySet() &&
+      access->AsArraySet()->GetValue()->GetType() == Primitive::kPrimNot) {
+    // The access may require a runtime call or the original array pointer.
+    return;
+  }
+
+  // Proceed to extract the base address computation.
+  ArenaAllocator* arena = GetGraph()->GetArena();
+
+  HIntConstant* offset =
+      GetGraph()->GetIntConstant(mirror::Array::DataOffset(access_size).Uint32Value());
+  HArm64IntermediateAddress* address =
+      new (arena) HArm64IntermediateAddress(array, offset, kNoDexPc);
+  access->GetBlock()->InsertInstructionBefore(address, access);
+  access->ReplaceInput(address, 0);
+  // Both instructions must depend on GC to prevent any instruction that can
+  // trigger GC from being inserted between the two.
+  access->AddSideEffects(SideEffects::DependsOnGC());
+  DCHECK(address->GetSideEffects().Includes(SideEffects::DependsOnGC()));
+  DCHECK(access->GetSideEffects().Includes(SideEffects::DependsOnGC()));
+  // TODO: Code generation for HArrayGet and HArraySet will check whether the input address
+  // is an HArm64IntermediateAddress and generate appropriate code.
+  // We would like to replace the `HArrayGet` and `HArraySet` with custom instructions (maybe
+  // `HArm64Load` and `HArm64Store`). We defer these changes because these new instructions would
+  // not bring any advantages yet.
+  // Also see the comments in
+  // `InstructionCodeGeneratorARM64::VisitArrayGet()` and
+  // `InstructionCodeGeneratorARM64::VisitArraySet()`.
+  RecordSimplification();
+}
+
+void InstructionSimplifierArm64Visitor::VisitArrayGet(HArrayGet* instruction) {
+  TryExtractArrayAccessAddress(instruction,
+                               instruction->GetArray(),
+                               instruction->GetIndex(),
+                               Primitive::ComponentSize(instruction->GetType()));
+}
+
+void InstructionSimplifierArm64Visitor::VisitArraySet(HArraySet* instruction) {
+  TryExtractArrayAccessAddress(instruction,
+                               instruction->GetArray(),
+                               instruction->GetIndex(),
+                               Primitive::ComponentSize(instruction->GetComponentType()));
+}
+
 }  // namespace arm64
 }  // namespace art
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index d7f4eae..4b697db 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -35,6 +35,14 @@
     }
   }
 
+  void TryExtractArrayAccessAddress(HInstruction* access,
+                                    HInstruction* array,
+                                    HInstruction* index,
+                                    int access_size);
+
+  void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
+  void VisitArraySet(HArraySet* instruction) OVERRIDE;
+
   OptimizingCompilerStats* stats_;
 };
 
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 58e479a..0a5acc3 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -961,6 +961,14 @@
   CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
 }
 void IntrinsicLocationsBuilderARM::VisitUnsafeCASObject(HInvoke* invoke) {
+  // The UnsafeCASObject intrinsic does not always work when heap
+  // poisoning is enabled (it breaks run-test 004-UnsafeTest); turn it
+  // off temporarily as a quick fix.
+  // TODO(rpl): Fix it and turn it back on.
+  if (kPoisonHeapReferences) {
+    return;
+  }
+
   CreateIntIntIntIntIntToIntPlusTemps(arena_, invoke);
 }
 void IntrinsicCodeGeneratorARM::VisitUnsafeCASInt(HInvoke* invoke) {
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 4da94ee..059abf0 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -1087,6 +1087,14 @@
   CreateIntIntIntIntIntToInt(arena_, invoke);
 }
 void IntrinsicLocationsBuilderARM64::VisitUnsafeCASObject(HInvoke* invoke) {
+  // The UnsafeCASObject intrinsic does not always work when heap
+  // poisoning is enabled (it breaks run-test 004-UnsafeTest); turn it
+  // off temporarily as a quick fix.
+  // TODO(rpl): Fix it and turn it back on.
+  if (kPoisonHeapReferences) {
+    return;
+  }
+
   CreateIntIntIntIntIntToInt(arena_, invoke);
 }
 
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 56c4177..0ab0b80 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -272,7 +272,9 @@
   GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
 }
 
-static void GenNumberOfLeadingZeroes(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) {
+static void GenNumberOfLeadingZeroes(LocationSummary* locations,
+                                     bool is64bit,
+                                     Mips64Assembler* assembler) {
   GpuRegister in  = locations->InAt(0).AsRegister<GpuRegister>();
   GpuRegister out = locations->Out().AsRegister<GpuRegister>();
 
@@ -301,7 +303,9 @@
   GenNumberOfLeadingZeroes(invoke->GetLocations(), true, GetAssembler());
 }
 
-static void GenNumberOfTrailingZeroes(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) {
+static void GenNumberOfTrailingZeroes(LocationSummary* locations,
+                                      bool is64bit,
+                                      Mips64Assembler* assembler) {
   Location in = locations->InAt(0);
   Location out = locations->Out();
 
@@ -383,7 +387,7 @@
   GenRotateRight(invoke, Primitive::kPrimInt, GetAssembler());
 }
 
-// int java.lang.Long.rotateRight(long i, int distance)
+// long java.lang.Long.rotateRight(long i, int distance)
 void IntrinsicLocationsBuilderMIPS64::VisitLongRotateRight(HInvoke* invoke) {
   LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kNoCall,
@@ -446,7 +450,7 @@
   GenRotateLeft(invoke, Primitive::kPrimInt, GetAssembler());
 }
 
-// int java.lang.Long.rotateLeft(long i, int distance)
+// long java.lang.Long.rotateLeft(long i, int distance)
 void IntrinsicLocationsBuilderMIPS64::VisitLongRotateLeft(HInvoke* invoke) {
   LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kNoCall,
@@ -754,17 +758,19 @@
   __ SqrtD(out, in);
 }
 
-static void CreateFPToFP(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFPToFP(ArenaAllocator* arena,
+                         HInvoke* invoke,
+                         Location::OutputOverlap overlaps = Location::kOutputOverlap) {
   LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                            LocationSummary::kNoCall,
                                                            kIntrinsified);
   locations->SetInAt(0, Location::RequiresFpuRegister());
-  locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+  locations->SetOut(Location::RequiresFpuRegister(), overlaps);
 }
 
 // double java.lang.Math.rint(double)
 void IntrinsicLocationsBuilderMIPS64::VisitMathRint(HInvoke* invoke) {
-  CreateFPToFP(arena_, invoke);
+  CreateFPToFP(arena_, invoke, Location::kNoOutputOverlap);
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitMathRint(HInvoke* invoke) {
@@ -788,15 +794,22 @@
                                              kQuietNaN |
                                              kSignalingNaN;
 
-void IntrinsicCodeGeneratorMIPS64::VisitMathFloor(HInvoke* invoke) {
-  LocationSummary* locations = invoke->GetLocations();
-  Mips64Assembler* assembler = GetAssembler();
+enum FloatRoundingMode {
+  kFloor,
+  kCeil,
+};
+
+static void GenRoundingMode(LocationSummary* locations,
+                            FloatRoundingMode mode,
+                            Mips64Assembler* assembler) {
   FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
   FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
 
+  DCHECK_NE(in, out);
+
   Label done;
 
-  // double floor(double in) {
+  // double floor/ceil(double in) {
   //     if in.isNaN || in.isInfinite || in.isZero {
   //         return in;
   //     }
@@ -806,19 +819,23 @@
   __ MovD(out, in);
   __ Bnezc(AT, &done);
 
-  //     Long outLong = floor(in);
+  //     Long outLong = floor/ceil(in);
   //     if outLong == Long.MAX_VALUE {
-  //         // floor() has almost certainly returned a value which
-  //         // can't be successfully represented as a signed 64-bit
-  //         // number.  Java expects that the input value will be
-  //         // returned in these cases.
-  //         // There is also a small probability that floor(in)
-  //         // correctly truncates the input value to Long.MAX_VALUE.  In
-  //         // that case, this exception handling code still does the
-  //         // correct thing.
+  //         // floor()/ceil() has almost certainly returned a value
+  //         // which can't be successfully represented as a signed
+  //         // 64-bit number.  Java expects that the input value will
+  //         // be returned in these cases.
+  //         // There is also a small probability that floor(in)/ceil(in)
+  //         // correctly truncates/rounds up the input value to
+  //         // Long.MAX_VALUE.  In that case, this exception handling
+  //         // code still does the correct thing.
   //         return in;
   //     }
-  __ FloorLD(out, in);
+  if (mode == kFloor) {
+    __ FloorLD(out, in);
+  } else if (mode == kCeil) {
+    __ CeilLD(out, in);
+  }
   __ Dmfc1(AT, out);
   __ MovD(out, in);
   __ LoadConst64(TMP, kPrimLongMax);
@@ -832,53 +849,17 @@
   // }
 }
 
+void IntrinsicCodeGeneratorMIPS64::VisitMathFloor(HInvoke* invoke) {
+  GenRoundingMode(invoke->GetLocations(), kFloor, GetAssembler());
+}
+
 // double java.lang.Math.ceil(double)
 void IntrinsicLocationsBuilderMIPS64::VisitMathCeil(HInvoke* invoke) {
   CreateFPToFP(arena_, invoke);
 }
 
 void IntrinsicCodeGeneratorMIPS64::VisitMathCeil(HInvoke* invoke) {
-  LocationSummary* locations = invoke->GetLocations();
-  Mips64Assembler* assembler = GetAssembler();
-  FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
-  FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
-
-  Label done;
-
-  // double ceil(double in) {
-  //     if in.isNaN || in.isInfinite || in.isZero {
-  //         return in;
-  //     }
-  __ ClassD(out, in);
-  __ Dmfc1(AT, out);
-  __ Andi(AT, AT, kFPLeaveUnchanged);   // +0.0 | +Inf | -0.0 | -Inf | qNaN | sNaN
-  __ MovD(out, in);
-  __ Bnezc(AT, &done);
-
-  //     Long outLong = ceil(in);
-  //     if outLong == Long.MAX_VALUE {
-  //         // ceil() has almost certainly returned a value which
-  //         // can't be successfully represented as a signed 64-bit
-  //         // number.  Java expects that the input value will be
-  //         // returned in these cases.
-  //         // There is also a small probability that ceil(in)
-  //         // correctly rounds up the input value to Long.MAX_VALUE.  In
-  //         // that case, this exception handling code still does the
-  //         // correct thing.
-  //         return in;
-  //     }
-  __ CeilLD(out, in);
-  __ Dmfc1(AT, out);
-  __ MovD(out, in);
-  __ LoadConst64(TMP, kPrimLongMax);
-  __ Beqc(AT, TMP, &done);
-
-  //     double out = outLong;
-  //     return out;
-  __ Dmtc1(AT, out);
-  __ Cvtdl(out, out);
-  __ Bind(&done);
-  // }
+  GenRoundingMode(invoke->GetLocations(), kCeil, GetAssembler());
 }
 
 // byte libcore.io.Memory.peekByte(long address)
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 8a7aded..040bf6a 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -45,7 +45,7 @@
 
 
 X86Assembler* IntrinsicCodeGeneratorX86::GetAssembler() {
-  return reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
+  return down_cast<X86Assembler*>(codegen_->GetAssembler());
 }
 
 ArenaAllocator* IntrinsicCodeGeneratorX86::GetAllocator() {
@@ -1728,7 +1728,7 @@
                          Primitive::Type type,
                          bool is_volatile,
                          CodeGeneratorX86* codegen) {
-  X86Assembler* assembler = reinterpret_cast<X86Assembler*>(codegen->GetAssembler());
+  X86Assembler* assembler = down_cast<X86Assembler*>(codegen->GetAssembler());
   Register base = locations->InAt(1).AsRegister<Register>();
   Register offset = locations->InAt(2).AsRegisterPairLow<Register>();
   Location value_loc = locations->InAt(3);
@@ -1822,7 +1822,7 @@
   locations->SetOut(Location::RegisterLocation(EAX));
   if (type == Primitive::kPrimNot) {
     // Need temp registers for card-marking.
-    locations->AddTemp(Location::RequiresRegister());
+    locations->AddTemp(Location::RequiresRegister());  // Possibly used for reference poisoning too.
     // Need a byte register for marking.
     locations->AddTemp(Location::RegisterLocation(ECX));
   }
@@ -1837,20 +1837,11 @@
 }
 
 void IntrinsicLocationsBuilderX86::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic does not always work when heap
-  // poisoning is enabled (it breaks several libcore tests); turn it
-  // off temporarily as a quick fix.
-  // TODO(rpl): Fix it and turn it back on.
-  if (kPoisonHeapReferences) {
-    return;
-  }
-
   CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimNot, invoke);
 }
 
 static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86* codegen) {
-  X86Assembler* assembler =
-    reinterpret_cast<X86Assembler*>(codegen->GetAssembler());
+  X86Assembler* assembler = down_cast<X86Assembler*>(codegen->GetAssembler());
   LocationSummary* locations = invoke->GetLocations();
 
   Register base = locations->InAt(1).AsRegister<Register>();
@@ -1858,47 +1849,92 @@
   Location out = locations->Out();
   DCHECK_EQ(out.AsRegister<Register>(), EAX);
 
-  if (type == Primitive::kPrimLong) {
-    DCHECK_EQ(locations->InAt(3).AsRegisterPairLow<Register>(), EAX);
-    DCHECK_EQ(locations->InAt(3).AsRegisterPairHigh<Register>(), EDX);
-    DCHECK_EQ(locations->InAt(4).AsRegisterPairLow<Register>(), EBX);
-    DCHECK_EQ(locations->InAt(4).AsRegisterPairHigh<Register>(), ECX);
-    __ LockCmpxchg8b(Address(base, offset, TIMES_1, 0));
-  } else {
-    // Integer or object.
+  if (type == Primitive::kPrimNot) {
     Register expected = locations->InAt(3).AsRegister<Register>();
+    // Ensure `expected` is in EAX (required by the CMPXCHG instruction).
     DCHECK_EQ(expected, EAX);
     Register value = locations->InAt(4).AsRegister<Register>();
-    if (type == Primitive::kPrimNot) {
-      // Mark card for object assuming new value is stored.
-      bool value_can_be_null = true;  // TODO: Worth finding out this information?
-      codegen->MarkGCCard(locations->GetTemp(0).AsRegister<Register>(),
-                          locations->GetTemp(1).AsRegister<Register>(),
-                          base,
-                          value,
-                          value_can_be_null);
 
-      if (kPoisonHeapReferences) {
-        __ PoisonHeapReference(expected);
-        __ PoisonHeapReference(value);
+    // Mark card for object assuming new value is stored.
+    bool value_can_be_null = true;  // TODO: Worth finding out this information?
+    codegen->MarkGCCard(locations->GetTemp(0).AsRegister<Register>(),
+                        locations->GetTemp(1).AsRegister<Register>(),
+                        base,
+                        value,
+                        value_can_be_null);
+
+    bool base_equals_value = (base == value);
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // If `base` and `value` are the same register location, move
+        // `value` to a temporary register.  This way, poisoning
+        // `value` won't invalidate `base`.
+        value = locations->GetTemp(0).AsRegister<Register>();
+        __ movl(value, base);
       }
+
+      // Check that the register allocator did not assign the location
+      // of `expected` (EAX) to `value` nor to `base`, so that heap
+      // poisoning (when enabled) works as intended below.
+      // - If `value` were equal to `expected`, both references would
+      //   be poisoned twice, meaning they would not be poisoned at
+      //   all, as heap poisoning uses address negation.
+      // - If `base` were equal to `expected`, poisoning `expected`
+      //   would invalidate `base`.
+      DCHECK_NE(value, expected);
+      DCHECK_NE(base, expected);
+
+      __ PoisonHeapReference(expected);
+      __ PoisonHeapReference(value);
     }
 
     __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
-  }
 
-  // locked cmpxchg has full barrier semantics, and we don't need scheduling
-  // barriers at this time.
+    // locked cmpxchg has full barrier semantics, and we don't need
+    // scheduling barriers at this time.
 
-  // Convert ZF into the boolean result.
-  __ setb(kZero, out.AsRegister<Register>());
-  __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
+    // Convert ZF into the boolean result.
+    __ setb(kZero, out.AsRegister<Register>());
+    __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
 
-  if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
-    Register value = locations->InAt(4).AsRegister<Register>();
-    __ UnpoisonHeapReference(value);
-    // Do not unpoison the reference contained in register `expected`,
-    // as it is the same as register `out`.
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // `value` has been moved to a temporary register, no need to
+        // unpoison it.
+      } else {
+        // Ensure `value` is different from `out`, so that unpoisoning
+        // the former does not invalidate the latter.
+        DCHECK_NE(value, out.AsRegister<Register>());
+        __ UnpoisonHeapReference(value);
+      }
+      // Do not unpoison the reference contained in register
+      // `expected`, as it is the same as register `out` (EAX).
+    }
+  } else {
+    if (type == Primitive::kPrimInt) {
+      // Ensure the expected value is in EAX (required by the CMPXCHG
+      // instruction).
+      DCHECK_EQ(locations->InAt(3).AsRegister<Register>(), EAX);
+      __ LockCmpxchgl(Address(base, offset, TIMES_1, 0),
+                      locations->InAt(4).AsRegister<Register>());
+    } else if (type == Primitive::kPrimLong) {
+      // Ensure the expected value is in EAX:EDX and that the new
+      // value is in EBX:ECX (required by the CMPXCHG8B instruction).
+      DCHECK_EQ(locations->InAt(3).AsRegisterPairLow<Register>(), EAX);
+      DCHECK_EQ(locations->InAt(3).AsRegisterPairHigh<Register>(), EDX);
+      DCHECK_EQ(locations->InAt(4).AsRegisterPairLow<Register>(), EBX);
+      DCHECK_EQ(locations->InAt(4).AsRegisterPairHigh<Register>(), ECX);
+      __ LockCmpxchg8b(Address(base, offset, TIMES_1, 0));
+    } else {
+      LOG(FATAL) << "Unexpected CAS type " << type;
+    }
+
+    // locked cmpxchg has full barrier semantics, and we don't need
+    // scheduling barriers at this time.
+
+    // Convert ZF into the boolean result.
+    __ setb(kZero, out.AsRegister<Register>());
+    __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
   }
 }
 
@@ -1936,8 +1972,7 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitIntegerReverse(HInvoke* invoke) {
-  X86Assembler* assembler =
-    reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
+  X86Assembler* assembler = down_cast<X86Assembler*>(codegen_->GetAssembler());
   LocationSummary* locations = invoke->GetLocations();
 
   Register reg = locations->InAt(0).AsRegister<Register>();
@@ -1968,8 +2003,7 @@
 }
 
 void IntrinsicCodeGeneratorX86::VisitLongReverse(HInvoke* invoke) {
-  X86Assembler* assembler =
-    reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
+  X86Assembler* assembler = down_cast<X86Assembler*>(codegen_->GetAssembler());
   LocationSummary* locations = invoke->GetLocations();
 
   Register reg_low = locations->InAt(0).AsRegisterPairLow<Register>();
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 7a1d92d..14c65c9 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -41,7 +41,7 @@
 
 
 X86_64Assembler* IntrinsicCodeGeneratorX86_64::GetAssembler() {
-  return reinterpret_cast<X86_64Assembler*>(codegen_->GetAssembler());
+  return down_cast<X86_64Assembler*>(codegen_->GetAssembler());
 }
 
 ArenaAllocator* IntrinsicCodeGeneratorX86_64::GetAllocator() {
@@ -1822,7 +1822,7 @@
 // memory model.
 static void GenUnsafePut(LocationSummary* locations, Primitive::Type type, bool is_volatile,
                          CodeGeneratorX86_64* codegen) {
-  X86_64Assembler* assembler = reinterpret_cast<X86_64Assembler*>(codegen->GetAssembler());
+  X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen->GetAssembler());
   CpuRegister base = locations->InAt(1).AsRegister<CpuRegister>();
   CpuRegister offset = locations->InAt(2).AsRegister<CpuRegister>();
   CpuRegister value = locations->InAt(3).AsRegister<CpuRegister>();
@@ -1895,7 +1895,7 @@
   locations->SetOut(Location::RequiresRegister());
   if (type == Primitive::kPrimNot) {
     // Need temp registers for card-marking.
-    locations->AddTemp(Location::RequiresRegister());
+    locations->AddTemp(Location::RequiresRegister());  // Possibly used for reference poisoning too.
     locations->AddTemp(Location::RequiresRegister());
   }
 }
@@ -1909,61 +1909,95 @@
 }
 
 void IntrinsicLocationsBuilderX86_64::VisitUnsafeCASObject(HInvoke* invoke) {
-  // The UnsafeCASObject intrinsic does not always work when heap
-  // poisoning is enabled (it breaks several libcore tests); turn it
-  // off temporarily as a quick fix.
-  // TODO(rpl): Fix it and turn it back on.
-  if (kPoisonHeapReferences) {
-    return;
-  }
-
   CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimNot, invoke);
 }
 
 static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86_64* codegen) {
-  X86_64Assembler* assembler =
-    reinterpret_cast<X86_64Assembler*>(codegen->GetAssembler());
+  X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen->GetAssembler());
   LocationSummary* locations = invoke->GetLocations();
 
   CpuRegister base = locations->InAt(1).AsRegister<CpuRegister>();
   CpuRegister offset = locations->InAt(2).AsRegister<CpuRegister>();
   CpuRegister expected = locations->InAt(3).AsRegister<CpuRegister>();
+  // Ensure `expected` is in RAX (required by the CMPXCHG instruction).
   DCHECK_EQ(expected.AsRegister(), RAX);
   CpuRegister value = locations->InAt(4).AsRegister<CpuRegister>();
   CpuRegister out = locations->Out().AsRegister<CpuRegister>();
 
-  if (type == Primitive::kPrimLong) {
-    __ LockCmpxchgq(Address(base, offset, TIMES_1, 0), value);
-  } else {
-    // Integer or object.
-    if (type == Primitive::kPrimNot) {
-      // Mark card for object assuming new value is stored.
-      bool value_can_be_null = true;  // TODO: Worth finding out this information?
-      codegen->MarkGCCard(locations->GetTemp(0).AsRegister<CpuRegister>(),
-                          locations->GetTemp(1).AsRegister<CpuRegister>(),
-                          base,
-                          value,
-                          value_can_be_null);
+  if (type == Primitive::kPrimNot) {
+    // Mark card for object assuming new value is stored.
+    bool value_can_be_null = true;  // TODO: Worth finding out this information?
+    codegen->MarkGCCard(locations->GetTemp(0).AsRegister<CpuRegister>(),
+                        locations->GetTemp(1).AsRegister<CpuRegister>(),
+                        base,
+                        value,
+                        value_can_be_null);
 
-      if (kPoisonHeapReferences) {
-        __ PoisonHeapReference(expected);
-        __ PoisonHeapReference(value);
+    bool base_equals_value = (base.AsRegister() == value.AsRegister());
+    Register value_reg = value.AsRegister();
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // If `base` and `value` are the same register location, move
+        // `value_reg` to a temporary register.  This way, poisoning
+        // `value_reg` won't invalidate `base`.
+        value_reg = locations->GetTemp(0).AsRegister<CpuRegister>().AsRegister();
+        __ movl(CpuRegister(value_reg), base);
       }
+
+      // Check that the register allocator did not assign the location
+      // of `expected` (RAX) to `value` nor to `base`, so that heap
+      // poisoning (when enabled) works as intended below.
+      // - If `value` were equal to `expected`, both references would
+      //   be poisoned twice, meaning they would not be poisoned at
+      //   all, as heap poisoning uses address negation.
+      // - If `base` were equal to `expected`, poisoning `expected`
+      //   would invalidate `base`.
+      DCHECK_NE(value_reg, expected.AsRegister());
+      DCHECK_NE(base.AsRegister(), expected.AsRegister());
+
+      __ PoisonHeapReference(expected);
+      __ PoisonHeapReference(CpuRegister(value_reg));
     }
 
-    __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
-  }
+    __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), CpuRegister(value_reg));
 
-  // locked cmpxchg has full barrier semantics, and we don't need scheduling
-  // barriers at this time.
+    // locked cmpxchg has full barrier semantics, and we don't need
+    // scheduling barriers at this time.
 
-  // Convert ZF into the boolean result.
-  __ setcc(kZero, out);
-  __ movzxb(out, out);
+    // Convert ZF into the boolean result.
+    __ setcc(kZero, out);
+    __ movzxb(out, out);
 
-  if (kPoisonHeapReferences && type == Primitive::kPrimNot) {
-    __ UnpoisonHeapReference(value);
-    __ UnpoisonHeapReference(expected);
+    if (kPoisonHeapReferences) {
+      if (base_equals_value) {
+        // `value_reg` has been moved to a temporary register, no need
+        // to unpoison it.
+      } else {
+        // Ensure `value` is different from `out`, so that unpoisoning
+        // the former does not invalidate the latter.
+        DCHECK_NE(value_reg, out.AsRegister());
+        __ UnpoisonHeapReference(CpuRegister(value_reg));
+      }
+      // Ensure `expected` is different from `out`, so that unpoisoning
+      // the former does not invalidate the latter.
+      DCHECK_NE(expected.AsRegister(), out.AsRegister());
+      __ UnpoisonHeapReference(expected);
+    }
+  } else {
+    if (type == Primitive::kPrimInt) {
+      __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
+    } else if (type == Primitive::kPrimLong) {
+      __ LockCmpxchgq(Address(base, offset, TIMES_1, 0), value);
+    } else {
+      LOG(FATAL) << "Unexpected CAS type " << type;
+    }
+
+    // locked cmpxchg has full barrier semantics, and we don't need
+    // scheduling barriers at this time.
+
+    // Convert ZF into the boolean result.
+    __ setcc(kZero, out);
+    __ movzxb(out, out);
   }
 }
 
@@ -2001,8 +2035,7 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitIntegerReverse(HInvoke* invoke) {
-  X86_64Assembler* assembler =
-    reinterpret_cast<X86_64Assembler*>(codegen_->GetAssembler());
+  X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen_->GetAssembler());
   LocationSummary* locations = invoke->GetLocations();
 
   CpuRegister reg = locations->InAt(0).AsRegister<CpuRegister>();
@@ -2046,8 +2079,7 @@
 }
 
 void IntrinsicCodeGeneratorX86_64::VisitLongReverse(HInvoke* invoke) {
-  X86_64Assembler* assembler =
-    reinterpret_cast<X86_64Assembler*>(codegen_->GetAssembler());
+  X86_64Assembler* assembler = down_cast<X86_64Assembler*>(codegen_->GetAssembler());
   LocationSummary* locations = invoke->GetLocations();
 
   CpuRegister reg = locations->InAt(0).AsRegister<CpuRegister>();
diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc
index a036bd5..47457de 100644
--- a/compiler/optimizing/licm_test.cc
+++ b/compiler/optimizing/licm_test.cc
@@ -104,13 +104,19 @@
 
   // Populate the loop with instructions: set/get field with different types.
   NullHandle<mirror::DexCache> dex_cache;
-  HInstruction* get_field = new (&allocator_) HInstanceFieldGet(
-      parameter_, Primitive::kPrimLong, MemberOffset(10),
-      false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
+  HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
+                                                                Primitive::kPrimLong,
+                                                                MemberOffset(10),
+                                                                false,
+                                                                kUnknownFieldIndex,
+                                                                kUnknownClassDefIndex,
+                                                                graph_->GetDexFile(),
+                                                                dex_cache,
+                                                                0);
   loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
   HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
       parameter_, constant_, Primitive::kPrimInt, MemberOffset(20),
-      false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
+      false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), dex_cache, 0);
   loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
 
   EXPECT_EQ(get_field->GetBlock(), loop_body_);
@@ -125,13 +131,26 @@
 
   // Populate the loop with instructions: set/get field with same types.
   NullHandle<mirror::DexCache> dex_cache;
-  HInstruction* get_field = new (&allocator_) HInstanceFieldGet(
-      parameter_, Primitive::kPrimLong, MemberOffset(10),
-      false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
+  HInstruction* get_field = new (&allocator_) HInstanceFieldGet(parameter_,
+                                                                Primitive::kPrimLong,
+                                                                MemberOffset(10),
+                                                                false,
+                                                                kUnknownFieldIndex,
+                                                                kUnknownClassDefIndex,
+                                                                graph_->GetDexFile(),
+                                                                dex_cache,
+                                                                0);
   loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction());
-  HInstruction* set_field = new (&allocator_) HInstanceFieldSet(
-      parameter_, get_field, Primitive::kPrimLong, MemberOffset(10),
-      false, kUnknownFieldIndex, graph_->GetDexFile(), dex_cache, 0);
+  HInstruction* set_field = new (&allocator_) HInstanceFieldSet(parameter_,
+                                                                get_field,
+                                                                Primitive::kPrimLong,
+                                                                MemberOffset(10),
+                                                                false,
+                                                                kUnknownFieldIndex,
+                                                                kUnknownClassDefIndex,
+                                                                graph_->GetDexFile(),
+                                                                dex_cache,
+                                                                0);
   loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction());
 
   EXPECT_EQ(get_field->GetBlock(), loop_body_);
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
new file mode 100644
index 0000000..90f28e5
--- /dev/null
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -0,0 +1,913 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "load_store_elimination.h"
+#include "side_effects_analysis.h"
+
+#include <iostream>
+
+namespace art {
+
+class ReferenceInfo;
+
+// A cap for the number of heap locations to prevent pathological time/space consumption.
+// The number of heap locations for most of the methods stays below this threshold.
+constexpr size_t kMaxNumberOfHeapLocations = 32;
+
+// A ReferenceInfo contains additional info about a reference such as
+// whether it's a singleton, returned, etc.
+class ReferenceInfo : public ArenaObject<kArenaAllocMisc> {
+ public:
+  ReferenceInfo(HInstruction* reference, size_t pos) : reference_(reference), position_(pos) {
+    is_singleton_ = true;
+    is_singleton_and_not_returned_ = true;
+    if (!reference_->IsNewInstance() && !reference_->IsNewArray()) {
+      // For references not allocated in the method, don't assume anything.
+      is_singleton_ = false;
+      is_singleton_and_not_returned_ = false;
+      return;
+    }
+
+    // Visit all uses to determine if this reference can escape into the heap,
+    // a method call, etc.
+    for (HUseIterator<HInstruction*> use_it(reference_->GetUses());
+         !use_it.Done();
+         use_it.Advance()) {
+      HInstruction* use = use_it.Current()->GetUser();
+      DCHECK(!use->IsNullCheck()) << "NullCheck should have been eliminated";
+      if (use->IsBoundType()) {
+        // BoundType shouldn't normally be necessary for a NewInstance.
+        // Just be conservative for the uncommon cases.
+        is_singleton_ = false;
+        is_singleton_and_not_returned_ = false;
+        return;
+      }
+      if (use->IsPhi() || use->IsInvoke() ||
+          (use->IsInstanceFieldSet() && (reference_ == use->InputAt(1))) ||
+          (use->IsUnresolvedInstanceFieldSet() && (reference_ == use->InputAt(1))) ||
+          (use->IsStaticFieldSet() && (reference_ == use->InputAt(1))) ||
+          (use->IsUnresolvedStaticFieldSet() && (reference_ == use->InputAt(1))) ||
+          (use->IsArraySet() && (reference_ == use->InputAt(2)))) {
+        // reference_ is merged into a phi, passed to a callee, or stored to the heap.
+        // reference_ isn't the only name that can refer to its value anymore.
+        is_singleton_ = false;
+        is_singleton_and_not_returned_ = false;
+        return;
+      }
+      if (use->IsReturn()) {
+        is_singleton_and_not_returned_ = false;
+      }
+    }
+  }
+
+  HInstruction* GetReference() const {
+    return reference_;
+  }
+
+  size_t GetPosition() const {
+    return position_;
+  }
+
+  // Returns true if reference_ is the only name that can refer to its value during
+  // the lifetime of the method. So it's guaranteed to not have any alias in
+  // the method (including its callees).
+  bool IsSingleton() const {
+    return is_singleton_;
+  }
+
+  // Returns true if reference_ is a singleton and not returned to the caller.
+  // The allocation and stores into reference_ may be eliminated for such cases.
+  bool IsSingletonAndNotReturned() const {
+    return is_singleton_and_not_returned_;
+  }
+
+ private:
+  HInstruction* const reference_;
+  const size_t position_;     // position in HeapLocationCollector's ref_info_array_.
+  bool is_singleton_;         // can only be referred to by a single name in the method.
+  bool is_singleton_and_not_returned_;  // reference_ is singleton and not returned to caller.
+
+  DISALLOW_COPY_AND_ASSIGN(ReferenceInfo);
+};
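+
+// Illustrative sketch, not part of the pass: the escape classification performed by the
+// constructor above, restated over a hypothetical flat list of use kinds. A reference stays
+// a singleton only if no use merges it into a phi, passes it to a callee, or stores it into
+// the heap as a value; a return only clears the "not returned" property.
+enum ExampleUseKind {
+  kExamplePhi, kExampleInvoke, kExampleStoredAsValue, kExampleReturn, kExampleOther
+};
+
+inline void ClassifyExampleUses(const ExampleUseKind* uses,
+                                size_t num_uses,
+                                bool* is_singleton,
+                                bool* is_singleton_and_not_returned) {
+  *is_singleton = true;
+  *is_singleton_and_not_returned = true;
+  for (size_t i = 0; i < num_uses; i++) {
+    ExampleUseKind kind = uses[i];
+    if (kind == kExamplePhi || kind == kExampleInvoke || kind == kExampleStoredAsValue) {
+      // The reference escapes; it is no longer the only name for its value.
+      *is_singleton = false;
+      *is_singleton_and_not_returned = false;
+      return;
+    }
+    if (kind == kExampleReturn) {
+      // Returned references can still be singletons, but their stores must be kept.
+      *is_singleton_and_not_returned = false;
+    }
+  }
+}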
+
+// A heap location is a reference-offset/index pair that a value can be loaded from
+// or stored to.
+class HeapLocation : public ArenaObject<kArenaAllocMisc> {
+ public:
+  static constexpr size_t kInvalidFieldOffset = -1;
+
+  // TODO: more fine-grained array types.
+  static constexpr int16_t kDeclaringClassDefIndexForArrays = -1;
+
+  HeapLocation(ReferenceInfo* ref_info,
+               size_t offset,
+               HInstruction* index,
+               int16_t declaring_class_def_index)
+      : ref_info_(ref_info),
+        offset_(offset),
+        index_(index),
+        declaring_class_def_index_(declaring_class_def_index),
+        may_become_unknown_(true) {
+    DCHECK(ref_info != nullptr);
+    DCHECK((offset == kInvalidFieldOffset && index != nullptr) ||
+           (offset != kInvalidFieldOffset && index == nullptr));
+
+    if (ref_info->IsSingletonAndNotReturned()) {
+      // We try to track stores to singletons that aren't returned, so that those stores can
+      // be eliminated: values in a singleton's fields cannot be killed due to aliasing. They
+      // can still be killed by value merges, since we don't build phis for merging heap values.
+      // SetMayBecomeUnknown(true) may be called later once such a merge becomes possible.
+      may_become_unknown_ = false;
+    }
+  }
+
+  ReferenceInfo* GetReferenceInfo() const { return ref_info_; }
+  size_t GetOffset() const { return offset_; }
+  HInstruction* GetIndex() const { return index_; }
+
+  // Returns the dex class def index of the declaring class.
+  // It's kDeclaringClassDefIndexForArrays for an array element.
+  int16_t GetDeclaringClassDefIndex() const {
+    return declaring_class_def_index_;
+  }
+
+  bool IsArrayElement() const {
+    return index_ != nullptr;
+  }
+
+  // Returns true if this heap location's value may become unknown after it's
+  // set to a value, due to merge of values, or killed due to aliasing.
+  bool MayBecomeUnknown() const {
+    return may_become_unknown_;
+  }
+  void SetMayBecomeUnknown(bool val) {
+    may_become_unknown_ = val;
+  }
+
+ private:
+  ReferenceInfo* const ref_info_;      // reference for instance/static field or array access.
+  const size_t offset_;                // offset of static/instance field.
+  HInstruction* const index_;          // index of an array element.
+  const int16_t declaring_class_def_index_;  // declaring class's def index in the dex file.
+  bool may_become_unknown_;            // value may become kUnknownHeapValue.
+
+  DISALLOW_COPY_AND_ASSIGN(HeapLocation);
+};
+
+static HInstruction* HuntForOriginalReference(HInstruction* ref) {
+  DCHECK(ref != nullptr);
+  while (ref->IsNullCheck() || ref->IsBoundType()) {
+    ref = ref->InputAt(0);
+  }
+  return ref;
+}
+
+// A HeapLocationCollector collects all relevant heap locations and keeps
+// an aliasing matrix for all locations.
+class HeapLocationCollector : public HGraphVisitor {
+ public:
+  static constexpr size_t kHeapLocationNotFound = -1;
+  // Start with a single uint32_t word. That's enough bits for the pair-wise
+  // aliasing matrix of 8 heap locations.
+  static constexpr uint32_t kInitialAliasingMatrixBitVectorSize = 32;
+
+  explicit HeapLocationCollector(HGraph* graph)
+      : HGraphVisitor(graph),
+        ref_info_array_(graph->GetArena()->Adapter(kArenaAllocLSE)),
+        heap_locations_(graph->GetArena()->Adapter(kArenaAllocLSE)),
+        aliasing_matrix_(graph->GetArena(), kInitialAliasingMatrixBitVectorSize, true),
+        has_heap_stores_(false),
+        has_volatile_(false),
+        has_monitor_operations_(false),
+        may_deoptimize_(false) {}
+
+  size_t GetNumberOfHeapLocations() const {
+    return heap_locations_.size();
+  }
+
+  HeapLocation* GetHeapLocation(size_t index) const {
+    return heap_locations_[index];
+  }
+
+  ReferenceInfo* FindReferenceInfoOf(HInstruction* ref) const {
+    for (size_t i = 0; i < ref_info_array_.size(); i++) {
+      ReferenceInfo* ref_info = ref_info_array_[i];
+      if (ref_info->GetReference() == ref) {
+        DCHECK_EQ(i, ref_info->GetPosition());
+        return ref_info;
+      }
+    }
+    return nullptr;
+  }
+
+  bool HasHeapStores() const {
+    return has_heap_stores_;
+  }
+
+  bool HasVolatile() const {
+    return has_volatile_;
+  }
+
+  bool HasMonitorOps() const {
+    return has_monitor_operations_;
+  }
+
+  // Returns whether this method may be deoptimized.
+  // Currently we don't have metadata support for deoptimizing
+  // a method whose allocations/stores have been eliminated.
+  bool MayDeoptimize() const {
+    return may_deoptimize_;
+  }
+
+  // Find and return the heap location index in heap_locations_.
+  size_t FindHeapLocationIndex(ReferenceInfo* ref_info,
+                               size_t offset,
+                               HInstruction* index,
+                               int16_t declaring_class_def_index) const {
+    for (size_t i = 0; i < heap_locations_.size(); i++) {
+      HeapLocation* loc = heap_locations_[i];
+      if (loc->GetReferenceInfo() == ref_info &&
+          loc->GetOffset() == offset &&
+          loc->GetIndex() == index &&
+          loc->GetDeclaringClassDefIndex() == declaring_class_def_index) {
+        return i;
+      }
+    }
+    return kHeapLocationNotFound;
+  }
+
+  // Returns true if heap_locations_[index1] and heap_locations_[index2] may alias.
+  bool MayAlias(size_t index1, size_t index2) const {
+    if (index1 < index2) {
+      return aliasing_matrix_.IsBitSet(AliasingMatrixPosition(index1, index2));
+    } else if (index1 > index2) {
+      return aliasing_matrix_.IsBitSet(AliasingMatrixPosition(index2, index1));
+    } else {
+      DCHECK(false) << "index1 and index2 are expected to be different";
+      return true;
+    }
+  }
+
+  void BuildAliasingMatrix() {
+    const size_t number_of_locations = heap_locations_.size();
+    if (number_of_locations == 0) {
+      return;
+    }
+    size_t pos = 0;
+    // Compute aliasing info between every pair of different heap locations.
+    // Save the result in a matrix represented as a BitVector.
+    for (size_t i = 0; i < number_of_locations - 1; i++) {
+      for (size_t j = i + 1; j < number_of_locations; j++) {
+        if (ComputeMayAlias(i, j)) {
+          aliasing_matrix_.SetBit(CheckedAliasingMatrixPosition(i, j, pos));
+        }
+        pos++;
+      }
+    }
+  }
+
+ private:
+  // An allocation cannot alias with a name which already exists at the point
+  // of the allocation, such as a parameter or a load happening before the allocation.
+  bool MayAliasWithPreexistenceChecking(ReferenceInfo* ref_info1, ReferenceInfo* ref_info2) const {
+    if (ref_info1->GetReference()->IsNewInstance() || ref_info1->GetReference()->IsNewArray()) {
+      // Any reference that can alias with the allocation must appear after it in the block/in
+      // the block's successors. In reverse post order, those instructions will be visited after
+      // the allocation.
+      return ref_info2->GetPosition() >= ref_info1->GetPosition();
+    }
+    return true;
+  }
+
+  bool CanReferencesAlias(ReferenceInfo* ref_info1, ReferenceInfo* ref_info2) const {
+    if (ref_info1 == ref_info2) {
+      return true;
+    } else if (ref_info1->IsSingleton()) {
+      return false;
+    } else if (ref_info2->IsSingleton()) {
+      return false;
+    } else if (!MayAliasWithPreexistenceChecking(ref_info1, ref_info2) ||
+        !MayAliasWithPreexistenceChecking(ref_info2, ref_info1)) {
+      return false;
+    }
+    return true;
+  }
+
+  // `index1` and `index2` are indices in the array of collected heap locations.
+  // Returns the position in the bit vector that tracks whether the two heap
+  // locations may alias.
+  size_t AliasingMatrixPosition(size_t index1, size_t index2) const {
+    DCHECK(index2 > index1);
+    const size_t number_of_locations = heap_locations_.size();
+    // It's (num_of_locations - 1) + ... + (num_of_locations - index1) + (index2 - index1 - 1).
+    return (number_of_locations * index1 - (1 + index1) * index1 / 2 + (index2 - index1 - 1));
+  }
+
+  // An additional position is passed in to make sure the calculated position is correct.
+  size_t CheckedAliasingMatrixPosition(size_t index1, size_t index2, size_t position) {
+    size_t calculated_position = AliasingMatrixPosition(index1, index2);
+    DCHECK_EQ(calculated_position, position);
+    return calculated_position;
+  }
+
+  // Compute if two locations may alias to each other.
+  bool ComputeMayAlias(size_t index1, size_t index2) const {
+    HeapLocation* loc1 = heap_locations_[index1];
+    HeapLocation* loc2 = heap_locations_[index2];
+    if (loc1->GetOffset() != loc2->GetOffset()) {
+      // Either two different instance fields, or one is an instance
+      // field and the other is an array element.
+      return false;
+    }
+    if (loc1->GetDeclaringClassDefIndex() != loc2->GetDeclaringClassDefIndex()) {
+      // Different types.
+      return false;
+    }
+    if (!CanReferencesAlias(loc1->GetReferenceInfo(), loc2->GetReferenceInfo())) {
+      return false;
+    }
+    if (loc1->IsArrayElement() && loc2->IsArrayElement()) {
+      HInstruction* array_index1 = loc1->GetIndex();
+      HInstruction* array_index2 = loc2->GetIndex();
+      DCHECK(array_index1 != nullptr);
+      DCHECK(array_index2 != nullptr);
+      if (array_index1->IsIntConstant() &&
+          array_index2->IsIntConstant() &&
+          array_index1->AsIntConstant()->GetValue() != array_index2->AsIntConstant()->GetValue()) {
+        // Different constant indices do not alias.
+        return false;
+      }
+    }
+    return true;
+  }
+
+  ReferenceInfo* GetOrCreateReferenceInfo(HInstruction* ref) {
+    ReferenceInfo* ref_info = FindReferenceInfoOf(ref);
+    if (ref_info == nullptr) {
+      size_t pos = ref_info_array_.size();
+      ref_info = new (GetGraph()->GetArena()) ReferenceInfo(ref, pos);
+      ref_info_array_.push_back(ref_info);
+    }
+    return ref_info;
+  }
+
+  HeapLocation* GetOrCreateHeapLocation(HInstruction* ref,
+                                        size_t offset,
+                                        HInstruction* index,
+                                        int16_t declaring_class_def_index) {
+    HInstruction* original_ref = HuntForOriginalReference(ref);
+    ReferenceInfo* ref_info = GetOrCreateReferenceInfo(original_ref);
+    size_t heap_location_idx = FindHeapLocationIndex(
+        ref_info, offset, index, declaring_class_def_index);
+    if (heap_location_idx == kHeapLocationNotFound) {
+      HeapLocation* heap_loc = new (GetGraph()->GetArena())
+          HeapLocation(ref_info, offset, index, declaring_class_def_index);
+      heap_locations_.push_back(heap_loc);
+      return heap_loc;
+    }
+    return heap_locations_[heap_location_idx];
+  }
+
+  void VisitFieldAccess(HInstruction* field_access,
+                        HInstruction* ref,
+                        const FieldInfo& field_info,
+                        bool is_store) {
+    if (field_info.IsVolatile()) {
+      has_volatile_ = true;
+    }
+    const uint16_t declaring_class_def_index = field_info.GetDeclaringClassDefIndex();
+    const size_t offset = field_info.GetFieldOffset().SizeValue();
+    HeapLocation* location = GetOrCreateHeapLocation(
+        ref, offset, nullptr, declaring_class_def_index);
+    // A store of a value may be eliminated if all future loads of that value can be eliminated.
+    // For a value stored into a singleton field, the value will not be killed due to aliasing.
+    // However, if the value is set in a block that doesn't post-dominate the definition, it may
+    // still be killed by a later merge. Until we have post-dominance info, we conservatively
+    // check whether the store is in the same block as the definition.
+    if (is_store &&
+        location->GetReferenceInfo()->IsSingletonAndNotReturned() &&
+        field_access->GetBlock() != ref->GetBlock()) {
+      location->SetMayBecomeUnknown(true);
+    }
+  }
+
+  void VisitArrayAccess(HInstruction* array, HInstruction* index) {
+    GetOrCreateHeapLocation(array, HeapLocation::kInvalidFieldOffset,
+        index, HeapLocation::kDeclaringClassDefIndexForArrays);
+  }
+
+  void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
+    VisitFieldAccess(instruction, instruction->InputAt(0), instruction->GetFieldInfo(), false);
+  }
+
+  void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+    VisitFieldAccess(instruction, instruction->InputAt(0), instruction->GetFieldInfo(), true);
+    has_heap_stores_ = true;
+  }
+
+  void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
+    VisitFieldAccess(instruction, instruction->InputAt(0), instruction->GetFieldInfo(), false);
+  }
+
+  void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+    VisitFieldAccess(instruction, instruction->InputAt(0), instruction->GetFieldInfo(), true);
+    has_heap_stores_ = true;
+  }
+
+  // We intentionally don't collect HUnresolvedInstanceField/HUnresolvedStaticField accesses
+  // since we cannot accurately track the fields.
+
+  void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
+    VisitArrayAccess(instruction->InputAt(0), instruction->InputAt(1));
+  }
+
+  void VisitArraySet(HArraySet* instruction) OVERRIDE {
+    VisitArrayAccess(instruction->InputAt(0), instruction->InputAt(1));
+    has_heap_stores_ = true;
+  }
+
+  void VisitNewInstance(HNewInstance* new_instance) OVERRIDE {
+    // Any references appearing in the ref_info_array_ so far cannot alias with new_instance.
+    GetOrCreateReferenceInfo(new_instance);
+  }
+
+  void VisitDeoptimize(HDeoptimize* instruction ATTRIBUTE_UNUSED) OVERRIDE {
+    may_deoptimize_ = true;
+  }
+
+  void VisitMonitorOperation(HMonitorOperation* monitor ATTRIBUTE_UNUSED) OVERRIDE {
+    has_monitor_operations_ = true;
+  }
+
+  ArenaVector<ReferenceInfo*> ref_info_array_;   // All references used for heap accesses.
+  ArenaVector<HeapLocation*> heap_locations_;    // All heap locations.
+  ArenaBitVector aliasing_matrix_;    // aliasing info between each pair of locations.
+  bool has_heap_stores_;    // If there are no heap stores, LSE acts as GVN with better
+                            // alias analysis and won't be as effective.
+  bool has_volatile_;       // If there are volatile field accesses.
+  bool has_monitor_operations_;    // If there are monitor operations.
+  bool may_deoptimize_;
+
+  DISALLOW_COPY_AND_ASSIGN(HeapLocationCollector);
+};
+
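+// Illustrative sketch, not part of the pass: the triangular-matrix indexing used by
+// HeapLocationCollector::AliasingMatrixPosition above, restated as a standalone constexpr
+// function so that the mapping can be checked at compile time. With 4 heap locations the
+// pairs are laid out as (0,1)->0, (0,2)->1, (0,3)->2, (1,2)->3, (1,3)->4, (2,3)->5.
+constexpr size_t ExampleAliasingMatrixPosition(size_t n, size_t index1, size_t index2) {
+  return n * index1 - (1 + index1) * index1 / 2 + (index2 - index1 - 1);
+}
+static_assert(ExampleAliasingMatrixPosition(4, 0, 1) == 0, "first pair of row 0");
+static_assert(ExampleAliasingMatrixPosition(4, 1, 2) == 3, "row 1 starts after row 0's 3 pairs");
+static_assert(ExampleAliasingMatrixPosition(4, 2, 3) == 5, "last of the 6 pairs of 4 locations");
+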
+// An unknown heap value. A load from a heap location holding this value cannot be eliminated.
+static HInstruction* const kUnknownHeapValue =
+    reinterpret_cast<HInstruction*>(static_cast<uintptr_t>(-1));
+// Default heap value after an allocation.
+static HInstruction* const kDefaultHeapValue =
+    reinterpret_cast<HInstruction*>(static_cast<uintptr_t>(-2));
+
+class LSEVisitor : public HGraphVisitor {
+ public:
+  LSEVisitor(HGraph* graph,
+             const HeapLocationCollector& heap_locations_collector,
+             const SideEffectsAnalysis& side_effects)
+      : HGraphVisitor(graph),
+        heap_location_collector_(heap_locations_collector),
+        side_effects_(side_effects),
+        heap_values_for_(graph->GetBlocks().size(),
+                         ArenaVector<HInstruction*>(heap_locations_collector.
+                                                        GetNumberOfHeapLocations(),
+                                                    kUnknownHeapValue,
+                                                    graph->GetArena()->Adapter(kArenaAllocLSE)),
+                         graph->GetArena()->Adapter(kArenaAllocLSE)),
+        removed_instructions_(graph->GetArena()->Adapter(kArenaAllocLSE)),
+        substitute_instructions_(graph->GetArena()->Adapter(kArenaAllocLSE)),
+        singleton_new_instances_(graph->GetArena()->Adapter(kArenaAllocLSE)) {
+  }
+
+  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
+    int block_id = block->GetBlockId();
+    ArenaVector<HInstruction*>& heap_values = heap_values_for_[block_id];
+    // TODO: try to reuse the heap_values array from one predecessor if possible.
+    if (block->IsLoopHeader()) {
+      // We do a single pass in reverse post order. For loops, use the side effects as a hint
+      // to see if the heap values should be killed.
+      if (side_effects_.GetLoopEffects(block).DoesAnyWrite()) {
+        // Leave all values as kUnknownHeapValue.
+      } else {
+        // Inherit the values from pre-header.
+        HBasicBlock* pre_header = block->GetLoopInformation()->GetPreHeader();
+        ArenaVector<HInstruction*>& pre_header_heap_values =
+            heap_values_for_[pre_header->GetBlockId()];
+        for (size_t i = 0; i < heap_values.size(); i++) {
+          heap_values[i] = pre_header_heap_values[i];
+        }
+      }
+    } else {
+      MergePredecessorValues(block);
+    }
+    HGraphVisitor::VisitBasicBlock(block);
+  }
+
+  // Remove recorded instructions that should be eliminated.
+  void RemoveInstructions() {
+    size_t size = removed_instructions_.size();
+    DCHECK_EQ(size, substitute_instructions_.size());
+    for (size_t i = 0; i < size; i++) {
+      HInstruction* instruction = removed_instructions_[i];
+      DCHECK(instruction != nullptr);
+      HInstruction* substitute = substitute_instructions_[i];
+      if (substitute != nullptr) {
+        // Keep tracing the substitute until we reach one that's not removed.
+        HInstruction* sub_sub = FindSubstitute(substitute);
+        while (sub_sub != substitute) {
+          substitute = sub_sub;
+          sub_sub = FindSubstitute(substitute);
+        }
+        instruction->ReplaceWith(substitute);
+      }
+      instruction->GetBlock()->RemoveInstruction(instruction);
+    }
+    // TODO: remove unnecessary allocations.
+    // Eliminate instructions in singleton_new_instances_ that:
+    // - don't have uses,
+    // - don't have finalizers,
+    // - are instantiable and accessible,
+    // - have no/separate clinit check.
+  }
+
+ private:
+  void MergePredecessorValues(HBasicBlock* block) {
+    const ArenaVector<HBasicBlock*>& predecessors = block->GetPredecessors();
+    if (predecessors.size() == 0) {
+      return;
+    }
+    ArenaVector<HInstruction*>& heap_values = heap_values_for_[block->GetBlockId()];
+    for (size_t i = 0; i < heap_values.size(); i++) {
+      HInstruction* value = heap_values_for_[predecessors[0]->GetBlockId()][i];
+      if (value != kUnknownHeapValue) {
+        for (size_t j = 1; j < predecessors.size(); j++) {
+          if (heap_values_for_[predecessors[j]->GetBlockId()][i] != value) {
+            value = kUnknownHeapValue;
+            break;
+          }
+        }
+      }
+      heap_values[i] = value;
+    }
+  }
+
+  // `instruction` is being removed. Try to see if the null check on it
+  // can be removed as well. This can happen if the same value is set in two
+  // branches but not in a dominator. For example:
+  //   int[] a = foo();
+  //   if () {
+  //     a[0] = 2;
+  //   } else {
+  //     a[0] = 2;
+  //   }
+  //   // a[0] can now be replaced with constant 2, and the null check on it can be removed.
+  void TryRemovingNullCheck(HInstruction* instruction) {
+    HInstruction* prev = instruction->GetPrevious();
+    if ((prev != nullptr) && prev->IsNullCheck() && (prev == instruction->InputAt(0))) {
+      // Previous instruction is a null check for this instruction. Remove the null check.
+      prev->ReplaceWith(prev->InputAt(0));
+      prev->GetBlock()->RemoveInstruction(prev);
+    }
+  }
+
+  HInstruction* GetDefaultValue(Primitive::Type type) {
+    switch (type) {
+      case Primitive::kPrimNot:
+        return GetGraph()->GetNullConstant();
+      case Primitive::kPrimBoolean:
+      case Primitive::kPrimByte:
+      case Primitive::kPrimChar:
+      case Primitive::kPrimShort:
+      case Primitive::kPrimInt:
+        return GetGraph()->GetIntConstant(0);
+      case Primitive::kPrimLong:
+        return GetGraph()->GetLongConstant(0);
+      case Primitive::kPrimFloat:
+        return GetGraph()->GetFloatConstant(0);
+      case Primitive::kPrimDouble:
+        return GetGraph()->GetDoubleConstant(0);
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  void VisitGetLocation(HInstruction* instruction,
+                        HInstruction* ref,
+                        size_t offset,
+                        HInstruction* index,
+                        int16_t declaring_class_def_index) {
+    HInstruction* original_ref = HuntForOriginalReference(ref);
+    ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(original_ref);
+    size_t idx = heap_location_collector_.FindHeapLocationIndex(
+        ref_info, offset, index, declaring_class_def_index);
+    DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
+    ArenaVector<HInstruction*>& heap_values =
+        heap_values_for_[instruction->GetBlock()->GetBlockId()];
+    HInstruction* heap_value = heap_values[idx];
+    if (heap_value == kDefaultHeapValue) {
+      HInstruction* constant = GetDefaultValue(instruction->GetType());
+      removed_instructions_.push_back(instruction);
+      substitute_instructions_.push_back(constant);
+      heap_values[idx] = constant;
+      return;
+    }
+    if ((heap_value != kUnknownHeapValue) &&
+        // Keep the load due to possible I/F, J/D array aliasing.
+        // See b/22538329 for details.
+        (heap_value->GetType() == instruction->GetType())) {
+      removed_instructions_.push_back(instruction);
+      substitute_instructions_.push_back(heap_value);
+      TryRemovingNullCheck(instruction);
+      return;
+    }
+
+    if (heap_value == kUnknownHeapValue) {
+      // Put the load as the value into the HeapLocation.
+      // This acts like GVN but with better aliasing analysis.
+      heap_values[idx] = instruction;
+    }
+  }
+
+  bool Equal(HInstruction* heap_value, HInstruction* value) {
+    if (heap_value == value) {
+      return true;
+    }
+    if (heap_value == kDefaultHeapValue && GetDefaultValue(value->GetType()) == value) {
+      return true;
+    }
+    return false;
+  }
+
+  void VisitSetLocation(HInstruction* instruction,
+                        HInstruction* ref,
+                        size_t offset,
+                        HInstruction* index,
+                        int16_t declaring_class_def_index,
+                        HInstruction* value) {
+    HInstruction* original_ref = HuntForOriginalReference(ref);
+    ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(original_ref);
+    size_t idx = heap_location_collector_.FindHeapLocationIndex(
+        ref_info, offset, index, declaring_class_def_index);
+    DCHECK_NE(idx, HeapLocationCollector::kHeapLocationNotFound);
+    ArenaVector<HInstruction*>& heap_values =
+        heap_values_for_[instruction->GetBlock()->GetBlockId()];
+    HInstruction* heap_value = heap_values[idx];
+    bool redundant_store = false;
+    if (Equal(heap_value, value)) {
+      // Store into the heap location with the same value.
+      redundant_store = true;
+    } else if (index != nullptr) {
+      // For array elements, don't eliminate stores since they can easily be aliased
+      // with non-constant indices.
+    } else if (!heap_location_collector_.MayDeoptimize() &&
+               ref_info->IsSingletonAndNotReturned() &&
+               !heap_location_collector_.GetHeapLocation(idx)->MayBecomeUnknown()) {
+      // Store into a field of a singleton that's not returned, where the value cannot be
+      // killed by a merge. The store is redundant since future loads will get the value
+      // set by this instruction.
+      Primitive::Type type = Primitive::kPrimVoid;
+      if (instruction->IsInstanceFieldSet()) {
+        type = instruction->AsInstanceFieldSet()->GetFieldInfo().GetFieldType();
+      } else if (instruction->IsStaticFieldSet()) {
+        type = instruction->AsStaticFieldSet()->GetFieldInfo().GetFieldType();
+      } else {
+        DCHECK(false) << "Must be an instance/static field set instruction.";
+      }
+      if (value->GetType() != type) {
+        // I/F, J/D aliasing should not happen for fields.
+        DCHECK(Primitive::IsIntegralType(value->GetType()));
+        DCHECK(!Primitive::Is64BitType(value->GetType()));
+        DCHECK(Primitive::IsIntegralType(type));
+        DCHECK(!Primitive::Is64BitType(type));
+        // Keep the store since the corresponding load isn't eliminated due to different types.
+        // TODO: handle the different int types so that we can eliminate this store.
+        redundant_store = false;
+      } else {
+        redundant_store = true;
+      }
+      // TODO: eliminate the store if the singleton object is not finalizable.
+      redundant_store = false;
+    }
+    if (redundant_store) {
+      removed_instructions_.push_back(instruction);
+      substitute_instructions_.push_back(nullptr);
+      TryRemovingNullCheck(instruction);
+    }
+
+    heap_values[idx] = value;
+    // This store may kill values in other heap locations due to aliasing.
+    for (size_t i = 0; i < heap_values.size(); i++) {
+      if (heap_values[i] == value) {
+        // Same value should be kept even if aliasing happens.
+        continue;
+      }
+      if (heap_values[i] == kUnknownHeapValue) {
+        // Value is already unknown, no need for aliasing check.
+        continue;
+      }
+      if (heap_location_collector_.MayAlias(i, idx)) {
+        // Kill heap locations that may alias.
+        heap_values[i] = kUnknownHeapValue;
+      }
+    }
+  }
+
+  void VisitInstanceFieldGet(HInstanceFieldGet* instruction) OVERRIDE {
+    HInstruction* obj = instruction->InputAt(0);
+    size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
+    int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
+    VisitGetLocation(instruction, obj, offset, nullptr, declaring_class_def_index);
+  }
+
+  void VisitInstanceFieldSet(HInstanceFieldSet* instruction) OVERRIDE {
+    HInstruction* obj = instruction->InputAt(0);
+    size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
+    int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
+    HInstruction* value = instruction->InputAt(1);
+    VisitSetLocation(instruction, obj, offset, nullptr, declaring_class_def_index, value);
+  }
+
+  void VisitStaticFieldGet(HStaticFieldGet* instruction) OVERRIDE {
+    HInstruction* cls = instruction->InputAt(0);
+    size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
+    int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
+    VisitGetLocation(instruction, cls, offset, nullptr, declaring_class_def_index);
+  }
+
+  void VisitStaticFieldSet(HStaticFieldSet* instruction) OVERRIDE {
+    HInstruction* cls = instruction->InputAt(0);
+    size_t offset = instruction->GetFieldInfo().GetFieldOffset().SizeValue();
+    int16_t declaring_class_def_index = instruction->GetFieldInfo().GetDeclaringClassDefIndex();
+    HInstruction* value = instruction->InputAt(1);
+    VisitSetLocation(instruction, cls, offset, nullptr, declaring_class_def_index, value);
+  }
+
+  void VisitArrayGet(HArrayGet* instruction) OVERRIDE {
+    HInstruction* array = instruction->InputAt(0);
+    HInstruction* index = instruction->InputAt(1);
+    VisitGetLocation(instruction,
+                     array,
+                     HeapLocation::kInvalidFieldOffset,
+                     index,
+                     HeapLocation::kDeclaringClassDefIndexForArrays);
+  }
+
+  void VisitArraySet(HArraySet* instruction) OVERRIDE {
+    HInstruction* array = instruction->InputAt(0);
+    HInstruction* index = instruction->InputAt(1);
+    HInstruction* value = instruction->InputAt(2);
+    VisitSetLocation(instruction,
+                     array,
+                     HeapLocation::kInvalidFieldOffset,
+                     index,
+                     HeapLocation::kDeclaringClassDefIndexForArrays,
+                     value);
+  }
+
+  void HandleInvoke(HInstruction* invoke) {
+    ArenaVector<HInstruction*>& heap_values =
+        heap_values_for_[invoke->GetBlock()->GetBlockId()];
+    for (size_t i = 0; i < heap_values.size(); i++) {
+      ReferenceInfo* ref_info = heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo();
+      if (ref_info->IsSingleton()) {
+        // Singleton references cannot be seen by the callee.
+      } else {
+        heap_values[i] = kUnknownHeapValue;
+      }
+    }
+  }
+
+  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+    HandleInvoke(invoke);
+  }
+
+  void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+    HandleInvoke(invoke);
+  }
+
+  void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
+    HandleInvoke(invoke);
+  }
+
+  void VisitInvokeUnresolved(HInvokeUnresolved* invoke) OVERRIDE {
+    HandleInvoke(invoke);
+  }
+
+  void VisitClinitCheck(HClinitCheck* clinit) OVERRIDE {
+    HandleInvoke(clinit);
+  }
+
+  void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instruction) OVERRIDE {
+    // Conservatively treat it as an invocation.
+    HandleInvoke(instruction);
+  }
+
+  void VisitUnresolvedInstanceFieldSet(HUnresolvedInstanceFieldSet* instruction) OVERRIDE {
+    // Conservatively treat it as an invocation.
+    HandleInvoke(instruction);
+  }
+
+  void VisitUnresolvedStaticFieldGet(HUnresolvedStaticFieldGet* instruction) OVERRIDE {
+    // Conservatively treat it as an invocation.
+    HandleInvoke(instruction);
+  }
+
+  void VisitUnresolvedStaticFieldSet(HUnresolvedStaticFieldSet* instruction) OVERRIDE {
+    // Conservatively treat it as an invocation.
+    HandleInvoke(instruction);
+  }
+
+  void VisitNewInstance(HNewInstance* new_instance) OVERRIDE {
+    ReferenceInfo* ref_info = heap_location_collector_.FindReferenceInfoOf(new_instance);
+    if (ref_info == nullptr) {
+      // new_instance isn't used for field accesses. No need to process it.
+      return;
+    }
+    if (!heap_location_collector_.MayDeoptimize() &&
+        ref_info->IsSingletonAndNotReturned()) {
+      // The allocation might be eliminated.
+      singleton_new_instances_.push_back(new_instance);
+    }
+    ArenaVector<HInstruction*>& heap_values =
+        heap_values_for_[new_instance->GetBlock()->GetBlockId()];
+    for (size_t i = 0; i < heap_values.size(); i++) {
+      HInstruction* ref =
+          heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo()->GetReference();
+      size_t offset = heap_location_collector_.GetHeapLocation(i)->GetOffset();
+      if (ref == new_instance && offset >= mirror::kObjectHeaderSize) {
+        // Instance fields except the header fields are set to default heap values.
+        heap_values[i] = kDefaultHeapValue;
+      }
+    }
+  }
+
+  // Find an instruction's substitute if it should be removed.
+  // Return the same instruction if it should not be removed.
+  HInstruction* FindSubstitute(HInstruction* instruction) {
+    size_t size = removed_instructions_.size();
+    for (size_t i = 0; i < size; i++) {
+      if (removed_instructions_[i] == instruction) {
+        return substitute_instructions_[i];
+      }
+    }
+    return instruction;
+  }
+
+  const HeapLocationCollector& heap_location_collector_;
+  const SideEffectsAnalysis& side_effects_;
+
+  // One array of heap values for each block.
+  ArenaVector<ArenaVector<HInstruction*>> heap_values_for_;
+
+  // We record the instructions that should be eliminated but may still be
+  // referenced as heap values. They'll be removed at the end of the pass.
+  ArenaVector<HInstruction*> removed_instructions_;
+  ArenaVector<HInstruction*> substitute_instructions_;
+  ArenaVector<HInstruction*> singleton_new_instances_;
+
+  DISALLOW_COPY_AND_ASSIGN(LSEVisitor);
+};
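+
+// Illustrative sketch, not part of the pass: the forwarding idea behind VisitSetLocation and
+// VisitGetLocation on a toy straight-line sequence of accesses to numbered locations. A store
+// records the value written to its location; a later load from the same location with no
+// intervening kill is redundant and can simply reuse that value.
+struct ExampleAccess {
+  bool is_store;
+  size_t location;   // Index into a small, fixed set of heap locations.
+  int value;         // For a store: the value written. For a load: filled in when forwarded.
+};
+
+// Returns the number of loads whose value could be forwarded from an earlier store. Values
+// are assumed to be non-negative so that -1 can act as the "unknown" marker, playing the
+// role of kUnknownHeapValue above.
+inline size_t ForwardExampleLoads(ExampleAccess* accesses, size_t num_accesses) {
+  int known_values[kMaxNumberOfHeapLocations];
+  for (size_t i = 0; i < kMaxNumberOfHeapLocations; i++) {
+    known_values[i] = -1;
+  }
+  size_t num_forwarded = 0;
+  for (size_t i = 0; i < num_accesses; i++) {
+    ExampleAccess& access = accesses[i];
+    if (access.is_store) {
+      known_values[access.location] = access.value;  // Future loads of this location see it.
+    } else if (known_values[access.location] != -1) {
+      access.value = known_values[access.location];  // Redundant load: reuse the known value.
+      num_forwarded++;
+    }
+  }
+  return num_forwarded;
+}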
+
+void LoadStoreElimination::Run() {
+  if (graph_->IsDebuggable()) {
+    // Debugger may set heap values or trigger deoptimization of callers.
+    // Skip this optimization.
+    return;
+  }
+  HeapLocationCollector heap_location_collector(graph_);
+  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+    heap_location_collector.VisitBasicBlock(it.Current());
+  }
+  if (heap_location_collector.GetNumberOfHeapLocations() > kMaxNumberOfHeapLocations) {
+    // Bail out if there are too many heap locations to deal with.
+    return;
+  }
+  if (!heap_location_collector.HasHeapStores()) {
+    // Without heap stores, this pass would act mostly as GVN on heap accesses.
+    return;
+  }
+  if (heap_location_collector.HasVolatile() || heap_location_collector.HasMonitorOps()) {
+    // Don't do load/store elimination if the method has volatile field accesses or
+    // monitor operations, for now.
+    // TODO: do it right.
+    return;
+  }
+  heap_location_collector.BuildAliasingMatrix();
+  LSEVisitor lse_visitor(graph_, heap_location_collector, side_effects_);
+  for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
+    lse_visitor.VisitBasicBlock(it.Current());
+  }
+  lse_visitor.RemoveInstructions();
+}
+
+}  // namespace art
diff --git a/compiler/optimizing/load_store_elimination.h b/compiler/optimizing/load_store_elimination.h
new file mode 100644
index 0000000..1d9e5c8
--- /dev/null
+++ b/compiler/optimizing/load_store_elimination.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_LOAD_STORE_ELIMINATION_H_
+#define ART_COMPILER_OPTIMIZING_LOAD_STORE_ELIMINATION_H_
+
+#include "optimization.h"
+
+namespace art {
+
+class SideEffectsAnalysis;
+
+class LoadStoreElimination : public HOptimization {
+ public:
+  LoadStoreElimination(HGraph* graph, const SideEffectsAnalysis& side_effects)
+      : HOptimization(graph, kLoadStoreEliminationPassName),
+        side_effects_(side_effects) {}
+
+  void Run() OVERRIDE;
+
+  static constexpr const char* kLoadStoreEliminationPassName = "load_store_elimination";
+
+ private:
+  const SideEffectsAnalysis& side_effects_;
+
+  DISALLOW_COPY_AND_ASSIGN(LoadStoreElimination);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_LOAD_STORE_ELIMINATION_H_
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 2ef3217..6028d4b 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -21,6 +21,7 @@
 #include <array>
 #include <type_traits>
 
+#include "base/arena_bit_vector.h"
 #include "base/arena_containers.h"
 #include "base/arena_object.h"
 #include "base/stl_util.h"
@@ -34,7 +35,6 @@
 #include "mirror/class.h"
 #include "offsets.h"
 #include "primitive.h"
-#include "utils/arena_bit_vector.h"
 
 namespace art {
 
@@ -75,6 +75,7 @@
 static constexpr uint64_t kMaxLongShiftValue = 0x3f;
 
 static constexpr uint32_t kUnknownFieldIndex = static_cast<uint32_t>(-1);
+static constexpr uint16_t kUnknownClassDefIndex = static_cast<uint16_t>(-1);
 
 static constexpr InvokeType kInvalidInvokeType = static_cast<InvokeType>(-1);
 
@@ -1079,16 +1080,25 @@
 
 #define FOR_EACH_CONCRETE_INSTRUCTION_ARM(M)
 
+#ifndef ART_ENABLE_CODEGEN_arm64
 #define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)
+#else
+#define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)                          \
+  M(Arm64IntermediateAddress, Instruction)
+#endif
 
 #define FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M)
 
 #define FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M)
 
+#ifndef ART_ENABLE_CODEGEN_x86
+#define FOR_EACH_CONCRETE_INSTRUCTION_X86(M)
+#else
 #define FOR_EACH_CONCRETE_INSTRUCTION_X86(M)                            \
   M(X86ComputeBaseMethodAddress, Instruction)                           \
   M(X86LoadFromConstantTable, Instruction)                              \
   M(X86PackedSwitch, Instruction)
+#endif
 
 #define FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
 
@@ -1373,6 +1383,10 @@
     return SideEffects(flags_ & ~other.flags_);
   }
 
+  void Add(SideEffects other) {
+    flags_ |= other.flags_;
+  }
+
   bool Includes(SideEffects other) const {
     return (other.flags_ & flags_) == other.flags_;
   }
@@ -1946,6 +1960,7 @@
   }
 
   SideEffects GetSideEffects() const { return side_effects_; }
+  void AddSideEffects(SideEffects other) { side_effects_.Add(other); }
 
   size_t GetLifetimePosition() const { return lifetime_position_; }
   void SetLifetimePosition(size_t position) { lifetime_position_ = position; }
@@ -2017,7 +2032,7 @@
   // order of blocks where this instruction's live interval start.
   size_t lifetime_position_;
 
-  const SideEffects side_effects_;
+  SideEffects side_effects_;
 
   // TODO: for primitive types this should be marked as invalid.
   ReferenceTypeInfo reference_type_info_;
@@ -4327,18 +4342,21 @@
             Primitive::Type field_type,
             bool is_volatile,
             uint32_t index,
+            uint16_t declaring_class_def_index,
             const DexFile& dex_file,
             Handle<mirror::DexCache> dex_cache)
       : field_offset_(field_offset),
         field_type_(field_type),
         is_volatile_(is_volatile),
         index_(index),
+        declaring_class_def_index_(declaring_class_def_index),
         dex_file_(dex_file),
         dex_cache_(dex_cache) {}
 
   MemberOffset GetFieldOffset() const { return field_offset_; }
   Primitive::Type GetFieldType() const { return field_type_; }
   uint32_t GetFieldIndex() const { return index_; }
+  uint16_t GetDeclaringClassDefIndex() const { return declaring_class_def_index_; }
   const DexFile& GetDexFile() const { return dex_file_; }
   bool IsVolatile() const { return is_volatile_; }
   Handle<mirror::DexCache> GetDexCache() const { return dex_cache_; }
@@ -4348,6 +4366,7 @@
   const Primitive::Type field_type_;
   const bool is_volatile_;
   const uint32_t index_;
+  const uint16_t declaring_class_def_index_;
   const DexFile& dex_file_;
   const Handle<mirror::DexCache> dex_cache_;
 };
@@ -4359,13 +4378,20 @@
                     MemberOffset field_offset,
                     bool is_volatile,
                     uint32_t field_idx,
+                    uint16_t declaring_class_def_index,
                     const DexFile& dex_file,
                     Handle<mirror::DexCache> dex_cache,
                     uint32_t dex_pc)
-      : HExpression(
-            field_type,
-            SideEffects::FieldReadOfType(field_type, is_volatile), dex_pc),
-        field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache) {
+      : HExpression(field_type,
+                    SideEffects::FieldReadOfType(field_type, is_volatile),
+                    dex_pc),
+        field_info_(field_offset,
+                    field_type,
+                    is_volatile,
+                    field_idx,
+                    declaring_class_def_index,
+                    dex_file,
+                    dex_cache) {
     SetRawInputAt(0, value);
   }
 
@@ -4405,12 +4431,19 @@
                     MemberOffset field_offset,
                     bool is_volatile,
                     uint32_t field_idx,
+                    uint16_t declaring_class_def_index,
                     const DexFile& dex_file,
                     Handle<mirror::DexCache> dex_cache,
                     uint32_t dex_pc)
-      : HTemplateInstruction(
-          SideEffects::FieldWriteOfType(field_type, is_volatile), dex_pc),
-        field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache),
+      : HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile),
+                             dex_pc),
+        field_info_(field_offset,
+                    field_type,
+                    is_volatile,
+                    field_idx,
+                    declaring_class_def_index,
+                    dex_file,
+                    dex_cache),
         value_can_be_null_(true) {
     SetRawInputAt(0, object);
     SetRawInputAt(1, value);
@@ -4442,8 +4475,11 @@
   HArrayGet(HInstruction* array,
             HInstruction* index,
             Primitive::Type type,
-            uint32_t dex_pc)
-      : HExpression(type, SideEffects::ArrayReadOfType(type), dex_pc) {
+            uint32_t dex_pc,
+            SideEffects additional_side_effects = SideEffects::None())
+      : HExpression(type,
+                    SideEffects::ArrayReadOfType(type).Union(additional_side_effects),
+                    dex_pc) {
     SetRawInputAt(0, array);
     SetRawInputAt(1, index);
   }
@@ -4478,10 +4514,13 @@
             HInstruction* index,
             HInstruction* value,
             Primitive::Type expected_component_type,
-            uint32_t dex_pc)
+            uint32_t dex_pc,
+            SideEffects additional_side_effects = SideEffects::None())
       : HTemplateInstruction(
             SideEffects::ArrayWriteOfType(expected_component_type).Union(
-                SideEffectsForArchRuntimeCalls(value->GetType())), dex_pc),
+                SideEffectsForArchRuntimeCalls(value->GetType())).Union(
+                    additional_side_effects),
+            dex_pc),
         expected_component_type_(expected_component_type),
         needs_type_check_(value->GetType() == Primitive::kPrimNot),
         value_can_be_null_(true),
@@ -4536,6 +4575,10 @@
         : expected_component_type_;
   }
 
+  Primitive::Type GetRawExpectedComponentType() const {
+    return expected_component_type_;
+  }
+
   static SideEffects SideEffectsForArchRuntimeCalls(Primitive::Type value_type) {
     return (value_type == Primitive::kPrimNot) ? SideEffects::CanTriggerGC() : SideEffects::None();
   }
@@ -4594,6 +4637,7 @@
 
   bool CanThrow() const OVERRIDE { return true; }
 
+  HInstruction* GetIndex() const { return InputAt(0); }
 
   DECLARE_INSTRUCTION(BoundsCheck);
 
@@ -4825,14 +4869,20 @@
                   MemberOffset field_offset,
                   bool is_volatile,
                   uint32_t field_idx,
+                  uint16_t declaring_class_def_index,
                   const DexFile& dex_file,
                   Handle<mirror::DexCache> dex_cache,
                   uint32_t dex_pc)
-      : HExpression(
-            field_type,
-            SideEffects::FieldReadOfType(field_type, is_volatile),
-            dex_pc),
-        field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache) {
+      : HExpression(field_type,
+                    SideEffects::FieldReadOfType(field_type, is_volatile),
+                    dex_pc),
+        field_info_(field_offset,
+                    field_type,
+                    is_volatile,
+                    field_idx,
+                    declaring_class_def_index,
+                    dex_file,
+                    dex_cache) {
     SetRawInputAt(0, cls);
   }
 
@@ -4869,13 +4919,19 @@
                   MemberOffset field_offset,
                   bool is_volatile,
                   uint32_t field_idx,
+                  uint16_t declaring_class_def_index,
                   const DexFile& dex_file,
                   Handle<mirror::DexCache> dex_cache,
                   uint32_t dex_pc)
-      : HTemplateInstruction(
-          SideEffects::FieldWriteOfType(field_type, is_volatile),
-          dex_pc),
-        field_info_(field_offset, field_type, is_volatile, field_idx, dex_file, dex_cache),
+      : HTemplateInstruction(SideEffects::FieldWriteOfType(field_type, is_volatile),
+                             dex_pc),
+        field_info_(field_offset,
+                    field_type,
+                    is_volatile,
+                    field_idx,
+                    declaring_class_def_index,
+                    dex_file,
+                    dex_cache),
         value_can_be_null_(true) {
     SetRawInputAt(0, cls);
     SetRawInputAt(1, value);
@@ -5409,6 +5465,9 @@
 
 }  // namespace art
 
+#ifdef ART_ENABLE_CODEGEN_arm64
+#include "nodes_arm64.h"
+#endif
 #ifdef ART_ENABLE_CODEGEN_x86
 #include "nodes_x86.h"
 #endif
diff --git a/compiler/optimizing/nodes_arm64.h b/compiler/optimizing/nodes_arm64.h
new file mode 100644
index 0000000..885d3a2
--- /dev/null
+++ b/compiler/optimizing/nodes_arm64.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_NODES_ARM64_H_
+#define ART_COMPILER_OPTIMIZING_NODES_ARM64_H_
+
+namespace art {
+
+// This instruction computes an intermediate address pointing in the 'middle' of an object. The
+// result pointer cannot be handled by GC, so extra care is taken to make sure that this value is
+// never used across anything that can trigger GC.
+class HArm64IntermediateAddress : public HExpression<2> {
+ public:
+  HArm64IntermediateAddress(HInstruction* base_address, HInstruction* offset, uint32_t dex_pc)
+      : HExpression(Primitive::kPrimNot, SideEffects::DependsOnGC(), dex_pc) {
+    SetRawInputAt(0, base_address);
+    SetRawInputAt(1, offset);
+  }
+
+  bool CanBeMoved() const OVERRIDE { return true; }
+  bool InstructionDataEquals(HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE { return true; }
+
+  HInstruction* GetBaseAddress() const { return InputAt(0); }
+  HInstruction* GetOffset() const { return InputAt(1); }
+
+  DECLARE_INSTRUCTION(Arm64IntermediateAddress);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HArm64IntermediateAddress);
+};
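+
+// Illustrative sketch, not part of this node's definition: a plain C++ analogue of the
+// addressing pattern this node represents, using element indices instead of byte offsets.
+// The common "base + offset" computation is done once (one intermediate address) and then
+// reused by several accesses, which the arm64 backend can exploit as long as no GC can move
+// the object in between.
+inline int32_t Arm64IntermediateAddressExample(const int32_t* base, size_t header_elements) {
+  const int32_t* intermediate = base + header_elements;  // Computed once, like this node.
+  return intermediate[0] + intermediate[1];              // Both accesses reuse the address.
+}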
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_NODES_ARM64_H_
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 51d5923..2584d3f 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -58,6 +58,7 @@
 #include "intrinsics.h"
 #include "licm.h"
 #include "jni/quick/jni_compiler.h"
+#include "load_store_elimination.h"
 #include "nodes.h"
 #include "prepare_for_register_allocation.h"
 #include "reference_type_propagation.h"
@@ -464,6 +465,7 @@
   SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
   GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
   LICM* licm = new (arena) LICM(graph, *side_effects);
+  LoadStoreElimination* lse = new (arena) LoadStoreElimination(graph, *side_effects);
   HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
   BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, induction);
   ReferenceTypePropagation* type_propagation =
@@ -518,6 +520,7 @@
       induction,
       bce,
       simplify3,
+      lse,
       dce2,
       // The codegen has a few assumptions that only the instruction simplifier
       // can satisfy. For example, the code generator does not expect to see a
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index ed5419e..080f970 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -488,6 +488,7 @@
                                                          MemberOffset(22),
                                                          false,
                                                          kUnknownFieldIndex,
+                                                         kUnknownClassDefIndex,
                                                          graph->GetDexFile(),
                                                          dex_cache,
                                                          0);
@@ -514,6 +515,7 @@
                                               MemberOffset(42),
                                               false,
                                               kUnknownFieldIndex,
+                                              kUnknownClassDefIndex,
                                               graph->GetDexFile(),
                                               dex_cache,
                                               0);
@@ -522,6 +524,7 @@
                                             MemberOffset(42),
                                             false,
                                             kUnknownFieldIndex,
+                                            kUnknownClassDefIndex,
                                             graph->GetDexFile(),
                                             dex_cache,
                                             0);
@@ -638,6 +641,7 @@
                                              MemberOffset(42),
                                              false,
                                              kUnknownFieldIndex,
+                                             kUnknownClassDefIndex,
                                              graph->GetDexFile(),
                                              dex_cache,
                                              0);
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index c4a3b28..560502f 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -15,8 +15,9 @@
  */
 
 #include "stack_map.h"
+
+#include "base/arena_bit_vector.h"
 #include "stack_map_stream.h"
-#include "utils/arena_bit_vector.h"
 
 #include "gtest/gtest.h"
 
diff --git a/compiler/utils/arena_allocator_test.cc b/compiler/utils/arena_allocator_test.cc
index 7065527..7f67ef1 100644
--- a/compiler/utils/arena_allocator_test.cc
+++ b/compiler/utils/arena_allocator_test.cc
@@ -15,8 +15,8 @@
  */
 
 #include "base/arena_allocator.h"
+#include "base/arena_bit_vector.h"
 #include "gtest/gtest.h"
-#include "utils/arena_bit_vector.h"
 
 namespace art {
 
diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc
index 16f29b0..4413906 100644
--- a/compiler/utils/mips64/assembler_mips64_test.cc
+++ b/compiler/utils/mips64/assembler_mips64_test.cc
@@ -391,10 +391,30 @@
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Srl, 5, "srl ${reg1}, ${reg2}, {imm}"), "srl");
 }
 
+TEST_F(AssemblerMIPS64Test, Rotr) {
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Rotr, 5, "rotr ${reg1}, ${reg2}, {imm}"), "rotr");
+}
+
 TEST_F(AssemblerMIPS64Test, Sra) {
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Sra, 5, "sra ${reg1}, ${reg2}, {imm}"), "sra");
 }
 
+TEST_F(AssemblerMIPS64Test, Sllv) {
+  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Sllv, "sllv ${reg1}, ${reg2}, ${reg3}"), "sllv");
+}
+
+TEST_F(AssemblerMIPS64Test, Srlv) {
+  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Srlv, "srlv ${reg1}, ${reg2}, ${reg3}"), "srlv");
+}
+
+TEST_F(AssemblerMIPS64Test, Rotrv) {
+  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Rotrv, "rotrv ${reg1}, ${reg2}, ${reg3}"), "rotrv");
+}
+
+TEST_F(AssemblerMIPS64Test, Srav) {
+  DriverStr(RepeatRRR(&mips64::Mips64Assembler::Srav, "srav ${reg1}, ${reg2}, ${reg3}"), "srav");
+}
+
 TEST_F(AssemblerMIPS64Test, Dsll) {
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll, 5, "dsll ${reg1}, ${reg2}, {imm}"), "dsll");
 }
@@ -403,20 +423,33 @@
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl, 5, "dsrl ${reg1}, ${reg2}, {imm}"), "dsrl");
 }
 
+TEST_F(AssemblerMIPS64Test, Drotr) {
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Drotr, 5, "drotr ${reg1}, ${reg2}, {imm}"),
+            "drotr");
+}
+
 TEST_F(AssemblerMIPS64Test, Dsra) {
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra, 5, "dsra ${reg1}, ${reg2}, {imm}"), "dsra");
 }
 
 TEST_F(AssemblerMIPS64Test, Dsll32) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll32, 5, "dsll32 ${reg1}, ${reg2}, {imm}"), "dsll32");
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsll32, 5, "dsll32 ${reg1}, ${reg2}, {imm}"),
+            "dsll32");
 }
 
 TEST_F(AssemblerMIPS64Test, Dsrl32) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl32, 5, "dsrl32 ${reg1}, ${reg2}, {imm}"), "dsrl32");
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsrl32, 5, "dsrl32 ${reg1}, ${reg2}, {imm}"),
+            "dsrl32");
+}
+
+TEST_F(AssemblerMIPS64Test, Drotr32) {
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Drotr32, 5, "drotr32 ${reg1}, ${reg2}, {imm}"),
+            "drotr32");
 }
 
 TEST_F(AssemblerMIPS64Test, Dsra32) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra32, 5, "dsra32 ${reg1}, ${reg2}, {imm}"), "dsra32");
+  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Dsra32, 5, "dsra32 ${reg1}, ${reg2}, {imm}"),
+            "dsra32");
 }
 
 TEST_F(AssemblerMIPS64Test, Sc) {
@@ -435,10 +468,6 @@
   DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Lld, -9, "lld ${reg1}, {imm}(${reg2})"), "lld");
 }
 
-TEST_F(AssemblerMIPS64Test, Rotr) {
-  DriverStr(RepeatRRIb(&mips64::Mips64Assembler::Rotr, 5, "rotr ${reg1}, ${reg2}, {imm}"), "rotr");
-}
-
 TEST_F(AssemblerMIPS64Test, Seleqz) {
   DriverStr(RepeatRRR(&mips64::Mips64Assembler::Seleqz, "seleqz ${reg1}, ${reg2}, ${reg3}"),
             "seleqz");
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 9236ffb..09d7311 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -25,6 +25,7 @@
   barrier.cc \
   base/allocator.cc \
   base/arena_allocator.cc \
+  base/arena_bit_vector.cc \
   base/bit_vector.cc \
   base/hex_dump.cc \
   base/logging.cc \
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index 4dedb33..375a03a 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -28,11 +28,11 @@
   std::fill_n(gprs_, arraysize(gprs_), nullptr);
   std::fill_n(fprs_, arraysize(fprs_), nullptr);
   gprs_[SP] = &sp_;
-  gprs_[RA] = &ra_;
+  gprs_[T9] = &t9_;
   gprs_[A0] = &arg0_;
   // Initialize registers with easy to spot debug values.
   sp_ = MipsContext::kBadGprBase + SP;
-  ra_ = MipsContext::kBadGprBase + RA;
+  t9_ = MipsContext::kBadGprBase + T9;
   arg0_ = 0;
 }
 
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
index f1e2905..7dcff63 100644
--- a/runtime/arch/mips/context_mips.h
+++ b/runtime/arch/mips/context_mips.h
@@ -41,7 +41,7 @@
   }
 
   void SetPC(uintptr_t new_pc) OVERRIDE {
-    SetGPR(RA, new_pc);
+    SetGPR(T9, new_pc);
   }
 
   bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
@@ -86,9 +86,10 @@
   // Pointers to registers in the stack, initialized to null except for the special cases below.
   uintptr_t* gprs_[kNumberOfCoreRegisters];
   uint32_t* fprs_[kNumberOfFRegisters];
-  // Hold values for sp and ra (return address) if they are not located within a stack frame, as
-  // well as the first argument.
-  uintptr_t sp_, ra_, arg0_;
+  // Hold values for sp and t9 if they are not located within a stack frame. We use t9 for the
+  // PC (as ra is required to be valid for single-frame deopt and must not be clobbered). We
+  // also need the first argument for single-frame deopt.
+  uintptr_t sp_, t9_, arg0_;
 };
 }  // namespace mips
 }  // namespace art
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index ba58c3f..0691f2a 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -374,7 +374,7 @@
     lw      $ra, 124($a0)
     lw      $a0, 16($a0)
     move    $v0, $zero          # clear result registers r0 and r1
-    jalr    $zero, $ra          # do long jump
+    jalr    $zero, $t9          # do long jump
     move    $v1, $zero
 END art_quick_do_long_jump
 
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index f9960ac..ad255b8 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -19,6 +19,7 @@
 
 #include <map>
 #include <set>
+#include <unordered_map>
 
 #include "atomic.h"
 #include "base/macros.h"
@@ -150,19 +151,24 @@
 template<class T, AllocatorTag kTag>
 // C++ doesn't allow template typedefs. This is a workaround template typedef which is
 // TrackingAllocatorImpl<T> if kEnableTrackingAllocator is true, std::allocator<T> otherwise.
-class TrackingAllocator : public TypeStaticIf<kEnableTrackingAllocator,
-                                              TrackingAllocatorImpl<T, kTag>,
-                                              std::allocator<T>>::type {
-};
+using TrackingAllocator = typename TypeStaticIf<kEnableTrackingAllocator,
+                                                TrackingAllocatorImpl<T, kTag>,
+                                                std::allocator<T>>::type;
 
 template<class Key, class T, AllocatorTag kTag, class Compare = std::less<Key>>
-class AllocationTrackingMultiMap : public std::multimap<
-    Key, T, Compare, TrackingAllocator<std::pair<Key, T>, kTag>> {
-};
+using AllocationTrackingMultiMap = std::multimap<
+    Key, T, Compare, TrackingAllocator<std::pair<Key, T>, kTag>>;
 
 template<class Key, AllocatorTag kTag, class Compare = std::less<Key>>
-class AllocationTrackingSet : public std::set<Key, Compare, TrackingAllocator<Key, kTag>> {
-};
+using AllocationTrackingSet = std::set<Key, Compare, TrackingAllocator<Key, kTag>>;
+
+template<class Key,
+         class T,
+         AllocatorTag kTag,
+         class Hash = std::hash<Key>,
+         class Pred = std::equal_to<Key>>
+using AllocationTrackingUnorderedMap = std::unordered_map<
+    Key, T, Hash, Pred, TrackingAllocator<std::pair<const Key, T>, kTag>>;
 
 }  // namespace art
 
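
Illustration only, not part of the patch: a minimal, self-contained sketch of the alias-template pattern the allocator.h hunk above switches to. Every name below (kTrack, CountingAllocator, MaybeTrackedAllocator, TrackedMultiMap) is hypothetical; the point is that a C++11 alias template yields exactly the selected type, so no empty derived class is needed.

// Sketch of selecting an allocator at compile time with an alias template.
#include <cstddef>
#include <functional>
#include <map>
#include <memory>
#include <type_traits>
#include <utility>

// Stand-in for a tracking allocator; a real one would override allocate()/deallocate().
template <typename T>
struct CountingAllocator : std::allocator<T> {};

constexpr bool kTrack = true;

// The alias template picks the allocator type directly, with no wrapper class involved.
template <typename T>
using MaybeTrackedAllocator =
    typename std::conditional<kTrack, CountingAllocator<T>, std::allocator<T>>::type;

template <typename K, typename V>
using TrackedMultiMap =
    std::multimap<K, V, std::less<K>, MaybeTrackedAllocator<std::pair<const K, V>>>;

int main() {
  TrackedMultiMap<int, int> m;  // Behaves exactly like std::multimap<int, int>.
  m.emplace(1, 2);
  return 0;
}

One practical effect is that the aliases expose the containers' full constructor sets, which the old empty derived classes did not.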
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 1704688..71afa0f 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -94,6 +94,8 @@
   "CodeGen      ",
   "ParallelMove ",
   "GraphChecker ",
+  "LSE          ",
+  "Verifier     ",
 };
 
 template <bool kCount>
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 4e9282f..ace6c38 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -106,6 +106,8 @@
   kArenaAllocCodeGenerator,
   kArenaAllocParallelMoveResolver,
   kArenaAllocGraphChecker,
+  kArenaAllocLSE,
+  kArenaAllocVerifier,
   kNumArenaAllocKinds
 };
 
diff --git a/compiler/utils/arena_bit_vector.cc b/runtime/base/arena_bit_vector.cc
similarity index 100%
rename from compiler/utils/arena_bit_vector.cc
rename to runtime/base/arena_bit_vector.cc
diff --git a/compiler/utils/arena_bit_vector.h b/runtime/base/arena_bit_vector.h
similarity index 92%
rename from compiler/utils/arena_bit_vector.h
rename to runtime/base/arena_bit_vector.h
index f2a7452..d606166 100644
--- a/compiler/utils/arena_bit_vector.h
+++ b/runtime/base/arena_bit_vector.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
-#define ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
+#ifndef ART_RUNTIME_BASE_ARENA_BIT_VECTOR_H_
+#define ART_RUNTIME_BASE_ARENA_BIT_VECTOR_H_
 
 #include "base/arena_object.h"
 #include "base/bit_vector.h"
@@ -65,4 +65,4 @@
 
 }  // namespace art
 
-#endif  // ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
+#endif  // ART_RUNTIME_BASE_ARENA_BIT_VECTOR_H_
diff --git a/runtime/base/dchecked_vector.h b/runtime/base/dchecked_vector.h
index 6ec573a..2bd12df 100644
--- a/runtime/base/dchecked_vector.h
+++ b/runtime/base/dchecked_vector.h
@@ -59,8 +59,10 @@
       : Base() { }
   explicit dchecked_vector(const allocator_type& alloc)
       : Base(alloc) { }
+  // Note that we cannot forward to std::vector(size_type, const allocator_type&) because it is not
+  // available in C++11, which is the latest standard GCC can support. http://b/25022512
   explicit dchecked_vector(size_type n, const allocator_type& alloc = allocator_type())
-      : Base(n, alloc) { }
+      : Base(alloc) { resize(n); }
   dchecked_vector(size_type n,
                   const value_type& value,
                   const allocator_type& alloc = allocator_type())
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index 4819f06..95baa82 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -420,6 +420,19 @@
     Resize(Size() / max_load_factor_);
   }
 
+  // Reserve enough room to insert until Size() == num_elements without requiring the hash set
+  // to grow. No-op if it is already large enough to do this.
+  void Reserve(size_t num_elements) {
+    size_t num_buckets = num_elements / max_load_factor_;
+    // Deal with rounding errors. Add one for rounding.
+    while (static_cast<size_t>(num_buckets * max_load_factor_) <= num_elements + 1u) {
+      ++num_buckets;
+    }
+    if (num_buckets > NumBuckets()) {
+      Resize(num_buckets);
+    }
+  }
+
   // To distance that inserted elements were probed. Used for measuring how good hash functions
   // are.
   size_t TotalProbeDistance() const {
@@ -488,6 +501,15 @@
     }
   }
 
+  // The hash set expands when Size() reaches ElementsUntilExpand().
+  size_t ElementsUntilExpand() const {
+    return elements_until_expand_;
+  }
+
+  size_t NumBuckets() const {
+    return num_buckets_;
+  }
+
  private:
   T& ElementForIndex(size_t index) {
     DCHECK_LT(index, NumBuckets());
@@ -543,10 +565,6 @@
     return emptyfn_.IsEmpty(ElementForIndex(index));
   }
 
-  size_t NumBuckets() const {
-    return num_buckets_;
-  }
-
   // Allocate a number of buckets.
   void AllocateStorage(size_t num_buckets) {
     num_buckets_ = num_buckets;
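
As a worked example of the Reserve() arithmetic added above (illustration only; the 0.7 load factor is an assumed value, not something stated in this patch): Reserve(100) starts from 100 / 0.7 = 142 buckets, and the loop keeps bumping the count while static_cast<size_t>(buckets * 0.7) <= 101, so 142 through 145 (giving 99, 100, 100 and 101) are all rejected and 146 buckets are requested; about 0.7 * 146 = 102 insertions then fit before the next expansion, which is what the CHECK_GE(ElementsUntilExpand(), size) in the test added below relies on.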
diff --git a/runtime/base/hash_set_test.cc b/runtime/base/hash_set_test.cc
index 743e98e..8254063 100644
--- a/runtime/base/hash_set_test.cc
+++ b/runtime/base/hash_set_test.cc
@@ -333,4 +333,25 @@
   ASSERT_NE(hash_set.end(), hash_set.Find(std::forward_list<int>({1, 2, 3, 4})));
 }
 
+TEST_F(HashSetTest, TestReserve) {
+  HashSet<std::string, IsEmptyFnString> hash_set;
+  std::vector<size_t> sizes = {1, 10, 25, 55, 128, 1024, 4096};
+  for (size_t size : sizes) {
+    hash_set.Reserve(size);
+    const size_t buckets_before = hash_set.NumBuckets();
+    // Check that we expanded enough.
+    CHECK_GE(hash_set.ElementsUntilExpand(), size);
+    // Try inserting elements until we are at our reserve size and ensure the hash set did not
+    // expand.
+    while (hash_set.Size() < size) {
+      hash_set.Insert(std::to_string(hash_set.Size()));
+    }
+    CHECK_EQ(hash_set.NumBuckets(), buckets_before);
+  }
+  // Check the behaviour for shrinking; it does not necessarily resize down.
+  constexpr size_t size = 100;
+  hash_set.Reserve(size);
+  CHECK_GE(hash_set.ElementsUntilExpand(), size);
+}
+
 }  // namespace art
diff --git a/runtime/base/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
index 2554fb0..a30c73d 100644
--- a/runtime/base/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -31,6 +31,16 @@
 template <typename T>
 class ScopedArenaAllocatorAdapter;
 
+// Tag associated with each allocation to help prevent double free.
+enum class ArenaFreeTag : uint8_t {
+  // Allocation is used and has not yet been destroyed.
+  kUsed,
+  // Allocation has been destroyed.
+  kFree,
+};
+
+static constexpr size_t kArenaAlignment = 8;
+
 // Holds a list of Arenas for use by ScopedArenaAllocator stack.
 class ArenaStack : private DebugStackRefCounter, private ArenaAllocatorMemoryTool {
  public:
@@ -50,6 +60,12 @@
 
   MemStats GetPeakStats() const;
 
+  // Return the arena tag associated with a pointer.
+  static ArenaFreeTag& ArenaTagForAllocation(void* ptr) {
+    DCHECK(kIsDebugBuild) << "Only debug builds have tags";
+    return *(reinterpret_cast<ArenaFreeTag*>(ptr) - 1);
+  }
+
  private:
   struct Peak;
   struct Current;
@@ -72,13 +88,18 @@
     if (UNLIKELY(IsRunningOnMemoryTool())) {
       return AllocWithMemoryTool(bytes, kind);
     }
-    size_t rounded_bytes = RoundUp(bytes, 8);
+    // Add kArenaAlignment for the free or used tag. Required to preserve alignment.
+    size_t rounded_bytes = RoundUp(bytes + (kIsDebugBuild ? kArenaAlignment : 0u), kArenaAlignment);
     uint8_t* ptr = top_ptr_;
     if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) {
       ptr = AllocateFromNextArena(rounded_bytes);
     }
     CurrentStats()->RecordAlloc(bytes, kind);
     top_ptr_ = ptr + rounded_bytes;
+    if (kIsDebugBuild) {
+      ptr += kArenaAlignment;
+      ArenaTagForAllocation(ptr) = ArenaFreeTag::kUsed;
+    }
     return ptr;
   }
 
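
Illustration only, not part of the patch: a stand-alone sketch of the debug-build layout produced by the Alloc() change above. The request is padded by kArenaAlignment bytes so alignment is preserved, the returned pointer is advanced past that padding, and the one-byte free/used tag sits immediately before it, which is what ArenaTagForAllocation() reads back. All names in the sketch are hypothetical.

// Simplified, hypothetical version of the tag-before-pointer scheme.
#include <cstddef>
#include <cstdint>

enum class Tag : uint8_t { kUsed, kFree };

constexpr size_t kAlign = 8;

// 'top' points at the next free byte of a big arena buffer; returns the user pointer and
// advances *top past the whole (padded) allocation.
uint8_t* AllocWithTag(uint8_t** top, size_t bytes) {
  size_t rounded = (bytes + kAlign + (kAlign - 1)) & ~(kAlign - 1);  // RoundUp(bytes + kAlign, 8)
  uint8_t* ptr = *top;
  *top = ptr + rounded;
  ptr += kAlign;                                     // skip the padding that holds the tag
  *(reinterpret_cast<Tag*>(ptr) - 1) = Tag::kUsed;   // tag lives just before the user pointer
  return ptr;
}

int main() {
  alignas(8) uint8_t arena[128];
  uint8_t* top = arena;
  uint8_t* p = AllocWithTag(&top, 24);
  // A debug "free" flips the tag, which is how double frees can be detected later.
  *(reinterpret_cast<Tag*>(p) - 1) = Tag::kFree;
  return 0;
}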
diff --git a/runtime/base/scoped_arena_containers.h b/runtime/base/scoped_arena_containers.h
index 562c2bf..1236585 100644
--- a/runtime/base/scoped_arena_containers.h
+++ b/runtime/base/scoped_arena_containers.h
@@ -20,6 +20,7 @@
 #include <deque>
 #include <queue>
 #include <set>
+#include <type_traits>
 #include <unordered_map>
 #include <utility>
 
@@ -196,6 +197,47 @@
   return ScopedArenaAllocatorAdapter<void>(this, kind);
 }
 
+// Special deleter that only calls the destructor. Also checks for double free errors.
+template <typename T>
+class ArenaDelete {
+  static constexpr uint8_t kMagicFill = 0xCE;
+ public:
+  void operator()(T* ptr) const {
+    ptr->~T();
+    if (RUNNING_ON_MEMORY_TOOL > 0) {
+      // Writing to the memory will fail if we already destroyed the pointer with
+      // DestroyOnlyDelete since we make it no-access.
+      memset(ptr, kMagicFill, sizeof(T));
+      MEMORY_TOOL_MAKE_NOACCESS(ptr, sizeof(T));
+    } else if (kIsDebugBuild) {
+      CHECK(ArenaStack::ArenaTagForAllocation(reinterpret_cast<void*>(ptr)) == ArenaFreeTag::kUsed)
+          << "Freeing invalid object " << ptr;
+      ArenaStack::ArenaTagForAllocation(reinterpret_cast<void*>(ptr)) = ArenaFreeTag::kFree;
+      // Write a magic value to try to catch use-after-free errors.
+      memset(ptr, kMagicFill, sizeof(T));
+    }
+  }
+};
+
+// In general we lack support for arrays. We would need to call the destructor on each element,
+// which requires access to the array size. Support for that is future work.
+//
+// However, we can support trivially destructible component types, as then a destructor doesn't
+// need to be called.
+template <typename T>
+class ArenaDelete<T[]> {
+ public:
+  void operator()(T* ptr ATTRIBUTE_UNUSED) const {
+    static_assert(std::is_trivially_destructible<T>::value,
+                  "ArenaUniquePtr does not support non-trivially-destructible arrays.");
+    // TODO: Implement debug checks, and MEMORY_TOOL support.
+  }
+};
+
+// Arena unique ptr that only calls the destructor of the element.
+template <typename T>
+using ArenaUniquePtr = std::unique_ptr<T, ArenaDelete<T>>;
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_BASE_SCOPED_ARENA_CONTAINERS_H_
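
Illustration only, not taken from the patch: a self-contained sketch of the destructor-only deleter idea behind ArenaDelete and ArenaUniquePtr. Arena storage is reclaimed in bulk, so the smart pointer must run the destructor without calling operator delete. The names DestroyOnly, ArenaPtr and Widget are hypothetical, and the debug-tag and poisoning checks of the real ArenaDelete are omitted.

#include <memory>
#include <new>

// Deleter that runs the destructor but leaves the storage to the arena.
template <typename T>
struct DestroyOnly {
  void operator()(T* ptr) const { ptr->~T(); }
};

template <typename T>
using ArenaPtr = std::unique_ptr<T, DestroyOnly<T>>;

struct Widget {
  int value = 0;
  ~Widget() { /* release non-memory resources here */ }
};

int main() {
  alignas(Widget) unsigned char slot[sizeof(Widget)];  // stand-in for arena-owned storage
  ArenaPtr<Widget> w(new (slot) Widget());             // placement-new into the "arena"
  w->value = 42;
  return 0;
}  // ~Widget() runs via DestroyOnly; no operator delete is called on the slot.

The same shape explains the ArenaDelete<T[]> specialization above: without a stored element count there is nothing to loop the destructor over, so only trivially destructible element types can be supported.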
diff --git a/runtime/base/stringpiece.h b/runtime/base/stringpiece.h
index 9c83cf5..46743e9 100644
--- a/runtime/base/stringpiece.h
+++ b/runtime/base/stringpiece.h
@@ -165,7 +165,7 @@
   // Pointer to char data, not necessarily zero terminated.
   const char* ptr_;
   // Length of data.
-  size_type   length_;
+  size_type length_;
 };
 
 // This large function is defined inline so that in a fairly common case where
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 48a12e5..2871f76 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -248,7 +248,7 @@
 
   // VRegA
   bool HasVRegA() const;
-  int32_t VRegA() const;
+  ALWAYS_INLINE int32_t VRegA() const;
 
   int8_t VRegA_10t() const {
     return VRegA_10t(Fetch16(0));
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 657fcb5..1d38525 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -26,6 +26,7 @@
 
 #include "art_field-inl.h"
 #include "base/allocator.h"
+#include "base/arena_allocator.h"
 #include "base/dumpable.h"
 #include "base/histogram-inl.h"
 #include "base/stl_util.h"
@@ -1258,11 +1259,11 @@
 }
 
 void Heap::Trim(Thread* self) {
+  Runtime* const runtime = Runtime::Current();
   if (!CareAboutPauseTimes()) {
     ATRACE_BEGIN("Deflating monitors");
     // Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
     // about pauses.
-    Runtime* runtime = Runtime::Current();
     {
       ScopedSuspendAll ssa(__FUNCTION__);
       uint64_t start_time = NanoTime();
@@ -1274,6 +1275,10 @@
   }
   TrimIndirectReferenceTables(self);
   TrimSpaces(self);
+  // Trim arenas that may have been used by JIT or verifier.
+  ATRACE_BEGIN("Trimming arena maps");
+  runtime->GetArenaPool()->TrimMaps();
+  ATRACE_END();
 }
 
 class TrimIndirectReferenceTableClosure : public Closure {
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 6c459a3..556ba56 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -326,7 +326,7 @@
     if (self == nullptr) {
       os << "(Aborting thread was not attached to runtime!)\n";
       DumpKernelStack(os, GetTid(), "  kernel: ", false);
-      DumpNativeStack(os, GetTid(), "  native: ", nullptr);
+      DumpNativeStack(os, GetTid(), nullptr, "  native: ", nullptr);
     } else {
       os << "Aborting thread:\n";
       if (Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self)) {
diff --git a/runtime/runtime_linux.cc b/runtime/runtime_linux.cc
index f0b3c4e..122dcb1 100644
--- a/runtime/runtime_linux.cc
+++ b/runtime/runtime_linux.cc
@@ -41,7 +41,7 @@
  public:
   explicit Backtrace(void* raw_context) : raw_context_(raw_context) {}
   void Dump(std::ostream& os) const {
-    DumpNativeStack(os, GetTid(), "\t", nullptr, raw_context_);
+    DumpNativeStack(os, GetTid(), nullptr, "\t", nullptr, raw_context_);
   }
  private:
   // Stores the context of the signal that was unexpected and will terminate the runtime. The
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 114e0f6..b0cf418 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -919,9 +919,9 @@
      << "]";
 }
 
-void Thread::Dump(std::ostream& os) const {
+void Thread::Dump(std::ostream& os, BacktraceMap* backtrace_map) const {
   DumpState(os);
-  DumpStack(os);
+  DumpStack(os, backtrace_map);
 }
 
 mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
@@ -1480,7 +1480,7 @@
   }
 }
 
-void Thread::DumpStack(std::ostream& os) const {
+void Thread::DumpStack(std::ostream& os, BacktraceMap* backtrace_map) const {
   // TODO: we call this code when dying but may not have suspended the thread ourself. The
   //       IsSuspended check is therefore racy with the use for dumping (normally we inhibit
   //       the race with the thread_suspend_count_lock_).
@@ -1496,7 +1496,7 @@
     if (dump_for_abort || ShouldShowNativeStack(this)) {
       DumpKernelStack(os, GetTid(), "  kernel: ", false);
       ArtMethod* method = GetCurrentMethod(nullptr, !dump_for_abort);
-      DumpNativeStack(os, GetTid(), "  native: ", method);
+      DumpNativeStack(os, GetTid(), backtrace_map, "  native: ", method);
     }
     DumpJavaStack(os);
   } else {
diff --git a/runtime/thread.h b/runtime/thread.h
index 8f3461a..138c143 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -42,6 +42,8 @@
 #include "stack.h"
 #include "thread_state.h"
 
+class BacktraceMap;
+
 namespace art {
 
 namespace gc {
@@ -184,7 +186,7 @@
   void ShortDump(std::ostream& os) const;
 
   // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
-  void Dump(std::ostream& os) const
+  void Dump(std::ostream& os, BacktraceMap* backtrace_map = nullptr) const
       REQUIRES(!Locks::thread_suspend_count_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -1042,7 +1044,7 @@
   void VerifyStackImpl() SHARED_REQUIRES(Locks::mutator_lock_);
 
   void DumpState(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_);
-  void DumpStack(std::ostream& os) const
+  void DumpStack(std::ostream& os, BacktraceMap* backtrace_map = nullptr) const
       REQUIRES(!Locks::thread_suspend_count_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 6176acd..bdd5d10 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -18,6 +18,7 @@
 
 #define ATRACE_TAG ATRACE_TAG_DALVIK
 
+#include <backtrace/BacktraceMap.h>
 #include <cutils/trace.h>
 #include <dirent.h>
 #include <ScopedLocalRef.h>
@@ -109,9 +110,10 @@
 
 void ThreadList::DumpNativeStacks(std::ostream& os) {
   MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid()));
   for (const auto& thread : list_) {
     os << "DUMPING THREAD " << thread->GetTid() << "\n";
-    DumpNativeStack(os, thread->GetTid(), "\t");
+    DumpNativeStack(os, thread->GetTid(), map.get(), "\t");
     os << "\n";
   }
 }
@@ -138,7 +140,7 @@
   // TODO: Reenable this when the native code in system_server can handle it.
   // Currently "adb shell kill -3 `pid system_server`" will cause it to exit.
   if (false) {
-    DumpNativeStack(os, tid, "  native: ");
+    DumpNativeStack(os, tid, nullptr, "  native: ");
   }
   os << "\n";
 }
@@ -175,7 +177,8 @@
 // A closure used by Thread::Dump.
 class DumpCheckpoint FINAL : public Closure {
  public:
-  explicit DumpCheckpoint(std::ostream* os) : os_(os), barrier_(0) {}
+  explicit DumpCheckpoint(std::ostream* os)
+      : os_(os), barrier_(0), backtrace_map_(BacktraceMap::Create(GetTid())) {}
 
   void Run(Thread* thread) OVERRIDE {
     // Note thread and self may not be equal if thread was already suspended at the point of the
@@ -184,7 +187,7 @@
     std::ostringstream local_os;
     {
       ScopedObjectAccess soa(self);
-      thread->Dump(local_os);
+      thread->Dump(local_os, backtrace_map_.get());
     }
     local_os << "\n";
     {
@@ -213,6 +216,8 @@
   std::ostream* const os_;
   // The barrier to be passed through and for the requestor to wait upon.
   Barrier barrier_;
+  // A backtrace map shared by all threads so that map info is not reacquired/parsed per thread.
+  std::unique_ptr<BacktraceMap> backtrace_map_;
 };
 
 void ThreadList::Dump(std::ostream& os) {
@@ -1217,7 +1222,7 @@
       std::string thread_name;
       self->GetThreadName(thread_name);
       std::ostringstream os;
-      DumpNativeStack(os, GetTid(), "  native: ", nullptr);
+      DumpNativeStack(os, GetTid(), nullptr, "  native: ", nullptr);
       LOG(ERROR) << "Request to unregister unattached thread " << thread_name << "\n" << os.str();
       break;
     } else {
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 62af380..dee4f9c 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -46,7 +46,9 @@
 #include <sys/syscall.h>
 #endif
 
-#include <backtrace/Backtrace.h>  // For DumpNativeStack.
+// For DumpNativeStack.
+#include <backtrace/Backtrace.h>
+#include <backtrace/BacktraceMap.h>
 
 #if defined(__linux__)
 #include <linux/unistd.h>
@@ -1102,7 +1104,7 @@
 }
 #endif
 
-void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
+void DumpNativeStack(std::ostream& os, pid_t tid, BacktraceMap* existing_map, const char* prefix,
     ArtMethod* current_method, void* ucontext_ptr) {
 #if __linux__
   // b/18119146
@@ -1110,7 +1112,13 @@
     return;
   }
 
-  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid));
+  BacktraceMap* map = existing_map;
+  std::unique_ptr<BacktraceMap> tmp_map;
+  if (map == nullptr) {
+    tmp_map.reset(BacktraceMap::Create(tid));
+    map = tmp_map.get();
+  }
+  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid, map));
   if (!backtrace->Unwind(0, reinterpret_cast<ucontext*>(ucontext_ptr))) {
     os << prefix << "(backtrace::Unwind failed for thread " << tid << ")\n";
     return;
@@ -1174,7 +1182,7 @@
     }
   }
 #else
-  UNUSED(os, tid, prefix, current_method, ucontext_ptr);
+  UNUSED(os, tid, existing_map, prefix, current_method, ucontext_ptr);
 #endif
 }
 
diff --git a/runtime/utils.h b/runtime/utils.h
index 79502c7..bd52b68 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -31,6 +31,8 @@
 #include "globals.h"
 #include "primitive.h"
 
+class BacktraceMap;
+
 namespace art {
 
 class ArtCode;
@@ -221,12 +223,19 @@
 void SetThreadName(const char* thread_name);
 
 // Dumps the native stack for thread 'tid' to 'os'.
-void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix = "",
-    ArtMethod* current_method = nullptr, void* ucontext = nullptr)
+void DumpNativeStack(std::ostream& os,
+                     pid_t tid,
+                     BacktraceMap* map = nullptr,
+                     const char* prefix = "",
+                     ArtMethod* current_method = nullptr,
+                     void* ucontext = nullptr)
     NO_THREAD_SAFETY_ANALYSIS;
 
 // Dumps the kernel stack for thread 'tid' to 'os'. Note that this is only available on linux-x86.
-void DumpKernelStack(std::ostream& os, pid_t tid, const char* prefix = "", bool include_count = true);
+void DumpKernelStack(std::ostream& os,
+                     pid_t tid,
+                     const char* prefix = "",
+                     bool include_count = true);
 
 // Find $ANDROID_ROOT, /system, or abort.
 const char* GetAndroidRoot();
diff --git a/runtime/verifier/method_verifier-inl.h b/runtime/verifier/method_verifier-inl.h
index 2d9fd53..f52d011 100644
--- a/runtime/verifier/method_verifier-inl.h
+++ b/runtime/verifier/method_verifier-inl.h
@@ -38,6 +38,10 @@
   return insn_flags_[index];
 }
 
+inline InstructionFlags& MethodVerifier::GetInstructionFlags(size_t index) {
+  return insn_flags_[index];
+}
+
 inline mirror::ClassLoader* MethodVerifier::GetClassLoader() {
   return class_loader_.Get();
 }
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 4051a1c..e1d4160 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -58,12 +58,14 @@
 // On VLOG(verifier), should we dump the whole state when we run into a hard failure?
 static constexpr bool kDumpRegLinesOnHardFailureIfVLOG = true;
 
+PcToRegisterLineTable::PcToRegisterLineTable(ScopedArenaAllocator& arena)
+    : register_lines_(arena.Adapter(kArenaAllocVerifier)) {}
+
 void PcToRegisterLineTable::Init(RegisterTrackingMode mode, InstructionFlags* flags,
                                  uint32_t insns_size, uint16_t registers_size,
                                  MethodVerifier* verifier) {
   DCHECK_GT(insns_size, 0U);
-  register_lines_.reset(new RegisterLine*[insns_size]());
-  size_ = insns_size;
+  register_lines_.resize(insns_size);
   for (uint32_t i = 0; i < insns_size; i++) {
     bool interesting = false;
     switch (mode) {
@@ -80,19 +82,12 @@
         break;
     }
     if (interesting) {
-      register_lines_[i] = RegisterLine::Create(registers_size, verifier);
+      register_lines_[i].reset(RegisterLine::Create(registers_size, verifier));
     }
   }
 }
 
-PcToRegisterLineTable::~PcToRegisterLineTable() {
-  for (size_t i = 0; i < size_; i++) {
-    delete register_lines_[i];
-    if (kIsDebugBuild) {
-      register_lines_[i] = nullptr;
-    }
-  }
-}
+PcToRegisterLineTable::~PcToRegisterLineTable() {}
 
 // Note: returns true on failure.
 ALWAYS_INLINE static inline bool FailOrAbort(MethodVerifier* verifier, bool condition,
@@ -398,7 +393,10 @@
                                bool need_precise_constants, bool verify_to_dump,
                                bool allow_thread_suspension)
     : self_(self),
-      reg_types_(can_load_classes),
+      arena_stack_(Runtime::Current()->GetArenaPool()),
+      arena_(&arena_stack_),
+      reg_types_(can_load_classes, arena_),
+      reg_table_(arena_),
       work_insn_idx_(DexFile::kDexNoIndex),
       dex_method_idx_(dex_method_idx),
       mirror_method_(method),
@@ -702,7 +700,11 @@
   }
 
   // Allocate and initialize an array to hold instruction data.
-  insn_flags_.reset(new InstructionFlags[code_item_->insns_size_in_code_units_]());
+  insn_flags_.reset(arena_.AllocArray<InstructionFlags>(code_item_->insns_size_in_code_units_));
+  DCHECK(insn_flags_ != nullptr);
+  std::uninitialized_fill_n(insn_flags_.get(),
+                            code_item_->insns_size_in_code_units_,
+                            InstructionFlags());
   // Run through the instructions and see if the width checks out.
   bool result = ComputeWidthsAndCountOps();
   // Flag instructions guarded by a "try" block and check exception handlers.
@@ -848,7 +850,7 @@
         break;
     }
     size_t inst_size = inst->SizeInCodeUnits();
-    insn_flags_[dex_pc].SetIsOpcode();
+    GetInstructionFlags(dex_pc).SetIsOpcode();
     dex_pc += inst_size;
     inst = inst->RelativeAt(inst_size);
   }
@@ -881,7 +883,7 @@
                                         << " endAddr=" << end << " (size=" << insns_size << ")";
       return false;
     }
-    if (!insn_flags_[start].IsOpcode()) {
+    if (!GetInstructionFlags(start).IsOpcode()) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD)
           << "'try' block starts inside an instruction (" << start << ")";
       return false;
@@ -889,7 +891,7 @@
     uint32_t dex_pc = start;
     const Instruction* inst = Instruction::At(code_item_->insns_ + dex_pc);
     while (dex_pc < end) {
-      insn_flags_[dex_pc].SetInTry();
+      GetInstructionFlags(dex_pc).SetInTry();
       size_t insn_size = inst->SizeInCodeUnits();
       dex_pc += insn_size;
       inst = inst->RelativeAt(insn_size);
@@ -903,7 +905,7 @@
     CatchHandlerIterator iterator(handlers_ptr);
     for (; iterator.HasNext(); iterator.Next()) {
       uint32_t dex_pc= iterator.GetHandlerAddress();
-      if (!insn_flags_[dex_pc].IsOpcode()) {
+      if (!GetInstructionFlags(dex_pc).IsOpcode()) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD)
             << "exception handler starts at bad address (" << dex_pc << ")";
         return false;
@@ -913,7 +915,7 @@
             << "exception handler begins with move-result* (" << dex_pc << ")";
         return false;
       }
-      insn_flags_[dex_pc].SetBranchTarget();
+      GetInstructionFlags(dex_pc).SetBranchTarget();
       // Ensure exception types are resolved so that they don't need resolution to be delivered,
       // unresolved exception types will be ignored by exception delivery
       if (iterator.GetHandlerTypeIndex() != DexFile::kDexNoIndex16) {
@@ -935,8 +937,8 @@
   const Instruction* inst = Instruction::At(code_item_->insns_);
 
   /* Flag the start of the method as a branch target, and a GC point due to stack overflow errors */
-  insn_flags_[0].SetBranchTarget();
-  insn_flags_[0].SetCompileTimeInfoPoint();
+  GetInstructionFlags(0).SetBranchTarget();
+  GetInstructionFlags(0).SetCompileTimeInfoPoint();
 
   uint32_t insns_size = code_item_->insns_size_in_code_units_;
   for (uint32_t dex_pc = 0; dex_pc < insns_size;) {
@@ -948,18 +950,18 @@
     // All invoke points are marked as "Throw" points already.
     // We are relying on this to also count all the invokes as interesting.
     if (inst->IsBranch()) {
-      insn_flags_[dex_pc].SetCompileTimeInfoPoint();
+      GetInstructionFlags(dex_pc).SetCompileTimeInfoPoint();
       // The compiler also needs safepoints for fall-through to loop heads.
       // Such a loop head must be a target of a branch.
       int32_t offset = 0;
       bool cond, self_ok;
       bool target_ok = GetBranchOffset(dex_pc, &offset, &cond, &self_ok);
       DCHECK(target_ok);
-      insn_flags_[dex_pc + offset].SetCompileTimeInfoPoint();
+      GetInstructionFlags(dex_pc + offset).SetCompileTimeInfoPoint();
     } else if (inst->IsSwitch() || inst->IsThrow()) {
-      insn_flags_[dex_pc].SetCompileTimeInfoPoint();
+      GetInstructionFlags(dex_pc).SetCompileTimeInfoPoint();
     } else if (inst->IsReturn()) {
-      insn_flags_[dex_pc].SetCompileTimeInfoPointAndReturn();
+      GetInstructionFlags(dex_pc).SetCompileTimeInfoPointAndReturn();
     }
     dex_pc += inst->SizeInCodeUnits();
     inst = inst->Next();
@@ -1202,7 +1204,7 @@
   }
   // Make sure the array-data is marked as an opcode. This ensures that it was reached when
   // traversing the code item linearly. It is an approximation for a by-spec padding value.
-  if (!insn_flags_[cur_offset + array_data_offset].IsOpcode()) {
+  if (!GetInstructionFlags(cur_offset + array_data_offset).IsOpcode()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array data table at " << cur_offset
                                       << ", data offset " << array_data_offset
                                       << " not correctly visited, probably bad padding.";
@@ -1245,13 +1247,13 @@
   int32_t abs_offset = cur_offset + offset;
   if (abs_offset < 0 ||
       (uint32_t) abs_offset >= insn_count ||
-      !insn_flags_[abs_offset].IsOpcode()) {
+      !GetInstructionFlags(abs_offset).IsOpcode()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid branch target " << offset << " (-> "
                                       << reinterpret_cast<void*>(abs_offset) << ") at "
                                       << reinterpret_cast<void*>(cur_offset);
     return false;
   }
-  insn_flags_[abs_offset].SetBranchTarget();
+  GetInstructionFlags(abs_offset).SetBranchTarget();
   return true;
 }
 
@@ -1315,7 +1317,7 @@
   }
   // Make sure the switch data is marked as an opcode. This ensures that it was reached when
   // traversing the code item linearly. It is an approximation for a by-spec padding value.
-  if (!insn_flags_[cur_offset + switch_offset].IsOpcode()) {
+  if (!GetInstructionFlags(cur_offset + switch_offset).IsOpcode()) {
     Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "switch table at " << cur_offset
                                       << ", switch offset " << switch_offset
                                       << " not correctly visited, probably bad padding.";
@@ -1387,14 +1389,14 @@
     int32_t abs_offset = cur_offset + offset;
     if (abs_offset < 0 ||
         abs_offset >= static_cast<int32_t>(insn_count) ||
-        !insn_flags_[abs_offset].IsOpcode()) {
+        !GetInstructionFlags(abs_offset).IsOpcode()) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch target " << offset
                                         << " (-> " << reinterpret_cast<void*>(abs_offset) << ") at "
                                         << reinterpret_cast<void*>(cur_offset)
                                         << "[" << targ << "]";
       return false;
     }
-    insn_flags_[abs_offset].SetBranchTarget();
+    GetInstructionFlags(abs_offset).SetBranchTarget();
   }
   return true;
 }
@@ -1435,7 +1437,6 @@
                   registers_size,
                   this);
 
-
   work_line_.reset(RegisterLine::Create(registers_size, this));
   saved_line_.reset(RegisterLine::Create(registers_size, this));
 
@@ -1491,7 +1492,7 @@
       vios->Stream() << reg_line->Dump(this) << "\n";
     }
     vios->Stream()
-        << StringPrintf("0x%04zx", dex_pc) << ": " << insn_flags_[dex_pc].ToString() << " ";
+        << StringPrintf("0x%04zx", dex_pc) << ": " << GetInstructionFlags(dex_pc).ToString() << " ";
     const bool kDumpHexOfInstruction = false;
     if (kDumpHexOfInstruction) {
       vios->Stream() << inst->DumpHex(5) << " ";
@@ -1677,7 +1678,7 @@
   const uint32_t insns_size = code_item_->insns_size_in_code_units_;
 
   /* Begin by marking the first instruction as "changed". */
-  insn_flags_[0].SetChanged();
+  GetInstructionFlags(0).SetChanged();
   uint32_t start_guess = 0;
 
   /* Continue until no instructions are marked "changed". */
@@ -1688,7 +1689,7 @@
     // Find the first marked one. Use "start_guess" as a way to find one quickly.
     uint32_t insn_idx = start_guess;
     for (; insn_idx < insns_size; insn_idx++) {
-      if (insn_flags_[insn_idx].IsChanged())
+      if (GetInstructionFlags(insn_idx).IsChanged())
         break;
     }
     if (insn_idx == insns_size) {
@@ -1708,7 +1709,7 @@
     // situation where we have a stray "changed" flag set on an instruction that isn't a branch
     // target.
     work_insn_idx_ = insn_idx;
-    if (insn_flags_[insn_idx].IsBranchTarget()) {
+    if (GetInstructionFlags(insn_idx).IsBranchTarget()) {
       work_line_->CopyFromLine(reg_table_.GetLine(insn_idx));
     } else if (kIsDebugBuild) {
       /*
@@ -1734,8 +1735,8 @@
       return false;
     }
     /* Clear "changed" and mark as visited. */
-    insn_flags_[insn_idx].SetVisited();
-    insn_flags_[insn_idx].ClearChanged();
+    GetInstructionFlags(insn_idx).SetVisited();
+    GetInstructionFlags(insn_idx).ClearChanged();
   }
 
   if (gDebugVerify) {
@@ -1762,10 +1763,10 @@
            (insns[insn_idx + 1] == Instruction::kPackedSwitchSignature ||
             insns[insn_idx + 1] == Instruction::kSparseSwitchSignature ||
             insns[insn_idx + 1] == Instruction::kArrayDataSignature))) {
-        insn_flags_[insn_idx].SetVisited();
+        GetInstructionFlags(insn_idx).SetVisited();
       }
 
-      if (!insn_flags_[insn_idx].IsVisited()) {
+      if (!GetInstructionFlags(insn_idx).IsVisited()) {
         if (dead_start < 0)
           dead_start = insn_idx;
       } else if (dead_start >= 0) {
@@ -1895,8 +1896,8 @@
   // We need to ensure the work line is consistent while performing validation. When we spot a
   // peephole pattern we compute a new line for either the fallthrough instruction or the
   // branch target.
-  std::unique_ptr<RegisterLine> branch_line;
-  std::unique_ptr<RegisterLine> fallthrough_line;
+  ArenaUniquePtr<RegisterLine> branch_line;
+  ArenaUniquePtr<RegisterLine> fallthrough_line;
 
   switch (inst->Opcode()) {
     case Instruction::NOP:
@@ -2144,9 +2145,9 @@
       work_line_->PushMonitor(this, inst->VRegA_11x(), work_insn_idx_);
       // Check whether the previous instruction is a move-object with vAA as a source, creating
       // untracked lock aliasing.
-      if (0 != work_insn_idx_ && !insn_flags_[work_insn_idx_].IsBranchTarget()) {
+      if (0 != work_insn_idx_ && !GetInstructionFlags(work_insn_idx_).IsBranchTarget()) {
         uint32_t prev_idx = work_insn_idx_ - 1;
-        while (0 != prev_idx && !insn_flags_[prev_idx].IsOpcode()) {
+        while (0 != prev_idx && !GetInstructionFlags(prev_idx).IsOpcode()) {
           prev_idx--;
         }
         const Instruction* prev_inst = Instruction::At(code_item_->insns_ + prev_idx);
@@ -2427,10 +2428,10 @@
       uint32_t instance_of_idx = 0;
       if (0 != work_insn_idx_) {
         instance_of_idx = work_insn_idx_ - 1;
-        while (0 != instance_of_idx && !insn_flags_[instance_of_idx].IsOpcode()) {
+        while (0 != instance_of_idx && !GetInstructionFlags(instance_of_idx).IsOpcode()) {
           instance_of_idx--;
         }
-        if (FailOrAbort(this, insn_flags_[instance_of_idx].IsOpcode(),
+        if (FailOrAbort(this, GetInstructionFlags(instance_of_idx).IsOpcode(),
                         "Unable to get previous instruction of if-eqz/if-nez for work index ",
                         work_insn_idx_)) {
           break;
@@ -2486,15 +2487,15 @@
           update_line->SetRegisterType<LockOp::kKeep>(this,
                                                       instance_of_inst->VRegB_22c(),
                                                       cast_type);
-          if (!insn_flags_[instance_of_idx].IsBranchTarget() && 0 != instance_of_idx) {
+          if (!GetInstructionFlags(instance_of_idx).IsBranchTarget() && 0 != instance_of_idx) {
             // See if instance-of was preceded by a move-object operation, common due to the small
             // register encoding space of instance-of, and propagate type information to the source
             // of the move-object.
             uint32_t move_idx = instance_of_idx - 1;
-            while (0 != move_idx && !insn_flags_[move_idx].IsOpcode()) {
+            while (0 != move_idx && !GetInstructionFlags(move_idx).IsOpcode()) {
               move_idx--;
             }
-            if (FailOrAbort(this, insn_flags_[move_idx].IsOpcode(),
+            if (FailOrAbort(this, GetInstructionFlags(move_idx).IsOpcode(),
                             "Unable to get previous instruction of if-eqz/if-nez for work index ",
                             work_insn_idx_)) {
               break;
@@ -2786,8 +2787,7 @@
         work_line_->MarkRefsAsInitialized(this, this_type, this_reg, work_insn_idx_);
       }
       if (return_type == nullptr) {
-        return_type = &reg_types_.FromDescriptor(GetClassLoader(), return_type_descriptor,
-                                                 false);
+        return_type = &reg_types_.FromDescriptor(GetClassLoader(), return_type_descriptor, false);
       }
       if (!return_type->IsLowHalf()) {
         work_line_->SetResultRegisterType(this, *return_type);
@@ -2860,7 +2860,7 @@
         uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
         const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
         uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
-        descriptor =  dex_file_->StringByTypeIdx(return_type_idx);
+        descriptor = dex_file_->StringByTypeIdx(return_type_idx);
       } else {
         descriptor = abs_method->GetReturnTypeDescriptor();
       }
@@ -3309,7 +3309,7 @@
       return false;
     }
     /* update branch target, set "changed" if appropriate */
-    if (nullptr != branch_line.get()) {
+    if (nullptr != branch_line) {
       if (!UpdateRegisters(work_insn_idx_ + branch_target, branch_line.get(), false)) {
         return false;
       }
@@ -3364,7 +3364,7 @@
    * Handle instructions that can throw and that are sitting in a "try" block. (If they're not in a
    * "try" block when they throw, control transfers out of the method.)
    */
-  if ((opcode_flags & Instruction::kThrow) != 0 && insn_flags_[work_insn_idx_].IsInTry()) {
+  if ((opcode_flags & Instruction::kThrow) != 0 && GetInstructionFlags(work_insn_idx_).IsInTry()) {
     bool has_catch_all_handler = false;
     CatchHandlerIterator iterator(*code_item_, work_insn_idx_);
 
@@ -3434,11 +3434,11 @@
     if (!CheckNotMoveException(code_item_->insns_, next_insn_idx)) {
       return false;
     }
-    if (nullptr != fallthrough_line.get()) {
+    if (nullptr != fallthrough_line) {
       // Make workline consistent with fallthrough computed from peephole optimization.
       work_line_->CopyFromLine(fallthrough_line.get());
     }
-    if (insn_flags_[next_insn_idx].IsReturn()) {
+    if (GetInstructionFlags(next_insn_idx).IsReturn()) {
       // For returns we only care about the operand to the return, all other registers are dead.
       const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn_idx);
       AdjustReturnLine(this, ret_inst, work_line_.get());
@@ -3456,7 +3456,7 @@
        * We're not recording register data for the next instruction, so we don't know what the
        * prior state was. We have to assume that something has changed and re-evaluate it.
        */
-      insn_flags_[next_insn_idx].SetChanged();
+      GetInstructionFlags(next_insn_idx).SetChanged();
     }
   }
 
@@ -3480,7 +3480,7 @@
   }
 
   DCHECK_LT(*start_guess, code_item_->insns_size_in_code_units_);
-  DCHECK(insn_flags_[*start_guess].IsOpcode());
+  DCHECK(GetInstructionFlags(*start_guess).IsOpcode());
 
   if (have_pending_runtime_throw_failure_) {
     have_any_pending_runtime_throw_failure_ = true;
@@ -3491,30 +3491,55 @@
   return true;
 }  // NOLINT(readability/fn_size)
 
+void MethodVerifier::UninstantiableError(const char* descriptor) {
+  Fail(VerifyError::VERIFY_ERROR_NO_CLASS) << "Could not create precise reference for "
+                                           << "non-instantiable klass " << descriptor;
+}
+
+inline bool MethodVerifier::IsInstantiableOrPrimitive(mirror::Class* klass) {
+  return klass->IsInstantiable() || klass->IsPrimitive();
+}
+
 const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
-  const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
-  const RegType& referrer = GetDeclaringClass();
   mirror::Class* klass = dex_cache_->GetResolvedType(class_idx);
-  const RegType& result = klass != nullptr ?
-      FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes()) :
-      reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
-  if (result.IsConflict()) {
-    Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "accessing broken descriptor '" << descriptor
-        << "' in " << referrer;
-    return result;
+  const RegType* result = nullptr;
+  if (klass != nullptr) {
+    bool precise = klass->CannotBeAssignedFromOtherTypes();
+    if (precise && !IsInstantiableOrPrimitive(klass)) {
+      const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+      UninstantiableError(descriptor);
+      precise = false;
+    }
+    result = reg_types_.FindClass(klass, precise);
+    if (result == nullptr) {
+      const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+      result = reg_types_.InsertClass(descriptor, klass, precise);
+    }
+  } else {
+    const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+    result = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
   }
-  if (klass == nullptr && !result.IsUnresolvedTypes()) {
-    dex_cache_->SetResolvedType(class_idx, result.GetClass());
+  DCHECK(result != nullptr);
+  if (result->IsConflict()) {
+    const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+    Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "accessing broken descriptor '" << descriptor
+        << "' in " << GetDeclaringClass();
+    return *result;
+  }
+  if (klass == nullptr && !result->IsUnresolvedTypes()) {
+    dex_cache_->SetResolvedType(class_idx, result->GetClass());
   }
   // Check if access is allowed. Unresolved types use xxxWithAccessCheck to
   // check at runtime if access is allowed and so pass here. If result is
   // primitive, skip the access check.
-  if (result.IsNonZeroReferenceTypes() && !result.IsUnresolvedTypes() &&
-      !referrer.IsUnresolvedTypes() && !referrer.CanAccess(result)) {
-    Fail(VERIFY_ERROR_ACCESS_CLASS) << "illegal class access: '"
-                                    << referrer << "' -> '" << result << "'";
+  if (result->IsNonZeroReferenceTypes() && !result->IsUnresolvedTypes()) {
+    const RegType& referrer = GetDeclaringClass();
+    if (!referrer.IsUnresolvedTypes() && !referrer.CanAccess(*result)) {
+      Fail(VERIFY_ERROR_ACCESS_CLASS) << "illegal class access: '"
+                                      << referrer << "' -> '" << *result << "'";
+    }
   }
-  return result;
+  return *result;
 }
 
 const RegType& MethodVerifier::GetCaughtExceptionType() {
@@ -3720,9 +3745,10 @@
       } else {
         const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
         const uint16_t class_idx = dex_file_->GetMethodId(method_idx).class_idx_;
-        res_method_class = &reg_types_.FromDescriptor(GetClassLoader(),
-                                                      dex_file_->StringByTypeIdx(class_idx),
-                                                      false);
+        res_method_class = &reg_types_.FromDescriptor(
+            GetClassLoader(),
+            dex_file_->StringByTypeIdx(class_idx),
+            false);
       }
       if (!res_method_class->IsAssignableFrom(actual_arg_type)) {
         Fail(actual_arg_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS:
@@ -4476,14 +4502,16 @@
         field->GetType<false>();
 
     if (field_type_class != nullptr) {
-      field_type = &FromClass(field->GetTypeDescriptor(), field_type_class,
+      field_type = &FromClass(field->GetTypeDescriptor(),
+                              field_type_class,
                               field_type_class->CannotBeAssignedFromOtherTypes());
     } else {
       Thread* self = Thread::Current();
       DCHECK(!can_load_classes_ || self->IsExceptionPending());
       self->ClearException();
       field_type = &reg_types_.FromDescriptor(field->GetDeclaringClass()->GetClassLoader(),
-                                              field->GetTypeDescriptor(), false);
+                                              field->GetTypeDescriptor(),
+                                              false);
     }
     if (field_type == nullptr) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field type from " << inst->Name();
@@ -4604,14 +4632,14 @@
                                      bool update_merge_line) {
   bool changed = true;
   RegisterLine* target_line = reg_table_.GetLine(next_insn);
-  if (!insn_flags_[next_insn].IsVisitedOrChanged()) {
+  if (!GetInstructionFlags(next_insn).IsVisitedOrChanged()) {
     /*
      * We haven't processed this instruction before, and we haven't touched the registers here, so
      * there's nothing to "merge". Copy the registers over and mark it as changed. (This is the
      * only way a register can transition out of "unknown", so this is not just an optimization.)
      */
     target_line->CopyFromLine(merge_line);
-    if (insn_flags_[next_insn].IsReturn()) {
+    if (GetInstructionFlags(next_insn).IsReturn()) {
       // Verify that the monitor stack is empty on return.
       merge_line->VerifyMonitorStackEmpty(this);
 
@@ -4621,10 +4649,9 @@
       AdjustReturnLine(this, ret_inst, target_line);
     }
   } else {
-    std::unique_ptr<RegisterLine> copy(gDebugVerify ?
-                                 RegisterLine::Create(target_line->NumRegs(), this) :
-                                 nullptr);
+    ArenaUniquePtr<RegisterLine> copy;
     if (gDebugVerify) {
+      copy.reset(RegisterLine::Create(target_line->NumRegs(), this));
       copy->CopyFromLine(target_line);
     }
     changed = target_line->MergeRegisters(this, merge_line);
@@ -4643,13 +4670,13 @@
     }
   }
   if (changed) {
-    insn_flags_[next_insn].SetChanged();
+    GetInstructionFlags(next_insn).SetChanged();
   }
   return true;
 }
 
 InstructionFlags* MethodVerifier::CurrentInsnFlags() {
-  return &insn_flags_[work_insn_idx_];
+  return &GetInstructionFlags(work_insn_idx_);
 }
 
 const RegType& MethodVerifier::GetMethodReturnType() {
@@ -4685,8 +4712,7 @@
         = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
     if (mirror_method_ != nullptr) {
       mirror::Class* klass = mirror_method_->GetDeclaringClass();
-      declaring_class_ = &FromClass(descriptor, klass,
-                                    klass->CannotBeAssignedFromOtherTypes());
+      declaring_class_ = &FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes());
     } else {
       declaring_class_ = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
     }
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index ba694b7..7b51d6e 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -21,7 +21,10 @@
 #include <sstream>
 #include <vector>
 
+#include "base/arena_allocator.h"
 #include "base/macros.h"
+#include "base/scoped_arena_containers.h"
+#include "base/stl_util.h"
 #include "dex_file.h"
 #include "handle.h"
 #include "instruction_flags.h"
@@ -107,7 +110,7 @@
 // execution of that instruction.
 class PcToRegisterLineTable {
  public:
-  PcToRegisterLineTable() : size_(0) {}
+  explicit PcToRegisterLineTable(ScopedArenaAllocator& arena);
   ~PcToRegisterLineTable();
 
   // Initialize the RegisterTable. Every instruction address can have a different set of information
@@ -116,14 +119,12 @@
   void Init(RegisterTrackingMode mode, InstructionFlags* flags, uint32_t insns_size,
             uint16_t registers_size, MethodVerifier* verifier);
 
-  RegisterLine* GetLine(size_t idx) {
-    DCHECK_LT(idx, size_);
-    return register_lines_[idx];
+  RegisterLine* GetLine(size_t idx) const {
+    return register_lines_[idx].get();
   }
 
  private:
-  std::unique_ptr<RegisterLine*[]> register_lines_;
-  size_t size_;
+  ScopedArenaVector<ArenaUniquePtr<RegisterLine>> register_lines_;
 
   DISALLOW_COPY_AND_ASSIGN(PcToRegisterLineTable);
 };
@@ -240,7 +241,8 @@
   // Accessors used by the compiler via CompilerCallback
   const DexFile::CodeItem* CodeItem() const;
   RegisterLine* GetRegLine(uint32_t dex_pc);
-  const InstructionFlags& GetInstructionFlags(size_t index) const;
+  ALWAYS_INLINE const InstructionFlags& GetInstructionFlags(size_t index) const;
+  ALWAYS_INLINE InstructionFlags& GetInstructionFlags(size_t index);
   mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
   MethodReference GetMethodReference() const;
@@ -275,7 +277,14 @@
     return IsConstructor() && !IsStatic();
   }
 
+  ScopedArenaAllocator& GetArena() {
+    return arena_;
+  }
+
  private:
+  void UninstantiableError(const char* descriptor);
+  static bool IsInstantiableOrPrimitive(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+
   // Is the method being verified a constructor? See the comment on the field.
   bool IsConstructor() const {
     return is_constructor_;
@@ -687,19 +696,23 @@
   // The thread we're verifying on.
   Thread* const self_;
 
+  // Arena allocator.
+  ArenaStack arena_stack_;
+  ScopedArenaAllocator arena_;
+
   RegTypeCache reg_types_;
 
   PcToRegisterLineTable reg_table_;
 
   // Storage for the register status we're currently working on.
-  std::unique_ptr<RegisterLine> work_line_;
+  ArenaUniquePtr<RegisterLine> work_line_;
 
   // The address of the instruction we're currently working on, note that this is in 2 byte
   // quantities
   uint32_t work_insn_idx_;
 
   // Storage for the register status we're saving for later.
-  std::unique_ptr<RegisterLine> saved_line_;
+  ArenaUniquePtr<RegisterLine> saved_line_;
 
   const uint32_t dex_method_idx_;  // The method we're working on.
   // Its object representation if known.
@@ -715,7 +728,8 @@
   const DexFile::CodeItem* const code_item_;  // The code item containing the code for the method.
   const RegType* declaring_class_;  // Lazily computed reg type of the method's declaring class.
   // Instruction widths and flags, one entry per code unit.
-  std::unique_ptr<InstructionFlags[]> insn_flags_;
+  // Owned, but not a std::unique_ptr, since insn_flags_ is allocated from the arena.
+  ArenaUniquePtr<InstructionFlags[]> insn_flags_;
   // The dex PC of a FindLocksAtDexPc request, -1 otherwise.
   uint32_t interesting_dex_pc_;
   // The container into which FindLocksAtDexPc should write the registers containing held locks,
diff --git a/runtime/verifier/reg_type-inl.h b/runtime/verifier/reg_type-inl.h
index f445132..11a53e5 100644
--- a/runtime/verifier/reg_type-inl.h
+++ b/runtime/verifier/reg_type-inl.h
@@ -20,6 +20,7 @@
 #include "reg_type.h"
 
 #include "base/casts.h"
+#include "base/scoped_arena_allocator.h"
 #include "mirror/class.h"
 
 namespace art {
@@ -180,6 +181,10 @@
   return instance_;
 }
 
+inline void* RegType::operator new(size_t size, ScopedArenaAllocator* arena) {
+  return arena->Alloc(size, kArenaAllocMisc);
+}
+
 }  // namespace verifier
 }  // namespace art
 
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index b86a4c8..16cab03 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -16,6 +16,7 @@
 
 #include "reg_type-inl.h"
 
+#include "base/arena_bit_vector.h"
 #include "base/bit_vector-inl.h"
 #include "base/casts.h"
 #include "class_linker-inl.h"
@@ -46,20 +47,17 @@
 const DoubleHiType* DoubleHiType::instance_ = nullptr;
 const IntegerType* IntegerType::instance_ = nullptr;
 
-PrimitiveType::PrimitiveType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-    SHARED_REQUIRES(Locks::mutator_lock_)
+PrimitiveType::PrimitiveType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
     : RegType(klass, descriptor, cache_id) {
   CHECK(klass != nullptr);
   CHECK(!descriptor.empty());
 }
 
-Cat1Type::Cat1Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-    SHARED_REQUIRES(Locks::mutator_lock_)
+Cat1Type::Cat1Type(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
     : PrimitiveType(klass, descriptor, cache_id) {
 }
 
-Cat2Type::Cat2Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
-    SHARED_REQUIRES(Locks::mutator_lock_)
+Cat2Type::Cat2Type(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
     : PrimitiveType(klass, descriptor, cache_id) {
 }
 
@@ -121,11 +119,11 @@
 }
 
 std::string IntegerType::Dump() const {
-    return "Integer";
+  return "Integer";
 }
 
 const DoubleHiType* DoubleHiType::CreateInstance(mirror::Class* klass,
-                                                 const std::string& descriptor,
+                                                 const StringPiece& descriptor,
                                                  uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new DoubleHiType(klass, descriptor, cache_id);
@@ -140,7 +138,7 @@
 }
 
 const DoubleLoType* DoubleLoType::CreateInstance(mirror::Class* klass,
-                                                 const std::string& descriptor,
+                                                 const StringPiece& descriptor,
                                                  uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new DoubleLoType(klass, descriptor, cache_id);
@@ -154,14 +152,14 @@
   }
 }
 
-const LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                              uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new LongLoType(klass, descriptor, cache_id);
   return instance_;
 }
 
-const LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                              uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new LongHiType(klass, descriptor, cache_id);
@@ -182,7 +180,7 @@
   }
 }
 
-const FloatType* FloatType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const FloatType* FloatType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                            uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new FloatType(klass, descriptor, cache_id);
@@ -196,7 +194,7 @@
   }
 }
 
-const CharType* CharType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const CharType* CharType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                          uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new CharType(klass, descriptor, cache_id);
@@ -210,7 +208,7 @@
   }
 }
 
-const ShortType* ShortType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const ShortType* ShortType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                            uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new ShortType(klass, descriptor, cache_id);
@@ -224,7 +222,7 @@
   }
 }
 
-const ByteType* ByteType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const ByteType* ByteType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                          uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new ByteType(klass, descriptor, cache_id);
@@ -238,7 +236,7 @@
   }
 }
 
-const IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                                uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new IntegerType(klass, descriptor, cache_id);
@@ -253,7 +251,7 @@
 }
 
 const ConflictType* ConflictType::CreateInstance(mirror::Class* klass,
-                                                 const std::string& descriptor,
+                                                 const StringPiece& descriptor,
                                                  uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new ConflictType(klass, descriptor, cache_id);
@@ -267,7 +265,7 @@
   }
 }
 
-const BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
                                          uint16_t cache_id) {
   CHECK(BooleanType::instance_ == nullptr);
   instance_ = new BooleanType(klass, descriptor, cache_id);
@@ -286,7 +284,7 @@
 }
 
 const UndefinedType* UndefinedType::CreateInstance(mirror::Class* klass,
-                                                   const std::string& descriptor,
+                                                   const StringPiece& descriptor,
                                                    uint16_t cache_id) {
   CHECK(instance_ == nullptr);
   instance_ = new UndefinedType(klass, descriptor, cache_id);
@@ -300,7 +298,7 @@
   }
 }
 
-PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
+PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const StringPiece& descriptor,
                                            uint16_t cache_id)
     : RegType(klass, descriptor, cache_id) {
   // Note: no check for IsInstantiable() here. We may produce this in case an InstantiationError
@@ -335,14 +333,14 @@
 
 std::string UnresolvedReferenceType::Dump() const {
   std::stringstream result;
-  result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor().c_str());
+  result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor().as_string().c_str());
   return result.str();
 }
 
 std::string UnresolvedUninitializedRefType::Dump() const {
   std::stringstream result;
   result << "Unresolved And Uninitialized Reference" << ": "
-      << PrettyDescriptor(GetDescriptor().c_str())
+      << PrettyDescriptor(GetDescriptor().as_string().c_str())
       << " Allocation PC: " << GetAllocationPc();
   return result.str();
 }
@@ -350,7 +348,7 @@
 std::string UnresolvedUninitializedThisRefType::Dump() const {
   std::stringstream result;
   result << "Unresolved And Uninitialized This Reference"
-      << PrettyDescriptor(GetDescriptor().c_str());
+      << PrettyDescriptor(GetDescriptor().as_string().c_str());
   return result.str();
 }
 
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 2834a9a..80b751c 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -22,9 +22,11 @@
 #include <set>
 #include <string>
 
+#include "base/arena_object.h"
 #include "base/bit_vector.h"
 #include "base/macros.h"
 #include "base/mutex.h"
+#include "base/stringpiece.h"
 #include "gc_root.h"
 #include "handle_scope.h"
 #include "object_callbacks.h"
@@ -35,6 +37,9 @@
 class Class;
 }  // namespace mirror
 
+class ArenaBitVector;
+class ScopedArenaAllocator;
+
 namespace verifier {
 
 class RegTypeCache;
@@ -173,7 +178,7 @@
   bool IsJavaLangObjectArray() const
       SHARED_REQUIRES(Locks::mutator_lock_);
   bool IsInstantiableTypes() const SHARED_REQUIRES(Locks::mutator_lock_);
-  const std::string& GetDescriptor() const {
+  const StringPiece& GetDescriptor() const {
     DCHECK(HasClass() ||
            (IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
             !IsUnresolvedSuperClass()));
@@ -274,10 +279,20 @@
   void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) const
       SHARED_REQUIRES(Locks::mutator_lock_);
 
+  static void* operator new(size_t size) noexcept {
+    return ::operator new(size);
+  }
+
+  static void* operator new(size_t size, ArenaAllocator* arena) = delete;
+  static void* operator new(size_t size, ScopedArenaAllocator* arena);
+
  protected:
-  RegType(mirror::Class* klass, const std::string& descriptor,
+  RegType(mirror::Class* klass,
+          const StringPiece& descriptor,
           uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
-      : descriptor_(descriptor), klass_(klass), cache_id_(cache_id) {
+      : descriptor_(descriptor),
+        klass_(klass),
+        cache_id_(cache_id) {
     if (kIsDebugBuild) {
       CheckInvariants();
     }
@@ -285,9 +300,8 @@
 
   void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
 
-  const std::string descriptor_;
-  mutable GcRoot<mirror::Class>
-      klass_;  // Non-const only due to moving classes.
+  const StringPiece descriptor_;
+  mutable GcRoot<mirror::Class> klass_;  // Non-const only due to moving classes.
   const uint16_t cache_id_;
 
   friend class RegTypeCache;
@@ -311,7 +325,7 @@
 
   // Create the singleton instance.
   static const ConflictType* CreateInstance(mirror::Class* klass,
-                                            const std::string& descriptor,
+                                            const StringPiece& descriptor,
                                             uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -319,7 +333,7 @@
   static void Destroy();
 
  private:
-  ConflictType(mirror::Class* klass, const std::string& descriptor,
+  ConflictType(mirror::Class* klass, const StringPiece& descriptor,
                uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : RegType(klass, descriptor, cache_id) {}
 
@@ -340,7 +354,7 @@
 
   // Create the singleton instance.
   static const UndefinedType* CreateInstance(mirror::Class* klass,
-                                             const std::string& descriptor,
+                                             const StringPiece& descriptor,
                                              uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -348,7 +362,7 @@
   static void Destroy();
 
  private:
-  UndefinedType(mirror::Class* klass, const std::string& descriptor,
+  UndefinedType(mirror::Class* klass, const StringPiece& descriptor,
                 uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : RegType(klass, descriptor, cache_id) {}
 
@@ -357,7 +371,7 @@
 
 class PrimitiveType : public RegType {
  public:
-  PrimitiveType(mirror::Class* klass, const std::string& descriptor,
+  PrimitiveType(mirror::Class* klass, const StringPiece& descriptor,
                 uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
 
   bool HasClassVirtual() const OVERRIDE { return true; }
@@ -365,7 +379,7 @@
 
 class Cat1Type : public PrimitiveType {
  public:
-  Cat1Type(mirror::Class* klass, const std::string& descriptor,
+  Cat1Type(mirror::Class* klass, const StringPiece& descriptor,
            uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
@@ -374,14 +388,14 @@
   bool IsInteger() const OVERRIDE { return true; }
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const IntegerType* CreateInstance(mirror::Class* klass,
-                                           const std::string& descriptor,
+                                           const StringPiece& descriptor,
                                            uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const IntegerType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  IntegerType(mirror::Class* klass, const std::string& descriptor,
+  IntegerType(mirror::Class* klass, const StringPiece& descriptor,
               uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const IntegerType* instance_;
@@ -392,14 +406,14 @@
   bool IsBoolean() const OVERRIDE { return true; }
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const BooleanType* CreateInstance(mirror::Class* klass,
-                                           const std::string& descriptor,
+                                           const StringPiece& descriptor,
                                            uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const BooleanType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  BooleanType(mirror::Class* klass, const std::string& descriptor,
+  BooleanType(mirror::Class* klass, const StringPiece& descriptor,
               uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
 
@@ -411,14 +425,14 @@
   bool IsByte() const OVERRIDE { return true; }
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const ByteType* CreateInstance(mirror::Class* klass,
-                                        const std::string& descriptor,
+                                        const StringPiece& descriptor,
                                         uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const ByteType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  ByteType(mirror::Class* klass, const std::string& descriptor,
+  ByteType(mirror::Class* klass, const StringPiece& descriptor,
            uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const ByteType* instance_;
@@ -429,14 +443,14 @@
   bool IsShort() const OVERRIDE { return true; }
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const ShortType* CreateInstance(mirror::Class* klass,
-                                         const std::string& descriptor,
+                                         const StringPiece& descriptor,
                                          uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const ShortType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  ShortType(mirror::Class* klass, const std::string& descriptor,
+  ShortType(mirror::Class* klass, const StringPiece& descriptor,
             uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const ShortType* instance_;
@@ -447,14 +461,14 @@
   bool IsChar() const OVERRIDE { return true; }
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const CharType* CreateInstance(mirror::Class* klass,
-                                        const std::string& descriptor,
+                                        const StringPiece& descriptor,
                                         uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const CharType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  CharType(mirror::Class* klass, const std::string& descriptor,
+  CharType(mirror::Class* klass, const StringPiece& descriptor,
            uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const CharType* instance_;
@@ -465,14 +479,14 @@
   bool IsFloat() const OVERRIDE { return true; }
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   static const FloatType* CreateInstance(mirror::Class* klass,
-                                         const std::string& descriptor,
+                                         const StringPiece& descriptor,
                                          uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const FloatType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  FloatType(mirror::Class* klass, const std::string& descriptor,
+  FloatType(mirror::Class* klass, const StringPiece& descriptor,
             uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat1Type(klass, descriptor, cache_id) {}
   static const FloatType* instance_;
@@ -480,7 +494,7 @@
 
 class Cat2Type : public PrimitiveType {
  public:
-  Cat2Type(mirror::Class* klass, const std::string& descriptor,
+  Cat2Type(mirror::Class* klass, const StringPiece& descriptor,
            uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
 };
 
@@ -490,14 +504,14 @@
   bool IsLongLo() const OVERRIDE { return true; }
   bool IsLong() const OVERRIDE { return true; }
   static const LongLoType* CreateInstance(mirror::Class* klass,
-                                          const std::string& descriptor,
+                                          const StringPiece& descriptor,
                                           uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const LongLoType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  LongLoType(mirror::Class* klass, const std::string& descriptor,
+  LongLoType(mirror::Class* klass, const StringPiece& descriptor,
              uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {}
   static const LongLoType* instance_;
@@ -508,14 +522,14 @@
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   bool IsLongHi() const OVERRIDE { return true; }
   static const LongHiType* CreateInstance(mirror::Class* klass,
-                                          const std::string& descriptor,
+                                          const StringPiece& descriptor,
                                           uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const LongHiType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  LongHiType(mirror::Class* klass, const std::string& descriptor,
+  LongHiType(mirror::Class* klass, const StringPiece& descriptor,
              uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {}
   static const LongHiType* instance_;
@@ -527,14 +541,14 @@
   bool IsDoubleLo() const OVERRIDE { return true; }
   bool IsDouble() const OVERRIDE { return true; }
   static const DoubleLoType* CreateInstance(mirror::Class* klass,
-                                            const std::string& descriptor,
+                                            const StringPiece& descriptor,
                                             uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const DoubleLoType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  DoubleLoType(mirror::Class* klass, const std::string& descriptor,
+  DoubleLoType(mirror::Class* klass, const StringPiece& descriptor,
                uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {}
   static const DoubleLoType* instance_;
@@ -545,14 +559,14 @@
   std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
   virtual bool IsDoubleHi() const OVERRIDE { return true; }
   static const DoubleHiType* CreateInstance(mirror::Class* klass,
-                                      const std::string& descriptor,
+                                      const StringPiece& descriptor,
                                       uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
   static const DoubleHiType* GetInstance() PURE;
   static void Destroy();
 
  private:
-  DoubleHiType(mirror::Class* klass, const std::string& descriptor,
+  DoubleHiType(mirror::Class* klass, const StringPiece& descriptor,
                uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : Cat2Type(klass, descriptor, cache_id) {}
   static const DoubleHiType* instance_;
@@ -677,7 +691,7 @@
 // instructions and must be passed to a constructor.
 class UninitializedType : public RegType {
  public:
-  UninitializedType(mirror::Class* klass, const std::string& descriptor,
+  UninitializedType(mirror::Class* klass, const StringPiece& descriptor,
                     uint32_t allocation_pc, uint16_t cache_id)
       : RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {}
 
@@ -697,7 +711,7 @@
 class UninitializedReferenceType FINAL : public UninitializedType {
  public:
   UninitializedReferenceType(mirror::Class* klass,
-                             const std::string& descriptor,
+                             const StringPiece& descriptor,
                              uint32_t allocation_pc, uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_)
       : UninitializedType(klass, descriptor, allocation_pc, cache_id) {}
@@ -713,7 +727,7 @@
 // constructor.
 class UnresolvedUninitializedRefType FINAL : public UninitializedType {
  public:
-  UnresolvedUninitializedRefType(const std::string& descriptor,
+  UnresolvedUninitializedRefType(const StringPiece& descriptor,
                                  uint32_t allocation_pc, uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_)
       : UninitializedType(nullptr, descriptor, allocation_pc, cache_id) {
@@ -737,7 +751,7 @@
 class UninitializedThisReferenceType FINAL : public UninitializedType {
  public:
   UninitializedThisReferenceType(mirror::Class* klass,
-                                 const std::string& descriptor,
+                                 const StringPiece& descriptor,
                                  uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_)
       : UninitializedType(klass, descriptor, 0, cache_id) {
@@ -758,7 +772,7 @@
 
 class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
  public:
-  UnresolvedUninitializedThisRefType(const std::string& descriptor,
+  UnresolvedUninitializedThisRefType(const StringPiece& descriptor,
                                      uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_)
       : UninitializedType(nullptr, descriptor, 0, cache_id) {
@@ -781,7 +795,7 @@
 // sub-class.
 class ReferenceType FINAL : public RegType {
  public:
-  ReferenceType(mirror::Class* klass, const std::string& descriptor,
+  ReferenceType(mirror::Class* klass, const StringPiece& descriptor,
                 uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
       : RegType(klass, descriptor, cache_id) {}
 
@@ -799,7 +813,7 @@
 // type.
 class PreciseReferenceType FINAL : public RegType {
  public:
-  PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
+  PreciseReferenceType(mirror::Class* klass, const StringPiece& descriptor,
                        uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -815,7 +829,7 @@
 // Common parent of unresolved types.
 class UnresolvedType : public RegType {
  public:
-  UnresolvedType(const std::string& descriptor, uint16_t cache_id)
+  UnresolvedType(const StringPiece& descriptor, uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_)
       : RegType(nullptr, descriptor, cache_id) {}
 
@@ -827,7 +841,7 @@
 // of this type must be conservative.
 class UnresolvedReferenceType FINAL : public UnresolvedType {
  public:
-  UnresolvedReferenceType(const std::string& descriptor, uint16_t cache_id)
+  UnresolvedReferenceType(const StringPiece& descriptor, uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_)
       : UnresolvedType(descriptor, cache_id) {
     if (kIsDebugBuild) {
@@ -882,8 +896,10 @@
 class UnresolvedMergedType FINAL : public UnresolvedType {
  public:
   // Note: the constructor will copy the unresolved BitVector, not use it directly.
-  UnresolvedMergedType(const RegType& resolved, const BitVector& unresolved,
-                       const RegTypeCache* reg_type_cache, uint16_t cache_id)
+  UnresolvedMergedType(const RegType& resolved,
+                       const BitVector& unresolved,
+                       const RegTypeCache* reg_type_cache,
+                       uint16_t cache_id)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   // The resolved part. See description below.
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index b6f253b..68af62e 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -118,6 +118,18 @@
   }
 }
 
+template <class RegTypeType>
+inline RegTypeType& RegTypeCache::AddEntry(RegTypeType* new_entry) {
+  DCHECK(new_entry != nullptr);
+  entries_.push_back(new_entry);
+  if (new_entry->HasClass()) {
+    mirror::Class* klass = new_entry->GetClass();
+    DCHECK(!klass->IsPrimitive());
+    klass_entries_.push_back(std::make_pair(GcRoot<mirror::Class>(klass), new_entry));
+  }
+  return *new_entry;
+}
+
 }  // namespace verifier
 }  // namespace art
 #endif  // ART_RUNTIME_VERIFIER_REG_TYPE_CACHE_INL_H_
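
The templated AddEntry above replaces the old void-returning helper: it appends the entry to
entries_, records class-bearing entries in the klass_entries_ side table for fast class
lookups, and hands the entry back by reference with its concrete type, so call sites below can
be written as `return AddEntry(new (&arena_) SomeType(...));`. A rough sketch of that API
shape, with a hypothetical Entry/Cache pair (heap-allocated and without the HasClass() and
precision checks of the real cache), is:

    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    struct Entry {                     // Hypothetical stand-in for RegType.
      explicit Entry(uint32_t klass) : klass_(klass) {}
      uint32_t klass_;
    };

    class Cache {
     public:
      ~Cache() { for (Entry* entry : entries_) delete entry; }

      // Returns the passed-in entry by reference, preserving its concrete type,
      // after registering it in the generic list and in the class side table.
      template <typename T>
      T& AddEntry(T* new_entry) {
        entries_.push_back(new_entry);
        klass_entries_.emplace_back(new_entry->klass_, new_entry);
        return *new_entry;
      }

      Entry& FromClass(uint32_t klass) {
        for (auto& pair : klass_entries_) {        // Fast path: scan the side table.
          if (pair.first == klass) return *pair.second;
        }
        return AddEntry(new Entry(klass));         // Miss: create and register.
      }

     private:
      std::vector<Entry*> entries_;                               // All entries.
      std::vector<std::pair<uint32_t, Entry*>> klass_entries_;    // class id -> entry.
    };

    int main() {
      Cache cache;
      Entry& first = cache.FromClass(7);
      Entry& second = cache.FromClass(7);
      std::printf("same entry: %s\n", (&first == &second) ? "yes" : "no");
    }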
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index bb756e9..71ed4a2 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -16,7 +16,9 @@
 
 #include "reg_type_cache-inl.h"
 
+#include "base/arena_bit_vector.h"
 #include "base/casts.h"
+#include "base/scoped_arena_allocator.h"
 #include "base/stl_util.h"
 #include "class_linker-inl.h"
 #include "dex_file-inl.h"
@@ -29,9 +31,10 @@
 
 bool RegTypeCache::primitive_initialized_ = false;
 uint16_t RegTypeCache::primitive_count_ = 0;
-const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
+const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant -
+                                                               kMinSmallConstant + 1];
 
-static bool MatchingPrecisionForClass(const RegType* entry, bool precise)
+ALWAYS_INLINE static inline bool MatchingPrecisionForClass(const RegType* entry, bool precise)
     SHARED_REQUIRES(Locks::mutator_lock_) {
   if (entry->IsPreciseReference() == precise) {
     // We were or weren't looking for a precise reference and we found what we need.
@@ -67,7 +70,8 @@
   DCHECK_EQ(entries_.size(), primitive_count_);
 }
 
-const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
+const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader,
+                                            const char* descriptor,
                                             bool precise) {
   DCHECK(RegTypeCache::primitive_initialized_);
   if (descriptor[1] == '\0') {
@@ -159,13 +163,20 @@
   return klass;
 }
 
-const RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
+StringPiece RegTypeCache::AddString(const StringPiece& string_piece) {
+  char* ptr = arena_.AllocArray<char>(string_piece.length());
+  memcpy(ptr, string_piece.data(), string_piece.length());
+  return StringPiece(ptr, string_piece.length());
+}
+
+const RegType& RegTypeCache::From(mirror::ClassLoader* loader,
+                                  const char* descriptor,
                                   bool precise) {
+  StringPiece sp_descriptor(descriptor);
   // Try looking up the class in the cache first. We use a StringPiece to avoid continual strlen
   // operations on the descriptor.
-  StringPiece descriptor_sp(descriptor);
   for (size_t i = primitive_count_; i < entries_.size(); i++) {
-    if (MatchDescriptor(i, descriptor_sp, precise)) {
+    if (MatchDescriptor(i, sp_descriptor, precise)) {
       return *(entries_[i]);
     }
   }
@@ -186,12 +197,11 @@
     if (klass->CannotBeAssignedFromOtherTypes() || precise) {
       DCHECK(!(klass->IsAbstract()) || klass->IsArrayClass());
       DCHECK(!klass->IsInterface());
-      entry = new PreciseReferenceType(klass, descriptor_sp.as_string(), entries_.size());
+      entry = new (&arena_) PreciseReferenceType(klass, AddString(sp_descriptor), entries_.size());
     } else {
-      entry = new ReferenceType(klass, descriptor_sp.as_string(), entries_.size());
+      entry = new (&arena_) ReferenceType(klass, AddString(sp_descriptor), entries_.size());
     }
-    AddEntry(entry);
-    return *entry;
+    return AddEntry(entry);
   } else {  // Class not resolved.
     // We tried loading the class and failed, this might get an exception raised
     // so we want to clear it before we go on.
@@ -202,9 +212,8 @@
       DCHECK(!Thread::Current()->IsExceptionPending());
     }
     if (IsValidDescriptor(descriptor)) {
-      RegType* entry = new UnresolvedReferenceType(descriptor_sp.as_string(), entries_.size());
-      AddEntry(entry);
-      return *entry;
+      return AddEntry(
+          new (&arena_) UnresolvedReferenceType(AddString(sp_descriptor), entries_.size()));
     } else {
       // The descriptor is broken return the unknown type as there's nothing sensible that
       // could be done at runtime
@@ -213,50 +222,65 @@
   }
 }
 
-const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+const RegType* RegTypeCache::FindClass(mirror::Class* klass, bool precise) const {
   DCHECK(klass != nullptr);
   if (klass->IsPrimitive()) {
     // Note: precise isn't used for primitive classes. A char is assignable to an int. All
     // primitive classes are final.
-    return RegTypeFromPrimitiveType(klass->GetPrimitiveType());
-  } else {
-    // Look for the reference in the list of entries to have.
-    for (size_t i = primitive_count_; i < entries_.size(); i++) {
-      const RegType* cur_entry = entries_[i];
-      if (cur_entry->klass_.Read() == klass && MatchingPrecisionForClass(cur_entry, precise)) {
-        return *cur_entry;
+    return &RegTypeFromPrimitiveType(klass->GetPrimitiveType());
+  }
+  for (auto& pair : klass_entries_) {
+    mirror::Class* const reg_klass = pair.first.Read();
+    if (reg_klass == klass) {
+      const RegType* reg_type = pair.second;
+      if (MatchingPrecisionForClass(reg_type, precise)) {
+        return reg_type;
       }
     }
-    // No reference to the class was found, create new reference.
-    RegType* entry;
-    if (precise) {
-      entry = new PreciseReferenceType(klass, descriptor, entries_.size());
-    } else {
-      entry = new ReferenceType(klass, descriptor, entries_.size());
-    }
-    AddEntry(entry);
-    return *entry;
   }
+  return nullptr;
 }
 
-RegTypeCache::RegTypeCache(bool can_load_classes) : can_load_classes_(can_load_classes) {
+const RegType* RegTypeCache::InsertClass(const StringPiece& descriptor,
+                                         mirror::Class* klass,
+                                         bool precise) {
+  // No reference to the class was found, create new reference.
+  DCHECK(FindClass(klass, precise) == nullptr);
+  RegType* const reg_type = precise
+      ? static_cast<RegType*>(
+          new (&arena_) PreciseReferenceType(klass, descriptor, entries_.size()))
+      : new (&arena_) ReferenceType(klass, descriptor, entries_.size());
+  return &AddEntry(reg_type);
+}
+
+const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+  DCHECK(klass != nullptr);
+  const RegType* reg_type = FindClass(klass, precise);
+  if (reg_type == nullptr) {
+    reg_type = InsertClass(AddString(StringPiece(descriptor)), klass, precise);
+  }
+  return *reg_type;
+}
+
+RegTypeCache::RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena)
+    : entries_(arena.Adapter(kArenaAllocVerifier)),
+      klass_entries_(arena.Adapter(kArenaAllocVerifier)),
+      can_load_classes_(can_load_classes),
+      arena_(arena) {
   if (kIsDebugBuild) {
     Thread::Current()->AssertThreadSuspensionIsAllowable(gAborting == 0);
   }
-  entries_.reserve(64);
+  // The klass_entries_ vector does not contain primitives or small constants.
+  static constexpr size_t kNumReserveEntries = 32;
+  klass_entries_.reserve(kNumReserveEntries);
+  // We want to have room for additional entries after inserting primitives and small
+  // constants.
+  entries_.reserve(kNumReserveEntries + kNumPrimitivesAndSmallConstants);
   FillPrimitiveAndSmallConstantTypes();
 }
 
 RegTypeCache::~RegTypeCache() {
-  CHECK_LE(primitive_count_, entries_.size());
-  // Delete only the non primitive types.
-  if (entries_.size() == kNumPrimitivesAndSmallConstants) {
-    // All entries are from the global pool, nothing to delete.
-    return;
-  }
-  std::vector<const RegType*>::iterator non_primitive_begin = entries_.begin();
-  std::advance(non_primitive_begin, kNumPrimitivesAndSmallConstants);
-  STLDeleteContainerPointers(non_primitive_begin, entries_.end());
+  DCHECK_LE(primitive_count_, entries_.size());
 }
 
 void RegTypeCache::ShutDown() {
@@ -318,9 +342,9 @@
 }
 
 const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) {
-  BitVector types(1,                                    // Allocate at least a word.
-                  true,                                 // Is expandable.
-                  Allocator::GetMallocAllocator());     // TODO: Arenas in the verifier.
+  ArenaBitVector types(&arena_,
+                       kDefaultArenaBitVectorBytes * kBitsPerByte,  // Allocate at least 8 bytes.
+                       true);                                       // Is expandable.
   const RegType* left_resolved;
   if (left.IsUnresolvedMergedReference()) {
     const UnresolvedMergedType* left_merge = down_cast<const UnresolvedMergedType*>(&left);
@@ -361,20 +385,15 @@
       const BitVector& unresolved_part = cmp_type->GetUnresolvedTypes();
       // Use SameBitsSet. "types" is expandable to allow merging in the components, but the
       // BitVector in the final RegType will be made non-expandable.
-      if (&resolved_part == &resolved_parts_merged &&
-              types.SameBitsSet(&unresolved_part)) {
+      if (&resolved_part == &resolved_parts_merged && types.SameBitsSet(&unresolved_part)) {
         return *cur_entry;
       }
     }
   }
-
-  // Create entry.
-  RegType* entry = new UnresolvedMergedType(resolved_parts_merged,
-                                            types,
-                                            this,
-                                            entries_.size());
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(new (&arena_) UnresolvedMergedType(resolved_parts_merged,
+                                                     types,
+                                                     this,
+                                                     entries_.size()));
 }
 
 const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
@@ -391,14 +410,12 @@
       }
     }
   }
-  RegType* entry = new UnresolvedSuperClass(child.GetId(), this, entries_.size());
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(new (&arena_) UnresolvedSuperClass(child.GetId(), this, entries_.size()));
 }
 
 const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
   UninitializedType* entry = nullptr;
-  const std::string& descriptor(type.GetDescriptor());
+  const StringPiece& descriptor(type.GetDescriptor());
   if (type.IsUnresolvedTypes()) {
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
       const RegType* cur_entry = entries_[i];
@@ -409,7 +426,9 @@
         return *down_cast<const UnresolvedUninitializedRefType*>(cur_entry);
       }
     }
-    entry = new UnresolvedUninitializedRefType(descriptor, allocation_pc, entries_.size());
+    entry = new (&arena_) UnresolvedUninitializedRefType(descriptor,
+                                                         allocation_pc,
+                                                         entries_.size());
   } else {
     mirror::Class* klass = type.GetClass();
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -421,17 +440,19 @@
         return *down_cast<const UninitializedReferenceType*>(cur_entry);
       }
     }
-    entry = new UninitializedReferenceType(klass, descriptor, allocation_pc, entries_.size());
+    entry = new (&arena_) UninitializedReferenceType(klass,
+                                                     descriptor,
+                                                     allocation_pc,
+                                                     entries_.size());
   }
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(entry);
 }
 
 const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
   RegType* entry;
 
   if (uninit_type.IsUnresolvedTypes()) {
-    const std::string& descriptor(uninit_type.GetDescriptor());
+    const StringPiece& descriptor(uninit_type.GetDescriptor());
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
       const RegType* cur_entry = entries_[i];
       if (cur_entry->IsUnresolvedReference() &&
@@ -439,7 +460,7 @@
         return *cur_entry;
       }
     }
-    entry = new UnresolvedReferenceType(descriptor, entries_.size());
+    entry = new (&arena_) UnresolvedReferenceType(descriptor, entries_.size());
   } else {
     mirror::Class* klass = uninit_type.GetClass();
     if (uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) {
@@ -450,7 +471,7 @@
           return *cur_entry;
         }
       }
-      entry = new ReferenceType(klass, "", entries_.size());
+      entry = new (&arena_) ReferenceType(klass, "", entries_.size());
     } else if (!klass->IsPrimitive()) {
       // We're uninitialized because of allocation, look for or create a precise type as allocations
       // may only create objects of that type.
@@ -469,18 +490,19 @@
           return *cur_entry;
         }
       }
-      entry = new PreciseReferenceType(klass, uninit_type.GetDescriptor(), entries_.size());
+      entry = new (&arena_) PreciseReferenceType(klass,
+                                                 uninit_type.GetDescriptor(),
+                                                 entries_.size());
     } else {
       return Conflict();
     }
   }
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(entry);
 }
 
 const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
   UninitializedType* entry;
-  const std::string& descriptor(type.GetDescriptor());
+  const StringPiece& descriptor(type.GetDescriptor());
   if (type.IsUnresolvedTypes()) {
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
       const RegType* cur_entry = entries_[i];
@@ -489,7 +511,7 @@
         return *down_cast<const UninitializedType*>(cur_entry);
       }
     }
-    entry = new UnresolvedUninitializedThisRefType(descriptor, entries_.size());
+    entry = new (&arena_) UnresolvedUninitializedThisRefType(descriptor, entries_.size());
   } else {
     mirror::Class* klass = type.GetClass();
     for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -498,10 +520,9 @@
         return *down_cast<const UninitializedType*>(cur_entry);
       }
     }
-    entry = new UninitializedThisReferenceType(klass, descriptor, entries_.size());
+    entry = new (&arena_) UninitializedThisReferenceType(klass, descriptor, entries_.size());
   }
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(entry);
 }
 
 const ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
@@ -515,12 +536,11 @@
   }
   ConstantType* entry;
   if (precise) {
-    entry = new PreciseConstType(value, entries_.size());
+    entry = new (&arena_) PreciseConstType(value, entries_.size());
   } else {
-    entry = new ImpreciseConstType(value, entries_.size());
+    entry = new (&arena_) ImpreciseConstType(value, entries_.size());
   }
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(entry);
 }
 
 const ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
@@ -533,12 +553,11 @@
   }
   ConstantType* entry;
   if (precise) {
-    entry = new PreciseConstLoType(value, entries_.size());
+    entry = new (&arena_) PreciseConstLoType(value, entries_.size());
   } else {
-    entry = new ImpreciseConstLoType(value, entries_.size());
+    entry = new (&arena_) ImpreciseConstLoType(value, entries_.size());
   }
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(entry);
 }
 
 const ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
@@ -551,32 +570,30 @@
   }
   ConstantType* entry;
   if (precise) {
-    entry = new PreciseConstHiType(value, entries_.size());
+    entry = new (&arena_) PreciseConstHiType(value, entries_.size());
   } else {
-    entry = new ImpreciseConstHiType(value, entries_.size());
+    entry = new (&arena_) ImpreciseConstHiType(value, entries_.size());
   }
-  AddEntry(entry);
-  return *entry;
+  return AddEntry(entry);
 }
 
 const RegType& RegTypeCache::GetComponentType(const RegType& array, mirror::ClassLoader* loader) {
   if (!array.IsArrayTypes()) {
     return Conflict();
   } else if (array.IsUnresolvedTypes()) {
-    const std::string& descriptor(array.GetDescriptor());
-    const std::string component(descriptor.substr(1, descriptor.size() - 1));
-    return FromDescriptor(loader, component.c_str(), false);
+    const std::string descriptor(array.GetDescriptor().as_string());
+    return FromDescriptor(loader, descriptor.c_str() + 1, false);
   } else {
     mirror::Class* klass = array.GetClass()->GetComponentType();
     std::string temp;
+    const char* descriptor = klass->GetDescriptor(&temp);
     if (klass->IsErroneous()) {
       // Arrays may have erroneous component types, use unresolved in that case.
       // We assume that the primitive classes are not erroneous, so we know it is a
       // reference type.
-      return FromDescriptor(loader, klass->GetDescriptor(&temp), false);
+      return FromDescriptor(loader, descriptor, false);
     } else {
-      return FromClass(klass->GetDescriptor(&temp), klass,
-                       klass->CannotBeAssignedFromOtherTypes());
+      return FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes());
     }
   }
 }
@@ -618,10 +635,10 @@
   for (size_t i = primitive_count_; i < entries_.size(); ++i) {
     entries_[i]->VisitRoots(visitor, root_info);
   }
-}
-
-void RegTypeCache::AddEntry(RegType* new_entry) {
-  entries_.push_back(new_entry);
+  for (auto& pair : klass_entries_) {
+    GcRoot<mirror::Class>& root = pair.first;
+    root.VisitRoot(visitor, root_info);
+  }
 }
 
 }  // namespace verifier
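
Because RegType now holds a StringPiece, which does not own its characters, the cache's
AddString above copies every descriptor into arena-owned storage so the piece stays valid for
as long as the cache (and hence the verifier) lives. The same idea in a standalone sketch,
using std::string_view in place of art::StringPiece and a deque-backed interner as a stand-in
for the arena (both substitutions are assumptions):

    #include <cstdio>
    #include <deque>
    #include <string>
    #include <string_view>

    // Copies a view's bytes into storage the interner owns and returns a view over
    // the copy, so callers may hold the returned view for the interner's lifetime
    // even after the original buffer is gone.
    class StringInterner {
     public:
      std::string_view Add(std::string_view piece) {
        storage_.emplace_back(piece);              // Copy into stable storage.
        return std::string_view(storage_.back());
      }
     private:
      std::deque<std::string> storage_;            // Deque elements never move.
    };

    int main() {
      StringInterner interner;
      std::string_view durable;
      {
        std::string temporary = "Ljava/lang/Object;";
        durable = interner.Add(temporary);         // The copy outlives `temporary`.
      }
      std::printf("%.*s\n", static_cast<int>(durable.size()), durable.data());
    }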
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 93948a1..6f9a04e 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -19,6 +19,7 @@
 
 #include "base/casts.h"
 #include "base/macros.h"
+#include "base/scoped_arena_containers.h"
 #include "object_callbacks.h"
 #include "reg_type.h"
 #include "runtime.h"
@@ -31,15 +32,19 @@
   class Class;
   class ClassLoader;
 }  // namespace mirror
+class ScopedArenaAllocator;
 class StringPiece;
 
 namespace verifier {
 
 class RegType;
 
+// Use 8 bytes since that is the default arena allocator alignment.
+static constexpr size_t kDefaultArenaBitVectorBytes = 8;
+
 class RegTypeCache {
  public:
-  explicit RegTypeCache(bool can_load_classes);
+  explicit RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena);
   ~RegTypeCache();
   static void Init() SHARED_REQUIRES(Locks::mutator_lock_) {
     if (!RegTypeCache::primitive_initialized_) {
@@ -53,6 +58,13 @@
   const art::verifier::RegType& GetFromId(uint16_t id) const;
   const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
       SHARED_REQUIRES(Locks::mutator_lock_);
+  // Find the RegType for the given class and precision; returns null if not found.
+  const RegType* FindClass(mirror::Class* klass, bool precise) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  // Insert a new class with the given descriptor; it must not already be in the cache.
+  const RegType* InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool precise)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+  // Get or insert a reg type for a descriptor, klass, and precision.
   const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
       SHARED_REQUIRES(Locks::mutator_lock_);
   const ConstantType& FromCat1Const(int32_t value, bool precise)
@@ -150,7 +162,13 @@
   const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void AddEntry(RegType* new_entry);
+  // Returns the passed-in RegType.
+  template <class RegTypeType>
+  RegTypeType& AddEntry(RegTypeType* new_entry) SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Add a string piece to the arena allocator so that it stays live for the lifetime of the
+  // verifier.
+  StringPiece AddString(const StringPiece& string_piece);
 
   template <class Type>
   static const Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
@@ -160,7 +178,8 @@
   // A quick look up for popular small constants.
   static constexpr int32_t kMinSmallConstant = -1;
   static constexpr int32_t kMaxSmallConstant = 4;
-  static const PreciseConstType* small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
+  static const PreciseConstType* small_precise_constants_[kMaxSmallConstant -
+                                                          kMinSmallConstant + 1];
 
   static constexpr size_t kNumPrimitivesAndSmallConstants =
       12 + (kMaxSmallConstant - kMinSmallConstant + 1);
@@ -172,11 +191,17 @@
   static uint16_t primitive_count_;
 
   // The actual storage for the RegTypes.
-  std::vector<const RegType*> entries_;
+  ScopedArenaVector<const RegType*> entries_;
+
+  // Fast lookup for quickly finding entries that have a matching class.
+  ScopedArenaVector<std::pair<GcRoot<mirror::Class>, const RegType*>> klass_entries_;
 
   // Whether or not we're allowed to load classes.
   const bool can_load_classes_;
 
+  // Arena allocator.
+  ScopedArenaAllocator& arena_;
+
   DISALLOW_COPY_AND_ASSIGN(RegTypeCache);
 };
 
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 971b1f5..22ac7e4 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -20,6 +20,7 @@
 
 #include "base/bit_vector.h"
 #include "base/casts.h"
+#include "base/scoped_arena_allocator.h"
 #include "common_runtime_test.h"
 #include "reg_type_cache-inl.h"
 #include "reg_type-inl.h"
@@ -29,12 +30,23 @@
 namespace art {
 namespace verifier {
 
-class RegTypeTest : public CommonRuntimeTest {};
+class BaseRegTypeTest : public CommonRuntimeTest {
+ public:
+  void PostRuntimeCreate() OVERRIDE {
+    stack.reset(new ArenaStack(Runtime::Current()->GetArenaPool()));
+    allocator.reset(new ScopedArenaAllocator(stack.get()));
+  }
+
+  std::unique_ptr<ArenaStack> stack;
+  std::unique_ptr<ScopedArenaAllocator> allocator;
+};
+
+class RegTypeTest : public BaseRegTypeTest {};
 
 TEST_F(RegTypeTest, ConstLoHi) {
   // Tests creating primitive types types.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
   const RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
   const RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
@@ -56,7 +68,7 @@
 
 TEST_F(RegTypeTest, Pairs) {
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   int64_t val = static_cast<int32_t>(1234);
   const RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
   const RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
@@ -80,7 +92,7 @@
 
 TEST_F(RegTypeTest, Primitives) {
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
 
   const RegType& bool_reg_type = cache.Boolean();
   EXPECT_FALSE(bool_reg_type.IsUndefined());
@@ -347,13 +359,13 @@
   EXPECT_TRUE(double_reg_type.HasClass());
 }
 
-class RegTypeReferenceTest : public CommonRuntimeTest {};
+class RegTypeReferenceTest : public BaseRegTypeTest {};
 
 TEST_F(RegTypeReferenceTest, JavalangObjectImprecise) {
   // Tests matching precisions. A reference type that was created precise doesn't
   // match the one that is imprecise.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& imprecise_obj = cache.JavaLangObject(false);
   const RegType& precise_obj = cache.JavaLangObject(true);
   const RegType& precise_obj_2 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true);
@@ -368,7 +380,7 @@
   // Tests creating unresolved types. Miss for the first time asking the cache and
   // a hit second time.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
   EXPECT_TRUE(ref_type_0.IsNonZeroReferenceTypes());
@@ -384,7 +396,7 @@
 TEST_F(RegTypeReferenceTest, UnresolvedUnintializedType) {
   // Tests creating types uninitialized types from unresolved types.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
   const RegType& ref_type = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
@@ -406,7 +418,7 @@
 TEST_F(RegTypeReferenceTest, Dump) {
   // Tests types for proper Dump messages.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& unresolved_ref = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
   const RegType& unresolved_ref_another = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExistEither;", true);
   const RegType& resolved_ref = cache.JavaLangString();
@@ -431,7 +443,7 @@
   // Hit the second time. Then check for the same effect when using
   // The JavaLangObject method instead of FromDescriptor. String class is final.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& ref_type = cache.JavaLangString();
   const RegType& ref_type_2 = cache.JavaLangString();
   const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/String;", true);
@@ -451,7 +463,7 @@
   // Hit the second time. Then I am checking for the same effect when using
   // The JavaLangObject method instead of FromDescriptor. Object Class in not final.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache(true);
+  RegTypeCache cache(true, *allocator);
   const RegType& ref_type = cache.JavaLangObject(true);
   const RegType& ref_type_2 = cache.JavaLangObject(true);
   const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true);
@@ -464,7 +476,7 @@
   // Tests merging logic
   // String and object , LUB is object.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true);
+  RegTypeCache cache_new(true, *allocator);
   const RegType& string = cache_new.JavaLangString();
   const RegType& Object = cache_new.JavaLangObject(true);
   EXPECT_TRUE(string.Merge(Object, &cache_new).IsJavaLangObject());
@@ -487,7 +499,7 @@
 TEST_F(RegTypeTest, MergingFloat) {
   // Testing merging logic with float and float constants.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true);
+  RegTypeCache cache_new(true, *allocator);
 
   constexpr int32_t kTestConstantValue = 10;
   const RegType& float_type = cache_new.Float();
@@ -518,7 +530,7 @@
 TEST_F(RegTypeTest, MergingLong) {
   // Testing merging logic with long and long constants.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true);
+  RegTypeCache cache_new(true, *allocator);
 
   constexpr int32_t kTestConstantValue = 10;
   const RegType& long_lo_type = cache_new.LongLo();
@@ -572,7 +584,7 @@
 TEST_F(RegTypeTest, MergingDouble) {
   // Testing merging logic with double and double constants.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true);
+  RegTypeCache cache_new(true, *allocator);
 
   constexpr int32_t kTestConstantValue = 10;
   const RegType& double_lo_type = cache_new.DoubleLo();
@@ -626,7 +638,7 @@
 TEST_F(RegTypeTest, ConstPrecision) {
   // Tests creating primitive types types.
   ScopedObjectAccess soa(Thread::Current());
-  RegTypeCache cache_new(true);
+  RegTypeCache cache_new(true, *allocator);
   const RegType& imprecise_const = cache_new.FromCat1Const(10, false);
   const RegType& precise_const = cache_new.FromCat1Const(10, true);
 
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 1df2428..57fb701 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -182,6 +182,21 @@
   }
 }
 
+inline RegisterLine* RegisterLine::Create(size_t num_regs, MethodVerifier* verifier) {
+  void* memory = verifier->GetArena().Alloc(OFFSETOF_MEMBER(RegisterLine, line_) +
+                                                (num_regs * sizeof(uint16_t)));
+  return new (memory) RegisterLine(num_regs, verifier);
+}
+
+inline RegisterLine::RegisterLine(size_t num_regs, MethodVerifier* verifier)
+    : num_regs_(num_regs),
+      monitors_(verifier->GetArena().Adapter(kArenaAllocVerifier)),
+      reg_to_lock_depths_(std::less<uint32_t>(), verifier->GetArena().Adapter(kArenaAllocVerifier)),
+      this_initialized_(false) {
+  std::uninitialized_fill_n(line_, num_regs_, 0u);
+  SetResultTypeToUnknown(verifier);
+}
+
 }  // namespace verifier
 }  // namespace art
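
RegisterLine stores one uint16_t type id per dex register in a trailing array, so the Create
method above computes the allocation size with OFFSETOF_MEMBER and placement-constructs the
line in arena memory; the `line_[1]` member in register_line.h below is only a placeholder for
that variable-length tail. A simplified sketch of the trailing-array pattern, substituting
malloc/offsetof for ART's arena and OFFSETOF_MEMBER macro (both substitutions are assumptions):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <memory>
    #include <new>

    // A variable-length object: fixed header fields followed by num_regs slots.
    class ToyLine {
     public:
      static ToyLine* Create(size_t num_regs) {
        // Allocate the header plus the real tail, then construct in place.
        void* memory = std::malloc(offsetof(ToyLine, line_) + num_regs * sizeof(uint16_t));
        return new (memory) ToyLine(num_regs);
      }
      static void Destroy(ToyLine* line) {
        line->~ToyLine();
        std::free(line);
      }
      // Valid for i < NumRegs(): Create reserved the space past the declared bound,
      // the same trailing-array trick the hunks above rely on.
      uint16_t& Reg(size_t i) { return line_[i]; }
      size_t NumRegs() const { return num_regs_; }

     private:
      explicit ToyLine(size_t num_regs) : num_regs_(num_regs) {
        std::uninitialized_fill_n(line_, num_regs_, uint16_t{0});
      }
      const size_t num_regs_;
      uint16_t line_[1];  // Placeholder; the real length is fixed at allocation time.
    };

    int main() {
      ToyLine* line = ToyLine::Create(8);
      line->Reg(3) = 42;
      std::printf("regs=%zu line[3]=%u\n", line->NumRegs(), static_cast<unsigned>(line->Reg(3)));
      ToyLine::Destroy(line);
    }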
 
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index f48b1e1..37343b5 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -412,12 +412,9 @@
   }
 }
 
-// Check whether there is another register in the search map that is locked the same way as the
-// register in the src map. This establishes an alias.
-static bool FindLockAliasedRegister(
-    uint32_t src,
-    const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& src_map,
-    const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& search_map) {
+bool FindLockAliasedRegister(uint32_t src,
+                             const RegisterLine::RegToLockDepthsMap& src_map,
+                             const RegisterLine::RegToLockDepthsMap& search_map) {
   auto it = src_map.find(src);
   if (it == src_map.end()) {
     // "Not locked" is trivially aliased.
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 46db1c6..b2f5555 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -20,6 +20,7 @@
 #include <memory>
 #include <vector>
 
+#include "base/scoped_arena_containers.h"
 #include "safe_map.h"
 
 namespace art {
@@ -58,11 +59,11 @@
 // stack of entered monitors (identified by code unit offset).
 class RegisterLine {
  public:
-  static RegisterLine* Create(size_t num_regs, MethodVerifier* verifier) {
-    void* memory = operator new(sizeof(RegisterLine) + (num_regs * sizeof(uint16_t)));
-    RegisterLine* rl = new (memory) RegisterLine(num_regs, verifier);
-    return rl;
-  }
+  // A map from register to a bit vector of indices into the monitors_ stack.
+  using RegToLockDepthsMap = ScopedArenaSafeMap<uint32_t, uint32_t>;
+
+  // Create a register line of num_regs registers.
+  static RegisterLine* Create(size_t num_regs, MethodVerifier* verifier);
 
   // Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst".
   void CopyRegister1(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc, TypeCategory cat)
@@ -311,11 +312,11 @@
   // Write a bit at each register location that holds a reference.
   void WriteReferenceBitMap(MethodVerifier* verifier, std::vector<uint8_t>* data, size_t max_bytes);
 
-  size_t GetMonitorEnterCount() {
+  size_t GetMonitorEnterCount() const {
     return monitors_.size();
   }
 
-  uint32_t GetMonitorEnterDexPc(size_t i) {
+  uint32_t GetMonitorEnterDexPc(size_t i) const {
     return monitors_[i];
   }
 
@@ -375,11 +376,7 @@
     reg_to_lock_depths_.erase(reg);
   }
 
-  RegisterLine(size_t num_regs, MethodVerifier* verifier)
-      : num_regs_(num_regs), this_initialized_(false) {
-    memset(&line_, 0, num_regs_ * sizeof(uint16_t));
-    SetResultTypeToUnknown(verifier);
-  }
+  RegisterLine(size_t num_regs, MethodVerifier* verifier);
 
   // Storage for the result register's type, valid after an invocation.
   uint16_t result_[2];
@@ -388,17 +385,18 @@
   const uint32_t num_regs_;
 
   // A stack of monitor enter locations.
-  std::vector<uint32_t, TrackingAllocator<uint32_t, kAllocatorTagVerifier>> monitors_;
+  ScopedArenaVector<uint32_t> monitors_;
+
   // A map from register to a bit vector of indices into the monitors_ stack. As we pop the monitor
   // stack we verify that monitor-enter/exit are correctly nested. That is, if there was a
   // monitor-enter on v5 and then on v6, we expect the monitor-exit to be on v6 then on v5.
-  AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier> reg_to_lock_depths_;
+  RegToLockDepthsMap reg_to_lock_depths_;
 
   // Whether "this" initialization (a constructor supercall) has happened.
   bool this_initialized_;
 
   // An array of RegType Ids associated with each dex register.
-  uint16_t line_[0];
+  uint16_t line_[1];
 
   DISALLOW_COPY_AND_ASSIGN(RegisterLine);
 };
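
The RegToLockDepthsMap above keeps, per register, a bit vector of positions in the monitors_ stack so the verifier can check that monitor-enter/exit pairs nest properly. A minimal Java sketch of the nesting rule being modeled (illustrative only, not ART code; the class and method names are made up):

class LockNestingSketch {
  static void nested(Object a, Object b) {
    synchronized (a) {    // monitor-enter on the register holding `a` (depth 0)
      synchronized (b) {  // monitor-enter on the register holding `b` (depth 1)
        // critical section
      }                   // the matching monitor-exit must be on `b` (depth 1) first...
    }                     // ...and only then on `a` (depth 0)
  }
}
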
diff --git a/test/004-UnsafeTest/src/Main.java b/test/004-UnsafeTest/src/Main.java
index c93db50..5b22e88 100644
--- a/test/004-UnsafeTest/src/Main.java
+++ b/test/004-UnsafeTest/src/Main.java
@@ -129,13 +129,36 @@
         System.out.println("Unexpectedly not succeeding compareAndSwapLong...");
     }
 
-    if (unsafe.compareAndSwapObject(t, objectOffset, null, new Object())) {
+    // We do not use `null` as an argument to sun.misc.Unsafe.compareAndSwapObject
+    // in these tests, as the null reference is not affected by heap poisoning
+    // (which uses address negation to poison and unpoison heap object
+    // references).  This way, when heap poisoning is enabled, we can
+    // better exercise its implementation within that method.
+    if (unsafe.compareAndSwapObject(t, objectOffset, new Object(), new Object())) {
         System.out.println("Unexpectedly succeeding compareAndSwapObject...");
     }
-    if (!unsafe.compareAndSwapObject(t, objectOffset, objectValue, null)) {
+    Object objectValue2 = new Object();
+    if (!unsafe.compareAndSwapObject(t, objectOffset, objectValue, objectValue2)) {
         System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
     }
-    if (!unsafe.compareAndSwapObject(t, objectOffset, null, new Object())) {
+    Object objectValue3 = new Object();
+    if (!unsafe.compareAndSwapObject(t, objectOffset, objectValue2, objectValue3)) {
+        System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
+    }
+
+    // Exercise sun.misc.Unsafe.compareAndSwapObject using the same
+    // object (`t`) for the `obj` and `newValue` arguments.
+    if (!unsafe.compareAndSwapObject(t, objectOffset, objectValue3, t)) {
+        System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
+    }
+    // Exercise sun.misc.Unsafe.compareAndSwapObject using the same
+    // object (`t`) for the `obj`, `expectedValue` and `newValue` arguments.
+    if (!unsafe.compareAndSwapObject(t, objectOffset, t, t)) {
+        System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
+    }
+    // Exercise sun.misc.Unsafe.compareAndSwapObject using the same
+    // object (`t`) for the `obj` and `expectedValue` arguments.
+    if (!unsafe.compareAndSwapObject(t, objectOffset, t, new Object())) {
         System.out.println("Unexpectedly not succeeding compareAndSwapObject...");
     }
   }
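
For reference, a standalone sketch of the sun.misc.Unsafe.compareAndSwapObject semantics the updated checks rely on. This is illustrative only and not part of the test; the reflective theUnsafe lookup and the CasSketch/value names are assumptions that may differ per runtime:

import java.lang.reflect.Field;
import sun.misc.Unsafe;

class CasSketch {
  Object value = new Object();

  static void demo() throws Exception {
    Field f = Unsafe.class.getDeclaredField("theUnsafe");
    f.setAccessible(true);
    Unsafe unsafe = (Unsafe) f.get(null);

    CasSketch holder = new CasSketch();
    long offset = unsafe.objectFieldOffset(CasSketch.class.getDeclaredField("value"));

    Object expected = holder.value;
    Object replacement = new Object();
    // Succeeds: the field currently holds `expected`, so it is swapped to `replacement`.
    boolean first = unsafe.compareAndSwapObject(holder, offset, expected, replacement);
    // Fails: the field now holds `replacement`, so `expected` no longer matches.
    boolean second = unsafe.compareAndSwapObject(holder, offset, expected, new Object());
    System.out.println(first + " " + second);  // prints "true false"
  }
}
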
diff --git a/test/527-checker-array-access-split/expected.txt b/test/527-checker-array-access-split/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/527-checker-array-access-split/expected.txt
diff --git a/test/527-checker-array-access-split/info.txt b/test/527-checker-array-access-split/info.txt
new file mode 100644
index 0000000..9206804
--- /dev/null
+++ b/test/527-checker-array-access-split/info.txt
@@ -0,0 +1 @@
+Test arm64-specific array access optimization.
diff --git a/test/527-checker-array-access-split/src/Main.java b/test/527-checker-array-access-split/src/Main.java
new file mode 100644
index 0000000..ead9446
--- /dev/null
+++ b/test/527-checker-array-access-split/src/Main.java
@@ -0,0 +1,341 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+  public static void assertIntEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  /**
+   * Test that HArrayGet with a constant index is not split.
+   */
+
+  /// CHECK-START-ARM64: int Main.constantIndexGet(int[]) instruction_simplifier_arm64 (before)
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:                                    ArrayGet [<<Array>>,<<Index>>]
+
+  /// CHECK-START-ARM64: int Main.constantIndexGet(int[]) instruction_simplifier_arm64 (after)
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK-NOT:                                Arm64IntermediateAddress
+  /// CHECK:                                    ArrayGet [<<Array>>,<<Index>>]
+
+  public static int constantIndexGet(int array[]) {
+    return array[1];
+  }
+
+  /**
+   * Test that HArraySet with a constant index is not split.
+   */
+
+  /// CHECK-START-ARM64: void Main.constantIndexSet(int[]) instruction_simplifier_arm64 (before)
+  /// CHECK:             <<Const2:i\d+>>        IntConstant 2
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:                                    ArraySet [<<Array>>,<<Index>>,<<Const2>>]
+
+  /// CHECK-START-ARM64: void Main.constantIndexSet(int[]) instruction_simplifier_arm64 (after)
+  /// CHECK:             <<Const2:i\d+>>        IntConstant 2
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK-NOT:                                Arm64IntermediateAddress
+  /// CHECK:                                    ArraySet [<<Array>>,<<Index>>,<<Const2>>]
+
+
+  public static void constantIndexSet(int array[]) {
+    array[1] = 2;
+  }
+
+  /**
+   * Test basic splitting of HArrayGet.
+   */
+
+  /// CHECK-START-ARM64: int Main.get(int[], int) instruction_simplifier_arm64 (before)
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:                                    ArrayGet [<<Array>>,<<Index>>]
+
+  /// CHECK-START-ARM64: int Main.get(int[], int) instruction_simplifier_arm64 (after)
+  /// CHECK:             <<DataOffset:i\d+>>    IntConstant
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:             <<Address:l\d+>>       Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT:                               ArrayGet [<<Address>>,<<Index>>]
+
+  public static int get(int array[], int index) {
+    return array[index];
+  }
+
+  /**
+   * Test basic splitting of HArraySet.
+   */
+
+  /// CHECK-START-ARM64: void Main.set(int[], int, int) instruction_simplifier_arm64 (before)
+  /// CHECK:                                    ParameterValue
+  /// CHECK:                                    ParameterValue
+  /// CHECK:             <<Arg:i\d+>>           ParameterValue
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:                                    ArraySet [<<Array>>,<<Index>>,<<Arg>>]
+
+  /// CHECK-START-ARM64: void Main.set(int[], int, int) instruction_simplifier_arm64 (after)
+  /// CHECK:                                    ParameterValue
+  /// CHECK:                                    ParameterValue
+  /// CHECK:             <<Arg:i\d+>>           ParameterValue
+  /// CHECK:             <<DataOffset:i\d+>>    IntConstant
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:             <<Address:l\d+>>       Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT:                               ArraySet [<<Address>>,<<Index>>,<<Arg>>]
+
+  public static void set(int array[], int index, int value) {
+    array[index] = value;
+  }
+
+  /**
+   * Check that the intermediate address can be shared after GVN.
+   */
+
+  /// CHECK-START-ARM64: void Main.getSet(int[], int) instruction_simplifier_arm64 (before)
+  /// CHECK:             <<Const1:i\d+>>        IntConstant 1
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:             <<ArrayGet:i\d+>>      ArrayGet [<<Array>>,<<Index>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK:                                    ArraySet [<<Array>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM64: void Main.getSet(int[], int) instruction_simplifier_arm64 (after)
+  /// CHECK-DAG:         <<Const1:i\d+>>        IntConstant 1
+  /// CHECK-DAG:         <<DataOffset:i\d+>>    IntConstant
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:             <<Address1:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT:        <<ArrayGet:i\d+>>      ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK:             <<Address2:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT:                               ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM64: void Main.getSet(int[], int) GVN_after_arch (after)
+  /// CHECK-DAG:         <<Const1:i\d+>>        IntConstant 1
+  /// CHECK-DAG:         <<DataOffset:i\d+>>    IntConstant
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:             <<Address:l\d+>>       Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK:             <<ArrayGet:i\d+>>      ArrayGet [<<Address>>,<<Index>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK-NOT:                                Arm64IntermediateAddress
+  /// CHECK:                                    ArraySet [<<Address>>,<<Index>>,<<Add>>]
+
+  public static void getSet(int array[], int index) {
+    array[index] = array[index] + 1;
+  }
+
+  /**
+   * Check that the intermediate address computation is not reordered or merged
+   * across IRs that can trigger GC.
+   */
+
+  /// CHECK-START-ARM64: int[] Main.acrossGC(int[], int) instruction_simplifier_arm64 (before)
+  /// CHECK:             <<Const1:i\d+>>        IntConstant 1
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:             <<ArrayGet:i\d+>>      ArrayGet [<<Array>>,<<Index>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK:                                    NewArray
+  /// CHECK:                                    ArraySet [<<Array>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM64: int[] Main.acrossGC(int[], int) instruction_simplifier_arm64 (after)
+  /// CHECK-DAG:         <<Const1:i\d+>>        IntConstant 1
+  /// CHECK-DAG:         <<DataOffset:i\d+>>    IntConstant
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:             <<Address1:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT:        <<ArrayGet:i\d+>>      ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK:                                    NewArray
+  /// CHECK:             <<Address2:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT:                               ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM64: int[] Main.acrossGC(int[], int) GVN_after_arch (after)
+  /// CHECK-DAG:         <<Const1:i\d+>>        IntConstant 1
+  /// CHECK-DAG:         <<DataOffset:i\d+>>    IntConstant
+  /// CHECK:             <<Array:l\d+>>         NullCheck
+  /// CHECK:             <<Index:i\d+>>         BoundsCheck
+  /// CHECK:             <<Address1:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK:             <<ArrayGet:i\d+>>      ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK:                                    NewArray
+  /// CHECK:             <<Address2:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK:                                    ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+  public static int[] acrossGC(int array[], int index) {
+    int tmp = array[index] + 1;
+    int[] new_array = new int[1];
+    array[index] = tmp;
+    return new_array;
+  }
+
+  /**
+   * Test that the intermediate address is shared between array accesses after
+   * the bounds checks have been removed by BCE.
+   */
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE1() instruction_simplifier_arm64 (before)
+  /// CHECK:             <<Const1:i\d+>>        IntConstant 1
+  /// CHECK:             <<Array:l\d+>>         NewArray
+  /// CHECK:             <<Index:i\d+>>         Phi
+  /// CHECK:                                    If
+  //  -------------- Loop
+  /// CHECK:             <<ArrayGet:i\d+>>      ArrayGet [<<Array>>,<<Index>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK:                                    ArraySet [<<Array>>,<<Index>>,<<Add>>]
+
+  // By the time we reach the architecture-specific instruction simplifier, BCE
+  // has removed the bounds checks in the loop.
+
+  // Note that we do not care that the `DataOffset` is `12`. However, if we do
+  // not specify it and any other `IntConstant` appears before that instruction,
+  // Checker will bind `DataOffset` to that earlier `IntConstant`, and the check
+  // will then fail.
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE1() instruction_simplifier_arm64 (after)
+  /// CHECK-DAG:         <<Const1:i\d+>>        IntConstant 1
+  /// CHECK-DAG:         <<DataOffset:i\d+>>    IntConstant 12
+  /// CHECK:             <<Array:l\d+>>         NewArray
+  /// CHECK:             <<Index:i\d+>>         Phi
+  /// CHECK:                                    If
+  //  -------------- Loop
+  /// CHECK:             <<Address1:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT:        <<ArrayGet:i\d+>>      ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK:             <<Address2:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-NEXT:                               ArraySet [<<Address2>>,<<Index>>,<<Add>>]
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE1() GVN_after_arch (after)
+  /// CHECK-DAG:         <<Const1:i\d+>>        IntConstant 1
+  /// CHECK-DAG:         <<DataOffset:i\d+>>    IntConstant 12
+  /// CHECK:             <<Array:l\d+>>         NewArray
+  /// CHECK:             <<Index:i\d+>>         Phi
+  /// CHECK:                                    If
+  //  -------------- Loop
+  /// CHECK:             <<Address:l\d+>>       Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK:             <<ArrayGet:i\d+>>      ArrayGet [<<Address>>,<<Index>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGet>>,<<Const1>>]
+  /// CHECK-NOT:                                Arm64IntermediateAddress
+  /// CHECK:                                    ArraySet [<<Address>>,<<Index>>,<<Add>>]
+
+  public static int canMergeAfterBCE1() {
+    int[] array = {0, 1, 2, 3};
+    for (int i = 0; i < array.length; i++) {
+      array[i] = array[i] + 1;
+    }
+    return array[array.length - 1];
+  }
+
+  /**
+   * This test case is similar to `canMergeAfterBCE1`, but with different
+   * indexes for the accesses.
+   */
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() instruction_simplifier_arm64 (before)
+  /// CHECK:             <<Const1:i\d+>>        IntConstant 1
+  /// CHECK:             <<Array:l\d+>>         NewArray
+  /// CHECK:             <<Index:i\d+>>         Phi
+  /// CHECK:                                    If
+  //  -------------- Loop
+  /// CHECK-DAG:         <<Index1:i\d+>>        Add [<<Index>>,<<Const1>>]
+  /// CHECK-DAG:         <<ArrayGetI:i\d+>>     ArrayGet [<<Array>>,<<Index>>]
+  /// CHECK-DAG:         <<ArrayGetI1:i\d+>>    ArrayGet [<<Array>>,<<Index1>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGetI>>,<<ArrayGetI1>>]
+  /// CHECK:                                    ArraySet [<<Array>>,<<Index1>>,<<Add>>]
+
+  // Note that we do not care that the `DataOffset` is `12`. However, if we do
+  // not specify it and any other `IntConstant` appears before that instruction,
+  // Checker will bind `DataOffset` to that earlier `IntConstant`, and the check
+  // will then fail.
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() instruction_simplifier_arm64 (after)
+  /// CHECK-DAG:         <<Const1:i\d+>>        IntConstant 1
+  /// CHECK-DAG:         <<DataOffset:i\d+>>    IntConstant 12
+  /// CHECK:             <<Array:l\d+>>         NewArray
+  /// CHECK:             <<Index:i\d+>>         Phi
+  /// CHECK:                                    If
+  //  -------------- Loop
+  /// CHECK-DAG:         <<Index1:i\d+>>        Add [<<Index>>,<<Const1>>]
+  /// CHECK-DAG:         <<Address1:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-DAG:         <<ArrayGetI:i\d+>>     ArrayGet [<<Address1>>,<<Index>>]
+  /// CHECK-DAG:         <<Address2:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-DAG:         <<ArrayGetI1:i\d+>>    ArrayGet [<<Address2>>,<<Index1>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGetI>>,<<ArrayGetI1>>]
+  /// CHECK:             <<Address3:l\d+>>      Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK:                                    ArraySet [<<Address3>>,<<Index1>>,<<Add>>]
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() GVN_after_arch (after)
+  /// CHECK-DAG:         <<Const1:i\d+>>        IntConstant 1
+  /// CHECK-DAG:         <<DataOffset:i\d+>>    IntConstant 12
+  /// CHECK:             <<Array:l\d+>>         NewArray
+  /// CHECK:             <<Index:i\d+>>         Phi
+  /// CHECK:                                    If
+  //  -------------- Loop
+  /// CHECK-DAG:         <<Index1:i\d+>>        Add [<<Index>>,<<Const1>>]
+  /// CHECK-DAG:         <<Address:l\d+>>       Arm64IntermediateAddress [<<Array>>,<<DataOffset>>]
+  /// CHECK-DAG:         <<ArrayGetI:i\d+>>     ArrayGet [<<Address>>,<<Index>>]
+  /// CHECK-DAG:         <<ArrayGetI1:i\d+>>    ArrayGet [<<Address>>,<<Index1>>]
+  /// CHECK:             <<Add:i\d+>>           Add [<<ArrayGetI>>,<<ArrayGetI1>>]
+  /// CHECK:                                    ArraySet [<<Address>>,<<Index1>>,<<Add>>]
+
+  // There should be only one intermediate address computation in the loop.
+
+  /// CHECK-START-ARM64: int Main.canMergeAfterBCE2() GVN_after_arch (after)
+  /// CHECK:                                    Arm64IntermediateAddress
+  /// CHECK-NOT:                                Arm64IntermediateAddress
+
+  public static int canMergeAfterBCE2() {
+    int[] array = {0, 1, 2, 3};
+    for (int i = 0; i < array.length - 1; i++) {
+      array[i + 1] = array[i] + array[i + 1];
+    }
+    return array[array.length - 1];
+  }
+
+
+  public static void main(String[] args) {
+    int[] array = {123, 456, 789};
+
+    assertIntEquals(456, constantIndexGet(array));
+
+    constantIndexSet(array);
+    assertIntEquals(2, array[1]);
+
+    assertIntEquals(789, get(array, 2));
+
+    set(array, 1, 456);
+    assertIntEquals(456, array[1]);
+
+    getSet(array, 0);
+    assertIntEquals(124, array[0]);
+
+    acrossGC(array, 0);
+    assertIntEquals(125, array[0]);
+
+    assertIntEquals(4, canMergeAfterBCE1());
+    assertIntEquals(6, canMergeAfterBCE2());
+  }
+}
diff --git a/test/530-checker-lse/expected.txt b/test/530-checker-lse/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/530-checker-lse/expected.txt
diff --git a/test/530-checker-lse/info.txt b/test/530-checker-lse/info.txt
new file mode 100644
index 0000000..5b45e20
--- /dev/null
+++ b/test/530-checker-lse/info.txt
@@ -0,0 +1 @@
+Checker test for load-store elimination.
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
new file mode 100644
index 0000000..c766aaa
--- /dev/null
+++ b/test/530-checker-lse/src/Main.java
@@ -0,0 +1,512 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+class Circle {
+  Circle(double radius) {
+    this.radius = radius;
+  }
+  public double getArea() {
+    return radius * radius * Math.PI;
+  }
+  private double radius;
+};
+
+class TestClass {
+  TestClass() {
+  }
+  TestClass(int i, int j) {
+    this.i = i;
+    this.j = j;
+  }
+  int i;
+  int j;
+  volatile int k;
+  TestClass next;
+  static int si;
+};
+
+class SubTestClass extends TestClass {
+  int k;
+};
+
+class TestClass2 {
+  int i;
+  int j;
+};
+
+public class Main {
+
+  /// CHECK-START: double Main.calcCircleArea(double) load_store_elimination (before)
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: double Main.calcCircleArea(double) load_store_elimination (after)
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK-NOT: InstanceFieldGet
+
+  static double calcCircleArea(double radius) {
+    return new Circle(radius).getArea();
+  }
+
+  /// CHECK-START: int Main.test1(TestClass, TestClass) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test1(TestClass, TestClass) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK-NOT: NullCheck
+  /// CHECK-NOT: InstanceFieldGet
+
+  // Different fields shouldn't alias.
+  static int test1(TestClass obj1, TestClass obj2) {
+    obj1.i = 1;
+    obj2.j = 2;
+    return obj1.i + obj2.j;
+  }
+
+  /// CHECK-START: int Main.test2(TestClass) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test2(TestClass) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK-NOT: NullCheck
+  /// CHECK-NOT: InstanceFieldSet
+  /// CHECK-NOT: InstanceFieldGet
+
+  // Redundant store of the same value.
+  static int test2(TestClass obj) {
+    obj.j = 1;
+    obj.j = 1;
+    return obj.j;
+  }
+
+  /// CHECK-START: int Main.test3(TestClass) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test3(TestClass) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK-NOT: InstanceFieldGet
+
+  // A new allocation shouldn't alias with pre-existing values.
+  static int test3(TestClass obj) {
+    obj.i = 1;
+    obj.next.j = 2;
+    TestClass obj2 = new TestClass();
+    obj2.i = 3;
+    obj2.j = 4;
+    return obj.i + obj.next.j + obj2.i + obj2.j;
+  }
+
+  /// CHECK-START: int Main.test4(TestClass, boolean) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: Return
+  /// CHECK: InstanceFieldSet
+
+  /// CHECK-START: int Main.test4(TestClass, boolean) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK-NOT: NullCheck
+  /// CHECK-NOT: InstanceFieldGet
+  /// CHECK: Return
+  /// CHECK: InstanceFieldSet
+
+  // Set and merge the same value in two branches.
+  static int test4(TestClass obj, boolean b) {
+    if (b) {
+      obj.i = 1;
+    } else {
+      obj.i = 1;
+    }
+    return obj.i;
+  }
+
+  /// CHECK-START: int Main.test5(TestClass, boolean) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: Return
+  /// CHECK: InstanceFieldSet
+
+  /// CHECK-START: int Main.test5(TestClass, boolean) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: Return
+  /// CHECK: InstanceFieldSet
+
+  // Set and merge different values in two branches.
+  static int test5(TestClass obj, boolean b) {
+    if (b) {
+      obj.i = 1;
+    } else {
+      obj.i = 2;
+    }
+    return obj.i;
+  }
+
+  /// CHECK-START: int Main.test6(TestClass, TestClass, boolean) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test6(TestClass, TestClass, boolean) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK-NOT: NullCheck
+  /// CHECK-NOT: InstanceFieldGet
+
+  // Setting the same value doesn't clear the value for aliased locations.
+  static int test6(TestClass obj1, TestClass obj2, boolean b) {
+    obj1.i = 1;
+    obj1.j = 2;
+    if (b) {
+      obj2.j = 2;
+    }
+    return obj1.j + obj2.j;
+  }
+
+  /// CHECK-START: int Main.test7(TestClass) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test7(TestClass) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  // Invocation should kill values in non-singleton heap locations.
+  static int test7(TestClass obj) {
+    obj.i = 1;
+    System.out.print("");
+    return obj.i;
+  }
+
+  /// CHECK-START: int Main.test8() load_store_elimination (before)
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InvokeVirtual
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test8() load_store_elimination (after)
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InvokeVirtual
+  /// CHECK-NOT: NullCheck
+  /// CHECK-NOT: InstanceFieldGet
+
+  // Invocation should not kill values in singleton heap locations.
+  static int test8() {
+    TestClass obj = new TestClass();
+    obj.i = 1;
+    System.out.print("");
+    return obj.i;
+  }
+
+  /// CHECK-START: int Main.test9(TestClass) load_store_elimination (before)
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test9(TestClass) load_store_elimination (after)
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  // Invocation should kill values in non-singleton heap locations.
+  static int test9(TestClass obj) {
+    TestClass obj2 = new TestClass();
+    obj2.i = 1;
+    obj.next = obj2;
+    System.out.print("");
+    return obj2.i;
+  }
+
+  /// CHECK-START: int Main.test10(TestClass) load_store_elimination (before)
+  /// CHECK: StaticFieldGet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: StaticFieldSet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test10(TestClass) load_store_elimination (after)
+  /// CHECK: StaticFieldGet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: StaticFieldSet
+  /// CHECK-NOT: NullCheck
+  /// CHECK-NOT: InstanceFieldGet
+
+  // Static fields shouldn't alias with instance fields.
+  static int test10(TestClass obj) {
+    TestClass.si += obj.i;
+    return obj.i;
+  }
+
+  /// CHECK-START: int Main.test11(TestClass) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test11(TestClass) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK-NOT: NullCheck
+  /// CHECK-NOT: InstanceFieldGet
+
+  // Loop without heap writes.
+  // obj.i is actually hoisted to the loop pre-header by licm already.
+  static int test11(TestClass obj) {
+    obj.i = 1;
+    int sum = 0;
+    for (int i = 0; i < 10; i++) {
+      sum += obj.i;
+    }
+    return sum;
+  }
+
+  /// CHECK-START: int Main.test12(TestClass, TestClass) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldSet
+
+  /// CHECK-START: int Main.test12(TestClass, TestClass) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldSet
+
+  // Loop with heap writes.
+  static int test12(TestClass obj1, TestClass obj2) {
+    obj1.i = 1;
+    int sum = 0;
+    for (int i = 0; i < 10; i++) {
+      sum += obj1.i;
+      obj2.i = sum;
+    }
+    return sum;
+  }
+
+  /// CHECK-START: int Main.test13(TestClass, TestClass2) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test13(TestClass, TestClass2) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK-NOT: NullCheck
+  /// CHECK-NOT: InstanceFieldGet
+
+  // Different classes shouldn't alias.
+  static int test13(TestClass obj1, TestClass2 obj2) {
+    obj1.i = 1;
+    obj2.i = 2;
+    return obj1.i + obj2.i;
+  }
+
+  /// CHECK-START: int Main.test14(TestClass, SubTestClass) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test14(TestClass, SubTestClass) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  // Subclass may alias with superclass.
+  static int test14(TestClass obj1, SubTestClass obj2) {
+    obj1.i = 1;
+    obj2.i = 2;
+    return obj1.i;
+  }
+
+  /// CHECK-START: int Main.test15() load_store_elimination (before)
+  /// CHECK: StaticFieldSet
+  /// CHECK: StaticFieldSet
+  /// CHECK: StaticFieldGet
+
+  /// CHECK-START: int Main.test15() load_store_elimination (after)
+  /// CHECK: <<Const2:i\d+>> IntConstant 2
+  /// CHECK: StaticFieldSet
+  /// CHECK: StaticFieldSet
+  /// CHECK-NOT: StaticFieldGet
+  /// CHECK: Return [<<Const2>>]
+
+  // Static field access through the subclass's name.
+  static int test15() {
+    TestClass.si = 1;
+    SubTestClass.si = 2;
+    return TestClass.si;
+  }
+
+  /// CHECK-START: int Main.test16() load_store_elimination (before)
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test16() load_store_elimination (after)
+  /// CHECK: NewInstance
+  /// CHECK-NOT: StaticFieldSet
+  /// CHECK-NOT: StaticFieldGet
+
+  // Test inlined constructor.
+  static int test16() {
+    TestClass obj = new TestClass(1, 2);
+    return obj.i + obj.j;
+  }
+
+  /// CHECK-START: int Main.test17() load_store_elimination (before)
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test17() load_store_elimination (after)
+  /// CHECK: <<Const0:i\d+>> IntConstant 0
+  /// CHECK: NewInstance
+  /// CHECK-NOT: StaticFieldSet
+  /// CHECK-NOT: StaticFieldGet
+  /// CHECK: Return [<<Const0>>]
+
+  // Test getting default value.
+  static int test17() {
+    TestClass obj = new TestClass();
+    obj.j = 1;
+    return obj.i;
+  }
+
+  /// CHECK-START: int Main.test18(TestClass) load_store_elimination (before)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  /// CHECK-START: int Main.test18(TestClass) load_store_elimination (after)
+  /// CHECK: InstanceFieldSet
+  /// CHECK: InstanceFieldGet
+
+  // Volatile field load/store shouldn't be eliminated.
+  static int test18(TestClass obj) {
+    obj.k = 1;
+    return obj.k;
+  }
+
+  /// CHECK-START: float Main.test19(float[], float[]) load_store_elimination (before)
+  /// CHECK: <<IntTypeValue:i\d+>> ArrayGet
+  /// CHECK: ArraySet
+  /// CHECK: <<FloatTypeValue:f\d+>> ArrayGet
+
+  /// CHECK-START: float Main.test19(float[], float[]) load_store_elimination (after)
+  /// CHECK: <<IntTypeValue:i\d+>> ArrayGet
+  /// CHECK: ArraySet
+  /// CHECK: <<FloatTypeValue:f\d+>> ArrayGet
+
+  // I/F, J/D aliasing should keep the load/store.
+  static float test19(float[] fa1, float[] fa2) {
+    fa1[0] = fa2[0];
+    return fa1[0];
+  }
+
+  /// CHECK-START: TestClass Main.test20() load_store_elimination (before)
+  /// CHECK: NewInstance
+  /// CHECK: InstanceFieldSet
+
+  /// CHECK-START: TestClass Main.test20() load_store_elimination (after)
+  /// CHECK: NewInstance
+  /// CHECK-NOT: InstanceFieldSet
+
+  // Storing the default heap value is redundant if the heap location already
+  // holds the default value.
+  static TestClass test20() {
+    TestClass obj = new TestClass();
+    obj.i = 0;
+    return obj;
+  }
+
+  public static void assertIntEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static void assertFloatEquals(float expected, float result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static void assertDoubleEquals(double expected, double result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static void main(String[] args) {
+    assertDoubleEquals(Math.PI * Math.PI * Math.PI, calcCircleArea(Math.PI));
+    assertIntEquals(test1(new TestClass(), new TestClass()), 3);
+    assertIntEquals(test2(new TestClass()), 1);
+    TestClass obj1 = new TestClass();
+    TestClass obj2 = new TestClass();
+    obj1.next = obj2;
+    assertIntEquals(test3(obj1), 10);
+    assertIntEquals(test4(new TestClass(), true), 1);
+    assertIntEquals(test4(new TestClass(), false), 1);
+    assertIntEquals(test5(new TestClass(), true), 1);
+    assertIntEquals(test5(new TestClass(), false), 2);
+    assertIntEquals(test6(new TestClass(), new TestClass(), true), 4);
+    assertIntEquals(test6(new TestClass(), new TestClass(), false), 2);
+    assertIntEquals(test7(new TestClass()), 1);
+    assertIntEquals(test8(), 1);
+    obj1 = new TestClass();
+    obj2 = new TestClass();
+    obj1.next = obj2;
+    assertIntEquals(test9(new TestClass()), 1);
+    assertIntEquals(test10(new TestClass(3, 4)), 3);
+    assertIntEquals(TestClass.si, 3);
+    assertIntEquals(test11(new TestClass()), 10);
+    assertIntEquals(test12(new TestClass(), new TestClass()), 10);
+    assertIntEquals(test13(new TestClass(), new TestClass2()), 3);
+    SubTestClass obj3 = new SubTestClass();
+    assertIntEquals(test14(obj3, obj3), 2);
+    assertIntEquals(test15(), 2);
+    assertIntEquals(test16(), 3);
+    assertIntEquals(test17(), 0);
+    assertIntEquals(test18(new TestClass()), 1);
+    float[] fa1 = { 0.8f };
+    float[] fa2 = { 1.8f };
+    assertFloatEquals(test19(fa1, fa2), 1.8f);
+    assertIntEquals(test20().i, 0);
+  }
+}
diff --git a/test/532-checker-nonnull-arrayset/src/Main.java b/test/532-checker-nonnull-arrayset/src/Main.java
index 7d8fff4..2c701bb 100644
--- a/test/532-checker-nonnull-arrayset/src/Main.java
+++ b/test/532-checker-nonnull-arrayset/src/Main.java
@@ -29,10 +29,10 @@
   /// CHECK-NOT:      test
   /// CHECK:          ReturnVoid
   public static void test() {
-    Object[] array = new Object[1];
+    Object[] array = new Object[2];
     Object nonNull = array[0];
     nonNull.getClass(); // Ensure nonNull has an implicit null check.
-    array[0] = nonNull;
+    array[1] = nonNull;
   }
 
   public static void main(String[] args) {}
diff --git a/test/955-lambda-smali/run b/test/955-lambda-smali/run
index b754680..2fb2f89 100755
--- a/test/955-lambda-smali/run
+++ b/test/955-lambda-smali/run
@@ -15,4 +15,4 @@
 # limitations under the License.
 
 # Ensure that the lambda experimental opcodes are turned on for dalvikvm and dex2oat
-${RUN} "$@" --runtime-option -Xexperimental:lambdas -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:lambdas
+${RUN} "$@" --experimental lambdas
diff --git a/test/960-default-smali/build b/test/960-default-smali/build
index c786687..06692f9 100755
--- a/test/960-default-smali/build
+++ b/test/960-default-smali/build
@@ -18,7 +18,7 @@
 set -e
 
 # Generate the smali Main.smali file or fail
-./util-src/generate_smali.py ./smali
+${ANDROID_BUILD_TOP}/art/test/utils/python/generate_smali_main.py ./smali
 
 if [[ $@ == *"--jvm"* ]]; then
   # Build the Java files if we are running a --jvm test
@@ -29,5 +29,5 @@
 fi
 
 # Build the smali files and make a dex
-${SMALI} -JXmx256m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx256m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
 zip "$TEST_NAME.jar" classes.dex
diff --git a/test/960-default-smali/info.txt b/test/960-default-smali/info.txt
index eb596e2..9583abb 100644
--- a/test/960-default-smali/info.txt
+++ b/test/960-default-smali/info.txt
@@ -2,15 +2,16 @@
 
 Obviously needs to run under ART or a Java 8 Language runtime and compiler.
 
-When run a Main.smali file will be generated by the util-src/generate_smali.py
-script. If we run with --jvm we will use the tools/extract-embedded-java script to
-turn the smali into equivalent Java using the embedded Java code.
+When run, a Main.smali file will be generated by the
+test/utils/python/generate_smali_main.py script. If we run with --jvm we will
+use the tools/extract-embedded-java script to turn the smali into equivalent
+Java using the embedded Java code.
 
 When updating be sure to write the equivalent Java code in comments of the smali
 files.
 
-Care should be taken when updating the generate_smali.py script. It must always
-return equivalent output when run multiple times.
+Care should be taken when updating the generate_smali_main.py script. It must
+always return equivalent output when run multiple times.
 
 To update the test files do the following steps:
     <Add new classes/interfaces>
diff --git a/test/960-default-smali/run b/test/960-default-smali/run
index e378b06..22f6800 100755
--- a/test/960-default-smali/run
+++ b/test/960-default-smali/run
@@ -14,8 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-if echo $@ | grep -q -- "--jvm"; then
-  ${RUN} "$@"
-else
-  ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/961-default-iface-resolution-generated/build b/test/961-default-iface-resolution-generated/build
index 707c17e..5eb851f 100755
--- a/test/961-default-iface-resolution-generated/build
+++ b/test/961-default-iface-resolution-generated/build
@@ -40,7 +40,7 @@
 fi
 
 # Build the smali files and make a dex
-${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
 zip $TEST_NAME.jar classes.dex
 
 # Reset the ulimit back to its initial value
diff --git a/test/961-default-iface-resolution-generated/run b/test/961-default-iface-resolution-generated/run
index e378b06..22f6800 100755
--- a/test/961-default-iface-resolution-generated/run
+++ b/test/961-default-iface-resolution-generated/run
@@ -14,8 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-if echo $@ | grep -q -- "--jvm"; then
-  ${RUN} "$@"
-else
-  ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/962-iface-static/build b/test/962-iface-static/build
index 5ad82f7..06bb3bd 100755
--- a/test/962-iface-static/build
+++ b/test/962-iface-static/build
@@ -26,5 +26,5 @@
 fi
 
 # Build the smali files and make a dex
-${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
 zip $TEST_NAME.jar classes.dex
diff --git a/test/962-iface-static/run b/test/962-iface-static/run
index e713708..d37737f 100755
--- a/test/962-iface-static/run
+++ b/test/962-iface-static/run
@@ -14,8 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-if echo $@ | grep -q -- "--jvm"; then
-  ${RUN} "$@"
-else
-  ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/963-default-range-smali/build b/test/963-default-range-smali/build
index 5ad82f7..06bb3bd 100755
--- a/test/963-default-range-smali/build
+++ b/test/963-default-range-smali/build
@@ -26,5 +26,5 @@
 fi
 
 # Build the smali files and make a dex
-${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
 zip $TEST_NAME.jar classes.dex
diff --git a/test/963-default-range-smali/run b/test/963-default-range-smali/run
index e713708..d37737f 100755
--- a/test/963-default-range-smali/run
+++ b/test/963-default-range-smali/run
@@ -14,8 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-if echo $@ | grep -q -- "--jvm"; then
-  ${RUN} "$@"
-else
-  ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/964-default-iface-init-generated/build b/test/964-default-iface-init-generated/build
index deef803..b0fbe4b 100755
--- a/test/964-default-iface-init-generated/build
+++ b/test/964-default-iface-init-generated/build
@@ -38,7 +38,7 @@
 fi
 
 # Build the smali files and make a dex
-${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
 zip $TEST_NAME.jar classes.dex
 
 # Reset the ulimit back to its initial value
diff --git a/test/964-default-iface-init-generated/run b/test/964-default-iface-init-generated/run
index e378b06..22f6800 100755
--- a/test/964-default-iface-init-generated/run
+++ b/test/964-default-iface-init-generated/run
@@ -14,8 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-if echo $@ | grep -q -- "--jvm"; then
-  ${RUN} "$@"
-else
-  ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 9c04135..6ce3d94 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -441,55 +441,9 @@
 # Known broken tests for the mips32 optimizing compiler backend.
 TEST_ART_BROKEN_OPTIMIZING_MIPS_RUN_TESTS := \
     441-checker-inliner \
-    442-checker-constant-folding \
-    444-checker-nce \
-    445-checker-licm \
-    446-checker-inliner2 \
-    447-checker-inliner3 \
-    449-checker-bce \
-    450-checker-types \
-    455-checker-gvn \
-    458-checker-instruction-simplification \
-    462-checker-inlining-across-dex-files \
-    463-checker-boolean-simplifier \
-    464-checker-inline-sharpen-calls \
-    465-checker-clinit-gvn \
-    468-checker-bool-simplifier-regression \
-    473-checker-inliner-constants \
-    474-checker-boolean-input \
-    476-checker-ctor-memory-barrier \
-    477-checker-bound-type \
-    478-checker-clinit-check-pruning \
-    478-checker-inliner-nested-loop \
-    480-checker-dead-blocks \
-    482-checker-loop-back-edge-use \
-    484-checker-register-hints \
-    485-checker-dce-loop-update \
-    485-checker-dce-switch \
-    486-checker-must-do-null-check \
-    487-checker-inline-calls \
-    488-checker-inline-recursive-calls \
-    490-checker-inline \
-    492-checker-inline-invoke-interface \
-    493-checker-inline-invoke-interface \
-    494-checker-instanceof-tests \
-    495-checker-checkcast-tests \
-    496-checker-inlining-and-class-loader \
-    508-checker-disassembly \
     510-checker-try-catch \
-    517-checker-builder-fallthrough \
     521-checker-array-set-null \
-    522-checker-regression-monitor-exit \
-    523-checker-can-throw-regression \
-    525-checker-arrays-and-fields \
-    526-checker-caller-callee-regs \
-    529-checker-unresolved \
-    530-checker-loops \
-    530-checker-regression-reftype-final \
-    532-checker-nonnull-arrayset \
-    534-checker-bce-deoptimization \
     536-checker-intrinsic-optimization \
-    537-checker-debuggable \
 
 ifeq (mips,$(TARGET_ARCH))
   ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
diff --git a/test/etc/default-build b/test/etc/default-build
index c92402b..4743216 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -96,7 +96,7 @@
 
 if [ -d smali ]; then
   # Compile Smali classes
-  ${SMALI} -JXmx256m --experimental --api-level 23 --output smali_classes.dex `find smali -name '*.smali'`
+  ${SMALI} -JXmx256m ${SMALI_ARGS} --output smali_classes.dex `find smali -name '*.smali'`
 
   # Don't bother with dexmerger if we provide our own main function in a smali file.
   if [ ${SKIP_DX_MERGER} = "false" ]; then
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index fbefa07..280b4bc 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -18,6 +18,7 @@
 DEBUGGER="n"
 DEV_MODE="n"
 DEX2OAT=""
+EXPERIMENTAL=""
 FALSE_BIN="/system/bin/false"
 FLAGS=""
 GDB=""
@@ -196,6 +197,13 @@
         FLAGS="${FLAGS} -Xcompiler-option --compile-pic"
         COMPILE_FLAGS="${COMPILE_FLAGS} --compile-pic"
         shift
+    elif [ "x$1" = "x--experimental" ]; then
+        if [ "$#" -lt 2 ]; then
+            echo "missing argument to --experimental" 1>&2
+            exit 1
+        fi
+        EXPERIMENTAL="$EXPERIMENTAL $2"
+        shift 2
     elif expr "x$1" : "x--" >/dev/null 2>&1; then
         echo "unknown $0 option: $1" 1>&2
         exit 1
@@ -204,6 +212,13 @@
     fi
 done
 
+if [ "$USE_JVM" = "n" ]; then
+    for feature in ${EXPERIMENTAL}; do
+        FLAGS="${FLAGS} -Xexperimental:${feature}"
+        COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xexperimental:${feature}"
+    done
+fi
+
 if [ "x$1" = "x" ] ; then
   MAIN="Main"
 else
diff --git a/test/run-test b/test/run-test
index 293779f..5a43fb0 100755
--- a/test/run-test
+++ b/test/run-test
@@ -46,6 +46,7 @@
 export DEX_LOCATION=/data/run-test/${test_dir}
 export NEED_DEX="true"
 export USE_JACK="false"
+export SMALI_ARGS="--experimental --api-level 23"
 
 # If dx was not set by the environment variable, assume it is in the path.
 if [ -z "$DX" ]; then
diff --git a/test/960-default-smali/util-src/generate_smali.py b/test/utils/python/generate_smali_main.py
similarity index 98%
rename from test/960-default-smali/util-src/generate_smali.py
rename to test/utils/python/generate_smali_main.py
index b2bf1f0..d796d31 100755
--- a/test/960-default-smali/util-src/generate_smali.py
+++ b/test/utils/python/generate_smali_main.py
@@ -15,7 +15,7 @@
 # limitations under the License.
 
 """
-Generate Smali Main file for test 960
+Generate Smali Main file from a classes.xml file.
 """
 
 import os
diff --git a/tools/generate-operator-out.py b/tools/generate-operator-out.py
index c74508d..3bd62fe 100755
--- a/tools/generate-operator-out.py
+++ b/tools/generate-operator-out.py
@@ -86,8 +86,10 @@
       if m:
         enclosing_classes.append(m.group(1))
         continue
-      m = re.compile(r'^\s*\}( .*)?;').search(raw_line)
-      if m:
+
+      # End of class/struct -- be careful not to match "do { ... } while" constructs by accident
+      m = re.compile(r'^\s*\}(\s+)?(while)?(.+)?;').search(raw_line)
+      if m and not m.group(2):
         enclosing_classes = enclosing_classes[0:len(enclosing_classes) - 1]
         continue
 
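The intent of the regular-expression change above, restated as a small Java sketch using java.util.regex (illustrative only; the tool itself is Python): the optional second group captures a trailing "while", so "} while (...);" is no longer mistaken for the end of a class or struct, while "};" still is.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

class BraceEndSketch {
  public static void main(String[] args) {
    // Same idea as the updated pattern: group 2 captures an optional "while".
    Pattern end = Pattern.compile("^\\s*\\}(\\s+)?(while)?(.+)?;");
    String[] lines = { "};", "  } while (cond);", "}  // no trailing statement" };
    for (String line : lines) {
      Matcher m = end.matcher(line);
      boolean endsClass = m.find() && m.group(2) == null;
      System.out.println("\"" + line + "\" ends a class/struct: " + endsClass);
    }
  }
}
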
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index b4f686f..9a8b462 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -159,10 +159,10 @@
   bug: 22786792
 },
 {
-  description: "Formatting failures",
+  description: "Lack of IPv6 on some buildbot slaves",
   result: EXEC_FAILED,
-  names: ["libcore.java.text.NumberFormatTest#test_currencyFromLocale",
-          "libcore.java.text.NumberFormatTest#test_currencyWithPatternDigits"],
-  bug: 25136848
+  names: ["libcore.io.OsTest#test_byteBufferPositions_sendto_recvfrom_af_inet6",
+          "libcore.io.OsTest#test_sendtoSocketAddress_af_inet6"],
+  bug: 25178637
 }
 ]
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index edec362..9aed271 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -88,7 +88,8 @@
   fi
 done
 
-vm_args="--vm-arg $image"
+vm_args="--vm-arg $image --vm-arg -Xusejit:true"
+debuggee_args="$debuggee_args -Xusejit:true"
 if [[ $debug == "yes" ]]; then
   art="$art -d"
   art_debugee="$art_debugee -d"
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 80f7a37..67a7983 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -102,4 +102,4 @@
 # Run the tests using vogar.
 echo "Running tests for the following test packages:"
 echo ${working_packages[@]} | tr " " "\n"
-vogar $vogar_args --expectations art/tools/libcore_failures.txt --classpath $jsr166_test_jar --classpath $test_jar ${working_packages[@]}
+vogar $vogar_args --vm-arg -Xusejit:true --expectations art/tools/libcore_failures.txt --classpath $jsr166_test_jar --classpath $test_jar ${working_packages[@]}