Merge "Fix 2ND arch gtest testing."
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 1b96b0c..a71aec1 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -67,10 +67,14 @@
 
 ART_TEST_GTEST_VerifierDeps_SRC := $(abspath $(wildcard $(LOCAL_PATH)/VerifierDeps/*.smali))
 ART_TEST_HOST_GTEST_VerifierDeps_DEX := $(dir $(ART_TEST_HOST_GTEST_Main_DEX))$(subst Main,VerifierDeps,$(basename $(notdir $(ART_TEST_HOST_GTEST_Main_DEX))))$(suffix $(ART_TEST_HOST_GTEST_Main_DEX))
+ART_TEST_TARGET_GTEST_VerifierDeps_DEX := $(dir $(ART_TEST_TARGET_GTEST_Main_DEX))$(subst Main,VerifierDeps,$(basename $(notdir $(ART_TEST_TARGET_GTEST_Main_DEX))))$(suffix $(ART_TEST_TARGET_GTEST_Main_DEX))
 
 $(ART_TEST_HOST_GTEST_VerifierDeps_DEX): $(ART_TEST_GTEST_VerifierDeps_SRC) $(HOST_OUT_EXECUTABLES)/smali
 	 $(HOST_OUT_EXECUTABLES)/smali --output=$@ $(filter %.smali,$^)
 
+$(ART_TEST_TARGET_GTEST_VerifierDeps_DEX): $(ART_TEST_GTEST_VerifierDeps_SRC) $(HOST_OUT_EXECUTABLES)/smali
+	 $(HOST_OUT_EXECUTABLES)/smali --output=$@ $(filter %.smali,$^)
+
 # Dex file dependencies for each gtest.
 ART_GTEST_dex2oat_environment_tests_DEX_DEPS := Main MainStripped MultiDex MultiDexModifiedSecondary Nested
 
@@ -610,5 +614,6 @@
 ART_TEST_TARGET_GTEST_MainStripped_DEX :=
 ART_TEST_GTEST_VerifierDeps_SRC :=
 ART_TEST_HOST_GTEST_VerifierDeps_DEX :=
+ART_TEST_TARGET_GTEST_VerifierDeps_DEX :=
 GTEST_DEX_DIRECTORIES :=
 LOCAL_PATH :=
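
The new target rule mirrors the host rule exactly; the path expression derives the VerifierDeps dex name from the Main dex name. A worked example with a hypothetical path (not taken from real build output):

    ART_TEST_TARGET_GTEST_Main_DEX = /out/.../art-gtest-Main.jar   (hypothetical)
    notdir   -> art-gtest-Main.jar         (strip the directory)
    basename -> art-gtest-Main             (strip the .jar suffix)
    subst    -> art-gtest-VerifierDeps     (replace Main with VerifierDeps)
    dir + ... + suffix -> /out/.../art-gtest-VerifierDeps.jar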
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 28db29c..0518213 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1069,15 +1069,11 @@
       __ movsd(Address(ESP, destination.GetStackIndex()), source.AsFpuRegister<XmmRegister>());
     } else if (source.IsConstant()) {
       HConstant* constant = source.GetConstant();
-      int64_t value;
-      if (constant->IsLongConstant()) {
-        value = constant->AsLongConstant()->GetValue();
-      } else {
-        DCHECK(constant->IsDoubleConstant());
-        value = bit_cast<int64_t, double>(constant->AsDoubleConstant()->GetValue());
-      }
+      DCHECK(constant->IsLongConstant() || constant->IsDoubleConstant());
+      int64_t value = GetInt64ValueOf(constant);
       __ movl(Address(ESP, destination.GetStackIndex()), Immediate(Low32Bits(value)));
-      __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value)));
+      __ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)),
+              Immediate(High32Bits(value)));
     } else {
       DCHECK(source.IsDoubleStackSlot()) << source;
       EmitParallelMoves(
@@ -1427,14 +1423,7 @@
     Location lhs = condition->GetLocations()->InAt(0);
     Location rhs = condition->GetLocations()->InAt(1);
     // LHS is guaranteed to be in a register (see LocationsBuilderX86::HandleCondition).
-    if (rhs.IsRegister()) {
-      __ cmpl(lhs.AsRegister<Register>(), rhs.AsRegister<Register>());
-    } else if (rhs.IsConstant()) {
-      int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
-      codegen_->Compare32BitValue(lhs.AsRegister<Register>(), constant);
-    } else {
-      __ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
-    }
+    codegen_->GenerateIntCompare(lhs, rhs);
     if (true_target == nullptr) {
       __ j(X86Condition(condition->GetOppositeCondition()), false_target);
     } else {
@@ -1528,18 +1517,6 @@
   locations->SetOut(Location::SameAsFirstInput());
 }
 
-void CodeGeneratorX86::GenerateIntCompare(Location lhs, Location rhs) {
-  Register lhs_reg = lhs.AsRegister<Register>();
-  if (rhs.IsConstant()) {
-    int32_t value = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
-    Compare32BitValue(lhs_reg, value);
-  } else if (rhs.IsStackSlot()) {
-    assembler_.cmpl(lhs_reg, Address(ESP, rhs.GetStackIndex()));
-  } else {
-    assembler_.cmpl(lhs_reg, rhs.AsRegister<Register>());
-  }
-}
-
 void InstructionCodeGeneratorX86::VisitSelect(HSelect* select) {
   LocationSummary* locations = select->GetLocations();
   DCHECK(locations->InAt(0).Equals(locations->Out()));
@@ -3621,7 +3598,7 @@
       } else {
         DCHECK(value.IsConstant()) << value;
         if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
-        __ jmp(slow_path->GetEntryLabel());
+          __ jmp(slow_path->GetEntryLabel());
         }
       }
       break;
@@ -5033,56 +5010,31 @@
   switch (type) {
     case Primitive::kPrimBoolean: {
       Register out = out_loc.AsRegister<Register>();
-      if (index.IsConstant()) {
-        __ movzxb(out, Address(obj,
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
-      } else {
-        __ movzxb(out, Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset));
-      }
+      __ movzxb(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_1, data_offset));
       break;
     }
 
     case Primitive::kPrimByte: {
       Register out = out_loc.AsRegister<Register>();
-      if (index.IsConstant()) {
-        __ movsxb(out, Address(obj,
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
-      } else {
-        __ movsxb(out, Address(obj, index.AsRegister<Register>(), TIMES_1, data_offset));
-      }
+      __ movsxb(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_1, data_offset));
       break;
     }
 
     case Primitive::kPrimShort: {
       Register out = out_loc.AsRegister<Register>();
-      if (index.IsConstant()) {
-        __ movsxw(out, Address(obj,
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
-      } else {
-        __ movsxw(out, Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset));
-      }
+      __ movsxw(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_2, data_offset));
       break;
     }
 
     case Primitive::kPrimChar: {
       Register out = out_loc.AsRegister<Register>();
-      if (index.IsConstant()) {
-        __ movzxw(out, Address(obj,
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
-      } else {
-        __ movzxw(out, Address(obj, index.AsRegister<Register>(), TIMES_2, data_offset));
-      }
+      __ movzxw(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_2, data_offset));
       break;
     }
 
     case Primitive::kPrimInt: {
       Register out = out_loc.AsRegister<Register>();
-      if (index.IsConstant()) {
-        __ movl(out, Address(obj,
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
-      } else {
-        __ movl(out, Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset));
-      }
+      __ movl(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset));
       break;
     }
 
@@ -5099,21 +5051,16 @@
             instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true);
       } else {
         Register out = out_loc.AsRegister<Register>();
+        __ movl(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset));
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+        // If read barriers are enabled, emit read barriers other than
+        // Baker's using a slow path (and also unpoison the loaded
+        // reference, if heap poisoning is enabled).
         if (index.IsConstant()) {
           uint32_t offset =
               (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
-          __ movl(out, Address(obj, offset));
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
-          // If read barriers are enabled, emit read barriers other than
-          // Baker's using a slow path (and also unpoison the loaded
-          // reference, if heap poisoning is enabled).
           codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
         } else {
-          __ movl(out, Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset));
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
-          // If read barriers are enabled, emit read barriers other than
-          // Baker's using a slow path (and also unpoison the loaded
-          // reference, if heap poisoning is enabled).
           codegen_->MaybeGenerateReadBarrierSlow(
               instruction, out_loc, out_loc, obj_loc, data_offset, index);
         }
@@ -5123,40 +5070,23 @@
 
     case Primitive::kPrimLong: {
       DCHECK_NE(obj, out_loc.AsRegisterPairLow<Register>());
-      if (index.IsConstant()) {
-        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
-        __ movl(out_loc.AsRegisterPairLow<Register>(), Address(obj, offset));
-        codegen_->MaybeRecordImplicitNullCheck(instruction);
-        __ movl(out_loc.AsRegisterPairHigh<Register>(), Address(obj, offset + kX86WordSize));
-      } else {
-        __ movl(out_loc.AsRegisterPairLow<Register>(),
-                Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset));
-        codegen_->MaybeRecordImplicitNullCheck(instruction);
-        __ movl(out_loc.AsRegisterPairHigh<Register>(),
-                Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize));
-      }
+      __ movl(out_loc.AsRegisterPairLow<Register>(),
+              CodeGeneratorX86::ArrayAddress(obj, index, TIMES_8, data_offset));
+      codegen_->MaybeRecordImplicitNullCheck(instruction);
+      __ movl(out_loc.AsRegisterPairHigh<Register>(),
+              CodeGeneratorX86::ArrayAddress(obj, index, TIMES_8, data_offset + kX86WordSize));
       break;
     }
 
     case Primitive::kPrimFloat: {
       XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
-      if (index.IsConstant()) {
-        __ movss(out, Address(obj,
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
-      } else {
-        __ movss(out, Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset));
-      }
+      __ movss(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset));
       break;
     }
 
     case Primitive::kPrimDouble: {
       XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
-      if (index.IsConstant()) {
-        __ movsd(out, Address(obj,
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset));
-      } else {
-        __ movsd(out, Address(obj, index.AsRegister<Register>(), TIMES_8, data_offset));
-      }
+      __ movsd(out, CodeGeneratorX86::ArrayAddress(obj, index, TIMES_8, data_offset));
       break;
     }
 
@@ -5227,9 +5157,7 @@
     case Primitive::kPrimBoolean:
     case Primitive::kPrimByte: {
       uint32_t offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
-      Address address = index.IsConstant()
-          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + offset)
-          : Address(array, index.AsRegister<Register>(), TIMES_1, offset);
+      Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_1, offset);
       if (value.IsRegister()) {
         __ movb(address, value.AsRegister<ByteRegister>());
       } else {
@@ -5242,9 +5170,7 @@
     case Primitive::kPrimShort:
     case Primitive::kPrimChar: {
       uint32_t offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
-      Address address = index.IsConstant()
-          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + offset)
-          : Address(array, index.AsRegister<Register>(), TIMES_2, offset);
+      Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_2, offset);
       if (value.IsRegister()) {
         __ movw(address, value.AsRegister<Register>());
       } else {
@@ -5256,9 +5182,7 @@
 
     case Primitive::kPrimNot: {
       uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
-      Address address = index.IsConstant()
-          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
-          : Address(array, index.AsRegister<Register>(), TIMES_4, offset);
+      Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_4, offset);
 
       if (!value.IsRegister()) {
         // Just setting null.
@@ -5354,9 +5278,7 @@
 
     case Primitive::kPrimInt: {
       uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
-      Address address = index.IsConstant()
-          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
-          : Address(array, index.AsRegister<Register>(), TIMES_4, offset);
+      Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_4, offset);
       if (value.IsRegister()) {
         __ movl(address, value.AsRegister<Register>());
       } else {
@@ -5370,44 +5292,27 @@
 
     case Primitive::kPrimLong: {
       uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
-      if (index.IsConstant()) {
-        size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
-        if (value.IsRegisterPair()) {
-          __ movl(Address(array, offset), value.AsRegisterPairLow<Register>());
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
-          __ movl(Address(array, offset + kX86WordSize), value.AsRegisterPairHigh<Register>());
-        } else {
-          DCHECK(value.IsConstant());
-          int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
-          __ movl(Address(array, offset), Immediate(Low32Bits(val)));
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
-          __ movl(Address(array, offset + kX86WordSize), Immediate(High32Bits(val)));
-        }
+      if (value.IsRegisterPair()) {
+        __ movl(CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, data_offset),
+                value.AsRegisterPairLow<Register>());
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+        __ movl(CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, data_offset + kX86WordSize),
+                value.AsRegisterPairHigh<Register>());
       } else {
-        if (value.IsRegisterPair()) {
-          __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset),
-                  value.AsRegisterPairLow<Register>());
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
-          __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
-                  value.AsRegisterPairHigh<Register>());
-        } else {
-          DCHECK(value.IsConstant());
-          int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
-          __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset),
-                  Immediate(Low32Bits(val)));
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
-          __ movl(Address(array, index.AsRegister<Register>(), TIMES_8, data_offset + kX86WordSize),
-                  Immediate(High32Bits(val)));
-        }
+        DCHECK(value.IsConstant());
+        int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
+        __ movl(CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, data_offset),
+                Immediate(Low32Bits(val)));
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+        __ movl(CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, data_offset + kX86WordSize),
+                Immediate(High32Bits(val)));
       }
       break;
     }
 
     case Primitive::kPrimFloat: {
       uint32_t offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
-      Address address = index.IsConstant()
-          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
-          : Address(array, index.AsRegister<Register>(), TIMES_4, offset);
+      Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_4, offset);
       if (value.IsFpuRegister()) {
         __ movss(address, value.AsFpuRegister<XmmRegister>());
       } else {
@@ -5421,17 +5326,13 @@
 
     case Primitive::kPrimDouble: {
       uint32_t offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
-      Address address = index.IsConstant()
-          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + offset)
-          : Address(array, index.AsRegister<Register>(), TIMES_8, offset);
+      Address address = CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, offset);
       if (value.IsFpuRegister()) {
         __ movsd(address, value.AsFpuRegister<XmmRegister>());
       } else {
         DCHECK(value.IsConstant());
-        Address address_hi = index.IsConstant() ?
-            Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) +
-                           offset + kX86WordSize) :
-            Address(array, index.AsRegister<Register>(), TIMES_8, offset + kX86WordSize);
+        Address address_hi =
+            CodeGeneratorX86::ArrayAddress(array, index, TIMES_8, offset + kX86WordSize);
         int64_t v = bit_cast<int64_t, double>(value.GetConstant()->AsDoubleConstant()->GetValue());
         __ movl(address, Immediate(Low32Bits(v)));
         codegen_->MaybeRecordImplicitNullCheck(instruction);
@@ -5525,13 +5426,7 @@
       }
       codegen_->MaybeRecordImplicitNullCheck(array_length);
     } else {
-      Register length = length_loc.AsRegister<Register>();
-      if (index_loc.IsConstant()) {
-        int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
-        __ cmpl(length, Immediate(value));
-      } else {
-        __ cmpl(length, index_loc.AsRegister<Register>());
-      }
+      codegen_->GenerateIntCompare(length_loc, index_loc);
     }
     codegen_->AddSlowPath(slow_path);
     __ j(kBelowEqual, slow_path->GetEntryLabel());
@@ -6909,9 +6804,7 @@
       "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
   // /* HeapReference<Object> */ ref =
   //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
-  Address src = index.IsConstant() ?
-      Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset) :
-      Address(obj, index.AsRegister<Register>(), TIMES_4, data_offset);
+  Address src = CodeGeneratorX86::ArrayAddress(obj, index, TIMES_4, data_offset);
   GenerateReferenceLoadWithBakerReadBarrier(instruction, ref, obj, src, needs_null_check);
 }
 
@@ -7392,6 +7285,27 @@
   }
 }
 
+void CodeGeneratorX86::GenerateIntCompare(Location lhs, Location rhs) {
+  Register lhs_reg = lhs.AsRegister<Register>();
+  if (rhs.IsConstant()) {
+    int32_t value = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
+    Compare32BitValue(lhs_reg, value);
+  } else if (rhs.IsStackSlot()) {
+    __ cmpl(lhs_reg, Address(ESP, rhs.GetStackIndex()));
+  } else {
+    __ cmpl(lhs_reg, rhs.AsRegister<Register>());
+  }
+}
+
+Address CodeGeneratorX86::ArrayAddress(Register obj,
+                                       Location index,
+                                       ScaleFactor scale,
+                                       uint32_t data_offset) {
+  return index.IsConstant() ?
+      Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << scale) + data_offset) :
+      Address(obj, index.AsRegister<Register>(), scale, data_offset);
+}
+
 Address CodeGeneratorX86::LiteralCaseTable(HX86PackedSwitch* switch_instr,
                                            Register reg,
                                            Register value) {
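
The new ArrayAddress helper centralizes the constant-vs-register index split that the call sites above used to spell out. A minimal stand-alone sketch of the arithmetic it folds (plain integers rather than the assembler's Address type; the ScaleFactor values TIMES_1..TIMES_8 are the shift amounts 0..3):

    #include <cstdint>

    // For a constant index, the scaled offset is folded into the displacement at
    // compile time; a register index instead uses the base + index * scale + disp
    // SIB encoding at run time.
    uint32_t ConstantIndexDisplacement(int32_t index, int shift, uint32_t data_offset) {
      return (static_cast<uint32_t>(index) << shift) + data_offset;
    }

    // Example: index 3 with TIMES_4 (shift 2) and data_offset 12 yields 3*4 + 12 = 24.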
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 04a0a3d..5866e65 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -427,8 +427,6 @@
                   Register value,
                   bool value_can_be_null);
 
-  void GenerateIntCompare(Location lhs, Location rhs);
-
   void GenerateMemoryBarrier(MemBarrierKind kind);
 
   Label* GetLabelOf(HBasicBlock* block) const {
@@ -474,6 +472,15 @@
   // Compare a register with a 32-bit value in the most efficient manner.
   void Compare32BitValue(Register dest, int32_t value);
 
+  // Compare int values. Supports only register locations for `lhs`.
+  void GenerateIntCompare(Location lhs, Location rhs);
+
+  // Construct address for array access.
+  static Address ArrayAddress(Register obj,
+                              Location index,
+                              ScaleFactor scale,
+                              uint32_t data_offset);
+
   Address LiteralCaseTable(HX86PackedSwitch* switch_instr, Register reg, Register value);
 
   void Finalize(CodeAllocator* allocator) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 88d98fc..506a7b1 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1204,13 +1204,8 @@
                source.AsFpuRegister<XmmRegister>());
     } else if (source.IsConstant()) {
       HConstant* constant = source.GetConstant();
-      int64_t value;
-      if (constant->IsDoubleConstant()) {
-        value = bit_cast<int64_t, double>(constant->AsDoubleConstant()->GetValue());
-      } else {
-        DCHECK(constant->IsLongConstant());
-        value = constant->AsLongConstant()->GetValue();
-      }
+      DCHECK(constant->IsLongConstant() || constant->IsDoubleConstant());
+      int64_t value = GetInt64ValueOf(constant);
       Store64BitValueToStack(destination, value);
     } else {
       DCHECK(source.IsDoubleStackSlot());
@@ -1309,31 +1304,11 @@
     case Primitive::kPrimShort:
     case Primitive::kPrimInt:
     case Primitive::kPrimNot: {
-      CpuRegister left_reg = left.AsRegister<CpuRegister>();
-      if (right.IsConstant()) {
-        int32_t value = CodeGenerator::GetInt32ValueOf(right.GetConstant());
-        if (value == 0) {
-          __ testl(left_reg, left_reg);
-        } else {
-          __ cmpl(left_reg, Immediate(value));
-        }
-      } else if (right.IsStackSlot()) {
-        __ cmpl(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
-      } else {
-        __ cmpl(left_reg, right.AsRegister<CpuRegister>());
-      }
+      codegen_->GenerateIntCompare(left, right);
       break;
     }
     case Primitive::kPrimLong: {
-      CpuRegister left_reg = left.AsRegister<CpuRegister>();
-      if (right.IsConstant()) {
-        int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
-        codegen_->Compare64BitValue(left_reg, value);
-      } else if (right.IsDoubleStackSlot()) {
-        __ cmpq(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
-      } else {
-        __ cmpq(left_reg, right.AsRegister<CpuRegister>());
-      }
+      codegen_->GenerateLongCompare(left, right);
       break;
     }
     case Primitive::kPrimFloat: {
@@ -1488,15 +1463,7 @@
 
     Location lhs = condition->GetLocations()->InAt(0);
     Location rhs = condition->GetLocations()->InAt(1);
-    if (rhs.IsRegister()) {
-      __ cmpl(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>());
-    } else if (rhs.IsConstant()) {
-      int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
-      codegen_->Compare32BitValue(lhs.AsRegister<CpuRegister>(), constant);
-    } else {
-      __ cmpl(lhs.AsRegister<CpuRegister>(),
-              Address(CpuRegister(RSP), rhs.GetStackIndex()));
-    }
+    codegen_->GenerateIntCompare(lhs, rhs);
     if (true_target == nullptr) {
       __ j(X86_64IntegerCondition(condition->GetOppositeCondition()), false_target);
     } else {
@@ -1696,28 +1663,14 @@
       // Clear output register: setcc only sets the low byte.
       __ xorl(reg, reg);
 
-      if (rhs.IsRegister()) {
-        __ cmpl(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>());
-      } else if (rhs.IsConstant()) {
-        int32_t constant = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
-        codegen_->Compare32BitValue(lhs.AsRegister<CpuRegister>(), constant);
-      } else {
-        __ cmpl(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
-      }
+      codegen_->GenerateIntCompare(lhs, rhs);
       __ setcc(X86_64IntegerCondition(cond->GetCondition()), reg);
       return;
     case Primitive::kPrimLong:
       // Clear output register: setcc only sets the low byte.
       __ xorl(reg, reg);
 
-      if (rhs.IsRegister()) {
-        __ cmpq(lhs.AsRegister<CpuRegister>(), rhs.AsRegister<CpuRegister>());
-      } else if (rhs.IsConstant()) {
-        int64_t value = rhs.GetConstant()->AsLongConstant()->GetValue();
-        codegen_->Compare64BitValue(lhs.AsRegister<CpuRegister>(), value);
-      } else {
-        __ cmpq(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
-      }
+      codegen_->GenerateLongCompare(lhs, rhs);
       __ setcc(X86_64IntegerCondition(cond->GetCondition()), reg);
       return;
     case Primitive::kPrimFloat: {
@@ -1885,27 +1838,11 @@
     case Primitive::kPrimShort:
     case Primitive::kPrimChar:
     case Primitive::kPrimInt: {
-      CpuRegister left_reg = left.AsRegister<CpuRegister>();
-      if (right.IsConstant()) {
-        int32_t value = right.GetConstant()->AsIntConstant()->GetValue();
-        codegen_->Compare32BitValue(left_reg, value);
-      } else if (right.IsStackSlot()) {
-        __ cmpl(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
-      } else {
-        __ cmpl(left_reg, right.AsRegister<CpuRegister>());
-      }
+      codegen_->GenerateIntCompare(left, right);
       break;
     }
     case Primitive::kPrimLong: {
-      CpuRegister left_reg = left.AsRegister<CpuRegister>();
-      if (right.IsConstant()) {
-        int64_t value = right.GetConstant()->AsLongConstant()->GetValue();
-        codegen_->Compare64BitValue(left_reg, value);
-      } else if (right.IsDoubleStackSlot()) {
-        __ cmpq(left_reg, Address(CpuRegister(RSP), right.GetStackIndex()));
-      } else {
-        __ cmpq(left_reg, right.AsRegister<CpuRegister>());
-      }
+      codegen_->GenerateLongCompare(left, right);
       break;
     }
     case Primitive::kPrimFloat: {
@@ -3714,7 +3651,7 @@
       } else {
         DCHECK(value.IsConstant()) << value;
         if (value.GetConstant()->AsIntConstant()->GetValue() == 0) {
-        __ jmp(slow_path->GetEntryLabel());
+          __ jmp(slow_path->GetEntryLabel());
         }
       }
       break;
@@ -3729,7 +3666,7 @@
       } else {
         DCHECK(value.IsConstant()) << value;
         if (value.GetConstant()->AsLongConstant()->GetValue() == 0) {
-        __ jmp(slow_path->GetEntryLabel());
+          __ jmp(slow_path->GetEntryLabel());
         }
       }
       break;
@@ -4538,56 +4475,31 @@
   switch (type) {
     case Primitive::kPrimBoolean: {
       CpuRegister out = out_loc.AsRegister<CpuRegister>();
-      if (index.IsConstant()) {
-        __ movzxb(out, Address(obj,
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
-      } else {
-        __ movzxb(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_1, data_offset));
-      }
+      __ movzxb(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_1, data_offset));
       break;
     }
 
     case Primitive::kPrimByte: {
       CpuRegister out = out_loc.AsRegister<CpuRegister>();
-      if (index.IsConstant()) {
-        __ movsxb(out, Address(obj,
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
-      } else {
-        __ movsxb(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_1, data_offset));
-      }
+      __ movsxb(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_1, data_offset));
       break;
     }
 
     case Primitive::kPrimShort: {
       CpuRegister out = out_loc.AsRegister<CpuRegister>();
-      if (index.IsConstant()) {
-        __ movsxw(out, Address(obj,
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
-      } else {
-        __ movsxw(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_2, data_offset));
-      }
+      __ movsxw(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_2, data_offset));
       break;
     }
 
     case Primitive::kPrimChar: {
       CpuRegister out = out_loc.AsRegister<CpuRegister>();
-      if (index.IsConstant()) {
-        __ movzxw(out, Address(obj,
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
-      } else {
-        __ movzxw(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_2, data_offset));
-      }
+      __ movzxw(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_2, data_offset));
       break;
     }
 
     case Primitive::kPrimInt: {
       CpuRegister out = out_loc.AsRegister<CpuRegister>();
-      if (index.IsConstant()) {
-        __ movl(out, Address(obj,
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
-      } else {
-        __ movl(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset));
-      }
+      __ movl(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset));
       break;
     }
 
@@ -4604,21 +4516,16 @@
             instruction, out_loc, obj, data_offset, index, /* needs_null_check */ true);
       } else {
         CpuRegister out = out_loc.AsRegister<CpuRegister>();
+        __ movl(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset));
+        codegen_->MaybeRecordImplicitNullCheck(instruction);
+        // If read barriers are enabled, emit read barriers other than
+        // Baker's using a slow path (and also unpoison the loaded
+        // reference, if heap poisoning is enabled).
         if (index.IsConstant()) {
           uint32_t offset =
               (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
-          __ movl(out, Address(obj, offset));
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
-          // If read barriers are enabled, emit read barriers other than
-          // Baker's using a slow path (and also unpoison the loaded
-          // reference, if heap poisoning is enabled).
           codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset);
         } else {
-          __ movl(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset));
-          codegen_->MaybeRecordImplicitNullCheck(instruction);
-          // If read barriers are enabled, emit read barriers other than
-          // Baker's using a slow path (and also unpoison the loaded
-          // reference, if heap poisoning is enabled).
           codegen_->MaybeGenerateReadBarrierSlow(
               instruction, out_loc, out_loc, obj_loc, data_offset, index);
         }
@@ -4628,34 +4535,19 @@
 
     case Primitive::kPrimLong: {
       CpuRegister out = out_loc.AsRegister<CpuRegister>();
-      if (index.IsConstant()) {
-        __ movq(out, Address(obj,
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset));
-      } else {
-        __ movq(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset));
-      }
+      __ movq(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_8, data_offset));
       break;
     }
 
     case Primitive::kPrimFloat: {
       XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
-      if (index.IsConstant()) {
-        __ movss(out, Address(obj,
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
-      } else {
-        __ movss(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset));
-      }
+      __ movss(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset));
       break;
     }
 
     case Primitive::kPrimDouble: {
       XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();
-      if (index.IsConstant()) {
-        __ movsd(out, Address(obj,
-            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset));
-      } else {
-        __ movsd(out, Address(obj, index.AsRegister<CpuRegister>(), TIMES_8, data_offset));
-      }
+      __ movsd(out, CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_8, data_offset));
       break;
     }
 
@@ -4718,9 +4610,7 @@
     case Primitive::kPrimBoolean:
     case Primitive::kPrimByte: {
       uint32_t offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
-      Address address = index.IsConstant()
-          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + offset)
-          : Address(array, index.AsRegister<CpuRegister>(), TIMES_1, offset);
+      Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_1, offset);
       if (value.IsRegister()) {
         __ movb(address, value.AsRegister<CpuRegister>());
       } else {
@@ -4733,9 +4623,7 @@
     case Primitive::kPrimShort:
     case Primitive::kPrimChar: {
       uint32_t offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
-      Address address = index.IsConstant()
-          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + offset)
-          : Address(array, index.AsRegister<CpuRegister>(), TIMES_2, offset);
+      Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_2, offset);
       if (value.IsRegister()) {
         __ movw(address, value.AsRegister<CpuRegister>());
       } else {
@@ -4748,9 +4636,7 @@
 
     case Primitive::kPrimNot: {
       uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
-      Address address = index.IsConstant()
-          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
-          : Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset);
+      Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_4, offset);
 
       if (!value.IsRegister()) {
         // Just setting null.
@@ -4846,9 +4732,7 @@
 
     case Primitive::kPrimInt: {
       uint32_t offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
-      Address address = index.IsConstant()
-          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
-          : Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset);
+      Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_4, offset);
       if (value.IsRegister()) {
         __ movl(address, value.AsRegister<CpuRegister>());
       } else {
@@ -4862,18 +4746,14 @@
 
     case Primitive::kPrimLong: {
       uint32_t offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
-      Address address = index.IsConstant()
-          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + offset)
-          : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset);
+      Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset);
       if (value.IsRegister()) {
         __ movq(address, value.AsRegister<CpuRegister>());
         codegen_->MaybeRecordImplicitNullCheck(instruction);
       } else {
         int64_t v = value.GetConstant()->AsLongConstant()->GetValue();
-        Address address_high = index.IsConstant()
-            ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) +
-                offset + sizeof(int32_t))
-            : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset + sizeof(int32_t));
+        Address address_high =
+            CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset + sizeof(int32_t));
         codegen_->MoveInt64ToAddress(address, address_high, v, instruction);
       }
       break;
@@ -4881,15 +4761,12 @@
 
     case Primitive::kPrimFloat: {
       uint32_t offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
-      Address address = index.IsConstant()
-          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset)
-          : Address(array, index.AsRegister<CpuRegister>(), TIMES_4, offset);
+      Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_4, offset);
       if (value.IsFpuRegister()) {
         __ movss(address, value.AsFpuRegister<XmmRegister>());
       } else {
         DCHECK(value.IsConstant());
-        int32_t v =
-            bit_cast<int32_t, float>(value.GetConstant()->AsFloatConstant()->GetValue());
+        int32_t v = bit_cast<int32_t, float>(value.GetConstant()->AsFloatConstant()->GetValue());
         __ movl(address, Immediate(v));
       }
       codegen_->MaybeRecordImplicitNullCheck(instruction);
@@ -4898,19 +4775,15 @@
 
     case Primitive::kPrimDouble: {
       uint32_t offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
-      Address address = index.IsConstant()
-          ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + offset)
-          : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset);
+      Address address = CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset);
       if (value.IsFpuRegister()) {
         __ movsd(address, value.AsFpuRegister<XmmRegister>());
         codegen_->MaybeRecordImplicitNullCheck(instruction);
       } else {
         int64_t v =
             bit_cast<int64_t, double>(value.GetConstant()->AsDoubleConstant()->GetValue());
-        Address address_high = index.IsConstant()
-            ? Address(array, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) +
-                offset + sizeof(int32_t))
-            : Address(array, index.AsRegister<CpuRegister>(), TIMES_8, offset + sizeof(int32_t));
+        Address address_high =
+            CodeGeneratorX86_64::ArrayAddress(array, index, TIMES_8, offset + sizeof(int32_t));
         codegen_->MoveInt64ToAddress(address, address_high, v, instruction);
       }
       break;
@@ -5001,13 +4874,7 @@
       }
       codegen_->MaybeRecordImplicitNullCheck(array_length);
     } else {
-      CpuRegister length = length_loc.AsRegister<CpuRegister>();
-      if (index_loc.IsConstant()) {
-        int32_t value = CodeGenerator::GetInt32ValueOf(index_loc.GetConstant());
-        __ cmpl(length, Immediate(value));
-      } else {
-        __ cmpl(length, index_loc.AsRegister<CpuRegister>());
-      }
+      codegen_->GenerateIntCompare(length_loc, index_loc);
     }
     codegen_->AddSlowPath(slow_path);
     __ j(kBelowEqual, slow_path->GetEntryLabel());
@@ -6361,9 +6228,7 @@
       "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
   // /* HeapReference<Object> */ ref =
   //     *(obj + data_offset + index * sizeof(HeapReference<Object>))
-  Address src = index.IsConstant() ?
-      Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset) :
-      Address(obj, index.AsRegister<CpuRegister>(), TIMES_4, data_offset);
+  Address src = CodeGeneratorX86_64::ArrayAddress(obj, index, TIMES_4, data_offset);
   GenerateReferenceLoadWithBakerReadBarrier(instruction, ref, obj, src, needs_null_check);
 }
 
@@ -6668,6 +6533,39 @@
   }
 }
 
+void CodeGeneratorX86_64::GenerateIntCompare(Location lhs, Location rhs) {
+  CpuRegister lhs_reg = lhs.AsRegister<CpuRegister>();
+  if (rhs.IsConstant()) {
+    int32_t value = CodeGenerator::GetInt32ValueOf(rhs.GetConstant());
+    Compare32BitValue(lhs_reg, value);
+  } else if (rhs.IsStackSlot()) {
+    __ cmpl(lhs_reg, Address(CpuRegister(RSP), rhs.GetStackIndex()));
+  } else {
+    __ cmpl(lhs_reg, rhs.AsRegister<CpuRegister>());
+  }
+}
+
+void CodeGeneratorX86_64::GenerateLongCompare(Location lhs, Location rhs) {
+  CpuRegister lhs_reg = lhs.AsRegister<CpuRegister>();
+  if (rhs.IsConstant()) {
+    int64_t value = rhs.GetConstant()->AsLongConstant()->GetValue();
+    Compare64BitValue(lhs_reg, value);
+  } else if (rhs.IsDoubleStackSlot()) {
+    __ cmpq(lhs_reg, Address(CpuRegister(RSP), rhs.GetStackIndex()));
+  } else {
+    __ cmpq(lhs_reg, rhs.AsRegister<CpuRegister>());
+  }
+}
+
+Address CodeGeneratorX86_64::ArrayAddress(CpuRegister obj,
+                                          Location index,
+                                          ScaleFactor scale,
+                                          uint32_t data_offset) {
+  return index.IsConstant() ?
+      Address(obj, (index.GetConstant()->AsIntConstant()->GetValue() << scale) + data_offset) :
+      Address(obj, index.AsRegister<CpuRegister>(), scale, data_offset);
+}
+
 void CodeGeneratorX86_64::Store64BitValueToStack(Location dest, int64_t value) {
   DCHECK(dest.IsDoubleStackSlot());
   if (IsInt<32>(value)) {
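
Both back ends now route FP constants through GetInt64ValueOf, which bit_casts a double to its int64_t representation. A portable sketch of that reinterpretation (art::bit_cast itself lives in ART's base headers; this is not its real implementation):

    #include <cstdint>
    #include <cstring>

    int64_t DoubleBits(double v) {
      static_assert(sizeof(int64_t) == sizeof(double), "size mismatch");
      int64_t bits;
      std::memcpy(&bits, &v, sizeof(bits));  // reinterpret the object representation
      return bits;                           // compiles to a single register move on x86-64
    }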
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 693d0b8..7108676 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -510,6 +510,18 @@
   void Compare32BitValue(CpuRegister dest, int32_t value);
   void Compare64BitValue(CpuRegister dest, int64_t value);
 
+  // Compare int values. Supports only register locations for `lhs`.
+  void GenerateIntCompare(Location lhs, Location rhs);
+
+  // Compare long values. Supports only register locations for `lhs`.
+  void GenerateLongCompare(Location lhs, Location rhs);
+
+  // Construct address for array access.
+  static Address ArrayAddress(CpuRegister obj,
+                              Location index,
+                              ScaleFactor scale,
+                              uint32_t data_offset);
+
   Address LiteralCaseTable(HPackedSwitch* switch_instr);
 
   // Store a 64 bit value into a DoubleStackSlot in the most efficient manner.
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 63f5e0c..d99d2d6 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -2792,6 +2792,8 @@
     return EXIT_FAILURE;
   }
 
+  // Helps debugging on device. Can be used to determine which dalvikvm instance invoked a dex2oat
+  // instance. Used by tools/bisection_search/bisection_search.py.
   VLOG(compiler) << "Running dex2oat (parent PID = " << getppid() << ")";
 
   bool result;
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 247cb96..2750fea 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -568,7 +568,11 @@
   if (kVerboseMode) {
     LOG(INFO) << "GC MarkingPhase";
   }
-  CHECK(weak_ref_access_enabled_);
+  Thread* self = Thread::Current();
+  if (kIsDebugBuild) {
+    MutexLock mu(self, *Locks::thread_list_lock_);
+    CHECK(weak_ref_access_enabled_);
+  }
 
   // Scan immune spaces.
   // Update all the fields in the immune spaces first without graying the objects so that we
@@ -627,7 +631,6 @@
     Runtime::Current()->VisitNonThreadRoots(this);
   }
 
-  Thread* self = Thread::Current();
   {
     TimingLogger::ScopedTiming split7("ProcessMarkStack", GetTimings());
     // We transition through three mark stack modes (thread-local, shared, GC-exclusive). The
@@ -695,7 +698,10 @@
     CheckEmptyMarkStack();
   }
 
-  CHECK(weak_ref_access_enabled_);
+  if (kIsDebugBuild) {
+    MutexLock mu(self, *Locks::thread_list_lock_);
+    CHECK(weak_ref_access_enabled_);
+  }
   if (kVerboseMode) {
     LOG(INFO) << "GC end of MarkingPhase";
   }
@@ -705,11 +711,10 @@
   if (kVerboseMode) {
     LOG(INFO) << "ReenableWeakRefAccess";
   }
-  weak_ref_access_enabled_.StoreRelaxed(true);  // This is for new threads.
-  QuasiAtomic::ThreadFenceForConstructor();
   // Iterate all threads (don't need to or can't use a checkpoint) and re-enable weak ref access.
   {
     MutexLock mu(self, *Locks::thread_list_lock_);
+    weak_ref_access_enabled_ = true;  // This is for new threads.
     std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
     for (Thread* thread : thread_list) {
       thread->SetWeakRefAccessEnabled(true);
@@ -744,12 +749,30 @@
   ConcurrentCopying* const concurrent_copying_;
 };
 
+class ConcurrentCopying::DisableMarkingCallback : public Closure {
+ public:
+  explicit DisableMarkingCallback(ConcurrentCopying* concurrent_copying)
+      : concurrent_copying_(concurrent_copying) {
+  }
+
+  void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+    // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
+    // to avoid a race with ThreadList::Register().
+    CHECK(concurrent_copying_->is_marking_);
+    concurrent_copying_->is_marking_ = false;
+  }
+
+ private:
+  ConcurrentCopying* const concurrent_copying_;
+};
+
 void ConcurrentCopying::IssueDisableMarkingCheckpoint() {
   Thread* self = Thread::Current();
   DisableMarkingCheckpoint check_point(this);
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
   gc_barrier_->Init(self, 0);
-  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
+  DisableMarkingCallback dmc(this);
+  size_t barrier_count = thread_list->RunCheckpoint(&check_point, &dmc);
   // If there are no threads to wait for, which implies that all the checkpoint functions are
   // finished, then there is no need to release the mutator lock.
   if (barrier_count == 0) {
@@ -765,13 +788,9 @@
 }
 
 void ConcurrentCopying::DisableMarking() {
-  // Change the global is_marking flag to false. Do a fence before doing a checkpoint to update the
-  // thread-local flags so that a new thread starting up will get the correct is_marking flag.
-  is_marking_ = false;
-  QuasiAtomic::ThreadFenceForConstructor();
-  // Use a checkpoint to turn off the thread-local is_gc_marking flags and to ensure no threads are
-  // still in the middle of a read barrier which may have a from-space ref cached in a local
-  // variable.
+  // Use a checkpoint to turn off the global is_marking and the thread-local is_gc_marking flags and
+  // to ensure no threads are still in the middle of a read barrier which may have a from-space ref
+  // cached in a local variable.
   IssueDisableMarkingCheckpoint();
   if (kUseTableLookupReadBarrier) {
     heap_->rb_table_->ClearAll();
@@ -1158,12 +1177,13 @@
   const bool disable_weak_ref_access_;
 };
 
-void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access) {
+void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access,
+                                                    Closure* checkpoint_callback) {
   Thread* self = Thread::Current();
   RevokeThreadLocalMarkStackCheckpoint check_point(this, disable_weak_ref_access);
   ThreadList* thread_list = Runtime::Current()->GetThreadList();
   gc_barrier_->Init(self, 0);
-  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
+  size_t barrier_count = thread_list->RunCheckpoint(&check_point, checkpoint_callback);
   // If there are no threads to wait for, which implies that all the checkpoint functions are
   // finished, then there is no need to release the mutator lock.
   if (barrier_count == 0) {
@@ -1213,7 +1233,7 @@
   MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
   if (mark_stack_mode == kMarkStackModeThreadLocal) {
     // Process the thread-local mark stacks and the GC mark stack.
-    count += ProcessThreadLocalMarkStacks(false);
+    count += ProcessThreadLocalMarkStacks(false, nullptr);
     while (!gc_mark_stack_->IsEmpty()) {
       mirror::Object* to_ref = gc_mark_stack_->PopBack();
       ProcessMarkStackRef(to_ref);
@@ -1265,9 +1285,10 @@
   return count == 0;
 }
 
-size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access) {
+size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
+                                                       Closure* checkpoint_callback) {
   // Run a checkpoint to collect all thread local mark stacks and iterate over them all.
-  RevokeThreadLocalMarkStacks(disable_weak_ref_access);
+  RevokeThreadLocalMarkStacks(disable_weak_ref_access, checkpoint_callback);
   size_t count = 0;
   std::vector<accounting::AtomicStack<mirror::Object>*> mark_stacks;
   {
@@ -1360,6 +1381,23 @@
   }
 }
 
+class ConcurrentCopying::DisableWeakRefAccessCallback : public Closure {
+ public:
+  explicit DisableWeakRefAccessCallback(ConcurrentCopying* concurrent_copying)
+      : concurrent_copying_(concurrent_copying) {
+  }
+
+  void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
+    // This needs to run under the thread_list_lock_ critical section in ThreadList::RunCheckpoint()
+    // to avoid a deadlock b/31500969.
+    CHECK(concurrent_copying_->weak_ref_access_enabled_);
+    concurrent_copying_->weak_ref_access_enabled_ = false;
+  }
+
+ private:
+  ConcurrentCopying* const concurrent_copying_;
+};
+
 void ConcurrentCopying::SwitchToSharedMarkStackMode() {
   Thread* self = Thread::Current();
   CHECK(thread_running_gc_ != nullptr);
@@ -1369,12 +1407,10 @@
   CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode),
            static_cast<uint32_t>(kMarkStackModeThreadLocal));
   mark_stack_mode_.StoreRelaxed(kMarkStackModeShared);
-  CHECK(weak_ref_access_enabled_.LoadRelaxed());
-  weak_ref_access_enabled_.StoreRelaxed(false);
-  QuasiAtomic::ThreadFenceForConstructor();
+  DisableWeakRefAccessCallback dwrac(this);
   // Process the thread local mark stacks one last time after switching to the shared mark stack
   // mode and disable weak ref accesses.
-  ProcessThreadLocalMarkStacks(true);
+  ProcessThreadLocalMarkStacks(true, &dwrac);
   if (kVerboseMode) {
     LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
   }
@@ -1403,7 +1439,7 @@
   MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed();
   if (mark_stack_mode == kMarkStackModeThreadLocal) {
     // Thread-local mark stack mode.
-    RevokeThreadLocalMarkStacks(false);
+    RevokeThreadLocalMarkStacks(false, nullptr);
     MutexLock mu(Thread::Current(), mark_stack_lock_);
     if (!revoked_mark_stacks_.empty()) {
       for (accounting::AtomicStack<mirror::Object>* mark_stack : revoked_mark_stacks_) {
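
Both new callbacks share one shape: a Closure whose Run() flips a flag while ThreadList::RunCheckpoint() still holds thread_list_lock_, so a concurrently registering thread can never observe the flag mid-update. A hedged minimal version of the pattern (FlipFlagCallback is illustrative, not an ART class):

    class FlipFlagCallback : public Closure {
     public:
      explicit FlipFlagCallback(bool* flag) : flag_(flag) {}
      void Run(Thread* self ATTRIBUTE_UNUSED) OVERRIDE REQUIRES(Locks::thread_list_lock_) {
        *flag_ = false;  // safe: the caller holds thread_list_lock_
      }
     private:
      bool* const flag_;
    };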
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 53473f0..81ffbc5 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -34,6 +34,7 @@
 #include <vector>
 
 namespace art {
+class Closure;
 class RootInfo;
 
 namespace gc {
@@ -120,8 +121,8 @@
   Barrier& GetBarrier() {
     return *gc_barrier_;
   }
-  bool IsWeakRefAccessEnabled() {
-    return weak_ref_access_enabled_.LoadRelaxed();
+  bool IsWeakRefAccessEnabled() REQUIRES(Locks::thread_list_lock_) {
+    return weak_ref_access_enabled_;
   }
   void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
@@ -161,9 +162,9 @@
   void VerifyGrayImmuneObjects()
       REQUIRES(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
-  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access)
+  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
-  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access)
+  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
       REQUIRES_SHARED(Locks::mutator_lock_);
   void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!mark_stack_lock_);
@@ -269,7 +270,7 @@
                                 // without a lock. Other threads won't access the mark stack.
   };
   Atomic<MarkStackMode> mark_stack_mode_;
-  Atomic<bool> weak_ref_access_enabled_;
+  bool weak_ref_access_enabled_ GUARDED_BY(Locks::thread_list_lock_);
 
   // How many objects and bytes we moved. Used for accounting.
   Atomic<size_t> bytes_moved_;
@@ -311,7 +312,9 @@
   class AssertToSpaceInvariantRefsVisitor;
   class ClearBlackPtrsVisitor;
   class ComputeUnevacFromSpaceLiveRatioVisitor;
+  class DisableMarkingCallback;
   class DisableMarkingCheckpoint;
+  class DisableWeakRefAccessCallback;
   class FlipCallback;
   class GrayImmuneObjectVisitor;
   class ImmuneSpaceScanObjVisitor;
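
Replacing the atomic with a GUARDED_BY field trades explicit fences for a statically checked lock invariant. A stand-alone sketch of the Clang -Wthread-safety attributes this relies on (stub types, attributes written out in full; ART wraps them in macros such as GUARDED_BY and REQUIRES):

    class __attribute__((capability("mutex"))) StubMutex {
     public:
      void Lock() __attribute__((acquire_capability())) {}
      void Unlock() __attribute__((release_capability())) {}
    };

    StubMutex stub_lock;
    bool guarded_flag __attribute__((guarded_by(stub_lock))) = true;

    bool ReadFlag() {
      stub_lock.Lock();
      bool v = guarded_flag;  // OK: the capability is held
      stub_lock.Unlock();
      return v;               // touching guarded_flag here would warn under -Wthread-safety
    }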
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 97911d4..f144b98 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -639,11 +639,7 @@
 
   system_class_loader_ = CreateSystemClassLoader(this);
 
-  if (is_zygote_) {
-    if (!InitZygote()) {
-      return false;
-    }
-  } else {
+  if (!is_zygote_) {
     if (is_native_bridge_loaded_) {
       PreInitializeNativeBridge(".");
     }
@@ -688,45 +684,6 @@
   }
 }
 
-// Do zygote-mode-only initialization.
-bool Runtime::InitZygote() {
-#ifdef __linux__
-  // zygote goes into its own process group
-  setpgid(0, 0);
-
-  // See storage config details at http://source.android.com/tech/storage/
-  // Create private mount namespace shared by all children
-  if (unshare(CLONE_NEWNS) == -1) {
-    PLOG(ERROR) << "Failed to unshare()";
-    return false;
-  }
-
-  // Mark rootfs as being a slave so that changes from default
-  // namespace only flow into our children.
-  if (mount("rootfs", "/", nullptr, (MS_SLAVE | MS_REC), nullptr) == -1) {
-    PLOG(ERROR) << "Failed to mount() rootfs as MS_SLAVE";
-    return false;
-  }
-
-  // Create a staging tmpfs that is shared by our children; they will
-  // bind mount storage into their respective private namespaces, which
-  // are isolated from each other.
-  const char* target_base = getenv("EMULATED_STORAGE_TARGET");
-  if (target_base != nullptr) {
-    if (mount("tmpfs", target_base, "tmpfs", MS_NOSUID | MS_NODEV,
-              "uid=0,gid=1028,mode=0751") == -1) {
-      PLOG(ERROR) << "Failed to mount tmpfs to " << target_base;
-      return false;
-    }
-  }
-
-  return true;
-#else
-  UNIMPLEMENTED(FATAL);
-  return false;
-#endif
-}
-
 void Runtime::InitNonZygoteOrPostFork(
     JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa) {
   is_zygote_ = false;
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 58068eb..9e63564 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -455,7 +455,6 @@
   bool UseJitCompilation() const;
 
   void PreZygoteFork();
-  bool InitZygote();
   void InitNonZygoteOrPostFork(
       JNIEnv* env, bool is_system_server, NativeBridgeAction action, const char* isa);
 
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index ab1f198..5e6c8a4 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -284,7 +284,7 @@
   }
 }
 
-size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) {
+size_t ThreadList::RunCheckpoint(Closure* checkpoint_function, Closure* callback) {
   Thread* self = Thread::Current();
   Locks::mutator_lock_->AssertNotExclusiveHeld(self);
   Locks::thread_list_lock_->AssertNotHeld(self);
@@ -318,6 +318,10 @@
         }
       }
     }
+    // Run the callback that must execute inside this critical section.
+    if (callback != nullptr) {
+      callback->Run(self);
+    }
   }
 
   // Run the checkpoint on ourself while we wait for threads to suspend.
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index cef4ed1..b455e31 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -94,8 +94,10 @@
 
   // Run a checkpoint on threads; running threads are not suspended but run the checkpoint
   // inside the suspend check. Returns how many checkpoints are expected to run, including for
-  // already suspended threads for b/24191051.
-  size_t RunCheckpoint(Closure* checkpoint_function)
+  // already suspended threads for b/24191051. Run the callback, if non-null, inside the
+  // thread_list_lock critical section after determining the runnable/suspended states of the
+  // threads.
+  size_t RunCheckpoint(Closure* checkpoint_function, Closure* callback = nullptr)
       REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
 
   size_t RunCheckpointOnRunnableThreads(Closure* checkpoint_function)
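
Because the new parameter defaults to nullptr, existing RunCheckpoint call sites compile unchanged. A hedged usage sketch (thread_list, check_point and dmc named as in the concurrent_copying.cc changes above):

    // Old behavior: nothing extra runs under thread_list_lock_.
    size_t count = thread_list->RunCheckpoint(&check_point);
    // New behavior: dmc.Run() executes while RunCheckpoint() holds
    // thread_list_lock_, after the runnable/suspended state of every
    // thread has been determined.
    size_t count2 = thread_list->RunCheckpoint(&check_point, &dmc);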
diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc
index 4953483..350c838 100644
--- a/runtime/verifier/verifier_deps.cc
+++ b/runtime/verifier/verifier_deps.cc
@@ -17,6 +17,7 @@
 #include "verifier_deps.h"
 
 #include "compiler_callbacks.h"
+#include "leb128.h"
 #include "mirror/class-inl.h"
 #include "runtime.h"
 
@@ -317,5 +318,152 @@
   }
 }
 
+static inline uint32_t DecodeUint32WithOverflowCheck(const uint8_t** in, const uint8_t* end) {
+  CHECK_LT(*in, end);
+  return DecodeUnsignedLeb128(in);
+}
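
For reference, a standalone sketch of the unsigned LEB128 scheme behind
EncodeUnsignedLeb128/DecodeUnsignedLeb128 (the real helpers live in leb128.h): each byte
carries seven payload bits, and a set high bit means another byte follows.

    // Illustration only; not part of this change.
    static void SketchEncodeUnsignedLeb128(std::vector<uint8_t>* out, uint32_t value) {
      do {
        uint8_t byte = value & 0x7f;  // Low seven bits of the remaining value.
        value >>= 7;
        if (value != 0) {
          byte |= 0x80;               // Continuation bit: more bytes follow.
        }
        out->push_back(byte);
      } while (value != 0);
    }
    // e.g. 300 (binary 100101100) encodes as { 0xAC, 0x02 }.
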
+
+template<typename T1, typename T2>
+static inline void EncodeTuple(std::vector<uint8_t>* out, const std::tuple<T1, T2>& t) {
+  EncodeUnsignedLeb128(out, std::get<0>(t));
+  EncodeUnsignedLeb128(out, std::get<1>(t));
+}
+
+template<typename T1, typename T2>
+static inline void DecodeTuple(const uint8_t** in, const uint8_t* end, std::tuple<T1, T2>* t) {
+  T1 v1 = static_cast<T1>(DecodeUint32WithOverflowCheck(in, end));
+  T2 v2 = static_cast<T2>(DecodeUint32WithOverflowCheck(in, end));
+  *t = std::make_tuple(v1, v2);
+}
+
+template<typename T1, typename T2, typename T3>
+static inline void EncodeTuple(std::vector<uint8_t>* out, const std::tuple<T1, T2, T3>& t) {
+  EncodeUnsignedLeb128(out, std::get<0>(t));
+  EncodeUnsignedLeb128(out, std::get<1>(t));
+  EncodeUnsignedLeb128(out, std::get<2>(t));
+}
+
+template<typename T1, typename T2, typename T3>
+static inline void DecodeTuple(const uint8_t** in, const uint8_t* end, std::tuple<T1, T2, T3>* t) {
+  T1 v1 = static_cast<T1>(DecodeUint32WithOverflowCheck(in, end));
+  T2 v2 = static_cast<T2>(DecodeUint32WithOverflowCheck(in, end));
+  T3 v3 = static_cast<T3>(DecodeUint32WithOverflowCheck(in, end));
+  *t = std::make_tuple(v1, v2, v3);
+}
+
+template<typename T>
+static inline void EncodeSet(std::vector<uint8_t>* out, const std::set<T>& set) {
+  EncodeUnsignedLeb128(out, set.size());
+  for (const T& entry : set) {
+    EncodeTuple(out, entry);
+  }
+}
+
+template<typename T>
+static inline void DecodeSet(const uint8_t** in, const uint8_t* end, std::set<T>* set) {
+  DCHECK(set->empty());
+  size_t num_entries = DecodeUint32WithOverflowCheck(in, end);
+  for (size_t i = 0; i < num_entries; ++i) {
+    T tuple;
+    DecodeTuple(in, end, &tuple);
+    set->emplace(tuple);
+  }
+}
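
Taken together, a set is serialized as its LEB128-encoded size followed by the flattened
tuples. For example (hypothetical values):

    // std::set<std::tuple<uint32_t, uint16_t>> { {5, 2}, {300, 1} } encodes,
    // in set iteration order, as:
    //   0x02               // set size = 2
    //   0x05, 0x02         // tuple (5, 2)
    //   0xAC, 0x02, 0x01   // tuple (300, 1); 300 takes the two bytes 0xAC 0x02
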
+
+static inline void EncodeStringVector(std::vector<uint8_t>* out,
+                                      const std::vector<std::string>& strings) {
+  EncodeUnsignedLeb128(out, strings.size());
+  for (const std::string& str : strings) {
+    const uint8_t* data = reinterpret_cast<const uint8_t*>(str.c_str());
+    size_t length = str.length() + 1;
+    out->insert(out->end(), data, data + length);
+    DCHECK_EQ(0u, out->back());
+  }
+}
+
+static inline void DecodeStringVector(const uint8_t** in,
+                                      const uint8_t* end,
+                                      std::vector<std::string>* strings) {
+  DCHECK(strings->empty());
+  size_t num_strings = DecodeUint32WithOverflowCheck(in, end);
+  strings->reserve(num_strings);
+  for (size_t i = 0; i < num_strings; ++i) {
+    CHECK_LT(*in, end);
+    const char* string_start = reinterpret_cast<const char*>(*in);
+    strings->emplace_back(string_start);
+    *in += strings->back().length() + 1;
+  }
+}
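
The string table has the same shape: a LEB128 count, then each string's bytes followed by
its NUL terminator. For example (hypothetical contents):

    // std::vector<std::string> { "LMain;" } encodes as:
    //   0x01                            // number of strings
    //   'L' 'M' 'a' 'i' 'n' ';' 0x00    // "LMain;" plus the terminating NUL
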
+
+void VerifierDeps::Encode(std::vector<uint8_t>* buffer) const {
+  MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_);
+  for (auto& entry : dex_deps_) {
+    EncodeStringVector(buffer, entry.second->strings_);
+    EncodeSet(buffer, entry.second->assignable_types_);
+    EncodeSet(buffer, entry.second->unassignable_types_);
+    EncodeSet(buffer, entry.second->classes_);
+    EncodeSet(buffer, entry.second->fields_);
+    EncodeSet(buffer, entry.second->direct_methods_);
+    EncodeSet(buffer, entry.second->virtual_methods_);
+    EncodeSet(buffer, entry.second->interface_methods_);
+  }
+}
+
+VerifierDeps::VerifierDeps(const std::vector<const DexFile*>& dex_files, ArrayRef<uint8_t> data)
+    : VerifierDeps(dex_files) {
+  const uint8_t* data_start = data.data();
+  const uint8_t* data_end = data_start + data.size();
+  for (auto& entry : dex_deps_) {
+    DecodeStringVector(&data_start, data_end, &entry.second->strings_);
+    DecodeSet(&data_start, data_end, &entry.second->assignable_types_);
+    DecodeSet(&data_start, data_end, &entry.second->unassignable_types_);
+    DecodeSet(&data_start, data_end, &entry.second->classes_);
+    DecodeSet(&data_start, data_end, &entry.second->fields_);
+    DecodeSet(&data_start, data_end, &entry.second->direct_methods_);
+    DecodeSet(&data_start, data_end, &entry.second->virtual_methods_);
+    DecodeSet(&data_start, data_end, &entry.second->interface_methods_);
+  }
+  CHECK_LE(data_start, data_end);
+}
+
+bool VerifierDeps::Equals(const VerifierDeps& rhs) const {
+  MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_);
+
+  if (dex_deps_.size() != rhs.dex_deps_.size()) {
+    return false;
+  }
+
+  auto lhs_it = dex_deps_.begin();
+  auto rhs_it = rhs.dex_deps_.begin();
+
+  for (; (lhs_it != dex_deps_.end()) && (rhs_it != rhs.dex_deps_.end()); lhs_it++, rhs_it++) {
+    const DexFile* lhs_dex_file = lhs_it->first;
+    const DexFile* rhs_dex_file = rhs_it->first;
+    if (lhs_dex_file != rhs_dex_file) {
+      return false;
+    }
+
+    DexFileDeps* lhs_deps = lhs_it->second.get();
+    DexFileDeps* rhs_deps = rhs_it->second.get();
+    if (!lhs_deps->Equals(*rhs_deps)) {
+      return false;
+    }
+  }
+
+  DCHECK((lhs_it == dex_deps_.end()) && (rhs_it == rhs.dex_deps_.end()));
+  return true;
+}
+
+bool VerifierDeps::DexFileDeps::Equals(const VerifierDeps::DexFileDeps& rhs) const {
+  return (strings_ == rhs.strings_) &&
+         (assignable_types_ == rhs.assignable_types_) &&
+         (unassignable_types_ == rhs.unassignable_types_) &&
+         (classes_ == rhs.classes_) &&
+         (fields_ == rhs.fields_) &&
+         (direct_methods_ == rhs.direct_methods_) &&
+         (virtual_methods_ == rhs.virtual_methods_) &&
+         (interface_methods_ == rhs.interface_methods_);
+}
+
 }  // namespace verifier
 }  // namespace art
diff --git a/runtime/verifier/verifier_deps.h b/runtime/verifier/verifier_deps.h
index da63d67..dc8dfaf 100644
--- a/runtime/verifier/verifier_deps.h
+++ b/runtime/verifier/verifier_deps.h
@@ -23,6 +23,7 @@
 
 #include "art_field.h"
 #include "art_method.h"
+#include "base/array_ref.h"
 #include "base/mutex.h"
 #include "method_resolution_kind.h"
 #include "os.h"
@@ -84,14 +85,23 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::verifier_deps_lock_);
 
+  // Serialize the recorded dependencies and store the data into `buffer`.
+  void Encode(std::vector<uint8_t>* buffer) const
+      REQUIRES(!Locks::verifier_deps_lock_);
+
  private:
   static constexpr uint16_t kUnresolvedMarker = static_cast<uint16_t>(-1);
 
+  // Only used in tests to reconstruct the data structure from serialized data.
+  VerifierDeps(const std::vector<const DexFile*>& dex_files, ArrayRef<uint8_t> data)
+      REQUIRES(!Locks::verifier_deps_lock_);
+
   using ClassResolutionBase = std::tuple<uint32_t, uint16_t>;
   struct ClassResolution : public ClassResolutionBase {
+    ClassResolution() = default;
+    ClassResolution(const ClassResolution&) = default;
     ClassResolution(uint32_t type_idx, uint16_t access_flags)
         : ClassResolutionBase(type_idx, access_flags) {}
-    ClassResolution(const ClassResolution&) = default;
 
     bool IsResolved() const { return GetAccessFlags() != kUnresolvedMarker; }
     uint32_t GetDexTypeIndex() const { return std::get<0>(*this); }
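
The `= default` constructors added here (and to the structs below) let the decode path
default-construct an entry before filling it in; the relevant pattern from DecodeSet above:

    // T must be default-constructible and copyable:
    //   T tuple;
    //   DecodeTuple(in, end, &tuple);
    //   set->emplace(tuple);
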
@@ -100,9 +110,10 @@
 
   using FieldResolutionBase = std::tuple<uint32_t, uint16_t, uint32_t>;
   struct FieldResolution : public FieldResolutionBase {
+    FieldResolution() = default;
+    FieldResolution(const FieldResolution&) = default;
     FieldResolution(uint32_t field_idx, uint16_t access_flags, uint32_t declaring_class_idx)
         : FieldResolutionBase(field_idx, access_flags, declaring_class_idx) {}
-    FieldResolution(const FieldResolution&) = default;
 
     bool IsResolved() const { return GetAccessFlags() != kUnresolvedMarker; }
     uint32_t GetDexFieldIndex() const { return std::get<0>(*this); }
@@ -112,9 +123,10 @@
 
   using MethodResolutionBase = std::tuple<uint32_t, uint16_t, uint32_t>;
   struct MethodResolution : public MethodResolutionBase {
+    MethodResolution() = default;
+    MethodResolution(const MethodResolution&) = default;
     MethodResolution(uint32_t method_idx, uint16_t access_flags, uint32_t declaring_class_idx)
         : MethodResolutionBase(method_idx, access_flags, declaring_class_idx) {}
-    MethodResolution(const MethodResolution&) = default;
 
     bool IsResolved() const { return GetAccessFlags() != kUnresolvedMarker; }
     uint32_t GetDexMethodIndex() const { return std::get<0>(*this); }
@@ -124,9 +136,10 @@
 
   using TypeAssignabilityBase = std::tuple<uint32_t, uint32_t>;
   struct TypeAssignability : public TypeAssignabilityBase {
+    TypeAssignability() = default;
+    TypeAssignability(const TypeAssignability&) = default;
     TypeAssignability(uint32_t destination_idx, uint32_t source_idx)
         : TypeAssignabilityBase(destination_idx, source_idx) {}
-    TypeAssignability(const TypeAssignability&) = default;
 
     uint32_t GetDestination() const { return std::get<0>(*this); }
     uint32_t GetSource() const { return std::get<1>(*this); }
@@ -150,6 +163,8 @@
     std::set<MethodResolution> direct_methods_;
     std::set<MethodResolution> virtual_methods_;
     std::set<MethodResolution> interface_methods_;
+
+    bool Equals(const DexFileDeps& rhs) const;
   };
 
   // Finds the DexFileDep instance associated with `dex_file`, or nullptr if
@@ -215,12 +230,16 @@
       REQUIRES_SHARED(Locks::mutator_lock_)
       REQUIRES(!Locks::verifier_deps_lock_);
 
+  bool Equals(const VerifierDeps& rhs) const
+      REQUIRES(!Locks::verifier_deps_lock_);
+
   // Map from DexFiles into dependencies collected from verification of their methods.
   std::map<const DexFile*, std::unique_ptr<DexFileDeps>> dex_deps_
       GUARDED_BY(Locks::verifier_deps_lock_);
 
   friend class VerifierDepsTest;
   ART_FRIEND_TEST(VerifierDepsTest, StringToId);
+  ART_FRIEND_TEST(VerifierDepsTest, EncodeDecode);
 };
 
 }  // namespace verifier
diff --git a/runtime/verifier/verifier_deps_test.cc b/runtime/verifier/verifier_deps_test.cc
index 41a9ad3..bbaf59f 100644
--- a/runtime/verifier/verifier_deps_test.cc
+++ b/runtime/verifier/verifier_deps_test.cc
@@ -59,11 +59,21 @@
     StackHandleScope<1> hs(Thread::Current());
     Handle<mirror::ClassLoader> class_loader_handle(
         hs.NewHandle(soa->Decode<mirror::ClassLoader*>(class_loader_)));
-    mirror::Class* result = class_linker_->FindClass(Thread::Current(),
-                                                     name.c_str(),
-                                                     class_loader_handle);
-    DCHECK(result != nullptr) << name;
-    return result;
+    mirror::Class* klass = class_linker_->FindClass(Thread::Current(),
+                                                    name.c_str(),
+                                                    class_loader_handle);
+    if (klass == nullptr) {
+      DCHECK(Thread::Current()->IsExceptionPending());
+      Thread::Current()->ClearException();
+    }
+    return klass;
+  }
+
+  void SetVerifierDeps(const std::vector<const DexFile*>& dex_files) {
+    verifier_deps_.reset(new verifier::VerifierDeps(dex_files));
+    VerifierDepsCompilerCallbacks* callbacks =
+        reinterpret_cast<VerifierDepsCompilerCallbacks*>(callbacks_.get());
+    callbacks->SetVerifierDeps(verifier_deps_.get());
   }
 
   void LoadDexFile(ScopedObjectAccess* soa) REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -72,16 +82,13 @@
     CHECK_EQ(dex_files.size(), 1u);
     dex_file_ = dex_files.front();
 
+    SetVerifierDeps(dex_files);
+
     mirror::ClassLoader* loader = soa->Decode<mirror::ClassLoader*>(class_loader_);
     class_linker_->RegisterDexFile(*dex_file_, loader);
 
     klass_Main_ = FindClassByName("LMain;", soa);
     CHECK(klass_Main_ != nullptr);
-
-    verifier_deps_.reset(new verifier::VerifierDeps(dex_files));
-    VerifierDepsCompilerCallbacks* callbacks =
-        reinterpret_cast<VerifierDepsCompilerCallbacks*>(callbacks_.get());
-    callbacks->SetVerifierDeps(verifier_deps_.get());
   }
 
   bool VerifyMethod(const std::string& method_name) {
@@ -138,15 +145,40 @@
     return !verifier.HasFailures();
   }
 
+  void VerifyDexFile() {
+    std::string error_msg;
+    ScopedObjectAccess soa(Thread::Current());
+
+    LoadDexFile(&soa);
+    SetVerifierDeps({ dex_file_ });
+
+    for (size_t i = 0; i < dex_file_->NumClassDefs(); i++) {
+      const char* descriptor = dex_file_->GetClassDescriptor(dex_file_->GetClassDef(i));
+      mirror::Class* klass = FindClassByName(descriptor, &soa);
+      if (klass != nullptr) {
+        MethodVerifier::VerifyClass(Thread::Current(),
+                                    klass,
+                                    nullptr,
+                                    true,
+                                    HardFailLogMode::kLogWarning,
+                                    &error_msg);
+      }
+    }
+  }
+
   bool TestAssignabilityRecording(const std::string& dst,
                                   const std::string& src,
                                   bool is_strict,
                                   bool is_assignable) {
     ScopedObjectAccess soa(Thread::Current());
     LoadDexFile(&soa);
+    mirror::Class* klass_dst = FindClassByName(dst, &soa);
+    DCHECK(klass_dst != nullptr);
+    mirror::Class* klass_src = FindClassByName(src, &soa);
+    DCHECK(klass_src != nullptr);
     verifier_deps_->AddAssignability(*dex_file_,
-                                     FindClassByName(dst, &soa),
-                                     FindClassByName(src, &soa),
+                                     klass_dst,
+                                     klass_src,
                                      is_strict,
                                      is_assignable);
     return true;
@@ -316,6 +348,34 @@
     return false;
   }
 
+  size_t NumberOfCompiledDexFiles() {
+    MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_);
+    return verifier_deps_->dex_deps_.size();
+  }
+
+  bool HasEachKindOfRecord() {
+    MutexLock mu(Thread::Current(), *Locks::verifier_deps_lock_);
+
+    bool has_strings = false;
+    bool has_assignability = false;
+    bool has_classes = false;
+    bool has_fields = false;
+    bool has_methods = false;
+
+    for (auto& entry : verifier_deps_->dex_deps_) {
+      has_strings |= !entry.second->strings_.empty();
+      has_assignability |= !entry.second->assignable_types_.empty();
+      has_assignability |= !entry.second->unassignable_types_.empty();
+      has_classes |= !entry.second->classes_.empty();
+      has_fields |= !entry.second->fields_.empty();
+      has_methods |= !entry.second->direct_methods_.empty();
+      has_methods |= !entry.second->virtual_methods_.empty();
+      has_methods |= !entry.second->interface_methods_.empty();
+    }
+
+    return has_strings && has_assignability && has_classes && has_fields && has_methods;
+  }
+
   std::unique_ptr<verifier::VerifierDeps> verifier_deps_;
   const DexFile* dex_file_;
   jobject class_loader_;
@@ -982,5 +1042,19 @@
       "virtual", "Ljava/lang/Integer;", "intValue", "()I", true, "public", "Ljava/lang/Integer;"));
 }
 
+TEST_F(VerifierDepsTest, EncodeDecode) {
+  VerifyDexFile();
+
+  ASSERT_EQ(1u, NumberOfCompiledDexFiles());
+  ASSERT_TRUE(HasEachKindOfRecord());
+
+  std::vector<uint8_t> buffer;
+  verifier_deps_->Encode(&buffer);
+  ASSERT_FALSE(buffer.empty());
+
+  VerifierDeps decoded_deps({ dex_file_ }, ArrayRef<uint8_t>(buffer));
+  ASSERT_TRUE(verifier_deps_->Equals(decoded_deps));
+}
+
 }  // namespace verifier
 }  // namespace art