Merge "MIPS64: Support short and long branches"
diff --git a/Android.mk b/Android.mk
index fcf70ff..0d0003a 100644
--- a/Android.mk
+++ b/Android.mk
@@ -122,6 +122,16 @@
 include $(art_path)/test/Android.run-test.mk
 include $(art_path)/benchmark/Android.mk
 
+TEST_ART_ADB_ROOT_AND_REMOUNT := \
+    (adb root && \
+     adb wait-for-device remount && \
+     ((adb shell touch /system/testfile && \
+       (adb shell rm /system/testfile || true)) || \
+      (adb disable-verity && \
+       adb reboot && \
+       adb wait-for-device root && \
+       adb wait-for-device remount)))
+
 # Sync test files to the target, depends upon all things that must be pushed to the target.
 .PHONY: test-art-target-sync
 # Check if we need to sync. In case ART_TEST_ANDROID_ROOT is not empty,
@@ -130,12 +140,11 @@
 ifneq ($(ART_TEST_NO_SYNC),true)
 ifeq ($(ART_TEST_ANDROID_ROOT),)
 test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
-	adb root
-	adb wait-for-device remount
+	$(TEST_ART_ADB_ROOT_AND_REMOUNT)
 	adb sync
 else
 test-art-target-sync: $(TEST_ART_TARGET_SYNC_DEPS)
-	adb root
+	$(TEST_ART_ADB_ROOT_AND_REMOUNT)
 	adb wait-for-device push $(ANDROID_PRODUCT_OUT)/system $(ART_TEST_ANDROID_ROOT)
 	adb push $(ANDROID_PRODUCT_OUT)/data /data
 endif
@@ -374,8 +383,7 @@
 
 .PHONY: oat-target-sync
 oat-target-sync: oat-target
-	adb root
-	adb wait-for-device remount
+	$(TEST_ART_ADB_ROOT_AND_REMOUNT)
 	adb sync
 
 ########################################################################
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 717403f..dcde5ab 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -29,7 +29,6 @@
   GetMethodSignature \
   Instrumentation \
   Interfaces \
-  LambdaInterfaces \
   Lookup \
   Main \
   MultiDex \
@@ -78,7 +77,6 @@
 ART_GTEST_oat_test_DEX_DEPS := Main
 ART_GTEST_object_test_DEX_DEPS := ProtoCompare ProtoCompare2 StaticsFromCode XandY
 ART_GTEST_proxy_test_DEX_DEPS := Interfaces
-ART_GTEST_lambda_proxy_test_DEX_DEPS := LambdaInterfaces
 ART_GTEST_reflection_test_DEX_DEPS := Main NonStaticLeafMethods StaticLeafMethods
 ART_GTEST_stub_test_DEX_DEPS := AllFields
 ART_GTEST_transaction_test_DEX_DEPS := Transaction
@@ -99,7 +97,6 @@
 
 # TODO: document why this is needed.
 ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
-ART_GTEST_lambda_proxy_test_HOST_DEPS := $(HOST_CORE_IMAGE_default_no-pic_64) $(HOST_CORE_IMAGE_default_no-pic_32)
 
 # The dexdump test requires an image and the dexdump utility.
 # TODO: rename into dexdump when migration completes
@@ -236,7 +233,6 @@
 
 COMPILER_GTEST_COMMON_SRC_FILES := \
   runtime/jni_internal_test.cc \
-  runtime/lambda_proxy_test.cc \
   runtime/proxy_test.cc \
   runtime/reflection_test.cc \
   compiler/compiled_method_test.cc \
@@ -745,7 +741,6 @@
 ART_GTEST_oat_file_assistant_test_TARGET_DEPS :=
 ART_GTEST_object_test_DEX_DEPS :=
 ART_GTEST_proxy_test_DEX_DEPS :=
-ART_GTEST_lambda_proxy_test_DEX_DEPS :=
 ART_GTEST_reflection_test_DEX_DEPS :=
 ART_GTEST_stub_test_DEX_DEPS :=
 ART_GTEST_transaction_test_DEX_DEPS :=
diff --git a/compiler/optimizing/boolean_simplifier.cc b/compiler/optimizing/boolean_simplifier.cc
index f985745..f0cafc8 100644
--- a/compiler/optimizing/boolean_simplifier.cc
+++ b/compiler/optimizing/boolean_simplifier.cc
@@ -61,40 +61,6 @@
       && input_false->IsIntConstant() && input_false->AsIntConstant()->IsOne();
 }
 
-// Returns an instruction with the opposite boolean value from 'cond'.
-static HInstruction* GetOppositeCondition(HInstruction* cond) {
-  HGraph* graph = cond->GetBlock()->GetGraph();
-  ArenaAllocator* allocator = graph->GetArena();
-
-  if (cond->IsCondition()) {
-    HInstruction* lhs = cond->InputAt(0);
-    HInstruction* rhs = cond->InputAt(1);
-    switch (cond->AsCondition()->GetOppositeCondition()) {  // get *opposite*
-      case kCondEQ: return new (allocator) HEqual(lhs, rhs);
-      case kCondNE: return new (allocator) HNotEqual(lhs, rhs);
-      case kCondLT: return new (allocator) HLessThan(lhs, rhs);
-      case kCondLE: return new (allocator) HLessThanOrEqual(lhs, rhs);
-      case kCondGT: return new (allocator) HGreaterThan(lhs, rhs);
-      case kCondGE: return new (allocator) HGreaterThanOrEqual(lhs, rhs);
-      case kCondB:  return new (allocator) HBelow(lhs, rhs);
-      case kCondBE: return new (allocator) HBelowOrEqual(lhs, rhs);
-      case kCondA:  return new (allocator) HAbove(lhs, rhs);
-      case kCondAE: return new (allocator) HAboveOrEqual(lhs, rhs);
-    }
-  } else if (cond->IsIntConstant()) {
-    HIntConstant* int_const = cond->AsIntConstant();
-    if (int_const->IsZero()) {
-      return graph->GetIntConstant(1);
-    } else {
-      DCHECK(int_const->IsOne());
-      return graph->GetIntConstant(0);
-    }
-  }
-  // General case when 'cond' is another instruction of type boolean,
-  // as verified by SSAChecker.
-  return new (allocator) HBooleanNot(cond);
-}
-
 void HBooleanSimplifier::TryRemovingBooleanSelection(HBasicBlock* block) {
   DCHECK(block->EndsWithIf());
 
@@ -126,10 +92,7 @@
 
   HInstruction* replacement;
   if (NegatesCondition(true_value, false_value)) {
-    replacement = GetOppositeCondition(if_condition);
-    if (replacement->GetBlock() == nullptr) {
-      block->InsertInstructionBefore(replacement, if_instruction);
-    }
+    replacement = graph_->InsertOppositeCondition(if_condition, if_instruction);
   } else if (PreservesCondition(true_value, false_value)) {
     replacement = if_condition;
   } else {
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 3257de1..32968a5 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -876,12 +876,78 @@
                       clinit_check);
 }
 
+bool HGraphBuilder::BuildNewInstance(uint16_t type_index, uint32_t dex_pc) {
+  bool finalizable;
+  bool can_throw = NeedsAccessCheck(type_index, &finalizable);
+
+  // Only the non-resolved entrypoint handles the finalizable class case. If we
+  // need access checks, then we haven't resolved the class and it may still
+  // be finalizable.
+  QuickEntrypointEnum entrypoint = (finalizable || can_throw)
+      ? kQuickAllocObject
+      : kQuickAllocObjectInitialized;
+
+  ScopedObjectAccess soa(Thread::Current());
+  StackHandleScope<3> hs(soa.Self());
+  Handle<mirror::DexCache> dex_cache(hs.NewHandle(
+      dex_compilation_unit_->GetClassLinker()->FindDexCache(
+          soa.Self(), *dex_compilation_unit_->GetDexFile())));
+  Handle<mirror::Class> resolved_class(hs.NewHandle(dex_cache->GetResolvedType(type_index)));
+  const DexFile& outer_dex_file = *outer_compilation_unit_->GetDexFile();
+  Handle<mirror::DexCache> outer_dex_cache(hs.NewHandle(
+      outer_compilation_unit_->GetClassLinker()->FindDexCache(soa.Self(), outer_dex_file)));
+
+  if (outer_dex_cache.Get() != dex_cache.Get()) {
+    // We currently do not support inlining allocations across dex files.
+    return false;
+  }
+
+  HLoadClass* load_class = new (arena_) HLoadClass(
+      graph_->GetCurrentMethod(),
+      type_index,
+      *dex_compilation_unit_->GetDexFile(),
+      IsOutermostCompilingClass(type_index),
+      dex_pc,
+      /*needs_access_check*/ can_throw);
+
+  current_block_->AddInstruction(load_class);
+  HInstruction* cls = load_class;
+  if (!IsInitialized(resolved_class, type_index)) {
+    cls = new (arena_) HClinitCheck(load_class, dex_pc);
+    current_block_->AddInstruction(cls);
+  }
+
+  current_block_->AddInstruction(new (arena_) HNewInstance(
+      cls,
+      graph_->GetCurrentMethod(),
+      dex_pc,
+      type_index,
+      *dex_compilation_unit_->GetDexFile(),
+      can_throw,
+      finalizable,
+      entrypoint));
+  return true;
+}
+
+bool HGraphBuilder::IsInitialized(Handle<mirror::Class> cls, uint16_t type_index) const {
+  if (cls.Get() == nullptr) {
+    return false;
+  }
+  if (GetOutermostCompilingClass() == cls.Get()) {
+    return true;
+  }
+  // TODO: find out why this check is needed.
+  bool is_in_dex_cache = compiler_driver_->CanAssumeTypeIsPresentInDexCache(
+      *outer_compilation_unit_->GetDexFile(), type_index);
+  return cls->IsInitialized() && is_in_dex_cache;
+}
+
 HClinitCheck* HGraphBuilder::ProcessClinitCheckForInvoke(
       uint32_t dex_pc,
       uint32_t method_idx,
       HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement) {
   ScopedObjectAccess soa(Thread::Current());
-  StackHandleScope<4> hs(soa.Self());
+  StackHandleScope<5> hs(soa.Self());
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(
       dex_compilation_unit_->GetClassLinker()->FindDexCache(
           soa.Self(), *dex_compilation_unit_->GetDexFile())));
@@ -927,13 +993,8 @@
     // whether we should add an explicit class initialization
     // check for its declaring class before the static method call.
 
-    // TODO: find out why this check is needed.
-    bool is_in_dex_cache = compiler_driver_->CanAssumeTypeIsPresentInDexCache(
-        *outer_compilation_unit_->GetDexFile(), storage_index);
-    bool is_initialized =
-        resolved_method->GetDeclaringClass()->IsInitialized() && is_in_dex_cache;
-
-    if (is_initialized) {
+    Handle<mirror::Class> cls(hs.NewHandle(resolved_method->GetDeclaringClass()));
+    if (IsInitialized(cls, storage_index)) {
       *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kNone;
     } else {
       *clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
@@ -1272,7 +1333,7 @@
   uint16_t field_index = instruction.VRegB_21c();
 
   ScopedObjectAccess soa(Thread::Current());
-  StackHandleScope<4> hs(soa.Self());
+  StackHandleScope<5> hs(soa.Self());
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(
       dex_compilation_unit_->GetClassLinker()->FindDexCache(
           soa.Self(), *dex_compilation_unit_->GetDexFile())));
@@ -1318,11 +1379,6 @@
     }
   }
 
-  // TODO: find out why this check is needed.
-  bool is_in_dex_cache = compiler_driver_->CanAssumeTypeIsPresentInDexCache(
-      *outer_compilation_unit_->GetDexFile(), storage_index);
-  bool is_initialized = resolved_field->GetDeclaringClass()->IsInitialized() && is_in_dex_cache;
-
   HLoadClass* constant = new (arena_) HLoadClass(graph_->GetCurrentMethod(),
                                                  storage_index,
                                                  *dex_compilation_unit_->GetDexFile(),
@@ -1332,12 +1388,14 @@
   current_block_->AddInstruction(constant);
 
   HInstruction* cls = constant;
-  if (!is_initialized && !is_outer_class) {
+
+  Handle<mirror::Class> klass(hs.NewHandle(resolved_field->GetDeclaringClass()));
+  if (!IsInitialized(klass, storage_index)) {
     cls = new (arena_) HClinitCheck(constant, dex_pc);
     current_block_->AddInstruction(cls);
   }
 
-  uint16_t class_def_index = resolved_field->GetDeclaringClass()->GetDexClassDefIndex();
+  uint16_t class_def_index = klass->GetDexClassDefIndex();
   if (is_put) {
     // We need to keep the class alive before loading the value.
     Temporaries temps(graph_);
@@ -2509,20 +2567,9 @@
         current_block_->AddInstruction(fake_string);
         UpdateLocal(register_index, fake_string, dex_pc);
       } else {
-        bool finalizable;
-        bool can_throw = NeedsAccessCheck(type_index, &finalizable);
-        QuickEntrypointEnum entrypoint = can_throw
-            ? kQuickAllocObjectWithAccessCheck
-            : kQuickAllocObject;
-
-        current_block_->AddInstruction(new (arena_) HNewInstance(
-            graph_->GetCurrentMethod(),
-            dex_pc,
-            type_index,
-            *dex_compilation_unit_->GetDexFile(),
-            can_throw,
-            finalizable,
-            entrypoint));
+        if (!BuildNewInstance(type_index, dex_pc)) {
+          return false;
+        }
         UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction(), dex_pc);
       }
       break;
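
With this change the allocation takes its class as a regular input. For a type that is not known to be initialized, BuildNewInstance emits IR of roughly the following shape (a sketch, not the exact node signatures):

    cls = HLoadClass(type_index)             // needs_access_check when can_throw
    chk = HClinitCheck(cls)                  // only when !IsInitialized(cls, type_index)
    obj = HNewInstance(chk, current_method, ...)

The entrypoint starts as kQuickAllocObjectInitialized and falls back to kQuickAllocObject when the class is finalizable or needs access checks, since only the non-resolved entrypoint handles those cases.
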
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index f857ef0..615b0cd 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -308,6 +308,14 @@
       uint32_t method_idx,
       HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement);
 
+  // Build a HNewInstance instruction.
+  bool BuildNewInstance(uint16_t type_index, uint32_t dex_pc);
+
+  // Return whether the compiler can assume `cls` is initialized. `type_index` is the index
+  // of the class in the outer dex file.
+  bool IsInitialized(Handle<mirror::Class> cls, uint16_t type_index) const
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
   ArenaAllocator* const arena_;
 
   // A list of the size of the dex code holding block information for
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index cb6bed0..461319e 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -3361,7 +3361,19 @@
             __ mov(o_l, ShifterOperand(high));
             __ LoadImmediate(o_h, 0);
           }
-        } else {  // shift_value < 32
+        } else if (shift_value == 1) {
+          if (op->IsShl()) {
+            __ Lsls(o_l, low, 1);
+            __ adc(o_h, high, ShifterOperand(high));
+          } else if (op->IsShr()) {
+            __ Asrs(o_h, high, 1);
+            __ Rrx(o_l, low);
+          } else {
+            __ Lsrs(o_h, high, 1);
+            __ Rrx(o_l, low);
+          }
+        } else {
+          DCHECK(2 <= shift_value && shift_value < 32) << shift_value;
           if (op->IsShl()) {
             __ Lsl(o_h, high, shift_value);
             __ orr(o_h, o_h, ShifterOperand(low, LSR, 32 - shift_value));
@@ -3413,14 +3425,12 @@
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
   InvokeRuntimeCallingConvention calling_convention;
-  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
   locations->SetOut(Location::RegisterLocation(R0));
 }
 
 void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
-  InvokeRuntimeCallingConvention calling_convention;
-  __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
   // Note: if heap poisoning is enabled, the entry point takes care
   // of poisoning the reference.
   codegen_->InvokeRuntime(instruction->GetEntrypoint(),
@@ -4320,7 +4330,7 @@
   if (needs_write_barrier) {
     // Temporary registers for the write barrier.
     locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
-    locations->AddTemp(Location::RequiresRegister());  // Possibly used for read barrier too.
+    locations->AddTemp(Location::RequiresRegister());
   }
 }
 
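The shift_value == 1 special case replaces the generic four-instruction shift/or sequence with two instructions that pass the crossing bit through the carry flag. For the 64-bit left shift by one (a sketch; Rlo/Rhi stand for the low/high word registers):

    LSLS Rlo, Rlo, #1    @ carry <- bit 31 of the low word
    ADC  Rhi, Rhi, Rhi   @ high <- 2 * high + carry

For the right shifts, ASRS/LSRS on the high word leaves the shifted-out bit in the carry, and RRX rotates it into bit 31 of the low word.
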
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 2776b7d..d82cb67 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1628,6 +1628,47 @@
          Operand(InputOperandAt(instruction, 1)));
 }
 
+void LocationsBuilderARM64::VisitArm64MultiplyAccumulate(HArm64MultiplyAccumulate* instr) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
+  locations->SetInAt(HArm64MultiplyAccumulate::kInputAccumulatorIndex,
+                     Location::RequiresRegister());
+  locations->SetInAt(HArm64MultiplyAccumulate::kInputMulLeftIndex, Location::RequiresRegister());
+  locations->SetInAt(HArm64MultiplyAccumulate::kInputMulRightIndex, Location::RequiresRegister());
+  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
+}
+
+void InstructionCodeGeneratorARM64::VisitArm64MultiplyAccumulate(HArm64MultiplyAccumulate* instr) {
+  Register res = OutputRegister(instr);
+  Register accumulator = InputRegisterAt(instr, HArm64MultiplyAccumulate::kInputAccumulatorIndex);
+  Register mul_left = InputRegisterAt(instr, HArm64MultiplyAccumulate::kInputMulLeftIndex);
+  Register mul_right = InputRegisterAt(instr, HArm64MultiplyAccumulate::kInputMulRightIndex);
+
+  // Avoid emitting code that could trigger Cortex A53's erratum 835769.
+  // This fixup should be carried out for all multiply-accumulate instructions:
+  // madd, msub, smaddl, smsubl, umaddl and umsubl.
+  if (instr->GetType() == Primitive::kPrimLong &&
+      codegen_->GetInstructionSetFeatures().NeedFixCortexA53_835769()) {
+    MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen_)->GetVIXLAssembler();
+    vixl::Instruction* prev = masm->GetCursorAddress<vixl::Instruction*>() - vixl::kInstructionSize;
+    if (prev->IsLoadOrStore()) {
+      // Make sure we emit exactly one nop.
+      vixl::CodeBufferCheckScope scope(masm,
+                                       vixl::kInstructionSize,
+                                       vixl::CodeBufferCheckScope::kCheck,
+                                       vixl::CodeBufferCheckScope::kExactSize);
+      __ nop();
+    }
+  }
+
+  if (instr->GetOpKind() == HInstruction::kAdd) {
+    __ Madd(res, mul_left, mul_right, accumulator);
+  } else {
+    DCHECK(instr->GetOpKind() == HInstruction::kSub);
+    __ Msub(res, mul_left, mul_right, accumulator);
+  }
+}
+
 void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
@@ -3372,17 +3413,13 @@
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
   InvokeRuntimeCallingConvention calling_convention;
-  locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
-  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
   locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
   CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
 }
 
 void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
-  DCHECK(type_index.Is(w0));
-  __ Mov(type_index, instruction->GetTypeIndex());
   // Note: if heap poisoning is enabled, the entry point takes care
   // of poisoning the reference.
   codegen_->InvokeRuntime(instruction->GetEntrypoint(),
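
Cortex-A53 erratum 835769 is triggered by a 64-bit multiply-accumulate that directly follows a memory access, so the visitor above inspects the previously emitted instruction and pads with a nop when needed. The resulting sequence looks like (registers illustrative):

    ldr  x1, [x2]
    nop                     // emitted only when the preceding instruction is a load/store
    madd x0, x3, x4, x5
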
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 801e203..f3178bd 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -3478,17 +3478,12 @@
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
   InvokeRuntimeCallingConvention calling_convention;
-  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
   locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
 }
 
 void InstructionCodeGeneratorMIPS::VisitNewInstance(HNewInstance* instruction) {
-  InvokeRuntimeCallingConvention calling_convention;
-  Register current_method_register = calling_convention.GetRegisterAt(1);
-  __ Lw(current_method_register, SP, kCurrentMethodStackOffset);
-  // Move an uint16_t value to a register.
-  __ LoadConst32(calling_convention.GetRegisterAt(0), instruction->GetTypeIndex());
   codegen_->InvokeRuntime(
       GetThreadOffset<kMipsWordSize>(instruction->GetEntrypoint()).Int32Value(),
       instruction,
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 8530fe7..802c435 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -3270,15 +3270,12 @@
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
   InvokeRuntimeCallingConvention calling_convention;
-  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
   locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
 }
 
 void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) {
-  LocationSummary* locations = instruction->GetLocations();
-  // Move an uint16_t value to a register.
-  __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
   codegen_->InvokeRuntime(instruction->GetEntrypoint(),
                           instruction,
                           instruction->GetDexPc(),
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a87e8ed..6a9177d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -3769,13 +3769,11 @@
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
   locations->SetOut(Location::RegisterLocation(EAX));
   InvokeRuntimeCallingConvention calling_convention;
-  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
 }
 
 void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) {
-  InvokeRuntimeCallingConvention calling_convention;
-  __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction->GetTypeIndex()));
   // Note: if heap poisoning is enabled, the entry point takes care
   // of poisoning the reference.
   codegen_->InvokeRuntime(instruction->GetEntrypoint(),
@@ -4856,7 +4854,7 @@
     // Temporary registers for the write barrier.
     locations->AddTemp(Location::RequiresRegister());  // Possibly used for ref. poisoning too.
     // Ensure the card is in a byte register.
-    locations->AddTemp(Location::RegisterLocation(ECX));  // Possibly used for read barrier too.
+    locations->AddTemp(Location::RegisterLocation(ECX));
   }
 }
 
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index dcc1808..8cfd8cb 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -3765,18 +3765,14 @@
   LocationSummary* locations =
       new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
   InvokeRuntimeCallingConvention calling_convention;
-  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
-  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
   locations->SetOut(Location::RegisterLocation(RAX));
 }
 
 void InstructionCodeGeneratorX86_64::VisitNewInstance(HNewInstance* instruction) {
-  InvokeRuntimeCallingConvention calling_convention;
-  codegen_->Load64BitValue(CpuRegister(calling_convention.GetRegisterAt(0)),
-                           instruction->GetTypeIndex());
   // Note: if heap poisoning is enabled, the entry point takes care
   // of poisoning the reference.
-
   codegen_->InvokeRuntime(instruction->GetEntrypoint(),
                           instruction,
                           instruction->GetDexPc(),
@@ -4500,8 +4496,6 @@
     // This first temporary register is possibly used for heap
     // reference poisoning and/or read barrier emission too.
     locations->AddTemp(Location::RequiresRegister());
-    // This second temporary register is possibly used for read
-    // barrier emission too.
     locations->AddTemp(Location::RequiresRegister());
   }
 }
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index d166d00..4438190 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -422,6 +422,12 @@
     StartAttributeStream("kind") << (try_boundary->IsEntry() ? "entry" : "exit");
   }
 
+#ifdef ART_ENABLE_CODEGEN_arm64
+  void VisitArm64MultiplyAccumulate(HArm64MultiplyAccumulate* instruction) OVERRIDE {
+    StartAttributeStream("kind") << instruction->GetOpKind();
+  }
+#endif
+
   bool IsPass(const char* name) {
     return strcmp(pass_name_, name) == 0;
   }
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index c36de84..4af111b 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -377,9 +377,10 @@
 
   HInstruction* current = block->GetFirstInstruction();
   while (current != nullptr) {
-    set->Kill(current->GetSideEffects());
     // Save the next instruction in case `current` is removed from the graph.
     HInstruction* next = current->GetNext();
+    // Do not kill the set with the side effects of the instruction just yet: if
+    // the instruction is GVN'ed away, there is nothing to kill.
     if (current->CanBeMoved()) {
       if (current->IsBinaryOperation() && current->AsBinaryOperation()->IsCommutative()) {
         // For commutative ops, (x op y) will be treated the same as (y op x)
@@ -395,8 +396,11 @@
         current->ReplaceWith(existing);
         current->GetBlock()->RemoveInstruction(current);
       } else {
+        set->Kill(current->GetSideEffects());
         set->Add(current);
       }
+    } else {
+      set->Kill(current->GetSideEffects());
     }
     current = next;
   }
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 9ad2dd1..2f3df7f 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -169,16 +169,6 @@
       //    src
       instruction->ReplaceWith(input_other);
       instruction->GetBlock()->RemoveInstruction(instruction);
-    } else if (instruction->IsShl() && input_cst->IsOne()) {
-      // Replace Shl looking like
-      //    SHL dst, src, 1
-      // with
-      //    ADD dst, src, src
-      HAdd *add = new(GetGraph()->GetArena()) HAdd(instruction->GetType(),
-                                                   input_other,
-                                                   input_other);
-      instruction->GetBlock()->ReplaceAndRemoveInstructionWith(instruction, add);
-      RecordSimplification();
     }
   }
 }
@@ -372,9 +362,8 @@
         block->RemoveInstruction(equal);
         RecordSimplification();
       } else if (input_const->AsIntConstant()->IsZero()) {
-        // Replace (bool_value == false) with !bool_value
-        block->ReplaceAndRemoveInstructionWith(
-            equal, new (block->GetGraph()->GetArena()) HBooleanNot(input_value));
+        equal->ReplaceWith(GetGraph()->InsertOppositeCondition(input_value, equal));
+        block->RemoveInstruction(equal);
         RecordSimplification();
       } else {
         // Replace (bool_value == integer_not_zero_nor_one_constant) with false
@@ -399,9 +388,8 @@
       // We are comparing the boolean to a constant which is of type int and can
       // be any constant.
       if (input_const->AsIntConstant()->IsOne()) {
-        // Replace (bool_value != true) with !bool_value
-        block->ReplaceAndRemoveInstructionWith(
-            not_equal, new (block->GetGraph()->GetArena()) HBooleanNot(input_value));
+        not_equal->ReplaceWith(GetGraph()->InsertOppositeCondition(input_value, not_equal));
+        block->RemoveInstruction(not_equal);
         RecordSimplification();
       } else if (input_const->AsIntConstant()->IsZero()) {
         // Replace (bool_value != false) with bool_value
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index eb79f46..54dd2cc 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -62,6 +62,67 @@
   RecordSimplification();
 }
 
+bool InstructionSimplifierArm64Visitor::TrySimpleMultiplyAccumulatePatterns(
+    HMul* mul, HBinaryOperation* input_binop, HInstruction* input_other) {
+  DCHECK(Primitive::IsIntOrLongType(mul->GetType()));
+  DCHECK(input_binop->IsAdd() || input_binop->IsSub());
+  DCHECK_NE(input_binop, input_other);
+  if (!input_binop->HasOnlyOneNonEnvironmentUse()) {
+    return false;
+  }
+
+  // Try to interpret patterns like
+  //    a * (b <+/-> 1)
+  // as
+  //    (a * b) <+/-> a
+  HInstruction* input_a = input_other;
+  HInstruction* input_b = nullptr;  // Set to a non-null value if we found a pattern to optimize.
+  HInstruction::InstructionKind op_kind;
+
+  if (input_binop->IsAdd()) {
+    if ((input_binop->GetConstantRight() != nullptr) && input_binop->GetConstantRight()->IsOne()) {
+      // Interpret
+      //    a * (b + 1)
+      // as
+      //    (a * b) + a
+      input_b = input_binop->GetLeastConstantLeft();
+      op_kind = HInstruction::kAdd;
+    }
+  } else {
+    DCHECK(input_binop->IsSub());
+    if (input_binop->GetRight()->IsConstant() &&
+        input_binop->GetRight()->AsConstant()->IsMinusOne()) {
+      // Interpret
+      //    a * (b - (-1))
+      // as
+      //    a + (a * b)
+      input_b = input_binop->GetLeft();
+      op_kind = HInstruction::kAdd;
+    } else if (input_binop->GetLeft()->IsConstant() &&
+               input_binop->GetLeft()->AsConstant()->IsOne()) {
+      // Interpret
+      //    a * (1 - b)
+      // as
+      //    a - (a * b)
+      input_b = input_binop->GetRight();
+      op_kind = HInstruction::kSub;
+    }
+  }
+
+  if (input_b == nullptr) {
+    // We did not find a pattern we can optimize.
+    return false;
+  }
+
+  HArm64MultiplyAccumulate* mulacc = new(GetGraph()->GetArena()) HArm64MultiplyAccumulate(
+      mul->GetType(), op_kind, input_a, input_a, input_b, mul->GetDexPc());
+
+  mul->GetBlock()->ReplaceAndRemoveInstructionWith(mul, mulacc);
+  input_binop->GetBlock()->RemoveInstruction(input_binop);
+
+  return true;
+}
+
 void InstructionSimplifierArm64Visitor::VisitArrayGet(HArrayGet* instruction) {
   TryExtractArrayAccessAddress(instruction,
                                instruction->GetArray(),
@@ -76,5 +137,78 @@
                                Primitive::ComponentSize(instruction->GetComponentType()));
 }
 
+void InstructionSimplifierArm64Visitor::VisitMul(HMul* instruction) {
+  Primitive::Type type = instruction->GetType();
+  if (!Primitive::IsIntOrLongType(type)) {
+    return;
+  }
+
+  HInstruction* use = instruction->HasNonEnvironmentUses()
+      ? instruction->GetUses().GetFirst()->GetUser()
+      : nullptr;
+
+  if (instruction->HasOnlyOneNonEnvironmentUse() && (use->IsAdd() || use->IsSub())) {
+    // Replace code looking like
+    //    MUL tmp, x, y
+    //    SUB dst, acc, tmp
+    // with
+    //    MULSUB dst, acc, x, y
+    // Note that we do not want to (unconditionally) perform the merge when the
+    // multiplication has multiple uses and it can be merged in all of them.
+    // Multiple uses could happen on the same control-flow path, and we would
+    // then increase the amount of work. In the future we could try to evaluate
+    // whether all uses are on different control-flow paths (using dominance and
+    // reverse-dominance information) and only perform the merge when they are.
+    HInstruction* accumulator = nullptr;
+    HBinaryOperation* binop = use->AsBinaryOperation();
+    HInstruction* binop_left = binop->GetLeft();
+    HInstruction* binop_right = binop->GetRight();
+    // Be careful after GVN. This should not happen since the `HMul` has only
+    // one use.
+    DCHECK_NE(binop_left, binop_right);
+    if (binop_right == instruction) {
+      accumulator = binop_left;
+    } else if (use->IsAdd()) {
+      DCHECK_EQ(binop_left, instruction);
+      accumulator = binop_right;
+    }
+
+    if (accumulator != nullptr) {
+      HArm64MultiplyAccumulate* mulacc =
+          new (GetGraph()->GetArena()) HArm64MultiplyAccumulate(type,
+                                                                binop->GetKind(),
+                                                                accumulator,
+                                                                instruction->GetLeft(),
+                                                                instruction->GetRight());
+
+      binop->GetBlock()->ReplaceAndRemoveInstructionWith(binop, mulacc);
+      DCHECK(!instruction->HasUses());
+      instruction->GetBlock()->RemoveInstruction(instruction);
+      RecordSimplification();
+      return;
+    }
+  }
+
+  // Use multiply accumulate instruction for a few simple patterns.
+  // We prefer not applying the following transformations if the left and
+  // right inputs perform the same operation.
+  // We rely on GVN having squashed the inputs if appropriate. However the
+  // results are still correct even if that did not happen.
+  if (instruction->GetLeft() == instruction->GetRight()) {
+    return;
+  }
+
+  HInstruction* left = instruction->GetLeft();
+  HInstruction* right = instruction->GetRight();
+  if ((right->IsAdd() || right->IsSub()) &&
+      TrySimpleMultiplyAccumulatePatterns(instruction, right->AsBinaryOperation(), left)) {
+    return;
+  }
+  if ((left->IsAdd() || left->IsSub()) &&
+      TrySimpleMultiplyAccumulatePatterns(instruction, left->AsBinaryOperation(), right)) {
+    return;
+  }
+}
+
 }  // namespace arm64
 }  // namespace art
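
VisitMul merges a single-use multiply into its adding or subtracting user, which maps directly onto the A64 madd/msub forms. In generated-code terms (register names illustrative):

    // before                        // after
    mul  w3, w1, w2                  madd w4, w1, w2, w0    // w4 = w0 + w1 * w2
    add  w4, w0, w3

TrySimpleMultiplyAccumulatePatterns handles the constant variants instead: for example a * (b + 1) becomes a multiply-accumulate with a serving as both the accumulator and one multiplicand, i.e. (a * b) + a.
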
diff --git a/compiler/optimizing/instruction_simplifier_arm64.h b/compiler/optimizing/instruction_simplifier_arm64.h
index 4b697db..eed2276 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.h
+++ b/compiler/optimizing/instruction_simplifier_arm64.h
@@ -40,8 +40,14 @@
                                     HInstruction* index,
                                     int access_size);
 
+  bool TrySimpleMultiplyAccumulatePatterns(HMul* mul,
+                                           HBinaryOperation* input_binop,
+                                           HInstruction* input_other);
+
+  // HInstruction visitors, sorted alphabetically.
   void VisitArrayGet(HArrayGet* instruction) OVERRIDE;
   void VisitArraySet(HArraySet* instruction) OVERRIDE;
+  void VisitMul(HMul* instruction) OVERRIDE;
 
   OptimizingCompilerStats* stats_;
 };
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 0a39ff3..890598d 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2090,4 +2090,46 @@
   env_uses_.Clear();
 }
 
+// Returns an instruction with the opposite boolean value from 'cond'.
+HInstruction* HGraph::InsertOppositeCondition(HInstruction* cond, HInstruction* cursor) {
+  ArenaAllocator* allocator = GetArena();
+
+  // Floating point conditions cannot be reversed (because of NaN); those use HBooleanNot below.
+  if (cond->IsCondition() &&
+      !Primitive::IsFloatingPointType(cond->InputAt(0)->GetType())) {
+    HInstruction* lhs = cond->InputAt(0);
+    HInstruction* rhs = cond->InputAt(1);
+    HInstruction* replacement = nullptr;
+    switch (cond->AsCondition()->GetOppositeCondition()) {  // get *opposite*
+      case kCondEQ: replacement = new (allocator) HEqual(lhs, rhs); break;
+      case kCondNE: replacement = new (allocator) HNotEqual(lhs, rhs); break;
+      case kCondLT: replacement = new (allocator) HLessThan(lhs, rhs); break;
+      case kCondLE: replacement = new (allocator) HLessThanOrEqual(lhs, rhs); break;
+      case kCondGT: replacement = new (allocator) HGreaterThan(lhs, rhs); break;
+      case kCondGE: replacement = new (allocator) HGreaterThanOrEqual(lhs, rhs); break;
+      case kCondB:  replacement = new (allocator) HBelow(lhs, rhs); break;
+      case kCondBE: replacement = new (allocator) HBelowOrEqual(lhs, rhs); break;
+      case kCondA:  replacement = new (allocator) HAbove(lhs, rhs); break;
+      case kCondAE: replacement = new (allocator) HAboveOrEqual(lhs, rhs); break;
+      default:
+        LOG(FATAL) << "Unexpected condition";
+        UNREACHABLE();
+    }
+    cursor->GetBlock()->InsertInstructionBefore(replacement, cursor);
+    return replacement;
+  } else if (cond->IsIntConstant()) {
+    HIntConstant* int_const = cond->AsIntConstant();
+    if (int_const->IsZero()) {
+      return GetIntConstant(1);
+    } else {
+      DCHECK(int_const->IsOne());
+      return GetIntConstant(0);
+    }
+  } else {
+    HInstruction* replacement = new (allocator) HBooleanNot(cond);
+    cursor->GetBlock()->InsertInstructionBefore(replacement, cursor);
+    return replacement;
+  }
+}
+
 }  // namespace art
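
The floating point restriction in InsertOppositeCondition exists because of NaN: structurally reversing a comparison is not a logical negation once NaN is involved, so such conditions are wrapped in HBooleanNot instead. For example:

    a = NaN, b = 1.0
    a < b   -> false, so !(a < b) -> true
    a >= b  -> false                 // not the opposite of a < b
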
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 4f894b0..1bd626f 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -371,6 +371,11 @@
   bool HasTryCatch() const { return has_try_catch_; }
   void SetHasTryCatch(bool value) { has_try_catch_ = value; }
 
+  // Returns an instruction with the opposite boolean value from 'cond'.
+  // The instruction has been inserted into the graph, either as a constant, or
+  // before cursor.
+  HInstruction* InsertOppositeCondition(HInstruction* cond, HInstruction* cursor);
+
  private:
   void FindBackEdges(ArenaBitVector* visited);
   void RemoveInstructionsAsUsersFromDeadBlocks(const ArenaBitVector& visited) const;
@@ -1096,7 +1101,8 @@
 #define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)
 #else
 #define FOR_EACH_CONCRETE_INSTRUCTION_ARM64(M)                          \
-  M(Arm64IntermediateAddress, Instruction)
+  M(Arm64IntermediateAddress, Instruction)                              \
+  M(Arm64MultiplyAccumulate, Instruction)
 #endif
 
 #define FOR_EACH_CONCRETE_INSTRUCTION_MIPS(M)
@@ -1626,6 +1632,11 @@
     return holder_;
   }
 
+
+  bool IsFromInlinedInvoke() const {
+    return GetParent() != nullptr;
+  }
+
  private:
   // Record instructions' use entries of this environment for constant-time removal.
   // It should only be called by HInstruction when a new environment use is added.
@@ -3238,7 +3249,7 @@
   void SetIntrinsic(Intrinsics intrinsic, IntrinsicNeedsEnvironmentOrCache needs_env_or_cache);
 
   bool IsFromInlinedInvoke() const {
-    return GetEnvironment()->GetParent() != nullptr;
+    return GetEnvironment()->IsFromInlinedInvoke();
   }
 
   bool CanThrow() const OVERRIDE { return true; }
@@ -3652,9 +3663,10 @@
   DISALLOW_COPY_AND_ASSIGN(HInvokeInterface);
 };
 
-class HNewInstance : public HExpression<1> {
+class HNewInstance : public HExpression<2> {
  public:
-  HNewInstance(HCurrentMethod* current_method,
+  HNewInstance(HInstruction* cls,
+               HCurrentMethod* current_method,
                uint32_t dex_pc,
                uint16_t type_index,
                const DexFile& dex_file,
@@ -3667,7 +3679,8 @@
         can_throw_(can_throw),
         finalizable_(finalizable),
         entrypoint_(entrypoint) {
-    SetRawInputAt(0, current_method);
+    SetRawInputAt(0, cls);
+    SetRawInputAt(1, current_method);
   }
 
   uint16_t GetTypeIndex() const { return type_index_; }
@@ -3687,6 +3700,10 @@
 
   QuickEntrypointEnum GetEntrypoint() const { return entrypoint_; }
 
+  void SetEntrypoint(QuickEntrypointEnum entrypoint) {
+    entrypoint_ = entrypoint;
+  }
+
   DECLARE_INSTRUCTION(NewInstance);
 
  private:
@@ -3694,7 +3711,7 @@
   const DexFile& dex_file_;
   const bool can_throw_;
   const bool finalizable_;
-  const QuickEntrypointEnum entrypoint_;
+  QuickEntrypointEnum entrypoint_;
 
   DISALLOW_COPY_AND_ASSIGN(HNewInstance);
 };
@@ -4302,9 +4319,13 @@
       : HInstruction(SideEffects::None(), dex_pc),
         inputs_(number_of_inputs, arena->Adapter(kArenaAllocPhiInputs)),
         reg_number_(reg_number),
-        type_(type),
-        is_live_(false),
+        type_(ToPhiType(type)),
+        // Phis are constructed live and marked dead if conflicting or unused.
+        // Individual steps of SsaBuilder should assume that if a phi has been
+        // marked dead, it can be ignored and will be removed by SsaPhiElimination.
+        is_live_(true),
         can_be_null_(true) {
+    DCHECK_NE(type_, Primitive::kPrimVoid);
   }
 
   // Returns a type equivalent to the given `type`, but that a `HPhi` can hold.
@@ -4927,6 +4948,7 @@
     return true;
   }
 
+  bool CanThrow() const OVERRIDE { return true; }
 
   HLoadClass* GetLoadClass() const { return InputAt(0)->AsLoadClass(); }
 
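An HNewInstance therefore now carries two inputs (a summary of the layout, not code from the patch):

    input 0: the class - an HLoadClass, an HClinitCheck wrapping it, or an
             HIntConstant type index once PrepareForRegisterAllocation has run
    input 1: the HCurrentMethod

The entrypoint field is no longer const so that later passes can switch the allocation path via SetEntrypoint.
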
diff --git a/compiler/optimizing/nodes_arm64.h b/compiler/optimizing/nodes_arm64.h
index 885d3a2..d07f019 100644
--- a/compiler/optimizing/nodes_arm64.h
+++ b/compiler/optimizing/nodes_arm64.h
@@ -42,6 +42,40 @@
   DISALLOW_COPY_AND_ASSIGN(HArm64IntermediateAddress);
 };
 
+class HArm64MultiplyAccumulate : public HExpression<3> {
+ public:
+  HArm64MultiplyAccumulate(Primitive::Type type,
+                           InstructionKind op,
+                           HInstruction* accumulator,
+                           HInstruction* mul_left,
+                           HInstruction* mul_right,
+                           uint32_t dex_pc = kNoDexPc)
+      : HExpression(type, SideEffects::None(), dex_pc), op_kind_(op) {
+    SetRawInputAt(kInputAccumulatorIndex, accumulator);
+    SetRawInputAt(kInputMulLeftIndex, mul_left);
+    SetRawInputAt(kInputMulRightIndex, mul_right);
+  }
+
+  static constexpr int kInputAccumulatorIndex = 0;
+  static constexpr int kInputMulLeftIndex = 1;
+  static constexpr int kInputMulRightIndex = 2;
+
+  bool CanBeMoved() const OVERRIDE { return true; }
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+    return op_kind_ == other->AsArm64MultiplyAccumulate()->op_kind_;
+  }
+
+  InstructionKind GetOpKind() const { return op_kind_; }
+
+  DECLARE_INSTRUCTION(Arm64MultiplyAccumulate);
+
+ private:
+  // Indicates if this is a MADD or MSUB.
+  InstructionKind op_kind_;
+
+  DISALLOW_COPY_AND_ASSIGN(HArm64MultiplyAccumulate);
+};
+
 }  // namespace art
 
 #endif  // ART_COMPILER_OPTIMIZING_NODES_ARM64_H_
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 2204921..dec08d8 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -110,24 +110,23 @@
 class PassObserver : public ValueObject {
  public:
   PassObserver(HGraph* graph,
-               const char* method_name,
                CodeGenerator* codegen,
                std::ostream* visualizer_output,
                CompilerDriver* compiler_driver)
       : graph_(graph),
-        method_name_(method_name),
+        cached_method_name_(),
         timing_logger_enabled_(compiler_driver->GetDumpPasses()),
-        timing_logger_(method_name, true, true),
+        timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true),
         disasm_info_(graph->GetArena()),
         visualizer_enabled_(!compiler_driver->GetDumpCfgFileName().empty()),
         visualizer_(visualizer_output, graph, *codegen),
         graph_in_bad_state_(false) {
     if (timing_logger_enabled_ || visualizer_enabled_) {
-      if (!IsVerboseMethod(compiler_driver, method_name)) {
+      if (!IsVerboseMethod(compiler_driver, GetMethodName())) {
         timing_logger_enabled_ = visualizer_enabled_ = false;
       }
       if (visualizer_enabled_) {
-        visualizer_.PrintHeader(method_name_);
+        visualizer_.PrintHeader(GetMethodName());
         codegen->SetDisassemblyInformation(&disasm_info_);
       }
     }
@@ -135,7 +134,7 @@
 
   ~PassObserver() {
     if (timing_logger_enabled_) {
-      LOG(INFO) << "TIMINGS " << method_name_;
+      LOG(INFO) << "TIMINGS " << GetMethodName();
       LOG(INFO) << Dumpable<TimingLogger>(timing_logger_);
     }
   }
@@ -148,6 +147,14 @@
 
   void SetGraphInBadState() { graph_in_bad_state_ = true; }
 
+  const char* GetMethodName() {
+    // PrettyMethod() is expensive, so we delay calling it until we actually have to.
+    if (cached_method_name_.empty()) {
+      cached_method_name_ = PrettyMethod(graph_->GetMethodIdx(), graph_->GetDexFile());
+    }
+    return cached_method_name_.c_str();
+  }
+
  private:
   void StartPass(const char* pass_name) {
     // Dump graph first, then start timer.
@@ -206,7 +213,8 @@
   }
 
   HGraph* const graph_;
-  const char* method_name_;
+
+  std::string cached_method_name_;
 
   bool timing_logger_enabled_;
   TimingLogger timing_logger_;
@@ -664,7 +672,6 @@
                                               jobject class_loader,
                                               const DexFile& dex_file,
                                               Handle<mirror::DexCache> dex_cache) const {
-  std::string method_name = PrettyMethod(method_idx, dex_file);
   MaybeRecordStat(MethodCompilationStat::kAttemptCompilation);
   CompilerDriver* compiler_driver = GetCompilerDriver();
   InstructionSet instruction_set = compiler_driver->GetInstructionSet();
@@ -728,7 +735,6 @@
       compiler_driver->GetCompilerOptions().GetGenerateDebugInfo());
 
   PassObserver pass_observer(graph,
-                             method_name.c_str(),
                              codegen.get(),
                              visualizer_output_.get(),
                              compiler_driver);
@@ -756,7 +762,7 @@
                         interpreter_metadata,
                         dex_cache);
 
-  VLOG(compiler) << "Building " << method_name;
+  VLOG(compiler) << "Building " << pass_observer.GetMethodName();
 
   {
     PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
@@ -766,13 +772,14 @@
     }
   }
 
-  VLOG(compiler) << "Optimizing " << method_name;
+  VLOG(compiler) << "Optimizing " << pass_observer.GetMethodName();
   if (run_optimizations_) {
     {
       PassScope scope(SsaBuilder::kSsaBuilderPassName, &pass_observer);
       if (!graph->TryBuildingSsa()) {
         // We could not transform the graph to SSA, bailout.
-        LOG(INFO) << "Skipping compilation of " << method_name << ": it contains a non natural loop";
+        LOG(INFO) << "Skipping compilation of " << pass_observer.GetMethodName()
+            << ": it contains a non natural loop";
         MaybeRecordStat(MethodCompilationStat::kNotCompiledCannotBuildSSA);
         pass_observer.SetGraphInBadState();
         return nullptr;
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index f3d075c..d1770b7 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -48,22 +48,34 @@
 }
 
 void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) {
-  // Try to find a static invoke from which this check originated.
-  HInvokeStaticOrDirect* invoke = nullptr;
+  // Try to find a static invoke or a new-instance from which this check originated.
+  HInstruction* implicit_clinit = nullptr;
   for (HUseIterator<HInstruction*> it(check->GetUses()); !it.Done(); it.Advance()) {
     HInstruction* user = it.Current()->GetUser();
-    if (user->IsInvokeStaticOrDirect() && CanMoveClinitCheck(check, user)) {
-      invoke = user->AsInvokeStaticOrDirect();
-      DCHECK(invoke->IsStaticWithExplicitClinitCheck());
-      invoke->RemoveExplicitClinitCheck(HInvokeStaticOrDirect::ClinitCheckRequirement::kImplicit);
+    if ((user->IsInvokeStaticOrDirect() || user->IsNewInstance()) &&
+        CanMoveClinitCheck(check, user)) {
+      implicit_clinit = user;
+      if (user->IsInvokeStaticOrDirect()) {
+        DCHECK(user->AsInvokeStaticOrDirect()->IsStaticWithExplicitClinitCheck());
+        user->AsInvokeStaticOrDirect()->RemoveExplicitClinitCheck(
+            HInvokeStaticOrDirect::ClinitCheckRequirement::kImplicit);
+      } else {
+        DCHECK(user->IsNewInstance());
+        // We delegate the initialization duty to the allocation.
+        if (user->AsNewInstance()->GetEntrypoint() == kQuickAllocObjectInitialized) {
+          user->AsNewInstance()->SetEntrypoint(kQuickAllocObjectResolved);
+        }
+      }
       break;
     }
   }
-  // If we found a static invoke for merging, remove the check from all other static invokes.
-  if (invoke != nullptr) {
+  // If we found a static invoke or new-instance for merging, remove the check
+  // from dominated static invokes.
+  if (implicit_clinit != nullptr) {
     for (HUseIterator<HInstruction*> it(check->GetUses()); !it.Done(); ) {
       HInstruction* user = it.Current()->GetUser();
-      DCHECK(invoke->StrictlyDominates(user));  // All other uses must be dominated.
+      // All other uses must be dominated.
+      DCHECK(implicit_clinit->StrictlyDominates(user) || (implicit_clinit == user));
       it.Advance();  // Advance before we remove the node, reference to the next node is preserved.
       if (user->IsInvokeStaticOrDirect()) {
         user->AsInvokeStaticOrDirect()->RemoveExplicitClinitCheck(
@@ -77,8 +89,8 @@
 
   check->ReplaceWith(load_class);
 
-  if (invoke != nullptr) {
-    // Remove the check from the graph. It has been merged into the invoke.
+  if (implicit_clinit != nullptr) {
+    // Remove the check from the graph. It has been merged into the invoke or new-instance.
     check->GetBlock()->RemoveInstruction(check);
     // Check if we can merge the load class as well.
     if (can_merge_with_load_class && !load_class->HasUses()) {
@@ -92,6 +104,29 @@
   }
 }
 
+void PrepareForRegisterAllocation::VisitNewInstance(HNewInstance* instruction) {
+  HLoadClass* load_class = instruction->InputAt(0)->AsLoadClass();
+  bool has_only_one_use = load_class->HasOnlyOneNonEnvironmentUse();
+  // Change the entrypoint to kQuickAllocObject if any of the following holds:
+  // - the class is finalizable (only kQuickAllocObject handles finalizable classes),
+  // - the class needs access checks (we do not know if it's finalizable),
+  // - or the load class has only one use.
+  if (instruction->IsFinalizable() || has_only_one_use || load_class->NeedsAccessCheck()) {
+    instruction->SetEntrypoint(kQuickAllocObject);
+    instruction->ReplaceInput(GetGraph()->GetIntConstant(load_class->GetTypeIndex()), 0);
+    // The allocation entry point that deals with access checks does not work with inlined
+    // methods, so we need to check whether this allocation comes from an inlined method.
+    if (has_only_one_use && !instruction->GetEnvironment()->IsFromInlinedInvoke()) {
+      // We can remove the load class from the graph. If it needed access checks, we delegate
+      // the access check to the allocation.
+      if (load_class->NeedsAccessCheck()) {
+        instruction->SetEntrypoint(kQuickAllocObjectWithAccessCheck);
+      }
+      load_class->GetBlock()->RemoveInstruction(load_class);
+    }
+  }
+}
+
 void PrepareForRegisterAllocation::VisitCondition(HCondition* condition) {
   bool needs_materialization = false;
   if (!condition->GetUses().HasOnlyOneUse() || !condition->GetEnvUses().IsEmpty()) {
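
Combined with VisitClinitCheck above, the allocation entrypoint settles as follows (a summary of the pass's effect, not code from the patch):

    kQuickAllocObjectInitialized      class statically known to be initialized
    kQuickAllocObjectResolved         a dominating HClinitCheck was merged into the allocation
    kQuickAllocObject                 finalizable class, access checks needed, or single-use HLoadClass
    kQuickAllocObjectWithAccessCheck  single-use HLoadClass removed while still needing its access check
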
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index a70fb30..9b24342 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -40,6 +40,7 @@
   void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
   void VisitCondition(HCondition* condition) OVERRIDE;
   void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE;
+  void VisitNewInstance(HNewInstance* instruction) OVERRIDE;
 
   bool CanMoveClinitCheck(HInstruction* input, HInstruction* user);
 
diff --git a/compiler/optimizing/primitive_type_propagation.cc b/compiler/optimizing/primitive_type_propagation.cc
index c98f43e..bde54ee 100644
--- a/compiler/optimizing/primitive_type_propagation.cc
+++ b/compiler/optimizing/primitive_type_propagation.cc
@@ -63,7 +63,6 @@
             : SsaBuilder::GetFloatOrDoubleEquivalent(phi, input, new_type);
         phi->ReplaceInput(equivalent, i);
         if (equivalent->IsPhi()) {
-          equivalent->AsPhi()->SetLive();
           AddToWorklist(equivalent->AsPhi());
         } else if (equivalent == input) {
           // The input has changed its type. It can be an input of other phis,
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 5190eb3..9e6cfbe 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -22,6 +22,13 @@
 
 namespace art {
 
+// Returns whether this is a loop header phi which was eagerly created but later
+// found inconsistent due to the vreg being undefined in one of its predecessors.
+// Such a phi is marked dead and should be ignored until its removal in SsaPhiElimination.
+static bool IsUndefinedLoopHeaderPhi(HPhi* phi) {
+  return phi->IsLoopHeaderPhi() && phi->InputCount() != phi->GetBlock()->GetPredecessors().size();
+}
+
 /**
  * A debuggable application may require reviving phis, to ensure their
  * associated DEX register is available to a debugger. This class implements
@@ -165,17 +172,15 @@
 void DeadPhiHandling::VisitBasicBlock(HBasicBlock* block) {
   for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
     HPhi* phi = it.Current()->AsPhi();
+    if (IsUndefinedLoopHeaderPhi(phi)) {
+      DCHECK(phi->IsDead());
+      continue;
+    }
     if (phi->IsDead() && phi->HasEnvironmentUses()) {
       phi->SetLive();
       if (block->IsLoopHeader()) {
-        // Give a type to the loop phi to guarantee convergence of the algorithm.
-        // Note that the dead phi may already have a type if it is an equivalent
-        // generated for a typed LoadLocal. In that case we do not change the
-        // type because it could lead to an unsupported PrimNot/Float/Double ->
-        // PrimInt/Long transition and create same type equivalents.
-        if (phi->GetType() == Primitive::kPrimVoid) {
-          phi->SetType(phi->InputAt(0)->GetType());
-        }
+        // Loop phis must have a type to guarantee convergence of the algorithm.
+        DCHECK_NE(phi->GetType(), Primitive::kPrimVoid);
         AddToWorklist(phi);
       } else {
         // Because we are doing a reverse post order visit, all inputs of
@@ -220,6 +225,27 @@
   ProcessWorklist();
 }
 
+void SsaBuilder::SetLoopHeaderPhiInputs() {
+  for (size_t i = loop_headers_.size(); i > 0; --i) {
+    HBasicBlock* block = loop_headers_[i - 1];
+    for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
+      HPhi* phi = it.Current()->AsPhi();
+      size_t vreg = phi->GetRegNumber();
+      for (HBasicBlock* predecessor : block->GetPredecessors()) {
+        HInstruction* value = ValueOfLocal(predecessor, vreg);
+        if (value == nullptr) {
+          // The vreg is undefined at this predecessor. Mark the phi dead and leave
+          // it with fewer inputs than predecessors; SsaChecker fails unless it is removed.
+          phi->SetDead();
+          break;
+        } else {
+          phi->AddInput(value);
+        }
+      }
+    }
+  }
+}
+
 void SsaBuilder::FixNullConstantType() {
   // The order doesn't matter here.
   for (HReversePostOrderIterator itb(*GetGraph()); !itb.Done(); itb.Advance()) {
@@ -283,15 +309,7 @@
   }
 
   // 2) Set inputs of loop phis.
-  for (HBasicBlock* block : loop_headers_) {
-    for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
-      HPhi* phi = it.Current()->AsPhi();
-      for (HBasicBlock* predecessor : block->GetPredecessors()) {
-        HInstruction* input = ValueOfLocal(predecessor, phi->GetRegNumber());
-        phi->AddInput(input);
-      }
-    }
-  }
+  SetLoopHeaderPhiInputs();
 
   // 3) Mark dead phis. This will mark phis that are only used by environments:
   // at the DEX level, the type of these phis does not need to be consistent, but
@@ -403,8 +421,13 @@
       for (size_t i = 0; i < vregs; ++i) {
         // No point in creating the catch phi if it is already undefined at
         // the first throwing instruction.
-        if ((*current_locals_)[i] != nullptr) {
-          HPhi* phi = new (arena) HPhi(arena, i, 0, Primitive::kPrimVoid);
+        HInstruction* current_local_value = (*current_locals_)[i];
+        if (current_local_value != nullptr) {
+          HPhi* phi = new (arena) HPhi(
+              arena,
+              i,
+              0,
+              current_local_value->GetType());
           block->AddPhi(phi);
           (*locals)[i] = phi;
         }
@@ -451,7 +474,10 @@
       HInstruction* incoming = ValueOfLocal(block->GetLoopInformation()->GetPreHeader(), local);
       if (incoming != nullptr) {
         HPhi* phi = new (GetGraph()->GetArena()) HPhi(
-            GetGraph()->GetArena(), local, 0, Primitive::kPrimVoid);
+            GetGraph()->GetArena(),
+            local,
+            0,
+            incoming->GetType());
         block->AddPhi(phi);
         (*current_locals_)[local] = phi;
       }
@@ -484,8 +510,12 @@
       }
 
       if (is_different) {
+        HInstruction* first_input = ValueOfLocal(block->GetPredecessors()[0], local);
         HPhi* phi = new (GetGraph()->GetArena()) HPhi(
-            GetGraph()->GetArena(), local, block->GetPredecessors().size(), Primitive::kPrimVoid);
+            GetGraph()->GetArena(),
+            local,
+            block->GetPredecessors().size(),
+            first_input->GetType());
         for (size_t i = 0; i < block->GetPredecessors().size(); i++) {
           HInstruction* pred_value = ValueOfLocal(block->GetPredecessors()[i], local);
           phi->SetRawInputAt(i, pred_value);
@@ -583,8 +613,16 @@
     phi->GetBlock()->InsertPhiAfter(new_phi, phi);
     return new_phi;
   } else {
-    DCHECK_EQ(next->GetType(), type);
-    return next->AsPhi();
+    HPhi* next_phi = next->AsPhi();
+    DCHECK_EQ(next_phi->GetType(), type);
+    if (next_phi->IsDead()) {
+      // TODO(dbrazdil): Remove this SetLive (we should not need to revive phis)
+      // once we stop running MarkDeadPhis before PrimitiveTypePropagation. This
+      // cannot revive undefined loop header phis because they cannot have uses.
+      DCHECK(!IsUndefinedLoopHeaderPhi(next_phi));
+      next_phi->SetLive();
+    }
+    return next_phi;
   }
 }
 
@@ -638,7 +676,36 @@
 }
 
 void SsaBuilder::VisitStoreLocal(HStoreLocal* store) {
-  (*current_locals_)[store->GetLocal()->GetRegNumber()] = store->InputAt(1);
+  uint32_t reg_number = store->GetLocal()->GetRegNumber();
+  HInstruction* stored_value = store->InputAt(1);
+  Primitive::Type stored_type = stored_value->GetType();
+  DCHECK_NE(stored_type, Primitive::kPrimVoid);
+
+  // Storing into vreg `reg_number` may implicitly invalidate the surrounding
+  // registers. Consider the following cases:
+  // (1) Storing a wide value must overwrite previous values in both `reg_number`
+  //     and `reg_number+1`. We store `nullptr` in `reg_number+1`.
+  // (2) If vreg `reg_number-1` holds a wide value, writing into `reg_number`
+  //     must invalidate it. We store `nullptr` in `reg_number-1`.
+  // Consequently, storing a wide value into the high vreg of another wide value
+  // will invalidate both `reg_number-1` and `reg_number+1`.
+
+  if (reg_number != 0) {
+    HInstruction* local_low = (*current_locals_)[reg_number - 1];
+    if (local_low != nullptr && Primitive::Is64BitType(local_low->GetType())) {
+      // The vreg we are storing into was previously the high vreg of a pair.
+      // We need to invalidate its low vreg.
+      DCHECK((*current_locals_)[reg_number] == nullptr);
+      (*current_locals_)[reg_number - 1] = nullptr;
+    }
+  }
+
+  (*current_locals_)[reg_number] = stored_value;
+  if (Primitive::Is64BitType(stored_type)) {
+    // We are storing a pair. Invalidate the instruction in the high vreg.
+    (*current_locals_)[reg_number + 1] = nullptr;
+  }
+
   store->GetBlock()->RemoveInstruction(store);
 }
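
As a sanity check on the two invalidation cases spelled out in the comment above, here is a self-contained sketch; the `Value` type and its `is_wide` flag are hypothetical stand-ins for HInstruction and Primitive::Is64BitType:

#include <cstdint>
#include <vector>

struct Value { bool is_wide; };

void StoreLocal(std::vector<Value*>* locals, uint32_t reg, Value* value) {
  // Case (2): if vreg `reg - 1` holds the low half of a wide value, writing
  // into `reg` clobbers that value's high half, so invalidate the low vreg.
  if (reg != 0u) {
    Value* low = (*locals)[reg - 1u];
    if (low != nullptr && low->is_wide) {
      (*locals)[reg - 1u] = nullptr;
    }
  }
  (*locals)[reg] = value;
  // Case (1): a wide value occupies `reg` and `reg + 1`; invalidate the high vreg.
  if (value->is_wide) {
    (*locals)[reg + 1u] = nullptr;
  }
}

Storing a wide value into the high vreg of another wide value hits both cases, invalidating `reg - 1` and `reg + 1`, exactly as the comment describes.
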
 
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 79f1a28..dcce5e4 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -81,6 +81,7 @@
   static constexpr const char* kSsaBuilderPassName = "ssa_builder";
 
  private:
+  void SetLoopHeaderPhiInputs();
   void FixNullConstantType();
   void EquivalentPhisCleanup();
 
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 72f9ddd..a3219dc 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -16,6 +16,8 @@
 
 #include "ssa_phi_elimination.h"
 
+#include "base/arena_containers.h"
+
 namespace art {
 
 void SsaDeadPhiElimination::Run() {
@@ -24,22 +26,36 @@
 }
 
 void SsaDeadPhiElimination::MarkDeadPhis() {
+  // Phis are constructed live and should not be revived if previously marked
+  // dead. This algorithm temporarily breaks that invariant but we DCHECK that
+  // only phis which were initially live are revived.
+  ArenaSet<HPhi*> initially_live(graph_->GetArena()->Adapter());
+
   // Add to the worklist phis referenced by non-phi instructions.
   for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
     HBasicBlock* block = it.Current();
     for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
       HPhi* phi = inst_it.Current()->AsPhi();
-      // Set dead ahead of running through uses. The phi may have no use.
-      phi->SetDead();
+      if (phi->IsDead()) {
+        continue;
+      }
+
+      bool has_non_phi_use = false;
       for (HUseIterator<HInstruction*> use_it(phi->GetUses()); !use_it.Done(); use_it.Advance()) {
-        HUseListNode<HInstruction*>* current = use_it.Current();
-        HInstruction* user = current->GetUser();
-        if (!user->IsPhi()) {
-          worklist_.push_back(phi);
-          phi->SetLive();
+        if (!use_it.Current()->GetUser()->IsPhi()) {
+          has_non_phi_use = true;
           break;
         }
       }
+
+      if (has_non_phi_use) {
+        worklist_.push_back(phi);
+      } else {
+        phi->SetDead();
+        if (kIsDebugBuild) {
+          initially_live.insert(phi);
+        }
+      }
     }
   }
 
@@ -48,10 +64,13 @@
     HPhi* phi = worklist_.back();
     worklist_.pop_back();
     for (HInputIterator it(phi); !it.Done(); it.Advance()) {
-      HInstruction* input = it.Current();
-      if (input->IsPhi() && input->AsPhi()->IsDead()) {
-        worklist_.push_back(input->AsPhi());
-        input->AsPhi()->SetLive();
+      HPhi* input = it.Current()->AsPhi();
+      if (input != nullptr && input->IsDead()) {
+        // Input is a dead phi. Revive it and add it to the worklist. We make sure
+        // the phi was live on entry to this pass (see the definition of `initially_live`).
+        DCHECK(ContainsElement(initially_live, input));
+        input->SetLive();
+        worklist_.push_back(input);
       }
     }
   }
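
The reworked pass computes an ordinary liveness fixed point. A rough standalone sketch, with plain containers standing in for ArenaSet and the ART use lists (all names here are hypothetical):

#include <cassert>
#include <set>
#include <vector>

struct Phi {
  bool live = false;
  bool has_non_phi_use = false;  // Used by a real instruction or environment.
  std::vector<Phi*> phi_inputs;  // Inputs that are themselves phis.
};

void MarkDeadPhis(const std::vector<Phi*>& phis) {
  std::vector<Phi*> worklist;
  std::set<Phi*> marked_dead_here;  // Debug-only bookkeeping (kIsDebugBuild).
  for (Phi* phi : phis) {
    if (phi->has_non_phi_use) {
      phi->live = true;
      worklist.push_back(phi);
    } else {
      marked_dead_here.insert(phi);  // Only these may legally be revived below.
    }
  }
  // Transitively revive: any phi feeding a live phi is live as well.
  while (!worklist.empty()) {
    Phi* phi = worklist.back();
    worklist.pop_back();
    for (Phi* input : phi->phi_inputs) {
      if (!input->live) {
        assert(marked_dead_here.count(input) != 0);  // Was live before the pass.
        input->live = true;
        worklist.push_back(input);
      }
    }
  }
}
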
@@ -118,7 +137,6 @@
     }
 
     if (phi->InputCount() == 0) {
-      DCHECK(phi->IsCatchPhi());
       DCHECK(phi->IsDead());
       continue;
     }
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 68e3956..dead8fd 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -342,9 +342,9 @@
       return IsAbsoluteUint<12>(offset);
     case kLoadSWord:
     case kLoadDWord:
-      return IsAbsoluteUint<10>(offset);  // VFP addressing mode.
+      return IsAbsoluteUint<10>(offset) && (offset & 3) == 0;  // VFP addressing mode.
     case kLoadWordPair:
-      return IsAbsoluteUint<10>(offset);
+      return IsAbsoluteUint<10>(offset) && (offset & 3) == 0;
     default:
       LOG(FATAL) << "UNREACHABLE";
       UNREACHABLE();
@@ -360,9 +360,9 @@
       return IsAbsoluteUint<12>(offset);
     case kStoreSWord:
     case kStoreDWord:
-      return IsAbsoluteUint<10>(offset);  // VFP addressing mode.
+      return IsAbsoluteUint<10>(offset) && (offset & 3) == 0;  // VFP addressing mode.
     case kStoreWordPair:
-      return IsAbsoluteUint<10>(offset);
+      return IsAbsoluteUint<10>(offset) && (offset & 3) == 0;
     default:
       LOG(FATAL) << "UNREACHABLE";
       UNREACHABLE();
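
The tightened checks reflect the VFP addressing mode: VLDR/VSTR encode the offset as an 8-bit word count plus an add/subtract bit, so the byte offset must be word-aligned as well as under 1024; the same conservative test is applied to word pairs. A standalone version, with IsAbsoluteUint modeled here (the ART template behaves similarly):

#include <cstdint>

template <unsigned kBits>
bool IsAbsoluteUint(int32_t value) {
  uint32_t magnitude =
      value < 0 ? -static_cast<uint32_t>(value) : static_cast<uint32_t>(value);
  return magnitude < (1u << kBits);
}

// imm8 << 2 plus a sign bit: a multiple of 4 in (-1024, 1024).
bool CanHoldVfpOffset(int32_t offset) {
  return IsAbsoluteUint<10>(offset) && (offset & 3) == 0;
}
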
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index 5233dcb..ce3a872 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -389,8 +389,6 @@
   void EmitBranch(Condition cond, Label* label, bool link);
   static int32_t EncodeBranchOffset(int offset, int32_t inst);
   static int DecodeBranchOffset(int32_t inst);
-  int32_t EncodeTstOffset(int offset, int32_t inst);
-  int DecodeTstOffset(int32_t inst);
   bool ShifterOperandCanHoldArm32(uint32_t immediate, ShifterOperand* shifter_op);
 };
 
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 297cc54..7ad5b44 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -1349,7 +1349,8 @@
   int32_t encoding = 0;
   if (so.IsImmediate()) {
     // Check special cases.
-    if ((opcode == SUB || opcode == ADD) && (so.GetImmediate() < (1u << 12))) {
+    if ((opcode == SUB || opcode == ADD) && (so.GetImmediate() < (1u << 12)) &&
+        /* Prefer T3 encoding to T4. */ !ShifterOperandCanAlwaysHold(so.GetImmediate())) {
       if (set_cc != kCcSet) {
         if (opcode == SUB) {
           thumb_opcode = 5U;
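
The added guard makes ADD/SUB prefer the T3 encoding (modified-immediate add.w/sub.w) and reserves T4 (addw/subw) for 12-bit immediates T3 cannot express, which is what the updated test expectations below check. The property ShifterOperandCanAlwaysHold tests is the Thumb2 modified-immediate pattern; a standalone sketch of that predicate, assuming the usual ARMv7 encoding rules:

#include <cstdint>

// A Thumb2 modified immediate is a byte replicated in one of three patterns,
// or an 8-bit value with its top bit set, rotated right by 8..31.
bool IsThumb2ModifiedImmediate(uint32_t value) {
  uint32_t b0 = value & 0xffu;
  uint32_t b1 = (value >> 8) & 0xffu;
  if (value <= 0xffu ||                                        // 0x000000XY
      value == (b0 | (b0 << 16)) ||                            // 0x00XY00XY
      value == ((b1 << 8) | (b1 << 24)) ||                     // 0xXY00XY00
      value == (b0 | (b0 << 8) | (b0 << 16) | (b0 << 24))) {   // 0xXYXYXYXY
    return true;
  }
  for (unsigned rot = 8; rot < 32; ++rot) {
    // Undo "imm8 rotated right by rot" by rotating left by the same amount.
    uint32_t imm8 = (value << rot) | (value >> (32u - rot));
    if (0x80u <= imm8 && imm8 <= 0xffu) {
      return true;
    }
  }
  return false;
}

For example, 0x50 and 0xf00 pass this test (hence add.w/sub.w in the expected output further down), while 0xffc does not and falls back to addw/subw.
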
@@ -3220,7 +3221,7 @@
 
 void Thumb2Assembler::Rrx(Register rd, Register rm, Condition cond, SetCc set_cc) {
   CheckCondition(cond);
-  EmitShift(rd, rm, RRX, rm, cond, set_cc);
+  EmitShift(rd, rm, RRX, 0, cond, set_cc);
 }
 
 
@@ -3469,6 +3470,73 @@
   }
 }
 
+int32_t Thumb2Assembler::GetAllowedLoadOffsetBits(LoadOperandType type) {
+  switch (type) {
+    case kLoadSignedByte:
+    case kLoadSignedHalfword:
+    case kLoadUnsignedHalfword:
+    case kLoadUnsignedByte:
+    case kLoadWord:
+      // We can encode imm12 offset.
+      return 0xfffu;
+    case kLoadSWord:
+    case kLoadDWord:
+    case kLoadWordPair:
+      // We can encode imm8:'00' offset.
+      return 0xff << 2;
+    default:
+      LOG(FATAL) << "UNREACHABLE";
+      UNREACHABLE();
+  }
+}
+
+int32_t Thumb2Assembler::GetAllowedStoreOffsetBits(StoreOperandType type) {
+  switch (type) {
+    case kStoreHalfword:
+    case kStoreByte:
+    case kStoreWord:
+      // We can encode imm12 offset.
+      return 0xfff;
+    case kStoreSWord:
+    case kStoreDWord:
+    case kStoreWordPair:
+      // We can encode imm8:'00' offset.
+      return 0xff << 2;
+    default:
+      LOG(FATAL) << "UNREACHABLE";
+      UNREACHABLE();
+  }
+}
+
+bool Thumb2Assembler::CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
+                                              int32_t offset,
+                                              /*out*/ int32_t* add_to_base,
+                                              /*out*/ int32_t* offset_for_load_store) {
+  int32_t other_bits = offset & ~allowed_offset_bits;
+  if (ShifterOperandCanAlwaysHold(other_bits) || ShifterOperandCanAlwaysHold(-other_bits)) {
+    *add_to_base = offset & ~allowed_offset_bits;
+    *offset_for_load_store = offset & allowed_offset_bits;
+    return true;
+  }
+  return false;
+}
+
+int32_t Thumb2Assembler::AdjustLoadStoreOffset(int32_t allowed_offset_bits,
+                                               Register temp,
+                                               Register base,
+                                               int32_t offset,
+                                               Condition cond) {
+  DCHECK_NE(offset & ~allowed_offset_bits, 0);
+  int32_t add_to_base, offset_for_load;
+  if (CanSplitLoadStoreOffset(allowed_offset_bits, offset, &add_to_base, &offset_for_load)) {
+    AddConstant(temp, base, add_to_base, cond, kCcKeep);
+    return offset_for_load;
+  } else {
+    LoadImmediate(temp, offset, cond);
+    add(temp, temp, ShifterOperand(base), cond, kCcKeep);
+    return 0;
+  }
+}
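
The decomposition these helpers perform is straightforward; a self-contained sketch, with `CanHoldInAdd` as a deliberately conservative stand-in for ShifterOperandCanAlwaysHold (any conservative subset keeps the transformation correct):

#include <cstdint>

// Conservative stand-in: accepts small non-negative immediates only. See the
// modified-immediate sketch earlier for what Thumb2 ADD/SUB really accepts.
bool CanHoldInAdd(int32_t value) {
  return 0 <= value && value <= 0xff;
}

// allowed_offset_bits is 0xfff for LDR/STR and 0xff << 2 for the S/D-register
// and word-pair forms, mirroring GetAllowedLoad/StoreOffsetBits above.
bool SplitOffset(int32_t allowed_offset_bits,
                 int32_t offset,
                 int32_t* add_to_base,
                 int32_t* offset_for_mem) {
  int32_t rest = offset & ~allowed_offset_bits;
  if (CanHoldInAdd(rest) || CanHoldInAdd(-rest)) {
    *add_to_base = rest;                             // One ADD/SUB onto the base.
    *offset_for_mem = offset & allowed_offset_bits;  // Fits the memory instruction.
    return true;
  }
  return false;  // Caller materializes `offset` in a register and adds the base.
}
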
 
 // Implementation note: this method must emit at most one instruction when
 // Address::CanHoldLoadOffsetThumb() is true.
@@ -3479,12 +3547,26 @@
                                      Condition cond) {
   if (!Address::CanHoldLoadOffsetThumb(type, offset)) {
     CHECK_NE(base, IP);
-    LoadImmediate(IP, offset, cond);
-    add(IP, IP, ShifterOperand(base), cond);
-    base = IP;
-    offset = 0;
+    // Inlined AdjustLoadStoreOffset() allows us to pull a few more tricks.
+    int32_t allowed_offset_bits = GetAllowedLoadOffsetBits(type);
+    DCHECK_NE(offset & ~allowed_offset_bits, 0);
+    int32_t add_to_base, offset_for_load;
+    if (CanSplitLoadStoreOffset(allowed_offset_bits, offset, &add_to_base, &offset_for_load)) {
+      // Use reg for the adjusted base. If it's a low reg, we may end up using a 16-bit load.
+      AddConstant(reg, base, add_to_base, cond, kCcKeep);
+      base = reg;
+      offset = offset_for_load;
+    } else {
+      Register temp = (reg == base) ? IP : reg;
+      LoadImmediate(temp, offset, cond);
+      // TODO: Implement indexed load (not available for LDRD) and use it here to avoid the ADD.
+      // Use reg for the adjusted base. If it's a low reg, we may end up using a 16-bit load.
+      add(reg, reg, ShifterOperand((reg == base) ? IP : base), cond, kCcKeep);
+      base = reg;
+      offset = 0;
+    }
   }
-  CHECK(Address::CanHoldLoadOffsetThumb(type, offset));
+  DCHECK(Address::CanHoldLoadOffsetThumb(type, offset));
   switch (type) {
     case kLoadSignedByte:
       ldrsb(reg, Address(base, offset), cond);
@@ -3510,7 +3592,6 @@
   }
 }
 
-
 // Implementation note: this method must emit at most one instruction when
 // Address::CanHoldLoadOffsetThumb() is true, as expected by JIT::GuardedLoadFromOffset.
 void Thumb2Assembler::LoadSFromOffset(SRegister reg,
@@ -3519,12 +3600,10 @@
                                       Condition cond) {
   if (!Address::CanHoldLoadOffsetThumb(kLoadSWord, offset)) {
     CHECK_NE(base, IP);
-    LoadImmediate(IP, offset, cond);
-    add(IP, IP, ShifterOperand(base), cond);
+    offset = AdjustLoadStoreOffset(GetAllowedLoadOffsetBits(kLoadSWord), IP, base, offset, cond);
     base = IP;
-    offset = 0;
   }
-  CHECK(Address::CanHoldLoadOffsetThumb(kLoadSWord, offset));
+  DCHECK(Address::CanHoldLoadOffsetThumb(kLoadSWord, offset));
   vldrs(reg, Address(base, offset), cond);
 }
 
@@ -3537,12 +3616,10 @@
                                       Condition cond) {
   if (!Address::CanHoldLoadOffsetThumb(kLoadDWord, offset)) {
     CHECK_NE(base, IP);
-    LoadImmediate(IP, offset, cond);
-    add(IP, IP, ShifterOperand(base), cond);
+    offset = AdjustLoadStoreOffset(GetAllowedLoadOffsetBits(kLoadDWord), IP, base, offset, cond);
     base = IP;
-    offset = 0;
   }
-  CHECK(Address::CanHoldLoadOffsetThumb(kLoadDWord, offset));
+  DCHECK(Address::CanHoldLoadOffsetThumb(kLoadDWord, offset));
   vldrd(reg, Address(base, offset), cond);
 }
 
@@ -3573,12 +3650,12 @@
         offset += kRegisterSize;
       }
     }
-    LoadImmediate(tmp_reg, offset, cond);
-    add(tmp_reg, tmp_reg, ShifterOperand(base), AL);
+    // TODO: Implement indexed store (not available for STRD), inline AdjustLoadStoreOffset()
+    // and in the "unsplittable" path get rid of the "add" by using the indexed store instead.
+    offset = AdjustLoadStoreOffset(GetAllowedStoreOffsetBits(type), tmp_reg, base, offset, cond);
     base = tmp_reg;
-    offset = 0;
   }
-  CHECK(Address::CanHoldStoreOffsetThumb(type, offset));
+  DCHECK(Address::CanHoldStoreOffsetThumb(type, offset));
   switch (type) {
     case kStoreByte:
       strb(reg, Address(base, offset), cond);
@@ -3611,12 +3688,10 @@
                                      Condition cond) {
   if (!Address::CanHoldStoreOffsetThumb(kStoreSWord, offset)) {
     CHECK_NE(base, IP);
-    LoadImmediate(IP, offset, cond);
-    add(IP, IP, ShifterOperand(base), cond);
+    offset = AdjustLoadStoreOffset(GetAllowedStoreOffsetBits(kStoreSWord), IP, base, offset, cond);
     base = IP;
-    offset = 0;
   }
-  CHECK(Address::CanHoldStoreOffsetThumb(kStoreSWord, offset));
+  DCHECK(Address::CanHoldStoreOffsetThumb(kStoreSWord, offset));
   vstrs(reg, Address(base, offset), cond);
 }
 
@@ -3629,12 +3704,10 @@
                                      Condition cond) {
   if (!Address::CanHoldStoreOffsetThumb(kStoreDWord, offset)) {
     CHECK_NE(base, IP);
-    LoadImmediate(IP, offset, cond);
-    add(IP, IP, ShifterOperand(base), cond);
+    offset = AdjustLoadStoreOffset(GetAllowedStoreOffsetBits(kStoreDWord), IP, base, offset, cond);
     base = IP;
-    offset = 0;
   }
-  CHECK(Address::CanHoldStoreOffsetThumb(kStoreDWord, offset));
+  DCHECK(Address::CanHoldStoreOffsetThumb(kStoreDWord, offset));
   vstrd(reg, Address(base, offset), cond);
 }
 
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index e183613..9aeece8 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -729,13 +729,23 @@
   void EmitBranch(Condition cond, Label* label, bool link, bool x);
   static int32_t EncodeBranchOffset(int32_t offset, int32_t inst);
   static int DecodeBranchOffset(int32_t inst);
-  int32_t EncodeTstOffset(int offset, int32_t inst);
-  int DecodeTstOffset(int32_t inst);
   void EmitShift(Register rd, Register rm, Shift shift, uint8_t amount,
                  Condition cond = AL, SetCc set_cc = kCcDontCare);
   void EmitShift(Register rd, Register rn, Shift shift, Register rm,
                  Condition cond = AL, SetCc set_cc = kCcDontCare);
 
+  static int32_t GetAllowedLoadOffsetBits(LoadOperandType type);
+  static int32_t GetAllowedStoreOffsetBits(StoreOperandType type);
+  bool CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
+                               int32_t offset,
+                               /*out*/ int32_t* add_to_base,
+                               /*out*/ int32_t* offset_for_load_store);
+  int32_t AdjustLoadStoreOffset(int32_t allowed_offset_bits,
+                                Register temp,
+                                Register base,
+                                int32_t offset,
+                                Condition cond);
+
   // Whether the assembler can relocate branches. If false, unresolved branches will be
   // emitted on 32bits.
   bool can_relocate_branches_;
diff --git a/compiler/utils/arm/assembler_thumb2_test.cc b/compiler/utils/arm/assembler_thumb2_test.cc
index cb4b20b..7b32b0f 100644
--- a/compiler/utils/arm/assembler_thumb2_test.cc
+++ b/compiler/utils/arm/assembler_thumb2_test.cc
@@ -243,7 +243,7 @@
 
   const char* expected =
       "subs r1, r0, #42\n"
-      "subw r1, r0, #42\n"
+      "sub.w r1, r0, #42\n"
       "subs r1, r0, r2, asr #31\n"
       "sub r1, r0, r2, asr #31\n";
   DriverStr(expected, "sub");
@@ -257,7 +257,7 @@
 
   const char* expected =
       "adds r1, r0, #42\n"
-      "addw r1, r0, #42\n"
+      "add.w r1, r0, #42\n"
       "adds r1, r0, r2, asr #31\n"
       "add r1, r0, r2, asr #31\n";
   DriverStr(expected, "add");
@@ -305,21 +305,18 @@
   __ StoreToOffset(type, arm::IP, arm::R5, offset);
 
   const char* expected =
-      "mov ip, #4096\n"       // LoadImmediate(ip, 4096)
-      "add ip, ip, sp\n"
+      "add.w ip, sp, #4096\n"   // AddConstant(ip, sp, 4096)
       "str r0, [ip, #0]\n"
 
-      "str r5, [sp, #-4]!\n"  // Push(r5)
-      "movw r5, #4100\n"      // LoadImmediate(r5, 4096 + kRegisterSize)
-      "add r5, r5, sp\n"
-      "str ip, [r5, #0]\n"
-      "ldr r5, [sp], #4\n"    // Pop(r5)
+      "str r5, [sp, #-4]!\n"    // Push(r5)
+      "add.w r5, sp, #4096\n"   // AddConstant(r5, 4100 & ~0xfff)
+      "str ip, [r5, #4]\n"      // StoreToOffset(type, ip, r5, 4100 & 0xfff)
+      "ldr r5, [sp], #4\n"      // Pop(r5)
 
-      "str r6, [sp, #-4]!\n"  // Push(r6)
-      "mov r6, #4096\n"       // LoadImmediate(r6, 4096)
-      "add r6, r6, r5\n"
-      "str ip, [r6, #0]\n"
-      "ldr r6, [sp], #4\n";   // Pop(r6)
+      "str r6, [sp, #-4]!\n"    // Push(r6)
+      "add.w r6, r5, #4096\n"   // AddConstant(r6, r5, 4096 & ~0xfff)
+      "str ip, [r6, #0]\n"      // StoreToOffset(type, ip, r6, 4096 & 0xfff)
+      "ldr r6, [sp], #4\n";     // Pop(r6)
   DriverStr(expected, "StoreWordToNonThumbOffset");
 }
 
@@ -360,20 +357,17 @@
   __ StoreToOffset(type, arm::R11, arm::R5, offset);
 
   const char* expected =
-      "mov ip, #1024\n"           // LoadImmediate(ip, 1024)
-      "add ip, ip, sp\n"
+      "add.w ip, sp, #1024\n"     // AddConstant(ip, sp, 1024)
       "strd r0, r1, [ip, #0]\n"
 
       "str r5, [sp, #-4]!\n"      // Push(r5)
-      "movw r5, #1028\n"          // LoadImmediate(r5, 1024 + kRegisterSize)
-      "add r5, r5, sp\n"
-      "strd r11, ip, [r5, #0]\n"
+      "add.w r5, sp, #1024\n"     // AddConstant(r5, sp, (1024 + kRegisterSize) & ~0x3fc)
+      "strd r11, ip, [r5, #4]\n"  // StoreToOffset(type, r11, sp, (1024 + kRegisterSize) & 0x3fc)
       "ldr r5, [sp], #4\n"        // Pop(r5)
 
       "str r6, [sp, #-4]!\n"      // Push(r6)
-      "mov r6, #1024\n"           // LoadImmediate(r6, 1024)
-      "add r6, r6, r5\n"
-      "strd r11, ip, [r6, #0]\n"
+      "add.w r6, r5, #1024\n"     // AddConstant(r6, r5, 1024 & ~0x3fc)
+      "strd r11, ip, [r6, #0]\n"  // StoreToOffset(type, r11, r6, 1024 & 0x3fc)
       "ldr r6, [sp], #4\n";       // Pop(r6)
   DriverStr(expected, "StoreWordPairToNonThumbOffset");
 }
diff --git a/compiler/utils/assembler_thumb_test.cc b/compiler/utils/assembler_thumb_test.cc
index 2ae8841..1de51a2 100644
--- a/compiler/utils/assembler_thumb_test.cc
+++ b/compiler/utils/assembler_thumb_test.cc
@@ -466,6 +466,38 @@
   EmitAndCheck(&assembler, "DataProcessingShiftedRegister");
 }
 
+TEST(Thumb2AssemblerTest, ShiftImmediate) {
+  // Note: This test produces the same results as DataProcessingShiftedRegister
+  // but it does so using shift functions instead of mov().
+  arm::Thumb2Assembler assembler;
+
+  // 16-bit variants.
+  __ Lsl(R3, R4, 4);
+  __ Lsr(R3, R4, 5);
+  __ Asr(R3, R4, 6);
+
+  // 32-bit ROR because ROR immediate, unlike the other shifts, has no 16-bit version.
+  __ Ror(R3, R4, 7);
+
+  // 32-bit RRX because RRX has no 16-bit version.
+  __ Rrx(R3, R4);
+
+  // 32 bit variants (not setting condition codes).
+  __ Lsl(R3, R4, 4, AL, kCcKeep);
+  __ Lsr(R3, R4, 5, AL, kCcKeep);
+  __ Asr(R3, R4, 6, AL, kCcKeep);
+  __ Ror(R3, R4, 7, AL, kCcKeep);
+  __ Rrx(R3, R4, AL, kCcKeep);
+
+  // 32 bit variants (high registers).
+  __ Lsls(R8, R4, 4);
+  __ Lsrs(R8, R4, 5);
+  __ Asrs(R8, R4, 6);
+  __ Rors(R8, R4, 7);
+  __ Rrxs(R8, R4);
+
+  EmitAndCheck(&assembler, "ShiftImmediate");
+}
 
 TEST(Thumb2AssemblerTest, BasicLoad) {
   arm::Thumb2Assembler assembler;
@@ -823,29 +855,80 @@
 
   __ add(R2, SP, ShifterOperand(0xf00));  // 32 bit due to imm size.
   __ add(SP, SP, ShifterOperand(0xf00));  // 32 bit due to imm size.
+  __ add(SP, SP, ShifterOperand(0xffc));  // 32 bit due to imm size; encoding T4.
 
-  __ sub(SP, SP, ShifterOperand(0x50));     // 16 bit
-  __ sub(R0, SP, ShifterOperand(0x50));     // 32 bit
-  __ sub(R8, SP, ShifterOperand(0x50));     // 32 bit.
+  __ sub(SP, SP, ShifterOperand(0x50));   // 16 bit
+  __ sub(R0, SP, ShifterOperand(0x50));   // 32 bit
+  __ sub(R8, SP, ShifterOperand(0x50));   // 32 bit.
 
-  __ sub(SP, SP, ShifterOperand(0xf00));   // 32 bit due to imm size
+  __ sub(SP, SP, ShifterOperand(0xf00));  // 32 bit due to imm size
+  __ sub(SP, SP, ShifterOperand(0xffc));  // 32 bit due to imm size; encoding T4.
 
   EmitAndCheck(&assembler, "SpecialAddSub");
 }
 
+TEST(Thumb2AssemblerTest, LoadFromOffset) {
+  arm::Thumb2Assembler assembler;
+
+  __ LoadFromOffset(kLoadWord, R2, R4, 12);
+  __ LoadFromOffset(kLoadWord, R2, R4, 0xfff);
+  __ LoadFromOffset(kLoadWord, R2, R4, 0x1000);
+  __ LoadFromOffset(kLoadWord, R2, R4, 0x1000a4);
+  __ LoadFromOffset(kLoadWord, R2, R4, 0x101000);
+  __ LoadFromOffset(kLoadWord, R4, R4, 0x101000);
+  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 12);
+  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0xfff);
+  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000);
+  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000a4);
+  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x101000);
+  __ LoadFromOffset(kLoadUnsignedHalfword, R4, R4, 0x101000);
+  __ LoadFromOffset(kLoadWordPair, R2, R4, 12);
+  __ LoadFromOffset(kLoadWordPair, R2, R4, 0x3fc);
+  __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400);
+  __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400a4);
+  __ LoadFromOffset(kLoadWordPair, R2, R4, 0x40400);
+  __ LoadFromOffset(kLoadWordPair, R4, R4, 0x40400);
+
+  __ LoadFromOffset(kLoadWord, R0, R12, 12);  // 32-bit because of R12.
+  __ LoadFromOffset(kLoadWord, R2, R4, 0xa4 - 0x100000);
+
+  __ LoadFromOffset(kLoadSignedByte, R2, R4, 12);
+  __ LoadFromOffset(kLoadUnsignedByte, R2, R4, 12);
+  __ LoadFromOffset(kLoadSignedHalfword, R2, R4, 12);
+
+  EmitAndCheck(&assembler, "LoadFromOffset");
+}
+
 TEST(Thumb2AssemblerTest, StoreToOffset) {
   arm::Thumb2Assembler assembler;
 
-  __ StoreToOffset(kStoreWord, R2, R4, 12);     // Simple
-  __ StoreToOffset(kStoreWord, R2, R4, 0x2000);     // Offset too big.
-  __ StoreToOffset(kStoreWord, R0, R12, 12);
-  __ StoreToOffset(kStoreHalfword, R0, R12, 12);
-  __ StoreToOffset(kStoreByte, R2, R12, 12);
+  __ StoreToOffset(kStoreWord, R2, R4, 12);
+  __ StoreToOffset(kStoreWord, R2, R4, 0xfff);
+  __ StoreToOffset(kStoreWord, R2, R4, 0x1000);
+  __ StoreToOffset(kStoreWord, R2, R4, 0x1000a4);
+  __ StoreToOffset(kStoreWord, R2, R4, 0x101000);
+  __ StoreToOffset(kStoreWord, R4, R4, 0x101000);
+  __ StoreToOffset(kStoreHalfword, R2, R4, 12);
+  __ StoreToOffset(kStoreHalfword, R2, R4, 0xfff);
+  __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000);
+  __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000a4);
+  __ StoreToOffset(kStoreHalfword, R2, R4, 0x101000);
+  __ StoreToOffset(kStoreHalfword, R4, R4, 0x101000);
+  __ StoreToOffset(kStoreWordPair, R2, R4, 12);
+  __ StoreToOffset(kStoreWordPair, R2, R4, 0x3fc);
+  __ StoreToOffset(kStoreWordPair, R2, R4, 0x400);
+  __ StoreToOffset(kStoreWordPair, R2, R4, 0x400a4);
+  __ StoreToOffset(kStoreWordPair, R2, R4, 0x40400);
+  __ StoreToOffset(kStoreWordPair, R4, R4, 0x40400);
+
+  __ StoreToOffset(kStoreWord, R0, R12, 12);  // 32-bit because of R12.
+  __ StoreToOffset(kStoreWord, R2, R4, 0xa4 - 0x100000);
+
+  __ StoreToOffset(kStoreByte, R2, R4, 12);
 
   EmitAndCheck(&assembler, "StoreToOffset");
 }
 
-
 TEST(Thumb2AssemblerTest, IfThen) {
   arm::Thumb2Assembler assembler;
 
diff --git a/compiler/utils/assembler_thumb_test_expected.cc.inc b/compiler/utils/assembler_thumb_test_expected.cc.inc
index b79c2e4..9246c82 100644
--- a/compiler/utils/assembler_thumb_test_expected.cc.inc
+++ b/compiler/utils/assembler_thumb_test_expected.cc.inc
@@ -132,8 +132,8 @@
 const char* DataProcessingImmediateResults[] = {
   "   0:	2055      	movs	r0, #85	; 0x55\n",
   "   2:	f06f 0055 	mvn.w	r0, #85	; 0x55\n",
-  "   6:	f201 0055 	addw	r0, r1, #85	; 0x55\n",
-  "   a:	f2a1 0055 	subw	r0, r1, #85	; 0x55\n",
+  "   6:	f101 0055 	add.w	r0, r1, #85	; 0x55\n",
+  "   a:	f1a1 0055 	sub.w	r0, r1, #85	; 0x55\n",
   "   e:	f001 0055 	and.w	r0, r1, #85	; 0x55\n",
   "  12:	f041 0055 	orr.w	r0, r1, #85	; 0x55\n",
   "  16:	f061 0055 	orn	r0, r1, #85	; 0x55\n",
@@ -201,6 +201,24 @@
   "  32:	ea5f 0834 	movs.w	r8, r4, rrx\n",
   nullptr
 };
+const char* ShiftImmediateResults[] = {
+  "   0:  0123        lsls  r3, r4, #4\n",
+  "   2:  0963        lsrs  r3, r4, #5\n",
+  "   4:  11a3        asrs  r3, r4, #6\n",
+  "   6:  ea4f 13f4   mov.w  r3, r4, ror #7\n",
+  "   a:  ea4f 0334   mov.w  r3, r4, rrx\n",
+  "   e:  ea4f 1304   mov.w r3, r4, lsl #4\n",
+  "  12:  ea4f 1354   mov.w r3, r4, lsr #5\n",
+  "  16:  ea4f 13a4   mov.w r3, r4, asr #6\n",
+  "  1a:  ea4f 13f4   mov.w r3, r4, ror #7\n",
+  "  1e:  ea4f 0334   mov.w r3, r4, rrx\n",
+  "  22:  ea5f 1804   movs.w  r8, r4, lsl #4\n",
+  "  26:  ea5f 1854   movs.w  r8, r4, lsr #5\n",
+  "  2a:  ea5f 18a4   movs.w  r8, r4, asr #6\n",
+  "  2e:  ea5f 18f4   movs.w  r8, r4, ror #7\n",
+  "  32:  ea5f 0834   movs.w  r8, r4, rrx\n",
+  nullptr
+};
 const char* BasicLoadResults[] = {
   "   0:	69a3      	ldr	r3, [r4, #24]\n",
   "   2:	7e23      	ldrb	r3, [r4, #24]\n",
@@ -434,23 +452,115 @@
 const char* SpecialAddSubResults[] = {
   "   0:	aa14      	add	r2, sp, #80	; 0x50\n",
   "   2:	b014      	add	sp, #80		; 0x50\n",
-  "   4:	f20d 0850 	addw	r8, sp, #80	; 0x50\n",
-  "   8:	f60d 7200 	addw	r2, sp, #3840	; 0xf00\n",
-  "   c:	f60d 7d00 	addw	sp, sp, #3840	; 0xf00\n",
-  "  10:	b094      	sub	sp, #80		; 0x50\n",
-  "  12:	f2ad 0050 	subw	r0, sp, #80	; 0x50\n",
-  "  16:	f2ad 0850 	subw	r8, sp, #80	; 0x50\n",
-  "  1a:	f6ad 7d00 	subw	sp, sp, #3840	; 0xf00\n",
+  "   4:	f10d 0850 	add.w	r8, sp, #80	; 0x50\n",
+  "   8:	f50d 6270 	add.w	r2, sp, #3840	; 0xf00\n",
+  "   c:	f50d 6d70 	add.w	sp, sp, #3840	; 0xf00\n",
+  "  10:	f60d 7dfc 	addw	sp, sp, #4092	; 0xffc\n",
+  "  14:	b094      	sub	sp, #80		; 0x50\n",
+  "  16:	f1ad 0050 	sub.w	r0, sp, #80	; 0x50\n",
+  "  1a:	f1ad 0850 	sub.w	r8, sp, #80	; 0x50\n",
+  "  1e:	f5ad 6d70 	sub.w	sp, sp, #3840	; 0xf00\n",
+  "  22:	f6ad 7dfc 	subw	sp, sp, #4092	; 0xffc\n",
+  nullptr
+};
+const char* LoadFromOffsetResults[] = {
+  "   0:	68e2      	ldr	r2, [r4, #12]\n",
+  "   2:	f8d4 2fff 	ldr.w	r2, [r4, #4095]	; 0xfff\n",
+  "   6:	f504 5280 	add.w	r2, r4, #4096	; 0x1000\n",
+  "   a:	6812      	ldr	r2, [r2, #0]\n",
+  "   c:	f504 1280 	add.w	r2, r4, #1048576	; 0x100000\n",
+  "  10:	f8d2 20a4 	ldr.w	r2, [r2, #164]	; 0xa4\n",
+  "  14:	f241 0200 	movw	r2, #4096	; 0x1000\n",
+  "  18:	f2c0 0210 	movt	r2, #16\n",
+  "  1c:	4422      	add	r2, r4\n",
+  "  1e:	6812      	ldr	r2, [r2, #0]\n",
+  "  20:	f241 0c00 	movw	ip, #4096	; 0x1000\n",
+  "  24:	f2c0 0c10 	movt	ip, #16\n",
+  "  28:	4464      	add	r4, ip\n",
+  "  2a:	6824      	ldr	r4, [r4, #0]\n",
+  "  2c:	89a2      	ldrh	r2, [r4, #12]\n",
+  "  2e:	f8b4 2fff 	ldrh.w	r2, [r4, #4095]	; 0xfff\n",
+  "  32:	f504 5280 	add.w	r2, r4, #4096	; 0x1000\n",
+  "  36:	8812      	ldrh	r2, [r2, #0]\n",
+  "  38:	f504 1280 	add.w	r2, r4, #1048576	; 0x100000\n",
+  "  3c:	f8b2 20a4 	ldrh.w	r2, [r2, #164]	; 0xa4\n",
+  "  40:	f241 0200 	movw	r2, #4096	; 0x1000\n",
+  "  44:	f2c0 0210 	movt	r2, #16\n",
+  "  48:	4422      	add	r2, r4\n",
+  "  4a:	8812      	ldrh	r2, [r2, #0]\n",
+  "  4c:	f241 0c00 	movw	ip, #4096	; 0x1000\n",
+  "  50:	f2c0 0c10 	movt	ip, #16\n",
+  "  54:	4464      	add	r4, ip\n",
+  "  56:	8824      	ldrh	r4, [r4, #0]\n",
+  "  58:	e9d4 2303 	ldrd	r2, r3, [r4, #12]\n",
+  "  5c:	e9d4 23ff 	ldrd	r2, r3, [r4, #1020]	; 0x3fc\n",
+  "  60:	f504 6280 	add.w	r2, r4, #1024	; 0x400\n",
+  "  64:	e9d2 2300 	ldrd	r2, r3, [r2]\n",
+  "  68:	f504 2280 	add.w	r2, r4, #262144	; 0x40000\n",
+  "  6c:	e9d2 2329 	ldrd	r2, r3, [r2, #164];	0xa4\n",
+  "  70:	f240 4200 	movw	r2, #1024	; 0x400\n",
+  "  74:	f2c0 0204 	movt	r2, #4\n",
+  "  78:	4422      	add	r2, r4\n",
+  "  7a:	e9d2 2300 	ldrd	r2, r3, [r2]\n",
+  "  7e:	f240 4c00 	movw	ip, #1024	; 0x400\n",
+  "  82:	f2c0 0c04 	movt	ip, #4\n",
+  "  86:	4464      	add	r4, ip\n",
+  "  88:	e9d4 4500 	ldrd	r4, r5, [r4]\n",
+  "  8c:	f8dc 000c 	ldr.w	r0, [ip, #12]\n",
+  "  90:	f5a4 1280 	sub.w	r2, r4, #1048576	; 0x100000\n",
+  "  94:	f8d2 20a4 	ldr.w	r2, [r2, #164]	; 0xa4\n",
+  "  98:	f994 200c 	ldrsb.w	r2, [r4, #12]\n",
+  "  9c:	7b22      	ldrb	r2, [r4, #12]\n",
+  "  9e:	f9b4 200c 	ldrsh.w	r2, [r4, #12]\n",
   nullptr
 };
 const char* StoreToOffsetResults[] = {
   "   0:	60e2      	str	r2, [r4, #12]\n",
-  "   2:	f44f 5c00 	mov.w	ip, #8192	; 0x2000\n",
-  "   6:	44a4      	add	ip, r4\n",
-  "   8:	f8cc 2000 	str.w	r2, [ip]\n",
-  "   c:	f8cc 000c 	str.w	r0, [ip, #12]\n",
-  "   10:	f8ac 000c 	strh.w	r0, [ip, #12]\n",
-  "   14:	f88c 200c 	strb.w	r2, [ip, #12]\n",
+  "   2:	f8c4 2fff 	str.w	r2, [r4, #4095]	; 0xfff\n",
+  "   6:	f504 5c80 	add.w	ip, r4, #4096	; 0x1000\n",
+  "   a:	f8cc 2000 	str.w	r2, [ip]\n",
+  "   e:	f504 1c80 	add.w	ip, r4, #1048576	; 0x100000\n",
+  "  12:	f8cc 20a4 	str.w	r2, [ip, #164]	; 0xa4\n",
+  "  16:	f241 0c00 	movw	ip, #4096	; 0x1000\n",
+  "  1a:	f2c0 0c10 	movt	ip, #16\n",
+  "  1e:	44a4      	add	ip, r4\n",
+  "  20:	f8cc 2000 	str.w	r2, [ip]\n",
+  "  24:	f241 0c00 	movw	ip, #4096	; 0x1000\n",
+  "  28:	f2c0 0c10 	movt	ip, #16\n",
+  "  2c:	44a4      	add	ip, r4\n",
+  "  2e:	f8cc 4000 	str.w	r4, [ip]\n",
+  "  32:	81a2      	strh	r2, [r4, #12]\n",
+  "  34:	f8a4 2fff 	strh.w	r2, [r4, #4095]	; 0xfff\n",
+  "  38:	f504 5c80 	add.w	ip, r4, #4096	; 0x1000\n",
+  "  3c:	f8ac 2000 	strh.w	r2, [ip]\n",
+  "  40:	f504 1c80 	add.w	ip, r4, #1048576	; 0x100000\n",
+  "  44:	f8ac 20a4 	strh.w	r2, [ip, #164]	; 0xa4\n",
+  "  48:	f241 0c00 	movw	ip, #4096	; 0x1000\n",
+  "  4c:	f2c0 0c10 	movt	ip, #16\n",
+  "  50:	44a4      	add	ip, r4\n",
+  "  52:	f8ac 2000 	strh.w	r2, [ip]\n",
+  "  56:	f241 0c00 	movw	ip, #4096	; 0x1000\n",
+  "  5a:	f2c0 0c10 	movt	ip, #16\n",
+  "  5e:	44a4      	add	ip, r4\n",
+  "  60:	f8ac 4000 	strh.w	r4, [ip]\n",
+  "  64:	e9c4 2303 	strd	r2, r3, [r4, #12]\n",
+  "  68:	e9c4 23ff 	strd	r2, r3, [r4, #1020]	; 0x3fc\n",
+  "  6c:	f504 6c80 	add.w	ip, r4, #1024	; 0x400\n",
+  "  70:	e9cc 2300 	strd	r2, r3, [ip]\n",
+  "  74:	f504 2c80 	add.w	ip, r4, #262144	; 0x40000\n",
+  "  78:	e9cc 2329 	strd	r2, r3, [ip, #164];	0xa4\n",
+  "  7c:	f240 4c00 	movw	ip, #1024	; 0x400\n",
+  "  80:	f2c0 0c04 	movt	ip, #4\n",
+  "  84:	44a4      	add	ip, r4\n",
+  "  86:	e9cc 2300 	strd	r2, r3, [ip]\n",
+  "  8a:	f240 4c00 	movw	ip, #1024	; 0x400\n",
+  "  8e:	f2c0 0c04 	movt	ip, #4\n",
+  "  92:	44a4      	add	ip, r4\n",
+  "  94:	e9cc 4500 	strd	r4, r5, [ip]\n",
+  "  98:	f8cc 000c 	str.w	r0, [ip, #12]\n",
+  "  9c:	f5a4 1c80 	sub.w	ip, r4, #1048576	; 0x100000\n",
+  "  a0:	f8cc 20a4 	str.w	r2, [ip, #164]	; 0xa4\n",
+  "  a4:	7322      	strb	r2, [r4, #12]\n",
   nullptr
 };
 const char* IfThenResults[] = {
@@ -4952,6 +5062,7 @@
     test_results["DataProcessingModifiedImmediate"] = DataProcessingModifiedImmediateResults;
     test_results["DataProcessingModifiedImmediates"] = DataProcessingModifiedImmediatesResults;
     test_results["DataProcessingShiftedRegister"] = DataProcessingShiftedRegisterResults;
+    test_results["ShiftImmediate"] = ShiftImmediateResults;
     test_results["BasicLoad"] = BasicLoadResults;
     test_results["BasicStore"] = BasicStoreResults;
     test_results["ComplexLoad"] = ComplexLoadResults;
@@ -4966,6 +5077,7 @@
     test_results["StoreMultiple"] = StoreMultipleResults;
     test_results["MovWMovT"] = MovWMovTResults;
     test_results["SpecialAddSub"] = SpecialAddSubResults;
+    test_results["LoadFromOffset"] = LoadFromOffsetResults;
     test_results["StoreToOffset"] = StoreToOffsetResults;
     test_results["IfThen"] = IfThenResults;
     test_results["CbzCbnz"] = CbzCbnzResults;
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 4f4792a..0b0f094 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -106,7 +106,6 @@
   jit/profiling_info.cc \
   lambda/art_lambda_method.cc \
   lambda/box_table.cc \
-  lambda/box_class_table.cc \
   lambda/closure.cc \
   lambda/closure_builder.cc \
   lambda/leaking_allocator.cc \
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 771c8b7..d6ba304 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -46,15 +46,9 @@
   }
 };
 
-}  // namespace art
-
 // Common tests are declared next to the constants.
 #define ADD_TEST_EQ(x, y) EXPECT_EQ(x, y);
 #include "asm_support.h"
-// Important: Do not include this inside of another namespace, since asm_support.h
-// defines its own namespace which must not be nested.
-
-namespace art {
 
 TEST_F(ArchTest, CheckCommonOffsetsAndSizes) {
   CheckAsmSupportOffsetsAndSizes();
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 588268d..631b784 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1045,26 +1045,6 @@
     DELIVER_PENDING_EXCEPTION
 END art_quick_proxy_invoke_handler
 
-// Forward call from boxed innate lambda to the underlying lambda closure's target method.
-     .extern artQuickLambdaProxyInvokeHandler
-ENTRY art_quick_lambda_proxy_invoke_handler
-// TODO: have a faster handler that doesn't need to set up a frame
-    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_R0
-    mov     r2, r9                 @ pass Thread::Current
-    mov     r3, sp                 @ pass SP
-    blx     artQuickLambdaProxyInvokeHandler  @ (Method* proxy method, receiver, Thread*, SP)
-    ldr     r2, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
-    // Tear down the callee-save frame. Skip arg registers.
-    add     sp, #(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
-    .cfi_adjust_cfa_offset -(FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
-    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
-    cbnz    r2, 1f                 @ success if no exception is pending
-    vmov    d0, r0, r1             @ store into fpr, for when it's a fpr return...
-    bx      lr                     @ return on success
-1:
-    DELIVER_PENDING_EXCEPTION
-END art_quick_lambda_proxy_invoke_handler
-
     /*
      * Called to resolve an imt conflict. r12 is a hidden argument that holds the target method's
      * dex method index.
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 177873d..9ccabad 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1582,28 +1582,6 @@
     DELIVER_PENDING_EXCEPTION
 END art_quick_proxy_invoke_handler
 
-     /*
-     * Called by managed code that is attempting to call a method on a lambda proxy class. On entry
-     * x0 holds the lambda proxy method and x1 holds the receiver; The frame size of the invoked
-     * lambda proxy method agrees with a ref and args callee save frame.
-     */
-     .extern artQuickLambdaProxyInvokeHandler
-ENTRY art_quick_lambda_proxy_invoke_handler
-// TODO: have a faster way to invoke lambda proxies without setting up the whole frame.
-    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_X0
-    mov     x2, xSELF                   // pass Thread::Current
-    mov     x3, sp                      // pass SP
-    bl      artQuickLambdaProxyInvokeHandler  // (Method* proxy method, receiver, Thread*, SP)
-    ldr     x2, [xSELF, THREAD_EXCEPTION_OFFSET]
-    cbnz    x2, .Lexception_in_lambda_proxy    // success if no exception is pending
-    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME // Restore frame
-    fmov    d0, x0                      // Store result in d0 in case it was float or double
-    ret                                 // return on success
-.Lexception_in_lambda_proxy:
-    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
-    DELIVER_PENDING_EXCEPTION
-END art_quick_lambda_proxy_invoke_handler
-
     /*
      * Called to resolve an imt conflict. xIP1 is a hidden argument that holds the target method's
      * dex method index.
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index af79f5e..0691f2a 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -1377,10 +1377,6 @@
     DELIVER_PENDING_EXCEPTION
 END art_quick_proxy_invoke_handler
 
-// Forward call from boxed innate lambda to the underlying lambda closure's target method.
-    .extern artQuickLambdaProxyInvokeHandler
-UNIMPLEMENTED art_quick_lambda_proxy_invoke_handler
-
     /*
      * Called to resolve an imt conflict. t0 is a hidden argument that holds the target method's
      * dex method index.
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 5e70a95..66c8aad 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -1431,10 +1431,6 @@
     DELIVER_PENDING_EXCEPTION
 END art_quick_proxy_invoke_handler
 
-// Forward call from boxed innate lambda to the underlying lambda closure's target method.
-    .extern artQuickLambdaProxyInvokeHandler
-UNIMPLEMENTED art_quick_lambda_proxy_invoke_handler
-
     /*
      * Called to resolve an imt conflict. t0 is a hidden argument that holds the target method's
      * dex method index.
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 4fb6119..463c9cf 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -1391,149 +1391,6 @@
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
 END_FUNCTION art_quick_proxy_invoke_handler
 
-#if LAMBDA_INVOKE_USES_LONG
-#undef LAMBDA_PROXY_SETUP_FRAME
-// We need to always do a 'pop' to readjust the stack, so we have to use the slower call instruction.
-#define LAMBDA_PROXY_SETUP_FRAME 1
-#define LAMBDA_INVOKE_REALIGN_STACK_FRAME 1
-#else
-#define LAMBDA_INVOKE_REALIGN_STACK_FRAME 0
-#endif
-
-#define LAMBDA_INVOKE_CALLS_INTO_RUNTIME LAMBDA_INVOKE_REALIGN_STACK_FRAME
-
-// Forward call from boxed innate lambda to the underlying lambda closure's target method.
-DEFINE_FUNCTION art_quick_lambda_proxy_invoke_handler
-    // This function is always called when the lambda is innate.
-    // Therefore we can assume the box is to an innate lambda.
-    // TODO: perhaps there should be a DCHECK to make sure it's innate?
-
-#if LAMBDA_PROXY_SETUP_FRAME
-    // Set up a quick frame when debugging so we can see that it's going through a stub.
-    // An invoke-virtual + a stub invocation is enough of a hint that we *could* be
-    // going through a lambda proxy.
-    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_EAX
-#endif
-
-#if !LAMBDA_INVOKE_CALLS_INTO_RUNTIME
-    // Rewrite the following 2 arguments, stored on stack frame:
-    //
-    //   |--------|
-    //   |receiver|   <- esp-4
-    //   |--------|
-    //   | method |   <- esp
-    //   |--------|
-
-    // Set up the new correct method receiver (swap object with closure).
-    // -- The original object is no longer available after this.
-    //
-    // (Before)
-    // ecx == mirror::Object* boxed_lambda;  // lambda proxy object.
-    movl MIRROR_OBJECT_BOXED_INNATE_LAMBDA_CLOSURE_POINTER_OFFSET(%ecx), %ecx
-    // (After)
-    // lambda::Closure* closure = boxed_lambda->closure_;
-    // boxed_lambda = closure; // Overwrite lambda proxy object
-    // ecx == closure
-
-    // Look up the new correct method target.
-    // -- The original method target is no longer available after this.
-    //
-    // (Before)
-    // eax == ArtMethod* old_receiver_method;
-    movl LAMBDA_CLOSURE_METHOD_OFFSET(%ecx), %eax
-    // (After)
-    // ArtLambdaMethod* lambda_method_target = closure->lambda_info_;
-    // eax = lambda_method_target
-    //
-    // Set up the correct method target from the lambda info.
-    movl ART_LAMBDA_METHOD_ART_METHOD_OFFSET(%eax), %eax  // Load new receiver method
-    // (After)
-    // ArtMethod* target_method = lambda_method_target->target_
-    // eax = target_method
-#endif
-
-#if LAMBDA_INVOKE_CALLS_INTO_RUNTIME
-    PUSH esp                      // pass SP
-    pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
-    CFI_ADJUST_CFA_OFFSET(4)
-    PUSH ecx                      // pass receiver
-    PUSH eax                      // pass proxy method
-    call SYMBOL(artQuickLambdaProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
-    movd %eax, %xmm0              // place return value also into floating point return value
-    movd %edx, %xmm1
-    punpckldq %xmm1, %xmm0
-    addl LITERAL(16 + FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE), %esp
-    CFI_ADJUST_CFA_OFFSET(-(16 + FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE - FRAME_SIZE_REFS_ONLY_CALLEE_SAVE))
-    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
-    RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-#endif
-
-#if LAMBDA_INVOKE_USES_LONG && !LAMBDA_INVOKE_REALIGN_STACK_FRAME
-    // As a temporary workaround, lambda functions look like
-    //  (J[Arg2][Arg3][Arg4]...)
-    // This means that we can't just pass in the lambda as a 32-bit pointer
-    // We pad the arguments with an extra 32-bit "0" where Arg2 used to be instead.
-
-    // Required arguments for a lambda method:
-    //
-    // Arg0 = eax = method
-    // Arg1 = ecx = closure (hi)
-    // Arg2 = edx = closure (lo)
-    // Arg3 = ebx = <?> (first user-defined argument)
-
-    // Transformation diagram:
-    //
-    // Arg0  Arg1  Arg2  Arg3 ... ArgN
-    //   |    |        \     \        \
-    //   |    |         \     \        \
-    // Arg0  Arg1  0x00  Arg2  Arg3 ... ArgN
-    //              /\
-    //           (inserted)
-    PUSH ebx          // Move out Arg3 into Arg4, and also for all K>3 ArgK into ArgK+1
-    mov %edx, %ebx    // Move out Arg2 into Arg3
-    xor %edx, %edx    // Clear closure 32-bit low register
-
-    // XX: Does this work at all ? This probably breaks the visitors (*and* its unaligned).
-
-    // FIXME: call into the runtime and do a proxy-like-invoke
-    // using a ShadowFrame quick visitor, and then use ArtMethod::Invoke
-    // to call into the actual method (which will take care of fixing up alignment).
-    // Trying to realign in the assembly itself won't actually work
-    // since then the visitor will unwind incorrectly (unless we also fixed up the ManagedStack).
-#endif
-
-    // TODO: avoid extra indirect load by subclass ArtLambdaMethod from ArtMethod.
-
-    // Forward the call to the overwritten receiver method.
-    // -- Arguments [2,N] are left completely untouched since the signature is otherwise identical.
-#if LAMBDA_PROXY_SETUP_FRAME
-  #if LAMBDA_INVOKE_CALLS_INTO_RUNTIME
-    // Have to call into runtime in order to re-align the stack frame to 16 bytes.
-    int3
-  #else
-    // Just call into the method directly. Don't worry about realigning.
-    call *ART_METHOD_QUICK_CODE_OFFSET_32(%eax)  // (new method, new receiver, old args...)
-
-    // The stack frame was manually adjusted, so make sure we have a pop here to fix it back.
-    #if LAMBDA_INVOKE_USES_LONG && !LAMBDA_INVOKE_REALIGN_STACK_FRAME
-
-    POP ecx   // OK: ecx is scratch register after the call.
-    // XX: use 'add esp, 4' instead if we need to keep the register? This way we get cleaner CFI.
-    #endif
-  #endif
-    RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
-
-#else
-    // Do not use 'call' here since the stack visitors wouldn't know how to visit this frame.
-    jmp *ART_METHOD_QUICK_CODE_OFFSET_32(%eax)   // tailcall (new method, new receiver, old args...)
-#endif
-
-#if LAMBDA_PROXY_SETUP_FRAME
-    ret
-#endif
-
-END_FUNCTION art_quick_lambda_proxy_invoke_handler
-
     /*
      * Called to resolve an imt conflict. xmm7 is a hidden argument that holds the target method's
      * dex method index.
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 0a54aa3..17d277e 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1297,6 +1297,7 @@
     RETURN_IF_EAX_ZERO                   // return or deliver exception
 END_FUNCTION art_quick_set64_static
 
+
 DEFINE_FUNCTION art_quick_proxy_invoke_handler
     SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_RDI
 
@@ -1308,60 +1309,6 @@
     RETURN_OR_DELIVER_PENDING_EXCEPTION
 END_FUNCTION art_quick_proxy_invoke_handler
 
-// Forward call from boxed innate lambda to the underlying lambda closure's target method.
-DEFINE_FUNCTION art_quick_lambda_proxy_invoke_handler
-    // This function is always called when the lambda is innate.
-    // Therefore we can assume the box is to an innate lambda.
-    // TODO: perhaps there should be a DCHECK to make sure it's innate?
-
-#if LAMBDA_PROXY_SETUP_FRAME
-    // Set up a quick frame when debugging so we can see that it's going through a stub.
-    // Our stack traces will contain the quick lambda proxy hander.
-    // Note that we *must* go through the handler (when spilling) otherwise we won't know how
-    // to move the spilled GC references from the caller to this stub.
-    SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME_WITH_METHOD_IN_RDI
-
-    movq %gs:THREAD_SELF_OFFSET, %rdx       // Pass Thread::Current().
-    movq %rsp, %rcx                         // Pass SP.
-    call SYMBOL(artQuickLambdaProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
-    RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
-    movq %rax, %xmm0                        // Copy return value in case of float returns.
-    RETURN_OR_DELIVER_PENDING_EXCEPTION
-#else
-    // Set up the new correct method receiver (swap object with closure).
-    // -- The original object is no longer available after this.
-    //
-    // (Before)
-    // rsi == mirror::Object* boxed_lambda;  // lambda proxy object.
-    movq MIRROR_OBJECT_BOXED_INNATE_LAMBDA_CLOSURE_POINTER_OFFSET(%rsi), %rsi
-    // (After)
-    // lambda::Closure* closure = boxed_lambda->closure_;  // Overwrite receiver object.
-    // rsi == closure
-
-    // Look up the new correct method target.
-    // -- The original method target is no longer available after this.
-    movq LAMBDA_CLOSURE_METHOD_OFFSET(%rsi), %rdi          // Overwrite old receiver method.
-    // (After)
-    // ArtLambdaMethod* lambda_method_target = closure->lambda_info_;
-    // rdi == lambda_method_target
-
-    // TODO: avoid extra indirect load by subclass ArtLambdaMethod from ArtMethod.
-
-    // Set up the correct method target from the lambda info.
-    movq ART_LAMBDA_METHOD_ART_METHOD_OFFSET(%rdi), %rdi  // Write new receiver method.
-    // (After)
-    // ArtMethod* method_target = lambda_method_target->target_;
-    // rdi == method_target
-
-    // Forward the call to the overwritten receiver method.
-    // -- Arguments [2,N] are left completely untouched since the signature is otherwise identical.
-    // Do not use 'call' here since the stack would be misaligned (8b instead of 16b).
-    // Also the stack visitors wouldn't know how to visit this frame if we used a call.
-    jmp *ART_METHOD_QUICK_CODE_OFFSET_64(%rdi)   // tailcall (new method, new receiver, old args...)
-#endif
-
-END_FUNCTION art_quick_lambda_proxy_invoke_handler
-
     /*
      * Called to resolve an imt conflict.
      * rax is a hidden argument that holds the target method's dex method index.
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index ab42d0e..4166e22 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -255,7 +255,7 @@
 
 inline const char* ArtField::GetName() SHARED_REQUIRES(Locks::mutator_lock_) {
   uint32_t field_index = GetDexFieldIndex();
-  if (UNLIKELY(GetDeclaringClass()->IsAnyProxyClass())) {
+  if (UNLIKELY(GetDeclaringClass()->IsProxyClass())) {
     DCHECK(IsStatic());
     DCHECK_LT(field_index, 2U);
     return field_index == 0 ? "interfaces" : "throws";
@@ -266,7 +266,7 @@
 
 inline const char* ArtField::GetTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_) {
   uint32_t field_index = GetDexFieldIndex();
-  if (UNLIKELY(GetDeclaringClass()->IsAnyProxyClass())) {
+  if (UNLIKELY(GetDeclaringClass()->IsProxyClass())) {
     DCHECK(IsStatic());
     DCHECK_LT(field_index, 2U);
     // 0 == Class[] interfaces; 1 == Class[][] throws;
@@ -290,8 +290,8 @@
 inline mirror::Class* ArtField::GetType() {
   const uint32_t field_index = GetDexFieldIndex();
   auto* declaring_class = GetDeclaringClass();
-  if (UNLIKELY(declaring_class->IsAnyProxyClass())) {
-    return AnyProxyFindSystemClass(GetTypeDescriptor());
+  if (UNLIKELY(declaring_class->IsProxyClass())) {
+    return ProxyFindSystemClass(GetTypeDescriptor());
   }
   auto* dex_cache = declaring_class->GetDexCache();
   const DexFile* const dex_file = dex_cache->GetDexFile();
diff --git a/runtime/art_field.cc b/runtime/art_field.cc
index 3ac563a..3737e0d 100644
--- a/runtime/art_field.cc
+++ b/runtime/art_field.cc
@@ -69,8 +69,8 @@
   return nullptr;
 }
 
-mirror::Class* ArtField::AnyProxyFindSystemClass(const char* descriptor) {
-  DCHECK(GetDeclaringClass()->IsAnyProxyClass());
+mirror::Class* ArtField::ProxyFindSystemClass(const char* descriptor) {
+  DCHECK(GetDeclaringClass()->IsProxyClass());
   return Runtime::Current()->GetClassLinker()->FindSystemClass(Thread::Current(), descriptor);
 }
 
diff --git a/runtime/art_field.h b/runtime/art_field.h
index 4ebe6fb..a943a34 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -191,9 +191,7 @@
   }
 
  private:
-  mirror::Class* AnyProxyFindSystemClass(const char* descriptor)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-  mirror::Class* LambdaProxyFindSystemClass(const char* descriptor)
+  mirror::Class* ProxyFindSystemClass(const char* descriptor)
       SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::Class* ResolveGetType(uint32_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_);
   mirror::String* ResolveGetStringName(Thread* self, const DexFile& dex_file, uint32_t string_idx,
diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h
index b6e811f..cf548ad 100644
--- a/runtime/art_method-inl.h
+++ b/runtime/art_method-inl.h
@@ -292,7 +292,7 @@
 }
 
 inline const char* ArtMethod::GetShorty(uint32_t* out_length) {
-  DCHECK(!IsProxyMethod() || IsLambdaProxyMethod());  // OK: lambda proxies use parent dex cache.
+  DCHECK(!IsProxyMethod());
   const DexFile* dex_file = GetDexFile();
   return dex_file->GetMethodShorty(dex_file->GetMethodId(GetDexMethodIndex()), out_length);
 }
@@ -354,31 +354,10 @@
 }
 
 inline const DexFile::TypeList* ArtMethod::GetParameterTypeList() {
-  // XX: Do proxy methods have a dex file?  not sure.
+  DCHECK(!IsProxyMethod());
   const DexFile* dex_file = GetDexFile();
-  const DexFile::MethodId* method_id = nullptr;
-
-  if (kIsDebugBuild) {
-    if (UNLIKELY(IsProxyMethod())) {
-      // Proxy method case.
-      CHECK(IsLambdaProxyMethod()) << "Cannot GetParameterTypeList for java.lang.reflect.Proxy";
-
-      //
-      // We do not have a method ID, so look up one of the supers we overrode,
-      // it will have the same exact parameter type list as we do.
-
-      // Lambda proxy classes have the dex cache from their single interface parent.
-      // Proxy classes have multiple interface parents, so they use the root dexcache instead.
-      //
-      // For lambda proxy classes only, get the type list data from the parent.
-      // (code happens to look the same as the usual non-proxy path).
-    }
-  }
-
-  method_id = &dex_file->GetMethodId(GetDexMethodIndex());
-  DCHECK(method_id != nullptr);
-
-  const DexFile::ProtoId& proto = dex_file->GetMethodPrototype(*method_id);
+  const DexFile::ProtoId& proto = dex_file->GetMethodPrototype(
+      dex_file->GetMethodId(GetDexMethodIndex()));
   return dex_file->GetProtoParameters(proto);
 }
 
@@ -418,20 +397,12 @@
 }
 
 inline mirror::DexCache* ArtMethod::GetDexCache() {
-  DCHECK(!IsProxyMethod() || IsLambdaProxyMethod());  // OK: lambda proxies use parent dex cache.
+  DCHECK(!IsProxyMethod());
   return GetDeclaringClass()->GetDexCache();
 }
 
 inline bool ArtMethod::IsProxyMethod() {
-  return GetDeclaringClass()->IsAnyProxyClass();
-}
-
-inline bool ArtMethod::IsReflectProxyMethod() {
-  return GetDeclaringClass()->IsReflectProxyClass();
-}
-
-inline bool ArtMethod::IsLambdaProxyMethod() {
-  return GetDeclaringClass()->IsLambdaProxyClass();
+  return GetDeclaringClass()->IsProxyClass();
 }
 
 inline ArtMethod* ArtMethod::GetInterfaceMethodIfProxy(size_t pointer_size) {
@@ -477,9 +448,9 @@
 void ArtMethod::VisitRoots(RootVisitorType& visitor, size_t pointer_size) {
   ArtMethod* interface_method = nullptr;
   mirror::Class* klass = declaring_class_.Read();
-  if (UNLIKELY(klass != nullptr && klass->IsAnyProxyClass())) {
+  if (UNLIKELY(klass != nullptr && klass->IsProxyClass())) {
     // For normal methods, dex cache shortcuts will be visited through the declaring class.
-    // However, for any proxies we need to keep the interface method alive, so we visit its roots.
+    // However, for proxies we need to keep the interface method alive, so we visit its roots.
     interface_method = mirror::DexCache::GetElementPtrSize(
         GetDexCacheResolvedMethods(pointer_size),
         GetDexMethodIndex(),
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 98f5aee..5a2d6c3 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -171,16 +171,8 @@
     return (GetAccessFlags() & kAccSynthetic) != 0;
   }
 
-  // Does this method live on a declaring class that is itself any proxy class?
-  // -- Returns true for both java.lang.reflect.Proxy and java.lang.LambdaProxy subclasses.
   bool IsProxyMethod() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // Does this method live in a java.lang.reflect.Proxy subclass?
-  bool IsReflectProxyMethod() SHARED_REQUIRES(Locks::mutator_lock_);
-
-  // Does this method live in a java.lang.LambdaProxy subclass?
-  bool IsLambdaProxyMethod() SHARED_REQUIRES(Locks::mutator_lock_);
-
   bool IsPreverified() {
     return (GetAccessFlags() & kAccPreverified) != 0;
   }
@@ -282,15 +274,7 @@
                                             uint32_t name_and_signature_idx)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // Invoke this method, passing all the virtual registers in args.
-  // -- args_size must be the size in bytes (not size in words)!
-  // -- shorty must be the method shorty (i.e. it includes the return type).
-  // The result is set when the method finishes execution successfully.
-  void Invoke(Thread* self,
-              uint32_t* args,
-              uint32_t args_size,  // NOTE: size in bytes
-              /*out*/JValue* result,
-              const char* shorty)
+  void Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result, const char* shorty)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
   const void* GetEntryPointFromQuickCompiledCode() {
@@ -444,9 +428,6 @@
 
   mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // Returns the current method ('this') if this is a regular, non-proxy method.
-  // Otherwise, when this class is a proxy (IsProxyMethod), look-up the original interface's
-  // method (that the proxy is "overriding") and return that.
   ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(size_t pointer_size)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 785a9be..b548dfb 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -19,12 +19,9 @@
 
 #if defined(__cplusplus)
 #include "art_method.h"
-#include "lambda/art_lambda_method.h"
-#include "lambda/closure.h"
 #include "gc/allocator/rosalloc.h"
 #include "lock_word.h"
 #include "mirror/class.h"
-#include "mirror/lambda_proxy.h"
 #include "mirror/string.h"
 #include "runtime.h"
 #include "thread.h"
@@ -52,8 +49,6 @@
 #define ADD_TEST_EQ(x, y) CHECK_EQ(x, y);
 #endif
 
-namespace art {
-
 static inline void CheckAsmSupportOffsetsAndSizes() {
 #else
 #define ADD_TEST_EQ(x, y)
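// Illustrative, self-contained sketch of the ADD_TEST_EQ pattern used in this
// header (hypothetical MyType / MY_* names, not part of the real runtime): the
// same text is consumed twice -- C++ sees a runtime check inside a function
// body, while the assembler sees only the bare #define constants.
#if defined(__cplusplus)
#include <cassert>
#include <cstddef>
#include <cstdint>
struct MyType { uint64_t first_; uint64_t field_; };
#define MY_ADD_TEST_EQ(x, y) assert((x) == (y));
static inline void CheckMyOffsets() {
#else
#define MY_ADD_TEST_EQ(x, y)
#endif

// Usable from assembly as a plain constant; checked against C++ reality below.
#define MY_TYPE_FIELD_OFFSET 8
MY_ADD_TEST_EQ(static_cast<size_t>(MY_TYPE_FIELD_OFFSET), offsetof(MyType, field_))

#if defined(__cplusplus)
}  // End of CheckMyOffsets.
#endif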
@@ -303,80 +298,9 @@
             static_cast<int32_t>(art::gc::allocator::RosAlloc::RunSlotNextOffset()))
 // Assert this so that we can avoid zeroing the next field by installing the class pointer.
 ADD_TEST_EQ(ROSALLOC_SLOT_NEXT_OFFSET, MIRROR_OBJECT_CLASS_OFFSET)
-// Working with raw lambdas (lambda::Closure) in raw memory:
-//
-//     |---------------------|
-//     | ArtLambdaMethod*    |  <-- pointer to lambda art method, has the info like the size.
-//     |---------------------|  <-- 'data offset'
-//     | [ Dynamic Size ]    |  <-- OPTIONAL: only if the ArtLambdaMethod::dynamic_size_ is true.
-//     |---------------------|
-//     | Captured Variables  |
-//     |        ...          |
-//     |---------------------|  <-- total length determined by "dynamic size" if it is present,
-//                                  otherwise by the ArtLambdaMethod::static_size_
-
-// Offset from start of lambda::Closure to the ArtLambdaMethod*.
-#define LAMBDA_CLOSURE_METHOD_OFFSET 0
-ADD_TEST_EQ(static_cast<size_t>(LAMBDA_CLOSURE_METHOD_OFFSET),
-            offsetof(art::lambda::ClosureStorage, lambda_info_))
-// Offset from the start of lambda::Closure to the data (captured vars or dynamic size).
-#define LAMBDA_CLOSURE_DATA_OFFSET __SIZEOF_POINTER__
-ADD_TEST_EQ(static_cast<size_t>(LAMBDA_CLOSURE_DATA_OFFSET),
-            offsetof(art::lambda::ClosureStorage, captured_))
-// Offsets to captured variables are intentionally omitted, as computing them needs a runtime branch.
-
-// The size of a lambda closure after it's been compressed down for storage.
-// -- Although a lambda closure is a virtual register pair (64-bit), we only need 32-bit
-//    to track the pointer when we are on 32-bit architectures.
-//    Both the compiler and the runtime therefore compress the closure down for 32-bit archs.
-#define LAMBDA_CLOSURE_COMPRESSED_POINTER_SIZE __SIZEOF_POINTER__
-ADD_TEST_EQ(static_cast<size_t>(LAMBDA_CLOSURE_COMPRESSED_POINTER_SIZE),
-            sizeof(art::lambda::Closure*))
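// Minimal, self-contained sketch of the closure layout described above
// (SketchArtLambdaMethod / SketchClosureStorage are stand-ins, not the real
// types from lambda/closure.h): the ArtLambdaMethod* sits at offset 0 and the
// captured-variable bytes begin one pointer-size later, matching the two
// macros above.
#include <cstddef>
#include <cstdint>
struct SketchArtLambdaMethod;  // Stand-in for art::lambda::ArtLambdaMethod.
struct SketchClosureStorage {
  SketchArtLambdaMethod* lambda_info_;  // LAMBDA_CLOSURE_METHOD_OFFSET == 0.
  uint8_t captured_[1];                 // LAMBDA_CLOSURE_DATA_OFFSET == sizeof(void*).
};
static_assert(offsetof(SketchClosureStorage, lambda_info_) == 0,
              "lambda info must be first");
static_assert(offsetof(SketchClosureStorage, captured_) == sizeof(void*),
              "captured data follows the method pointer");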
-
-// Working with boxed innate lambdas (as a mirror::Object) in raw memory:
-// --- Note that this layout only applies to lambdas originally made with create-lambda.
-// --- Boxing a lambda created from a new-instance instruction is simply the original object.
-//
-//     |---------------------|
-//     |   object header     |
-//     |---------------------|
-//     | lambda::Closure*    | <-- long on 64-bit, int on 32-bit
-//     |---------------------|
-#define MIRROR_OBJECT_BOXED_INNATE_LAMBDA_CLOSURE_POINTER_OFFSET (MIRROR_OBJECT_HEADER_SIZE)
-ADD_TEST_EQ(static_cast<size_t>(MIRROR_OBJECT_BOXED_INNATE_LAMBDA_CLOSURE_POINTER_OFFSET),
-            art::mirror::LambdaProxy::GetInstanceFieldOffsetClosure().SizeValue())
-            // Equivalent to (private) offsetof(art::mirror::LambdaProxy, closure_))
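// Companion sketch for the boxed-lambda layout above (SketchLambdaProxy is a
// stand-in; assumes the usual 8-byte mirror::Object header of a compressed
// class pointer plus a lock word): the closure pointer is the first instance
// field, directly after the header.
#include <cstddef>
#include <cstdint>
struct SketchLambdaProxy {
  uint32_t klass_;     // mirror::Object header: compressed class pointer...
  uint32_t monitor_;   // ...and lock word.
  uintptr_t closure_;  // lambda::Closure*; a long on 64-bit, an int on 32-bit.
};
static_assert(offsetof(SketchLambdaProxy, closure_) == 8,
              "closure pointer follows the 8-byte object header");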
-
-// Working with lambda method metadata (lambda::ArtLambdaMethod) in raw memory:
-// --- The ArtMethod* is the first field, so its offset within ArtLambdaMethod is 0.
-//
-//     |---------------------|
-//     | ArtMethod*          |  <-- the target method backing this lambda
-//     |---------------------|
-//     | (other lambda info) |
-//     |---------------------|
-#define ART_LAMBDA_METHOD_ART_METHOD_OFFSET (0)
-ADD_TEST_EQ(static_cast<size_t>(ART_LAMBDA_METHOD_ART_METHOD_OFFSET),
-            art::lambda::ArtLambdaMethod::GetArtMethodOffset())
-
-#if defined(NDEBUG)
-// Release should be faaast. So just jump directly to the lambda method.
-#define LAMBDA_PROXY_SETUP_FRAME 0
-#else
-// Debug can be slower, and we want to get better stack traces. Set up a frame.
-#define LAMBDA_PROXY_SETUP_FRAME 1
-#endif
-
-// For WIP implementation, lambda types are all "longs"
-// which means on a 32-bit implementation we need to fill the argument with 32-bit 0s
-// whenever we invoke a method with a lambda in it.
-// TODO: remove all usages of this once we go to a proper \LambdaType; system.
-#define LAMBDA_INVOKE_USES_LONG 1
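// Sketch of what LAMBDA_INVOKE_USES_LONG implies (hypothetical helper, not
// part of the runtime): the closure pointer travels in a 64-bit "long"
// argument slot, so on a 32-bit target the upper 32 bits of the virtual
// register pair are filled with zeroes.
#include <cstdint>
static inline uint64_t PackClosureAsLongArg(const void* closure) {
  // uintptr_t is 32 bits on 32-bit targets, so the cast zero-extends; on
  // 64-bit targets the pointer already fills the whole slot.
  return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(closure));
}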
 
 #if defined(__cplusplus)
 }  // End of CheckAsmSupportOffsets.
-}  // namespace art
 #endif
 
 #endif  // ART_RUNTIME_ASM_SUPPORT_H_
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index e2ade07..969f5b9 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -53,7 +53,6 @@
   kAllocatorTagClassTable,
   kAllocatorTagInternTable,
   kAllocatorTagLambdaBoxTable,
-  kAllocatorTagLambdaProxyClassBoxTable,
   kAllocatorTagMaps,
   kAllocatorTagLOS,
   kAllocatorTagSafeMap,
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 6ca56f5..70bd398 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -65,7 +65,6 @@
 Mutex* Locks::trace_lock_ = nullptr;
 Mutex* Locks::unexpected_signal_lock_ = nullptr;
 Mutex* Locks::lambda_table_lock_ = nullptr;
-Mutex* Locks::lambda_class_table_lock_ = nullptr;
 Uninterruptible Roles::uninterruptible_;
 
 struct AllMutexData {
@@ -955,7 +954,6 @@
     DCHECK(trace_lock_ != nullptr);
     DCHECK(unexpected_signal_lock_ != nullptr);
     DCHECK(lambda_table_lock_ != nullptr);
-    DCHECK(lambda_class_table_lock_ != nullptr);
   } else {
     // Create global locks in level order from highest lock level to lowest.
     LockLevel current_lock_level = kInstrumentEntrypointsLock;
@@ -1074,10 +1072,6 @@
     DCHECK(lambda_table_lock_ == nullptr);
     lambda_table_lock_ = new Mutex("lambda table lock", current_lock_level);
 
-    UPDATE_CURRENT_LOCK_LEVEL(kLambdaClassTableLock);
-    DCHECK(lambda_class_table_lock_ == nullptr);
-    lambda_class_table_lock_ = new Mutex("lambda class table lock", current_lock_level);
-
     UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
     DCHECK(abort_lock_ == nullptr);
     abort_lock_ = new Mutex("abort lock", current_lock_level, true);
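// Sketch of the ordered-creation idiom used above (Sketch* names are
// stand-ins; the real UPDATE_CURRENT_LOCK_LEVEL macro also records the level
// for lock-order checking): every global mutex is created at a strictly lower
// level than the one before it, so acquiring a higher-level lock while holding
// a lower-level one can be flagged at runtime.
#include <cassert>
enum SketchLockLevel {
  kSketchAbortLock = 0,
  kSketchLambdaTableLock,
  kSketchInstrumentEntrypointsLock,
};
struct SketchMutex {
  SketchMutex(const char* name, SketchLockLevel level) : name_(name), level_(level) {}
  const char* name_;
  SketchLockLevel level_;
};
static void SketchCreateGlobalLocks() {
  SketchLockLevel current_lock_level = kSketchInstrumentEntrypointsLock;
  // UPDATE_CURRENT_LOCK_LEVEL(kSketchLambdaTableLock): must strictly decrease.
  assert(kSketchLambdaTableLock < current_lock_level);
  current_lock_level = kSketchLambdaTableLock;
  SketchMutex* lambda_table_lock = new SketchMutex("lambda table lock", current_lock_level);
  (void)lambda_table_lock;
}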
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index e2d7062..d4c9057 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -60,7 +60,6 @@
   kUnexpectedSignalLock,
   kThreadSuspendCountLock,
   kAbortLock,
-  kLambdaClassTableLock,
   kLambdaTableLock,
   kJdwpSocketLock,
   kRegionSpaceRegionLock,
@@ -693,10 +692,6 @@
   // Allow reader-writer mutual exclusion on the boxed table of lambda objects.
   // TODO: this should be a RW mutex lock, except that ConditionVariables don't work with it.
   static Mutex* lambda_table_lock_ ACQUIRED_AFTER(mutator_lock_);
-
-  // Allow reader-writer mutual exclusion on the boxed table of lambda proxy classes.
-  // TODO: this should be a RW mutex lock, except that ConditionVariables don't work with it.
-  static Mutex* lambda_class_table_lock_ ACQUIRED_AFTER(lambda_table_lock_);
 };
 
 class Roles {
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 8a0d8d4..2dd2a83 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -56,7 +56,6 @@
 #include "interpreter/interpreter.h"
 #include "jit/jit.h"
 #include "jit/jit_code_cache.h"
-#include "lambda/box_class_table.h"
 #include "leb128.h"
 #include "linear_alloc.h"
 #include "mirror/class.h"
@@ -65,7 +64,6 @@
 #include "mirror/dex_cache-inl.h"
 #include "mirror/field.h"
 #include "mirror/iftable-inl.h"
-#include "mirror/lambda_proxy.h"
 #include "mirror/method.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
@@ -583,9 +581,6 @@
   // Create java.lang.reflect.Proxy root.
   SetClassRoot(kJavaLangReflectProxy, FindSystemClass(self, "Ljava/lang/reflect/Proxy;"));
 
-  // Create java.lang.LambdaProxy root.
-  SetClassRoot(kJavaLangLambdaProxy, FindSystemClass(self, "Ljava/lang/LambdaProxy;"));
-
   // Create java.lang.reflect.Field.class root.
   auto* class_root = FindSystemClass(self, "Ljava/lang/reflect/Field;");
   CHECK(class_root != nullptr);
@@ -1262,7 +1257,6 @@
   }
   delete data.allocator;
   delete data.class_table;
-  delete data.lambda_box_class_table;
 }
 
 mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length) {
@@ -1904,10 +1898,8 @@
 // Special case to get oat code without overwriting a trampoline.
 const void* ClassLinker::GetQuickOatCodeFor(ArtMethod* method) {
   CHECK(method->IsInvokable()) << PrettyMethod(method);
-  if (method->IsReflectProxyMethod()) {
+  if (method->IsProxyMethod()) {
     return GetQuickProxyInvokeHandler();
-  } else if (method->IsLambdaProxyMethod()) {
-    return GetQuickLambdaProxyInvokeHandler();
   }
   bool found;
   OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
@@ -3265,7 +3257,7 @@
   klass->SetName(soa.Decode<mirror::String*>(name));
   klass->SetDexCache(GetClassRoot(kJavaLangReflectProxy)->GetDexCache());
   mirror::Class::SetStatus(klass, mirror::Class::kStatusIdx, self);
-  std::string descriptor(GetDescriptorForAnyProxy(klass.Get()));
+  std::string descriptor(GetDescriptorForProxy(klass.Get()));
   const size_t hash = ComputeModifiedUtf8Hash(descriptor.c_str());
 
   // Needs to be before we insert the class so that the allocator field is set.
@@ -3385,228 +3377,23 @@
                                                decoded_name->ToModifiedUtf8().c_str()));
     CHECK_EQ(PrettyField(klass->GetStaticField(1)), throws_field_name);
 
-    CHECK_EQ(klass.Get()->GetInterfacesForAnyProxy(),
+    CHECK_EQ(klass.Get()->GetInterfaces(),
              soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces));
-    CHECK_EQ(klass.Get()->GetThrowsForAnyProxy(),
+    CHECK_EQ(klass.Get()->GetThrows(),
              soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class>>*>(throws));
   }
   return klass.Get();
 }
 
-mirror::Class* ClassLinker::CreateLambdaProxyClass(ScopedObjectAccessAlreadyRunnable& soa,
-                                                   jstring name,
-                                                   jobjectArray interfaces,
-                                                   jobject loader,
-                                                   jobjectArray methods,
-                                                   jobjectArray throws,
-                                                   bool* already_exists) {
-  DCHECK(already_exists != nullptr);
-  *already_exists = false;
-
-  Thread* self = soa.Self();
-  StackHandleScope<10> hs(self);
-
-  // Allocate a new java.lang.Class object for a mirror::Proxy.
-  MutableHandle<mirror::Class> klass =
-      hs.NewHandle(AllocClass(self, GetClassRoot(kJavaLangClass), sizeof(mirror::Class)));
-  if (klass.Get() == nullptr) {
-    CHECK(self->IsExceptionPending());  // OOME.
-    return nullptr;
-  }
-  DCHECK(klass->GetClass() != nullptr);
-  klass->SetObjectSize(sizeof(mirror::LambdaProxy));
-
-  // Set the class access flags incl. preverified, so we do not try to set the flag on the methods.
-  klass->SetAccessFlags(kAccClassIsLambdaProxy | kAccPublic | kAccFinal | kAccPreverified);
-  klass->SetClassLoader(soa.Decode<mirror::ClassLoader*>(loader));
-  DCHECK_EQ(klass->GetPrimitiveType(), Primitive::kPrimNot);
-  klass->SetName(soa.Decode<mirror::String*>(name));
-  klass->SetDexCache(GetClassRoot(kJavaLangLambdaProxy)->GetDexCache());
-  // Set the status to just after loading, but before anything is resolved.
-  mirror::Class::SetStatus(klass, mirror::Class::kStatusIdx, self);
-  // Convert "foo.bar.baz" string to "Lfoo/bar/baz;"
-  std::string type_descriptor(GetDescriptorForAnyProxy(klass.Get()));
-
-  mirror::Class* existing;
-  {
-    const size_t hash = ComputeModifiedUtf8Hash(type_descriptor.c_str());
-
-    // Insert the class before loading the fields as the field roots
-    // (ArtField::declaring_class_) are only visited from the class
-    // table. There can't be any suspend points between inserting the
-    // class and setting the field arrays below.
-    existing = InsertClass(type_descriptor.c_str(), klass.Get(), hash);
-  }
-  if (UNLIKELY(existing != nullptr)) {
-    // We already made this lambda proxy class previously; return it.
-
-    *already_exists = true;
-    return existing;
-    // Let the GC clean up the class that we allocated above but that isn't being used.
-  }
-
-  // Needs to be after we insert the class so that the allocator field is set.
-  LinearAlloc* const allocator = GetOrCreateAllocatorForClassLoader(klass->GetClassLoader());
-
-  // Instance fields are inherited, but we add a couple of static fields...
-  LengthPrefixedArray<ArtField>* sfields =
-      AllocArtFieldArray(self, allocator, mirror::LambdaProxy::kStaticFieldCount);
-  klass->SetSFieldsPtr(sfields);
-
-  // 1. Create a static field 'interfaces' that holds the _declared_ interfaces implemented by
-  // our proxy, so Class.getInterfaces doesn't return the flattened set.
-  // -- private static java.lang.Class[] interfaces;  // list of declared interfaces
-  ArtField& interfaces_sfield = sfields->At(mirror::LambdaProxy::kStaticFieldIndexInterfaces);
-  interfaces_sfield.SetDexFieldIndex(mirror::LambdaProxy::kStaticFieldIndexInterfaces);
-  interfaces_sfield.SetDeclaringClass(klass.Get());
-  interfaces_sfield.SetAccessFlags(kAccStatic | kAccPublic | kAccFinal);
-
-  // 2. Create a static field 'throws' that holds the classes of exceptions thrown by our methods.
-  // This is returned by java.lang.reflect.Method#getExceptionTypes()
-  // --- private static java.lang.Class[][] throws;  // maps vtable id to list of classes.
-  ArtField& throws_sfield = sfields->At(mirror::LambdaProxy::kStaticFieldIndexThrows);
-  throws_sfield.SetDexFieldIndex(mirror::LambdaProxy::kStaticFieldIndexThrows);
-  throws_sfield.SetDeclaringClass(klass.Get());
-  throws_sfield.SetAccessFlags(kAccStatic | kAccPublic | kAccFinal);
-
-  // Set up the Constructor method.
-  {
-    // Lambda proxies have 1 direct method, the constructor.
-    static constexpr size_t kNumDirectMethods = 1;
-    LengthPrefixedArray<ArtMethod>* directs = AllocArtMethodArray(self,
-                                                                  allocator,
-                                                                  kNumDirectMethods);
-    // Currently AllocArtMethodArray cannot return null, but the OOM logic is left there in case we
-    // want to throw OOM in the future.
-    if (UNLIKELY(directs == nullptr)) {
-      self->AssertPendingOOMException();
-      return nullptr;
-    }
-    klass->SetDirectMethodsPtr(directs);
-    CreateLambdaProxyConstructor(klass, klass->GetDirectMethodUnchecked(0, image_pointer_size_));
-  }
-
-  // Create virtual method using specified prototypes.
-  auto h_methods = hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Method>*>(methods));
-  DCHECK_EQ(h_methods->GetClass(), mirror::Method::ArrayClass())
-      << PrettyClass(h_methods->GetClass());
-  const size_t num_virtual_methods = h_methods->GetLength();
-  auto* virtuals = AllocArtMethodArray(self, allocator, num_virtual_methods);
-  // Currently AllocArtMethodArray cannot return null, but the OOM logic is left there in case we
-  // want to throw OOM in the future.
-  if (UNLIKELY(virtuals == nullptr)) {
-    self->AssertPendingOOMException();
-    return nullptr;
-  }
-  klass->SetVirtualMethodsPtr(virtuals);
-  size_t abstract_methods = 0;
-  for (size_t i = 0; i < num_virtual_methods; ++i) {
-    ArtMethod* virtual_method = klass->GetVirtualMethodUnchecked(i, image_pointer_size_);
-    ArtMethod* prototype = h_methods->Get(i)->GetArtMethod();
-    if (UNLIKELY((prototype->GetAccessFlags() & kAccDefault) != 0)) {
-      UNIMPLEMENTED(FATAL) << "Lambda proxies don't support default methods yet";
-    }
-    if (prototype->IsAbstract()) {
-      abstract_methods++;
-    }
-    VLOG(class_linker) << "Creating lambda proxy method for " << PrettyMethod(prototype);
-
-    CreateLambdaProxyMethod(klass, prototype, virtual_method);
-    DCHECK(virtual_method->GetDeclaringClass() != nullptr);
-    DCHECK(prototype->GetDeclaringClass() != nullptr);
-  }
-  // Ignore any methods from Object and default methods; they don't count here.
-  // Sanity check that the prototype interface is indeed compatible with lambdas.
-  DCHECK_EQ(abstract_methods, 1u)
-      << "Interface must be a single-abstract-method type: " << PrettyClass(klass.Get());
-
-  // The super class is java.lang.LambdaProxy
-  klass->SetSuperClass(GetClassRoot(kJavaLangLambdaProxy));
-  // Now effectively in the loaded state.
-  mirror::Class::SetStatus(klass, mirror::Class::kStatusLoaded, self);
-  self->AssertNoPendingException();
-
-  MutableHandle<mirror::Class> new_class = hs.NewHandle<mirror::Class>(nullptr);
-  {
-    // Must hold lock on object when resolved.
-    ObjectLock<mirror::Class> resolution_lock(self, klass);
-    // Link the fields and virtual methods, creating vtable and iftables.
-    // The new class will replace the old one in the class table.
-    Handle<mirror::ObjectArray<mirror::Class>> h_interfaces(
-        hs.NewHandle(soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces)));
-
-    {
-      DCHECK_EQ(1, h_interfaces->GetLength()) << "Lambda proxies must implement 1 interface only";
-      mirror::Class* single_abstract_interface = h_interfaces->Get(0);
-      DCHECK(single_abstract_interface != nullptr);
-
-      // Use the dex cache from the interface, which will enable most of the
-      // dex-using mechanisms on the class and its methods will work.
-      klass->SetDexCache(single_abstract_interface->GetDexCache());
-    }
-
-    if (!LinkClass(self, type_descriptor.c_str(), klass, h_interfaces, &new_class)) {
-      mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
-      return nullptr;
-    }
-  }
-  CHECK(klass->IsRetired());
-  CHECK_NE(klass.Get(), new_class.Get());
-  klass.Assign(new_class.Get());
-
-  CHECK_EQ(interfaces_sfield.GetDeclaringClass(), klass.Get());
-  interfaces_sfield.SetObject<false>(klass.Get(),
-                                     soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces));
-
-  CHECK_EQ(throws_sfield.GetDeclaringClass(), klass.Get());
-  throws_sfield.SetObject<false>(
-      klass.Get(), soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >*>(throws));
-
-  {
-    // Lock on klass is released. Lock new class object.
-    ObjectLock<mirror::Class> initialization_lock(self, klass);
-    mirror::Class::SetStatus(klass, mirror::Class::kStatusInitialized, self);
-  }
-
-  // Sanity checks
-  if (kIsDebugBuild) {
-    CHECK(klass->GetIFieldsPtr() == nullptr);
-    CheckLambdaProxyConstructor(klass->GetDirectMethod(0, image_pointer_size_));
-
-    for (size_t i = 0; i < num_virtual_methods; ++i) {
-      ArtMethod* virtual_method = klass->GetVirtualMethodUnchecked(i, image_pointer_size_);
-      ArtMethod* prototype = h_methods->Get(i)->GetArtMethod();
-      CheckLambdaProxyMethod(virtual_method, prototype);
-    }
-
-    StackHandleScope<1> hs2(self);
-    Handle<mirror::String> decoded_name = hs2.NewHandle(soa.Decode<mirror::String*>(name));
-    std::string interfaces_field_name(StringPrintf("java.lang.Class[] %s.interfaces",
-                                                   decoded_name->ToModifiedUtf8().c_str()));
-    CHECK_EQ(PrettyField(klass->GetStaticField(0)), interfaces_field_name);
-
-    std::string throws_field_name(StringPrintf("java.lang.Class[][] %s.throws",
-                                               decoded_name->ToModifiedUtf8().c_str()));
-    CHECK_EQ(PrettyField(klass->GetStaticField(1)), throws_field_name);
-
-    CHECK_EQ(klass.Get()->GetInterfacesForAnyProxy(),
-             soa.Decode<mirror::ObjectArray<mirror::Class>*>(interfaces));
-    CHECK_EQ(klass.Get()->GetThrowsForAnyProxy(),
-             soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class>>*>(throws));
-  }
-  return klass.Get();
-}
-
-std::string ClassLinker::GetDescriptorForAnyProxy(mirror::Class* proxy_class) {
-  DCHECK(proxy_class != nullptr);
-  DCHECK(proxy_class->IsAnyProxyClass());
+std::string ClassLinker::GetDescriptorForProxy(mirror::Class* proxy_class) {
+  DCHECK(proxy_class->IsProxyClass());
   mirror::String* name = proxy_class->GetName();
   DCHECK(name != nullptr);
   return DotToDescriptor(name->ToModifiedUtf8().c_str());
 }
 
 ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method) {
-  DCHECK(proxy_class->IsAnyProxyClass());
+  DCHECK(proxy_class->IsProxyClass());
   DCHECK(proxy_method->IsProxyMethod());
   {
     Thread* const self = Thread::Current();
@@ -3634,7 +3421,7 @@
 
 void ClassLinker::CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out) {
   // Create constructor for Proxy that must initialize the method.
-  CHECK_EQ(GetClassRoot(kJavaLangReflectProxy)->NumDirectMethods(), 18u);
+  CHECK_EQ(GetClassRoot(kJavaLangReflectProxy)->NumDirectMethods(), 16u);
   ArtMethod* proxy_constructor = GetClassRoot(kJavaLangReflectProxy)->GetDirectMethodUnchecked(
       2, image_pointer_size_);
   // Ensure constructor is in dex cache so that we can use the dex cache to look up the overridden
@@ -3650,38 +3437,6 @@
   out->SetDeclaringClass(klass.Get());
 }
 
-void ClassLinker::CreateLambdaProxyConstructor(Handle<mirror::Class> klass,
-                                               /*out*/ArtMethod* method_constructor) {
-  DCHECK(klass.Get() != nullptr);
-  DCHECK(method_constructor != nullptr);
-
-  // Create constructor for Proxy that must initialize the method.
-  // Lambda proxy superclass only has 1 direct method, the constructor (<init>()V)
-  CHECK_EQ(GetClassRoot(kJavaLangLambdaProxy)->NumDirectMethods(),
-           mirror::LambdaProxy::kDirectMethodCount);
-  // Get the constructor method.
-  ArtMethod* proxy_constructor = GetClassRoot(kJavaLangLambdaProxy)->GetDirectMethodUnchecked(
-      mirror::LambdaProxy::kDirectMethodIndexConstructor,
-      image_pointer_size_);
-
-  // Verify constructor method is indeed a constructor.
-  CHECK(proxy_constructor != nullptr);
-
-  // Ensure constructor is in dex cache so that we can use the dex cache to look up the overridden
-  // constructor method.
-  GetClassRoot(kJavaLangLambdaProxy)->GetDexCache()->SetResolvedMethod(
-      proxy_constructor->GetDexMethodIndex(),
-      proxy_constructor,
-      image_pointer_size_);
-
-  // Clone the existing constructor of LambdaProxy
-  // (our constructor would just invoke it so steal its code_ too).
-  method_constructor->CopyFrom(proxy_constructor, image_pointer_size_);
-  // Make this constructor public and fix the class to be our LambdaProxy version
-  method_constructor->SetAccessFlags(
-      (method_constructor->GetAccessFlags() & ~kAccProtected) | kAccPublic);
-  method_constructor->SetDeclaringClass(klass.Get());
-}
-
 void ClassLinker::CheckProxyConstructor(ArtMethod* constructor) const {
   CHECK(constructor->IsConstructor());
   auto* np = constructor->GetInterfaceMethodIfProxy(image_pointer_size_);
@@ -3690,14 +3445,6 @@
   DCHECK(constructor->IsPublic());
 }
 
-void ClassLinker::CheckLambdaProxyConstructor(ArtMethod* constructor) const {
-  CHECK(constructor->IsConstructor());
-  auto* np = constructor->GetInterfaceMethodIfProxy(image_pointer_size_);
-  CHECK_STREQ(np->GetName(), "<init>");
-  CHECK_STREQ(np->GetSignature().ToString().c_str(), "()V");
-  DCHECK(constructor->IsPublic());
-}
-
 void ClassLinker::CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype,
                                     ArtMethod* out) {
   // Ensure prototype is in dex cache so that we can use the dex cache to look up the overridden
@@ -3709,7 +3456,6 @@
     dex_cache->SetResolvedMethod(
         prototype->GetDexMethodIndex(), prototype, image_pointer_size_);
   }
-
   // We steal everything from the prototype (such as DexCache, invoke stub, etc.) then specialize
   // as necessary
   DCHECK(out != nullptr);
@@ -3725,42 +3471,6 @@
   out->SetEntryPointFromQuickCompiledCode(GetQuickProxyInvokeHandler());
 }
 
-void ClassLinker::CreateLambdaProxyMethod(Handle<mirror::Class> klass,
-                                          ArtMethod* prototype,
-                                          ArtMethod* out) {
-  DCHECK(prototype != nullptr);
-  DCHECK(out != nullptr);
-
-  // DO NOT go through the proxy invoke handler for the default methods. They have no idea
-  // how to handle the raw closure, so they must get the regular object when invoked.
-  CHECK_EQ(prototype->GetAccessFlags() & kAccDefault, 0u) << "Default methods must not be proxied";
-
-  // Ensure prototype is in dex cache so that we can use the dex cache to look up the overridden
-  // prototype method
-  auto* dex_cache = prototype->GetDeclaringClass()->GetDexCache();
-  // Avoid dirtying the dex cache unless we need to.
-  if (dex_cache->GetResolvedMethod(prototype->GetDexMethodIndex(), image_pointer_size_) !=
-      prototype) {
-    dex_cache->SetResolvedMethod(
-        prototype->GetDexMethodIndex(), prototype, image_pointer_size_);
-  }
-  // We steal everything from the prototype (such as DexCache, invoke stub, etc.) then specialize
-  // as necessary
-  out->CopyFrom(prototype, image_pointer_size_);
-
-  // Set class to be the concrete proxy class and clear the abstract flag, modify exceptions to
-  // the intersection of throw exceptions as defined in Proxy
-  out->SetDeclaringClass(klass.Get());
-  out->SetAccessFlags((out->GetAccessFlags() & ~kAccAbstract) | kAccFinal);
-
-  // Setting the entry point isn't safe for AOT since ASLR loads it anywhere at runtime.
-  CHECK(!Runtime::Current()->IsAotCompiler());
-
-  // At runtime the method looks like a reference and argument saving method, clone the code
-  // related parameters from this method.
-  out->SetEntryPointFromQuickCompiledCode(GetQuickLambdaProxyInvokeHandler());
-}
-
 void ClassLinker::CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) const {
   // Basic sanity
   CHECK(!prototype->IsFinal());
@@ -3782,11 +3492,6 @@
            prototype->GetReturnType(true /* resolve */, image_pointer_size_));
 }
 
-void ClassLinker::CheckLambdaProxyMethod(ArtMethod* method, ArtMethod* prototype) const {
-  // same as above.
-  return CheckProxyMethod(method, prototype);
-}
-
 bool ClassLinker::CanWeInitializeClass(mirror::Class* klass, bool can_init_statics,
                                        bool can_init_parents) {
   if (can_init_statics && can_init_parents) {
@@ -4418,9 +4123,7 @@
     class_loader->SetClassTable(data.class_table);
     // Should have been set when we registered the dex file.
     data.allocator = class_loader->GetAllocator();
-    CHECK(class_loader->GetLambdaProxyCache() == nullptr);
-    data.lambda_box_class_table = new lambda::BoxClassTable();
-    class_loader->SetLambdaProxyCache(data.lambda_box_class_table);
+    CHECK(data.allocator != nullptr);
     class_loaders_.push_back(data);
   }
   return class_table;
@@ -6863,7 +6566,6 @@
     "Ljava/lang/reflect/Field;",
     "Ljava/lang/reflect/Method;",
     "Ljava/lang/reflect/Proxy;",
-    "Ljava/lang/LambdaProxy;",
     "[Ljava/lang/String;",
     "[Ljava/lang/reflect/Constructor;",
     "[Ljava/lang/reflect/Field;",
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index f073cd8..29aac31 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -40,11 +40,6 @@
   class ImageSpace;
 }  // namespace space
 }  // namespace gc
-
-namespace lambda {
-  class BoxClassTable;
-}  // namespace lambda
-
 namespace mirror {
   class ClassLoader;
   class DexCache;
@@ -87,7 +82,6 @@
     kJavaLangReflectField,
     kJavaLangReflectMethod,
     kJavaLangReflectProxy,
-    kJavaLangLambdaProxy,
     kJavaLangStringArrayClass,
     kJavaLangReflectConstructorArrayClass,
     kJavaLangReflectFieldArrayClass,
@@ -430,46 +424,12 @@
                                   jobjectArray methods,
                                   jobjectArray throws)
       SHARED_REQUIRES(Locks::mutator_lock_);
-
-  // Get the long type descriptor, e.g. "LProxyName$1234;" for the requested proxy class.
-  static std::string GetDescriptorForAnyProxy(mirror::Class* proxy_class)
+  std::string GetDescriptorForProxy(mirror::Class* proxy_class)
       SHARED_REQUIRES(Locks::mutator_lock_);
   ArtMethod* FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method)
       REQUIRES(!dex_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // Create a lambda proxy class.
-  // -- Nominally used when boxing an innate lambda, since that has no corresponding class.
-  //
-  // * name must be a fully-qualified class name (and dotted), e.g. "java.lang.Runnable"
-  // * interfaces is an array of java.lang.Class for interfaces that will be the supertype
-  //   (note that there must be exactly 1 element here for a lambda interface since lambda
-  //   types can only target 1 interface).
-  // * loader must be a java.lang.ClassLoader where the proxy class will be created
-  // * methods must be an array of java.lang.reflect.Method that consists of the
-  //   deduplicated methods from all of the interfaces specified.
-  // * throws must be an array of java.lang.Class[] where each index corresponds to that of
-  //   methods, and it signifies the "throws" keyword of each method
-  //   (this is not directly used by the runtime itself, but it is available via reflection).
-  //
-  // Returns a non-null pointer to a class upon success, otherwise null and throws an exception.
-  //
-  // If the class was already created previously (with the same name but potentially different
-  // parameters), already_exists is set to true; otherwise already_exists is set to false.
-  // The already_exists value is undefined when an exception was thrown.
-  //
-  // Sidenote: interfaces is an array to simplify the libcore code which creates a Java
-  // array in an attempt to reduce code duplication.
-  // TODO: this should probably also take the target single-abstract-method as well.
-  mirror::Class* CreateLambdaProxyClass(ScopedObjectAccessAlreadyRunnable& soa,
-                                        jstring name,
-                                        jobjectArray interfaces,
-                                        jobject loader,
-                                        jobjectArray methods,
-                                        jobjectArray throws,
-                                        /*out*/bool* already_exists)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
   // Get the oat code for a method when its class isn't yet initialized
   const void* GetQuickOatCodeFor(ArtMethod* method)
       SHARED_REQUIRES(Locks::mutator_lock_);
@@ -613,7 +573,6 @@
     jweak weak_root;  // Weak root to enable class unloading.
     ClassTable* class_table;
     LinearAlloc* allocator;
-    lambda::BoxClassTable* lambda_box_class_table;
   };
 
   // Ensures that the supertype of 'klass' ('supertype') is verified. Returns false and throws
@@ -948,12 +907,8 @@
 
   void CheckProxyConstructor(ArtMethod* constructor) const
       SHARED_REQUIRES(Locks::mutator_lock_);
-  void CheckLambdaProxyConstructor(ArtMethod* constructor) const
-      SHARED_REQUIRES(Locks::mutator_lock_);
   void CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) const
       SHARED_REQUIRES(Locks::mutator_lock_);
-  void CheckLambdaProxyMethod(ArtMethod* method, ArtMethod* prototype) const
-      SHARED_REQUIRES(Locks::mutator_lock_);
 
   // For use by ImageWriter to find DexCaches for its roots
   ReaderWriterMutex* DexLock()
@@ -971,19 +926,9 @@
 
   void CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out)
       SHARED_REQUIRES(Locks::mutator_lock_);
-
-  // Copy the constructor from java.lang.LambdaProxy into the 'klass'.
-  // The copy is written into 'method_constructor'.
-  void CreateLambdaProxyConstructor(Handle<mirror::Class> klass,
-                                    /*out*/ArtMethod* method_constructor)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
   void CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, ArtMethod* out)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  void CreateLambdaProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, ArtMethod* out)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
   // Ensures that methods have the kAccPreverified bit set. We use the kAccPreverfied bit on the
   // class access flags to determine whether this has been done before.
   void EnsurePreverifiedMethods(Handle<mirror::Class> c)
@@ -995,10 +940,7 @@
   // Returns null if not found.
   ClassTable* ClassTableForClassLoader(mirror::ClassLoader* class_loader)
       SHARED_REQUIRES(Locks::mutator_lock_, Locks::classlinker_classes_lock_);
-
-  // Insert a new class table if not found. Uses bootclasspath if class_loader is null.
-  // Returns either the existing table, or the new one if there wasn't one previously
-  // (the return value is always non-null).
+  // Insert a new class table if not found.
   ClassTable* InsertClassTableForClassLoader(mirror::ClassLoader* class_loader)
       SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(Locks::classlinker_classes_lock_);
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 4a9db1d..2c086c5 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -31,7 +31,6 @@
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache.h"
 #include "mirror/field.h"
-#include "mirror/lambda_proxy.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
 #include "mirror/proxy.h"
@@ -553,7 +552,6 @@
   ClassLoaderOffsets() : CheckOffsets<mirror::ClassLoader>(false, "Ljava/lang/ClassLoader;") {
     addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, allocator_), "allocator");
     addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, class_table_), "classTable");
-    addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, lambda_proxy_cache_), "lambdaProxyCache");
     addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, packages_), "packages");
     addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, parent_), "parent");
     addOffset(OFFSETOF_MEMBER(mirror::ClassLoader, proxyCache_), "proxyCache");
@@ -566,13 +564,6 @@
   };
 };
 
-struct LambdaProxyOffsets : public CheckOffsets<mirror::LambdaProxy> {
-  LambdaProxyOffsets() : CheckOffsets<mirror::LambdaProxy>(false, "Ljava/lang/LambdaProxy;") {
-    addOffset(OFFSETOF_MEMBER(mirror::LambdaProxy, closure_), "closure");
-  };
-};
-
-
 struct DexCacheOffsets : public CheckOffsets<mirror::DexCache> {
   DexCacheOffsets() : CheckOffsets<mirror::DexCache>(false, "Ljava/lang/DexCache;") {
     addOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_), "dex");
@@ -648,7 +639,6 @@
   EXPECT_TRUE(StackTraceElementOffsets().Check());
   EXPECT_TRUE(ClassLoaderOffsets().Check());
   EXPECT_TRUE(ProxyOffsets().Check());
-  EXPECT_TRUE(LambdaProxyOffsets().Check());
   EXPECT_TRUE(DexCacheOffsets().Check());
   EXPECT_TRUE(ReferenceOffsets().Check());
   EXPECT_TRUE(FinalizerReferenceOffsets().Check());
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index e84313c..f705a50 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -55,6 +55,7 @@
   // Gtests can be very noisy. For example, an executable with multiple tests will trigger native
   // bridge warnings. The following line reduces the minimum log severity to ERROR and suppresses
   // everything else. In case you want to see all messages, comment out the line.
+  setenv("ANDROID_LOG_TAGS", "*:e", 1);
 
   art::InitLogging(argv);
   LOG(::art::INFO) << "Running main() from common_runtime_test.cc...";
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 2a92226..87e29ae 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -313,7 +313,7 @@
           reinterpret_cast<uintptr_t>(virtual_methods)) / method_size;
       CHECK_LT(throws_index, static_cast<int>(num_virtuals));
       mirror::ObjectArray<mirror::Class>* declared_exceptions =
-          proxy_class->GetThrowsForAnyProxy()->Get(throws_index);
+          proxy_class->GetThrows()->Get(throws_index);
       mirror::Class* exception_class = exception->GetClass();
       bool declares_exception = false;
       for (int32_t i = 0; i < declared_exceptions->GetLength() && !declares_exception; i++) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 8c2dc3e..abf9ac4 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -23,12 +23,9 @@
 #include "entrypoints/runtime_asm_entrypoints.h"
 #include "gc/accounting/card_table-inl.h"
 #include "interpreter/interpreter.h"
-#include "lambda/closure.h"
-#include "lambda/art_lambda_method.h"
 #include "method_reference.h"
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache-inl.h"
-#include "mirror/lambda_proxy.h"
 #include "mirror/method.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array-inl.h"
@@ -297,8 +294,7 @@
   // 1st GPR.
   static mirror::Object* GetProxyThisObject(ArtMethod** sp)
       SHARED_REQUIRES(Locks::mutator_lock_) {
-    // TODO: Lambda proxies only set up a frame when debugging
-    CHECK((*sp)->IsReflectProxyMethod() || ((*sp)->IsLambdaProxyMethod() /*&& kIsDebugBuild*/));
+    CHECK((*sp)->IsProxyMethod());
     CHECK_GT(kNumQuickGprArgs, 0u);
     constexpr uint32_t kThisGprIndex = 0u;  // 'this' is in the 1st GPR.
     size_t this_arg_offset = kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset +
@@ -838,9 +834,8 @@
 extern "C" uint64_t artQuickProxyInvokeHandler(
     ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
     SHARED_REQUIRES(Locks::mutator_lock_) {
-  DCHECK(proxy_method->GetDeclaringClass()->IsReflectProxyClass()) << PrettyMethod(proxy_method);
-  DCHECK(proxy_method->IsReflectProxyMethod()) << PrettyMethod(proxy_method);
-  DCHECK(receiver->GetClass()->IsReflectProxyClass()) << PrettyMethod(proxy_method);
+  DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
+  DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
   // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
   const char* old_cause =
       self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
@@ -883,175 +878,6 @@
   return result.GetJ();
 }
 
-extern "C" uint64_t artQuickLambdaProxyInvokeHandler(
-    ArtMethod* proxy_method, mirror::LambdaProxy* receiver, Thread* self, ArtMethod** sp)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
-  using lambda::ShortyFieldType;
-
-  DCHECK(proxy_method->GetDeclaringClass()->IsLambdaProxyClass()) << PrettyMethod(proxy_method);
-  DCHECK(proxy_method->IsLambdaProxyMethod()) << PrettyMethod(proxy_method);
-  DCHECK(receiver->GetClass()->IsLambdaProxyClass()) << PrettyMethod(proxy_method);
-
-  lambda::Closure* lambda_closure = receiver->GetClosure();
-  DCHECK(lambda_closure != nullptr);  // Should've NPEd during the invoke-interface.
-  // Learned lambdas have their own implementation of the SAM, they must not go through here.
-  DCHECK(lambda_closure->GetLambdaInfo()->IsInnateLambda());
-  ArtMethod* target_method = lambda_closure->GetTargetMethod();
-
-  // Lambda targets are always static.
-  // TODO: This should really be a target_method->IsLambda(), once we add the access flag.
-  CHECK(target_method->IsStatic()) << PrettyMethod(proxy_method) << " "
-                                   << PrettyMethod(target_method);
-
-  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
-  const char* old_cause =
-      self->StartAssertNoThreadSuspension("Adding to IRT/SF lambda proxy object arguments");
-  // Register the top of the managed stack, making stack crawlable.
-  DCHECK_EQ((*sp), proxy_method) << PrettyMethod(proxy_method);
-  self->VerifyStack();
-  // Start new JNI local reference state.
-  JNIEnvExt* env = self->GetJniEnv();
-  ScopedObjectAccessUnchecked soa(env);
-
-  // Place the arguments into the args vector and remove the receiver.
-  ArtMethod* non_proxy_method = proxy_method->GetInterfaceMethodIfProxy(sizeof(void*));
-  CHECK(!non_proxy_method->IsStatic()) << PrettyMethod(proxy_method) << " "
-                                       << PrettyMethod(non_proxy_method);
-  uint32_t shorty_len = 0;
-  const char* shorty = non_proxy_method->GetShorty(/*out*/&shorty_len);
-
-  std::vector<jvalue> args;
-  // Make a quick visitor so we can restore the refs in case they move after a GC.
-  BuildQuickArgumentVisitor local_ref_visitor(sp,
-                                              false /*is_static*/,
-                                              shorty,
-                                              shorty_len,
-                                              &soa,
-                                              /*out*/&args);
-  local_ref_visitor.VisitArguments();
-
-  static_assert(lambda::kClosureIsStoredAsLong,
-                "Need to update this code once closures are no "
-                "longer treated as a 'long' in quick abi");
-
-  // Allocate one vreg more than usual because we need to convert our
-  // receiver Object (1 vreg) into a long (2 vregs).
-  // TODO: Ugly... move to traits instead?
-  const uint32_t first_arg_reg = ShortyFieldType(ShortyFieldType::kLambda).GetVirtualRegisterCount()
-        - ShortyFieldType(ShortyFieldType::kObject).GetVirtualRegisterCount();
-  const uint32_t num_vregs = lambda_closure->GetLambdaInfo()->GetArgumentVRegCount();
-  DCHECK_GE(num_vregs, first_arg_reg);
-  if (kIsDebugBuild) {
-    const char* method_shorty = non_proxy_method->GetShorty();
-    DCHECK_NE(*method_shorty, '\0') << method_shorty;
-    const char* arg_shorty = method_shorty + 1;  // Skip return type.
-
-    // Proxy method should have an object (1 vreg) receiver,
-    // Lambda method should have a lambda (2 vregs) receiver.
-    // -- All other args are the same as before.
-    // -- Make sure vreg count is what we thought it was.
-    uint32_t non_proxy_num_vregs =
-        ShortyFieldType::CountVirtualRegistersRequired(arg_shorty)  // doesn't count receiver
-        + ShortyFieldType(ShortyFieldType::kObject).GetVirtualRegisterCount();  // implicit receiver
-
-    CHECK_EQ(non_proxy_num_vregs + first_arg_reg, num_vregs)
-        << PrettyMethod(non_proxy_method) << " " << PrettyMethod(lambda_closure->GetTargetMethod());
-  }
-
-  ShadowFrameAllocaUniquePtr shadow_frame = CREATE_SHADOW_FRAME(num_vregs,
-                                                                /*link*/nullptr,
-                                                                target_method,
-                                                                /*dex_pc*/0);
-
-  // Copy our proxy method caller's arguments into this ShadowFrame.
-  BuildQuickShadowFrameVisitor local_sf_visitor(sp,
-                                                /*is_static*/false,
-                                                shorty,
-                                                shorty_len,
-                                                shadow_frame.get(),
-                                                first_arg_reg);
-
-  local_sf_visitor.VisitArguments();
-  // Now fix up the arguments, with each ArgK being a vreg:
-
-  // (Before):
-  // Arg0 = proxy receiver (LambdaProxy)
-  // Arg1 = first user-defined argument
-  // Arg2 = second user-defined argument
-  // ....
-  // ArgN = ...
-
-  // (After):
-  // Arg0 = closure (hi)
-  // Arg1 = closure (lo) = 0x00 on 32-bit
-  // Arg2 = <?> (first user-defined argument)
-  // Arg3 = <?> (second user-defined argument)
-  // ...
-  // ArgN+1 = ...
-
-  // Transformation diagram:
-  /*
-     Arg0  Arg1  Arg2 ... ArgN
-       |       \     \        \
-       |        \     \        \
-     ClHi  ClLo  Arg2  Arg3 ... ArgN+1
-   */
-
-  // 1) memmove vregs 1-N into 2-N+1
-  uint32_t* shadow_frame_vregs = shadow_frame->GetVRegArgs(/*i*/0);
-  if (lambda::kClosureIsStoredAsLong ||
-      sizeof(void*) != sizeof(mirror::CompressedReference<mirror::LambdaProxy>)) {
-    // Suspending here would be very bad since we are doing a raw memmove
-
-    // Move the primitive vregs over.
-    {
-      size_t shadow_frame_vregs_size = num_vregs;
-      memmove(shadow_frame_vregs + first_arg_reg,
-              shadow_frame_vregs,
-              (shadow_frame_vregs_size - first_arg_reg) * sizeof(uint32_t));
-    }
-
-    // Move the reference vregs over.
-    if (LIKELY(shadow_frame->HasReferenceArray())) {
-      uint32_t* shadow_frame_references = shadow_frame_vregs + num_vregs;
-      size_t shadow_frame_references_size = num_vregs;
-      memmove(shadow_frame_references + first_arg_reg,
-              shadow_frame_references,
-              (shadow_frame_references_size - first_arg_reg) * sizeof(uint32_t));
-    }
-
-    static_assert(lambda::kClosureSupportsReadBarrier == false,
-                  "Using this memmove code with a read barrier GC seems like it could be unsafe.");
-
-    static_assert(sizeof(mirror::CompressedReference<mirror::LambdaProxy>) == sizeof(uint32_t),
-                  "This block of code assumes a compressed reference fits into exactly 1 vreg");
-  }
-  // 2) replace proxy receiver with lambda
-  shadow_frame->SetVRegLong(0, static_cast<int64_t>(reinterpret_cast<uintptr_t>(lambda_closure)));
-
-  // OK: After we do the invoke, the target method takes over managing the arguments
-  //     and we won't ever access the shadow frame again (if any references moved).
-  self->EndAssertNoThreadSuspension(old_cause);
-
-  // The shadow frame vreg contents are now 'owned' by the Invoke method, and
-  // will be managed by it during a GC despite being a raw uint32_t array.
-  // We however have no guarantee that it is updated on the way out, so do not read out of the
-  // shadow frame after this call.
-  JValue result;
-  target_method->Invoke(self,
-                        shadow_frame_vregs,
-                        num_vregs * sizeof(uint32_t),
-                        /*out*/&result,
-                        target_method->GetShorty());
-
-  // Restore references on the proxy caller stack frame which might have moved.
-  // -- This is necessary because the QuickFrameInfo is just the generic runtime "RefsAndArgs"
-  //    which means that the regular stack visitor wouldn't know how to GC-move any references
-  //    that we spilled ourselves in the proxy stub.
-  local_ref_visitor.FixupReferences();
-  return result.GetJ();
-}
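// Self-contained sketch of the vreg fix-up above (hypothetical helper; the
// real code operates on a ShadowFrame and its reference array): shift the
// user-defined arguments up by first_arg_reg slots, then store the closure
// pointer as a 64-bit value in vregs 0 and 1.
#include <cstddef>
#include <cstdint>
#include <cstring>
static void SketchShiftVRegsForClosure(uint32_t* vregs, size_t num_vregs,
                                       size_t first_arg_reg, const void* closure) {
  // 1) Move vregs [0, num_vregs - first_arg_reg) up into
  //    [first_arg_reg, num_vregs); the ranges overlap, hence memmove.
  std::memmove(vregs + first_arg_reg, vregs,
               (num_vregs - first_arg_reg) * sizeof(uint32_t));
  // 2) Replace the proxy receiver with the closure pointer, widened to 64 bits
  //    (zero-extended on 32-bit targets), occupying the vreg pair 0 and 1.
  const uint64_t closure_bits =
      static_cast<uint64_t>(reinterpret_cast<uintptr_t>(closure));
  std::memcpy(vregs, &closure_bits, sizeof(closure_bits));
}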
-
 // Read object references held in arguments from quick frames and place in a JNI local references,
 // so they don't get garbage collected.
 class RememberForGcArgumentVisitor FINAL : public QuickArgumentVisitor {
diff --git a/runtime/entrypoints/runtime_asm_entrypoints.h b/runtime/entrypoints/runtime_asm_entrypoints.h
index 1ef7585..2842c5a 100644
--- a/runtime/entrypoints/runtime_asm_entrypoints.h
+++ b/runtime/entrypoints/runtime_asm_entrypoints.h
@@ -17,10 +17,6 @@
 #ifndef ART_RUNTIME_ENTRYPOINTS_RUNTIME_ASM_ENTRYPOINTS_H_
 #define ART_RUNTIME_ENTRYPOINTS_RUNTIME_ASM_ENTRYPOINTS_H_
 
-// Define entry points to assembly routines.
-// All extern "C" functions here are defined in a corresponding assembly-only file.
-// The exact file paths are runtime/arch/$ISA/quick_entrypoints_$ISA.s
-
 namespace art {
 
 #ifndef BUILDING_LIBART
@@ -56,13 +52,6 @@
   return reinterpret_cast<const void*>(art_quick_proxy_invoke_handler);
 }
 
-// Return the address of quick stub code for handling transitions into the lambda proxy
-// invoke handler.
-extern "C" void art_quick_lambda_proxy_invoke_handler();
-static inline const void* GetQuickLambdaProxyInvokeHandler() {
-  return reinterpret_cast<const void*>(art_quick_lambda_proxy_invoke_handler);
-}
-
 // Return the address of quick stub code for resolving a method at first call.
 extern "C" void art_quick_resolution_trampoline(ArtMethod*);
 static inline const void* GetQuickResolutionStub() {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 07f0628..da9a79e 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -672,8 +672,8 @@
     return result;
   } else if (UNLIKELY(klass->IsPrimitive<kVerifyNone>())) {
     return Primitive::Descriptor(klass->GetPrimitiveType<kVerifyNone>());
-  } else if (UNLIKELY(klass->IsAnyProxyClass<kVerifyNone>())) {
-    return Runtime::Current()->GetClassLinker()->GetDescriptorForAnyProxy(klass);
+  } else if (UNLIKELY(klass->IsProxyClass<kVerifyNone>())) {
+    return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(klass);
   } else {
     mirror::DexCache* dex_cache = klass->GetDexCache<kVerifyNone>();
     if (!IsValidContinuousSpaceObjectAddress(dex_cache)) {
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 2de8e7e..9f6699f 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -888,56 +888,12 @@
     return false;
   }
 
-  StackHandleScope<1> hs{self};  // NOLINT: [readability/braces] [4];
-
-  // Use the lambda method's class loader since it's close enough.
-  // TODO: create-lambda should capture the current method's class loader and use that instead.
-  // TODO: Do we want create-lambda to work for static methods outside of the declaring class?
-  // --> then we need to store a classloader in the lambda method. otherwise we don't
-  //     because it would always use the declaring class's class loader.
-  // TODO: add a GetClassLoader to the lambda closure which knows how to do this,
-  //       don't hardcode this here.
-  Handle<ClassLoader> current_class_loader = hs.NewHandle(
-      lambda_closure->GetTargetMethod()->GetDeclaringClass()->GetClassLoader());
-
-  // TODO: get the type ID from the instruction
-  std::string class_name;
-  {
-    // Temporary hack to read the interface corresponding to a box-lambda.
-    // TODO: The box-lambda should encode the type ID instead, so we don't need to do this.
-    {
-      // Hack: read the interface name from the const-string.
-      mirror::Object* string_reference = shadow_frame.GetVRegReference(vreg_target_object);
-
-      CHECK(string_reference != nullptr)
-          << "box-lambda needs the type name stored in string vA (target), but it was null";
-
-      CHECK(string_reference->IsString())
-          << "box-lambda needs the type name stored in string vA (target)";
-
-      mirror::String* as_string = string_reference->AsString();
-      class_name = as_string->ToModifiedUtf8();
-    }
-
-    // Trigger class loading of the functional interface.
-    // TODO: This should actually be done by the create-lambda...
-    if (Runtime::Current()->GetClassLinker()
-            ->FindClass(self, class_name.c_str(), current_class_loader) == nullptr) {
-      CHECK(self->IsExceptionPending());
-      self->AssertPendingException();
-      return false;
-    }
-  }
-
   mirror::Object* closure_as_object =
-      Runtime::Current()->GetLambdaBoxTable()->BoxLambda(lambda_closure,
-                                                         class_name.c_str(),
-                                                         current_class_loader.Get());
+      Runtime::Current()->GetLambdaBoxTable()->BoxLambda(lambda_closure);
 
   // Failed to box the lambda, an exception was raised.
   if (UNLIKELY(closure_as_object == nullptr)) {
     CHECK(self->IsExceptionPending());
-    shadow_frame.SetVRegReference(vreg_target_object, nullptr);
     return false;
   }
 
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 11a8c2e..bf95a0e 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -102,8 +102,6 @@
   size_t lambda_captured_variable_index = 0;
   while (true) {
     dex_pc = inst->GetDexPc(insns);
-    DCHECK_LE(dex_pc, code_item->insns_size_in_code_units_)
-        << "Dex PC overflowed code item size; missing return instruction?";
     shadow_frame.SetDexPC(dex_pc);
     TraceExecution(shadow_frame, inst, dex_pc);
     inst_data = inst->Fetch16(0);
diff --git a/runtime/lambda/art_lambda_method.cc b/runtime/lambda/art_lambda_method.cc
index 0690cd1..6f9f8bb 100644
--- a/runtime/lambda/art_lambda_method.cc
+++ b/runtime/lambda/art_lambda_method.cc
@@ -14,7 +14,6 @@
  * limitations under the License.
  */
 
-#include "art_method-inl.h"
 #include "lambda/art_lambda_method.h"
 
 #include "base/logging.h"
@@ -74,12 +73,5 @@
   }
 }
 
-size_t ArtLambdaMethod::GetArgumentVRegCount() const {
-  DCHECK(GetArtMethod()->IsStatic());  // Instance methods don't have receiver in shorty.
-  const char* method_shorty = GetArtMethod()->GetShorty();
-  DCHECK_NE(*method_shorty, '\0') << method_shorty;
-  return ShortyFieldType::CountVirtualRegistersRequired(method_shorty + 1);  // skip return type
-}
-
 }  // namespace lambda
 }  // namespace art
diff --git a/runtime/lambda/art_lambda_method.h b/runtime/lambda/art_lambda_method.h
index a858bf9..ea13eb7 100644
--- a/runtime/lambda/art_lambda_method.h
+++ b/runtime/lambda/art_lambda_method.h
@@ -90,17 +90,6 @@
     return strlen(captured_variables_shorty_);
   }
 
-  // Return the offset in bytes from the start of ArtLambdaMethod to the method_.
-  // -- Only should be used by assembly (stubs) support code and compiled code.
-  static constexpr size_t GetArtMethodOffset() {
-    return offsetof(ArtLambdaMethod, method_);
-  }
-
-  // Calculate how many vregs all the arguments will use when doing an invoke.
-  // (Most primitives are 1 vregs, double/long are 2, reference is 1, lambda is 2).
-  // -- This is used to know how big to set up shadow frame when invoking into the target method.
-  size_t GetArgumentVRegCount() const SHARED_REQUIRES(Locks::mutator_lock_);
-
  private:
   // TODO: ArtMethod, or at least the entry points should be inlined into this struct
   // to avoid an extra indirect load when doing invokes.
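// Sketch of the vreg counting that the removed GetArgumentVRegCount performed
// (hypothetical free function; the real logic lives in
// ShortyFieldType::CountVirtualRegistersRequired, and '\\' is assumed here to
// be the lambda shorty character): wide types need a vreg pair, everything
// else needs a single vreg.
#include <cstddef>
static size_t SketchCountVRegsForArgs(const char* method_shorty) {
  size_t vregs = 0;
  for (const char* p = method_shorty + 1; *p != '\0'; ++p) {  // Skip return type.
    // long ('J'), double ('D'), and lambda ('\\') each occupy two vregs.
    vregs += (*p == 'J' || *p == 'D' || *p == '\\') ? 2u : 1u;
  }
  return vregs;
}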
diff --git a/runtime/lambda/box_class_table-inl.h b/runtime/lambda/box_class_table-inl.h
deleted file mode 100644
index 2fc34a7..0000000
--- a/runtime/lambda/box_class_table-inl.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_INL_H_
-#define ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_INL_H_
-
-#include "lambda/box_class_table.h"
-#include "thread.h"
-
-namespace art {
-namespace lambda {
-
-template <typename Visitor>
-inline void BoxClassTable::VisitRoots(const Visitor& visitor) {
-  MutexLock mu(Thread::Current(), *Locks::lambda_class_table_lock_);
-  for (std::pair<UnorderedMapKeyType, ValueType>& key_value : map_) {
-    ValueType& gc_root = key_value.second;
-    visitor.VisitRoot(gc_root.AddressWithoutBarrier());
-  }
-}
-
-}  // namespace lambda
-}  // namespace art
-
-#endif  // ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_INL_H_
diff --git a/runtime/lambda/box_class_table.cc b/runtime/lambda/box_class_table.cc
deleted file mode 100644
index 1e49886..0000000
--- a/runtime/lambda/box_class_table.cc
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "lambda/box_class_table.h"
-
-#include "base/mutex.h"
-#include "common_throws.h"
-#include "gc_root-inl.h"
-#include "lambda/closure.h"
-#include "lambda/leaking_allocator.h"
-#include "mirror/method.h"
-#include "mirror/object-inl.h"
-#include "thread.h"
-
-#include <string>
-#include <vector>
-
-namespace art {
-namespace lambda {
-
-// Create the lambda proxy class given the name of the lambda interface (e.g. Ljava/lang/Runnable;)
-// Also needs a proper class loader (or null for bootclasspath) where the proxy will be created
-// into.
-//
-// The class must **not** have already been created.
-// Returns a non-null ptr on success, otherwise returns null and has an exception set.
-static mirror::Class* CreateClass(Thread* self,
-                                  const std::string& class_name,
-                                  const Handle<mirror::ClassLoader>& class_loader)
-    SHARED_REQUIRES(Locks::mutator_lock_) {
-  ScopedObjectAccessUnchecked soa(self);
-  StackHandleScope<2> hs(self);
-
-  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-
-  // Find the java.lang.Class for our class name (from the class loader).
-  Handle<mirror::Class> lambda_interface =
-      hs.NewHandle(class_linker->FindClass(self, class_name.c_str(), class_loader));
-  // TODO: use LookupClass in a loop
-  // TODO: DCHECK That this doesn't actually cause the class to be loaded,
-  //       since the create-lambda should've loaded it already
-  DCHECK(lambda_interface.Get() != nullptr) << "CreateClass with class_name=" << class_name;
-  DCHECK(lambda_interface->IsInterface()) << "CreateClass with class_name=" << class_name;
-  jobject lambda_interface_class = soa.AddLocalReference<jobject>(lambda_interface.Get());
-
-  // Look up java.lang.reflect.Proxy#getLambdaProxyClass method.
-  Handle<mirror::Class> java_lang_reflect_proxy =
-      hs.NewHandle(class_linker->FindSystemClass(soa.Self(), "Ljava/lang/reflect/Proxy;"));
-  jclass java_lang_reflect_proxy_class =
-      soa.AddLocalReference<jclass>(java_lang_reflect_proxy.Get());
-  DCHECK(java_lang_reflect_proxy.Get() != nullptr);
-
-  jmethodID proxy_factory_method_id =
-      soa.Env()->GetStaticMethodID(java_lang_reflect_proxy_class,
-                                  "getLambdaProxyClass",
-                                  "(Ljava/lang/ClassLoader;Ljava/lang/Class;)Ljava/lang/Class;");
-  DCHECK(!soa.Env()->ExceptionCheck());
-
-  // Call into the java code to do the hard work of figuring out which methods and throws
-  // our lambda interface proxy needs to implement. It then calls back into the class linker
-  // on our behalf to make the proxy itself.
-  jobject generated_lambda_proxy_class =
-      soa.Env()->CallStaticObjectMethod(java_lang_reflect_proxy_class,
-                                        proxy_factory_method_id,
-                                        class_loader.ToJObject(),
-                                        lambda_interface_class);
-
-  // This can throw in which case we return null. Caller must handle.
-  return soa.Decode<mirror::Class*>(generated_lambda_proxy_class);
-}
-
-BoxClassTable::BoxClassTable() {
-}
-
-BoxClassTable::~BoxClassTable() {
-  // Don't need to do anything, classes are deleted automatically by GC
-  // when the classloader is deleted.
-  //
-  // Our table will not outlive the classloader since the classloader owns it.
-}
-
-mirror::Class* BoxClassTable::GetOrCreateBoxClass(const char* class_name,
-                                                  const Handle<mirror::ClassLoader>& class_loader) {
-  DCHECK(class_name != nullptr);
-
-  Thread* self = Thread::Current();
-
-  std::string class_name_str = class_name;
-
-  {
-    MutexLock mu(self, *Locks::lambda_class_table_lock_);
-
-    // Attempt to look up this class, it's possible it was already created previously.
-    // If this is the case we *must* return the same class as before to maintain
-    // referential equality between box instances.
-    //
-    // In managed code:
-    //   Functional f = () -> 5;  // vF = create-lambda
-    //   Object a = f;            // vA = box-lambda vA
-    //   Object b = f;            // vB = box-lambda vB
-    //   assert(a.getClass() == b.getClass())
-    //   assert(a == b)
-    ValueType value = FindBoxedClass(class_name_str);
-    if (!value.IsNull()) {
-      return value.Read();
-    }
-  }
-
-  // Otherwise we need to generate a class ourselves and insert it into the hash map
-
-  // Release the table lock here, which implicitly allows other threads to suspend
-  // (since the GC callbacks will not block on trying to acquire our lock).
-  // We also don't want to call into the class linker with the lock held because
-  // our lock level is lower.
-  self->AllowThreadSuspension();
-
-  // Create a lambda proxy class, within the specified class loader.
-  mirror::Class* lambda_proxy_class = CreateClass(self, class_name_str, class_loader);
-
-  // There are no thread suspension points after this, so we don't need to put it into a handle.
-  ScopedAssertNoThreadSuspension soants{self, "BoxClassTable::GetOrCreateBoxClass"};  // NOLINT:  [readability/braces] [4]
-
-  if (UNLIKELY(lambda_proxy_class == nullptr)) {
-    // Most likely an OOM has occurred.
-    CHECK(self->IsExceptionPending());
-    return nullptr;
-  }
-
-  {
-    MutexLock mu(self, *Locks::lambda_class_table_lock_);
-
-    // Possible, but unlikely, that someone already came in and made a proxy class
-    // on another thread.
-    ValueType value = FindBoxedClass(class_name_str);
-    if (UNLIKELY(!value.IsNull())) {
-      DCHECK_EQ(lambda_proxy_class, value.Read());
-      return value.Read();
-    }
-
-    // Otherwise we made a brand new proxy class.
-    // The class itself is cleaned up by the GC (e.g. class unloading) later.
-
-    // Actually insert into the table.
-    map_.Insert({std::move(class_name_str), ValueType(lambda_proxy_class)});
-  }
-
-  return lambda_proxy_class;
-}
-
-BoxClassTable::ValueType BoxClassTable::FindBoxedClass(const std::string& class_name) const {
-  auto map_iterator = map_.Find(class_name);
-  if (map_iterator != map_.end()) {
-    const std::pair<UnorderedMapKeyType, ValueType>& key_value_pair = *map_iterator;
-    const ValueType& value = key_value_pair.second;
-
-    DCHECK(!value.IsNull());  // Never store null boxes.
-    return value;
-  }
-
-  return ValueType(nullptr);
-}
-
-void BoxClassTable::EmptyFn::MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const {
-  item.first.clear();
-
-  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
-  item.second = ValueType();  // Also clear the GC root.
-}
-
-bool BoxClassTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
-  bool is_empty = item.first.empty();
-  DCHECK_EQ(item.second.IsNull(), is_empty);
-
-  return is_empty;
-}
-
-bool BoxClassTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,
-                                         const UnorderedMapKeyType& rhs) const {
-  // Be damn sure the classes don't just move around from under us.
-  Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
-
-  // Being the same class name isn't enough, must also have the same class loader.
-  // When we are in the same class loader, classes are equal via the pointer.
-  return lhs == rhs;
-}
-
-size_t BoxClassTable::HashFn::operator()(const UnorderedMapKeyType& key) const {
-  return std::hash<std::string>()(key);
-}
-
-}  // namespace lambda
-}  // namespace art
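
GetOrCreateBoxClass() above shows the shape this file was built around: probe the table under the lock, drop the lock for the slow creation step (which may suspend the thread), then re-check under the lock before inserting so racing threads converge on one canonical class. A self-contained sketch of that pattern with std::mutex (the "class" objects are hypothetical):

    #include <cstdio>
    #include <memory>
    #include <mutex>
    #include <string>
    #include <unordered_map>

    struct FakeClass { std::string name; };

    class GetOrCreateCache {
     public:
      // Returns one canonical FakeClass per name; racing creators converge
      // on a single instance (a loser's fresh copy is simply discarded).
      FakeClass* GetOrCreate(const std::string& name) {
        {
          std::lock_guard<std::mutex> lock(mu_);
          auto it = map_.find(name);
          if (it != map_.end()) return it->second.get();
        }
        // Slow path outside the lock; in ART this is where thread
        // suspension (and hence GC) is allowed to happen.
        auto fresh = std::make_unique<FakeClass>();
        fresh->name = name;
        std::lock_guard<std::mutex> lock(mu_);
        auto it = map_.find(name);
        if (it != map_.end()) return it->second.get();  // Someone beat us.
        FakeClass* result = fresh.get();
        map_.emplace(name, std::move(fresh));
        return result;
      }

     private:
      std::mutex mu_;
      std::unordered_map<std::string, std::unique_ptr<FakeClass>> map_;
    };

    int main() {
      GetOrCreateCache cache;
      FakeClass* a = cache.GetOrCreate("Ljava/lang/Runnable;");
      FakeClass* b = cache.GetOrCreate("Ljava/lang/Runnable;");
      std::printf("same instance: %d\n", a == b);  // prints 1
      return 0;
    }
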
diff --git a/runtime/lambda/box_class_table.h b/runtime/lambda/box_class_table.h
deleted file mode 100644
index 17e1026..0000000
--- a/runtime/lambda/box_class_table.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_H_
-#define ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_H_
-
-#include "base/allocator.h"
-#include "base/hash_map.h"
-#include "gc_root.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "object_callbacks.h"
-
-#include <stdint.h>
-
-namespace art {
-
-class ArtMethod;  // forward declaration
-template<class T> class Handle;  // forward declaration
-
-namespace mirror {
-class Class;  // forward declaration
-class ClassLoader;  // forward declaration
-class LambdaProxy;  // forward declaration
-class Object;  // forward declaration
-}  // namespace mirror
-
-namespace lambda {
-struct Closure;  // forward declaration
-
-/*
- * Store a table of boxed lambdas. This is required to maintain object referential equality
- * when a lambda is re-boxed.
- *
- * Conceptually, we store a mapping of Class Name -> Weak Reference<Class>.
- * When too many objects get GCd, we shrink the underlying table to use less space.
- */
-class BoxClassTable FINAL {
- public:
-  // TODO: This should take a LambdaArtMethod instead, read class name from that.
-  // Note: null class_loader means bootclasspath.
-  mirror::Class* GetOrCreateBoxClass(const char* class_name,
-                                     const Handle<mirror::ClassLoader>& class_loader)
-      REQUIRES(!Locks::lambda_class_table_lock_, !Roles::uninterruptible_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
-  // Sweep strong references to lambda class boxes. Update the addresses if the objects
-  // have been moved, and delete them from the table if the objects have been cleaned up.
-  template <typename Visitor>
-  void VisitRoots(const Visitor& visitor)
-      NO_THREAD_SAFETY_ANALYSIS  // for object marking requiring heap bitmap lock
-      REQUIRES(!Locks::lambda_class_table_lock_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
-
-  BoxClassTable();
-  ~BoxClassTable();
-
- private:
-  // We only store strong GC roots in our table.
-  using ValueType = GcRoot<mirror::Class>;
-
-  // Attempt to look up the class in the map, or return null if it's not there yet.
-  ValueType FindBoxedClass(const std::string& class_name) const
-      SHARED_REQUIRES(Locks::lambda_class_table_lock_);
-
-  // Store the key as a string so that we can have our own copy of the class name.
-  using UnorderedMapKeyType = std::string;
-
-  // EmptyFn implementation for art::HashMap
-  struct EmptyFn {
-    void MakeEmpty(std::pair<UnorderedMapKeyType, ValueType>& item) const
-        NO_THREAD_SAFETY_ANALYSIS;
-        // SHARED_REQUIRES(Locks::mutator_lock_);
-
-    bool IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const;
-  };
-
-  // HashFn implementation for art::HashMap
-  struct HashFn {
-    size_t operator()(const UnorderedMapKeyType& key) const
-        NO_THREAD_SAFETY_ANALYSIS;
-        // SHARED_REQUIRES(Locks::mutator_lock_);
-  };
-
-  // EqualsFn implementation for art::HashMap
-  struct EqualsFn {
-    bool operator()(const UnorderedMapKeyType& lhs, const UnorderedMapKeyType& rhs) const
-        NO_THREAD_SAFETY_ANALYSIS;
-        // SHARED_REQUIRES(Locks::mutator_lock_);
-  };
-
-  using UnorderedMap = art::HashMap<UnorderedMapKeyType,
-                                    ValueType,
-                                    EmptyFn,
-                                    HashFn,
-                                    EqualsFn,
-                                    TrackingAllocator<std::pair<UnorderedMapKeyType, ValueType>,
-                                                      kAllocatorTagLambdaProxyClassBoxTable>>;
-
-  // Map of strong GC roots (lambda interface name -> lambda proxy class)
-  UnorderedMap map_ GUARDED_BY(Locks::lambda_class_table_lock_);
-
-  // Shrink the map when we get below this load factor.
-  // (This is an arbitrary value that should be large enough to prevent aggressive map erases
-  // from shrinking the table too often.)
-  static constexpr double kMinimumLoadFactor = UnorderedMap::kDefaultMinLoadFactor / 2;
-
-  DISALLOW_COPY_AND_ASSIGN(BoxClassTable);
-};
-
-}  // namespace lambda
-}  // namespace art
-
-#endif  // ART_RUNTIME_LAMBDA_BOX_CLASS_TABLE_H_
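
The EmptyFn/HashFn/EqualsFn trio exists because art::HashMap stores elements inline with no separate occupancy metadata: an element that looks "empty" (here, an empty class-name string) marks a free slot. A compact linear-probing sketch of that contract (deliberately simplified: fixed capacity, no erase or growth):

    #include <cassert>
    #include <functional>
    #include <string>
    #include <utility>
    #include <vector>

    // Minimal open-addressing map mirroring the EmptyFn/HashFn/EqualsFn
    // contract: an empty key string marks a free slot, so "" can never be
    // stored as a real key.
    class SentinelMap {
     public:
      explicit SentinelMap(size_t capacity) : slots_(capacity) {}

      void Insert(std::string key, int value) {
        assert(!key.empty() && "empty string is reserved as the sentinel");
        for (size_t i = Hash(key); ; i = (i + 1) % slots_.size()) {
          if (slots_[i].first.empty()) {  // IsEmpty: free slot found.
            slots_[i] = {std::move(key), value};
            return;
          }
        }
      }

      int* Find(const std::string& key) {
        for (size_t i = Hash(key); ; i = (i + 1) % slots_.size()) {
          if (slots_[i].first.empty()) return nullptr;  // Hit a free slot.
          if (slots_[i].first == key) return &slots_[i].second;  // EqualsFn.
        }
      }

     private:
      size_t Hash(const std::string& key) const {
        return std::hash<std::string>()(key) % slots_.size();  // HashFn.
      }
      std::vector<std::pair<std::string, int>> slots_;
    };

    int main() {
      SentinelMap map(8);  // Demo only: capacity must exceed element count.
      map.Insert("Ljava/lang/Runnable;", 1);
      assert(map.Find("Ljava/lang/Runnable;") != nullptr);
      assert(map.Find("Ljava/lang/Thread;") == nullptr);
      return 0;
    }
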
diff --git a/runtime/lambda/box_table.cc b/runtime/lambda/box_table.cc
index 0032d08..9918bb7 100644
--- a/runtime/lambda/box_table.cc
+++ b/runtime/lambda/box_table.cc
@@ -18,10 +18,8 @@
 #include "base/mutex.h"
 #include "common_throws.h"
 #include "gc_root-inl.h"
-#include "lambda/box_class_table.h"
 #include "lambda/closure.h"
 #include "lambda/leaking_allocator.h"
-#include "mirror/lambda_proxy.h"
 #include "mirror/method.h"
 #include "mirror/object-inl.h"
 #include "thread.h"
@@ -30,13 +28,12 @@
 
 namespace art {
 namespace lambda {
-// All closures are boxed into a subtype of LambdaProxy which implements the lambda's interface.
-using BoxedClosurePointerType = mirror::LambdaProxy*;
+// Temporarily represent the lambda Closure as its raw bytes in an array.
+// TODO: Generate a proxy class for the closure when boxing the first time.
+using BoxedClosurePointerType = mirror::ByteArray*;
 
-// Returns the base class for all boxed closures.
-// Note that concrete closure boxes are actually a subtype of mirror::LambdaProxy.
-static mirror::Class* GetBoxedClosureBaseClass() SHARED_REQUIRES(Locks::mutator_lock_) {
-  return Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kJavaLangLambdaProxy);
+static mirror::Class* GetBoxedClosureClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  return mirror::ByteArray::GetArrayClass();
 }
 
 namespace {
@@ -57,14 +54,6 @@
       return closure;
     }
   };
-
-  struct DeleterForClosure {
-    void operator()(Closure* closure) const {
-      ClosureAllocator::Delete(closure);
-    }
-  };
-
-  using UniqueClosurePtr = std::unique_ptr<Closure, DeleterForClosure>;
 }  // namespace
 
 BoxTable::BoxTable()
@@ -86,9 +75,7 @@
   }
 }
 
-mirror::Object* BoxTable::BoxLambda(const ClosureType& closure,
-                                    const char* class_name,
-                                    mirror::ClassLoader* class_loader) {
+mirror::Object* BoxTable::BoxLambda(const ClosureType& closure) {
   Thread* self = Thread::Current();
 
   {
@@ -104,7 +91,7 @@
     //   Functional f = () -> 5;  // vF = create-lambda
     //   Object a = f;            // vA = box-lambda vA
     //   Object b = f;            // vB = box-lambda vB
     //   assert(a == b)
     ValueType value = FindBoxedLambda(closure);
     if (!value.IsNull()) {
       return value.Read();
@@ -113,62 +100,30 @@
     // Otherwise we need to box ourselves and insert it into the hash map
   }
 
-  // Convert the Closure into a managed object instance, whose supertype of java.lang.LambdaProxy.
-
-  // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
-  StackHandleScope<2> hs{self};  // NOLINT: [readability/braces] [4]
-
-  Handle<mirror::ClassLoader> class_loader_handle = hs.NewHandle(class_loader);
-
   // Release the lambda table lock here, so that thread suspension is allowed.
-  self->AllowThreadSuspension();
 
-  lambda::BoxClassTable* lambda_box_class_table;
+  // Convert the Closure into a managed byte[] which will serve
+  // as the temporary 'boxed' version of the lambda. This is good enough
+  // to check all the basic object identities that a boxed lambda must retain.
+  // It's also good enough to contain all the captured primitive variables.
 
-  // Find the lambda box class table, which can be in the system class loader if classloader is null
-  if (class_loader == nullptr) {
-    ScopedObjectAccessUnchecked soa(self);
-    mirror::ClassLoader* system_class_loader =
-        soa.Decode<mirror::ClassLoader*>(Runtime::Current()->GetSystemClassLoader());
-    lambda_box_class_table = system_class_loader->GetLambdaProxyCache();
-  } else {
-    lambda_box_class_table = class_loader_handle->GetLambdaProxyCache();
-    // OK: can't be deleted while we hold a handle to the class loader.
-  }
-  DCHECK(lambda_box_class_table != nullptr);
+  // TODO: Boxing an innate lambda (i.e. made with create-lambda) should make a proxy class
+  // TODO: Boxing a learned lambda (i.e. made with unbox-lambda) should return the original object
+  BoxedClosurePointerType closure_as_array_object =
+      mirror::ByteArray::Alloc(self, closure->GetSize());
 
-  Handle<mirror::Class> closure_class(hs.NewHandle(
-      lambda_box_class_table->GetOrCreateBoxClass(class_name, class_loader_handle)));
-  if (UNLIKELY(closure_class.Get() == nullptr)) {
+  // There are no thread suspension points after this, so we don't need to put it into a handle.
+
+  if (UNLIKELY(closure_as_array_object == nullptr)) {
     // Most likely an OOM has occurred.
-    self->AssertPendingException();
+    CHECK(self->IsExceptionPending());
     return nullptr;
   }
 
-  BoxedClosurePointerType closure_as_object = nullptr;
-  UniqueClosurePtr closure_table_copy;
-  // Create an instance of the class, and assign the pointer to the closure into it.
-  {
-    closure_as_object = down_cast<BoxedClosurePointerType>(closure_class->AllocObject(self));
-    if (UNLIKELY(closure_as_object == nullptr)) {
-      self->AssertPendingOOMException();
-      return nullptr;
-    }
-
-    // Make a copy of the closure that we will store in the hash map.
-    // The proxy instance will also point to this same hash map.
-    // Note that the closure pointer is cleaned up only after the proxy is GCd.
-    closure_table_copy.reset(ClosureAllocator::Allocate(closure->GetSize()));
-    closure_as_object->SetClosure(closure_table_copy.get());
-  }
-
-  // There are no thread suspension points after this, so we don't need to put it into a handle.
-  ScopedAssertNoThreadSuspension soants{self,                                                    // NOLINT: [whitespace/braces] [5]
-                                        "box lambda table - box lambda - no more suspensions"};  // NOLINT: [whitespace/braces] [5]
-
-  // Write the raw closure data into the proxy instance's copy of the closure.
-  closure->CopyTo(closure_table_copy.get(),
-                  closure->GetSize());
+  // Write the raw closure data into the byte[].
+  closure->CopyTo(closure_as_array_object->GetRawData(sizeof(uint8_t),  // component size
+                                                      0 /*index*/),     // index
+                  closure_as_array_object->GetLength());
 
   // The method has been successfully boxed into an object, now insert it into the hash map.
   {
@@ -179,21 +134,24 @@
     // we were allocating the object before.
     ValueType value = FindBoxedLambda(closure);
     if (UNLIKELY(!value.IsNull())) {
-      // Let the GC clean up closure_as_object at a later time.
-      // (We will not see this object when sweeping, it wasn't inserted yet.)
-      closure_as_object->SetClosure(nullptr);
+      // Let the GC clean up closure_as_array_object at a later time.
       return value.Read();
     }
 
     // Otherwise we need to insert it into the hash map in this thread.
 
-    // The closure_table_copy is deleted by us manually when we erase it from the map.
+    // Make a copy for the box table to keep, in case the closure gets collected from the stack.
+    // TODO: GC may need to sweep for roots in the box table's copy of the closure.
+    Closure* closure_table_copy = ClosureAllocator::Allocate(closure->GetSize());
+    closure->CopyTo(closure_table_copy, closure->GetSize());
+
+    // The closure_table_copy must be deleted manually when we erase it from the map.
 
     // Actually insert into the table.
-    map_.Insert({closure_table_copy.release(), ValueType(closure_as_object)});
+    map_.Insert({closure_table_copy, ValueType(closure_as_array_object)});
   }
 
-  return closure_as_object;
+  return closure_as_array_object;
 }
 
 bool BoxTable::UnboxLambda(mirror::Object* object, ClosureType* out_closure) {
@@ -207,35 +165,29 @@
 
   mirror::Object* boxed_closure_object = object;
 
-  // Raise ClassCastException if object is not instanceof LambdaProxy
-  if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureBaseClass()))) {
-    ThrowClassCastException(GetBoxedClosureBaseClass(), boxed_closure_object->GetClass());
+  // Raise ClassCastException if object is not instanceof byte[]
+  if (UNLIKELY(!boxed_closure_object->InstanceOf(GetBoxedClosureClass()))) {
+    ThrowClassCastException(GetBoxedClosureClass(), boxed_closure_object->GetClass());
     return false;
   }
 
   // TODO(iam): We must check that the closure object extends/implements the type
-  // specified in [type id]. This is not currently implemented since the type id is unavailable.
+  // specified in [type id]. This is not currently implemented since it's always a byte[].
 
   // If we got this far, the inputs are valid.
-  // Shuffle the java.lang.LambdaProxy back into a raw closure, then allocate it, copy,
-  // and return it.
-  BoxedClosurePointerType boxed_closure =
+  // Shuffle the byte[] back into a raw closure, then allocate it, copy, and return it.
+  BoxedClosurePointerType boxed_closure_as_array =
       down_cast<BoxedClosurePointerType>(boxed_closure_object);
 
-  DCHECK_ALIGNED(boxed_closure->GetClosure(), alignof(Closure));
-  const Closure* aligned_interior_closure = boxed_closure->GetClosure();
-  DCHECK(aligned_interior_closure != nullptr);
-
-  // TODO: we probably don't need to make a copy here later on, once there's GC support.
+  const int8_t* unaligned_interior_closure = boxed_closure_as_array->GetData();
 
   // Allocate a copy that can "escape" and copy the closure data into that.
   Closure* unboxed_closure =
-      LeakingAllocator::MakeFlexibleInstance<Closure>(self, aligned_interior_closure->GetSize());
-  DCHECK_ALIGNED(unboxed_closure, alignof(Closure));
+      LeakingAllocator::MakeFlexibleInstance<Closure>(self, boxed_closure_as_array->GetLength());
   // TODO: don't just memcpy the closure, it's unsafe when we add references to the mix.
-  memcpy(unboxed_closure, aligned_interior_closure, aligned_interior_closure->GetSize());
+  memcpy(unboxed_closure, unaligned_interior_closure, boxed_closure_as_array->GetLength());
 
-  DCHECK_EQ(unboxed_closure->GetSize(), aligned_interior_closure->GetSize());
+  DCHECK_EQ(unboxed_closure->GetSize(), static_cast<size_t>(boxed_closure_as_array->GetLength()));
 
   *out_closure = unboxed_closure;
   return true;
@@ -284,10 +236,9 @@
 
     if (new_value == nullptr) {
       // The object has been swept away.
-      Closure* closure = key_value_pair.first;
+      const ClosureType& closure = key_value_pair.first;
 
       // Delete the entry from the map.
-      // (Remove from map first to avoid accessing dangling pointer).
       map_iterator = map_.Erase(map_iterator);
 
       // Clean up the memory by deleting the closure.
@@ -339,10 +290,7 @@
 }
 
 bool BoxTable::EmptyFn::IsEmpty(const std::pair<UnorderedMapKeyType, ValueType>& item) const {
-  bool is_empty = item.first == nullptr;
-  DCHECK_EQ(item.second.IsNull(), is_empty);
-
-  return is_empty;
+  return item.first == nullptr;
 }
 
 bool BoxTable::EqualsFn::operator()(const UnorderedMapKeyType& lhs,
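
With the proxy-class machinery gone, the hunks above treat a closure as a flat blob: boxing copies its raw bytes into a byte array and unboxing memcpys them back out, which is only sound while closures contain no object references, exactly as the TODOs note. A sketch of that round trip over a reference-free stand-in closure (the layout is illustrative):

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // A stand-in for a reference-free closure: plain bytes, known size.
    struct PodClosure {
      uint32_t size;        // Total size of the blob, header included.
      uint8_t payload[12];  // Captured primitive variables.
    };

    // "Box": copy the raw bytes into an array (ART uses mirror::ByteArray).
    std::vector<uint8_t> Box(const PodClosure& c) {
      std::vector<uint8_t> bytes(c.size);
      std::memcpy(bytes.data(), &c, c.size);
      return bytes;
    }

    // "Unbox": copy the bytes back into closure storage.
    PodClosure Unbox(const std::vector<uint8_t>& bytes) {
      assert(bytes.size() == sizeof(PodClosure));
      PodClosure c;
      std::memcpy(&c, bytes.data(), bytes.size());
      return c;
    }

    int main() {
      PodClosure c{static_cast<uint32_t>(sizeof(PodClosure)), {1, 2, 3}};
      PodClosure back = Unbox(Box(c));
      assert(back.size == c.size);
      assert(std::memcmp(back.payload, c.payload, sizeof(c.payload)) == 0);
      return 0;
    }
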
diff --git a/runtime/lambda/box_table.h b/runtime/lambda/box_table.h
index 9dca6ab..adb7332 100644
--- a/runtime/lambda/box_table.h
+++ b/runtime/lambda/box_table.h
@@ -30,9 +30,6 @@
 class ArtMethod;  // forward declaration
 
 namespace mirror {
-class Class;   // forward declaration
-class ClassLoader;  // forward declaration
-class LambdaProxy;  // forward declaration
 class Object;  // forward declaration
 }  // namespace mirror
 
@@ -51,11 +48,8 @@
   using ClosureType = art::lambda::Closure*;
 
   // Boxes a closure into an object. Returns null and throws an exception on failure.
-  mirror::Object* BoxLambda(const ClosureType& closure,
-                            const char* class_name,
-                            mirror::ClassLoader* class_loader)
-      REQUIRES(!Locks::lambda_table_lock_, !Roles::uninterruptible_)
-      SHARED_REQUIRES(Locks::mutator_lock_);
+  mirror::Object* BoxLambda(const ClosureType& closure)
+      SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::lambda_table_lock_);
 
   // Unboxes an object back into the lambda. Returns false and throws an exception on failure.
   bool UnboxLambda(mirror::Object* object, ClosureType* out_closure)
@@ -134,16 +128,7 @@
                                     TrackingAllocator<std::pair<ClosureType, ValueType>,
                                                       kAllocatorTagLambdaBoxTable>>;
 
-  using ClassMap = art::HashMap<std::string,
-                                GcRoot<mirror::Class>,
-                                EmptyFn,
-                                HashFn,
-                                EqualsFn,
-                                TrackingAllocator<std::pair<ClosureType, ValueType>,
-                                                      kAllocatorTagLambdaProxyClassBoxTable>>;
-
   UnorderedMap map_                                          GUARDED_BY(Locks::lambda_table_lock_);
-  UnorderedMap classes_map_                                  GUARDED_BY(Locks::lambda_table_lock_);
   bool allow_new_weaks_                                      GUARDED_BY(Locks::lambda_table_lock_);
   ConditionVariable new_weaks_condition_                     GUARDED_BY(Locks::lambda_table_lock_);
 
diff --git a/runtime/lambda/closure.cc b/runtime/lambda/closure.cc
index f935e04..179e4ee 100644
--- a/runtime/lambda/closure.cc
+++ b/runtime/lambda/closure.cc
@@ -20,6 +20,9 @@
 #include "lambda/art_lambda_method.h"
 #include "runtime/mirror/object_reference.h"
 
+static constexpr const bool kClosureSupportsReferences = false;
+static constexpr const bool kClosureSupportsGarbageCollection = false;
+
 namespace art {
 namespace lambda {
 
@@ -125,10 +128,6 @@
   return const_cast<ArtMethod*>(lambda_info_->GetArtMethod());
 }
 
-ArtLambdaMethod* Closure::GetLambdaInfo() const {
-  return const_cast<ArtLambdaMethod*>(lambda_info_);
-}
-
 uint32_t Closure::GetHashCode() const {
   // Start with a non-zero constant, a prime number.
   uint32_t result = 17;
diff --git a/runtime/lambda/closure.h b/runtime/lambda/closure.h
index 38ec063..31ff194 100644
--- a/runtime/lambda/closure.h
+++ b/runtime/lambda/closure.h
@@ -33,52 +33,12 @@
 class ArtLambdaMethod;  // forward declaration
 class ClosureBuilder;   // forward declaration
 
-// TODO: Remove these constants once closures are supported properly.
-
-// Does the lambda closure support containing references? If so, all the users of lambdas
-// must be updated to also support references.
-static constexpr const bool kClosureSupportsReferences = false;
-// Does the lambda closure support being garbage collected? If so, all the users of lambdas
-// must be updated to also support garbage collection.
-static constexpr const bool kClosureSupportsGarbageCollection = false;
-// Does the lambda closure support being garbage collected with a read barrier? If so,
-// all the users of the lambdas msut also be updated to support read barrier GC.
-static constexpr const bool kClosureSupportsReadBarrier = false;
-
-// Is this closure being stored as a 'long' in shadow frames and the quick ABI?
-static constexpr const bool kClosureIsStoredAsLong = true;
-
-
-// Raw memory layout for the lambda closure.
-//
-// WARNING:
-// * This should only be used by the compiler and tests, as they need to offsetof the raw fields.
-// * Runtime/interpreter should always access closures through a Closure pointer.
-struct ClosureStorage {
-  // Compile-time known lambda information such as the type descriptor and size.
-  ArtLambdaMethod* lambda_info_;
-
-  // A contiguous list of captured variables, and possibly the closure size.
-  // The runtime size can always be determined through GetSize().
-  union {
-    // Read from here if the closure size is static (ArtLambdaMethod::IsStatic)
-    uint8_t static_variables_[0];
-    struct {
-      // Read from here if the closure size is dynamic (ArtLambdaMethod::IsDynamic)
-      size_t size_;  // The lambda_info_ and the size_ itself is also included as part of the size.
-      uint8_t variables_[0];
-    } dynamic_;
-  } captured_[0];
-  // captured_ will always consist of one array element at runtime.
-  // Set to [0] so that 'size_' is not counted in sizeof(Closure).
-};
-
 // Inline representation of a lambda closure.
 // Contains the target method and the set of packed captured variables as a copy.
 //
 // The closure itself is logically immutable, although in practice any object references
 // it (recursively) contains can be moved and updated by the GC.
-struct Closure : private ClosureStorage {
+struct PACKED(sizeof(ArtLambdaMethod*)) Closure {
   // Get the size of the Closure in bytes.
   // This is necessary in order to allocate a large enough area to copy the Closure into.
   // Do *not* copy the closure with memcpy, since references also need to get moved.
@@ -92,9 +52,6 @@
   // Get the target method, i.e. the method that will be dispatched into with invoke-lambda.
   ArtMethod* GetTargetMethod() const;
 
-  // Get the static lambda info that never changes.
-  ArtLambdaMethod* GetLambdaInfo() const;
-
   // Calculates the hash code. Value is recomputed each time.
   uint32_t GetHashCode() const SHARED_REQUIRES(Locks::mutator_lock_);
 
@@ -199,15 +156,28 @@
   static size_t GetClosureSize(const uint8_t* closure);
 
   ///////////////////////////////////////////////////////////////////////////////////
-  // NOTE: Actual fields are declared in ClosureStorage.
+
+  // Compile-time known lambda information such as the type descriptor and size.
+  ArtLambdaMethod* lambda_info_;
+
+  // A contiguous list of captured variables, and possibly the closure size.
+  // The runtime size can always be determined through GetSize().
+  union {
+    // Read from here if the closure size is static (ArtLambdaMethod::IsStatic)
+    uint8_t static_variables_[0];
+    struct {
+      // Read from here if the closure size is dynamic (ArtLambdaMethod::IsDynamic)
+      size_t size_;  // The lambda_info_ and the size_ itself are also included as part of the size.
+      uint8_t variables_[0];
+    } dynamic_;
+  } captured_[0];
+  // captured_ will always consist of one array element at runtime.
+  // Set to [0] so that 'size_' is not counted in sizeof(Closure).
+
+  friend class ClosureBuilder;
   friend class ClosureTest;
 };
 
-// ABI guarantees:
-// * Closure same size as a ClosureStorage
-// * ClosureStorage begins at the same point a Closure would begin.
-static_assert(sizeof(Closure) == sizeof(ClosureStorage), "Closure size must match ClosureStorage");
-
 }  // namespace lambda
 }  // namespace art
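
The restored Closure layout overlays two interpretations on the same trailing bytes: either the captured variables directly (size known statically from the lambda metadata) or a size_ header followed by the variables (size only known dynamically, e.g. for nested lambdas). A portable sketch of that union trick; names are illustrative, and since the zero-length trailing arrays of the real struct are a GNU/Clang extension, fixed buffers stand in here:

    #include <cstdint>
    #include <cstdio>

    struct LambdaInfo {
      bool dynamic_size;
      uint32_t static_size;  // Valid only when !dynamic_size.
    };

    struct ClosureSketch {
      const LambdaInfo* info;
      union {
        // Read from here if the size is known statically.
        uint8_t static_variables[16];
        struct {
          // Read from here if the size is only known dynamically.
          uint32_t size;  // Counts the header and the size field itself.
          uint8_t variables[12];
        } dynamic;
      } captured;

      uint32_t GetSize() const {
        return info->dynamic_size ? captured.dynamic.size
                                  : info->static_size;
      }
    };

    int main() {
      static const LambdaInfo fixed{
          false, static_cast<uint32_t>(sizeof(ClosureSketch))};
      ClosureSketch c{};
      c.info = &fixed;
      std::printf("size=%u\n", c.GetSize());
      return 0;
    }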
 
diff --git a/runtime/lambda/closure_builder.cc b/runtime/lambda/closure_builder.cc
index 7b36042..739e965 100644
--- a/runtime/lambda/closure_builder.cc
+++ b/runtime/lambda/closure_builder.cc
@@ -75,7 +75,7 @@
   if (LIKELY(is_dynamic_size_ == false)) {
     // Write in the extra bytes to store the dynamic size the first time.
     is_dynamic_size_ = true;
-    size_ += sizeof(ClosureStorage::captured_[0].dynamic_.size_);
+    size_ += sizeof(Closure::captured_[0].dynamic_.size_);
   }
 
   // A closure may be sized dynamically, so always query it for the true size.
@@ -107,40 +107,38 @@
     << "number of variables captured at runtime does not match "
     << "number of variables captured at compile time";
 
-  ClosureStorage* closure_storage = new (memory) ClosureStorage;
-  closure_storage->lambda_info_ = target_method;
+  Closure* closure = new (memory) Closure;
+  closure->lambda_info_ = target_method;
 
-  static_assert(offsetof(ClosureStorage, captured_) == kInitialSize, "wrong initial size");
+  static_assert(offsetof(Closure, captured_) == kInitialSize, "wrong initial size");
 
   size_t written_size;
   if (UNLIKELY(is_dynamic_size_)) {
     // The closure size must be set dynamically (i.e. nested lambdas).
-    closure_storage->captured_[0].dynamic_.size_ = GetSize();
-    size_t header_size = offsetof(ClosureStorage, captured_[0].dynamic_.variables_);
+    closure->captured_[0].dynamic_.size_ = GetSize();
+    size_t header_size = offsetof(Closure, captured_[0].dynamic_.variables_);
     DCHECK_LE(header_size, GetSize());
     size_t variables_size = GetSize() - header_size;
     written_size =
         WriteValues(target_method,
-                    closure_storage->captured_[0].dynamic_.variables_,
+                    closure->captured_[0].dynamic_.variables_,
                     header_size,
                     variables_size);
   } else {
     // The closure size is known statically (i.e. no nested lambdas).
     DCHECK(GetSize() == target_method->GetStaticClosureSize());
-    size_t header_size = offsetof(ClosureStorage, captured_[0].static_variables_);
+    size_t header_size = offsetof(Closure, captured_[0].static_variables_);
     DCHECK_LE(header_size, GetSize());
     size_t variables_size = GetSize() - header_size;
     written_size =
         WriteValues(target_method,
-                    closure_storage->captured_[0].static_variables_,
+                    closure->captured_[0].static_variables_,
                     header_size,
                     variables_size);
   }
 
-  // OK: The closure storage is guaranteed to be the same as a closure.
-  Closure* closure = reinterpret_cast<Closure*>(closure_storage);
-
   DCHECK_EQ(written_size, closure->GetSize());
+
   return closure;
 }
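
CreateInPlace() above builds a closure by placement-newing the header into caller-provided memory and then writing the captured values immediately after it. A self-contained sketch of that construction (the header is hypothetical; the real code derives its header size with offsetof on the captured_ member rather than sizeof):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <new>

    struct Header {
      uint32_t total_size;
    };

    Header* CreateInPlace(void* memory, const uint8_t* values, uint32_t n) {
      // Placement-new the header into caller-provided storage...
      Header* header =
          new (memory) Header{static_cast<uint32_t>(sizeof(Header) + n)};
      // ...then write the captured values immediately after it.
      uint8_t* variables = reinterpret_cast<uint8_t*>(header) + sizeof(Header);
      std::memcpy(variables, values, n);
      return header;
    }

    int main() {
      const uint8_t captured[] = {1, 2, 3, 4};
      void* memory = std::malloc(sizeof(Header) + sizeof(captured));
      Header* closure = CreateInPlace(memory, captured, sizeof(captured));
      std::printf("total_size=%u\n", closure->total_size);
      std::free(memory);  // Header is trivially destructible.
      return 0;
    }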
 
diff --git a/runtime/lambda/shorty_field_type.h b/runtime/lambda/shorty_field_type.h
index 54bb4d4..46ddaa9 100644
--- a/runtime/lambda/shorty_field_type.h
+++ b/runtime/lambda/shorty_field_type.h
@@ -285,39 +285,6 @@
     }
   }
 
-  // Get the number of virtual registers necessary to represent this type as a stack local.
-  inline size_t GetVirtualRegisterCount() const {
-    if (IsPrimitiveNarrow()) {
-      return 1;
-    } else if (IsPrimitiveWide()) {
-      return 2;
-    } else if (IsObject()) {
-      return kObjectReferenceSize / sizeof(uint32_t);
-    } else if (IsLambda()) {
-      return 2;
-    } else {
-      DCHECK(false) << "unknown shorty field type '" << static_cast<char>(value_) << "'";
-      UNREACHABLE();
-    }
-  }
-
-  // Count how many virtual registers would be necessary in order to store this list of shorty
-  // field types.
-  inline size_t static CountVirtualRegistersRequired(const char* shorty) {
-    size_t size = 0;
-
-    while (shorty != nullptr && *shorty != '\0') {
-      // Each argument appends to the size.
-      ShortyFieldType shorty_field{*shorty};  // NOLINT [readability/braces] [4]
-
-      size += shorty_field.GetVirtualRegisterCount();
-
-      ++shorty;
-  }
-
-    return size;
-  }
-
   // Implicitly convert to the anonymous nested inner type. Used for exhaustive switch detection.
   inline operator decltype(kByte)() const {
     return value_;
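
The deleted CountVirtualRegistersRequired() summed per-type virtual-register costs over a shorty string: narrow primitives and references take one vreg, long/double and lambdas ('\\') take two, and callers passed shorty + 1 to skip the return-type character. A standalone sketch whose expected values are taken from the test deleted below:

    #include <cassert>
    #include <cstddef>

    // Arguments-only shorty (the leading return-type character is assumed
    // to have been skipped already, as GetArgumentVRegCount did).
    size_t CountVRegs(const char* shorty) {
      size_t vregs = 0;
      for (; shorty != nullptr && *shorty != '\0'; ++shorty) {
        switch (*shorty) {
          case 'J': case 'D': case '\\':  // long, double, lambda
            vregs += 2;
            break;
          default:  // Z, B, C, S, I, F and references (L)
            vregs += 1;
            break;
        }
      }
      return vregs;
    }

    int main() {
      // Expected values from the deleted shorty_field_type_test.cc below.
      assert(CountVRegs("") == 0);
      assert(CountVRegs("ZBCSIFJD") == 10);
      assert(CountVRegs("LLSSI") == 5);
      assert(CountVRegs("LLL\\L") == 6);
      return 0;
    }
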
diff --git a/runtime/lambda/shorty_field_type_test.cc b/runtime/lambda/shorty_field_type_test.cc
index 430e39e..32bade9 100644
--- a/runtime/lambda/shorty_field_type_test.cc
+++ b/runtime/lambda/shorty_field_type_test.cc
@@ -218,56 +218,6 @@
   }
 }  // TEST_F
 
-TEST_F(ShortyFieldTypeTest, TestCalculateVRegSize) {
-  // Make sure the single calculation for each value is correct.
-  std::pair<size_t, char> expected_actual_single[] = {
-      // Primitives
-      { 1u, 'Z' },
-      { 1u, 'B' },
-      { 1u, 'C' },
-      { 1u, 'S' },
-      { 1u, 'I' },
-      { 1u, 'F' },
-      { 2u, 'J' },
-      { 2u, 'D' },
-      // Non-primitives
-      { 1u, 'L' },
-      { 2u, '\\' },
-  };
-
-  for (auto pair : expected_actual_single) {
-    SCOPED_TRACE(pair.second);
-    EXPECT_EQ(pair.first, ShortyFieldType(pair.second).GetVirtualRegisterCount());
-  }
-
-  // Make sure we are correctly calculating how many virtual registers a shorty descriptor takes.
-  std::pair<size_t, const char*> expected_actual[] = {
-      // Empty list
-      { 0u, "" },
-      // Primitives
-      { 1u, "Z" },
-      { 1u, "B" },
-      { 1u, "C" },
-      { 1u, "S" },
-      { 1u, "I" },
-      { 1u, "F" },
-      { 2u, "J" },
-      { 2u, "D" },
-      // Non-primitives
-      { 1u, "L" },
-      { 2u, "\\" },
-      // Multiple things at once:
-      { 10u, "ZBCSIFJD" },
-      { 5u, "LLSSI" },
-      { 6u, "LLL\\L" }
-  };
-
-  for (auto pair : expected_actual) {
-    SCOPED_TRACE(pair.second);
-    EXPECT_EQ(pair.first, ShortyFieldType::CountVirtualRegistersRequired(pair.second));
-  }
-}  // TEST_F
-
 // Helper class to probe a shorty's characteristics by minimizing copy-and-paste tests.
 template <typename T, decltype(ShortyFieldType::kByte) kShortyEnum>
 struct ShortyTypeCharacteristics {
diff --git a/runtime/lambda_proxy_test.cc b/runtime/lambda_proxy_test.cc
deleted file mode 100644
index 63d6ccc..0000000
--- a/runtime/lambda_proxy_test.cc
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <jni.h>
-#include <vector>
-
-#include "art_field-inl.h"
-#include "class_linker-inl.h"
-#include "compiler_callbacks.h"
-#include "common_compiler_test.h"
-#include "mirror/field-inl.h"
-#include "mirror/lambda_proxy.h"
-#include "mirror/method.h"
-#include "scoped_thread_state_change.h"
-
-namespace art {
-
-// The enclosing class of all the interfaces used by this test.
-// -- Defined as a macro to allow for string concatenation.
-#define TEST_INTERFACE_ENCLOSING_CLASS_NAME "LambdaInterfaces"
-// Generate out "LLambdaInterfaces$<<iface>>;" , replacing <<iface>> with the interface name.
-#define MAKE_TEST_INTERFACE_NAME(iface) ("L" TEST_INTERFACE_ENCLOSING_CLASS_NAME "$" iface ";")
-
-#define ASSERT_NOT_NULL(x) ASSERT_TRUE((x) != nullptr)
-#define ASSERT_NULL(x) ASSERT_TRUE((x) == nullptr)
-#define EXPECT_NULL(x) EXPECT_TRUE((x) == nullptr)
-
-class LambdaProxyTest  // : public CommonCompilerTest {
-    : public CommonRuntimeTest {
- public:
-  // Generate a lambda proxy class with the given name and interfaces. This is a simplification from what
-  // libcore does to fit to our test needs. We do not check for duplicated interfaces or methods and
-  // we do not declare exceptions.
-  mirror::Class* GenerateProxyClass(ScopedObjectAccess& soa,
-                                    jobject jclass_loader,
-                                    const char* class_name,
-                                    const std::vector<mirror::Class*>& interfaces)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
-    CHECK(class_name != nullptr);
-    CHECK(jclass_loader != nullptr);
-
-    mirror::Class* java_lang_object =
-        class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
-    CHECK(java_lang_object != nullptr);
-
-    jclass java_lang_class = soa.AddLocalReference<jclass>(mirror::Class::GetJavaLangClass());
-
-    // Builds the interfaces array.
-    jobjectArray proxy_class_interfaces = soa.Env()->NewObjectArray(interfaces.size(),
-                                                                    java_lang_class,
-                                                                    nullptr);  // No initial element.
-    soa.Self()->AssertNoPendingException();
-    for (size_t i = 0; i < interfaces.size(); ++i) {
-      soa.Env()->SetObjectArrayElement(proxy_class_interfaces,
-                                       i,
-                                       soa.AddLocalReference<jclass>(interfaces[i]));
-    }
-
-    // Builds the method array.
-    jsize methods_count = 3;  // Object.equals, Object.hashCode and Object.toString.
-    for (mirror::Class* interface : interfaces) {
-      methods_count += interface->NumVirtualMethods();
-    }
-    jobjectArray proxy_class_methods =
-        soa.Env()->NewObjectArray(methods_count,
-                                  soa.AddLocalReference<jclass>(mirror::Method::StaticClass()),
-                                  nullptr);  // No initial element.
-    soa.Self()->AssertNoPendingException();
-
-    jsize array_index = 0;
-
-    //
-    // Fill the method array with the Object and all the interface's virtual methods.
-    //
-
-    // Add a method to 'proxy_class_methods'
-    auto add_method_to_array = [&](ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
-      CHECK(method != nullptr);
-      soa.Env()->SetObjectArrayElement(proxy_class_methods,
-                                       array_index++,
-                                       soa.AddLocalReference<jobject>(
-                                           mirror::Method::CreateFromArtMethod(soa.Self(),
-                                                                               method))
-                                      );  // NOLINT: [whitespace/parens] [2]
-
-      LOG(DEBUG) << "Add " << PrettyMethod(method) << " to list of methods to generate proxy";
-    };
-    // Add a method to 'proxy_class_methods' by looking it up from java.lang.Object
-    auto add_method_to_array_by_lookup = [&](const char* name, const char* method_descriptor)
-        SHARED_REQUIRES(Locks::mutator_lock_) {
-      ArtMethod* method = java_lang_object->FindDeclaredVirtualMethod(name,
-                                                                      method_descriptor,
-                                                                      sizeof(void*));
-      add_method_to_array(method);
-    };
-
-    // Add all methods from Object.
-    add_method_to_array_by_lookup("equals",   "(Ljava/lang/Object;)Z");
-    add_method_to_array_by_lookup("hashCode", "()I");
-    add_method_to_array_by_lookup("toString", "()Ljava/lang/String;");
-
-    // Now adds all interfaces virtual methods.
-    for (mirror::Class* interface : interfaces) {
-      mirror::Class* next_class = interface;
-      do {
-        for (ArtMethod& method : next_class->GetVirtualMethods(sizeof(void*))) {
-          add_method_to_array(&method);
-        }
-        next_class = next_class->GetSuperClass();
-      } while (!next_class->IsObjectClass());
-      // Skip adding any methods from "Object".
-    }
-    CHECK_EQ(array_index, methods_count);
-
-    // Builds an empty exception array.
-    jobjectArray proxy_class_throws = soa.Env()->NewObjectArray(0 /* length */,
-                                                                java_lang_class,
-                                                                nullptr /* initial element*/);
-    soa.Self()->AssertNoPendingException();
-
-    bool already_exists;
-    mirror::Class* proxy_class =
-        class_linker_->CreateLambdaProxyClass(soa,
-                                              soa.Env()->NewStringUTF(class_name),
-                                              proxy_class_interfaces,
-                                              jclass_loader,
-                                              proxy_class_methods,
-                                              proxy_class_throws,
-                                              /*out*/&already_exists);
-
-    CHECK(!already_exists);
-
-    soa.Self()->AssertNoPendingException();
-    return proxy_class;
-  }
-
-  LambdaProxyTest() {
-  }
-
-  virtual void SetUp() {
-    CommonRuntimeTest::SetUp();
-  }
-
-  virtual void SetUpRuntimeOptions(RuntimeOptions* options ATTRIBUTE_UNUSED) {
-    // Do not have any compiler options because we don't want to run as an AOT
-    // (In particular the lambda proxy class generation isn't currently supported for AOT).
-    this->callbacks_.reset();
-  }
-
-  template <typename THandleScope>
-  Handle<mirror::Class> GenerateProxyClass(THandleScope& hs,
-                                           const char* name,
-                                           const std::vector<mirror::Class*>& interfaces)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
-    return hs.NewHandle(GenerateProxyClass(*soa_, jclass_loader_, name, interfaces));
-  }
-
- protected:
-  ScopedObjectAccess* soa_ = nullptr;
-  jobject jclass_loader_ = nullptr;
-};
-
-// Creates a lambda proxy class and check ClassHelper works correctly.
-TEST_F(LambdaProxyTest, ProxyClassHelper) {
-  // gLogVerbosity.class_linker = true;  // Uncomment to enable class linker logging.
-
-  ASSERT_NOT_NULL(Thread::Current());
-
-  ScopedObjectAccess soa(Thread::Current());
-  soa_ = &soa;
-
-  // Must happen after CommonRuntimeTest finishes constructing the runtime.
-  jclass_loader_ = LoadDex(TEST_INTERFACE_ENCLOSING_CLASS_NAME);
-  jobject jclass_loader = jclass_loader_;
-
-  StackHandleScope<4> hs(soa.Self());
-  Handle<mirror::ClassLoader> class_loader(
-      hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
-
-  Handle<mirror::Class> J(hs.NewHandle(
-      class_linker_->FindClass(soa.Self(), MAKE_TEST_INTERFACE_NAME("J"), class_loader)));
-  ASSERT_TRUE(J.Get() != nullptr);
-
-  std::vector<mirror::Class*> interfaces;
-  interfaces.push_back(J.Get());
-  Handle<mirror::Class> proxy_class(hs.NewHandle(
-      GenerateProxyClass(soa, jclass_loader, "$Proxy1234", interfaces)));
-  interfaces.clear();  // Don't least possibly stale objects in the array as good practice.
-  ASSERT_TRUE(proxy_class.Get() != nullptr);
-  ASSERT_TRUE(proxy_class->IsLambdaProxyClass());
-  ASSERT_TRUE(proxy_class->IsInitialized());
-
-  EXPECT_EQ(1U, proxy_class->NumDirectInterfaces());  // LambdaInterfaces$J.
-  EXPECT_EQ(J.Get(), mirror::Class::GetDirectInterface(soa.Self(), proxy_class, 0));
-  std::string temp;
-  const char* proxy_class_descriptor = proxy_class->GetDescriptor(&temp);
-  EXPECT_STREQ("L$Proxy1234;", proxy_class_descriptor);
-  EXPECT_EQ(nullptr, proxy_class->GetSourceFile());
-
-  // Make sure all the virtual methods are marked as a proxy
-  for (ArtMethod& method : proxy_class->GetVirtualMethods(sizeof(void*))) {
-    SCOPED_TRACE(PrettyMethod(&method, /* with_signature */true));
-    EXPECT_TRUE(method.IsProxyMethod());
-    EXPECT_TRUE(method.IsLambdaProxyMethod());
-    EXPECT_FALSE(method.IsReflectProxyMethod());
-  }
-}
-
-// Creates a proxy class and check FieldHelper works correctly.
-TEST_F(LambdaProxyTest, ProxyFieldHelper) {
-  // gLogVerbosity.class_linker = true;  // Uncomment to enable class linker logging.
-
-  ASSERT_NOT_NULL(Thread::Current());
-
-  ScopedObjectAccess soa(Thread::Current());
-  soa_ = &soa;
-
-  // Must happen after CommonRuntimeTest finishes constructing the runtime.
-  jclass_loader_ = LoadDex(TEST_INTERFACE_ENCLOSING_CLASS_NAME);
-  jobject jclass_loader = jclass_loader_;
-
-  StackHandleScope<9> hs(soa.Self());
-  Handle<mirror::ClassLoader> class_loader(
-      hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
-
-  Handle<mirror::Class> I(hs.NewHandle(
-      class_linker_->FindClass(soa.Self(), MAKE_TEST_INTERFACE_NAME("I"), class_loader)));
-  ASSERT_NOT_NULL(I.Get());
-
-  // Create the lambda proxy which implements interfaces "I".
-  Handle<mirror::Class> proxy_class = GenerateProxyClass(hs,
-                                                         "$Proxy1234",
-                                                         { I.Get() });  // Interfaces.
-
-  ASSERT_NOT_NULL(proxy_class.Get());
-  EXPECT_TRUE(proxy_class->IsLambdaProxyClass());
-  EXPECT_TRUE(proxy_class->IsInitialized());
-  EXPECT_NULL(proxy_class->GetIFieldsPtr());
-
-  LengthPrefixedArray<ArtField>* static_fields = proxy_class->GetSFieldsPtr();
-  ASSERT_NOT_NULL(static_fields);
-
-  // Must have "throws" and "interfaces" static fields.
-  ASSERT_EQ(+mirror::LambdaProxy::kStaticFieldCount, proxy_class->NumStaticFields());
-
-  static constexpr const char* kInterfacesClassName = "[Ljava/lang/Class;";
-  static constexpr const char* kThrowsClassName     = "[[Ljava/lang/Class;";
-
-  // Class for "interfaces" field.
-  Handle<mirror::Class> interfaces_field_class =
-      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), kInterfacesClassName));
-  ASSERT_NOT_NULL(interfaces_field_class.Get());
-
-  // Class for "throws" field.
-  Handle<mirror::Class> throws_field_class =
-      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), kThrowsClassName));
-  ASSERT_NOT_NULL(throws_field_class.Get());
-
-  // Helper to test the static fields for correctness.
-  auto test_static_field = [&](size_t index,
-                               const char* field_name,
-                               Handle<mirror::Class>& handle_class,
-                               const char* class_name)
-      SHARED_REQUIRES(Locks::mutator_lock_) {
-    ArtField* field = &static_fields->At(index);
-    EXPECT_STREQ(field_name, field->GetName());
-    EXPECT_STREQ(class_name, field->GetTypeDescriptor());
-    EXPECT_EQ(handle_class.Get(), field->GetType</*kResolve*/true>())
-        << "Expected: " << PrettyClass(interfaces_field_class.Get()) << ", "
-        << "Actual: " << PrettyClass(field->GetType</*kResolve*/true>()) << ", "
-        << "field_name: " << field_name;
-    std::string temp;
-    EXPECT_STREQ("L$Proxy1234;", field->GetDeclaringClass()->GetDescriptor(&temp));
-    EXPECT_FALSE(field->IsPrimitiveType());
-  };
-
-  // Test "Class[] interfaces" field.
-  test_static_field(mirror::LambdaProxy::kStaticFieldIndexInterfaces,
-                    "interfaces",
-                    interfaces_field_class,
-                    kInterfacesClassName);
-
-  // Test "Class[][] throws" field.
-  test_static_field(mirror::LambdaProxy::kStaticFieldIndexThrows,
-                    "throws",
-                    throws_field_class,
-                    kThrowsClassName);
-}
-
-// Creates two proxy classes and check the art/mirror fields of their static fields.
-TEST_F(LambdaProxyTest, CheckArtMirrorFieldsOfProxyStaticFields) {
-  // gLogVerbosity.class_linker = true;  // Uncomment to enable class linker logging.
-
-  ASSERT_NOT_NULL(Thread::Current());
-
-  ScopedObjectAccess soa(Thread::Current());
-  soa_ = &soa;
-
-  // Must happen after CommonRuntimeTest finishes constructing the runtime.
-  jclass_loader_ = LoadDex(TEST_INTERFACE_ENCLOSING_CLASS_NAME);
-  jobject jclass_loader = jclass_loader_;
-
-  StackHandleScope<8> hs(soa.Self());
-  Handle<mirror::ClassLoader> class_loader(
-      hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader)));
-
-  Handle<mirror::Class> proxyClass0;
-  Handle<mirror::Class> proxyClass1;
-  {
-    Handle<mirror::Class> L(hs.NewHandle(
-        class_linker_->FindClass(soa.Self(), MAKE_TEST_INTERFACE_NAME("L"), class_loader)));
-    ASSERT_TRUE(L.Get() != nullptr);
-
-    std::vector<mirror::Class*> interfaces = { L.Get() };
-    proxyClass0 = hs.NewHandle(GenerateProxyClass(soa, jclass_loader, "$Proxy0", interfaces));
-    proxyClass1 = hs.NewHandle(GenerateProxyClass(soa, jclass_loader, "$Proxy1", interfaces));
-  }
-
-  ASSERT_TRUE(proxyClass0.Get() != nullptr);
-  ASSERT_TRUE(proxyClass0->IsLambdaProxyClass());
-  ASSERT_TRUE(proxyClass0->IsInitialized());
-  ASSERT_TRUE(proxyClass1.Get() != nullptr);
-  ASSERT_TRUE(proxyClass1->IsLambdaProxyClass());
-  ASSERT_TRUE(proxyClass1->IsInitialized());
-
-  LengthPrefixedArray<ArtField>* static_fields0 = proxyClass0->GetSFieldsPtr();
-  ASSERT_TRUE(static_fields0 != nullptr);
-  ASSERT_EQ(2u, static_fields0->size());
-  LengthPrefixedArray<ArtField>* static_fields1 = proxyClass1->GetSFieldsPtr();
-  ASSERT_TRUE(static_fields1 != nullptr);
-  ASSERT_EQ(2u, static_fields1->size());
-
-  EXPECT_EQ(static_fields0->At(0).GetDeclaringClass(), proxyClass0.Get());
-  EXPECT_EQ(static_fields0->At(1).GetDeclaringClass(), proxyClass0.Get());
-  EXPECT_EQ(static_fields1->At(0).GetDeclaringClass(), proxyClass1.Get());
-  EXPECT_EQ(static_fields1->At(1).GetDeclaringClass(), proxyClass1.Get());
-
-  Handle<mirror::Field> field00 =
-      hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields0->At(0), true));
-  Handle<mirror::Field> field01 =
-      hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields0->At(1), true));
-  Handle<mirror::Field> field10 =
-      hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields1->At(0), true));
-  Handle<mirror::Field> field11 =
-      hs.NewHandle(mirror::Field::CreateFromArtField(soa.Self(), &static_fields1->At(1), true));
-  EXPECT_EQ(field00->GetArtField(), &static_fields0->At(0));
-  EXPECT_EQ(field01->GetArtField(), &static_fields0->At(1));
-  EXPECT_EQ(field10->GetArtField(), &static_fields1->At(0));
-  EXPECT_EQ(field11->GetArtField(), &static_fields1->At(1));
-}
-
-// TODO: make sure there's a non-abstract implementation of the single-abstract-method on the class.
-
-}  // namespace art
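
The deleted test assembles its interface and method lists as JNI object arrays before handing them to the class linker. A minimal sketch of that JNI pattern; it compiles against <jni.h> but needs an attached JavaVM to run, and error handling is trimmed to the essentials:

    #include <jni.h>

    // Build a Class[] of the given interfaces, the way the deleted
    // GenerateProxyClass filled 'proxy_class_interfaces'. Returns a local
    // reference, or null with an exception pending.
    jobjectArray BuildInterfaceArray(JNIEnv* env,
                                     jclass* interfaces,
                                     jsize count) {
      jclass java_lang_class = env->FindClass("java/lang/Class");
      if (java_lang_class == nullptr) {
        return nullptr;  // Exception is pending.
      }
      jobjectArray array = env->NewObjectArray(count,
                                               java_lang_class,
                                               /*initialElement=*/nullptr);
      if (array == nullptr) {
        return nullptr;  // OOM; exception is pending.
      }
      for (jsize i = 0; i < count; ++i) {
        env->SetObjectArrayElement(array, i, interfaces[i]);
      }
      return array;
    }
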
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index a8685b8..9e416dc 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -695,11 +695,7 @@
 }
 
 inline const DexFile& Class::GetDexFile() {
-  DexCache* dex_cache = GetDexCache();
-  DCHECK(dex_cache != nullptr);
-  const DexFile* dex_file = dex_cache->GetDexFile();
-  DCHECK(dex_file != nullptr);
-  return *dex_file;
+  return *GetDexCache()->GetDexFile();
 }
 
 inline bool Class::DescriptorEquals(const char* match) {
@@ -707,8 +703,8 @@
     return match[0] == '[' && GetComponentType()->DescriptorEquals(match + 1);
   } else if (IsPrimitive()) {
     return strcmp(Primitive::Descriptor(GetPrimitiveType()), match) == 0;
-  } else if (IsAnyProxyClass()) {
-    return AnyProxyDescriptorEquals(match);
+  } else if (IsProxyClass()) {
+    return ProxyDescriptorEquals(match);
   } else {
     const DexFile& dex_file = GetDexFile();
     const DexFile::TypeId& type_id = dex_file.GetTypeId(GetClassDef()->class_idx_);
@@ -724,32 +720,22 @@
   }
 }
 
-inline ObjectArray<Class>* Class::GetInterfacesForAnyProxy() {
-  CHECK(IsAnyProxyClass());
+inline ObjectArray<Class>* Class::GetInterfaces() {
+  CHECK(IsProxyClass());
   // First static field.
   auto* field = GetStaticField(0);
   DCHECK_STREQ(field->GetName(), "interfaces");
   MemberOffset field_offset = field->GetOffset();
-  ObjectArray<Class>* interfaces_array = GetFieldObject<ObjectArray<Class>>(field_offset);
-
-  CHECK(interfaces_array != nullptr);
-  if (UNLIKELY(IsLambdaProxyClass())) {
-    DCHECK_EQ(1, interfaces_array->GetLength())
-        << "Lambda proxies cannot have multiple direct interfaces implemented";
-  }
-  return interfaces_array;
+  return GetFieldObject<ObjectArray<Class>>(field_offset);
 }
 
-inline ObjectArray<ObjectArray<Class>>* Class::GetThrowsForAnyProxy() {
-  CHECK(IsAnyProxyClass());
+inline ObjectArray<ObjectArray<Class>>* Class::GetThrows() {
+  CHECK(IsProxyClass());
   // Second static field.
   auto* field = GetStaticField(1);
   DCHECK_STREQ(field->GetName(), "throws");
-
   MemberOffset field_offset = field->GetOffset();
-  auto* throws_array = GetFieldObject<ObjectArray<ObjectArray<Class>>>(field_offset);
-  CHECK(throws_array != nullptr);
-  return throws_array;
+  return GetFieldObject<ObjectArray<ObjectArray<Class>>>(field_offset);
 }
 
 inline MemberOffset Class::GetDisableIntrinsicFlagOffset() {
@@ -810,8 +796,8 @@
     return 0;
   } else if (IsArrayClass()) {
     return 2;
-  } else if (IsAnyProxyClass()) {
-    mirror::ObjectArray<mirror::Class>* interfaces = GetInterfacesForAnyProxy();
+  } else if (IsProxyClass()) {
+    mirror::ObjectArray<mirror::Class>* interfaces = GetInterfaces();
     return interfaces != nullptr ? interfaces->GetLength() : 0;
   } else {
     const DexFile::TypeList* interfaces = GetInterfaceTypeList();
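
To make the renamed accessor concrete: `GetInterfaces()` reads the first synthetic static field that proxy generation installs on every proxy class, and it is what the reflective interface list of a proxy ultimately reports. A minimal sketch of the observable behavior, assuming ART's proxy layout (the class and interface names `ProxyInterfacesDemo`/`I` are ours, not from this change):

import java.lang.reflect.Proxy;
import java.util.Arrays;

public class ProxyInterfacesDemo {
  interface I {}

  public static void main(String[] args) {
    Object p = Proxy.newProxyInstance(
        ProxyInterfacesDemo.class.getClassLoader(),
        new Class<?>[] { I.class },
        (proxy, method, a) -> null);  // no-op invocation handler
    // On ART, the proxy case of getInterfaces() is served from the
    // "interfaces" static field that Class::GetInterfaces() reads above.
    System.out.println(Arrays.toString(p.getClass().getInterfaces()));
    // Expected output: [interface ProxyInterfacesDemo$I]
  }
}
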
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index b201293..05a9039 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -538,7 +538,6 @@
 
 ArtMethod* Class::FindClassInitializer(size_t pointer_size) {
   for (ArtMethod& method : GetDirectMethods(pointer_size)) {
-    DCHECK(reinterpret_cast<volatile void*>(&method) != nullptr);
     if (method.IsClassInitializer()) {
       DCHECK_STREQ(method.GetName(), "<clinit>");
       DCHECK_STREQ(method.GetSignature().ToString().c_str(), "()V");
@@ -743,8 +742,8 @@
     return Primitive::Descriptor(GetPrimitiveType());
   } else if (IsArrayClass()) {
     return GetArrayDescriptor(storage);
-  } else if (IsAnyProxyClass()) {
-    *storage = Runtime::Current()->GetClassLinker()->GetDescriptorForAnyProxy(this);
+  } else if (IsProxyClass()) {
+    *storage = Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(this);
     return storage->c_str();
   } else {
     const DexFile& dex_file = GetDexFile();
@@ -787,10 +786,8 @@
       DCHECK_EQ(1U, idx);
       return class_linker->FindSystemClass(self, "Ljava/io/Serializable;");
     }
-  } else if (klass->IsAnyProxyClass()) {
-    // Proxies don't have a dex cache, so look at the
-    // interfaces through the magic static field "interfaces" from the proxy class itself.
-    mirror::ObjectArray<mirror::Class>* interfaces = klass.Get()->GetInterfacesForAnyProxy();
+  } else if (klass->IsProxyClass()) {
+    mirror::ObjectArray<mirror::Class>* interfaces = klass.Get()->GetInterfaces();
     DCHECK(interfaces != nullptr);
     return interfaces->Get(idx);
   } else {
@@ -829,7 +826,7 @@
 
 std::string Class::GetLocation() {
   mirror::DexCache* dex_cache = GetDexCache();
-  if (dex_cache != nullptr && !IsAnyProxyClass()) {
+  if (dex_cache != nullptr && !IsProxyClass()) {
     return dex_cache->GetLocation()->ToModifiedUtf8();
   }
   // Arrays and proxies are generated and have no corresponding dex file location.
@@ -947,9 +944,9 @@
   return new_class->AsClass();
 }
 
-bool Class::AnyProxyDescriptorEquals(const char* match) {
-  DCHECK(IsAnyProxyClass());
-  return Runtime::Current()->GetClassLinker()->GetDescriptorForAnyProxy(this) == match;
+bool Class::ProxyDescriptorEquals(const char* match) {
+  DCHECK(IsProxyClass());
+  return Runtime::Current()->GetClassLinker()->GetDescriptorForProxy(this) == match;
 }
 
 // TODO: Move this to java_lang_Class.cc?
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index fcfb4b9..0ab5b97 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -352,16 +352,8 @@
   static String* ComputeName(Handle<Class> h_this) SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!Roles::uninterruptible_);
 
-  // Is this either a java.lang.reflect.Proxy or a boxed lambda (java.lang.LambdaProxy)?
-  // -- Most code doesn't need to make the distinction, and this is the preferred thing to check.
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsAnyProxyClass() SHARED_REQUIRES(Locks::mutator_lock_) {
-    return IsReflectProxyClass() || IsLambdaProxyClass();
-  }
-
-  // Is this a java.lang.reflect.Proxy ?
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsReflectProxyClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+  bool IsProxyClass() SHARED_REQUIRES(Locks::mutator_lock_) {
     // Read access flags without using the getter, as whether something is a proxy can be
     // checked in any loaded state.
     // TODO: switch to a check if the super class is java.lang.reflect.Proxy?
@@ -369,17 +361,6 @@
     return (access_flags & kAccClassIsProxy) != 0;
   }
 
-  // Is this a boxed lambda (java.lang.LambdaProxy)?
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  bool IsLambdaProxyClass() SHARED_REQUIRES(Locks::mutator_lock_) {
-    // Read access flags without using getter as whether something is a proxy can be check in
-    // any loaded state
-    // TODO: switch to a check if the super class is java.lang.reflect.Proxy?
-    uint32_t access_flags = GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
-    return (access_flags & kAccClassIsLambdaProxy) != 0;
-  }
-
-
   static MemberOffset PrimitiveTypeOffset() {
     return OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_);
   }
@@ -696,8 +677,6 @@
     return MemberOffset(OFFSETOF_MEMBER(Class, super_class_));
   }
 
-  // Returns the class's ClassLoader.
-  // A null value is returned if and only if this is a boot classpath class.
   ClassLoader* GetClassLoader() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
 
   void SetClassLoader(ClassLoader* new_cl) SHARED_REQUIRES(Locks::mutator_lock_);
@@ -1097,8 +1076,6 @@
 
   bool DescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // Returns the backing DexFile's class definition for this class.
-  // This returns null if and only if the class has no backing DexFile.
   const DexFile::ClassDef* GetClassDef() SHARED_REQUIRES(Locks::mutator_lock_);
 
   ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_REQUIRES(Locks::mutator_lock_);
@@ -1125,15 +1102,11 @@
                 size_t pointer_size)
       SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
 
-  // For any proxy class only. Returns list of directly implemented interfaces.
-  // The value returned is always non-null.
-  ObjectArray<Class>* GetInterfacesForAnyProxy() SHARED_REQUIRES(Locks::mutator_lock_);
+  // For proxy class only.
+  ObjectArray<Class>* GetInterfaces() SHARED_REQUIRES(Locks::mutator_lock_);
 
-  // For any proxy class only. Returns a 2d array of classes.
-  // -- The 0th dimension correponds to the vtable index.
-  // -- The 1st dimension is a list of checked exception classes.
-  // The value returned is always non-null.
-  ObjectArray<ObjectArray<Class>>* GetThrowsForAnyProxy() SHARED_REQUIRES(Locks::mutator_lock_);
+  // For proxy class only.
+  ObjectArray<ObjectArray<Class>>* GetThrows() SHARED_REQUIRES(Locks::mutator_lock_);
 
   // For reference class only.
   MemberOffset GetDisableIntrinsicFlagOffset() SHARED_REQUIRES(Locks::mutator_lock_);
@@ -1221,7 +1194,7 @@
   IterationRange<StrideIterator<ArtField>> GetIFieldsUnchecked()
       SHARED_REQUIRES(Locks::mutator_lock_);
 
-  bool AnyProxyDescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
+  bool ProxyDescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
 
   // Check that the pointer size matches the one in the class linker.
   ALWAYS_INLINE static void CheckPointerSize(size_t pointer_size);
diff --git a/runtime/mirror/class_loader-inl.h b/runtime/mirror/class_loader-inl.h
index 3139117..e22ddd7 100644
--- a/runtime/mirror/class_loader-inl.h
+++ b/runtime/mirror/class_loader-inl.h
@@ -21,7 +21,6 @@
 
 #include "base/mutex-inl.h"
 #include "class_table-inl.h"
-#include "lambda/box_class_table-inl.h"
 
 namespace art {
 namespace mirror {
@@ -36,10 +35,6 @@
   if (class_table != nullptr) {
     class_table->VisitRoots(visitor);
   }
-  lambda::BoxClassTable* const lambda_box_class_table = GetLambdaProxyCache();
-  if (lambda_box_class_table != nullptr) {
-    lambda_box_class_table->VisitRoots(visitor);
-  }
 }
 
 }  // namespace mirror
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index 9d4fe96..c2a65d6 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -24,12 +24,6 @@
 struct ClassLoaderOffsets;
 class ClassTable;
 
-namespace lambda {
-
-class BoxClassTable;
-
-}  // namespace lambda
-
 namespace mirror {
 
 class Class;
@@ -66,16 +60,6 @@
                       reinterpret_cast<uint64_t>(allocator));
   }
 
-  lambda::BoxClassTable* GetLambdaProxyCache() SHARED_REQUIRES(Locks::mutator_lock_) {
-    return reinterpret_cast<lambda::BoxClassTable*>(
-        GetField64(OFFSET_OF_OBJECT_MEMBER(ClassLoader, lambda_proxy_cache_)));
-  }
-
-  void SetLambdaProxyCache(lambda::BoxClassTable* cache) SHARED_REQUIRES(Locks::mutator_lock_) {
-    SetField64<false>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, lambda_proxy_cache_),
-                      reinterpret_cast<uint64_t>(cache));
-  }
-
  private:
   // Visit instance fields of the class loader as well as its associated classes.
   // Null class loader is handled by ClassLinker::VisitClassRoots.
@@ -92,7 +76,6 @@
   uint32_t padding_ ATTRIBUTE_UNUSED;
   uint64_t allocator_;
   uint64_t class_table_;
-  uint64_t lambda_proxy_cache_;
 
   friend struct art::ClassLoaderOffsets;  // for verifying offset information
   friend class Object;  // For VisitReferences
diff --git a/runtime/mirror/field-inl.h b/runtime/mirror/field-inl.h
index 49c443e..8a0daec 100644
--- a/runtime/mirror/field-inl.h
+++ b/runtime/mirror/field-inl.h
@@ -57,15 +57,14 @@
   const auto pointer_size = kTransactionActive ?
       Runtime::Current()->GetClassLinker()->GetImagePointerSize() : sizeof(void*);
   auto dex_field_index = field->GetDexFieldIndex();
-  if (field->GetDeclaringClass()->IsAnyProxyClass()) {
+  auto* resolved_field = field->GetDexCache()->GetResolvedField(dex_field_index, pointer_size);
+  if (field->GetDeclaringClass()->IsProxyClass()) {
     DCHECK(field->IsStatic());
     DCHECK_LT(dex_field_index, 2U);
     // The two static fields (interfaces, throws) of all proxy classes
     // share the same dex file indices 0 and 1. So, we can't resolve
     // them in the dex cache.
   } else {
-    ArtField* resolved_field =
-        field->GetDexCache()->GetResolvedField(dex_field_index, pointer_size);
     if (resolved_field != nullptr) {
       DCHECK_EQ(resolved_field, field);
     } else {
diff --git a/runtime/mirror/field.cc b/runtime/mirror/field.cc
index b02e5b5..ff6847c 100644
--- a/runtime/mirror/field.cc
+++ b/runtime/mirror/field.cc
@@ -56,7 +56,7 @@
 
 ArtField* Field::GetArtField() {
   mirror::Class* declaring_class = GetDeclaringClass();
-  if (UNLIKELY(declaring_class->IsAnyProxyClass())) {
+  if (UNLIKELY(declaring_class->IsProxyClass())) {
     DCHECK(IsStatic());
     DCHECK_EQ(declaring_class->NumStaticFields(), 2U);
     // 0 == Class[] interfaces; 1 == Class[][] throws;
diff --git a/runtime/mirror/lambda_proxy.h b/runtime/mirror/lambda_proxy.h
deleted file mode 100644
index cff3a12..0000000
--- a/runtime/mirror/lambda_proxy.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_MIRROR_LAMBDA_PROXY_H_
-#define ART_RUNTIME_MIRROR_LAMBDA_PROXY_H_
-
-#include "lambda/closure.h"
-#include "object.h"
-
-namespace art {
-
-struct LambdaProxyOffsets;
-
-namespace mirror {
-
-// C++ mirror of a lambda proxy. Does not yet have a Java-equivalent source file.
-class MANAGED LambdaProxy FINAL : public Object {
- public:
-  // Note that the runtime subclasses generate the following static fields:
-
-  // private static java.lang.Class[] interfaces;  // Declared interfaces for the lambda interface.
-  static constexpr size_t kStaticFieldIndexInterfaces = 0;
-  // private static java.lang.Class[][] throws;    // Maps vtable id to list of classes.
-  static constexpr size_t kStaticFieldIndexThrows = 1;
-  static constexpr size_t kStaticFieldCount = 2;   // Number of fields total.
-
-  // The offset from the start of 'LambdaProxy' object, to the closure_ field, in bytes.
-  // -- This is exposed publically in order to avoid exposing 'closure_' publically.
-  // -- Only meant to be used in stubs and other compiled code, not in runtime.
-  static inline MemberOffset GetInstanceFieldOffsetClosure() {
-    return OFFSET_OF_OBJECT_MEMBER(LambdaProxy, closure_);
-  }
-
-  // Direct methods available on the class:
-  static constexpr size_t kDirectMethodIndexConstructor = 0;  // <init>()V
-  static constexpr size_t kDirectMethodCount = 1;             // Only the constructor.
-
-  // Accessors to the fields:
-
-  // Get the native closure pointer. Usually non-null outside of lambda proxy contexts.
-  lambda::Closure* GetClosure() SHARED_REQUIRES(Locks::mutator_lock_) {
-    return reinterpret_cast<lambda::Closure*>(
-        GetField64(GetInstanceFieldOffsetClosure()));
-  }
-
-  // Set the native closure pointer. Usually should be non-null outside of lambda proxy contexts.
-  void SetClosure(lambda::Closure* closure) SHARED_REQUIRES(Locks::mutator_lock_) {
-    SetField64<false>(GetInstanceFieldOffsetClosure(),
-                      reinterpret_cast<uint64_t>(closure));
-  }
-
- private:
-  // Instance fields, present in the base class and every generated subclass:
-
-  // private long closure;
-  union {
-    lambda::Closure* actual;
-    uint64_t padding;         // Don't trip up GetObjectSize checks, since the Java code has a long.
-  } closure_;
-
-  // Friends for generating offset tests:
-  friend struct art::LambdaProxyOffsets;              // for verifying offset information
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(LambdaProxy);
-};
-
-}  // namespace mirror
-}  // namespace art
-
-#endif  // ART_RUNTIME_MIRROR_LAMBDA_PROXY_H_
diff --git a/runtime/modifiers.h b/runtime/modifiers.h
index 36aa57f..9946eab 100644
--- a/runtime/modifiers.h
+++ b/runtime/modifiers.h
@@ -54,8 +54,6 @@
 // if any particular method needs to be a default conflict. Used to figure out at runtime if
 // invoking this method will throw an exception.
 static constexpr uint32_t kAccDefaultConflict =      0x00800000;  // method (runtime)
-// Set by the class linker when creating a class that's a subtype of LambdaProxy.
-static constexpr uint32_t kAccClassIsLambdaProxy =   0x01000000;  // class  (dex only)
 
 // Special runtime-only flags.
 // Interface and all its super-interfaces with default methods have been recursively initialized.
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 6cebd4d..5e42392 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -103,7 +103,7 @@
 static jobjectArray Class_getProxyInterfaces(JNIEnv* env, jobject javaThis) {
   ScopedFastNativeObjectAccess soa(env);
   mirror::Class* c = DecodeClass(soa, javaThis);
-  return soa.AddLocalReference<jobjectArray>(c->GetInterfacesForAnyProxy()->Clone(soa.Self()));
+  return soa.AddLocalReference<jobjectArray>(c->GetInterfaces()->Clone(soa.Self()));
 }
 
 static mirror::ObjectArray<mirror::Field>* GetDeclaredFields(
@@ -489,7 +489,7 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<2> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
-  if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) {
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return nullptr;
   }
   Handle<mirror::Class> annotation_class(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
@@ -501,7 +501,7 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
-  if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) {
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     // Return an empty array instead of a null pointer.
     mirror::Class* annotation_array_class =
         soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array);
@@ -517,7 +517,7 @@
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
   mirror::ObjectArray<mirror::Class>* classes = nullptr;
-  if (!klass->IsAnyProxyClass() && klass->GetDexCache() != nullptr) {
+  if (!klass->IsProxyClass() && klass->GetDexCache() != nullptr) {
     classes = klass->GetDexFile().GetDeclaredClasses(klass);
   }
   if (classes == nullptr) {
@@ -543,7 +543,7 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
-  if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) {
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return nullptr;
   }
   return soa.AddLocalReference<jclass>(klass->GetDexFile().GetEnclosingClass(klass));
@@ -553,7 +553,7 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
-  if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) {
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return nullptr;
   }
   mirror::Object* method = klass->GetDexFile().GetEnclosingMethod(klass);
@@ -570,7 +570,7 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
-  if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) {
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return nullptr;
   }
   mirror::Object* method = klass->GetDexFile().GetEnclosingMethod(klass);
@@ -587,7 +587,7 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
-  if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) {
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return defaultValue;
   }
   uint32_t flags;
@@ -601,7 +601,7 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
-  if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) {
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return nullptr;
   }
   mirror::String* class_name = nullptr;
@@ -615,7 +615,7 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
-  if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) {
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return false;
   }
   mirror::String* class_name = nullptr;
@@ -630,7 +630,7 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<2> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
-  if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) {
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return false;
   }
   Handle<mirror::Class> annotation_class(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
@@ -641,7 +641,7 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> klass(hs.NewHandle(DecodeClass(soa, javaThis)));
-  if (klass->IsAnyProxyClass() || klass->GetDexCache() == nullptr) {
+  if (klass->IsProxyClass() || klass->GetDexCache() == nullptr) {
     return nullptr;
   }
   // Return null for anonymous classes.
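
The repeated guard in these natives reflects one design decision: proxy classes (and any class without a dex cache) have no dex-backed metadata, so annotation and enclosing-scope queries degrade to null, false, the supplied default, or an empty array rather than touching a dex file. A small sketch of the visible behavior (demo class names are ours):

import java.lang.reflect.Proxy;

public class ProxyMetadataDemo {
  interface I {}

  public static void main(String[] args) {
    Class<?> pc = Proxy.newProxyInstance(
        ProxyMetadataDemo.class.getClassLoader(),
        new Class<?>[] { I.class },
        (p, m, a) -> null).getClass();
    // Proxy classes are generated at runtime with no dex file backing,
    // so the metadata natives guarded above fall back to defaults:
    System.out.println(pc.getEnclosingMethod());              // null
    System.out.println(pc.getEnclosingConstructor());         // null
    System.out.println(pc.getDeclaredAnnotations().length);   // 0
  }
}
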
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index 9166ecc..aac800a 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -419,7 +419,7 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   ArtField* field = soa.Decode<mirror::Field*>(javaField)->GetArtField();
-  if (field->GetDeclaringClass()->IsAnyProxyClass()) {
+  if (field->GetDeclaringClass()->IsProxyClass()) {
     return nullptr;
   }
   Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
@@ -429,7 +429,7 @@
 static jobjectArray Field_getDeclaredAnnotations(JNIEnv* env, jobject javaField) {
   ScopedFastNativeObjectAccess soa(env);
   ArtField* field = soa.Decode<mirror::Field*>(javaField)->GetArtField();
-  if (field->GetDeclaringClass()->IsAnyProxyClass()) {
+  if (field->GetDeclaringClass()->IsProxyClass()) {
     // Return an empty array instead of a null pointer.
     mirror::Class* annotation_array_class =
         soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array);
@@ -443,7 +443,7 @@
 static jobjectArray Field_getSignatureAnnotation(JNIEnv* env, jobject javaField) {
   ScopedFastNativeObjectAccess soa(env);
   ArtField* field = soa.Decode<mirror::Field*>(javaField)->GetArtField();
-  if (field->GetDeclaringClass()->IsAnyProxyClass()) {
+  if (field->GetDeclaringClass()->IsProxyClass()) {
     return nullptr;
   }
   return soa.AddLocalReference<jobjectArray>(
@@ -455,7 +455,7 @@
   ScopedFastNativeObjectAccess soa(env);
   StackHandleScope<1> hs(soa.Self());
   ArtField* field = soa.Decode<mirror::Field*>(javaField)->GetArtField();
-  if (field->GetDeclaringClass()->IsAnyProxyClass()) {
+  if (field->GetDeclaringClass()->IsProxyClass()) {
     return false;
   }
   Handle<mirror::Class> klass(hs.NewHandle(soa.Decode<mirror::Class*>(annotationType)));
diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc
index 7894c9b..caacba6 100644
--- a/runtime/native/java_lang_reflect_Method.cc
+++ b/runtime/native/java_lang_reflect_Method.cc
@@ -32,7 +32,7 @@
 static jobject Method_getAnnotationNative(JNIEnv* env, jobject javaMethod, jclass annotationType) {
   ScopedFastNativeObjectAccess soa(env);
   ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
-  if (method->GetDeclaringClass()->IsAnyProxyClass()) {
+  if (method->GetDeclaringClass()->IsProxyClass()) {
     return nullptr;
   }
   StackHandleScope<1> hs(soa.Self());
@@ -44,7 +44,7 @@
 static jobjectArray Method_getDeclaredAnnotations(JNIEnv* env, jobject javaMethod) {
   ScopedFastNativeObjectAccess soa(env);
   ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
-  if (method->GetDeclaringClass()->IsAnyProxyClass()) {
+  if (method->GetDeclaringClass()->IsProxyClass()) {
     // Return an empty array instead of a null pointer.
     mirror::Class* annotation_array_class =
         soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_annotation_Annotation__array);
@@ -67,7 +67,7 @@
 static jobjectArray Method_getExceptionTypes(JNIEnv* env, jobject javaMethod) {
   ScopedFastNativeObjectAccess soa(env);
   ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
-  if (method->GetDeclaringClass()->IsAnyProxyClass()) {
+  if (method->GetDeclaringClass()->IsProxyClass()) {
     mirror::Class* klass = method->GetDeclaringClass();
     int throws_index = -1;
     size_t i = 0;
@@ -79,8 +79,7 @@
       ++i;
     }
     CHECK_NE(throws_index, -1);
-    mirror::ObjectArray<mirror::Class>* declared_exceptions =
-        klass->GetThrowsForAnyProxy()->Get(throws_index);
+    mirror::ObjectArray<mirror::Class>* declared_exceptions = klass->GetThrows()->Get(throws_index);
     return soa.AddLocalReference<jobjectArray>(declared_exceptions->Clone(soa.Self()));
   } else {
     mirror::ObjectArray<mirror::Class>* result_array =
@@ -105,7 +104,7 @@
 static jobjectArray Method_getParameterAnnotationsNative(JNIEnv* env, jobject javaMethod) {
   ScopedFastNativeObjectAccess soa(env);
   ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
-  if (method->GetDeclaringClass()->IsAnyProxyClass()) {
+  if (method->GetDeclaringClass()->IsProxyClass()) {
     return nullptr;
   }
   return soa.AddLocalReference<jobjectArray>(method->GetDexFile()->GetParameterAnnotations(method));
@@ -121,7 +120,7 @@
                                                  jclass annotationType) {
   ScopedFastNativeObjectAccess soa(env);
   ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod);
-  if (method->GetDeclaringClass()->IsAnyProxyClass()) {
+  if (method->GetDeclaringClass()->IsProxyClass()) {
     return false;
   }
   StackHandleScope<1> hs(soa.Self());
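
The proxy branch above locates the method's index among the class's virtual methods and uses it to index the `throws` table (now via `GetThrows()`). The visible contract, sketched in plain Java (the interface and exception choice are ours):

import java.io.IOException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.Arrays;

public class ProxyThrowsDemo {
  interface I { void f() throws IOException; }

  public static void main(String[] args) throws Exception {
    Object p = Proxy.newProxyInstance(
        ProxyThrowsDemo.class.getClassLoader(),
        new Class<?>[] { I.class },
        (proxy, method, a) -> null);
    Method f = p.getClass().getMethod("f");
    // For a proxy method the declared exceptions come from the per-method
    // "throws" table built at proxy generation time, not from a dex file.
    System.out.println(Arrays.toString(f.getExceptionTypes()));
    // Expected output: [class java.io.IOException]
  }
}
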
diff --git a/runtime/native/java_lang_reflect_Proxy.cc b/runtime/native/java_lang_reflect_Proxy.cc
index 647cec0..4a6ab40 100644
--- a/runtime/native/java_lang_reflect_Proxy.cc
+++ b/runtime/native/java_lang_reflect_Proxy.cc
@@ -27,31 +27,15 @@
 namespace art {
 
 static jclass Proxy_generateProxy(JNIEnv* env, jclass, jstring name, jobjectArray interfaces,
-                                  jobject loader, jobjectArray methods, jobjectArray throws,
-                                  jboolean is_lambda_proxy) {
+                                  jobject loader, jobjectArray methods, jobjectArray throws) {
   ScopedFastNativeObjectAccess soa(env);
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-
-  mirror::Class* proxy_class = nullptr;
-
-  if (UNLIKELY(is_lambda_proxy)) {
-    bool already_exists;  // XX: Perhaps add lambdaProxyCache to java.lang.ClassLoader ?
-    proxy_class = class_linker->CreateLambdaProxyClass(soa,
-                                                       name,
-                                                       interfaces,
-                                                       loader,
-                                                       methods,
-                                                       throws,
-                                                       /*out*/&already_exists);
-  } else {
-    proxy_class = class_linker->CreateProxyClass(soa, name, interfaces, loader, methods, throws);
-  }
-
-  return soa.AddLocalReference<jclass>(proxy_class);
+  return soa.AddLocalReference<jclass>(class_linker->CreateProxyClass(
+      soa, name, interfaces, loader, methods, throws));
 }
 
 static JNINativeMethod gMethods[] = {
-  NATIVE_METHOD(Proxy, generateProxy, "!(Ljava/lang/String;[Ljava/lang/Class;Ljava/lang/ClassLoader;[Ljava/lang/reflect/Method;[[Ljava/lang/Class;Z)Ljava/lang/Class;"),
+  NATIVE_METHOD(Proxy, generateProxy, "!(Ljava/lang/String;[Ljava/lang/Class;Ljava/lang/ClassLoader;[Ljava/lang/reflect/Method;[[Ljava/lang/Class;)Ljava/lang/Class;"),
 };
 
 void register_java_lang_reflect_Proxy(JNIEnv* env) {
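
With the lambda path removed, `generateProxy` is again a thin wrapper over `ClassLinker::CreateProxyClass`. From managed code it is only reached indirectly; a minimal sketch of the triggering call (the handler and interface are ours):

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;

public class GenerateProxyDemo {
  interface Greeter { String greet(String who); }

  public static void main(String[] args) {
    InvocationHandler h = (proxy, method, a) -> "hello, " + a[0];
    // Proxy.newProxyInstance assembles the class name, the interface
    // list, the method list, and the per-method throws lists, then
    // calls the native generateProxy entry point shown above.
    Greeter g = (Greeter) Proxy.newProxyInstance(
        GenerateProxyDemo.class.getClassLoader(),
        new Class<?>[] { Greeter.class }, h);
    System.out.println(g.greet("world"));  // hello, world
  }
}
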
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 57aafcc..57472ad 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -121,7 +121,7 @@
       GenerateProxyClass(soa, jclass_loader, "$Proxy1234", interfaces)));
   interfaces.clear();  // Don't leave possibly stale objects in the array, as good practice.
   ASSERT_TRUE(proxy_class.Get() != nullptr);
-  ASSERT_TRUE(proxy_class->IsReflectProxyClass());
+  ASSERT_TRUE(proxy_class->IsProxyClass());
   ASSERT_TRUE(proxy_class->IsInitialized());
 
   EXPECT_EQ(2U, proxy_class->NumDirectInterfaces());  // Interfaces$I and Interfaces$J.
@@ -157,7 +157,7 @@
   }
 
   ASSERT_TRUE(proxyClass.Get() != nullptr);
-  ASSERT_TRUE(proxyClass->IsReflectProxyClass());
+  ASSERT_TRUE(proxyClass->IsProxyClass());
   ASSERT_TRUE(proxyClass->IsInitialized());
 
   EXPECT_TRUE(proxyClass->GetIFieldsPtr() == nullptr);
@@ -208,10 +208,10 @@
   }
 
   ASSERT_TRUE(proxyClass0.Get() != nullptr);
-  ASSERT_TRUE(proxyClass0->IsReflectProxyClass());
+  ASSERT_TRUE(proxyClass0->IsProxyClass());
   ASSERT_TRUE(proxyClass0->IsInitialized());
   ASSERT_TRUE(proxyClass1.Get() != nullptr);
-  ASSERT_TRUE(proxyClass1->IsReflectProxyClass());
+  ASSERT_TRUE(proxyClass1->IsProxyClass());
   ASSERT_TRUE(proxyClass1->IsInitialized());
 
   LengthPrefixedArray<ArtField>* static_fields0 = proxyClass0->GetSFieldsPtr();
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 2ff9fd2..9098d38 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -172,23 +172,12 @@
     } else {
       return cur_shadow_frame_->GetVRegReference(0);
     }
-  } else if (m->IsReflectProxyMethod()) {
+  } else if (m->IsProxyMethod()) {
     if (cur_quick_frame_ != nullptr) {
       return artQuickGetProxyThisObject(cur_quick_frame_);
     } else {
       return cur_shadow_frame_->GetVRegReference(0);
     }
-  } else if (m->IsLambdaProxyMethod()) {
-    if (cur_quick_frame_ != nullptr) {
-      // XX: Should be safe to return null here, the lambda proxies
-      // don't set up their own quick frame because they don't need to spill any registers.
-      // By the time we are executing inside of the final target of the proxy invoke,
-      // the original 'this' reference is no longer live.
-      LOG(WARNING) << "Lambda proxies don't have a quick frame, do they?!";
-      return nullptr;
-    } else {
-      return cur_shadow_frame_->GetVRegReference(0);
-    }
   } else {
     const DexFile::CodeItem* code_item = m->GetCodeItem();
     if (code_item == nullptr) {
@@ -825,27 +814,7 @@
     // compiled method without any stubs. Therefore the method must have an OatQuickMethodHeader.
     DCHECK(!method->IsDirect() && !method->IsConstructor())
         << "Constructors of proxy classes must have an OatQuickMethodHeader";
-
-    if (method->IsReflectProxyMethod()) {
-      return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
-    } else if (method->IsLambdaProxyMethod()) {
-      // Set this to true later once every stub works without a frame.
-      // This is currently 'false' because using a closure as a "long"
-      // requires a quick frame to be set up on 32-bit architectures.
-      constexpr bool kLambdaProxyStubsSupportFrameless = false;
-      if (kIsDebugBuild || !kLambdaProxyStubsSupportFrameless) {
-        // When debugging we always use the 'RefAndArgs' quick frame to allow us
-        // to see a runtime stub when unwinding.
-        return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
-      } else {
-        // Lambda proxies don't bother setting up a quick frame for release builds.
-        LOG(FATAL) << "Requested QuickMethodFrameInfo for a lambda proxy,"
-                   << "but it doesn't have one, for method: " << PrettyMethod(method);
-        UNREACHABLE();
-      }
-    } else {
-      LOG(FATAL) << "Unknown type of proxy method " << PrettyMethod(method);
-    }
+    return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
   }
 
   // The only remaining case is if the method is native and uses the generic JNI stub.
diff --git a/test/458-checker-instruction-simplification/src/Main.java b/test/458-checker-instruction-simplification/src/Main.java
index d5fed2a..6151fc1 100644
--- a/test/458-checker-instruction-simplification/src/Main.java
+++ b/test/458-checker-instruction-simplification/src/Main.java
@@ -389,24 +389,6 @@
     return arg << 0;
   }
 
-  /// CHECK-START: int Main.Shl1(int) instruction_simplifier (before)
-  /// CHECK-DAG:     <<Arg:i\d+>>      ParameterValue
-  /// CHECK-DAG:     <<Const1:i\d+>>   IntConstant 1
-  /// CHECK-DAG:     <<Shl:i\d+>>      Shl [<<Arg>>,<<Const1>>]
-  /// CHECK-DAG:                       Return [<<Shl>>]
-
-  /// CHECK-START: int Main.Shl1(int) instruction_simplifier (after)
-  /// CHECK-DAG:     <<Arg:i\d+>>      ParameterValue
-  /// CHECK-DAG:     <<Add:i\d+>>      Add [<<Arg>>,<<Arg>>]
-  /// CHECK-DAG:                       Return [<<Add>>]
-
-  /// CHECK-START: int Main.Shl1(int) instruction_simplifier (after)
-  /// CHECK-NOT:                       Shl
-
-  public static int Shl1(int arg) {
-    return arg << 1;
-  }
-
   /// CHECK-START: long Main.Shr0(long) instruction_simplifier (before)
   /// CHECK-DAG:     <<Arg:j\d+>>      ParameterValue
   /// CHECK-DAG:     <<Const0:i\d+>>   IntConstant 0
@@ -1245,7 +1227,6 @@
     return arg * 9;
   }
 
-
   /**
    * Test strength reduction of factors of the form (2^n - 1).
    */
@@ -1265,6 +1246,91 @@
     return arg * 31;
   }
 
+  /// CHECK-START: int Main.booleanFieldNotEqualOne() instruction_simplifier (before)
+  /// CHECK-DAG:      <<Const1:i\d+>>   IntConstant 1
+  /// CHECK-DAG:      <<Field:z\d+>>    StaticFieldGet
+  /// CHECK-DAG:      <<NE:z\d+>>       NotEqual [<<Field>>,<<Const1>>]
+  /// CHECK-DAG:                        If [<<NE>>]
+
+  /// CHECK-START: int Main.booleanFieldNotEqualOne() instruction_simplifier (after)
+  /// CHECK-DAG:      <<Field:z\d+>>    StaticFieldGet
+  /// CHECK-DAG:      <<Not:z\d+>>      BooleanNot [<<Field>>]
+  /// CHECK-DAG:                        If [<<Not>>]
+
+  public static int booleanFieldNotEqualOne() {
+    return (booleanField == true) ? 13 : 54;
+  }
+
+  /// CHECK-START: int Main.booleanFieldEqualZero() instruction_simplifier (before)
+  /// CHECK-DAG:      <<Const0:i\d+>>   IntConstant 0
+  /// CHECK-DAG:      <<Field:z\d+>>    StaticFieldGet
+  /// CHECK-DAG:      <<EQ:z\d+>>       Equal [<<Field>>,<<Const0>>]
+  /// CHECK-DAG:                        If [<<EQ>>]
+
+  /// CHECK-START: int Main.booleanFieldEqualZero() instruction_simplifier (after)
+  /// CHECK-DAG:      <<Field:z\d+>>    StaticFieldGet
+  /// CHECK-DAG:      <<Not:z\d+>>      BooleanNot [<<Field>>]
+  /// CHECK-DAG:                        If [<<Not>>]
+
+  public static int booleanFieldEqualZero() {
+    return (booleanField != false) ? 13 : 54;
+  }
+
+  /// CHECK-START: int Main.intConditionNotEqualOne(int) instruction_simplifier_after_bce (before)
+  /// CHECK-DAG:      <<Arg:i\d+>>      ParameterValue
+  /// CHECK-DAG:      <<Const1:i\d+>>   IntConstant 1
+  /// CHECK-DAG:      <<Const42:i\d+>>  IntConstant 42
+  /// CHECK-DAG:      <<GT:z\d+>>       GreaterThan [<<Arg>>,<<Const42>>]
+  /// CHECK-DAG:      <<NE:z\d+>>       NotEqual [<<GT>>,<<Const1>>]
+  /// CHECK-DAG:                        If [<<NE>>]
+
+  /// CHECK-START: int Main.intConditionNotEqualOne(int) instruction_simplifier_after_bce (after)
+  /// CHECK-DAG:      <<Arg:i\d+>>      ParameterValue
+  /// CHECK-DAG:      <<Const42:i\d+>>  IntConstant 42
+  /// CHECK-DAG:                        If [<<LE:z\d+>>]
+  /// CHECK-DAG:      <<LE>>            LessThanOrEqual [<<Arg>>,<<Const42>>]
+  // Note that we match `LE` from If because there are two identical LessThanOrEqual instructions.
+
+  public static int intConditionNotEqualOne(int i) {
+    return ((i > 42) == true) ? 13 : 54;
+  }
+
+  /// CHECK-START: int Main.intConditionEqualZero(int) instruction_simplifier_after_bce (before)
+  /// CHECK-DAG:      <<Arg:i\d+>>      ParameterValue
+  /// CHECK-DAG:      <<Const0:i\d+>>   IntConstant 0
+  /// CHECK-DAG:      <<Const42:i\d+>>  IntConstant 42
+  /// CHECK-DAG:      <<GT:z\d+>>       GreaterThan [<<Arg>>,<<Const42>>]
+  /// CHECK-DAG:      <<EQ:z\d+>>       Equal [<<GT>>,<<Const0>>]
+  /// CHECK-DAG:                        If [<<EQ>>]
+
+  /// CHECK-START: int Main.intConditionEqualZero(int) instruction_simplifier_after_bce (after)
+  /// CHECK-DAG:      <<Arg:i\d+>>      ParameterValue
+  /// CHECK-DAG:      <<Const42:i\d+>>  IntConstant 42
+  /// CHECK-DAG:                        If [<<LE:z\d+>>]
+  /// CHECK-DAG:      <<LE>>            LessThanOrEqual [<<Arg>>,<<Const42>>]
+  // Note that we match `LE` from If because there are two identical LessThanOrEqual instructions.
+
+  public static int intConditionEqualZero(int i) {
+    return ((i > 42) != false) ? 13 : 54;
+  }
+
+  // Test that conditions on float/double are not flipped.
+
+  /// CHECK-START: int Main.floatConditionNotEqualOne(float) register (before)
+  /// CHECK-DAG:      <<Const1:i\d+>>   IntConstant 1
+  /// CHECK-DAG:                        NotEqual [{{i\d+}},<<Const1>>]
+
+  public static int floatConditionNotEqualOne(float f) {
+    return ((f > 42.0f) == true) ? 13 : 54;
+  }
+
+  /// CHECK-START: int Main.doubleConditionEqualZero(double) register (before)
+  /// CHECK-DAG:      <<Const0:i\d+>>   IntConstant 0
+  /// CHECK-DAG:                        Equal [{{i\d+}},<<Const0>>]
+
+  public static int doubleConditionEqualZero(double d) {
+    return ((d > 42.0) != false) ? 13 : 54;
+  }
 
   public static void main(String[] args) {
     int arg = 123456;
@@ -1314,7 +1380,6 @@
     assertDoubleEquals(Div2(150.0), 75.0);
     assertFloatEquals(DivMP25(100.0f), -400.0f);
     assertDoubleEquals(DivMP25(150.0), -600.0);
-    assertLongEquals(Shl1(100), 200);
     assertIntEquals(UShr28And15(0xc1234567), 0xc);
     assertLongEquals(UShr60And15(0xc123456787654321L), 0xcL);
     assertIntEquals(UShr28And7(0xc1234567), 0x4);
@@ -1333,5 +1398,22 @@
     assertLongEquals(62, mulPow2Minus1(2));
     assertLongEquals(3100, mulPow2Minus1(100));
     assertLongEquals(382695, mulPow2Minus1(12345));
+
+    booleanField = false;
+    assertIntEquals(booleanFieldNotEqualOne(), 54);
+    assertIntEquals(booleanFieldEqualZero(), 54);
+    booleanField = true;
+    assertIntEquals(booleanFieldNotEqualOne(), 13);
+    assertIntEquals(booleanFieldEqualZero(), 13);
+    assertIntEquals(intConditionNotEqualOne(6), 54);
+    assertIntEquals(intConditionNotEqualOne(43), 13);
+    assertIntEquals(intConditionEqualZero(6), 54);
+    assertIntEquals(intConditionEqualZero(43), 13);
+    assertIntEquals(floatConditionNotEqualOne(6.0f), 54);
+    assertIntEquals(floatConditionNotEqualOne(43.0f), 13);
+    assertIntEquals(doubleConditionEqualZero(6.0), 54);
+    assertIntEquals(doubleConditionEqualZero(43.0), 13);
   }
+
+  public static boolean booleanField;
 }
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
index 13c4722..17e88ce 100644
--- a/test/530-checker-lse/src/Main.java
+++ b/test/530-checker-lse/src/Main.java
@@ -136,6 +136,9 @@
 
   // A new allocation shouldn't alias with pre-existing values.
   static int test3(TestClass obj) {
+    // Do an allocation here to avoid the HLoadClass and HClinitCheck
+    // at the second allocation.
+    new TestClass();
     obj.i = 1;
     obj.next.j = 2;
     TestClass obj2 = new TestClass();
diff --git a/test/538-checker-embed-constants/src/Main.java b/test/538-checker-embed-constants/src/Main.java
index 12f0380..f791adf 100644
--- a/test/538-checker-embed-constants/src/Main.java
+++ b/test/538-checker-embed-constants/src/Main.java
@@ -260,26 +260,43 @@
     return arg ^ 0xf00000000000000fL;
   }
 
-  /// CHECK-START-ARM: long Main.shl2(long) disassembly (after)
-  /// CHECK:                lsl{{s?|.w}} <<oh:r\d+>>, {{r\d+}}, #2
-  /// CHECK:                orr.w <<oh>>, <<oh>>, <<low:r\d+>>, lsr #30
-  /// CHECK-DAG:            lsl{{s?|.w}} {{r\d+}}, <<low>>, #2
+  /// CHECK-START-ARM: long Main.shl1(long) disassembly (after)
+  /// CHECK:                lsls{{(\.w)?}} {{r\d+}}, {{r\d+}}, #1
+  /// CHECK:                adc{{(\.w)?}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+
+  /// CHECK-START-ARM: long Main.shl1(long) disassembly (after)
+  /// CHECK-NOT:            lsl{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+
+  /// CHECK-START-X86: long Main.shl1(long) disassembly (after)
+  /// CHECK:                add
+  /// CHECK:                adc
+
+  /// CHECK-START-X86: long Main.shl1(long) disassembly (after)
+  /// CHECK-NOT:            shl
+
+  public static long shl1(long arg) {
+    return arg << 1;
+  }
 
   /// CHECK-START-ARM: long Main.shl2(long) disassembly (after)
-  /// CHECK-NOT:            lsl{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+  /// CHECK:                lsl{{s?|\.w}} <<oh:r\d+>>, {{r\d+}}, #2
+  /// CHECK:                orr.w <<oh>>, <<oh>>, <<low:r\d+>>, lsr #30
+  /// CHECK:                lsl{{s?|\.w}} {{r\d+}}, <<low>>, #2
+
+  /// CHECK-START-ARM: long Main.shl2(long) disassembly (after)
+  /// CHECK-NOT:            lsl{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
 
   public static long shl2(long arg) {
-    // Note: Shl(x, 1) is transformed to Add(x, x), so test Shl(x, 2).
     return arg << 2;
   }
 
   /// CHECK-START-ARM: long Main.shl31(long) disassembly (after)
-  /// CHECK:                lsl{{s?|.w}} <<oh:r\d+>>, {{r\d+}}, #31
+  /// CHECK:                lsl{{s?|\.w}} <<oh:r\d+>>, {{r\d+}}, #31
   /// CHECK:                orr.w <<oh>>, <<oh>>, <<low:r\d+>>, lsr #1
-  /// CHECK:                lsl{{s?|.w}} {{r\d+}}, <<low>>, #31
+  /// CHECK:                lsl{{s?|\.w}} {{r\d+}}, <<low>>, #31
 
   /// CHECK-START-ARM: long Main.shl31(long) disassembly (after)
-  /// CHECK-NOT:            lsl{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+  /// CHECK-NOT:            lsl{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
 
   public static long shl31(long arg) {
     return arg << 31;
@@ -287,114 +304,136 @@
 
   /// CHECK-START-ARM: long Main.shl32(long) disassembly (after)
   /// CHECK-DAG:            mov {{r\d+}}, {{r\d+}}
-  /// CHECK-DAG:            mov{{s?|.w}} {{r\d+}}, #0
+  /// CHECK-DAG:            mov{{s?|\.w}} {{r\d+}}, #0
 
   /// CHECK-START-ARM: long Main.shl32(long) disassembly (after)
-  /// CHECK-NOT:            lsl{{s?|.w}}
+  /// CHECK-NOT:            lsl{{s?|\.w}}
 
   public static long shl32(long arg) {
     return arg << 32;
   }
 
   /// CHECK-START-ARM: long Main.shl33(long) disassembly (after)
-  /// CHECK-DAG:            lsl{{s?|.w}} {{r\d+}}, <<high:r\d+>>, #1
-  /// CHECK-DAG:            mov{{s?|.w}} {{r\d+}}, #0
+  /// CHECK-DAG:            lsl{{s?|\.w}} {{r\d+}}, <<high:r\d+>>, #1
+  /// CHECK-DAG:            mov{{s?|\.w}} {{r\d+}}, #0
 
   /// CHECK-START-ARM: long Main.shl33(long) disassembly (after)
-  /// CHECK-NOT:            lsl{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+  /// CHECK-NOT:            lsl{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
 
   public static long shl33(long arg) {
     return arg << 33;
   }
 
   /// CHECK-START-ARM: long Main.shl63(long) disassembly (after)
-  /// CHECK-DAG:            lsl{{s?|.w}} {{r\d+}}, <<high:r\d+>>, #31
-  /// CHECK-DAG:            mov{{s?|.w}} {{r\d+}}, #0
+  /// CHECK-DAG:            lsl{{s?|\.w}} {{r\d+}}, <<high:r\d+>>, #31
+  /// CHECK-DAG:            mov{{s?|\.w}} {{r\d+}}, #0
 
   /// CHECK-START-ARM: long Main.shl63(long) disassembly (after)
-  /// CHECK-NOT:            lsl{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+  /// CHECK-NOT:            lsl{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
 
   public static long shl63(long arg) {
     return arg << 63;
   }
 
   /// CHECK-START-ARM: long Main.shr1(long) disassembly (after)
-  /// CHECK:                lsr{{s?|.w}} <<ol:r\d+>>, {{r\d+}}, #1
-  /// CHECK:                orr.w <<ol>>, <<ol>>, <<high:r\d+>>, lsl #31
-  /// CHECK-DAG:            asr{{s?|.w}} {{r\d+}}, <<high>>, #1
+  /// CHECK:                asrs{{(\.w)?}} {{r\d+}}, {{r\d+}}, #1
+  /// CHECK:                mov.w {{r\d+}}, {{r\d+}}, rrx
 
   /// CHECK-START-ARM: long Main.shr1(long) disassembly (after)
-  /// CHECK-NOT:            asr{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+  /// CHECK-NOT:            asr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
 
   public static long shr1(long arg) {
     return arg >> 1;
   }
 
-  /// CHECK-START-ARM: long Main.shr31(long) disassembly (after)
-  /// CHECK:                lsr{{s?|.w}} <<ol:r\d+>>, {{r\d+}}, #31
-  /// CHECK:                orr.w <<ol>>, <<ol>>, <<high:r\d+>>, lsl #1
-  /// CHECK:                asr{{s?|.w}} {{r\d+}}, <<high>>, #31
+  /// CHECK-START-ARM: long Main.shr2(long) disassembly (after)
+  /// CHECK:                lsr{{s?|\.w}} <<ol:r\d+>>, {{r\d+}}, #2
+  /// CHECK:                orr.w <<ol>>, <<ol>>, <<high:r\d+>>, lsl #30
+  /// CHECK-DAG:            asr{{s?|\.w}} {{r\d+}}, <<high>>, #2
+
+  /// CHECK-START-ARM: long Main.shr2(long) disassembly (after)
+  /// CHECK-NOT:            asr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+
+  public static long shr2(long arg) {
+    return arg >> 2;
+  }
 
   /// CHECK-START-ARM: long Main.shr31(long) disassembly (after)
-  /// CHECK-NOT:            asr{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+  /// CHECK:                lsr{{s?|\.w}} <<ol:r\d+>>, {{r\d+}}, #31
+  /// CHECK:                orr.w <<ol>>, <<ol>>, <<high:r\d+>>, lsl #1
+  /// CHECK:                asr{{s?|\.w}} {{r\d+}}, <<high>>, #31
+
+  /// CHECK-START-ARM: long Main.shr31(long) disassembly (after)
+  /// CHECK-NOT:            asr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
 
   public static long shr31(long arg) {
     return arg >> 31;
   }
 
   /// CHECK-START-ARM: long Main.shr32(long) disassembly (after)
-  /// CHECK-DAG:            asr{{s?|.w}} {{r\d+}}, <<high:r\d+>>, #31
+  /// CHECK-DAG:            asr{{s?|\.w}} {{r\d+}}, <<high:r\d+>>, #31
   /// CHECK-DAG:            mov {{r\d+}}, <<high>>
 
   /// CHECK-START-ARM: long Main.shr32(long) disassembly (after)
-  /// CHECK-NOT:            asr{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
-  /// CHECK-NOT:            lsr{{s?|.w}}
+  /// CHECK-NOT:            asr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+  /// CHECK-NOT:            lsr{{s?|\.w}}
 
   public static long shr32(long arg) {
     return arg >> 32;
   }
 
   /// CHECK-START-ARM: long Main.shr33(long) disassembly (after)
-  /// CHECK-DAG:            asr{{s?|.w}} {{r\d+}}, <<high:r\d+>>, #1
-  /// CHECK-DAG:            asr{{s?|.w}} {{r\d+}}, <<high>>, #31
+  /// CHECK-DAG:            asr{{s?|\.w}} {{r\d+}}, <<high:r\d+>>, #1
+  /// CHECK-DAG:            asr{{s?|\.w}} {{r\d+}}, <<high>>, #31
 
   /// CHECK-START-ARM: long Main.shr33(long) disassembly (after)
-  /// CHECK-NOT:            asr{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+  /// CHECK-NOT:            asr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
 
   public static long shr33(long arg) {
     return arg >> 33;
   }
 
   /// CHECK-START-ARM: long Main.shr63(long) disassembly (after)
-  /// CHECK-DAG:            asr{{s?|.w}} {{r\d+}}, <<high:r\d+>>, #31
-  /// CHECK-DAG:            asr{{s?|.w}} {{r\d+}}, <<high>>, #31
+  /// CHECK-DAG:            asr{{s?|\.w}} {{r\d+}}, <<high:r\d+>>, #31
+  /// CHECK-DAG:            asr{{s?|\.w}} {{r\d+}}, <<high>>, #31
 
   /// CHECK-START-ARM: long Main.shr63(long) disassembly (after)
-  /// CHECK-NOT:            asr{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+  /// CHECK-NOT:            asr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
 
   public static long shr63(long arg) {
     return arg >> 63;
   }
 
   /// CHECK-START-ARM: long Main.ushr1(long) disassembly (after)
-  /// CHECK:                lsr{{s?|.w}} <<ol:r\d+>>, {{r\d+}}, #1
-  /// CHECK:                orr.w <<ol>>, <<ol>>, <<high:r\d+>>, lsl #31
-  /// CHECK-DAG:            lsr{{s?|.w}} {{r\d+}}, <<high>>, #1
+  /// CHECK:                lsrs{{(\.w)?}} {{r\d+}}, {{r\d+}}, #1
+  /// CHECK:                mov.w {{r\d+}}, {{r\d+}}, rrx
 
   /// CHECK-START-ARM: long Main.ushr1(long) disassembly (after)
-  /// CHECK-NOT:            lsr{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+  /// CHECK-NOT:            lsr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
 
   public static long ushr1(long arg) {
     return arg >>> 1;
   }
 
-  /// CHECK-START-ARM: long Main.ushr31(long) disassembly (after)
-  /// CHECK:                lsr{{s?|.w}} <<ol:r\d+>>, {{r\d+}}, #31
-  /// CHECK:                orr.w <<ol>>, <<ol>>, <<high:r\d+>>, lsl #1
-  /// CHECK:                lsr{{s?|.w}} {{r\d+}}, <<high>>, #31
+  /// CHECK-START-ARM: long Main.ushr2(long) disassembly (after)
+  /// CHECK:                lsr{{s?|\.w}} <<ol:r\d+>>, {{r\d+}}, #2
+  /// CHECK:                orr.w <<ol>>, <<ol>>, <<high:r\d+>>, lsl #30
+  /// CHECK-DAG:            lsr{{s?|\.w}} {{r\d+}}, <<high>>, #2
+
+  /// CHECK-START-ARM: long Main.ushr2(long) disassembly (after)
+  /// CHECK-NOT:            lsr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+
+  public static long ushr2(long arg) {
+    return arg >>> 2;
+  }
 
   /// CHECK-START-ARM: long Main.ushr31(long) disassembly (after)
-  /// CHECK-NOT:            lsr{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+  /// CHECK:                lsr{{s?|\.w}} <<ol:r\d+>>, {{r\d+}}, #31
+  /// CHECK:                orr.w <<ol>>, <<ol>>, <<high:r\d+>>, lsl #1
+  /// CHECK:                lsr{{s?|\.w}} {{r\d+}}, <<high>>, #31
+
+  /// CHECK-START-ARM: long Main.ushr31(long) disassembly (after)
+  /// CHECK-NOT:            lsr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
 
   public static long ushr31(long arg) {
     return arg >>> 31;
@@ -402,32 +441,32 @@
 
   /// CHECK-START-ARM: long Main.ushr32(long) disassembly (after)
   /// CHECK-DAG:            mov {{r\d+}}, {{r\d+}}
-  /// CHECK-DAG:            mov{{s?|.w}} {{r\d+}}, #0
+  /// CHECK-DAG:            mov{{s?|\.w}} {{r\d+}}, #0
 
   /// CHECK-START-ARM: long Main.ushr32(long) disassembly (after)
-  /// CHECK-NOT:            lsr{{s?|.w}}
+  /// CHECK-NOT:            lsr{{s?|\.w}}
 
   public static long ushr32(long arg) {
     return arg >>> 32;
   }
 
   /// CHECK-START-ARM: long Main.ushr33(long) disassembly (after)
-  /// CHECK-DAG:            lsr{{s?|.w}} {{r\d+}}, {{r\d+}}, #1
-  /// CHECK-DAG:            mov{{s?|.w}} {{r\d+}}, #0
+  /// CHECK-DAG:            lsr{{s?|\.w}} {{r\d+}}, {{r\d+}}, #1
+  /// CHECK-DAG:            mov{{s?|\.w}} {{r\d+}}, #0
 
   /// CHECK-START-ARM: long Main.ushr33(long) disassembly (after)
-  /// CHECK-NOT:            lsr{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+  /// CHECK-NOT:            lsr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
 
   public static long ushr33(long arg) {
     return arg >>> 33;
   }
 
   /// CHECK-START-ARM: long Main.ushr63(long) disassembly (after)
-  /// CHECK-DAG:            lsr{{s?|.w}} {{r\d+}}, {{r\d+}}, #31
-  /// CHECK-DAG:            mov{{s?|.w}} {{r\d+}}, #0
+  /// CHECK-DAG:            lsr{{s?|\.w}} {{r\d+}}, {{r\d+}}, #31
+  /// CHECK-DAG:            mov{{s?|\.w}} {{r\d+}}, #0
 
   /// CHECK-START-ARM: long Main.ushr63(long) disassembly (after)
-  /// CHECK-NOT:            lsr{{s?|.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
+  /// CHECK-NOT:            lsr{{s?|\.w}} {{r\d+}}, {{r\d+}}, {{r\d+}}
 
   public static long ushr63(long arg) {
     return arg >>> 63;
@@ -485,11 +524,13 @@
 
     assertLongEquals(14, addM1(7));
 
+    assertLongEquals(shl1(longArg), 0x2468acf10eca8642L);
     assertLongEquals(shl2(longArg), 0x48d159e21d950c84L);
     assertLongEquals(shl31(longArg), 0x43b2a19080000000L);
     assertLongEquals(shl32(longArg), 0x8765432100000000L);
     assertLongEquals(shl33(longArg), 0x0eca864200000000L);
     assertLongEquals(shl63(longArg), 0x8000000000000000L);
+    assertLongEquals(shl1(~longArg), 0xdb97530ef13579bcL);
     assertLongEquals(shl2(~longArg), 0xb72ea61de26af378L);
     assertLongEquals(shl31(~longArg), 0xbc4d5e6f00000000L);
     assertLongEquals(shl32(~longArg), 0x789abcde00000000L);
@@ -497,22 +538,26 @@
     assertLongEquals(shl63(~longArg), 0x0000000000000000L);
 
     assertLongEquals(shr1(longArg), 0x091a2b3c43b2a190L);
+    assertLongEquals(shr2(longArg), 0x048d159e21d950c8L);
     assertLongEquals(shr31(longArg), 0x000000002468acf1L);
     assertLongEquals(shr32(longArg), 0x0000000012345678L);
     assertLongEquals(shr33(longArg), 0x00000000091a2b3cL);
     assertLongEquals(shr63(longArg), 0x0000000000000000L);
     assertLongEquals(shr1(~longArg), 0xf6e5d4c3bc4d5e6fL);
+    assertLongEquals(shr2(~longArg), 0xfb72ea61de26af37L);
     assertLongEquals(shr31(~longArg), 0xffffffffdb97530eL);
     assertLongEquals(shr32(~longArg), 0xffffffffedcba987L);
     assertLongEquals(shr33(~longArg), 0xfffffffff6e5d4c3L);
     assertLongEquals(shr63(~longArg), 0xffffffffffffffffL);
 
     assertLongEquals(ushr1(longArg), 0x091a2b3c43b2a190L);
+    assertLongEquals(ushr2(longArg), 0x048d159e21d950c8L);
     assertLongEquals(ushr31(longArg), 0x000000002468acf1L);
     assertLongEquals(ushr32(longArg), 0x0000000012345678L);
     assertLongEquals(ushr33(longArg), 0x00000000091a2b3cL);
     assertLongEquals(ushr63(longArg), 0x0000000000000000L);
     assertLongEquals(ushr1(~longArg), 0x76e5d4c3bc4d5e6fL);
+    assertLongEquals(ushr2(~longArg), 0x3b72ea61de26af37L);
     assertLongEquals(ushr31(~longArg), 0x00000001db97530eL);
     assertLongEquals(ushr32(~longArg), 0x00000000edcba987L);
     assertLongEquals(ushr33(~longArg), 0x0000000076e5d4c3L);
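
The new `shl1` expectations encode the ARM trick that a long left shift by one is just a 32-bit add with carry: the low word doubles (`lsls`, leaving the shifted-out bit in the carry flag) and the high word doubles plus that carry (`adc`). A plain-Java rendering of the same decomposition (the helper name is ours):

public class Shl1Demo {
  // x << 1 on a long, decomposed the way the ARM code above emits it:
  // double the low word, then double the high word and add the carry.
  static long shl1ViaHalves(long x) {
    int lo = (int) x;
    int hi = (int) (x >>> 32);
    int carry = lo >>> 31;        // the bit lsls shifts into the carry flag
    int newLo = lo << 1;
    int newHi = (hi << 1) + carry;
    return ((long) newHi << 32) | (newLo & 0xffffffffL);
  }

  public static void main(String[] args) {
    long arg = 0x1234567887654321L;
    System.out.println(shl1ViaHalves(arg) == (arg << 1));      // true
    System.out.println(Long.toHexString(shl1ViaHalves(arg)));  // 2468acf10eca8642
  }
}
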
diff --git a/test/543-env-long-ref/env_long_ref.cc b/test/543-env-long-ref/env_long_ref.cc
new file mode 100644
index 0000000..4108323
--- /dev/null
+++ b/test/543-env-long-ref/env_long_ref.cc
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "arch/context.h"
+#include "art_method-inl.h"
+#include "jni.h"
+#include "scoped_thread_state_change.h"
+#include "stack.h"
+#include "thread.h"
+
+namespace art {
+
+namespace {
+
+class TestVisitor : public StackVisitor {
+ public:
+  TestVisitor(const ScopedObjectAccess& soa, Context* context, jobject expected_value)
+      SHARED_REQUIRES(Locks::mutator_lock_)
+      : StackVisitor(soa.Self(), context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+        expected_value_(expected_value),
+        found_(false),
+        soa_(soa) {}
+
+  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+    ArtMethod* m = GetMethod();
+    std::string m_name(m->GetName());
+
+    if (m_name == "testCase") {
+      found_ = true;
+      uint32_t value = 0;
+      CHECK(GetVReg(m, 1, kReferenceVReg, &value));
+      CHECK_EQ(reinterpret_cast<mirror::Object*>(value),
+               soa_.Decode<mirror::Object*>(expected_value_));
+    }
+    return true;
+  }
+
+  jobject expected_value_;
+  bool found_;
+  const ScopedObjectAccess& soa_;
+};
+
+}  // namespace
+
+extern "C" JNIEXPORT void JNICALL Java_Main_lookForMyRegisters(JNIEnv*, jclass, jobject value) {
+  ScopedObjectAccess soa(Thread::Current());
+  std::unique_ptr<Context> context(Context::Create());
+  TestVisitor visitor(soa, context.get(), value);
+  visitor.WalkStack();
+  CHECK(visitor.found_);
+}
+
+}  // namespace art
diff --git a/test/543-env-long-ref/expected.txt b/test/543-env-long-ref/expected.txt
new file mode 100644
index 0000000..89f155b
--- /dev/null
+++ b/test/543-env-long-ref/expected.txt
@@ -0,0 +1,2 @@
+JNI_OnLoad called
+42
diff --git a/test/543-env-long-ref/info.txt b/test/543-env-long-ref/info.txt
new file mode 100644
index 0000000..6a42533
--- /dev/null
+++ b/test/543-env-long-ref/info.txt
@@ -0,0 +1,3 @@
+Regression test for the optimizing compiler, which used to return the wrong
+dex register in debuggable mode when a new value overwrote the high dex
+register of a wide value.
diff --git a/test/543-env-long-ref/smali/TestCase.smali b/test/543-env-long-ref/smali/TestCase.smali
new file mode 100644
index 0000000..608d6eb
--- /dev/null
+++ b/test/543-env-long-ref/smali/TestCase.smali
@@ -0,0 +1,26 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTestCase;
+.super Ljava/lang/Object;
+
+.method public static testCase()I
+  .registers 5
+  const-wide/16 v0, 0x1
+  invoke-static {v0, v1}, LMain;->$noinline$allocate(J)LMain;
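+  # move-result-object stores into v1, which held the high half of the wide
+  # value in v0/v1; the visitor in env_long_ref.cc reads v1 back as a reference.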
+  move-result-object v1
+  invoke-static {v1}, LMain;->lookForMyRegisters(LMain;)V
+  iget v2, v1, LMain;->field:I
+  return v2
+.end method
diff --git a/test/543-env-long-ref/src/Main.java b/test/543-env-long-ref/src/Main.java
new file mode 100644
index 0000000..e723789
--- /dev/null
+++ b/test/543-env-long-ref/src/Main.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+  // Workaround for b/18051191.
+  class InnerClass {}
+
+  public static void main(String[] args) throws Throwable {
+    System.loadLibrary(args[0]);
+    Class<?> c = Class.forName("TestCase");
+    Method m = c.getMethod("testCase");
+    Integer a = (Integer) m.invoke(null, (Object[]) null);
+    System.out.println(a);
+  }
+
+  public static Main $noinline$allocate(long a) {
+    try {
+      return new Main();
+    } catch (Exception e) {
+      throw new Error(e);
+    }
+  }
+
+  public static native void lookForMyRegisters(Main m);
+
+  int field = 42;
+}
diff --git a/test/550-checker-multiply-accumulate/expected.txt b/test/550-checker-multiply-accumulate/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/550-checker-multiply-accumulate/expected.txt
diff --git a/test/550-checker-multiply-accumulate/info.txt b/test/550-checker-multiply-accumulate/info.txt
new file mode 100644
index 0000000..10e998c
--- /dev/null
+++ b/test/550-checker-multiply-accumulate/info.txt
@@ -0,0 +1 @@
+Test the merging of multiply and add/sub instructions into
+multiply-accumulate instructions on arm64.
diff --git a/test/550-checker-multiply-accumulate/src/Main.java b/test/550-checker-multiply-accumulate/src/Main.java
new file mode 100644
index 0000000..2d0688d
--- /dev/null
+++ b/test/550-checker-multiply-accumulate/src/Main.java
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+
+  // A dummy value to defeat inlining of these routines.
+  static boolean doThrow = false;
+
+  public static void assertIntEquals(int expected, int result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  public static void assertLongEquals(long expected, long result) {
+    if (expected != result) {
+      throw new Error("Expected: " + expected + ", found: " + result);
+    }
+  }
+
+  /**
+   * Test basic merging of `MUL+ADD` into `MULADD`.
+   */
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$mulAdd(int, int, int) instruction_simplifier_arm64 (before)
+  /// CHECK:       <<Acc:i\d+>>         ParameterValue
+  /// CHECK:       <<Left:i\d+>>        ParameterValue
+  /// CHECK:       <<Right:i\d+>>       ParameterValue
+  /// CHECK:       <<Mul:i\d+>>         Mul [<<Left>>,<<Right>>]
+  /// CHECK:       <<Add:i\d+>>         Add [<<Acc>>,<<Mul>>]
+  /// CHECK:                            Return [<<Add>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$mulAdd(int, int, int) instruction_simplifier_arm64 (after)
+  /// CHECK:       <<Acc:i\d+>>         ParameterValue
+  /// CHECK:       <<Left:i\d+>>        ParameterValue
+  /// CHECK:       <<Right:i\d+>>       ParameterValue
+  /// CHECK:       <<MulAdd:i\d+>>      Arm64MultiplyAccumulate [<<Acc>>,<<Left>>,<<Right>>] kind:Add
+  /// CHECK:                            Return [<<MulAdd>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$mulAdd(int, int, int) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Mul
+  /// CHECK-NOT:                        Add
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$mulAdd(int, int, int) disassembly (after)
+  /// CHECK:                            madd w{{\d+}}, w{{\d+}}, w{{\d+}}, w{{\d+}}
+
+  public static int $opt$noinline$mulAdd(int acc, int left, int right) {
+    if (doThrow) throw new Error();
+    return acc + left * right;
+  }
+
+  /**
+   * Test basic merging of `MUL+SUB` into `MULSUB`.
+   */
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$mulSub(long, long, long) instruction_simplifier_arm64 (before)
+  /// CHECK:       <<Acc:j\d+>>         ParameterValue
+  /// CHECK:       <<Left:j\d+>>        ParameterValue
+  /// CHECK:       <<Right:j\d+>>       ParameterValue
+  /// CHECK:       <<Mul:j\d+>>         Mul [<<Left>>,<<Right>>]
+  /// CHECK:       <<Sub:j\d+>>         Sub [<<Acc>>,<<Mul>>]
+  /// CHECK:                            Return [<<Sub>>]
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$mulSub(long, long, long) instruction_simplifier_arm64 (after)
+  /// CHECK:       <<Acc:j\d+>>         ParameterValue
+  /// CHECK:       <<Left:j\d+>>        ParameterValue
+  /// CHECK:       <<Right:j\d+>>       ParameterValue
+  /// CHECK:       <<MulSub:j\d+>>      Arm64MultiplyAccumulate [<<Acc>>,<<Left>>,<<Right>>] kind:Sub
+  /// CHECK:                            Return [<<MulSub>>]
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$mulSub(long, long, long) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Mul
+  /// CHECK-NOT:                        Sub
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$mulSub(long, long, long) disassembly (after)
+  /// CHECK:                            msub x{{\d+}}, x{{\d+}}, x{{\d+}}, x{{\d+}}
+
+  public static long $opt$noinline$mulSub(long acc, long left, long right) {
+    if (doThrow) throw new Error();
+    return acc - left * right;
+  }
+
+  /**
+   * Test that we do not create a multiply-accumulate instruction when there
+   * are other uses of the multiplication that cannot merge it.
+   */
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$multipleUses1(int, int, int) instruction_simplifier_arm64 (before)
+  /// CHECK:       <<Acc:i\d+>>         ParameterValue
+  /// CHECK:       <<Left:i\d+>>        ParameterValue
+  /// CHECK:       <<Right:i\d+>>       ParameterValue
+  /// CHECK:       <<Mul:i\d+>>         Mul [<<Left>>,<<Right>>]
+  /// CHECK:       <<Add:i\d+>>         Add [<<Acc>>,<<Mul>>]
+  /// CHECK:       <<Or:i\d+>>          Or [<<Mul>>,<<Add>>]
+  /// CHECK:                            Return [<<Or>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$multipleUses1(int, int, int) instruction_simplifier_arm64 (after)
+  /// CHECK:       <<Acc:i\d+>>         ParameterValue
+  /// CHECK:       <<Left:i\d+>>        ParameterValue
+  /// CHECK:       <<Right:i\d+>>       ParameterValue
+  /// CHECK:       <<Mul:i\d+>>         Mul [<<Left>>,<<Right>>]
+  /// CHECK:       <<Add:i\d+>>         Add [<<Acc>>,<<Mul>>]
+  /// CHECK:       <<Or:i\d+>>          Or [<<Mul>>,<<Add>>]
+  /// CHECK:                            Return [<<Or>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$multipleUses1(int, int, int) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Arm64MultiplyAccumulate
+
+  public static int $opt$noinline$multipleUses1(int acc, int left, int right) {
+    if (doThrow) throw new Error();
+    int temp = left * right;
+    return temp | (acc + temp);
+  }
+
+  /**
+   * Test that we do not create a multiply-accumulate instruction even when all
+   * uses of the multiplication can merge it.
+   */
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$multipleUses2(long, long, long) instruction_simplifier_arm64 (before)
+  /// CHECK:       <<Acc:j\d+>>         ParameterValue
+  /// CHECK:       <<Left:j\d+>>        ParameterValue
+  /// CHECK:       <<Right:j\d+>>       ParameterValue
+  /// CHECK:       <<Mul:j\d+>>         Mul [<<Left>>,<<Right>>]
+  /// CHECK:       <<Add:j\d+>>         Add [<<Acc>>,<<Mul>>]
+  /// CHECK:       <<Sub:j\d+>>         Sub [<<Acc>>,<<Mul>>]
+  /// CHECK:       <<Res:j\d+>>         Add [<<Add>>,<<Sub>>]
+  /// CHECK:                            Return [<<Res>>]
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$multipleUses2(long, long, long) instruction_simplifier_arm64 (after)
+  /// CHECK:       <<Acc:j\d+>>         ParameterValue
+  /// CHECK:       <<Left:j\d+>>        ParameterValue
+  /// CHECK:       <<Right:j\d+>>       ParameterValue
+  /// CHECK:       <<Mul:j\d+>>         Mul [<<Left>>,<<Right>>]
+  /// CHECK:       <<Add:j\d+>>         Add [<<Acc>>,<<Mul>>]
+  /// CHECK:       <<Sub:j\d+>>         Sub [<<Acc>>,<<Mul>>]
+  /// CHECK:       <<Res:j\d+>>         Add [<<Add>>,<<Sub>>]
+  /// CHECK:                            Return [<<Res>>]
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$multipleUses2(long, long, long) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Arm64MultiplyAccumulate
+
+
+  public static long $opt$noinline$multipleUses2(long acc, long left, long right) {
+    if (doThrow) throw new Error();
+    long temp = left * right;
+    return (acc + temp) + (acc - temp);
+  }
+
+
+  /**
+   * Test the interpretation of `a * (b + 1)` as `a + (a * b)`.
+   */
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$mulPlusOne(int, int) instruction_simplifier_arm64 (before)
+  /// CHECK:       <<Acc:i\d+>>         ParameterValue
+  /// CHECK:       <<Var:i\d+>>         ParameterValue
+  /// CHECK:       <<Const1:i\d+>>      IntConstant 1
+  /// CHECK:       <<Add:i\d+>>         Add [<<Var>>,<<Const1>>]
+  /// CHECK:       <<Mul:i\d+>>         Mul [<<Acc>>,<<Add>>]
+  /// CHECK:                            Return [<<Mul>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$mulPlusOne(int, int) instruction_simplifier_arm64 (after)
+  /// CHECK:       <<Acc:i\d+>>         ParameterValue
+  /// CHECK:       <<Var:i\d+>>         ParameterValue
+  /// CHECK:       <<MulAdd:i\d+>>      Arm64MultiplyAccumulate [<<Acc>>,<<Acc>>,<<Var>>] kind:Add
+  /// CHECK:                            Return [<<MulAdd>>]
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$mulPlusOne(int, int) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Mul
+  /// CHECK-NOT:                        Add
+
+  /// CHECK-START-ARM64: int Main.$opt$noinline$mulPlusOne(int, int) disassembly (after)
+  /// CHECK:                            madd w{{\d+}}, w{{\d+}}, w{{\d+}}, w{{\d+}}
+
+  public static int $opt$noinline$mulPlusOne(int acc, int var) {
+    if (doThrow) throw new Error();
+    return acc * (var + 1);
+  }
+
+
+  /**
+   * Test the interpretation of `a * (1 - b)` as `a - (a * b)`.
+   */
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$mulMinusOne(long, long) instruction_simplifier_arm64 (before)
+  /// CHECK:       <<Acc:j\d+>>         ParameterValue
+  /// CHECK:       <<Var:j\d+>>         ParameterValue
+  /// CHECK:       <<Const1:j\d+>>      LongConstant 1
+  /// CHECK:       <<Sub:j\d+>>         Sub [<<Const1>>,<<Var>>]
+  /// CHECK:       <<Mul:j\d+>>         Mul [<<Acc>>,<<Sub>>]
+  /// CHECK:                            Return [<<Mul>>]
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$mulMinusOne(long, long) instruction_simplifier_arm64 (after)
+  /// CHECK:       <<Acc:j\d+>>         ParameterValue
+  /// CHECK:       <<Var:j\d+>>         ParameterValue
+  /// CHECK:       <<MulSub:j\d+>>      Arm64MultiplyAccumulate [<<Acc>>,<<Acc>>,<<Var>>] kind:Sub
+  /// CHECK:                            Return [<<MulSub>>]
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$mulMinusOne(long, long) instruction_simplifier_arm64 (after)
+  /// CHECK-NOT:                        Mul
+  /// CHECK-NOT:                        Sub
+
+  /// CHECK-START-ARM64: long Main.$opt$noinline$mulMinusOne(long, long) disassembly (after)
+  /// CHECK:                            msub x{{\d+}}, x{{\d+}}, x{{\d+}}, x{{\d+}}
+
+  public static long $opt$noinline$mulMinusOne(long acc, long var) {
+    if (doThrow) throw new Error();
+    return acc * (1 - var);
+  }
+
+
+  public static void main(String[] args) {
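+    // Expected values: 1 + 2*3 = 7, 4 - 5*6 = -26, (8*9) | (7 + 8*9) = 79,
+    // (10 + 11*12) + (10 - 11*12) = 20, 13 * (14 + 1) = 195, 15 * (1 - 16) = -225.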
+    assertIntEquals(7, $opt$noinline$mulAdd(1, 2, 3));
+    assertLongEquals(-26, $opt$noinline$mulSub(4, 5, 6));
+    assertIntEquals(79, $opt$noinline$multipleUses1(7, 8, 9));
+    assertLongEquals(20, $opt$noinline$multipleUses2(10, 11, 12));
+    assertIntEquals(195, $opt$noinline$mulPlusOne(13, 14));
+    assertLongEquals(-225, $opt$noinline$mulMinusOne(15, 16));
+  }
+}
diff --git a/test/550-checker-regression-wide-store/expected.txt b/test/550-checker-regression-wide-store/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/550-checker-regression-wide-store/expected.txt
diff --git a/test/550-checker-regression-wide-store/info.txt b/test/550-checker-regression-wide-store/info.txt
new file mode 100644
index 0000000..6cf04bc
--- /dev/null
+++ b/test/550-checker-regression-wide-store/info.txt
@@ -0,0 +1,3 @@
+Test an SsaBuilder regression where storing into the high vreg of a pair
+would not invalidate the low vreg. The resulting environment would generate
+an incorrect stack map, causing deoptimization and try/catch to use a wrong location.
diff --git a/test/550-checker-regression-wide-store/smali/TestCase.smali b/test/550-checker-regression-wide-store/smali/TestCase.smali
new file mode 100644
index 0000000..7974d56
--- /dev/null
+++ b/test/550-checker-regression-wide-store/smali/TestCase.smali
@@ -0,0 +1,82 @@
+# Copyright (C) 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+.class public LTestCase;
+.super Ljava/lang/Object;
+
+.method public static $noinline$throw()V
+  .registers 1
+  new-instance v0, Ljava/lang/Exception;
+  invoke-direct {v0}, Ljava/lang/Exception;-><init>()V
+  throw v0
+.end method
+
+# Test storing into the high vreg of a wide pair. This scenario has runtime
+# behaviour implications, so we run it from Main.main.
+
+## CHECK-START: int TestCase.invalidateLow(long) ssa_builder (after)
+## CHECK-DAG: <<Cst0:i\d+>> IntConstant 0
+## CHECK-DAG: <<Arg:j\d+>>  ParameterValue
+## CHECK-DAG: <<Cast:i\d+>> TypeConversion [<<Arg>>]
+## CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.System.nanoTime env:[[_,<<Cst0>>,<<Arg>>,_]]
+## CHECK-DAG: InvokeStaticOrDirect method_name:TestCase.$noinline$throw  env:[[_,<<Cast>>,<<Arg>>,_]]
+
+.method public static invalidateLow(J)I
+  .registers 4
+
+  const/4 v1, 0x0
+
+  :try_start
+  invoke-static {}, Ljava/lang/System;->nanoTime()J
+  move-wide v0, p0
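+  # Storing into v1 (the high vreg of the v0/v1 pair) must also invalidate
+  # v0, the low vreg.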
+  long-to-int v1, v0
+  invoke-static {}, LTestCase;->$noinline$throw()V
+  :try_end
+  .catchall {:try_start .. :try_end} :catchall
+
+  :catchall
+  return v1
+
+.end method
+
+# Test that storing a wide value invalidates the value in the high vreg. This
+# cannot be detected at runtime, so we only test the environment with Checker.
+
+## CHECK-START: void TestCase.invalidateHigh1(long) ssa_builder (after)
+## CHECK-DAG: <<Arg:j\d+>>  ParameterValue
+## CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.System.nanoTime env:[[<<Arg>>,_,<<Arg>>,_]]
+
+.method public static invalidateHigh1(J)V
+  .registers 4
+
+  const/4 v1, 0x0
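+  # The wide store into v0/v1 must invalidate the old int value in v1 (the
+  # high vreg).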
+  move-wide v0, p0
+  invoke-static {}, Ljava/lang/System;->nanoTime()J
+  return-void
+
+.end method
+
+## CHECK-START: void TestCase.invalidateHigh2(long) ssa_builder (after)
+## CHECK-DAG: <<Arg:j\d+>>  ParameterValue
+## CHECK-DAG: InvokeStaticOrDirect method_name:java.lang.System.nanoTime env:[[<<Arg>>,_,_,<<Arg>>,_]]
+
+.method public static invalidateHigh2(J)V
+  .registers 5
+
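+  # The second wide store overwrites v1 and must invalidate the pair
+  # previously stored in v1/v2, leaving a stale high half in v2.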
+  move-wide v1, p0
+  move-wide v0, p0
+  invoke-static {}, Ljava/lang/System;->nanoTime()J
+  return-void
+
+.end method
diff --git a/test/550-checker-regression-wide-store/src/Main.java b/test/550-checker-regression-wide-store/src/Main.java
new file mode 100644
index 0000000..9b502df
--- /dev/null
+++ b/test/550-checker-regression-wide-store/src/Main.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.lang.reflect.Method;
+
+public class Main {
+
+  // Workaround for b/18051191.
+  class InnerClass {}
+
+  private static int runTestCase(String name, long arg) throws Exception {
+    Class<?> c = Class.forName("TestCase");
+    Method m = c.getMethod(name, long.class);
+    int result = (Integer) m.invoke(null, arg);
+    return result;
+  }
+
+  private static void assertEquals(int expected, int actual) {
+    if (expected != actual) {
+      throw new Error("Wrong result: " + expected + " != " + actual);
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    assertEquals(42, runTestCase("invalidateLow", 42L));
+  }
+}
diff --git a/test/550-new-instance-clinit/expected.txt b/test/550-new-instance-clinit/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/550-new-instance-clinit/expected.txt
diff --git a/test/550-new-instance-clinit/info.txt b/test/550-new-instance-clinit/info.txt
new file mode 100644
index 0000000..c5fa3c7
--- /dev/null
+++ b/test/550-new-instance-clinit/info.txt
@@ -0,0 +1,3 @@
+Regression test for the optimizing compiler, which used to treat
+HNewInstance as having no side effects even though it could
+invoke a clinit method.
diff --git a/test/LambdaInterfaces/LambdaInterfaces.java b/test/550-new-instance-clinit/src/Main.java
similarity index 60%
rename from test/LambdaInterfaces/LambdaInterfaces.java
rename to test/550-new-instance-clinit/src/Main.java
index 261163d..45e259e 100644
--- a/test/LambdaInterfaces/LambdaInterfaces.java
+++ b/test/550-new-instance-clinit/src/Main.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2015 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,20 +14,20 @@
  * limitations under the License.
  */
 
-class LambdaInterfaces {
-    interface I {
-        public int i();
+public class Main {
+  public static void main(String[] args) {
+    int foo = Main.a;
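+    // Instantiating Bar runs its class initializer, which increments Main.a,
+    // so the second read of Main.a must observe 43.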
+    new Bar();
+    foo = Main.a;
+    if (foo != 43) {
+      throw new Error("Expected 43, got " + foo);
     }
-    interface J {
-        public String foo = "foo";
-        public void j1();
-    }
-    interface K extends J {
-    }
-    interface L {
-      public int sum(int a, int b);
-    }
-    interface C {
-      public String concat(String a, String b);
-    }
+  }
+  static int a = 42;
+}
+
+class Bar {
+  static {
+    Main.a++;
+  }
 }
diff --git a/test/955-lambda-smali/expected.txt b/test/955-lambda-smali/expected.txt
index 8afe4bc..16381e4 100644
--- a/test/955-lambda-smali/expected.txt
+++ b/test/955-lambda-smali/expected.txt
@@ -26,5 +26,3 @@
 (CaptureVariables) (0-args, 1 captured variable 'D'): value is -Infinity
 (CaptureVariables) (0-args, 8 captured variable 'ZBCSIJFD'): value is true,R,∂,1000,12345678,3287471278325742,Infinity,-Infinity
 (CaptureVariables) Caught NPE
-(BoxInvoke) Hello boxing world! (0-args, no closure) void
-(BoxInvoke) Hello boxing world!(1-args, no closure) returned: 12345678
diff --git a/test/955-lambda-smali/smali/BoxInvoke.smali b/test/955-lambda-smali/smali/BoxInvoke.smali
deleted file mode 100644
index 8b53333..0000000
--- a/test/955-lambda-smali/smali/BoxInvoke.smali
+++ /dev/null
@@ -1,103 +0,0 @@
-#  Copyright (C) 2015 The Android Open Source Project
-#
-#  Licensed under the Apache License, Version 2.0 (the "License");
-#  you may not use this file except in compliance with the License.
-#  You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#  Unless required by applicable law or agreed to in writing, software
-#  distributed under the License is distributed on an "AS IS" BASIS,
-#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#  See the License for the specific language governing permissions and
-#  limitations under the License.
-#
-.class public LBoxInvoke;
-.super Ljava/lang/Object;
-
-.method public constructor <init>()V
-.registers 1
-    invoke-direct {p0}, Ljava/lang/Object;-><init>()V
-    return-void
-.end method
-
-.method public static run()V
-    .registers 0
-
-    invoke-static {}, LBoxInvoke;->testBoxInvoke()V
-    invoke-static {}, LBoxInvoke;->forceGC()V
-
-    return-void
-.end method
-
-# Test that invoke-virtual works on boxed innate lambdas.
-.method public static testBoxInvoke()V
-    .registers 100
-
-    # Try invoking 0-arg void return lambda
-    create-lambda v0, LBoxInvoke;->doHelloWorld0(J)V
-    const-string v2, "Ljava/lang/Runnable;"
-    box-lambda v2, v0 # Ljava/lang/Runnable;
-    invoke-interface {v2}, Ljava/lang/Runnable;->run()V
-
-    # Try invoking 1-arg int return lambda
-    create-lambda v3, LBoxInvoke;->doHelloWorld1(JLjava/lang/Object;)I
-    const-string v5, "Ljava/lang/Comparable;"
-    box-lambda v5, v3 # Ljava/lang/Comparable;
-    const-string v6, "Hello boxing world!"
-    invoke-interface {v5, v6}, Ljava/lang/Comparable;->compareTo(Ljava/lang/Object;)I
-    move-result v7
-    sget-object v8, Ljava/lang/System;->out:Ljava/io/PrintStream;
-    invoke-virtual {v8, v7}, Ljava/io/PrintStream;->println(I)V
-
-    return-void
-
-    # TODO: more tests once box-lambda can take a type descriptor.
-
-.end method
-
-#TODO: should use a closure type instead of a long.
-.method public static doHelloWorld0(J)V
-    .registers 4 # 1 wide parameters, 2 locals
-
-    const-string v0, "(BoxInvoke) Hello boxing world! (0-args, no closure) void"
-
-    sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
-    invoke-virtual {v1, v0}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V
-
-    return-void
-.end method
-
-#TODO: should use a closure type instead of a long.
-.method public static doHelloWorld1(JLjava/lang/Object;)I
-    # J = closure, L = obj, I = return type
-    .registers 6 # 1 wide parameters, 1 narrow parameter, 3 locals
-
-    # Prints "<before> $parameter1(Object) <after>:" without the line terminator. 
-
-    const-string v0, "(BoxInvoke) "
-
-    sget-object v1, Ljava/lang/System;->out:Ljava/io/PrintStream;
-    # System.out.print("<before>");
-    invoke-virtual {v1, v0}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
-    # System.out.print(obj);
-    invoke-virtual {v1, p2}, Ljava/io/PrintStream;->print(Ljava/lang/Object;)V
-
-    # System.out.print("<after>: ");
-    const-string v0, "(1-args, no closure) returned: "
-    invoke-virtual {v1, v0}, Ljava/io/PrintStream;->print(Ljava/lang/String;)V
-
-    const v2, 12345678
-    return v2
-.end method
-
-# Force a GC. Used to ensure our weak reference table of boxed lambdas is getting swept.
-.method private static forceGC()V
-    .registers 1
-    invoke-static {}, Ljava/lang/Runtime;->getRuntime()Ljava/lang/Runtime;
-    move-result-object v0
-    invoke-virtual {v0}, Ljava/lang/Runtime;->gc()V
-
-    return-void
-.end method
diff --git a/test/955-lambda-smali/smali/BoxUnbox.smali b/test/955-lambda-smali/smali/BoxUnbox.smali
index 157adb3..915de2d 100644
--- a/test/955-lambda-smali/smali/BoxUnbox.smali
+++ b/test/955-lambda-smali/smali/BoxUnbox.smali
@@ -51,7 +51,6 @@
     .registers 3
 
     create-lambda v0, LBoxUnbox;->doHelloWorld(J)V
-    const-string v2, "Ljava/lang/Runnable;"
     box-lambda v2, v0 # v2 = box(v0)
     unbox-lambda v0, v2, J # v0 = unbox(v2)
     invoke-lambda v0, {}
@@ -64,9 +63,7 @@
    .registers 6 # 0 parameters, 6 locals
 
     create-lambda v0, LBoxUnbox;->doHelloWorld(J)V
-    const-string v2, "Ljava/lang/Runnable;"
     box-lambda v2, v0 # v2 = box(v0)
-    const-string v3, "Ljava/lang/Runnable;"
     box-lambda v3, v0 # v3 = box(v0)
 
     # The objects should be not-null, and they should have the same reference
@@ -119,7 +116,6 @@
     const v0, 0  # v0 = null
     const v1, 0  # v1 = null
 :start
-    const-string v2, "Ljava/lang/Runnable;"
     box-lambda v2, v0  # attempting to box a null lambda will throw NPE
 :end
     return-void
diff --git a/test/955-lambda-smali/smali/CaptureVariables.smali b/test/955-lambda-smali/smali/CaptureVariables.smali
index 531c259..f18b7ff 100644
--- a/test/955-lambda-smali/smali/CaptureVariables.smali
+++ b/test/955-lambda-smali/smali/CaptureVariables.smali
@@ -243,8 +243,6 @@
     # TODO: create-lambda should not write to both v0 and v1
     invoke-lambda v0, {}
 
-    return-void
-
 .end method
 
 #TODO: should use a closure type instead of a long
diff --git a/test/955-lambda-smali/smali/Main.smali b/test/955-lambda-smali/smali/Main.smali
index e8ab84c..9892d61 100644
--- a/test/955-lambda-smali/smali/Main.smali
+++ b/test/955-lambda-smali/smali/Main.smali
@@ -25,7 +25,6 @@
     invoke-static {}, LBoxUnbox;->run()V
     invoke-static {}, LMoveResult;->run()V
     invoke-static {}, LCaptureVariables;->run()V
-    invoke-static {}, LBoxInvoke;->run()V
 
 # TODO: add tests when verification fails
 
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index 7a22e1b..f74a516 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -37,7 +37,8 @@
   457-regs/regs_jni.cc \
   461-get-reference-vreg/get_reference_vreg_jni.cc \
   466-get-live-vreg/get_live_vreg_jni.cc \
-  497-inlining-and-class-loader/clear_dex_cache.cc
+  497-inlining-and-class-loader/clear_dex_cache.cc \
+  543-env-long-ref/env_long_ref.cc
 
 ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
 ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttestd.so
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index 0747712..47fc50f 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -28,6 +28,18 @@
   exit 1
 fi
 
+if [ "x$ART_USE_READ_BARRIER" = xtrue ]; then
+  # For the moment, skip JDWP tests when read barriers are enabled, as
+  # they sometimes exhibit a deadlock issue with the concurrent
+  # copying collector in the read barrier configuration, between the
+# HeapTaskDaemon and the JDWP thread (b/25800335).
+  #
+  # TODO: Re-enable the JDWP tests when this deadlock issue is fixed.
+  echo "JDWP tests are temporarily disabled in the read barrier configuration because of"
+  echo "a deadlock issue (b/25800335)."
+  exit 0
+fi
+
 art="/data/local/tmp/system/bin/art"
 art_debugee="sh /data/local/tmp/system/bin/art"
 args=$@