am a024a068: Compilation filter

* commit 'a024a0686c3b0fea13f362bff70d65981e5febc5':
  Compilation filter
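
The diff below is mostly a type-safety refactoring of the quick compiler's
runtime-helper plumbing: entrypoint offsets that used to travel as raw ints
are now carried as ThreadOffset values and unwrapped with Int32Value() only
at the LIR boundary, the "FromCode" suffix is dropped from quick entrypoint
names, and the per-architecture stub generators are replaced by a single
trampoline compiler. A minimal sketch of the wrapper the new signatures
assume (the real class lives in ART's offsets.h and may carry more checking):

    // Sketch only: a ThreadOffset-style wrapper around an int32_t.
    class ThreadOffset {
     public:
      explicit ThreadOffset(int32_t value) : value_(value) {}
      int32_t Int32Value() const { return value_; }  // unwrap for LIR encoding
     private:
      int32_t value_;  // byte offset into the Thread object
    };

Because the constructor is explicit, a plain int can no longer be passed
where a Thread-relative offset is expected; placeholder values are spelled
ThreadOffset(-1) instead of -1.
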
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index e069d88..ac3b821 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -60,7 +60,10 @@
 
 ifeq ($(ART_SEA_IR_MODE),true)
 TEST_COMMON_SRC_FILES += \
-	compiler/utils/scoped_hashtable_test.cc
+	compiler/utils/scoped_hashtable_test.cc \
+	compiler/sea_ir/types/type_data_test.cc \
+	compiler/sea_ir/types/type_inference_visitor_test.cc \
+	compiler/sea_ir/ir/regions_test.cc
 endif
 
 TEST_TARGET_SRC_FILES := \
diff --git a/compiler/Android.mk b/compiler/Android.mk
index fec1e11..5caf688 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -75,8 +75,7 @@
 	llvm/runtime_support_builder_arm.cc \
 	llvm/runtime_support_builder_thumb2.cc \
 	llvm/runtime_support_builder_x86.cc \
-	stubs/portable/stubs.cc \
-	stubs/quick/stubs.cc \
+	trampolines/trampoline_compiler.cc \
 	utils/arm/assembler_arm.cc \
 	utils/arm/managed_register_arm.cc \
 	utils/assembler.cc \
@@ -95,9 +94,12 @@
 ifeq ($(ART_SEA_IR_MODE),true)
 LIBART_COMPILER_SRC_FILES += \
 	sea_ir/frontend.cc \
-	sea_ir/instruction_tools.cc \
-	sea_ir/sea.cc \
-	sea_ir/code_gen.cc
+	sea_ir/ir/instruction_tools.cc \
+	sea_ir/ir/sea.cc \
+	sea_ir/code_gen/code_gen.cc \
+	sea_ir/types/type_inference.cc \
+	sea_ir/types/type_inference_visitor.cc \
+	sea_ir/debug/dot_gen.cc
 endif
 
 LIBART_COMPILER_CFLAGS :=
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 1ee29cb..60e638c 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -209,8 +209,8 @@
 
 void DexCompiler::CompileReturnVoid(Instruction* inst, uint32_t dex_pc) {
   DCHECK(inst->Opcode() == Instruction::RETURN_VOID);
-  // Are we compiling a constructor ?
-  if ((unit_.GetAccessFlags() & kAccConstructor) == 0) {
+  // Are we compiling a non-clinit constructor?
+  if (!unit_.IsConstructor() || unit_.IsStatic()) {
     return;
   }
   // Do we need a constructor barrier ?
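
A note on the CompileReturnVoid hunk above: the dex access flag
kAccConstructor is set for <clinit> as well as <init>, so the old flag-only
test also matched static initializers. The rewritten condition keeps just
instance constructors, the only methods that can need the constructor
barrier. A sketch of the predicate, assuming IsStatic() tests kAccStatic the
same way the existing accessors in dex_compilation_unit.h do:

    // Sketch only: bail out unless this is an instance constructor.
    bool is_instance_constructor = unit_.IsConstructor() && !unit_.IsStatic();
    if (!is_instance_constructor) {
      return;  // <clinit> and ordinary methods never get the barrier
    }
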
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 745e43d..2d8e24f 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -432,7 +432,7 @@
   // Making a call - use explicit registers
   FlushAllRegs();   /* Everything to home location */
   LoadValueDirectFixed(rl_src, r0);
-  LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
+  LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData).Int32Value(),
                rARM_LR);
   // Materialize a pointer to the fill data image
   NewLIR3(kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tab_rec));
@@ -488,7 +488,7 @@
   OpRegImm(kOpCmp, r1, 0);
   OpIT(kCondNe, "T");
   // Go expensive route - artLockObjectFromCode(self, obj);
-  LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR);
+  LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObject).Int32Value(), rARM_LR);
   ClobberCalleeSave();
   LIR* call_inst = OpReg(kOpBlx, rARM_LR);
   MarkSafepointPC(call_inst);
@@ -519,7 +519,7 @@
   OpIT(kCondEq, "EE");
   StoreWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r3);
   // Go expensive route - UnlockObjectFromCode(obj);
-  LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
+  LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObject).Int32Value(), rARM_LR);
   ClobberCalleeSave();
   LIR* call_inst = OpReg(kOpBlx, rARM_LR);
   MarkSafepointPC(call_inst);
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 1599941..f1ccfa0 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -28,7 +28,7 @@
     // Required for target - codegen helpers.
     bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
                                     RegLocation rl_dest, int lit);
-    int LoadHelper(int offset);
+    int LoadHelper(ThreadOffset offset);
     LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
     LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
                           int s_reg);
@@ -153,12 +153,12 @@
     LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
     LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
     LIR* OpTestSuspend(LIR* target);
-    LIR* OpThreadMem(OpKind op, int thread_offset);
+    LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset);
     LIR* OpVldm(int rBase, int count);
     LIR* OpVstm(int rBase, int count);
     void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
     void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
-    void OpTlsCmp(int offset, int val);
+    void OpTlsCmp(ThreadOffset offset, int val);
 
     RegLocation ArgLoc(RegLocation loc);
     LIR* LoadBaseDispBody(int rBase, int displacement, int r_dest, int r_dest_hi, OpSize size,
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 9db1016..c258019 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -498,7 +498,7 @@
   LOG(FATAL) << "Unexpected use of OpLea for Arm";
 }
 
-void ArmMir2Lir::OpTlsCmp(int offset, int val) {
+void ArmMir2Lir::OpTlsCmp(ThreadOffset offset, int val) {
   LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
 }
 
@@ -665,7 +665,7 @@
      */
     RegLocation rl_result;
     if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) {
-      int func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
+      ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
       FlushAllRegs();
       CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
       rl_result = GetReturnWide(false);
@@ -956,7 +956,7 @@
 
   // Get the array's class.
   LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
-  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElement), r_value,
                           r_array_class, true);
   // Redo LoadValues in case they didn't survive the call.
   LoadValueDirectFixed(rl_array, r_array);  // Reload array
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 6f37798..47d3d97 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -714,8 +714,8 @@
   FreeTemp(r3);
 }
 
-int ArmMir2Lir::LoadHelper(int offset) {
-  LoadWordDisp(rARM_SELF, offset, rARM_LR);
+int ArmMir2Lir::LoadHelper(ThreadOffset offset) {
+  LoadWordDisp(rARM_SELF, offset.Int32Value(), rARM_LR);
   return rARM_LR;
 }
 
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index afc8a66..c63de69 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -1029,7 +1029,7 @@
   return res;
 }
 
-LIR* ArmMir2Lir::OpThreadMem(OpKind op, int thread_offset) {
+LIR* ArmMir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) {
   LOG(FATAL) << "Unexpected use of OpThreadMem for Arm";
   return NULL;
 }
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index ebe10bb..298d389 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -208,12 +208,12 @@
 void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                           RegLocation rl_src) {
   FlushAllRegs();  /* Everything to home location */
-  int func_offset;
+  ThreadOffset func_offset(-1);
   if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
                                                        type_idx)) {
-    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayFromCode);
+    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArray);
   } else {
-    func_offset= QUICK_ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck);
+    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayWithAccessCheck);
   }
   CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
   RegLocation rl_result = GetReturn(false);
@@ -230,12 +230,12 @@
   int elems = info->num_arg_words;
   int type_idx = info->index;
   FlushAllRegs();  /* Everything to home location */
-  int func_offset;
+  ThreadOffset func_offset(-1);
   if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
                                                        type_idx)) {
-    func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode);
+    func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArray);
   } else {
-    func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck);
+    func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayWithAccessCheck);
   }
   CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
   FreeTemp(TargetReg(kArg2));
@@ -408,9 +408,10 @@
     FreeTemp(rBase);
   } else {
     FlushAllRegs();  // Everything to home locations
-    int setter_offset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static) :
-        (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic)
-        : QUICK_ENTRYPOINT_OFFSET(pSet32Static));
+    ThreadOffset setter_offset =
+        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static)
+                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic)
+                                       : QUICK_ENTRYPOINT_OFFSET(pSet32Static));
     CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true);
   }
 }
@@ -483,9 +484,10 @@
     }
   } else {
     FlushAllRegs();  // Everything to home locations
-    int getterOffset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static) :
-        (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic)
-        : QUICK_ENTRYPOINT_OFFSET(pGet32Static));
+    ThreadOffset getterOffset =
+        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static)
+                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic)
+                                       : QUICK_ENTRYPOINT_OFFSET(pGet32Static));
     CallRuntimeHelperImm(getterOffset, field_idx, true);
     if (is_long_or_double) {
       RegLocation rl_result = GetReturnWide(rl_dest.fp);
@@ -499,7 +501,7 @@
 
 void Mir2Lir::HandleSuspendLaunchPads() {
   int num_elems = suspend_launchpads_.Size();
-  int helper_offset = QUICK_ENTRYPOINT_OFFSET(pTestSuspendFromCode);
+  ThreadOffset helper_offset = QUICK_ENTRYPOINT_OFFSET(pTestSuspend);
   for (int i = 0; i < num_elems; i++) {
     ResetRegPool();
     ResetDefTracking();
@@ -539,13 +541,13 @@
     LIR* lab = throw_launchpads_.Get(i);
     current_dalvik_offset_ = lab->operands[1];
     AppendLIR(lab);
-    int func_offset = 0;
+    ThreadOffset func_offset(-1);
     int v1 = lab->operands[2];
     int v2 = lab->operands[3];
     bool target_x86 = (cu_->instruction_set == kX86);
     switch (lab->operands[0]) {
       case kThrowNullPointer:
-        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNullPointerFromCode);
+        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNullPointer);
         break;
       case kThrowConstantArrayBounds:  // v1 is length reg (for Arm/Mips), v2 constant index
         // v1 holds the constant array index.  Mips/Arm uses v2 for length, x86 reloads.
@@ -557,7 +559,7 @@
         // Make sure the following LoadConstant doesn't mess with kArg1.
         LockTemp(TargetReg(kArg1));
         LoadConstant(TargetReg(kArg0), v2);
-        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
+        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds);
         break;
       case kThrowArrayBounds:
         // Move v1 (array index) to kArg0 and v2 (array length) to kArg1
@@ -590,18 +592,18 @@
             OpRegCopy(TargetReg(kArg0), v1);
           }
         }
-        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
+        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBounds);
         break;
       case kThrowDivZero:
-        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZeroFromCode);
+        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZero);
         break;
       case kThrowNoSuchMethod:
         OpRegCopy(TargetReg(kArg0), v1);
         func_offset =
-          QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode);
+          QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethod);
         break;
       case kThrowStackOverflow:
-        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode);
+        func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflow);
         // Restore stack alignment
         if (target_x86) {
           OpRegImm(kOpAdd, TargetReg(kSp), frame_size_);
@@ -664,9 +666,10 @@
       StoreValue(rl_dest, rl_result);
     }
   } else {
-    int getterOffset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance) :
-        (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance)
-        : QUICK_ENTRYPOINT_OFFSET(pGet32Instance));
+    ThreadOffset getterOffset =
+        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance)
+                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance)
+                                       : QUICK_ENTRYPOINT_OFFSET(pGet32Instance));
     CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true);
     if (is_long_or_double) {
       RegLocation rl_result = GetReturnWide(rl_dest.fp);
@@ -719,9 +722,10 @@
       }
     }
   } else {
-    int setter_offset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance) :
-        (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance)
-        : QUICK_ENTRYPOINT_OFFSET(pSet32Instance));
+    ThreadOffset setter_offset =
+        is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance)
+                          : (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance)
+                                       : QUICK_ENTRYPOINT_OFFSET(pSet32Instance));
     CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true);
   }
 }
@@ -735,7 +739,7 @@
                                                    type_idx)) {
     // Call out to helper which resolves type and verifies access.
     // Resolved type returned in kRet0.
-    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                             type_idx, rl_method.low_reg, true);
     RegLocation rl_result = GetReturn(false);
     StoreValue(rl_dest, rl_result);
@@ -764,7 +768,7 @@
       // TUNING: move slow path to end & remove unconditional branch
       LIR* target1 = NewLIR0(kPseudoTargetLabel);
       // Call out to helper, which will return resolved type in kArg0
-      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
+      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx,
                               rl_method.low_reg, true);
       RegLocation rl_result = GetReturn(false);
       StoreValue(rl_dest, rl_result);
@@ -797,7 +801,7 @@
     LoadWordDisp(TargetReg(kArg2),
                  mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0));
     // Might call out to helper, which will return resolved string in kRet0
-    int r_tgt = CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveStringFromCode));
+    int r_tgt = CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveString));
     LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
     LoadConstant(TargetReg(kArg1), string_idx);
     if (cu_->instruction_set == kThumb2) {
@@ -821,7 +825,7 @@
       branch->target = target;
     } else {
       DCHECK_EQ(cu_->instruction_set, kX86);
-      CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2),
+      CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveString), TargetReg(kArg2),
                               TargetReg(kArg1), true);
     }
     GenBarrier();
@@ -845,12 +849,12 @@
   FlushAllRegs();  /* Everything to home location */
   // alloc will always check for resolution, do we also need to verify
   // access because the verifier was unable to?
-  int func_offset;
+  ThreadOffset func_offset(-1);
   if (cu_->compiler_driver->CanAccessInstantiableTypeWithoutChecks(
       cu_->method_idx, *cu_->dex_file, type_idx)) {
-    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectFromCode);
+    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObject);
   } else {
-    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck);
+    func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectWithAccessCheck);
   }
   CallRuntimeHelperImmMethod(func_offset, type_idx, true);
   RegLocation rl_result = GetReturn(false);
@@ -929,7 +933,7 @@
   if (needs_access_check) {
     // Check we have access to type_idx and if not throw IllegalAccessError,
     // returns Class* in kArg0
-    CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+    CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                          type_idx, true);
     OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
     LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
@@ -951,7 +955,7 @@
       LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
       // Not resolved
       // Call out to helper, which will return resolved type in kRet0
-      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true);
+      CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx, true);
       OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path
       LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
       // Rejoin code paths
@@ -986,7 +990,7 @@
     }
   } else {
     if (cu_->instruction_set == kThumb2) {
-      int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+      int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
       if (!type_known_abstract) {
       /* Uses conditional nullification */
         OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
@@ -1003,13 +1007,13 @@
         branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
       }
       if (cu_->instruction_set != kX86) {
-        int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+        int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
         OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));    // .ne case - arg0 <= class
         OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
         FreeTemp(r_tgt);
       } else {
         OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
-        OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+        OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivial));
       }
     }
   }
@@ -1069,7 +1073,7 @@
     // Check we have access to type_idx and if not throw IllegalAccessError,
     // returns Class* in kRet0
     // InitializeTypeAndVerifyAccess(idx, method)
-    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+    CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccess),
                             type_idx, TargetReg(kArg1), true);
     OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
   } else if (use_declaring_class) {
@@ -1089,7 +1093,7 @@
       // Not resolved
       // Call out to helper, which will return resolved type in kArg0
       // InitializeTypeFromCode(idx, method)
-      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
+      CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeType), type_idx,
                               TargetReg(kArg1), true);
       OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
       // Rejoin code paths
@@ -1109,7 +1113,7 @@
   if (!type_known_abstract) {
     branch2 = OpCmpBranch(kCondEq, TargetReg(kArg1), class_reg, NULL);
   }
-  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1),
+  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCheckCast), TargetReg(kArg1),
                           TargetReg(kArg2), true);
   /* branch target here */
   LIR* target = NewLIR0(kPseudoTargetLabel);
@@ -1168,7 +1172,7 @@
 
 void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_shift) {
-  int func_offset = -1;  // Make gcc happy
+  ThreadOffset func_offset(-1);
 
   switch (opcode) {
     case Instruction::SHL_LONG:
@@ -1303,7 +1307,7 @@
       }
       rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
     } else {
-      int func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
+      ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
       FlushAllRegs();   /* Send everything to home location */
       LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
       int r_tgt = CallHelperSetup(func_offset);
@@ -1558,7 +1562,7 @@
         FlushAllRegs();   /* Everything to home location */
         LoadValueDirectFixed(rl_src, TargetReg(kArg0));
         Clobber(TargetReg(kArg0));
-        int func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
+        ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
         CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
         if (is_div)
           rl_result = GetReturn(false);
@@ -1589,7 +1593,7 @@
   OpKind second_op = kOpBkpt;
   bool call_out = false;
   bool check_zero = false;
-  int func_offset;
+  ThreadOffset func_offset(-1);
   int ret_reg = TargetReg(kRet0);
 
   switch (opcode) {
@@ -1709,7 +1713,7 @@
   }
 }
 
-void Mir2Lir::GenConversionCall(int func_offset,
+void Mir2Lir::GenConversionCall(ThreadOffset func_offset,
                                 RegLocation rl_dest, RegLocation rl_src) {
   /*
    * Don't optimize the register usage since it calls out to support
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 1b34e99..20d683a 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -37,12 +37,12 @@
  * has a memory call operation, part 1 is a NOP for x86.  For other targets,
  * load arguments between the two parts.
  */
-int Mir2Lir::CallHelperSetup(int helper_offset) {
+int Mir2Lir::CallHelperSetup(ThreadOffset helper_offset) {
   return (cu_->instruction_set == kX86) ? 0 : LoadHelper(helper_offset);
 }
 
 /* NOTE: if r_tgt is a temp, it will be freed following use */
-LIR* Mir2Lir::CallHelper(int r_tgt, int helper_offset, bool safepoint_pc) {
+LIR* Mir2Lir::CallHelper(int r_tgt, ThreadOffset helper_offset, bool safepoint_pc) {
   LIR* call_inst;
   if (cu_->instruction_set == kX86) {
     call_inst = OpThreadMem(kOpBlx, helper_offset);
@@ -56,21 +56,22 @@
   return call_inst;
 }
 
-void Mir2Lir::CallRuntimeHelperImm(int helper_offset, int arg0, bool safepoint_pc) {
+void Mir2Lir::CallRuntimeHelperImm(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   LoadConstant(TargetReg(kArg0), arg0);
   ClobberCalleeSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperReg(int helper_offset, int arg0, bool safepoint_pc) {
+void Mir2Lir::CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   OpRegCopy(TargetReg(kArg0), arg0);
   ClobberCalleeSave();
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperRegLocation(int helper_offset, RegLocation arg0, bool safepoint_pc) {
+void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset helper_offset, RegLocation arg0,
+                                           bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   if (arg0.wide == 0) {
     LoadValueDirectFixed(arg0, TargetReg(kArg0));
@@ -81,7 +82,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperImmImm(int helper_offset, int arg0, int arg1,
+void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset helper_offset, int arg0, int arg1,
                                       bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   LoadConstant(TargetReg(kArg0), arg0);
@@ -90,7 +91,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperImmRegLocation(int helper_offset, int arg0,
+void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset helper_offset, int arg0,
                                               RegLocation arg1, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   if (arg1.wide == 0) {
@@ -103,7 +104,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperRegLocationImm(int helper_offset, RegLocation arg0, int arg1,
+void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset helper_offset, RegLocation arg0, int arg1,
                                               bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   LoadValueDirectFixed(arg0, TargetReg(kArg0));
@@ -112,7 +113,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperImmReg(int helper_offset, int arg0, int arg1,
+void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, int arg1,
                                       bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   OpRegCopy(TargetReg(kArg1), arg1);
@@ -121,8 +122,8 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperRegImm(int helper_offset, int arg0, int arg1,
-                             bool safepoint_pc) {
+void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset helper_offset, int arg0, int arg1,
+                                      bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   OpRegCopy(TargetReg(kArg0), arg0);
   LoadConstant(TargetReg(kArg1), arg1);
@@ -130,7 +131,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperImmMethod(int helper_offset, int arg0, bool safepoint_pc) {
+void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   LoadCurrMethodDirect(TargetReg(kArg1));
   LoadConstant(TargetReg(kArg0), arg0);
@@ -138,7 +139,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(int helper_offset, RegLocation arg0,
+void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset, RegLocation arg0,
                                                       RegLocation arg1, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   if (arg0.wide == 0) {
@@ -168,7 +169,8 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperRegReg(int helper_offset, int arg0, int arg1, bool safepoint_pc) {
+void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset helper_offset, int arg0, int arg1,
+                                      bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
   OpRegCopy(TargetReg(kArg0), arg0);
@@ -177,7 +179,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperRegRegImm(int helper_offset, int arg0, int arg1,
+void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, int arg0, int arg1,
                                          int arg2, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
@@ -188,7 +190,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(int helper_offset,
+void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset helper_offset,
                                                     int arg0, RegLocation arg2, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   LoadValueDirectFixed(arg2, TargetReg(kArg2));
@@ -198,7 +200,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperImmMethodImm(int helper_offset, int arg0,
+void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset helper_offset, int arg0,
                                             int arg2, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
   LoadCurrMethodDirect(TargetReg(kArg1));
@@ -208,7 +210,7 @@
   CallHelper(r_tgt, helper_offset, safepoint_pc);
 }
 
-void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(int helper_offset,
+void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset helper_offset,
                                                          int arg0, RegLocation arg1,
                                                          RegLocation arg2, bool safepoint_pc) {
   int r_tgt = CallHelperSetup(helper_offset);
@@ -470,14 +472,14 @@
     // Disable sharpening
     direct_method = 0;
   }
-  int trampoline = (cu->instruction_set == kX86) ? 0
-      : QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);
+  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);
 
   if (direct_method != 0) {
     switch (state) {
       case 0:  // Load the trampoline target [sets kInvokeTgt].
         if (cu->instruction_set != kX86) {
-          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
+          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(),
+                           cg->TargetReg(kInvokeTgt));
         }
         // Get the interface Method* [sets kArg0]
         if (direct_method != static_cast<unsigned int>(-1)) {
@@ -506,7 +508,8 @@
         cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
         // Load the trampoline target [sets kInvokeTgt].
         if (cu->instruction_set != kX86) {
-          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
+          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(),
+                           cg->TargetReg(kInvokeTgt));
         }
         break;
     case 1:  // Get method->dex_cache_resolved_methods_ [set/use kArg0]
@@ -528,7 +531,7 @@
   return state + 1;
 }
 
-static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, int trampoline,
+static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset trampoline,
                             int state, const MethodReference& target_method,
                             uint32_t method_idx) {
   Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
@@ -539,7 +542,7 @@
   if (state == 0) {
     if (cu->instruction_set != kX86) {
       // Load trampoline target
-      cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
+      cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), cg->TargetReg(kInvokeTgt));
     }
     // Load kArg0 with method index
     CHECK_EQ(cu->dex_file, target_method.dex_file);
@@ -555,7 +558,7 @@
                                 uint32_t method_idx,
                                 uintptr_t unused, uintptr_t unused2,
                                 InvokeType unused3) {
-  int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
+  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
   return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
 }
 
@@ -563,7 +566,7 @@
                                 const MethodReference& target_method,
                                 uint32_t method_idx, uintptr_t unused,
                                 uintptr_t unused2, InvokeType unused3) {
-  int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
+  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
   return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
 }
 
@@ -571,7 +574,7 @@
                                const MethodReference& target_method,
                                uint32_t method_idx, uintptr_t unused,
                                uintptr_t unused2, InvokeType unused3) {
-  int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
+  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
   return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
 }
 
@@ -579,7 +582,7 @@
                            const MethodReference& target_method,
                            uint32_t method_idx, uintptr_t unused,
                            uintptr_t unused2, InvokeType unused3) {
-  int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
+  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
   return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
 }
 
@@ -589,7 +592,7 @@
                                                 uint32_t unused,
                                                 uintptr_t unused2, uintptr_t unused3,
                                                 InvokeType unused4) {
-  int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
+  ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
   return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
 }
 
@@ -1108,9 +1111,9 @@
 bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
   RegLocation rl_dest = InlineTarget(info);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  int offset = Thread::PeerOffset().Int32Value();
+  ThreadOffset offset = Thread::PeerOffset();
   if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
-    LoadWordDisp(TargetReg(kSelf), offset, rl_result.low_reg);
+    LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.low_reg);
   } else {
     CHECK(cu_->instruction_set == kX86);
     reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset);
@@ -1406,7 +1409,7 @@
       call_inst = OpMem(kOpBlx, TargetReg(kArg0),
                         mirror::AbstractMethod::GetEntryPointFromCompiledCodeOffset().Int32Value());
     } else {
-      int trampoline = 0;
+      ThreadOffset trampoline(-1);
       switch (info->type) {
       case kInterface:
         trampoline = fast_path ? QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
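
The helper-call protocol all of the signatures above feed is two-part, as
the comment at the top of this file says: CallHelperSetup materializes the
entrypoint address (a no-op on x86), arguments are staged in between, and
CallHelper emits the branch. The one-argument wrapper from this file shows
the shape; nothing here is hypothetical except the standalone framing:

    int r_tgt = CallHelperSetup(helper_offset);  // part 1: load target (non-x86)
    LoadConstant(TargetReg(kArg0), arg0);        // stage arguments between parts
    ClobberCalleeSave();
    CallHelper(r_tgt, helper_offset, safepoint_pc);  // part 2: blx r_tgt, or a
                                                     // call through fs:[] on x86
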
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 846c055..eaae0e1 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -247,7 +247,7 @@
   GenBarrier();
   NewLIR0(kMipsCurrPC);  // Really a jal to .+8
   // Now, fill the branch delay slot with the helper load
-  int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode));
+  int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData));
   GenBarrier();  // Scheduling barrier
 
   // Construct BaseLabel and set up table base register
@@ -272,7 +272,7 @@
   LockCallTemps();  // Prepare for explicit register usage
   GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
   // Go expensive route - artLockObjectFromCode(self, obj);
-  int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode));
+  int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pLockObject));
   ClobberCalleeSave();
   LIR* call_inst = OpReg(kOpBlx, r_tgt);
   MarkSafepointPC(call_inst);
@@ -287,7 +287,7 @@
   LockCallTemps();  // Prepare for explicit register usage
   GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
   // Go expensive route - UnlockObjectFromCode(obj);
-  int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode));
+  int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pUnlockObject));
   ClobberCalleeSave();
   LIR* call_inst = OpReg(kOpBlx, r_tgt);
   MarkSafepointPC(call_inst);
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 802ff62..6100396 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -29,7 +29,7 @@
     // Required for target - codegen utilities.
     bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
                                     RegLocation rl_dest, int lit);
-    int LoadHelper(int offset);
+    int LoadHelper(ThreadOffset offset);
     LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
     LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
                                   int s_reg);
@@ -154,12 +154,12 @@
     LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
     LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
     LIR* OpTestSuspend(LIR* target);
-    LIR* OpThreadMem(OpKind op, int thread_offset);
+    LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset);
     LIR* OpVldm(int rBase, int count);
     LIR* OpVstm(int rBase, int count);
     void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
     void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
-    void OpTlsCmp(int offset, int val);
+    void OpTlsCmp(ThreadOffset offset, int val);
 
     LIR* LoadBaseDispBody(int rBase, int displacement, int r_dest, int r_dest_hi, OpSize size,
                           int s_reg);
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 3203017..9e2fea9 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -176,7 +176,7 @@
 void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
   bool wide = true;
-  int offset = -1;  // Make gcc happy.
+  ThreadOffset offset(-1);
 
   switch (opcode) {
     case Instruction::CMPL_FLOAT:
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index bd044c6..4a48c87 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -254,7 +254,7 @@
   LOG(FATAL) << "Unexpected use of OpLea for Arm";
 }
 
-void MipsMir2Lir::OpTlsCmp(int offset, int val) {
+void MipsMir2Lir::OpTlsCmp(ThreadOffset offset, int val) {
   LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
 }
 
@@ -579,7 +579,7 @@
 
   // Get the array's class.
   LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
-  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElement), r_value,
                           r_array_class, true);
   // Redo LoadValues in case they didn't survive the call.
   LoadValueDirectFixed(rl_array, r_array);  // Reload array
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 0a17fb1..7a9e91a 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -505,8 +505,8 @@
  * ensure that all branch instructions can be restarted if
  * there is a trap in the shadow.  Allocate a temp register.
  */
-int MipsMir2Lir::LoadHelper(int offset) {
-  LoadWordDisp(rMIPS_SELF, offset, r_T9);
+int MipsMir2Lir::LoadHelper(ThreadOffset offset) {
+  LoadWordDisp(rMIPS_SELF, offset.Int32Value(), r_T9);
   return r_T9;
 }
 
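The comment above LoadHelper explains the MIPS-specific shape: the helper
address goes in r_T9 because o32 PIC calling conventions expect the call
target there, and loads must not drift into a branch delay slot unless that
is intended. The fill-array sequence earlier in this change (call_mips.cc)
uses the delay slot deliberately:

    NewLIR0(kMipsCurrPC);  // really a jal to .+8
    // Fill the branch delay slot with the helper load.
    int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData));
    GenBarrier();  // scheduling barrier so nothing else moves into the slot
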
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 68b26f1..5d9ae33 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -632,7 +632,7 @@
   return StoreBaseDispBody(rBase, displacement, r_src_lo, r_src_hi, kLong);
 }
 
-LIR* MipsMir2Lir::OpThreadMem(OpKind op, int thread_offset) {
+LIR* MipsMir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) {
   LOG(FATAL) << "Unexpected use of OpThreadMem for MIPS";
   return NULL;
 }
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index a34e929..2794bf5 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -424,42 +424,42 @@
                           RegLocation rl_src, int lit);
     void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                         RegLocation rl_src1, RegLocation rl_src2);
-    void GenConversionCall(int func_offset, RegLocation rl_dest,
+    void GenConversionCall(ThreadOffset func_offset, RegLocation rl_dest,
                            RegLocation rl_src);
     void GenSuspendTest(int opt_flags);
     void GenSuspendTestAndBranch(int opt_flags, LIR* target);
 
     // Shared by all targets - implemented in gen_invoke.cc.
-    int CallHelperSetup(int helper_offset);
-    LIR* CallHelper(int r_tgt, int helper_offset, bool safepoint_pc);
-    void CallRuntimeHelperImm(int helper_offset, int arg0, bool safepoint_pc);
-    void CallRuntimeHelperReg(int helper_offset, int arg0, bool safepoint_pc);
-    void CallRuntimeHelperRegLocation(int helper_offset, RegLocation arg0,
-                                       bool safepoint_pc);
-    void CallRuntimeHelperImmImm(int helper_offset, int arg0, int arg1,
+    int CallHelperSetup(ThreadOffset helper_offset);
+    LIR* CallHelper(int r_tgt, ThreadOffset helper_offset, bool safepoint_pc);
+    void CallRuntimeHelperImm(ThreadOffset helper_offset, int arg0, bool safepoint_pc);
+    void CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool safepoint_pc);
+    void CallRuntimeHelperRegLocation(ThreadOffset helper_offset, RegLocation arg0,
+                                      bool safepoint_pc);
+    void CallRuntimeHelperImmImm(ThreadOffset helper_offset, int arg0, int arg1,
                                  bool safepoint_pc);
-    void CallRuntimeHelperImmRegLocation(int helper_offset, int arg0,
+    void CallRuntimeHelperImmRegLocation(ThreadOffset helper_offset, int arg0,
                                          RegLocation arg1, bool safepoint_pc);
-    void CallRuntimeHelperRegLocationImm(int helper_offset, RegLocation arg0,
+    void CallRuntimeHelperRegLocationImm(ThreadOffset helper_offset, RegLocation arg0,
                                          int arg1, bool safepoint_pc);
-    void CallRuntimeHelperImmReg(int helper_offset, int arg0, int arg1,
+    void CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, int arg1,
                                  bool safepoint_pc);
-    void CallRuntimeHelperRegImm(int helper_offset, int arg0, int arg1,
+    void CallRuntimeHelperRegImm(ThreadOffset helper_offset, int arg0, int arg1,
                                  bool safepoint_pc);
-    void CallRuntimeHelperImmMethod(int helper_offset, int arg0,
+    void CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0,
                                     bool safepoint_pc);
-    void CallRuntimeHelperRegLocationRegLocation(int helper_offset,
+    void CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset,
                                                  RegLocation arg0, RegLocation arg1,
                                                  bool safepoint_pc);
-    void CallRuntimeHelperRegReg(int helper_offset, int arg0, int arg1,
+    void CallRuntimeHelperRegReg(ThreadOffset helper_offset, int arg0, int arg1,
                                  bool safepoint_pc);
-    void CallRuntimeHelperRegRegImm(int helper_offset, int arg0, int arg1,
+    void CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, int arg0, int arg1,
                                     int arg2, bool safepoint_pc);
-    void CallRuntimeHelperImmMethodRegLocation(int helper_offset, int arg0,
+    void CallRuntimeHelperImmMethodRegLocation(ThreadOffset helper_offset, int arg0,
                                                RegLocation arg2, bool safepoint_pc);
-    void CallRuntimeHelperImmMethodImm(int helper_offset, int arg0, int arg2,
+    void CallRuntimeHelperImmMethodImm(ThreadOffset helper_offset, int arg0, int arg2,
                                        bool safepoint_pc);
-    void CallRuntimeHelperImmRegLocationRegLocation(int helper_offset,
+    void CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset helper_offset,
                                                     int arg0, RegLocation arg1, RegLocation arg2,
                                                     bool safepoint_pc);
     void GenInvoke(CallInfo* info);
@@ -526,7 +526,7 @@
     // Required for target - codegen helpers.
     virtual bool SmallLiteralDivide(Instruction::Code dalvik_opcode,
                                     RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
-    virtual int LoadHelper(int offset) = 0;
+    virtual int LoadHelper(ThreadOffset offset) = 0;
     virtual LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg) = 0;
     virtual LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
                                   int s_reg) = 0;
@@ -674,14 +674,14 @@
     virtual LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1,
                              int r_src2) = 0;
     virtual LIR* OpTestSuspend(LIR* target) = 0;
-    virtual LIR* OpThreadMem(OpKind op, int thread_offset) = 0;
+    virtual LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset) = 0;
     virtual LIR* OpVldm(int rBase, int count) = 0;
     virtual LIR* OpVstm(int rBase, int count) = 0;
     virtual void OpLea(int rBase, int reg1, int reg2, int scale,
                        int offset) = 0;
     virtual void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
                                int src_hi) = 0;
-    virtual void OpTlsCmp(int offset, int val) = 0;
+    virtual void OpTlsCmp(ThreadOffset offset, int val) = 0;
     virtual bool InexpensiveConstantInt(int32_t value) = 0;
     virtual bool InexpensiveConstantFloat(int32_t value) = 0;
     virtual bool InexpensiveConstantLong(int64_t value) = 0;
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 1c395de..6e3e55f 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -148,7 +148,7 @@
   NewLIR1(kX86StartOfMethod, rX86_ARG2);
   NewLIR2(kX86PcRelAdr, rX86_ARG1, reinterpret_cast<uintptr_t>(tab_rec));
   NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2);
-  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0,
+  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData), rX86_ARG0,
                           rX86_ARG1, true);
 }
 
@@ -165,7 +165,7 @@
   NewLIR3(kX86LockCmpxchgMR, rCX, mirror::Object::MonitorOffset().Int32Value(), rDX);
   LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
   // If lock is held, go the expensive route - artLockObjectFromCode(self, obj);
-  CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
+  CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pLockObject), rCX, true);
   branch->target = NewLIR0(kPseudoTargetLabel);
 }
 
@@ -185,7 +185,7 @@
   LIR* branch2 = NewLIR1(kX86Jmp8, 0);
   branch->target = NewLIR0(kPseudoTargetLabel);
   // Otherwise, go the expensive route - UnlockObjectFromCode(obj);
-  CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true);
+  CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pUnlockObject), rAX, true);
   branch2->target = NewLIR0(kPseudoTargetLabel);
 }
 
@@ -243,7 +243,7 @@
   if (!skip_overflow_check) {
     // cmp rX86_SP, fs:[stack_end_]; jcc throw_launchpad
     LIR* tgt = RawLIR(0, kPseudoThrowTarget, kThrowStackOverflow, 0, 0, 0, 0);
-    OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset().Int32Value());
+    OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset());
     OpCondBranch(kCondUlt, tgt);
     // Remember branch target - will process later
     throw_launchpads_.Insert(tgt);
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index edb5ae5..21328d5 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -29,7 +29,7 @@
     // Required for target - codegen helpers.
     bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
                                     RegLocation rl_dest, int lit);
-    int LoadHelper(int offset);
+    int LoadHelper(ThreadOffset offset);
     LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
     LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
                                   int s_reg);
@@ -154,14 +154,14 @@
     LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
     LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
     LIR* OpTestSuspend(LIR* target);
-    LIR* OpThreadMem(OpKind op, int thread_offset);
+    LIR* OpThreadMem(OpKind op, ThreadOffset thread_offset);
     LIR* OpVldm(int rBase, int count);
     LIR* OpVstm(int rBase, int count);
     void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
     void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
-    void OpTlsCmp(int offset, int val);
+    void OpTlsCmp(ThreadOffset offset, int val);
 
-    void OpRegThreadMem(OpKind op, int r_dest, int thread_offset);
+    void OpRegThreadMem(OpKind op, int r_dest, ThreadOffset thread_offset);
     void SpillCoreRegs();
     void UnSpillCoreRegs();
     static const X86EncodingMap EncodingMap[kX86Last];
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 0b4b4be..377d134 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -240,8 +240,8 @@
   NewLIR5(kX86Lea32RA, rBase, reg1, reg2, scale, offset);
 }
 
-void X86Mir2Lir::OpTlsCmp(int offset, int val) {
-  NewLIR2(kX86Cmp16TI8, offset, val);
+void X86Mir2Lir::OpTlsCmp(ThreadOffset offset, int val) {
+  NewLIR2(kX86Cmp16TI8, offset.Int32Value(), val);
 }
 
 bool X86Mir2Lir::GenInlinedCas32(CallInfo* info, bool need_write_barrier) {
@@ -285,7 +285,7 @@
 
 // Test suspend flag, return target of taken suspend branch
 LIR* X86Mir2Lir::OpTestSuspend(LIR* target) {
-  OpTlsCmp(Thread::ThreadFlagsOffset().Int32Value(), 0);
+  OpTlsCmp(Thread::ThreadFlagsOffset(), 0);
   return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
 }
 
@@ -403,7 +403,7 @@
   StoreValueWide(rl_dest, rl_result);
 }
 
-void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, int thread_offset) {
+void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, ThreadOffset thread_offset) {
   X86OpCode opcode = kX86Bkpt;
   switch (op) {
   case kOpCmp: opcode = kX86Cmp32RT;  break;
@@ -412,7 +412,7 @@
     LOG(FATAL) << "Bad opcode: " << op;
     break;
   }
-  NewLIR2(opcode, r_dest, thread_offset);
+  NewLIR2(opcode, r_dest, thread_offset.Int32Value());
 }
 
 /*
@@ -532,7 +532,7 @@
 
   // Get the array's class.
   LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
-  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElement), r_value,
                           r_array_class, true);
   // Redo LoadValues in case they didn't survive the call.
   LoadValueDirectFixed(rl_array, r_array);  // Reload array
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 2c9b3c8..699f3ae 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -524,7 +524,7 @@
 }
 
 // Not used in x86
-int X86Mir2Lir::LoadHelper(int offset) {
+int X86Mir2Lir::LoadHelper(ThreadOffset offset) {
   LOG(FATAL) << "Unexpected use of LoadHelper in x86";
   return INVALID_REG;
 }
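
Taken together, the three LoadHelper implementations show why the parameter
became a typed offset rather than an address: ARM loads the entrypoint
pointer through rARM_SELF into rARM_LR, MIPS loads it into r_T9, and x86 has
no pinned self register at all, so LoadHelper is unreachable there and calls
go through OpThreadMem's segment-relative operand instead (the fs:[...] form
visible in call_x86.cc). A sketch of the dispatch, with the per-port lines
taken from this change:

    // Sketch only: how each port consumes a ThreadOffset.
    switch (instruction_set) {
      case kThumb2:  // LoadWordDisp(rARM_SELF, offset.Int32Value(), rARM_LR);
      case kMips:    // LoadWordDisp(rMIPS_SELF, offset.Int32Value(), r_T9);
        // Load the entrypoint pointer from Thread*, then branch via the register.
        break;
      case kX86:
        // OpThreadMem(kOpBlx, offset): call directly through fs:[offset].
        break;
      default:
        break;
    }
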
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index e15995f..c519bfe 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -292,7 +292,7 @@
   return OpRegImm(op, r_dest, value);
 }
 
-LIR* X86Mir2Lir::OpThreadMem(OpKind op, int thread_offset) {
+LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) {
   X86OpCode opcode = kX86Bkpt;
   switch (op) {
     case kOpBlx: opcode = kX86CallT;  break;
@@ -300,7 +300,7 @@
       LOG(FATAL) << "Bad opcode: " << op;
       break;
   }
-  return NewLIR1(opcode, thread_offset);
+  return NewLIR1(opcode, thread_offset.Int32Value());
 }
 
 LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp) {
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index e7ba402..56b629c 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -41,9 +41,9 @@
 #include "mirror/throwable.h"
 #include "scoped_thread_state_change.h"
 #include "ScopedLocalRef.h"
-#include "stubs/stubs.h"
 #include "thread.h"
 #include "thread_pool.h"
+#include "trampolines/trampoline_compiler.h"
 #include "verifier/method_verifier.h"
 
 #if defined(ART_USE_PORTABLE_COMPILER)
@@ -433,64 +433,38 @@
   return res;
 }
 
+const std::vector<uint8_t>* CompilerDriver::CreateInterpreterToInterpreterBridge() const {
+  return CreateTrampoline(instruction_set_, kInterpreterAbi,
+                          INTERPRETER_ENTRYPOINT_OFFSET(pInterpreterToInterpreterBridge));
+}
+
+const std::vector<uint8_t>* CompilerDriver::CreateInterpreterToCompiledCodeBridge() const {
+  return CreateTrampoline(instruction_set_, kInterpreterAbi,
+                          INTERPRETER_ENTRYPOINT_OFFSET(pInterpreterToCompiledCodeBridge));
+}
+
+const std::vector<uint8_t>* CompilerDriver::CreateJniDlsymLookup() const {
+  return CreateTrampoline(instruction_set_, kJniAbi, JNI_ENTRYPOINT_OFFSET(pDlsymLookup));
+}
+
 const std::vector<uint8_t>* CompilerDriver::CreatePortableResolutionTrampoline() const {
-  switch (instruction_set_) {
-    case kArm:
-    case kThumb2:
-      return arm::CreatePortableResolutionTrampoline();
-    case kMips:
-      return mips::CreatePortableResolutionTrampoline();
-    case kX86:
-      return x86::CreatePortableResolutionTrampoline();
-    default:
-      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_;
-      return NULL;
-  }
+  return CreateTrampoline(instruction_set_, kPortableAbi,
+                          PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampoline));
+}
+
+const std::vector<uint8_t>* CompilerDriver::CreatePortableToInterpreterBridge() const {
+  return CreateTrampoline(instruction_set_, kPortableAbi,
+                          PORTABLE_ENTRYPOINT_OFFSET(pPortableToInterpreterBridge));
 }
 
 const std::vector<uint8_t>* CompilerDriver::CreateQuickResolutionTrampoline() const {
-  switch (instruction_set_) {
-    case kArm:
-    case kThumb2:
-      return arm::CreateQuickResolutionTrampoline();
-    case kMips:
-      return mips::CreateQuickResolutionTrampoline();
-    case kX86:
-      return x86::CreateQuickResolutionTrampoline();
-    default:
-      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_;
-      return NULL;
-  }
+  return CreateTrampoline(instruction_set_, kQuickAbi,
+                          QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampoline));
 }
 
-const std::vector<uint8_t>* CompilerDriver::CreateInterpreterToInterpreterEntry() const {
-  switch (instruction_set_) {
-    case kArm:
-    case kThumb2:
-      return arm::CreateInterpreterToInterpreterEntry();
-    case kMips:
-      return mips::CreateInterpreterToInterpreterEntry();
-    case kX86:
-      return x86::CreateInterpreterToInterpreterEntry();
-    default:
-      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_;
-      return NULL;
-  }
-}
-
-const std::vector<uint8_t>* CompilerDriver::CreateInterpreterToQuickEntry() const {
-  switch (instruction_set_) {
-    case kArm:
-    case kThumb2:
-      return arm::CreateInterpreterToQuickEntry();
-    case kMips:
-      return mips::CreateInterpreterToQuickEntry();
-    case kX86:
-      return x86::CreateInterpreterToQuickEntry();
-    default:
-      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set_;
-      return NULL;
-  }
+const std::vector<uint8_t>* CompilerDriver::CreateQuickToInterpreterBridge() const {
+  return CreateTrampoline(instruction_set_, kQuickAbi,
+                          QUICK_ENTRYPOINT_OFFSET(pQuickToInterpreterBridge));
 }
 
 void CompilerDriver::CompileAll(jobject class_loader,
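
The seven per-trampoline switches collapse into a single dispatcher. A minimal sketch of the shape CreateTrampoline presumably has, inferred from the call sites above rather than copied from trampolines/trampoline_compiler.cc, and assuming every *_ENTRYPOINT_OFFSET macro yields a ThreadOffset (as QUICK_ENTRYPOINT_OFFSET does in the jni_compiler.cc hunk below):

    // Hypothetical reconstruction; the per-architecture helpers are assumed.
    const std::vector<uint8_t>* CreateTrampoline(InstructionSet isa,
                                                 EntryPointCallingConvention abi,
                                                 ThreadOffset offset) {
      switch (isa) {
        case kArm:
        case kThumb2:
          return arm::CreateTrampoline(abi, offset);
        case kMips:
          return mips::CreateTrampoline(abi, offset);
        case kX86:
          return x86::CreateTrampoline(abi, offset);
        default:
          LOG(FATAL) << "Unknown InstructionSet: " << isa;
          return NULL;
      }
    }

The instruction-set switch now lives in one place, so adding a trampoline kind costs one new factory method on CompilerDriver instead of another switch.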
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 18f852d..b5222c9 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -48,6 +48,17 @@
   kNoBackend
 };
 
+enum EntryPointCallingConvention {
+  // ABI of invocations to a method's interpreter entry point.
+  kInterpreterAbi,
+  // ABI of calls to a method's native code, only used for native methods.
+  kJniAbi,
+  // ABI of calls to a method's portable code entry point.
+  kPortableAbi,
+  // ABI of calls to a method's quick code entry point.
+  kQuickAbi
+};
+
 enum DexToDexCompilationLevel {
   kDontDexToDexCompile,   // Only meaning wrt image time interpretation.
   kRequired,              // Dex-to-dex compilation required for correctness.
@@ -110,13 +121,19 @@
   CompilerTls* GetTls();
 
   // Generate the trampolines that are invoked by unresolved direct methods.
+  const std::vector<uint8_t>* CreateInterpreterToInterpreterBridge() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const std::vector<uint8_t>* CreateInterpreterToCompiledCodeBridge() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const std::vector<uint8_t>* CreateJniDlsymLookup() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const std::vector<uint8_t>* CreatePortableResolutionTrampoline() const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const std::vector<uint8_t>* CreatePortableToInterpreterBridge() const
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   const std::vector<uint8_t>* CreateQuickResolutionTrampoline() const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() const
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  const std::vector<uint8_t>* CreateInterpreterToQuickEntry() const
+  const std::vector<uint8_t>* CreateQuickToInterpreterBridge() const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   CompiledClass* GetCompiledClass(ClassReference ref) const
diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h
index 5bf0086..465139b 100644
--- a/compiler/driver/dex_compilation_unit.h
+++ b/compiler/driver/dex_compilation_unit.h
@@ -80,6 +80,10 @@
     return access_flags_;
   }
 
+  bool IsConstructor() const {
+    return ((access_flags_ & kAccConstructor) != 0);
+  }
+
   bool IsNative() const {
     return ((access_flags_ & kAccNative) != 0);
   }
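
One subtlety behind the new IsConstructor() predicate: in the dex format kAccConstructor is set on both <init> and <clinit>, so code that needs a real instance constructor must also rule out static methods. A hedged sketch of the combined check (IsStatic() is assumed to exist on DexCompilationUnit alongside IsNative()):

    // Hypothetical helper; <clinit> carries kAccConstructor | kAccStatic,
    // while <init> carries kAccConstructor only.
    bool IsInstanceConstructor(const DexCompilationUnit& unit) {
      return unit.IsConstructor() && !unit.IsStatic();
    }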
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index e73d021..550d642 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -90,11 +90,23 @@
     return false;
   }
   class_linker->RegisterOatFile(*oat_file_);
-  interpreter_to_interpreter_entry_offset_ = oat_file_->GetOatHeader().GetInterpreterToInterpreterEntryOffset();
-  interpreter_to_quick_entry_offset_ = oat_file_->GetOatHeader().GetInterpreterToQuickEntryOffset();
-  portable_resolution_trampoline_offset_ = oat_file_->GetOatHeader().GetPortableResolutionTrampolineOffset();
-  quick_resolution_trampoline_offset_ = oat_file_->GetOatHeader().GetQuickResolutionTrampolineOffset();
 
+  interpreter_to_interpreter_bridge_offset_ =
+      oat_file_->GetOatHeader().GetInterpreterToInterpreterBridgeOffset();
+  interpreter_to_compiled_code_bridge_offset_ =
+      oat_file_->GetOatHeader().GetInterpreterToCompiledCodeBridgeOffset();
+
+  jni_dlsym_lookup_offset_ = oat_file_->GetOatHeader().GetJniDlsymLookupOffset();
+
+  portable_resolution_trampoline_offset_ =
+      oat_file_->GetOatHeader().GetPortableResolutionTrampolineOffset();
+  portable_to_interpreter_bridge_offset_ =
+      oat_file_->GetOatHeader().GetPortableToInterpreterBridgeOffset();
+
+  quick_resolution_trampoline_offset_ =
+      oat_file_->GetOatHeader().GetQuickResolutionTrampolineOffset();
+  quick_to_interpreter_bridge_offset_ =
+      oat_file_->GetOatHeader().GetQuickToInterpreterBridgeOffset();
   {
     Thread::Current()->TransitionFromSuspendedToRunnable();
     PruneNonImageClasses();  // Remove junk
@@ -490,57 +502,62 @@
 void ImageWriter::FixupMethod(const AbstractMethod* orig, AbstractMethod* copy) {
   FixupInstanceFields(orig, copy);
 
-  // OatWriter replaces the code_ with an offset value.
-  // Here we readjust to a pointer relative to oat_begin_
-  if (orig->IsAbstract()) {
-    // Code for abstract methods is set to the abstract method error stub when we load the image.
-    copy->SetEntryPointFromCompiledCode(NULL);
-    copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*>
-                                       (GetOatAddress(interpreter_to_interpreter_entry_offset_)));
-    return;
-  } else {
-    copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*>
-                                       (GetOatAddress(interpreter_to_quick_entry_offset_)));
-  }
+  // OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
+  // oat_begin_.
 
-  if (orig == Runtime::Current()->GetResolutionMethod()) {
+  // The resolution method has a special trampoline to call.
+  if (UNLIKELY(orig == Runtime::Current()->GetResolutionMethod())) {
 #if defined(ART_USE_PORTABLE_COMPILER)
     copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_));
 #else
     copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_));
 #endif
-    return;
-  }
-
-  // Use original code if it exists. Otherwise, set the code pointer to the resolution trampoline.
-  const byte* code = GetOatAddress(orig->GetOatCodeOffset());
-  if (code != NULL) {
-    copy->SetEntryPointFromCompiledCode(code);
   } else {
+    // We assume all methods have code. If they don't currently then we set them to use the
+    // resolution trampoline. Abstract methods never have code and so we need to make sure their
+    // use results in an AbstractMethodError. We use the interpreter to achieve this.
+    if (UNLIKELY(orig->IsAbstract())) {
 #if defined(ART_USE_PORTABLE_COMPILER)
-    copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_));
+      copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_to_interpreter_bridge_offset_));
 #else
-    copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_));
+      copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_to_interpreter_bridge_offset_));
 #endif
-  }
+      copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*>
+      (GetOatAddress(interpreter_to_interpreter_bridge_offset_)));
+    } else {
+      copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*>
+      (GetOatAddress(interpreter_to_compiled_code_bridge_offset_)));
+      // Use original code if it exists. Otherwise, set the code pointer to the resolution
+      // trampoline.
+      const byte* code = GetOatAddress(orig->GetOatCodeOffset());
+      if (code != NULL) {
+        copy->SetEntryPointFromCompiledCode(code);
+      } else {
+#if defined(ART_USE_PORTABLE_COMPILER)
+        copy->SetEntryPointFromCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_));
+#else
+        copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_));
+#endif
+      }
+      if (orig->IsNative()) {
+        // The native method's pointer is set to a stub to lookup via dlsym.
+        // Note this is not the code_ pointer, that is handled above.
+        copy->SetNativeMethod(GetOatAddress(jni_dlsym_lookup_offset_));
+      } else {
+        // Normal (non-abstract non-native) methods have various tables to relocate.
+        uint32_t mapping_table_off = orig->GetOatMappingTableOffset();
+        const byte* mapping_table = GetOatAddress(mapping_table_off);
+        copy->SetMappingTable(reinterpret_cast<const uint32_t*>(mapping_table));
 
-  if (orig->IsNative()) {
-    // The native method's pointer is set to a stub to lookup via dlsym when we load the image.
-    // Note this is not the code_ pointer, that is handled above.
-    copy->SetNativeMethod(NULL);
-  } else {
-    // normal (non-abstract non-native) methods have mapping tables to relocate
-    uint32_t mapping_table_off = orig->GetOatMappingTableOffset();
-    const byte* mapping_table = GetOatAddress(mapping_table_off);
-    copy->SetMappingTable(reinterpret_cast<const uint32_t*>(mapping_table));
+        uint32_t vmap_table_offset = orig->GetOatVmapTableOffset();
+        const byte* vmap_table = GetOatAddress(vmap_table_offset);
+        copy->SetVmapTable(reinterpret_cast<const uint16_t*>(vmap_table));
 
-    uint32_t vmap_table_offset = orig->GetOatVmapTableOffset();
-    const byte* vmap_table = GetOatAddress(vmap_table_offset);
-    copy->SetVmapTable(reinterpret_cast<const uint16_t*>(vmap_table));
-
-    uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
-    const byte* native_gc_map = GetOatAddress(native_gc_map_offset);
-    copy->SetNativeGcMap(reinterpret_cast<const uint8_t*>(native_gc_map));
+        uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
+        const byte* native_gc_map = GetOatAddress(native_gc_map_offset);
+        copy->SetNativeGcMap(reinterpret_cast<const uint8_t*>(native_gc_map));
+      }
+    }
   }
 }
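
Summarizing the entry points the restructured FixupMethod assigns (portable vs. quick is selected by ART_USE_PORTABLE_COMPILER):

- resolution method: compiled-code entry point = resolution trampoline.
- abstract method: compiled-code entry point = portable/quick-to-interpreter bridge, interpreter entry point = interpreter-to-interpreter bridge, so any invocation lands in the interpreter and raises AbstractMethodError.
- method with oat code: compiled-code entry point = its code, interpreter entry point = interpreter-to-compiled-code bridge.
- method without oat code: compiled-code entry point = resolution trampoline.
- native (non-abstract) method: additionally gets the JNI dlsym lookup stub as its native method pointer; non-native methods instead get their mapping, vmap, and GC-map tables relocated.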
 
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index e43ec63..545534f 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -39,8 +39,8 @@
  public:
   explicit ImageWriter(const CompilerDriver& compiler_driver)
       : compiler_driver_(compiler_driver), oat_file_(NULL), image_end_(0), image_begin_(NULL),
-        oat_data_begin_(NULL), interpreter_to_interpreter_entry_offset_(0),
-        interpreter_to_quick_entry_offset_(0), portable_resolution_trampoline_offset_(0),
+        oat_data_begin_(NULL), interpreter_to_interpreter_bridge_offset_(0),
+        interpreter_to_compiled_code_bridge_offset_(0), portable_resolution_trampoline_offset_(0),
         quick_resolution_trampoline_offset_(0) {}
 
   ~ImageWriter() {}
@@ -195,10 +195,13 @@
   const byte* oat_data_begin_;
 
   // Offset from oat_data_begin_ to the stubs.
-  uint32_t interpreter_to_interpreter_entry_offset_;
-  uint32_t interpreter_to_quick_entry_offset_;
+  uint32_t interpreter_to_interpreter_bridge_offset_;
+  uint32_t interpreter_to_compiled_code_bridge_offset_;
+  uint32_t jni_dlsym_lookup_offset_;
   uint32_t portable_resolution_trampoline_offset_;
+  uint32_t portable_to_interpreter_bridge_offset_;
   uint32_t quick_resolution_trampoline_offset_;
+  uint32_t quick_to_interpreter_bridge_offset_;
 
   // DexCaches seen while scanning for fixing up CodeAndDirectMethods
   typedef std::set<mirror::DexCache*> Set;
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index b069fbd..9713fe9 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -172,8 +172,8 @@
   //    can occur. The result is the saved JNI local state that is restored by the exit call. We
   //    abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer
   //    arguments.
-  uintptr_t jni_start = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodStartSynchronized)
-                                        : QUICK_ENTRYPOINT_OFFSET(pJniMethodStart);
+  ThreadOffset jni_start = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodStartSynchronized)
+                                           : QUICK_ENTRYPOINT_OFFSET(pJniMethodStart);
   main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
   FrameOffset locked_object_sirt_offset(0);
   if (is_synchronized) {
@@ -301,7 +301,7 @@
   // 12. Call into JNI method end possibly passing a returned reference, the method and the current
   //     thread.
   end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size));
-  uintptr_t jni_end;
+  ThreadOffset jni_end(-1);
   if (reference_return) {
     // Pass result.
     jni_end = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized)
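
The change from uintptr_t to ThreadOffset here is a type-safety fix: a raw integer silently accepts any value, whereas ThreadOffset cannot be default-constructed, which is why jni_end must be seeded with a dummy -1 before the branches assign the real offset. A sketch of the assumed shape of art::ThreadOffset, consistent with the .Int32Value() calls elsewhere in this commit:

    // Assumed shape, for illustration only:
    class ThreadOffset {
     public:
      explicit ThreadOffset(int32_t value) : value_(value) {}  // no default ctor
      int32_t Int32Value() const { return value_; }
     private:
      int32_t value_;
    };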
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 5eb837b..21c5317 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -51,11 +51,14 @@
     size_oat_header_(0),
     size_oat_header_image_file_location_(0),
     size_dex_file_(0),
-    size_interpreter_to_interpreter_entry_(0),
-    size_interpreter_to_quick_entry_(0),
+    size_interpreter_to_interpreter_bridge_(0),
+    size_interpreter_to_compiled_code_bridge_(0),
+    size_jni_dlsym_lookup_(0),
     size_portable_resolution_trampoline_(0),
+    size_portable_to_interpreter_bridge_(0),
     size_quick_resolution_trampoline_(0),
-    size_stubs_alignment_(0),
+    size_quick_to_interpreter_bridge_(0),
+    size_trampoline_alignment_(0),
     size_code_size_(0),
     size_code_(0),
     size_code_alignment_(0),
@@ -176,30 +179,30 @@
   size_executable_offset_alignment_ = offset - old_offset;
   if (compiler_driver_->IsImage()) {
     InstructionSet instruction_set = compiler_driver_->GetInstructionSet();
-    oat_header_->SetInterpreterToInterpreterEntryOffset(offset);
-    interpreter_to_interpreter_entry_.reset(
-        compiler_driver_->CreateInterpreterToInterpreterEntry());
-    offset += interpreter_to_interpreter_entry_->size();
 
-    offset = CompiledCode::AlignCode(offset, instruction_set);
-    oat_header_->SetInterpreterToQuickEntryOffset(offset);
-    interpreter_to_quick_entry_.reset(compiler_driver_->CreateInterpreterToQuickEntry());
-    offset += interpreter_to_quick_entry_->size();
+    #define DO_TRAMPOLINE(field, fn_name) \
+      offset = CompiledCode::AlignCode(offset, instruction_set); \
+      oat_header_->Set ## fn_name ## Offset(offset); \
+      field.reset(compiler_driver_->Create ## fn_name()); \
+      offset += field->size();
 
-    offset = CompiledCode::AlignCode(offset, instruction_set);
-    oat_header_->SetPortableResolutionTrampolineOffset(offset);
-    portable_resolution_trampoline_.reset(compiler_driver_->CreatePortableResolutionTrampoline());
-    offset += portable_resolution_trampoline_->size();
+    DO_TRAMPOLINE(interpreter_to_interpreter_bridge_, InterpreterToInterpreterBridge);
+    DO_TRAMPOLINE(interpreter_to_compiled_code_bridge_, InterpreterToCompiledCodeBridge);
+    DO_TRAMPOLINE(jni_dlsym_lookup_, JniDlsymLookup);
+    DO_TRAMPOLINE(portable_resolution_trampoline_, PortableResolutionTrampoline);
+    DO_TRAMPOLINE(portable_to_interpreter_bridge_, PortableToInterpreterBridge);
+    DO_TRAMPOLINE(quick_resolution_trampoline_, QuickResolutionTrampoline);
+    DO_TRAMPOLINE(quick_to_interpreter_bridge_, QuickToInterpreterBridge);
 
-    offset = CompiledCode::AlignCode(offset, instruction_set);
-    oat_header_->SetQuickResolutionTrampolineOffset(offset);
-    quick_resolution_trampoline_.reset(compiler_driver_->CreateQuickResolutionTrampoline());
-    offset += quick_resolution_trampoline_->size();
+    #undef DO_TRAMPOLINE
   } else {
-    oat_header_->SetInterpreterToInterpreterEntryOffset(0);
-    oat_header_->SetInterpreterToQuickEntryOffset(0);
+    oat_header_->SetInterpreterToInterpreterBridgeOffset(0);
+    oat_header_->SetInterpreterToCompiledCodeBridgeOffset(0);
+    oat_header_->SetJniDlsymLookupOffset(0);
     oat_header_->SetPortableResolutionTrampolineOffset(0);
+    oat_header_->SetPortableToInterpreterBridgeOffset(0);
     oat_header_->SetQuickResolutionTrampolineOffset(0);
+    oat_header_->SetQuickToInterpreterBridgeOffset(0);
   }
   return offset;
 }
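
DO_TRAMPOLINE relies on token pasting: Set ## fn_name ## Offset assembles the header setter and Create ## fn_name the driver factory. For example, DO_TRAMPOLINE(quick_to_interpreter_bridge_, QuickToInterpreterBridge) expands to exactly:

    offset = CompiledCode::AlignCode(offset, instruction_set);
    oat_header_->SetQuickToInterpreterBridgeOffset(offset);
    quick_to_interpreter_bridge_.reset(compiler_driver_->CreateQuickToInterpreterBridge());
    offset += quick_to_interpreter_bridge_->size();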
@@ -469,11 +472,14 @@
     DO_STAT(size_oat_header_);
     DO_STAT(size_oat_header_image_file_location_);
     DO_STAT(size_dex_file_);
-    DO_STAT(size_interpreter_to_interpreter_entry_);
-    DO_STAT(size_interpreter_to_quick_entry_);
+    DO_STAT(size_interpreter_to_interpreter_bridge_);
+    DO_STAT(size_interpreter_to_compiled_code_bridge_);
+    DO_STAT(size_jni_dlsym_lookup_);
     DO_STAT(size_portable_resolution_trampoline_);
+    DO_STAT(size_portable_to_interpreter_bridge_);
     DO_STAT(size_quick_resolution_trampoline_);
-    DO_STAT(size_stubs_alignment_);
+    DO_STAT(size_quick_to_interpreter_bridge_);
+    DO_STAT(size_trampoline_alignment_);
     DO_STAT(size_code_size_);
     DO_STAT(size_code_);
     DO_STAT(size_code_alignment_);
@@ -545,52 +551,30 @@
   DCHECK_OFFSET();
   if (compiler_driver_->IsImage()) {
     InstructionSet instruction_set = compiler_driver_->GetInstructionSet();
-    if (!out.WriteFully(&(*interpreter_to_interpreter_entry_)[0],
-                        interpreter_to_interpreter_entry_->size())) {
-      PLOG(ERROR) << "Failed to write interpreter to interpreter entry to " << out.GetLocation();
-      return false;
-    }
-    size_interpreter_to_interpreter_entry_ += interpreter_to_interpreter_entry_->size();
-    relative_offset += interpreter_to_interpreter_entry_->size();
-    DCHECK_OFFSET();
 
-    uint32_t aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set);
-    uint32_t alignment_padding = aligned_offset - relative_offset;
-    out.Seek(alignment_padding, kSeekCurrent);
-    size_stubs_alignment_ += alignment_padding;
-    if (!out.WriteFully(&(*interpreter_to_quick_entry_)[0], interpreter_to_quick_entry_->size())) {
-      PLOG(ERROR) << "Failed to write interpreter to quick entry to " << out.GetLocation();
-      return false;
-    }
-    size_interpreter_to_quick_entry_ += interpreter_to_quick_entry_->size();
-    relative_offset += alignment_padding + interpreter_to_quick_entry_->size();
-    DCHECK_OFFSET();
+    #define DO_TRAMPOLINE(field) \
+      do { \
+        uint32_t aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set); \
+        uint32_t alignment_padding = aligned_offset - relative_offset; \
+        out.Seek(alignment_padding, kSeekCurrent); \
+        size_trampoline_alignment_ += alignment_padding; \
+        if (!out.WriteFully(&(*field)[0], field->size())) { \
+          PLOG(ERROR) << "Failed to write " # field " to " << out.GetLocation(); \
+          return false; \
+        } \
+        size_ ## field += field->size(); \
+        relative_offset += alignment_padding + field->size(); \
+        DCHECK_OFFSET(); \
+      } while (false)
 
-    aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set);
-    alignment_padding = aligned_offset - relative_offset;
-    out.Seek(alignment_padding, kSeekCurrent);
-    size_stubs_alignment_ += alignment_padding;
-    if (!out.WriteFully(&(*portable_resolution_trampoline_)[0],
-                        portable_resolution_trampoline_->size())) {
-      PLOG(ERROR) << "Failed to write portable resolution trampoline to " << out.GetLocation();
-      return false;
-    }
-    size_portable_resolution_trampoline_ += portable_resolution_trampoline_->size();
-    relative_offset += alignment_padding + portable_resolution_trampoline_->size();
-    DCHECK_OFFSET();
-
-    aligned_offset = CompiledCode::AlignCode(relative_offset, instruction_set);
-    alignment_padding = aligned_offset - relative_offset;
-    out.Seek(alignment_padding, kSeekCurrent);
-    size_stubs_alignment_ += alignment_padding;
-    if (!out.WriteFully(&(*quick_resolution_trampoline_)[0],
-                        quick_resolution_trampoline_->size())) {
-      PLOG(ERROR) << "Failed to write quick resolution trampoline to " << out.GetLocation();
-      return false;
-    }
-    size_quick_resolution_trampoline_ += quick_resolution_trampoline_->size();
-    relative_offset += alignment_padding + quick_resolution_trampoline_->size();
-    DCHECK_OFFSET();
+    DO_TRAMPOLINE(interpreter_to_interpreter_bridge_);
+    DO_TRAMPOLINE(interpreter_to_compiled_code_bridge_);
+    DO_TRAMPOLINE(jni_dlsym_lookup_);
+    DO_TRAMPOLINE(portable_resolution_trampoline_);
+    DO_TRAMPOLINE(portable_to_interpreter_bridge_);
+    DO_TRAMPOLINE(quick_resolution_trampoline_);
+    DO_TRAMPOLINE(quick_to_interpreter_bridge_);
+    #undef DO_TRAMPOLINE
   }
   return relative_offset;
 }
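
The write-side macro mirrors the sizing macro: pad up to the instruction-set code alignment, write the blob, then account for both padding and payload (note that size_ ## field pastes into the matching stats counter, e.g. size_quick_to_interpreter_bridge_). Assuming AlignCode rounds up to a power-of-two boundary, which this diff does not show, the padding arithmetic is:

    // Hypothetical align-up, matching the usual AlignCode behavior:
    uint32_t AlignUp(uint32_t offset, uint32_t alignment) {
      return (offset + alignment - 1) & ~(alignment - 1);
    }
    // alignment_padding = AlignUp(relative_offset, alignment) - relative_offset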
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index f2c5626..e6cc0bc 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -181,10 +181,13 @@
   OatHeader* oat_header_;
   std::vector<OatDexFile*> oat_dex_files_;
   std::vector<OatClass*> oat_classes_;
-  UniquePtr<const std::vector<uint8_t> > interpreter_to_interpreter_entry_;
-  UniquePtr<const std::vector<uint8_t> > interpreter_to_quick_entry_;
+  UniquePtr<const std::vector<uint8_t> > interpreter_to_interpreter_bridge_;
+  UniquePtr<const std::vector<uint8_t> > interpreter_to_compiled_code_bridge_;
+  UniquePtr<const std::vector<uint8_t> > jni_dlsym_lookup_;
   UniquePtr<const std::vector<uint8_t> > portable_resolution_trampoline_;
+  UniquePtr<const std::vector<uint8_t> > portable_to_interpreter_bridge_;
   UniquePtr<const std::vector<uint8_t> > quick_resolution_trampoline_;
+  UniquePtr<const std::vector<uint8_t> > quick_to_interpreter_bridge_;
 
   // output stats
   uint32_t size_dex_file_alignment_;
@@ -192,11 +195,14 @@
   uint32_t size_oat_header_;
   uint32_t size_oat_header_image_file_location_;
   uint32_t size_dex_file_;
-  uint32_t size_interpreter_to_interpreter_entry_;
-  uint32_t size_interpreter_to_quick_entry_;
+  uint32_t size_interpreter_to_interpreter_bridge_;
+  uint32_t size_interpreter_to_compiled_code_bridge_;
+  uint32_t size_jni_dlsym_lookup_;
   uint32_t size_portable_resolution_trampoline_;
+  uint32_t size_portable_to_interpreter_bridge_;
   uint32_t size_quick_resolution_trampoline_;
-  uint32_t size_stubs_alignment_;
+  uint32_t size_quick_to_interpreter_bridge_;
+  uint32_t size_trampoline_alignment_;
   uint32_t size_code_size_;
   uint32_t size_code_;
   uint32_t size_code_alignment_;
diff --git a/compiler/sea_ir/code_gen.cc b/compiler/sea_ir/code_gen/code_gen.cc
similarity index 93%
rename from compiler/sea_ir/code_gen.cc
rename to compiler/sea_ir/code_gen/code_gen.cc
index a513907..cb150e5 100644
--- a/compiler/sea_ir/code_gen.cc
+++ b/compiler/sea_ir/code_gen/code_gen.cc
@@ -15,8 +15,8 @@
  */
 
 #include <llvm/Support/raw_ostream.h>
-#include "sea.h"
-#include "code_gen.h"
+#include "sea_ir/ir/sea.h"
+#include "sea_ir/code_gen/code_gen.h"
 
 namespace sea_ir {
 
@@ -114,6 +114,14 @@
   std::string instr = instruction->GetInstruction()->DumpString(NULL);
   DCHECK(0);  // This whole function is useful only during development.
 }
+
+void CodeGenVisitor::Visit(UnnamedConstInstructionNode* instruction) {
+  std::string instr = instruction->GetInstruction()->DumpString(NULL);
+  std::cout << "1.Instruction: " << instr << std::endl;
+  llvm_data_->AddValue(instruction,
+      llvm::ConstantInt::get(*llvm_data_->context_, llvm::APInt(32, instruction->GetConstValue())));
+}
+
 void CodeGenVisitor::Visit(ConstInstructionNode* instruction) {
   std::string instr = instruction->GetInstruction()->DumpString(NULL);
   std::cout << "1.Instruction: " << instr << std::endl;
@@ -123,14 +131,14 @@
 void CodeGenVisitor::Visit(ReturnInstructionNode* instruction) {
   std::string instr = instruction->GetInstruction()->DumpString(NULL);
   std::cout << "2.Instruction: " << instr << std::endl;
-  DCHECK_GT(instruction->GetSSAUses().size(), 0u);
-  llvm::Value* return_value = llvm_data_->GetValue(instruction->GetSSAUses().at(0));
+  DCHECK_GT(instruction->GetSSAProducers().size(), 0u);
+  llvm::Value* return_value = llvm_data_->GetValue(instruction->GetSSAProducers().at(0));
   llvm_data_->builder_.CreateRet(return_value);
 }
 void CodeGenVisitor::Visit(IfNeInstructionNode* instruction) {
   std::string instr = instruction->GetInstruction()->DumpString(NULL);
   std::cout << "3.Instruction: " << instr << std::endl;
-  std::vector<InstructionNode*> ssa_uses = instruction->GetSSAUses();
+  std::vector<InstructionNode*> ssa_uses = instruction->GetSSAProducers();
   DCHECK_GT(ssa_uses.size(), 1u);
   InstructionNode* use_l = ssa_uses.at(0);
   llvm::Value* left = llvm_data_->GetValue(use_l);
@@ -171,7 +179,7 @@
   // since their purpose of minimizing the number of opcodes in dex is
   // not relevant for the IR. (Will need to have different
   // instruction subclasses for functions and procedures.)
-  std::vector<InstructionNode*> ssa_uses = instruction->GetSSAUses();
+  std::vector<InstructionNode*> ssa_uses = instruction->GetSSAProducers();
   InstructionNode* use_l = ssa_uses.at(0);
   llvm::Value* left = llvm_data_->GetValue(use_l);
   llvm::Value* right = llvm::ConstantInt::get(*llvm_data_->context_, llvm::APInt(32, 0));
@@ -187,7 +195,7 @@
   // TODO: Add proper checking of the matching between formal and actual signature.
   DCHECK(NULL != callee);
   std::vector<llvm::Value*> parameter_values;
-  std::vector<InstructionNode*> parameter_sources = invoke->GetSSAUses();
+  std::vector<InstructionNode*> parameter_sources = invoke->GetSSAProducers();
   for (std::vector<InstructionNode*>::const_iterator cit = parameter_sources.begin();
       cit != parameter_sources.end(); ++cit) {
     llvm::Value* parameter_value = llvm_data_->GetValue((*cit));
@@ -201,7 +209,7 @@
 void CodeGenVisitor::Visit(AddIntInstructionNode* instruction) {
   std::string instr = instruction->GetInstruction()->DumpString(NULL);
   std::cout << "7.Instruction: " << instr << std::endl;
-  std::vector<InstructionNode*> ssa_uses = instruction->GetSSAUses();
+  std::vector<InstructionNode*> ssa_uses = instruction->GetSSAProducers();
   DCHECK_GT(ssa_uses.size(), 1u);
   InstructionNode* use_l = ssa_uses.at(0);
   InstructionNode* use_r = ssa_uses.at(1);
@@ -221,7 +229,7 @@
 void CodeGenVisitor::Visit(IfEqzInstructionNode* instruction) {
   std::string instr = instruction->GetInstruction()->DumpString(NULL);
   std::cout << "9. Instruction: " << instr << "; Id: " <<instruction << std::endl;
-  std::vector<InstructionNode*> ssa_uses = instruction->GetSSAUses();
+  std::vector<InstructionNode*> ssa_uses = instruction->GetSSAProducers();
   DCHECK_GT(ssa_uses.size(), 0u);
   InstructionNode* use_l = ssa_uses.at(0);
   llvm::Value* left = llvm_data_->GetValue(use_l);
diff --git a/compiler/sea_ir/code_gen.h b/compiler/sea_ir/code_gen/code_gen.h
similarity index 93%
rename from compiler/sea_ir/code_gen.h
rename to compiler/sea_ir/code_gen/code_gen.h
index aba8d5c..b1bc4dc 100644
--- a/compiler/sea_ir/code_gen.h
+++ b/compiler/sea_ir/code_gen/code_gen.h
@@ -14,14 +14,15 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_SEA_IR_CODE_GEN_H_
-#define ART_COMPILER_SEA_IR_CODE_GEN_H_
+#ifndef ART_COMPILER_SEA_IR_CODE_GEN_CODE_GEN_H_
+#define ART_COMPILER_SEA_IR_CODE_GEN_CODE_GEN_H_
 
+#include "llvm/Analysis/Verifier.h"
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
 #include "llvm/Analysis/Verifier.h"
-#include "visitor.h"
+#include "sea_ir/ir/visitor.h"
 
 namespace sea_ir {
 // Abstracts away the containers we use to map SEA IR objects to LLVM IR objects.
@@ -100,6 +101,8 @@
   void Visit(SignatureNode* region);
   void Visit(Region* region);
   void Visit(InstructionNode* instruction) { }
+
+  void Visit(UnnamedConstInstructionNode* instruction) { }
   void Visit(ConstInstructionNode* instruction) { }
   void Visit(ReturnInstructionNode* instruction) { }
   void Visit(IfNeInstructionNode* instruction) { }
@@ -119,6 +122,7 @@
   void Visit(SignatureNode* region);
   void Visit(Region* region);
   void Visit(InstructionNode* region) { }
+  void Visit(UnnamedConstInstructionNode* instruction) { }
   void Visit(ConstInstructionNode* instruction) { }
   void Visit(ReturnInstructionNode* instruction) { }
   void Visit(IfNeInstructionNode* instruction) { }
@@ -138,10 +142,10 @@
   void Visit(SignatureNode* region);
   void Visit(Region* region);
   void Visit(InstructionNode* region);
+  void Visit(UnnamedConstInstructionNode* instruction);
   void Visit(ConstInstructionNode* instruction);
   void Visit(ReturnInstructionNode* instruction);
   void Visit(IfNeInstructionNode* instruction);
-  // void Visit(AddIntLitInstructionNode* instruction);
   void Visit(MoveResultInstructionNode* instruction);
   void Visit(InvokeStaticInstructionNode* instruction);
   void Visit(AddIntInstructionNode* instruction);
@@ -150,4 +154,4 @@
   void Visit(PhiInstructionNode* region) { }
 };
 }  // namespace sea_ir
-#endif  // ART_COMPILER_SEA_IR_CODE_GEN_H_
+#endif  // ART_COMPILER_SEA_IR_CODE_GEN_CODE_GEN_H_
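
Every visitor class must gain the new Visit(UnnamedConstInstructionNode*) overload because dispatch is resolved statically once Accept has forwarded to the visitor. The double-dispatch shape, as defined on the nodes in instruction_nodes.h below:

    // Each node subclass defines its own Accept, so 'this' has the most
    // derived static type and overload resolution picks the right Visit.
    void Accept(IRVisitor* v) {
      v->Visit(this);
      v->Traverse(this);
    }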
diff --git a/compiler/sea_ir/debug/dot_gen.cc b/compiler/sea_ir/debug/dot_gen.cc
new file mode 100644
index 0000000..9442684
--- /dev/null
+++ b/compiler/sea_ir/debug/dot_gen.cc
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "scoped_thread_state_change.h"
+#include "sea_ir/debug/dot_gen.h"
+
+namespace sea_ir {
+
+void DotGenerationVisitor::Initialize(SeaGraph* graph) {
+  graph_ = graph;
+  Region* root_region = NULL;
+  ordered_regions_.clear();
+  for (std::vector<Region*>::const_iterator cit = graph->GetRegions()->begin();
+      cit != graph->GetRegions()->end(); cit++ ) {
+    if ((*cit)->GetIDominator() == (*cit)) {
+      root_region = *cit;
+    }
+  }
+  ordered_regions_.push_back(root_region);
+  for (unsigned int id = 0; id < ordered_regions_.size(); id++) {
+    Region* current_region = ordered_regions_.at(id);
+    const std::set<Region*>* dominated_regions = current_region->GetIDominatedSet();
+    for (std::set<Region*>::const_iterator cit = dominated_regions->begin();
+        cit != dominated_regions->end(); cit++ ) {
+      ordered_regions_.push_back(*cit);
+    }
+  }
+}
+
+void DotGenerationVisitor::ToDotSSAEdges(InstructionNode* instruction) {
+  std::map<int, InstructionNode*>* definition_edges = instruction->GetSSAProducersMap();
+  // SSA definitions:
+  for (std::map<int, InstructionNode*>::const_iterator
+      def_it = definition_edges->begin();
+      def_it != definition_edges->end(); def_it++) {
+    if (NULL != def_it->second) {
+      dot_text_ += def_it->second->StringId() + " -> ";
+      dot_text_ += instruction->StringId() + "[color=gray,label=\"";
+      dot_text_ += art::StringPrintf("vR = %d", def_it->first);
+      art::SafeMap<int, const Type*>::const_iterator type_it = types_->find(def_it->second->Id());
+      if (type_it != types_->end()) {
+        art::ScopedObjectAccess soa(art::Thread::Current());
+        dot_text_ += "(" + type_it->second->Dump() + ")";
+      } else {
+        dot_text_ += "()";
+      }
+      dot_text_ += "\"] ; // SSA edge\n";
+    }
+  }
+
+  // SSA used-by:
+  if (options_->WillSaveUseEdges()) {
+    std::vector<InstructionNode*>* used_in = instruction->GetSSAConsumers();
+    for (std::vector<InstructionNode*>::const_iterator cit = used_in->begin();
+        cit != used_in->end(); cit++) {
+      dot_text_ += (*cit)->StringId() + " -> " + instruction->StringId() + "[color=gray,label=\"";
+      dot_text_ += "\"] ; // SSA used-by edge\n";
+    }
+  }
+}
+
+void DotGenerationVisitor::ToDotSSAEdges(PhiInstructionNode* instruction) {
+  std::vector<InstructionNode*> definition_edges = instruction->GetSSAProducers();
+  // SSA definitions:
+  for (std::vector<InstructionNode*>::const_iterator
+      def_it = definition_edges.begin();
+      def_it != definition_edges.end(); def_it++) {
+    if (NULL != *def_it) {
+      dot_text_ += (*def_it)->StringId() + " -> ";
+      dot_text_ += instruction->StringId() + "[color=gray,label=\"";
+      dot_text_ += art::StringPrintf("vR = %d", instruction->GetRegisterNumber());
+      art::SafeMap<int, const Type*>::const_iterator type_it = types_->find((*def_it)->Id());
+      if (type_it != types_->end()) {
+        art::ScopedObjectAccess soa(art::Thread::Current());
+        dot_text_ += "(" + type_it->second->Dump() + ")";
+      } else {
+        dot_text_ += "()";
+      }
+      dot_text_ += "\"] ; // SSA edge\n";
+    }
+  }
+
+  // SSA used-by:
+  if (options_->WillSaveUseEdges()) {
+    std::vector<InstructionNode*>* used_in = instruction->GetSSAConsumers();
+    for (std::vector<InstructionNode*>::const_iterator cit = used_in->begin();
+        cit != used_in->end(); cit++) {
+      dot_text_ += (*cit)->StringId() + " -> " + instruction->StringId() + "[color=gray,label=\"";
+      dot_text_ += "\"] ; // SSA used-by edge\n";
+    }
+  }
+}
+
+void DotGenerationVisitor::Visit(SignatureNode* parameter) {
+  dot_text_ += parameter->StringId() +" [label=\"[" + parameter->StringId() + "] signature:";
+  dot_text_ += art::StringPrintf("r%d", parameter->GetResultRegister());
+  dot_text_ += "\"] // signature node\n";
+  ToDotSSAEdges(parameter);
+}
+
+// Appends to @result a dot language formatted string representing the node and
+//    (by convention) outgoing edges, so that the composition of the ToDot() of all nodes
+//    builds a complete dot graph (without prolog and epilog though).
+void DotGenerationVisitor::Visit(Region* region) {
+  dot_text_ += "\n// Region: \nsubgraph " + region->StringId();
+  dot_text_ += " { label=\"region " + region->StringId() + "(rpo=";
+  dot_text_ += art::StringPrintf("%d", region->GetRPO());
+  if (NULL != region->GetIDominator()) {
+    dot_text_ += " dom=" + region->GetIDominator()->StringId();
+  }
+  dot_text_ += ")\";\n";
+
+  std::vector<PhiInstructionNode*>* phi_instructions = region->GetPhiNodes();
+  for (std::vector<PhiInstructionNode*>::const_iterator cit = phi_instructions->begin();
+        cit != phi_instructions->end(); cit++) {
+    dot_text_ += (*cit)->StringId() +";\n";
+  }
+  std::vector<InstructionNode*>* instructions = region->GetInstructions();
+  for (std::vector<InstructionNode*>::const_iterator cit = instructions->begin();
+        cit != instructions->end(); cit++) {
+    dot_text_ += (*cit)->StringId() +";\n";
+  }
+
+  dot_text_ += "} // End Region.\n";
+  std::vector<Region*>* successors =  region->GetSuccessors();
+  for (std::vector<Region*>::const_iterator cit = successors->begin(); cit != successors->end();
+      cit++) {
+    DCHECK(NULL != *cit) << "Null successor found for SeaNode" <<
+        region->GetLastChild()->StringId() << ".";
+    dot_text_ += region->GetLastChild()->StringId() + " -> " +
+        (*cit)->GetLastChild()->StringId() +
+        "[lhead=" + (*cit)->StringId() + ", " + "ltail=" + region->StringId() + "];\n\n";
+  }
+}
+void DotGenerationVisitor::Visit(InstructionNode* instruction) {
+  dot_text_ += "// Instruction ("+instruction->StringId()+"): \n" + instruction->StringId() +
+      " [label=\"[" + instruction->StringId() + "] " +
+      instruction->GetInstruction()->DumpString(graph_->GetDexFile()) + "\"";
+  dot_text_ += "];\n";
+  ToDotSSAEdges(instruction);
+}
+
+void DotGenerationVisitor::Visit(UnnamedConstInstructionNode* instruction) {
+  dot_text_ += "// Instruction ("+instruction->StringId()+"): \n" + instruction->StringId() +
+        " [label=\"[" + instruction->StringId() + "] const/x v-3, #" +
+        art::StringPrintf("%d", instruction->GetConstValue()) + "\"";
+  dot_text_ += "];\n";
+  ToDotSSAEdges(instruction);
+}
+
+void DotGenerationVisitor::Visit(PhiInstructionNode* phi) {
+  dot_text_ += "// PhiInstruction: \n" + phi->StringId() +
+      " [label=\"[" + phi->StringId() + "] PHI(";
+  dot_text_ += art::StringPrintf("%d", phi->GetRegisterNumber());
+  dot_text_ += ")\"";
+  dot_text_ += "];\n";
+  ToDotSSAEdges(phi);
+}
+}  // namespace sea_ir
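
For orientation, a hand-assembled fragment of the .dot text these visitors emit; the node ids and the instruction string are illustrative, real ones come from StringId() and DumpString():

    digraph seaOfNodes {
    compound=true

    // Region:
    subgraph region_5 { label="region region_5(rpo=1 dom=region_5)";
    instruction_7;
    } // End Region.
    // Instruction (instruction_7):
    instruction_7 [label="[instruction_7] return-void"];
    instruction_6 -> instruction_7[color=gray,label="vR = 1()"] ; // SSA edge
    }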
diff --git a/compiler/sea_ir/debug/dot_gen.h b/compiler/sea_ir/debug/dot_gen.h
new file mode 100644
index 0000000..675d83d
--- /dev/null
+++ b/compiler/sea_ir/debug/dot_gen.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_SEA_IR_DEBUG_DOT_GEN_H_
+#define ART_COMPILER_SEA_IR_DEBUG_DOT_GEN_H_
+
+#include "safe_map.h"
+#include "base/stringprintf.h"
+#include "file_output_stream.h"
+#include "sea_ir/ir/sea.h"
+#include "sea_ir/types/type_inference.h"
+
+namespace sea_ir {
+
+class DotConversionOptions {
+ public:
+  DotConversionOptions(): save_use_edges_(false) { }
+  bool WillSaveUseEdges() const {
+    return save_use_edges_;
+  }
+ private:
+  bool save_use_edges_;
+};
+
+class DotGenerationVisitor: public IRVisitor {
+ public:
+  explicit DotGenerationVisitor(const DotConversionOptions* const options,
+      art::SafeMap<int, const Type*>* types): graph_(), types_(types), options_(options) { }
+
+  virtual void Initialize(SeaGraph* graph);
+  // Saves the ssa def->use edges corresponding to @instruction.
+  void ToDotSSAEdges(InstructionNode* instruction);
+  void ToDotSSAEdges(PhiInstructionNode* instruction);
+  void Visit(SeaGraph* graph) {
+    dot_text_ += "digraph seaOfNodes {\ncompound=true\n";
+  }
+  void Visit(SignatureNode* parameter);
+
+  // Appends to @result a dot language formatted string representing the node and
+  //    (by convention) outgoing edges, so that the composition of the ToDot() of all nodes
+  //    builds a complete dot graph (without prolog and epilog though).
+  void Visit(Region* region);
+  void Visit(InstructionNode* instruction);
+  void Visit(PhiInstructionNode* phi);
+  void Visit(UnnamedConstInstructionNode* instruction);
+
+  void Visit(ConstInstructionNode* instruction) {
+    Visit(reinterpret_cast<InstructionNode*>(instruction));
+  }
+  void Visit(ReturnInstructionNode* instruction) {
+    Visit(reinterpret_cast<InstructionNode*>(instruction));
+  }
+  void Visit(IfNeInstructionNode* instruction) {
+    Visit(reinterpret_cast<InstructionNode*>(instruction));
+  }
+  void Visit(MoveResultInstructionNode* instruction) {
+    Visit(reinterpret_cast<InstructionNode*>(instruction));
+  }
+  void Visit(InvokeStaticInstructionNode* instruction) {
+    Visit(reinterpret_cast<InstructionNode*>(instruction));
+  }
+  void Visit(AddIntInstructionNode* instruction) {
+    Visit(reinterpret_cast<InstructionNode*>(instruction));
+  }
+  void Visit(GotoInstructionNode* instruction) {
+    Visit(reinterpret_cast<InstructionNode*>(instruction));
+  }
+  void Visit(IfEqzInstructionNode* instruction) {
+    Visit(reinterpret_cast<InstructionNode*>(instruction));
+  }
+
+  std::string GetResult() const {
+    return dot_text_;
+  }
+
+ private:
+  std::string dot_text_;
+  SeaGraph* graph_;
+  art::SafeMap<int, const Type*>* types_;
+  const DotConversionOptions* const options_;
+};
+
+// Stores options for turning a SEA IR graph to a .dot file.
+class DotConversion {
+ public:
+  DotConversion(): options_() { }
+  // Saves to @filename the .dot representation of @graph with the options @options.
+  void DumpSea(SeaGraph* graph, std::string filename,
+      art::SafeMap<int, const Type*>* types) const {
+    LOG(INFO) << "Starting to write SEA string to file.";
+    DotGenerationVisitor dgv = DotGenerationVisitor(&options_, types);
+    graph->Accept(&dgv);
+    art::File* file = art::OS::OpenFile(filename.c_str(), true, true);
+    art::FileOutputStream fos(file);
+    std::string graph_as_string = dgv.GetResult();
+    graph_as_string += "}";
+    fos.WriteFully(graph_as_string.c_str(), graph_as_string.size());
+    LOG(INFO) << "Written SEA string to file.";
+  }
+
+ private:
+  DotConversionOptions options_;
+};
+
+}  // namespace sea_ir
+#endif  // ART_COMPILER_SEA_IR_DEBUG_DOT_GEN_H_
diff --git a/compiler/sea_ir/frontend.cc b/compiler/sea_ir/frontend.cc
index 5843388..e24d07d 100644
--- a/compiler/sea_ir/frontend.cc
+++ b/compiler/sea_ir/frontend.cc
@@ -23,14 +23,17 @@
 #include "llvm/llvm_compilation_unit.h"
 #include "mirror/object.h"
 #include "runtime.h"
-#include "sea_ir/sea.h"
+#include "safe_map.h"
 
+#include "sea_ir/ir/sea.h"
+#include "sea_ir/debug/dot_gen.h"
+#include "sea_ir/types/types.h"
 namespace art {
 
 static CompiledMethod* CompileMethodWithSeaIr(CompilerDriver& compiler,
                                      const CompilerBackend compiler_backend,
                                      const DexFile::CodeItem* code_item,
-                                     uint32_t access_flags, InvokeType invoke_type,
+                                     uint32_t method_access_flags, InvokeType invoke_type,
                                      uint32_t class_def_idx, uint32_t method_idx,
                                      jobject class_loader, const DexFile& dex_file
 #if defined(ART_USE_PORTABLE_COMPILER)
@@ -40,9 +43,11 @@
   // NOTE: Instead of keeping the convention from the Dalvik frontend.cc
   //       and silencing the cpplint.py warning, I just corrected the formatting.
   VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
-  sea_ir::SeaGraph* sg = sea_ir::SeaGraph::GetCurrentGraph(dex_file);
-  sg->CompileMethod(code_item, class_def_idx, method_idx, dex_file);
-  sg->DumpSea("/tmp/temp.dot");
+  sea_ir::SeaGraph* ir_graph = sea_ir::SeaGraph::GetGraph(dex_file);
+  ir_graph->CompileMethod(code_item, class_def_idx, method_idx, method_access_flags, dex_file);
+  sea_ir::DotConversion dc;
+  SafeMap<int, const sea_ir::Type*>* types = ir_graph->ti_->GetTypeMap();
+  dc.DumpSea(ir_graph, "/tmp/temp.dot", types);
   CHECK(0 && "No SEA compiled function exists yet.");
   return NULL;
 }
@@ -50,14 +55,14 @@
 CompiledMethod* SeaIrCompileOneMethod(CompilerDriver& compiler,
                                  const CompilerBackend backend,
                                  const DexFile::CodeItem* code_item,
-                                 uint32_t access_flags,
+                                 uint32_t method_access_flags,
                                  InvokeType invoke_type,
                                  uint32_t class_def_idx,
                                  uint32_t method_idx,
                                  jobject class_loader,
                                  const DexFile& dex_file,
                                  llvm::LlvmCompilationUnit* llvm_compilation_unit) {
-  return CompileMethodWithSeaIr(compiler, backend, code_item, access_flags, invoke_type,
+  return CompileMethodWithSeaIr(compiler, backend, code_item, method_access_flags, invoke_type,
       class_def_idx, method_idx, class_loader, dex_file
 #if defined(ART_USE_PORTABLE_COMPILER)
                        , llvm_compilation_unit
@@ -68,13 +73,13 @@
 extern "C" art::CompiledMethod*
     SeaIrCompileMethod(art::CompilerDriver& compiler,
                           const art::DexFile::CodeItem* code_item,
-                          uint32_t access_flags, art::InvokeType invoke_type,
+                          uint32_t method_access_flags, art::InvokeType invoke_type,
                           uint32_t class_def_idx, uint32_t method_idx, jobject class_loader,
                           const art::DexFile& dex_file) {
   // TODO: Check method fingerprint here to determine appropriate backend type.
   //       Until then, use build default
   art::CompilerBackend backend = compiler.GetCompilerBackend();
-  return art::SeaIrCompileOneMethod(compiler, backend, code_item, access_flags, invoke_type,
+  return art::SeaIrCompileOneMethod(compiler, backend, code_item, method_access_flags, invoke_type,
                                class_def_idx, method_idx, class_loader, dex_file,
                                NULL /* use thread llvm_info */);
 }
diff --git a/compiler/sea_ir/instruction_nodes.h b/compiler/sea_ir/ir/instruction_nodes.h
similarity index 86%
rename from compiler/sea_ir/instruction_nodes.h
rename to compiler/sea_ir/ir/instruction_nodes.h
index 6f9bddd..906a10f 100644
--- a/compiler/sea_ir/instruction_nodes.h
+++ b/compiler/sea_ir/ir/instruction_nodes.h
@@ -14,11 +14,12 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_SEA_IR_INSTRUCTION_NODES_H_
-#define ART_COMPILER_SEA_IR_INSTRUCTION_NODES_H_
-#include "sea_node.h"
-#include "visitor.h"
+#ifndef ART_COMPILER_SEA_IR_IR_INSTRUCTION_NODES_H_
+#define ART_COMPILER_SEA_IR_IR_INSTRUCTION_NODES_H_
 #include "dex_instruction-inl.h"
+#include "sea_ir/ir/sea_node.h"
+#include "sea_ir/ir/visitor.h"
+
 
 namespace sea_ir {
 
@@ -48,9 +49,7 @@
   // Returns the set of registers defined by the current instruction.
   virtual std::vector<int> GetDefinitions() const;
   // Returns the set of register numbers that are used by the instruction.
-  virtual std::vector<int> GetUses();
-  // Appends to @result the .dot string representation of the instruction.
-  virtual void ToDot(std::string& result, const art::DexFile& dex_file) const;
+  virtual std::vector<int> GetUses() const;
   // Mark the current instruction as a downward exposed definition.
   void MarkAsDEDef();
   // Rename the use of @reg_no to refer to the instruction @definition,
@@ -61,7 +60,7 @@
   }
   // Returns the ordered set of Instructions that define the input operands of this instruction.
   // Precondition: SeaGraph.ConvertToSSA().
-  std::vector<InstructionNode*> GetSSAUses() {
+  virtual std::vector<InstructionNode*> GetSSAProducers() {
     std::vector<int> uses = GetUses();
     std::vector<InstructionNode*> ssa_uses;
     for (std::vector<int>::const_iterator cit = uses.begin(); cit != uses.end(); cit++) {
@@ -69,11 +68,15 @@
     }
     return ssa_uses;
   }
-
+  std::map<int, InstructionNode* >* GetSSAProducersMap() {
+    return &definition_edges_;
+  }
+  std::vector<InstructionNode*>* GetSSAConsumers() {
+    return &used_in_;
+  }
   virtual void AddSSAUse(InstructionNode* use) {
     used_in_.push_back(use);
   }
-
   void Accept(IRVisitor* v) {
     v->Visit(this);
     v->Traverse(this);
@@ -91,11 +94,10 @@
  protected:
   explicit InstructionNode(const art::Instruction* in):
       SeaNode(), instruction_(in), used_in_(), de_def_(false), region_(NULL) { }
-  void ToDotSSAEdges(std::string& result) const;
 
  protected:
   const art::Instruction* const instruction_;
-  std::map<int, InstructionNode* > definition_edges_;
+  std::map<int, InstructionNode* > definition_edges_;  // Maps used registers to their definitions.
   // Stores pointers to instructions that use the result of the current instruction.
   std::vector<InstructionNode*> used_in_;
   bool de_def_;
@@ -121,6 +123,7 @@
  public:
   explicit UnnamedConstInstructionNode(const art::Instruction* inst, int32_t value):
       ConstInstructionNode(inst), value_(value) { }
+
   void Accept(IRVisitor* v) {
     v->Visit(this);
     v->Traverse(this);
@@ -134,19 +137,6 @@
     return value_;
   }
 
-  void ToDot(std::string& result, const art::DexFile& dex_file) const {
-    std::ostringstream sstream;
-    sstream << GetConstValue();
-    const std::string value_as_string(sstream.str());
-    result += "// Instruction ("+StringId()+"): \n" + StringId() +
-        " [label=\"const/x v-3, #"+ value_as_string + "\"";
-    if (de_def_) {
-      result += "style=bold";
-    }
-    result += "];\n";
-    ToDotSSAEdges(result);
-  }
-
  private:
   const int32_t value_;
 };
@@ -176,7 +166,7 @@
 class MoveResultInstructionNode: public InstructionNode {
  public:
   explicit MoveResultInstructionNode(const art::Instruction* inst): InstructionNode(inst) { }
-  std::vector<int> GetUses() {
+  std::vector<int> GetUses() const {
     std::vector<int> uses;  // Using vector<> instead of set<> because order matters.
     uses.push_back(RETURN_REGISTER);
     return uses;
@@ -213,7 +203,7 @@
   explicit AddIntLitInstructionNode(const art::Instruction* inst):
       AddIntInstructionNode(inst) { }
 
-  std::vector<int> GetUses() {
+  std::vector<int> GetUses() const {
     std::vector<int> uses =  AddIntInstructionNode::GetUses();
     uses.push_back(UNNAMED_CONST_REGISTER);
     return uses;
@@ -245,4 +235,4 @@
   }
 };
 }  // namespace sea_ir
-#endif  // ART_COMPILER_SEA_IR_INSTRUCTION_NODES_H_
+#endif  // ART_COMPILER_SEA_IR_IR_INSTRUCTION_NODES_H_
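
The rename from GetSSAUses() to GetSSAProducers(), visible in the code_gen.cc hunks above, settles on producer/consumer terminology:

- GetSSAProducers(): the instructions defining this node's operands, in operand order.
- GetSSAProducersMap(): used register number mapped to its defining instruction (definition_edges_).
- GetSSAConsumers(): the instructions that read this node's result (used_in_).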
diff --git a/compiler/sea_ir/instruction_tools.cc b/compiler/sea_ir/ir/instruction_tools.cc
similarity index 99%
rename from compiler/sea_ir/instruction_tools.cc
rename to compiler/sea_ir/ir/instruction_tools.cc
index 9627497..143209d 100644
--- a/compiler/sea_ir/instruction_tools.cc
+++ b/compiler/sea_ir/ir/instruction_tools.cc
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include "instruction_tools.h"
+#include "sea_ir/ir/instruction_tools.h"
 
 namespace sea_ir {
 
diff --git a/compiler/sea_ir/instruction_tools.h b/compiler/sea_ir/ir/instruction_tools.h
similarity index 96%
rename from compiler/sea_ir/instruction_tools.h
rename to compiler/sea_ir/ir/instruction_tools.h
index d387100..895e017 100644
--- a/compiler/sea_ir/instruction_tools.h
+++ b/compiler/sea_ir/ir/instruction_tools.h
@@ -17,8 +17,8 @@
 #include "sea.h"
 #include "dex_instruction.h"
 
-#ifndef ART_COMPILER_SEA_IR_INSTRUCTION_TOOLS_H_
-#define ART_COMPILER_SEA_IR_INSTRUCTION_TOOLS_H_
+#ifndef ART_COMPILER_SEA_IR_IR_INSTRUCTION_TOOLS_H_
+#define ART_COMPILER_SEA_IR_IR_INSTRUCTION_TOOLS_H_
 
 
 // Note: This file has content cannibalized for SEA_IR from the MIR implementation,
@@ -122,4 +122,4 @@
   static const int instruction_attributes_[];
 };
 }  // namespace sea_ir
-#endif  // ART_COMPILER_SEA_IR_INSTRUCTION_TOOLS_H_
+#endif  // ART_COMPILER_SEA_IR_IR_INSTRUCTION_TOOLS_H_
diff --git a/compiler/sea_ir/ir/regions_test.cc b/compiler/sea_ir/ir/regions_test.cc
new file mode 100644
index 0000000..9813465
--- /dev/null
+++ b/compiler/sea_ir/ir/regions_test.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common_test.h"
+#include "sea_ir/ir/sea.h"
+
+using utils::ScopedHashtable;
+
+namespace sea_ir {
+
+class RegionsTest : public art::CommonTest {
+};
+
+TEST_F(RegionsTest, Basics) {
+  sea_ir::SeaGraph sg(*java_lang_dex_file_);
+  sea_ir::Region* root = sg.GetNewRegion();
+  sea_ir::Region* then_region = sg.GetNewRegion();
+  sea_ir::Region* else_region = sg.GetNewRegion();
+  std::vector<sea_ir::Region*>* regions = sg.GetRegions();
+  // Test that regions have been registered correctly as children of the graph.
+  EXPECT_TRUE(std::find(regions->begin(), regions->end(), root) != regions->end());
+  EXPECT_TRUE(std::find(regions->begin(), regions->end(), then_region) != regions->end());
+  EXPECT_TRUE(std::find(regions->begin(), regions->end(), else_region) != regions->end());
+  // Check that an edge is recorded correctly in both the head and the tail.
+  sg.AddEdge(root, then_region);
+  std::vector<sea_ir::Region*>* succs = root->GetSuccessors();
+  EXPECT_EQ(1U, succs->size());
+  EXPECT_EQ(then_region, succs->at(0));
+  std::vector<sea_ir::Region*>* preds = then_region->GetPredecessors();
+  EXPECT_EQ(1U, preds->size());
+  EXPECT_EQ(root, preds->at(0));
+  // Check that two edges are recorded properly for both head and tail.
+  sg.AddEdge(root, else_region);
+  succs = root->GetSuccessors();
+  EXPECT_EQ(2U, succs->size());
+  EXPECT_TRUE(std::find(succs->begin(), succs->end(), then_region) != succs->end());
+  EXPECT_TRUE(std::find(succs->begin(), succs->end(), else_region) != succs->end());
+  preds = then_region->GetPredecessors();
+  EXPECT_EQ(1U, preds->size());
+  EXPECT_EQ(root, preds->at(0));
+  preds = else_region->GetPredecessors();
+  EXPECT_EQ(1U, preds->size());
+  EXPECT_EQ(root, preds->at(0));
+}
+
+}  // namespace sea_ir
diff --git a/compiler/sea_ir/sea.cc b/compiler/sea_ir/ir/sea.cc
similarity index 84%
rename from compiler/sea_ir/sea.cc
rename to compiler/sea_ir/ir/sea.cc
index 99b21f8..08fe0e1 100644
--- a/compiler/sea_ir/sea.cc
+++ b/compiler/sea_ir/ir/sea.cc
@@ -14,10 +14,10 @@
  * limitations under the License.
  */
 #include "base/stringprintf.h"
-#include "file_output_stream.h"
-#include "instruction_tools.h"
-#include "sea.h"
-#include "code_gen.h"
+#include "sea_ir/ir/instruction_tools.h"
+#include "sea_ir/ir/sea.h"
+#include "sea_ir/code_gen/code_gen.h"
+#include "sea_ir/types/type_inference.h"
 
 #define MAX_REACHING_DEF_ITERERATIONS (10)
 // TODO: When development is done, this define should not
@@ -35,7 +35,6 @@
       cit != phis->end(); cit++) {
     (*cit)->Accept(this);
   }
-
   std::vector<InstructionNode*>* instructions = region->GetInstructions();
   for (std::vector<InstructionNode*>::const_iterator cit = instructions->begin();
       cit != instructions->end(); cit++) {
@@ -50,24 +49,10 @@
   }
 }
 
-SeaGraph* SeaGraph::GetCurrentGraph(const art::DexFile& dex_file) {
+SeaGraph* SeaGraph::GetGraph(const art::DexFile& dex_file) {
   return new SeaGraph(dex_file);
 }
 
-void SeaGraph::DumpSea(std::string filename) const {
-  LOG(INFO) << "Starting to write SEA string to file.";
-  std::string result;
-  result += "digraph seaOfNodes {\ncompound=true\n";
-  for (std::vector<Region*>::const_iterator cit = regions_.begin(); cit != regions_.end(); cit++) {
-    (*cit)->ToDot(result, dex_file_);
-  }
-  result += "}\n";
-  art::File* file = art::OS::OpenFile(filename.c_str(), true, true);
-  art::FileOutputStream fos(file);
-  fos.WriteFully(result.c_str(), result.size());
-  LOG(INFO) << "Written SEA string to file.";
-}
-
 void SeaGraph::AddEdge(Region* src, Region* dst) const {
   src->AddSuccessor(dst);
   dst->AddPredecessor(src);
@@ -191,10 +176,12 @@
 
 
 void SeaGraph::BuildMethodSeaGraph(const art::DexFile::CodeItem* code_item,
-    const art::DexFile& dex_file, uint32_t class_def_idx, uint32_t method_idx) {
+    const art::DexFile& dex_file, uint32_t class_def_idx,
+    uint32_t method_idx, uint32_t method_access_flags) {
+  code_item_ = code_item;
   class_def_idx_ = class_def_idx;
   method_idx_ = method_idx;
-
+  method_access_flags_ = method_access_flags;
   const uint16_t* code = code_item->insns_;
   const size_t size_in_code_units = code_item->insns_size_in_code_units_;
   // This maps target instruction pointers to their corresponding region objects.
@@ -225,8 +212,9 @@
   // Insert one SignatureNode per function argument,
   // to serve as placeholder definitions in dataflow analysis.
   for (unsigned int crt_offset = 0; crt_offset < code_item->ins_size_; crt_offset++) {
+    int position = crt_offset;  // TODO: Is this the correct offset in the signature?
     SignatureNode* parameter_def_node =
-        new sea_ir::SignatureNode(code_item->registers_size_ - 1 - crt_offset);
+        new sea_ir::SignatureNode(code_item->registers_size_ - 1 - crt_offset, position);
     AddParameterNode(parameter_def_node);
     r->AddChild(parameter_def_node);
   }
@@ -260,12 +248,8 @@
         }
         r = nextRegion;
       }
-      bool definesRegister = (0 != InstructionTools::instruction_attributes_[inst->Opcode()]
-          && (1 << kDA));
-      LOG(INFO)<< inst->GetDexPc(code) << "*** " << inst->DumpString(&dex_file)
-      << " region:" <<r->StringId() << "Definition?" << definesRegister << std::endl;
       r->AddChild(node);
-      }
+    }
     i += inst->SizeInCodeUnits();
   }
 }
@@ -417,10 +401,10 @@
   code_gen_postpass_visitor.Write(std::string("my_file.llvm"));
 }
 
-void SeaGraph::CompileMethod(const art::DexFile::CodeItem* code_item,
-  uint32_t class_def_idx, uint32_t method_idx, const art::DexFile& dex_file) {
+void SeaGraph::CompileMethod(const art::DexFile::CodeItem* code_item, uint32_t class_def_idx,
+    uint32_t method_idx, uint32_t method_access_flags, const art::DexFile& dex_file) {
   // Two passes: Builds the intermediate structure (non-SSA) of the sea-ir for the function.
-  BuildMethodSeaGraph(code_item, dex_file, class_def_idx, method_idx);
+  BuildMethodSeaGraph(code_item, dex_file, class_def_idx, method_idx, method_access_flags);
   // Pass: Compute reverse post-order of regions.
   ComputeRPO();
   // Multiple passes: compute immediate dominators.
@@ -433,6 +417,8 @@
   ComputeDominanceFrontier();
   // Two Passes: Phi node insertion.
   ConvertToSSA();
+  // Pass: type inference
+  ti_->ComputeTypes(this);
   // Pass: Generate LLVM IR.
   GenerateLLVM();
 }
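
For orientation, a minimal sketch of how a caller might drive the updated pipeline; CompileWithSeaIr is a hypothetical wrapper (the real call site lives in sea_ir/frontend.cc, not shown here):

    void CompileWithSeaIr(const art::DexFile::CodeItem* code_item,
                          uint32_t class_def_idx, uint32_t method_idx,
                          uint32_t method_access_flags,
                          const art::DexFile& dex_file) {
      sea_ir::SeaGraph* graph = sea_ir::SeaGraph::GetGraph(dex_file);
      // The access flags are threaded through so that type inference can
      // tell constructors and static methods apart when typing 'this'.
      graph->CompileMethod(code_item, class_def_idx, method_idx,
                           method_access_flags, dex_file);
    }
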
@@ -465,18 +451,10 @@
   regions_.push_back(r);
 }
 
-/*
-void SeaNode::AddSuccessor(Region* successor) {
-  DCHECK(successor) << "Tried to add NULL successor to SEA node.";
-  successors_.push_back(successor);
-  return;
-}
+SeaGraph::SeaGraph(const art::DexFile& df)
+    : ti_(new TypeInference()), class_def_idx_(0), method_idx_(0), method_access_flags_(),
+     regions_(), parameters_(), dex_file_(df), code_item_(NULL) { }
 
-void SeaNode::AddPredecessor(Region* predecessor) {
-  DCHECK(predecessor) << "Tried to add NULL predecessor to SEA node.";
-  predecessors_.push_back(predecessor);
-}
-*/
 void Region::AddChild(sea_ir::InstructionNode* instruction) {
   DCHECK(instruction) << "Tried to add NULL instruction to region node.";
   instructions_.push_back(instruction);
@@ -490,46 +468,6 @@
   return NULL;
 }
 
-void Region::ToDot(std::string& result, const art::DexFile& dex_file) const {
-  result += "\n// Region: \nsubgraph " + StringId() + " { label=\"region " + StringId() + "(rpo=";
-  result += art::StringPrintf("%d", rpo_number_);
-  if (NULL != GetIDominator()) {
-    result += " dom=" + GetIDominator()->StringId();
-  }
-  result += ")\";\n";
-
-  for (std::vector<PhiInstructionNode*>::const_iterator cit = phi_instructions_.begin();
-        cit != phi_instructions_.end(); cit++) {
-    result += (*cit)->StringId() +";\n";
-  }
-
-  for (std::vector<InstructionNode*>::const_iterator cit = instructions_.begin();
-        cit != instructions_.end(); cit++) {
-      result += (*cit)->StringId() +";\n";
-    }
-
-  result += "} // End Region.\n";
-
-  // Save phi-nodes.
-  for (std::vector<PhiInstructionNode*>::const_iterator cit = phi_instructions_.begin();
-      cit != phi_instructions_.end(); cit++) {
-    (*cit)->ToDot(result, dex_file);
-  }
-
-  // Save instruction nodes.
-  for (std::vector<InstructionNode*>::const_iterator cit = instructions_.begin();
-      cit != instructions_.end(); cit++) {
-    (*cit)->ToDot(result, dex_file);
-  }
-
-  for (std::vector<Region*>::const_iterator cit = successors_.begin(); cit != successors_.end();
-      cit++) {
-    DCHECK(NULL != *cit) << "Null successor found for SeaNode" << GetLastChild()->StringId() << ".";
-    result += GetLastChild()->StringId() + " -> " + (*cit)->GetLastChild()->StringId() +
-         "[lhead=" + (*cit)->StringId() + ", " + "ltail=" + StringId() + "];\n\n";
-  }
-}
-
 void Region::ComputeDownExposedDefs() {
   for (std::vector<InstructionNode*>::const_iterator inst_it = instructions_.begin();
       inst_it != instructions_.end(); inst_it++) {
@@ -692,38 +630,6 @@
   return sea_instructions;
 }
 
-void InstructionNode::ToDotSSAEdges(std::string& result) const {
-  // SSA definitions:
-  for (std::map<int, InstructionNode*>::const_iterator def_it = definition_edges_.begin();
-      def_it != definition_edges_.end(); def_it++) {
-    if (NULL != def_it->second) {
-      result += def_it->second->StringId() + " -> " + StringId() + "[color=gray,label=\"";
-      result += art::StringPrintf("vR = %d", def_it->first);
-      result += "\"] ; // ssa edge\n";
-    }
-  }
-
-  // SSA used-by:
-  if (DotConversion::SaveUseEdges()) {
-    for (std::vector<InstructionNode*>::const_iterator cit = used_in_.begin();
-        cit != used_in_.end(); cit++) {
-      result += (*cit)->StringId() + " -> " + StringId() + "[color=gray,label=\"";
-      result += "\"] ; // SSA used-by edge\n";
-    }
-  }
-}
-
-void InstructionNode::ToDot(std::string& result, const art::DexFile& dex_file) const {
-  result += "// Instruction ("+StringId()+"): \n" + StringId() +
-      " [label=\"" + instruction_->DumpString(&dex_file) + "\"";
-  if (de_def_) {
-    result += "style=bold";
-  }
-  result += "];\n";
-
-  ToDotSSAEdges(result);
-}
-
 void InstructionNode::MarkAsDEDef() {
   de_def_ = true;
 }
@@ -747,7 +653,7 @@
   return definitions;
 }
 
-std::vector<int> InstructionNode::GetUses() {
+std::vector<int> InstructionNode::GetUses() const {
   std::vector<int> uses;  // Using vector<> instead of set<> because order matters.
   if (!InstructionTools::IsDefinition(instruction_) && (instruction_->HasVRegA())) {
     int vA = instruction_->VRegA();
@@ -763,13 +669,4 @@
   }
   return uses;
 }
-
-void PhiInstructionNode::ToDot(std::string& result, const art::DexFile& dex_file) const {
-  result += "// PhiInstruction: \n" + StringId() +
-      " [label=\"" + "PHI(";
-  result += art::StringPrintf("%d", register_no_);
-  result += ")\"";
-  result += "];\n";
-  ToDotSSAEdges(result);
-}
 }  // namespace sea_ir
diff --git a/compiler/sea_ir/sea.h b/compiler/sea_ir/ir/sea.h
similarity index 82%
rename from compiler/sea_ir/sea.h
rename to compiler/sea_ir/ir/sea.h
index 5cb8424..0b20ed7 100644
--- a/compiler/sea_ir/sea.h
+++ b/compiler/sea_ir/ir/sea.h
@@ -15,17 +15,18 @@
  */
 
 
-#ifndef ART_COMPILER_SEA_IR_SEA_H_
-#define ART_COMPILER_SEA_IR_SEA_H_
+#ifndef ART_COMPILER_SEA_IR_IR_SEA_H_
+#define ART_COMPILER_SEA_IR_IR_SEA_H_
 
 #include <set>
 #include <map>
 
+#include "utils/scoped_hashtable.h"
+#include "gtest/gtest_prod.h"
 #include "dex_file.h"
 #include "dex_instruction.h"
-#include "instruction_tools.h"
-#include "instruction_nodes.h"
-#include "utils/scoped_hashtable.h"
+#include "sea_ir/ir/instruction_tools.h"
+#include "sea_ir/ir/instruction_nodes.h"
 
 namespace sea_ir {
 
@@ -35,19 +36,9 @@
   VISITING = -2
 };
 
-// Stores options for turning a SEA IR graph to a .dot file.
-class DotConversion {
- public:
-  static bool SaveUseEdges() {
-    return save_use_edges_;
-  }
-
- private:
-  static const bool save_use_edges_ =  false;  // TODO: Enable per-sea graph configuration.
-};
+class TypeInference;
 
 class Region;
-
 class InstructionNode;
 class PhiInstructionNode;
 class SignatureNode;
@@ -57,21 +48,20 @@
 // can return from the GetSSAUses() calls, instead of having missing SSA edges.
 class SignatureNode: public InstructionNode {
  public:
-  explicit SignatureNode(unsigned int parameter_register):InstructionNode(NULL),
-    parameter_register_(parameter_register) { }
-
-  void ToDot(std::string& result, const art::DexFile& dex_file) const {
-    result += StringId() +" [label=\"signature:";
-    result += art::StringPrintf("r%d", GetResultRegister());
-    result += "\"] // signature node\n";
-    ToDotSSAEdges(result);
-  }
+  // Creates a new signature node representing the initial definition of the
+  // register @parameter_register, which is the @position-th argument to the method.
+  explicit SignatureNode(unsigned int parameter_register, unsigned int position):
+    InstructionNode(NULL), parameter_register_(parameter_register), position_(position) { }
 
   int GetResultRegister() const {
     return parameter_register_;
   }
 
-  std::vector<int> GetUses() {
+  unsigned int GetPositionInSignature() const {
+    return position_;
+  }
+
+  std::vector<int> GetUses() const {
     return std::vector<int>();
   }
 
@@ -81,15 +71,15 @@
   }
 
  private:
-  unsigned int parameter_register_;
+  const unsigned int parameter_register_;
+  const unsigned int position_;     // The position of this parameter node
+                                    // in the function parameter list.
 };
 
 class PhiInstructionNode: public InstructionNode {
  public:
   explicit PhiInstructionNode(int register_no):
     InstructionNode(NULL), register_no_(register_no), definition_edges_() {}
-  // Appends to @result the .dot string representation of the instruction.
-  void ToDot(std::string& result, const art::DexFile& dex_file) const;
   // Returns the register on which this phi-function is used.
   int GetRegisterNumber() const {
     return register_no_;
@@ -113,6 +103,17 @@
     definition->AddSSAUse(this);
   }
 
+  // Returns the ordered set of Instructions that define the input operands of this instruction.
+  // Precondition: SeaGraph.ConvertToSSA().
+  std::vector<InstructionNode*> GetSSAProducers() {
+    std::vector<InstructionNode*> producers;
+    for (std::vector<std::vector<InstructionNode*>*>::const_iterator
+        cit = definition_edges_.begin(); cit != definition_edges_.end(); cit++) {
+      producers.insert(producers.end(), (*cit)->begin(), (*cit)->end());
+    }
+    return producers;
+  }
+
   // Returns the instruction that defines the phi register from predecessor
   // on position @predecessor_pos. Note that the return value is vector<> just
   // for consistency with the return value of GetSSAUses() on regular instructions,
@@ -128,6 +129,9 @@
 
  private:
   int register_no_;
+  // This vector has one entry for each predecessor, each with a single
+  // element, storing the instruction that defines the register
+  // corresponding to this phi function.
   std::vector<std::vector<InstructionNode*>*> definition_edges_;
 };
 
@@ -150,10 +154,7 @@
   std::vector<InstructionNode*>* GetInstructions() {
     return &instructions_;
   }
-  // Appends to @result a dot language formatted string representing the node and
-  //    (by convention) outgoing edges, so that the composition of theToDot() of all nodes
-  //    builds a complete dot graph (without prolog and epilog though).
-  virtual void ToDot(std::string& result, const art::DexFile& dex_file) const;
+
   // Computes Downward Exposed Definitions for the current node.
   void ComputeDownExposedDefs();
   const std::map<int, sea_ir::InstructionNode*>* GetDownExposedDefs() const;
@@ -257,16 +258,14 @@
 // and acts as starting point for visitors (ex: during code generation).
 class SeaGraph: IVisitable {
  public:
-  static SeaGraph* GetCurrentGraph(const art::DexFile&);
+  static SeaGraph* GetGraph(const art::DexFile&);
 
-  void CompileMethod(const art::DexFile::CodeItem* code_item,
-      uint32_t class_def_idx, uint32_t method_idx, const art::DexFile& dex_file);
+  void CompileMethod(const art::DexFile::CodeItem* code_item, uint32_t class_def_idx,
+      uint32_t method_idx, uint32_t method_access_flags, const art::DexFile& dex_file);
   // Returns all regions corresponding to this SeaGraph.
   std::vector<Region*>* GetRegions() {
     return &regions_;
   }
-  // Returns a string representation of the region and its Instruction children.
-  void DumpSea(std::string filename) const;
   // Recursively computes the reverse postorder value for @crt_bb and successors.
   static void ComputeRPO(Region* crt_bb, int& crt_rpo);
   // Returns the "lowest common ancestor" of @i and @j in the dominator tree.
@@ -275,13 +274,28 @@
   std::vector<SignatureNode*>* GetParameterNodes() {
     return &parameters_;
   }
+
+  const art::DexFile* GetDexFile() const {
+    return &dex_file_;
+  }
+
+  virtual void Accept(IRVisitor* visitor) {
+    visitor->Initialize(this);
+    visitor->Visit(this);
+    visitor->Traverse(this);
+  }
+
+  TypeInference* ti_;
   uint32_t class_def_idx_;
   uint32_t method_idx_;
+  uint32_t method_access_flags_;
+
+ protected:
+  explicit SeaGraph(const art::DexFile& df);
+  virtual ~SeaGraph() { }
 
  private:
-  explicit SeaGraph(const art::DexFile& df):
-    class_def_idx_(0), method_idx_(0), regions_(), parameters_(), dex_file_(df) {
-  }
+  FRIEND_TEST(RegionsTest, Basics);
   // Registers @childReg as a region belonging to the SeaGraph instance.
   void AddRegion(Region* childReg);
   // Returns new region and registers it with the  SeaGraph instance.
@@ -295,7 +309,8 @@
   // Builds the non-SSA sea-ir representation of the function @code_item from @dex_file
   // with class id @class_def_idx and method id @method_idx.
   void BuildMethodSeaGraph(const art::DexFile::CodeItem* code_item,
-      const art::DexFile& dex_file, uint32_t class_def_idx, uint32_t method_idx);
+      const art::DexFile& dex_file, uint32_t class_def_idx,
+      uint32_t method_idx, uint32_t method_access_flags);
   // Computes immediate dominators for each region.
   // Precondition: ComputeMethodSeaGraph()
   void ComputeIDominators();
@@ -320,14 +335,6 @@
   // Identifies the definitions corresponding to uses for region @node
   // by using the scoped hashtable of names @ scoped_table.
   void RenameAsSSA(Region* node, utils::ScopedHashtable<int, InstructionNode*>* scoped_table);
-
-  virtual void Accept(IRVisitor* visitor) {
-    visitor->Initialize(this);
-    visitor->Visit(this);
-    visitor->Traverse(this);
-  }
-
-  virtual ~SeaGraph() {}
   // Generate LLVM IR for the method.
   // Precondition: ConvertToSSA().
   void GenerateLLVM();
@@ -336,6 +343,7 @@
   std::vector<Region*> regions_;
   std::vector<SignatureNode*> parameters_;
   const art::DexFile& dex_file_;
+  const art::DexFile::CodeItem* code_item_;
 };
 }  // namespace sea_ir
-#endif  // ART_COMPILER_SEA_IR_SEA_H_
+#endif  // ART_COMPILER_SEA_IR_IR_SEA_H_
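
A short usage sketch for the new GetSSAProducers() accessor; phi and type_data are assumed to be in scope, and ConvertToSSA() must already have run, as the precondition comment states:

    // Collect the already-known types of a phi's producers, mirroring what
    // TypeInferenceVisitor::GetOperandTypes() does in the new files below.
    std::vector<sea_ir::InstructionNode*> producers = phi->GetSSAProducers();
    for (std::vector<sea_ir::InstructionNode*>::const_iterator cit = producers.begin();
        cit != producers.end(); cit++) {
      const sea_ir::Type* type = type_data->FindTypeOf((*cit)->Id());
      // NULL means this producer has not been typed yet; the work-list in
      // TypeInference::ComputeTypes() revisits consumers once it is.
    }
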
diff --git a/compiler/sea_ir/sea_node.h b/compiler/sea_ir/ir/sea_node.h
similarity index 82%
rename from compiler/sea_ir/sea_node.h
rename to compiler/sea_ir/ir/sea_node.h
index c13e5d6..4dab5cb 100644
--- a/compiler/sea_ir/sea_node.h
+++ b/compiler/sea_ir/ir/sea_node.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_SEA_IR_SEA_NODE_H_
-#define ART_COMPILER_SEA_IR_SEA_NODE_H_
+#ifndef ART_COMPILER_SEA_IR_IR_SEA_NODE_H_
+#define ART_COMPILER_SEA_IR_IR_SEA_NODE_H_
 
 #include "base/stringprintf.h"
 
@@ -56,10 +56,6 @@
   int Id() const {
     return id_;
   }
-  // Appends to @result a dot language formatted string representing the node and
-  //    (by convention) outgoing edges, so that the composition of theToDot() of all nodes
-  //    builds a complete dot graph, but without prolog ("digraph {") and epilog ("}").
-  virtual void ToDot(std::string& result, const art::DexFile& dex_file) const = 0;
 
   virtual ~SeaNode() { }
 
@@ -78,4 +74,4 @@
   DISALLOW_COPY_AND_ASSIGN(SeaNode);
 };
 }  // namespace sea_ir
-#endif  // ART_COMPILER_SEA_IR_SEA_NODE_H_
+#endif  // ART_COMPILER_SEA_IR_IR_SEA_NODE_H_
diff --git a/compiler/sea_ir/visitor.h b/compiler/sea_ir/ir/visitor.h
similarity index 84%
rename from compiler/sea_ir/visitor.h
rename to compiler/sea_ir/ir/visitor.h
index a4fec7b..cc7b5d1 100644
--- a/compiler/sea_ir/visitor.h
+++ b/compiler/sea_ir/ir/visitor.h
@@ -14,16 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ART_COMPILER_SEA_IR_VISITOR_H_
-#define ART_COMPILER_SEA_IR_VISITOR_H_
-
-#include "llvm/IR/IRBuilder.h"
-#include "llvm/IR/LLVMContext.h"
-#include "llvm/IR/Module.h"
-#include "llvm/Analysis/Verifier.h"
-// TODO: Separating the root visitor from the code_gen visitor
-// would allow me to not include llvm headers here.
-
+#ifndef ART_COMPILER_SEA_IR_IR_VISITOR_H_
+#define ART_COMPILER_SEA_IR_IR_VISITOR_H_
 
 namespace sea_ir {
 
@@ -32,6 +24,7 @@
 class InstructionNode;
 class PhiInstructionNode;
 class SignatureNode;
+class UnnamedConstInstructionNode;
 class ConstInstructionNode;
 class ReturnInstructionNode;
 class IfNeInstructionNode;
@@ -48,7 +41,7 @@
 
 class IRVisitor {
  public:
-  explicit IRVisitor():ordered_regions_() { }
+  explicit IRVisitor(): ordered_regions_() { }
   virtual void Initialize(SeaGraph* graph) = 0;
   virtual void Visit(SeaGraph* graph) = 0;
   virtual void Visit(Region* region) = 0;
@@ -57,16 +50,16 @@
 
   virtual void Visit(InstructionNode* region) = 0;
   virtual void Visit(ConstInstructionNode* instruction) = 0;
+  virtual void Visit(UnnamedConstInstructionNode* instruction) = 0;
   virtual void Visit(ReturnInstructionNode* instruction) = 0;
   virtual void Visit(IfNeInstructionNode* instruction) = 0;
-  // virtual void Visit(AddIntLitInstructionNode* instruction) = 0;
   virtual void Visit(MoveResultInstructionNode* instruction) = 0;
   virtual void Visit(InvokeStaticInstructionNode* instruction) = 0;
   virtual void Visit(AddIntInstructionNode* instruction) = 0;
   virtual void Visit(GotoInstructionNode* instruction) = 0;
   virtual void Visit(IfEqzInstructionNode* instruction) = 0;
 
-  // Note: This favor of visitor separates the traversal functions from the actual visiting part
+  // Note: This flavor of visitor separates the traversal functions from the actual visiting part
   //       so that the Visitor subclasses don't duplicate code and can't get the traversal wrong.
   //       The disadvantage is the increased number of functions (and calls).
   virtual void Traverse(SeaGraph* graph);
@@ -91,4 +84,4 @@
   std::vector<Region*> ordered_regions_;
 };
 }  // namespace sea_ir
-#endif  // ART_COMPILER_SEA_IR_VISITOR_H_
+#endif  // ART_COMPILER_SEA_IR_IR_VISITOR_H_
diff --git a/compiler/sea_ir/types/type_data_test.cc b/compiler/sea_ir/types/type_data_test.cc
new file mode 100644
index 0000000..a66ebce
--- /dev/null
+++ b/compiler/sea_ir/types/type_data_test.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common_test.h"
+#include "sea_ir/types/types.h"
+
+namespace sea_ir {
+
+class TypeDataTest : public art::CommonTest {
+};
+
+TEST_F(TypeDataTest, Basics) {
+  TypeData td;
+  art::verifier::RegTypeCache type_cache(false);
+  int first_instruction_id = 1;
+  int second_instruction_id = 3;
+  EXPECT_TRUE(NULL == td.FindTypeOf(first_instruction_id));
+  const Type* int_type = &type_cache.Integer();
+  const Type* byte_type = &type_cache.Byte();
+  td.SetTypeOf(first_instruction_id, int_type);
+  EXPECT_TRUE(int_type == td.FindTypeOf(first_instruction_id));
+  EXPECT_TRUE(NULL == td.FindTypeOf(second_instruction_id));
+  td.SetTypeOf(second_instruction_id, byte_type);
+  EXPECT_TRUE(int_type == td.FindTypeOf(first_instruction_id));
+  EXPECT_TRUE(byte_type == td.FindTypeOf(second_instruction_id));
+}
+
+}  // namespace sea_ir
diff --git a/compiler/sea_ir/types/type_inference.cc b/compiler/sea_ir/types/type_inference.cc
new file mode 100644
index 0000000..31d7f0f
--- /dev/null
+++ b/compiler/sea_ir/types/type_inference.cc
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "scoped_thread_state_change.h"
+#include "sea_ir/types/type_inference.h"
+#include "sea_ir/types/type_inference_visitor.h"
+#include "sea_ir/ir/sea.h"
+
+namespace sea_ir {
+
+bool TypeInference::IsPrimitiveDescriptor(char descriptor) {
+  switch (descriptor) {
+  case 'I':
+  case 'C':
+  case 'S':
+  case 'B':
+  case 'Z':
+  case 'F':
+  case 'D':
+  case 'J':
+    return true;
+  default:
+    return false;
+  }
+}
+
+FunctionTypeInfo::FunctionTypeInfo(const SeaGraph* graph, art::verifier::RegTypeCache* types)
+    : dex_file_(graph->GetDexFile()), dex_method_idx_(graph->method_idx_), type_cache_(types),
+    method_access_flags_(graph->method_access_flags_) {
+  const art::DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+  const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
+  declaring_class_ = &(type_cache_->FromDescriptor(NULL, descriptor, false));
+}
+
+FunctionTypeInfo::FunctionTypeInfo(const SeaGraph* graph, InstructionNode* inst,
+    art::verifier::RegTypeCache* types): dex_file_(graph->GetDexFile()),
+        dex_method_idx_(inst->GetInstruction()->VRegB_35c()), type_cache_(types),
+        method_access_flags_(0) {
+  // TODO: Test that GetDeclaredArgumentTypes() works correctly when using this constructor.
+  const art::DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+  const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
+  declaring_class_ = &(type_cache_->FromDescriptor(NULL, descriptor, false));
+}
+
+const Type* FunctionTypeInfo::GetReturnValueType() {
+  const art::DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
+  uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
+  const char* descriptor = dex_file_->StringByTypeIdx(return_type_idx);
+  art::ScopedObjectAccess soa(art::Thread::Current());
+  const Type& return_type = type_cache_->FromDescriptor(NULL, descriptor, false);
+  return &return_type;
+}
+
+
+
+std::vector<const Type*> FunctionTypeInfo::GetDeclaredArgumentTypes() {
+  art::ScopedObjectAccess soa(art::Thread::Current());
+  std::vector<const Type*> argument_types;
+  // Include the "this" pointer.
+  size_t cur_arg = 0;
+  if (!IsStatic()) {
+    // If this is a constructor for a class other than java.lang.Object, mark the first ("this")
+    // argument as uninitialized. This restricts field access until the superclass constructor is
+    // called.
+    const art::verifier::RegType& declaring_class = GetDeclaringClass();
+    if (IsConstructor() && !declaring_class.IsJavaLangObject()) {
+      argument_types.push_back(&(type_cache_->UninitializedThisArgument(declaring_class)));
+    } else {
+      argument_types.push_back(&declaring_class);
+    }
+    cur_arg++;
+  }
+
+  const art::DexFile::ProtoId& proto_id =
+      dex_file_->GetMethodPrototype(dex_file_->GetMethodId(dex_method_idx_));
+  art::DexFileParameterIterator iterator(*dex_file_, proto_id);
+
+  for (; iterator.HasNext(); iterator.Next()) {
+    const char* descriptor = iterator.GetDescriptor();
+    if (descriptor == NULL) {
+      LOG(FATAL) << "Error: Encountered null type descriptor for function argument.";
+    }
+    switch (descriptor[0]) {
+      case 'L':
+      case '[':
+        // We assume that reference arguments are initialized. The only way it could be otherwise
+        // (assuming the caller was verified) is if the current method is <init>, but in that case
+        // it's effectively considered initialized the instant we reach here (in the sense that we
+        // can return without doing anything or call virtual methods).
+        {
+          const Type& reg_type = type_cache_->FromDescriptor(NULL, descriptor, false);
+          argument_types.push_back(&reg_type);
+        }
+        break;
+      case 'Z':
+        argument_types.push_back(&type_cache_->Boolean());
+        break;
+      case 'C':
+        argument_types.push_back(&type_cache_->Char());
+        break;
+      case 'B':
+        argument_types.push_back(&type_cache_->Byte());
+        break;
+      case 'I':
+        argument_types.push_back(&type_cache_->Integer());
+        break;
+      case 'S':
+        argument_types.push_back(&type_cache_->Short());
+        break;
+      case 'F':
+        argument_types.push_back(&type_cache_->Float());
+        break;
+      case 'J':
+      case 'D': {
+        // TODO: Figure out strategy for two-register operands (double, long)
+        LOG(FATAL) << "Error: Type inference for 64-bit variables has not been implemented.";
+        break;
+      }
+      default:
+        LOG(FATAL) << "Error: Unexpected signature encountered during type inference.";
+    }
+    cur_arg++;
+  }
+  return argument_types;
+}
+
+// TODO: Lock is only used for dumping types (during development). Remove this for performance.
+void TypeInference::ComputeTypes(SeaGraph* graph) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  std::vector<Region*>* regions = graph->GetRegions();
+  std::list<InstructionNode*> worklist;
+  // Fill the work-list with all instructions.
+  for (std::vector<Region*>::const_iterator region_it = regions->begin();
+      region_it != regions->end(); region_it++) {
+    std::vector<PhiInstructionNode*>* phi_instructions = (*region_it)->GetPhiNodes();
+    std::copy(phi_instructions->begin(), phi_instructions->end(), std::back_inserter(worklist));
+    std::vector<InstructionNode*>* instructions = (*region_it)->GetInstructions();
+    std::copy(instructions->begin(), instructions->end(), std::back_inserter(worklist));
+  }
+  TypeInferenceVisitor tiv(graph, &type_data_, type_cache_);
+  // Sparse (SSA) fixed-point algorithm that processes each instruction in the work-list,
+  // adding consumers of instructions whose result changed type back into the work-list.
+  // Note: According to [1] list iterators should not be invalidated on insertion,
+  //       which simplifies the implementation; not 100% sure other STL implementations
+  //       maintain this invariant, but they should.
+  //       [1] http://www.sgi.com/tech/stl/List.html
+  // TODO: Making this conditional (as in sparse conditional constant propagation) would be good.
+  // TODO: Remove elements as I go.
+  for (std::list<InstructionNode*>::const_iterator instruction_it = worklist.begin();
+        instruction_it != worklist.end(); instruction_it++) {
+    std::cout << "[TI] Instruction: " << (*instruction_it)->Id() << std::endl;
+    (*instruction_it)->Accept(&tiv);
+    const Type* old_type = type_data_.FindTypeOf((*instruction_it)->Id());
+    const Type* new_type = tiv.GetType();
+    bool type_changed = (old_type != new_type);
+    if (type_changed) {
+      std::cout << " New type:" << new_type->IsIntegralTypes() << std::endl;
+      std::cout << " Descrip:" << new_type->Dump()<< " on " << (*instruction_it)->Id() << std::endl;
+      type_data_.SetTypeOf((*instruction_it)->Id(), new_type);
+      // Add SSA consumers of the current instruction to the work-list.
+      std::vector<InstructionNode*>* consumers = (*instruction_it)->GetSSAConsumers();
+      for (std::vector<InstructionNode*>::iterator consumer = consumers->begin();
+          consumer != consumers->end(); consumer++) {
+        worklist.push_back(*consumer);
+      }
+    }
+  }
+}
+}  // namespace sea_ir
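
Once ComputeTypes() reaches its fixed point, the inferred types can be read back through the map declared in type_inference.h below; a sketch, with graph and inst assumed to be in scope:

    sea_ir::TypeInference ti;
    ti.ComputeTypes(graph);
    art::SafeMap<int, const sea_ir::Type*>* types = ti.GetTypeMap();
    art::SafeMap<int, const sea_ir::Type*>::const_iterator it = types->find(inst->Id());
    if (it != types->end()) {
      const sea_ir::Type* type = it->second;  // The inferred RegType for inst.
    }
    // A missing entry means the instruction never produced a typed value.
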
diff --git a/compiler/sea_ir/types/type_inference.h b/compiler/sea_ir/types/type_inference.h
new file mode 100644
index 0000000..d951d82
--- /dev/null
+++ b/compiler/sea_ir/types/type_inference.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_SEA_IR_TYPES_TYPE_INFERENCE_H_
+#define ART_COMPILER_SEA_IR_TYPES_TYPE_INFERENCE_H_
+
+#include "safe_map.h"
+#include "dex_file-inl.h"
+#include "sea_ir/types/types.h"
+
+namespace sea_ir {
+
+class SeaGraph;
+class InstructionNode;
+
+// The type inference in SEA IR is different from the verifier in that it is concerned
+// with a rich type hierarchy (TODO) usable in optimization and does not perform
+// precise verification (which is the job of the verifier).
+class TypeInference {
+ public:
+  TypeInference() {
+    type_cache_ = new art::verifier::RegTypeCache(false);
+  }
+
+  // Computes the types for the method with SEA IR representation provided by @graph.
+  void ComputeTypes(SeaGraph* graph);
+
+  art::SafeMap<int, const Type*>* GetTypeMap() {
+    return type_data_.GetTypeMap();
+  }
+  // Returns true if @descriptor corresponds to a primitive type.
+  static bool IsPrimitiveDescriptor(char descriptor);
+
+ protected:
+  art::verifier::RegTypeCache* type_cache_;
+  TypeData type_data_;
+};
+
+// Stores information about the exact type of a function.
+class FunctionTypeInfo {
+ public:
+  // Finds method information about the method encoded by a SEA IR graph.
+  // @graph provides the input method SEA IR representation.
+  // @types provides the input cache of types from which the
+  //        parameter types of the function are found.
+  FunctionTypeInfo(const SeaGraph* graph, art::verifier::RegTypeCache* types);
+  // Finds method information about the method encoded by
+  // an invocation instruction in a SEA IR graph.
+  // @graph provides the input method SEA IR representation.
+  // @inst  is an invocation instruction for the desired method.
+  // @types provides the input cache of types from which the
+  //        parameter types of the function are found.
+  FunctionTypeInfo(const SeaGraph* graph, InstructionNode* inst,
+      art::verifier::RegTypeCache* types);
+  // Returns the ordered vector of types corresponding to the function arguments.
+  std::vector<const Type*> GetDeclaredArgumentTypes();
+  // Returns the declared return value type.
+  const Type* GetReturnValueType();
+  // Returns the type corresponding to the class that declared the method.
+  const Type& GetDeclaringClass() {
+    return *declaring_class_;
+  }
+
+  bool IsConstructor() const {
+    return (method_access_flags_ & kAccConstructor) != 0;
+  }
+
+  bool IsStatic() const {
+    return (method_access_flags_ & kAccStatic) != 0;
+  }
+
+ protected:
+  const Type* declaring_class_;
+  const art::DexFile* dex_file_;
+  const uint32_t dex_method_idx_;
+  art::verifier::RegTypeCache* type_cache_;
+  const uint32_t method_access_flags_;  // Method's access flags.
+};
+}  // namespace sea_ir
+
+#endif  // ART_COMPILER_SEA_IR_TYPES_TYPE_INFERENCE_H_
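
A sketch of the intended use of FunctionTypeInfo, mirroring what TypeInferenceVisitor::Visit(SignatureNode*) does in the next file; graph and types are assumed to be in scope:

    sea_ir::FunctionTypeInfo fti(graph, types);
    std::vector<const sea_ir::Type*> args = fti.GetDeclaredArgumentTypes();
    const sea_ir::Type* ret = fti.GetReturnValueType();
    // For non-static methods, args[0] is the declaring class (or the
    // uninitialized-this type when the method is a constructor).
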
diff --git a/compiler/sea_ir/types/type_inference_visitor.cc b/compiler/sea_ir/types/type_inference_visitor.cc
new file mode 100644
index 0000000..3da2fc1
--- /dev/null
+++ b/compiler/sea_ir/types/type_inference_visitor.cc
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "scoped_thread_state_change.h"
+#include "sea_ir/types/type_inference_visitor.h"
+#include "sea_ir/types/type_inference.h"
+#include "sea_ir/ir/sea.h"
+
+namespace sea_ir {
+
+void TypeInferenceVisitor::Visit(SignatureNode* parameter) {
+  FunctionTypeInfo fti(graph_, type_cache_);
+  std::vector<const Type*> arguments = fti.GetDeclaredArgumentTypes();
+  DCHECK_LT(parameter->GetPositionInSignature(), arguments.size())
+    << "Signature node position not present in signature.";
+  crt_type_.push_back(arguments.at(parameter->GetPositionInSignature()));
+}
+
+void TypeInferenceVisitor::Visit(UnnamedConstInstructionNode* instruction) {
+  crt_type_.push_back(&type_cache_->Integer());
+}
+
+void TypeInferenceVisitor::Visit(PhiInstructionNode* instruction) {
+  std::vector<const Type*> types_to_merge = GetOperandTypes(instruction);
+  const Type* result_type = MergeTypes(types_to_merge);
+  crt_type_.push_back(result_type);
+}
+
+void TypeInferenceVisitor::Visit(AddIntInstructionNode* instruction) {
+  std::vector<const Type*> operand_types = GetOperandTypes(instruction);
+  for (std::vector<const Type*>::const_iterator cit = operand_types.begin();
+      cit != operand_types.end(); cit++) {
+    if (*cit != NULL) {
+      DCHECK((*cit)->IsInteger());
+    }
+  }
+  crt_type_.push_back(&type_cache_->Integer());
+}
+
+void TypeInferenceVisitor::Visit(MoveResultInstructionNode* instruction) {
+  std::vector<const Type*> operand_types = GetOperandTypes(instruction);
+  const Type* operand_type = operand_types.at(0);
+  crt_type_.push_back(operand_type);
+}
+
+void TypeInferenceVisitor::Visit(InvokeStaticInstructionNode* instruction) {
+  FunctionTypeInfo fti(graph_, instruction, type_cache_);
+  const Type* result_type = fti.GetReturnValueType();
+  crt_type_.push_back(result_type);
+}
+
+std::vector<const Type*> TypeInferenceVisitor::GetOperandTypes(
+    InstructionNode* instruction) const {
+  std::vector<InstructionNode*> sources = instruction->GetSSAProducers();
+  std::vector<const Type*> types_to_merge;
+  for (std::vector<InstructionNode*>::const_iterator cit = sources.begin(); cit != sources.end();
+      cit++) {
+    const Type* source_type = type_data_->FindTypeOf((*cit)->Id());
+    if (source_type != NULL) {
+      types_to_merge.push_back(source_type);
+    }
+  }
+  return types_to_merge;
+}
+
+const Type* TypeInferenceVisitor::MergeTypes(std::vector<const Type*>& types) const {
+  const Type* type = NULL;
+  if (types.size() > 0) {
+    type = *(types.begin());
+    if (types.size() > 1) {
+      for (std::vector<const Type*>::const_iterator cit = types.begin();
+          cit != types.end(); cit++) {
+        if (!type->Equals(**cit)) {
+          type = MergeTypes(type, *cit);
+        }
+      }
+    }
+  }
+  return type;
+}
+
+const Type* TypeInferenceVisitor::MergeTypes(const Type* t1, const Type* t2) const {
+  DCHECK(t2 != NULL);
+  DCHECK(t1 != NULL);
+  art::ScopedObjectAccess soa(art::Thread::Current());
+  const Type* result = &(t1->Merge(*t2, type_cache_));
+  return result;
+}
+
+}  // namespace sea_ir
diff --git a/compiler/sea_ir/types/type_inference_visitor.h b/compiler/sea_ir/types/type_inference_visitor.h
new file mode 100644
index 0000000..200b9f0
--- /dev/null
+++ b/compiler/sea_ir/types/type_inference_visitor.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_SEA_IR_TYPES_TYPE_INFERENCE_VISITOR_H_
+#define ART_COMPILER_SEA_IR_TYPES_TYPE_INFERENCE_VISITOR_H_
+
+
+#include "dex_file-inl.h"
+#include "sea_ir/ir/visitor.h"
+#include "sea_ir/types/types.h"
+
+namespace sea_ir {
+
+// The TypeInferenceVisitor visits each instruction and computes its type, taking into account
+//   the current types of its operands. The resulting type is stored in the visitor.
+// We may be better off using a separate visitor type hierarchy that has return values
+//   or that passes data as parameters, rather than using fields to store information that should
+//   in fact be returned after visiting each element. Ideally, I would prefer to use templates
+//   to specify the returned value type, but I am not aware of a possible implementation
+//   that does not horribly duplicate the visitor infrastructure code (version 1: no return value,
+//   version 2: with template return value).
+class TypeInferenceVisitor: public IRVisitor {
+ public:
+  TypeInferenceVisitor(SeaGraph* graph, TypeData* type_data,
+      art::verifier::RegTypeCache* types):
+    graph_(graph), type_data_(type_data), type_cache_(types), crt_type_() {
+  }
+  // There are no type related actions to be performed on these classes.
+  void Initialize(SeaGraph* graph) { }
+  void Visit(SeaGraph* graph) { }
+  void Visit(Region* region) { }
+
+  void Visit(PhiInstructionNode* instruction);
+  void Visit(SignatureNode* parameter);
+  void Visit(InstructionNode* instruction) { }
+  void Visit(UnnamedConstInstructionNode* instruction);
+  void Visit(ConstInstructionNode* instruction) { }
+  void Visit(ReturnInstructionNode* instruction) { }
+  void Visit(IfNeInstructionNode* instruction) { }
+  void Visit(MoveResultInstructionNode* instruction);
+  void Visit(InvokeStaticInstructionNode* instruction);
+  void Visit(AddIntInstructionNode* instruction);
+  void Visit(GotoInstructionNode* instruction) { }
+  void Visit(IfEqzInstructionNode* instruction) { }
+
+  const Type* MergeTypes(std::vector<const Type*>& types) const;
+  const Type* MergeTypes(const Type* t1, const Type* t2) const;
+  std::vector<const Type*> GetOperandTypes(InstructionNode* instruction) const;
+  const Type* GetType() {
+    // TODO: Currently multiple defined types are not supported.
+    if (crt_type_.size() > 0) {
+      const Type* single_type = crt_type_.at(0);
+      crt_type_.clear();
+      return single_type;
+    }
+    return NULL;
+  }
+
+ protected:
+  const SeaGraph* const graph_;
+  TypeData* type_data_;
+  art::verifier::RegTypeCache* type_cache_;
+  std::vector<const Type*> crt_type_;  // Last visited instruction's type; consumed by GetType().
+};
+
+}  // namespace sea_ir
+
+#endif  // ART_COMPILER_SEA_IR_TYPES_TYPE_INFERENCE_VISITOR_H_
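
Note the one-shot contract of GetType(): it clears crt_type_ on the way out, so each Accept() is paired with exactly one GetType() call, as TypeInference::ComputeTypes() does. A minimal sketch, with tiv and inst assumed to be in scope:

    inst->Accept(&tiv);                        // Dispatches to the matching Visit() overload.
    const sea_ir::Type* type = tiv.GetType();  // Consumes the stored type.
    // Calling GetType() again before the next Accept() would return NULL.
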
diff --git a/compiler/sea_ir/types/type_inference_visitor_test.cc b/compiler/sea_ir/types/type_inference_visitor_test.cc
new file mode 100644
index 0000000..8a249eb
--- /dev/null
+++ b/compiler/sea_ir/types/type_inference_visitor_test.cc
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common_test.h"
+#include "sea_ir/types/type_inference_visitor.h"
+#include "sea_ir/ir/sea.h"
+
+namespace sea_ir {
+
+class TestInstructionNode : public InstructionNode {
+ public:
+  explicit TestInstructionNode(std::vector<InstructionNode*> prods): InstructionNode(NULL),
+      producers_(prods) { }
+  std::vector<InstructionNode*> GetSSAProducers() {
+    return producers_;
+  }
+ protected:
+  std::vector<InstructionNode*> producers_;
+};
+
+class TypeInferenceVisitorTest : public art::CommonTest {
+};
+
+TEST_F(TypeInferenceVisitorTest, MergeIntWithByte) {
+  TypeData td;
+  art::verifier::RegTypeCache type_cache(false);
+  TypeInferenceVisitor tiv(NULL, &td, &type_cache);
+  const Type* int_type = &type_cache.Integer();
+  const Type* byte_type = &type_cache.Byte();
+  const Type* ib_type = tiv.MergeTypes(int_type, byte_type);
+  const Type* bi_type = tiv.MergeTypes(byte_type, int_type);
+  EXPECT_TRUE(ib_type == int_type);
+  EXPECT_TRUE(bi_type == int_type);
+}
+
+TEST_F(TypeInferenceVisitorTest, MergeIntWithShort) {
+  TypeData td;
+  art::verifier::RegTypeCache type_cache(false);
+  TypeInferenceVisitor tiv(NULL, &td, &type_cache);
+  const Type* int_type = &type_cache.Integer();
+  const Type* short_type = &type_cache.Short();
+  const Type* is_type = tiv.MergeTypes(int_type, short_type);
+  const Type* si_type = tiv.MergeTypes(short_type, int_type);
+  EXPECT_TRUE(is_type == int_type);
+  EXPECT_TRUE(si_type == int_type);
+}
+
+TEST_F(TypeInferenceVisitorTest, MergeMultipleInts) {
+  int N = 10;  // Number of types to merge.
+  TypeData td;
+  art::verifier::RegTypeCache type_cache(false);
+  TypeInferenceVisitor tiv(NULL, &td, &type_cache);
+  std::vector<const Type*> types;
+  for (int i = 0; i < N; i++) {
+    const Type* new_type = &type_cache.Integer();
+    types.push_back(new_type);
+  }
+  const Type* merged_type = tiv.MergeTypes(types);
+  EXPECT_TRUE(merged_type == &type_cache.Integer());
+}
+
+TEST_F(TypeInferenceVisitorTest, MergeMultipleShorts) {
+  int N = 10;  // Number of types to merge.
+  TypeData td;
+  art::verifier::RegTypeCache type_cache(false);
+  TypeInferenceVisitor tiv(NULL, &td, &type_cache);
+  std::vector<const Type*> types;
+  for (int i = 0; i < N; i++) {
+    const Type* new_type = &type_cache.Short();
+    types.push_back(new_type);
+  }
+  const Type* merged_type = tiv.MergeTypes(types);
+  EXPECT_TRUE(merged_type == &type_cache.Short());
+}
+
+TEST_F(TypeInferenceVisitorTest, MergeMultipleIntsWithShorts) {
+  int N = 10;  // Number of types to merge.
+  TypeData td;
+  art::verifier::RegTypeCache type_cache(false);
+  TypeInferenceVisitor tiv(NULL, &td, &type_cache);
+  std::vector<const Type*> types;
+  for (int i = 0; i < N; i++) {
+    const Type* short_type = &type_cache.Short();
+    const Type* int_type = &type_cache.Integer();
+    types.push_back(short_type);
+    types.push_back(int_type);
+  }
+  const Type* merged_type = tiv.MergeTypes(types);
+  EXPECT_TRUE(merged_type == &type_cache.Integer());
+}
+
+TEST_F(TypeInferenceVisitorTest, GetOperandTypes) {
+  int N = 10;  // Number of types to merge.
+  TypeData td;
+  art::verifier::RegTypeCache type_cache(false);
+  TypeInferenceVisitor tiv(NULL, &td, &type_cache);
+  std::vector<const Type*> types;
+  std::vector<InstructionNode*> preds;
+  for (int i = 0; i < N; i++) {
+    const Type* short_type = &type_cache.Short();
+    const Type* int_type = &type_cache.Integer();
+    TestInstructionNode* short_inst =
+        new TestInstructionNode(std::vector<InstructionNode*>());
+    TestInstructionNode* int_inst =
+        new TestInstructionNode(std::vector<InstructionNode*>());
+    preds.push_back(short_inst);
+    preds.push_back(int_inst);
+    td.SetTypeOf(short_inst->Id(), short_type);
+    td.SetTypeOf(int_inst->Id(), int_type);
+    types.push_back(short_type);
+    types.push_back(int_type);
+  }
+  TestInstructionNode* inst_to_test = new TestInstructionNode(preds);
+  std::vector<const Type*> result = tiv.GetOperandTypes(inst_to_test);
+  EXPECT_TRUE(result.size() == types.size());
+  EXPECT_TRUE(true == std::equal(types.begin(), types.begin() + 2, result.begin()));
+}
+
+
+}  // namespace sea_ir
diff --git a/compiler/sea_ir/types/types.h b/compiler/sea_ir/types/types.h
new file mode 100644
index 0000000..64f2524
--- /dev/null
+++ b/compiler/sea_ir/types/types.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_SEA_IR_TYPES_TYPES_H_
+#define ART_COMPILER_SEA_IR_TYPES_TYPES_H_
+
+#include "safe_map.h"
+#include "verifier/reg_type.h"
+#include "verifier/reg_type_cache.h"
+
+namespace sea_ir {
+
+// TODO: Replace typedef with an actual class implementation when we have more types.
+typedef art::verifier::RegType Type;
+
+// Stores information about the result type of each instruction.
+// Note: Main purpose is to encapsulate the map<instruction id, type*>,
+//       so that we can replace the underlying storage at any time.
+class TypeData {
+ public:
+  art::SafeMap<int, const Type*>* GetTypeMap() {
+    return &type_map_;
+  }
+  // Returns the type associated with the instruction whose id is @instruction_id.
+  const Type* FindTypeOf(int instruction_id) {
+    art::SafeMap<int, const Type*>::const_iterator result_it = type_map_.find(instruction_id);
+    if (type_map_.end() != result_it) {
+      return result_it->second;
+    }
+    return NULL;
+  }
+
+  // Saves the fact that instruction @instruction_id produces a value of type @type.
+  void SetTypeOf(int instruction_id, const Type* type) {
+    type_map_.Overwrite(instruction_id, type);
+  }
+
+ private:
+  art::SafeMap<int, const Type*> type_map_;
+};
+
+
+
+}  // namespace sea_ir
+#endif  // ART_COMPILER_SEA_IR_TYPES_TYPES_H_
diff --git a/compiler/stubs/portable/stubs.cc b/compiler/stubs/portable/stubs.cc
deleted file mode 100644
index def43e2..0000000
--- a/compiler/stubs/portable/stubs.cc
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "stubs/stubs.h"
-
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "jni_internal.h"
-#include "utils/arm/assembler_arm.h"
-#include "utils/mips/assembler_mips.h"
-#include "utils/x86/assembler_x86.h"
-#include "stack_indirect_reference_table.h"
-#include "sirt_ref.h"
-
-#define __ assembler->
-
-namespace art {
-
-namespace arm {
-const std::vector<uint8_t>* CreatePortableResolutionTrampoline() {
-  UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
-  RegList save = (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3) | (1 << LR);
-
-  __ PushList(save);
-  __ LoadFromOffset(kLoadWord, R12, TR,
-                    PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
-  __ mov(R3, ShifterOperand(TR));  // Pass Thread::Current() in R3
-  __ mov(R2, ShifterOperand(SP));  // Pass sp for Method** callee_addr
-  __ IncreaseFrameSize(12);         // 3 words of space for alignment
-  // Call to resolution trampoline (callee, receiver, callee_addr, Thread*)
-  __ blx(R12);
-  __ mov(R12, ShifterOperand(R0));  // Save code address returned into R12
-  __ DecreaseFrameSize(12);
-  __ PopList(save);
-  __ cmp(R12, ShifterOperand(0));
-  __ bx(R12, NE);                   // If R12 != 0 tail call method's code
-  __ bx(LR);                        // Return to caller to handle exception
-
-  assembler->EmitSlowPaths();
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
-  assembler->FinalizeInstructions(code);
-
-  return resolution_trampoline.release();
-}
-}  // namespace arm
-
-namespace mips {
-const std::vector<uint8_t>* CreatePortableResolutionTrampoline() {
-  UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
-  // Build frame and save argument registers and RA.
-  __ AddConstant(SP, SP, -32);
-  __ StoreToOffset(kStoreWord, RA, SP, 28);
-  __ StoreToOffset(kStoreWord, A3, SP, 12);
-  __ StoreToOffset(kStoreWord, A2, SP, 8);
-  __ StoreToOffset(kStoreWord, A1, SP, 4);
-  __ StoreToOffset(kStoreWord, A0, SP, 0);
-
-  __ LoadFromOffset(kLoadWord, T9, S1,
-                    PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
-  __ Move(A3, S1);  // Pass Thread::Current() in A3
-  __ Move(A2, SP);  // Pass SP for Method** callee_addr
-  __ Jalr(T9);  // Call to resolution trampoline (callee, receiver, callee_addr, Thread*)
-
-  // Restore frame, argument registers, and RA.
-  __ LoadFromOffset(kLoadWord, A0, SP, 0);
-  __ LoadFromOffset(kLoadWord, A1, SP, 4);
-  __ LoadFromOffset(kLoadWord, A2, SP, 8);
-  __ LoadFromOffset(kLoadWord, A3, SP, 12);
-  __ LoadFromOffset(kLoadWord, RA, SP, 28);
-  __ AddConstant(SP, SP, 32);
-
-  Label resolve_fail;
-  __ EmitBranch(V0, ZERO, &resolve_fail, true);
-  __ Jr(V0);  // If V0 != 0 tail call method's code
-  __ Bind(&resolve_fail, false);
-  __ Jr(RA);  // Return to caller to handle exception
-
-  assembler->EmitSlowPaths();
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
-  assembler->FinalizeInstructions(code);
-
-  return resolution_trampoline.release();
-}
-}  // namespace mips
-
-namespace x86 {
-const std::vector<uint8_t>* CreatePortableResolutionTrampoline() {
-  UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
-
-  __ pushl(EBP);
-  __ movl(EBP, ESP);          // save ESP
-  __ subl(ESP, Immediate(8));  // Align stack
-  __ movl(EAX, Address(EBP, 8));  // Method* called
-  __ leal(EDX, Address(EBP, 8));  // Method** called_addr
-  __ fs()->pushl(Address::Absolute(Thread::SelfOffset()));  // pass thread
-  __ pushl(EDX);  // pass called_addr
-  __ pushl(ECX);  // pass receiver
-  __ pushl(EAX);  // pass called
-  // Call to resolve method.
-  __ Call(ThreadOffset(PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)),
-          X86ManagedRegister::FromCpuRegister(ECX));
-  __ leave();
-
-  Label resolve_fail;  // forward declaration
-  __ cmpl(EAX, Immediate(0));
-  __ j(kEqual, &resolve_fail);
-  __ jmp(EAX);
-  // Tail call to intended method.
-  __ Bind(&resolve_fail);
-  __ ret();
-
-  assembler->EmitSlowPaths();
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
-  assembler->FinalizeInstructions(code);
-
-  return resolution_trampoline.release();
-}
-}  // namespace x86
-
-}  // namespace art
diff --git a/compiler/stubs/quick/stubs.cc b/compiler/stubs/quick/stubs.cc
deleted file mode 100644
index 912f1c0..0000000
--- a/compiler/stubs/quick/stubs.cc
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "stubs/stubs.h"
-
-#include "entrypoints/quick/quick_entrypoints.h"
-#include "jni_internal.h"
-#include "utils/arm/assembler_arm.h"
-#include "utils/mips/assembler_mips.h"
-#include "utils/x86/assembler_x86.h"
-#include "sirt_ref.h"
-#include "stack_indirect_reference_table.h"
-
-#define __ assembler->
-
-namespace art {
-
-namespace arm {
-const std::vector<uint8_t>* CreateQuickResolutionTrampoline() {
-  UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
-  // | Out args |
-  // | Method*  | <- SP on entry
-  // | LR       |    return address into caller
-  // | ...      |    callee saves
-  // | R3       |    possible argument
-  // | R2       |    possible argument
-  // | R1       |    possible argument
-  // | R0       |    junk on call to QuickResolutionTrampolineFromCode, holds result Method*
-  // | Method*  |    Callee save Method* set up by QuickResoltuionTrampolineFromCode
-  // Save callee saves and ready frame for exception delivery
-  RegList save = (1 << R1) | (1 << R2) | (1 << R3) | (1 << R5) | (1 << R6) | (1 << R7) | (1 << R8) |
-                 (1 << R10) | (1 << R11) | (1 << LR);
-  // TODO: enable when GetCalleeSaveMethod is available at stub generation time
-  // DCHECK_EQ(save, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetCoreSpillMask());
-  __ PushList(save);
-  __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
-  __ mov(R3, ShifterOperand(TR));  // Pass Thread::Current() in R3
-  __ IncreaseFrameSize(8);         // 2 words of space for alignment
-  __ mov(R2, ShifterOperand(SP));  // Pass SP
-  // Call to resolution trampoline (method_idx, receiver, sp, Thread*)
-  __ blx(R12);
-  __ mov(R12, ShifterOperand(R0));  // Save code address returned into R12
-  // Restore registers which may have been modified by GC, "R0" will hold the Method*
-  __ DecreaseFrameSize(4);
-  __ PopList((1 << R0) | save);
-  __ bx(R12);  // Leaf call to method's code
-  __ bkpt(0);
-
-  assembler->EmitSlowPaths();
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
-  assembler->FinalizeInstructions(code);
-
-  return resolution_trampoline.release();
-}
-
-const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
-  UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
-
-  __ LoadFromOffset(kLoadWord, PC, R0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
-  __ bkpt(0);
-
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
-  assembler->FinalizeInstructions(code);
-
-  return entry_stub.release();
-}
-
-const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
-  UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
-
-  __ LoadFromOffset(kLoadWord, PC, R0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry));
-  __ bkpt(0);
-
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
-  assembler->FinalizeInstructions(code);
-
-  return entry_stub.release();
-}
-}  // namespace arm
-
-namespace mips {
-const std::vector<uint8_t>* CreateQuickResolutionTrampoline() {
-  UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
-  // | Out args   |
-  // | Method*    | <- SP on entry
-  // | RA         |    return address into caller
-  // | ...        |    callee saves
-  // | A3         |    possible argument
-  // | A2         |    possible argument
-  // | A1         |    possible argument
-  // | A0/Method* |    Callee save Method* set up by UnresolvedDirectMethodTrampolineFromCode
-  // Save callee saves and ready frame for exception delivery
-  __ AddConstant(SP, SP, -64);
-  __ StoreToOffset(kStoreWord, RA, SP, 60);
-  __ StoreToOffset(kStoreWord, FP, SP, 56);
-  __ StoreToOffset(kStoreWord, GP, SP, 52);
-  __ StoreToOffset(kStoreWord, S7, SP, 48);
-  __ StoreToOffset(kStoreWord, S6, SP, 44);
-  __ StoreToOffset(kStoreWord, S5, SP, 40);
-  __ StoreToOffset(kStoreWord, S4, SP, 36);
-  __ StoreToOffset(kStoreWord, S3, SP, 32);
-  __ StoreToOffset(kStoreWord, S2, SP, 28);
-  __ StoreToOffset(kStoreWord, A3, SP, 12);
-  __ StoreToOffset(kStoreWord, A2, SP, 8);
-  __ StoreToOffset(kStoreWord, A1, SP, 4);
-
-  __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
-  __ Move(A3, S1);  // Pass Thread::Current() in A3
-  __ Move(A2, SP);  // Pass SP for Method** callee_addr
-  __ Jalr(T9);  // Call to resolution trampoline (method_idx, receiver, sp, Thread*)
-
-  // Restore registers which may have been modified by GC
-  __ LoadFromOffset(kLoadWord, A0, SP, 0);
-  __ LoadFromOffset(kLoadWord, A1, SP, 4);
-  __ LoadFromOffset(kLoadWord, A2, SP, 8);
-  __ LoadFromOffset(kLoadWord, A3, SP, 12);
-  __ LoadFromOffset(kLoadWord, S2, SP, 28);
-  __ LoadFromOffset(kLoadWord, S3, SP, 32);
-  __ LoadFromOffset(kLoadWord, S4, SP, 36);
-  __ LoadFromOffset(kLoadWord, S5, SP, 40);
-  __ LoadFromOffset(kLoadWord, S6, SP, 44);
-  __ LoadFromOffset(kLoadWord, S7, SP, 48);
-  __ LoadFromOffset(kLoadWord, GP, SP, 52);
-  __ LoadFromOffset(kLoadWord, FP, SP, 56);
-  __ LoadFromOffset(kLoadWord, RA, SP, 60);
-  __ AddConstant(SP, SP, 64);
-
-  __ Move(T9, V0);  // Put method's code in T9
-  __ Jr(T9);  // Leaf call to method's code
-
-  __ Break();
-
-  assembler->EmitSlowPaths();
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
-  assembler->FinalizeInstructions(code);
-
-  return resolution_trampoline.release();
-}
-
-const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
-  UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
-
-  __ LoadFromOffset(kLoadWord, T9, A0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
-  __ Jr(T9);
-  __ Break();
-
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
-  assembler->FinalizeInstructions(code);
-
-  return entry_stub.release();
-}
-
-const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
-  UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
-
-  __ LoadFromOffset(kLoadWord, T9, A0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
-  __ Jr(T9);
-  __ Break();
-
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
-  assembler->FinalizeInstructions(code);
-
-  return entry_stub.release();
-}
-}  // namespace mips
-
-namespace x86 {
-const std::vector<uint8_t>* CreateQuickResolutionTrampoline() {
-  UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
-  // Set up the callee save frame to conform with Runtime::CreateCalleeSaveMethod(kRefsAndArgs)
-  // return address
-  __ pushl(EDI);
-  __ pushl(ESI);
-  __ pushl(EBP);
-  __ pushl(EBX);
-  __ pushl(EDX);
-  __ pushl(ECX);
-  __ pushl(EAX);  // <-- callee save Method* to go here
-  __ movl(EDX, ESP);          // save ESP
-  __ fs()->pushl(Address::Absolute(Thread::SelfOffset()));  // pass Thread*
-  __ pushl(EDX);              // pass ESP for Method*
-  __ pushl(ECX);              // pass receiver
-  __ pushl(EAX);              // pass Method*
-
-  // Call to resolve method.
-  __ Call(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)),
-          X86ManagedRegister::FromCpuRegister(ECX));
-
-  __ movl(EDI, EAX);  // save code pointer in EDI
-  __ addl(ESP, Immediate(16));  // Pop arguments
-  __ popl(EAX);  // Restore args.
-  __ popl(ECX);
-  __ popl(EDX);
-  __ popl(EBX);
-  __ popl(EBP);  // Restore callee saves.
-  __ popl(ESI);
-  // Swap EDI callee save with code pointer
-  __ xchgl(EDI, Address(ESP, 0));
-  // Tail call to intended method.
-  __ ret();
-
-  assembler->EmitSlowPaths();
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > resolution_trampoline(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*resolution_trampoline)[0], resolution_trampoline->size());
-  assembler->FinalizeInstructions(code);
-
-  return resolution_trampoline.release();
-}
-
-const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
-  UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
-
-  __ fs()->jmp(Address::Absolute(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry))));
-
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
-  assembler->FinalizeInstructions(code);
-
-  return entry_stub.release();
-}
-
-const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
-  UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
-
-  __ fs()->jmp(Address::Absolute(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry))));
-
-  size_t cs = assembler->CodeSize();
-  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
-  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
-  assembler->FinalizeInstructions(code);
-
-  return entry_stub.release();
-}
-}  // namespace x86
-
-}  // namespace art
diff --git a/compiler/stubs/stubs.h b/compiler/stubs/stubs.h
deleted file mode 100644
index d85eae8..0000000
--- a/compiler/stubs/stubs.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_STUBS_STUBS_H_
-#define ART_COMPILER_STUBS_STUBS_H_
-
-#include "runtime.h"
-
-namespace art {
-
-namespace arm {
-const std::vector<uint8_t>* CreatePortableResolutionTrampoline()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-const std::vector<uint8_t>* CreateQuickResolutionTrampoline()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-const std::vector<uint8_t>* CreateInterpreterToQuickEntry()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-}
-
-namespace mips {
-const std::vector<uint8_t>* CreatePortableResolutionTrampoline()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-const std::vector<uint8_t>* CreateQuickResolutionTrampoline()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-const std::vector<uint8_t>* CreateInterpreterToQuickEntry()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-}
-
-namespace x86 {
-const std::vector<uint8_t>* CreatePortableResolutionTrampoline()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-const std::vector<uint8_t>* CreateQuickResolutionTrampoline()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-const std::vector<uint8_t>* CreateInterpreterToQuickEntry()
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-}
-
-}  // namespace art
-
-#endif  // ART_COMPILER_STUBS_STUBS_H_
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
new file mode 100644
index 0000000..32ae558
--- /dev/null
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "trampoline_compiler.h"
+
+#include "jni_internal.h"
+#include "utils/arm/assembler_arm.h"
+#include "utils/mips/assembler_mips.h"
+#include "utils/x86/assembler_x86.h"
+
+#define __ assembler->
+
+namespace art {
+
+namespace arm {
+static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
+                                                    ThreadOffset offset) {
+  UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
+
+  switch (abi) {
+    case kInterpreterAbi:  // Thread* is first argument (R0) in interpreter ABI.
+      __ LoadFromOffset(kLoadWord, PC, R0, offset.Int32Value());
+      break;
+    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (R0).
+      __ LoadFromOffset(kLoadWord, IP, R0, JNIEnvExt::SelfOffset().Int32Value());
+      __ LoadFromOffset(kLoadWord, PC, IP, offset.Int32Value());
+      break;
+    case kPortableAbi:  // R9 holds Thread*.
+    case kQuickAbi:  // Fall-through.
+      __ LoadFromOffset(kLoadWord, PC, R9, offset.Int32Value());
+  }
+  __ bkpt(0);
+
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+  assembler->FinalizeInstructions(code);
+
+  return entry_stub.release();
+}
+}  // namespace arm
+
+namespace mips {
+static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
+                                                    ThreadOffset offset) {
+  UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
+
+  switch (abi) {
+    case kInterpreterAbi:  // Thread* is first argument (A0) in interpreter ABI.
+      __ LoadFromOffset(kLoadWord, T9, A0, offset.Int32Value());
+      break;
+    case kJniAbi:  // Load via Thread* held in JNIEnv* in first argument (A0).
+      __ LoadFromOffset(kLoadWord, T9, A0, JNIEnvExt::SelfOffset().Int32Value());
+      __ LoadFromOffset(kLoadWord, T9, T9, offset.Int32Value());
+      break;
+    case kPortableAbi:  // S1 holds Thread*.
+    case kQuickAbi:  // Fall-through.
+      __ LoadFromOffset(kLoadWord, T9, S1, offset.Int32Value());
+  }
+  __ Jr(T9);
+  __ Nop();
+  __ Break();
+
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+  assembler->FinalizeInstructions(code);
+
+  return entry_stub.release();
+}
+}  // namespace mips
+
+namespace x86 {
+static const std::vector<uint8_t>* CreateTrampoline(ThreadOffset offset) {
+  UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
+
+  // All x86 trampolines call via the Thread* held in fs.
+  __ fs()->jmp(Address::Absolute(offset));
+  __ int3();
+
+  size_t cs = assembler->CodeSize();
+  UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
+  MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
+  assembler->FinalizeInstructions(code);
+
+  return entry_stub.release();
+}
+}  // namespace x86
+
+const std::vector<uint8_t>* CreateTrampoline(InstructionSet isa, EntryPointCallingConvention abi,
+                                             ThreadOffset offset) {
+  switch (isa) {
+    case kArm:
+    case kThumb2:
+      return arm::CreateTrampoline(abi, offset);
+    case kMips:
+      return mips::CreateTrampoline(abi, offset);
+    case kX86:
+      return x86::CreateTrampoline(offset);
+    default:
+      LOG(FATAL) << "Unknown InstructionSet: " << isa;
+      return NULL;
+  }
+}
+
+}  // namespace art
diff --git a/compiler/trampolines/trampoline_compiler.h b/compiler/trampolines/trampoline_compiler.h
new file mode 100644
index 0000000..21245db
--- /dev/null
+++ b/compiler/trampolines/trampoline_compiler.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_TRAMPOLINES_TRAMPOLINE_COMPILER_H_
+#define ART_COMPILER_TRAMPOLINES_TRAMPOLINE_COMPILER_H_
+
+#include <stdint.h>
+#include <vector>
+
+#include "locks.h"
+#include "driver/compiler_driver.h"
+
+namespace art {
+
+// Create code that will invoke the function held in thread-local storage.
+const std::vector<uint8_t>* CreateTrampoline(InstructionSet isa, EntryPointCallingConvention abi,
+                                             ThreadOffset entry_point_offset)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+}  // namespace art
+
+#endif  // ART_COMPILER_TRAMPOLINES_TRAMPOLINE_COMPILER_H_
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index fa202c3..f0d11d8 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -1246,10 +1246,10 @@
 // Implementation note: this method must emit at most one instruction when
 // Address::CanHoldLoadOffset.
 void ArmAssembler::LoadFromOffset(LoadOperandType type,
-                               Register reg,
-                               Register base,
-                               int32_t offset,
-                               Condition cond) {
+                                  Register reg,
+                                  Register base,
+                                  int32_t offset,
+                                  Condition cond) {
   if (!Address::CanHoldLoadOffset(type, offset)) {
     CHECK(base != IP);
     LoadImmediate(IP, offset, cond);
@@ -1884,7 +1884,7 @@
   // Don't care about preserving R0 as this call won't return
   __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
   // Set up call to Thread::Current()->pDeliverException
-  __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pDeliverException));
+  __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pDeliverException).Int32Value());
   __ blx(R12);
   // Call never returns
   __ bkpt(0);
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 931d7ab..2be3d56 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -813,14 +813,7 @@
 
 void MipsAssembler::Copy(FrameOffset /*dest*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
                          ManagedRegister /*mscratch*/, size_t /*size*/) {
-  UNIMPLEMENTED(FATAL) << "no arm implementation";
-#if 0
-  Register scratch = mscratch.AsMips().AsCoreRegister();
-  CHECK_EQ(size, 4u);
-  movl(scratch, Address(ESP, src_base));
-  movl(scratch, Address(scratch, src_offset));
-  movl(Address(ESP, dest), scratch);
-#endif
+  UNIMPLEMENTED(FATAL) << "no mips implementation";
 }
 
 void MipsAssembler::Copy(ManagedRegister dest, Offset dest_offset,
@@ -834,24 +827,11 @@
 
 void MipsAssembler::Copy(FrameOffset /*dest*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/,
                          ManagedRegister /*mscratch*/, size_t /*size*/) {
-  UNIMPLEMENTED(FATAL) << "no arm implementation";
-#if 0
-  Register scratch = mscratch.AsMips().AsCoreRegister();
-  CHECK_EQ(size, 4u);
-  CHECK_EQ(dest.Int32Value(), src.Int32Value());
-  movl(scratch, Address(ESP, src));
-  pushl(Address(scratch, src_offset));
-  popl(Address(scratch, dest_offset));
-#endif
+  UNIMPLEMENTED(FATAL) << "no mips implementation";
 }
 
 void MipsAssembler::MemoryBarrier(ManagedRegister) {
-  UNIMPLEMENTED(FATAL) << "NEEDS TO BE IMPLEMENTED";
-#if 0
-#if ANDROID_SMP != 0
-  mfence();
-#endif
-#endif
+  UNIMPLEMENTED(FATAL) << "no mips implementation";
 }
 
 void MipsAssembler::CreateSirtEntry(ManagedRegister mout_reg,
@@ -953,10 +933,7 @@
 }
 
 void MipsAssembler::Call(ThreadOffset /*offset*/, ManagedRegister /*mscratch*/) {
-  UNIMPLEMENTED(FATAL) << "no arm implementation";
-#if 0
-  fs()->call(Address::Absolute(offset));
-#endif
+  UNIMPLEMENTED(FATAL) << "no mips implementation";
 }
 
 void MipsAssembler::GetCurrentThread(ManagedRegister tr) {
@@ -988,7 +965,7 @@
   // Don't care about preserving A0 as this call won't return
   __ Move(A0, scratch_.AsCoreRegister());
   // Set up call to Thread::Current()->pDeliverException
-  __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pDeliverException));
+  __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pDeliverException).Int32Value());
   __ Jr(T9);
   // Call never returns
   __ Break();
diff --git a/compiler/utils/scoped_hashtable_test.cc b/compiler/utils/scoped_hashtable_test.cc
index d5f9f7d..68608f0 100644
--- a/compiler/utils/scoped_hashtable_test.cc
+++ b/compiler/utils/scoped_hashtable_test.cc
@@ -15,7 +15,7 @@
  */
 
 #include "common_test.h"
-#include "scoped_hashtable.h"
+#include "utils/scoped_hashtable.h"
 
 using utils::ScopedHashtable;
 
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 51bb3eb..4f25c00 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -142,6 +142,7 @@
 	arch/x86/registers_x86.cc \
 	arch/mips/registers_mips.cc \
 	entrypoints/entrypoint_utils.cc \
+	entrypoints/interpreter/interpreter_entrypoints.cc \
 	entrypoints/jni/jni_entrypoints.cc \
 	entrypoints/math_entrypoints.cc \
 	entrypoints/portable/portable_alloc_entrypoints.cc \
@@ -163,15 +164,13 @@
 	entrypoints/quick/quick_field_entrypoints.cc \
 	entrypoints/quick/quick_fillarray_entrypoints.cc \
 	entrypoints/quick/quick_instrumentation_entrypoints.cc \
-	entrypoints/quick/quick_interpreter_entrypoints.cc \
 	entrypoints/quick/quick_invoke_entrypoints.cc \
 	entrypoints/quick/quick_jni_entrypoints.cc \
 	entrypoints/quick/quick_lock_entrypoints.cc \
 	entrypoints/quick/quick_math_entrypoints.cc \
-	entrypoints/quick/quick_proxy_entrypoints.cc \
-	entrypoints/quick/quick_stub_entrypoints.cc \
 	entrypoints/quick/quick_thread_entrypoints.cc \
-	entrypoints/quick/quick_throw_entrypoints.cc
+	entrypoints/quick/quick_throw_entrypoints.cc \
+	entrypoints/quick/quick_trampoline_entrypoints.cc
 
 LIBART_TARGET_SRC_FILES := \
 	$(LIBART_COMMON_SRC_FILES) \
diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S
index ed655e9..559788f 100644
--- a/runtime/arch/arm/asm_support_arm.S
+++ b/runtime/arch/arm/asm_support_arm.S
@@ -35,4 +35,11 @@
     .size \name, .-\name
 .endm
 
+.macro UNIMPLEMENTED name
+    ENTRY \name
+    bkpt
+    bkpt
+    END \name
+.endm
+
 #endif  // ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_S_
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index b71a158..848bacc 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include "entrypoints/interpreter/interpreter_entrypoints.h"
 #include "entrypoints/portable/portable_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/entrypoint_utils.h"
@@ -21,49 +22,61 @@
 
 namespace art {
 
+// Interpreter entrypoints.
+extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
+                                                 const DexFile::CodeItem* code_item,
+                                                 ShadowFrame* shadow_frame, JValue* result);
+extern "C" void artInterperterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
+                                           const DexFile::CodeItem* code_item,
+                                           ShadowFrame* shadow_frame, JValue* result);
+
+// Portable entrypoints.
+extern "C" void art_portable_resolution_trampoline(mirror::AbstractMethod*);
+extern "C" void art_portable_to_interpreter_bridge(mirror::AbstractMethod*);
+
 // Alloc entrypoints.
-extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method);
-extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method);
-extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t);
-extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_object(uint32_t type_idx, void* method);
+extern "C" void* art_quick_alloc_object_with_access_check(uint32_t type_idx, void* method);
+extern "C" void* art_quick_check_and_alloc_array(uint32_t, void*, int32_t);
+extern "C" void* art_quick_check_and_alloc_array_with_access_check(uint32_t, void*, int32_t);
 
 // Cast entrypoints.
 extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
                                             const mirror::Class* ref_class);
-extern "C" void art_quick_can_put_array_element_from_code(void*, void*);
-extern "C" void art_quick_check_cast_from_code(void*, void*);
+extern "C" void art_quick_can_put_array_element(void*, void*);
+extern "C" void art_quick_check_cast(void*, void*);
 
 // DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*);
-extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t);
+extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
+extern "C" void* art_quick_initialize_type(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
+extern "C" void* art_quick_resolve_string(void*, uint32_t);
 
 // Exception entrypoints.
 extern "C" void* GetAndClearException(Thread*);
 
 // Field entrypoints.
-extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*);
-extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static_from_code(uint32_t);
-extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static_from_code(uint32_t);
-extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static_from_code(uint32_t);
+extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
+extern "C" int art_quick_set32_static(uint32_t, int32_t);
+extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
+extern "C" int art_quick_set64_static(uint32_t, int64_t);
+extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
+extern "C" int art_quick_set_obj_static(uint32_t, void*);
+extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
+extern "C" int32_t art_quick_get32_static(uint32_t);
+extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
+extern "C" int64_t art_quick_get64_static(uint32_t);
+extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
+extern "C" void* art_quick_get_obj_static(uint32_t);
 
 // FillArray entrypoint.
-extern "C" void art_quick_handle_fill_data_from_code(void*, void*);
+extern "C" void art_quick_handle_fill_data(void*, void*);
 
 // Lock entrypoints.
-extern "C" void art_quick_lock_object_from_code(void*);
-extern "C" void art_quick_unlock_object_from_code(void*);
+extern "C" void art_quick_lock_object(void*);
+extern "C" void art_quick_unlock_object(void*);
 
 // Math entrypoints.
 extern int32_t CmpgDouble(double a, double b);
@@ -93,26 +106,14 @@
 extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
 extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
 
-// Interpreter entrypoints.
-extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
-                                                 const DexFile::CodeItem* code_item,
-                                                 ShadowFrame* shadow_frame, JValue* result);
-extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
-                                           const DexFile::CodeItem* code_item,
-                                           ShadowFrame* shadow_frame, JValue* result);
-
 // Intrinsic entrypoints.
 extern "C" int32_t __memcmp16(void*, void*, int32_t);
 extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
 extern "C" int32_t art_quick_string_compareto(void*, void*);
 
 // Invoke entrypoints.
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
-                                                       mirror::Object* receiver,
-                                                       mirror::AbstractMethod** sp, Thread* thread);
-extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
-                                                    mirror::Object* receiver,
-                                                    mirror::AbstractMethod** sp, Thread* thread);
+extern "C" void art_quick_resolution_trampoline(mirror::AbstractMethod*);
+extern "C" void art_quick_to_interpreter_bridge(mirror::AbstractMethod*);
 extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
 extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
 extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
@@ -125,49 +126,61 @@
 extern "C" void art_quick_test_suspend();
 
 // Throw entrypoints.
-extern "C" void art_quick_deliver_exception_from_code(void*);
-extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero_from_code();
-extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception_from_code();
-extern "C" void art_quick_throw_stack_overflow_from_code(void*);
+extern "C" void art_quick_deliver_exception(void*);
+extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
+extern "C" void art_quick_throw_div_zero();
+extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
+extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_stack_overflow(void*);
 
-void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) {
+void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
+                     PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
+  // Interpreter
+  ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
+  ipoints->pInterpreterToCompiledCodeBridge = artInterpreterToCompiledCodeBridge;
+
+  // JNI
+  jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
+
+  // Portable
+  ppoints->pPortableResolutionTrampoline = art_portable_resolution_trampoline;
+  ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge;
+
   // Alloc
-  qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code;
-  qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
-  qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code;
-  qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
-  qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
-  qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
+  qpoints->pAllocArray = art_quick_alloc_array;
+  qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check;
+  qpoints->pAllocObject = art_quick_alloc_object;
+  qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check;
+  qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array;
+  qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check;
 
   // Cast
-  qpoints->pInstanceofNonTrivialFromCode = artIsAssignableFromCode;
-  qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
-  qpoints->pCheckCastFromCode = art_quick_check_cast_from_code;
+  qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
+  qpoints->pCanPutArrayElement = art_quick_can_put_array_element;
+  qpoints->pCheckCast = art_quick_check_cast;
 
   // DexCache
-  qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
-  qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
-  qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
-  qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code;
+  qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
+  qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
+  qpoints->pInitializeType = art_quick_initialize_type;
+  qpoints->pResolveString = art_quick_resolve_string;
 
   // Field
-  qpoints->pSet32Instance = art_quick_set32_instance_from_code;
-  qpoints->pSet32Static = art_quick_set32_static_from_code;
-  qpoints->pSet64Instance = art_quick_set64_instance_from_code;
-  qpoints->pSet64Static = art_quick_set64_static_from_code;
-  qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code;
-  qpoints->pSetObjStatic = art_quick_set_obj_static_from_code;
-  qpoints->pGet32Instance = art_quick_get32_instance_from_code;
-  qpoints->pGet64Instance = art_quick_get64_instance_from_code;
-  qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code;
-  qpoints->pGet32Static = art_quick_get32_static_from_code;
-  qpoints->pGet64Static = art_quick_get64_static_from_code;
-  qpoints->pGetObjStatic = art_quick_get_obj_static_from_code;
+  qpoints->pSet32Instance = art_quick_set32_instance;
+  qpoints->pSet32Static = art_quick_set32_static;
+  qpoints->pSet64Instance = art_quick_set64_instance;
+  qpoints->pSet64Static = art_quick_set64_static;
+  qpoints->pSetObjInstance = art_quick_set_obj_instance;
+  qpoints->pSetObjStatic = art_quick_set_obj_static;
+  qpoints->pGet32Instance = art_quick_get32_instance;
+  qpoints->pGet64Instance = art_quick_get64_instance;
+  qpoints->pGetObjInstance = art_quick_get_obj_instance;
+  qpoints->pGet32Static = art_quick_get32_static;
+  qpoints->pGet64Static = art_quick_get64_static;
+  qpoints->pGetObjStatic = art_quick_get_obj_static;
 
   // FillArray
-  qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
+  qpoints->pHandleFillArrayData = art_quick_handle_fill_data;
 
   // JNI
   qpoints->pJniMethodStart = JniMethodStart;
@@ -178,8 +191,8 @@
   qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
 
   // Locks
-  qpoints->pLockObjectFromCode = art_quick_lock_object_from_code;
-  qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
+  qpoints->pLockObject = art_quick_lock_object;
+  qpoints->pUnlockObject = art_quick_unlock_object;
 
   // Math
   qpoints->pCmpgDouble = CmpgDouble;
@@ -203,10 +216,6 @@
   qpoints->pShrLong = art_quick_shr_long;
   qpoints->pUshrLong = art_quick_ushr_long;
 
-  // Interpreter
-  qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
-  qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
-
   // Intrinsics
   qpoints->pIndexOf = art_quick_indexof;
   qpoints->pMemcmp16 = __memcmp16;
@@ -214,7 +223,8 @@
   qpoints->pMemcpy = memcpy;
 
   // Invocation
-  qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
+  qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
+  qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
   qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
   qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
   qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
@@ -223,19 +233,16 @@
   qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
 
   // Thread
-  qpoints->pCheckSuspendFromCode = CheckSuspendFromCode;
-  qpoints->pTestSuspendFromCode = art_quick_test_suspend;
+  qpoints->pCheckSuspend = CheckSuspendFromCode;
+  qpoints->pTestSuspend = art_quick_test_suspend;
 
   // Throws
-  qpoints->pDeliverException = art_quick_deliver_exception_from_code;
-  qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
-  qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
-  qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
-  qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
-  qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
-
-  // Portable
-  ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
+  qpoints->pDeliverException = art_quick_deliver_exception;
+  qpoints->pThrowArrayBounds = art_quick_throw_array_bounds;
+  qpoints->pThrowDivZero = art_quick_throw_div_zero;
+  qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
+  qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
+  qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
 };
 
 }  // namespace art
diff --git a/runtime/arch/arm/jni_entrypoints_arm.S b/runtime/arch/arm/jni_entrypoints_arm.S
index 0a0d06a..f51f121 100644
--- a/runtime/arch/arm/jni_entrypoints_arm.S
+++ b/runtime/arch/arm/jni_entrypoints_arm.S
@@ -16,6 +16,8 @@
 
 #include "asm_support_arm.S"
 
+    .cfi_sections   .debug_frame
+
     /*
      * Jni dlsym lookup stub.
      */
@@ -28,8 +30,7 @@
     sub    sp, #12                        @ pad stack pointer to align frame
     .pad #12
     .cfi_adjust_cfa_offset 12
-    mov    r0, r9                         @ pass Thread::Current
-    blx    artFindNativeMethod            @ (Thread*)
+    blx    artFindNativeMethod
     mov    r12, r0                        @ save result in r12
     add    sp, #12                        @ restore stack pointer
     .cfi_adjust_cfa_offset -12
@@ -44,7 +45,7 @@
      * Entry point of native methods when JNI bug compatibility is enabled.
      */
     .extern artWorkAroundAppJniBugs
-ENTRY art_quick_work_around_app_jni_bugs
+ENTRY art_work_around_app_jni_bugs
     @ save registers that may contain arguments and LR that will be crushed by a call
     push {r0-r3, lr}
     .save {r0-r3, lr}
@@ -62,4 +63,4 @@
     pop {r0-r3, lr}  @ restore possibly modified argument registers
     .cfi_adjust_cfa_offset -16
     bx  r12          @ tail call into JNI routine
-END art_quick_work_around_app_jni_bugs
+END art_work_around_app_jni_bugs
diff --git a/runtime/arch/arm/portable_entrypoints_arm.S b/runtime/arch/arm/portable_entrypoints_arm.S
index 4cc6654..adfd22b 100644
--- a/runtime/arch/arm/portable_entrypoints_arm.S
+++ b/runtime/arch/arm/portable_entrypoints_arm.S
@@ -16,6 +16,8 @@
 
 #include "asm_support_arm.S"
 
+    .cfi_sections   .debug_frame
+
     /*
      * Portable invocation stub.
      * On entry:
@@ -94,3 +96,6 @@
     .cfi_adjust_cfa_offset -48
     bx      lr                     @ return
 END art_portable_proxy_invoke_handler
+
+UNIMPLEMENTED art_portable_resolution_trampoline
+UNIMPLEMENTED art_portable_to_interpreter_bridge
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 9b8d238..d9bb433 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -16,6 +16,8 @@
 
 #include "asm_support_arm.S"
 
+    .cfi_sections   .debug_frame
+
     /* Deliver the given exception */
     .extern artDeliverExceptionFromCode
     /* Deliver an exception pending on a thread */
@@ -157,33 +159,33 @@
      * Called by managed code, saves callee saves and then calls artThrowException
      * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
      */
-ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception_from_code, artDeliverExceptionFromCode
+ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode
 
     /*
      * Called by managed code to create and deliver a NullPointerException.
      */
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_code, artThrowNullPointerExceptionFromCode
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
 
     /*
      * Called by managed code to create and deliver an ArithmeticException.
      */
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero_from_code, artThrowDivZeroFromCode
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
 
     /*
      * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
      * index, arg2 holds limit.
      */
-TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds_from_code, artThrowArrayBoundsFromCode
+TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
 
     /*
      * Called by managed code to create and deliver a StackOverflowError.
      */
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow_from_code, artThrowStackOverflowFromCode
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
 
     /*
      * Called by managed code to create and deliver a NoSuchMethodError.
      */
-ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method_from_code, artThrowNoSuchMethodFromCode
+ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode
 
     /*
      * All generated callsites for interface invokes and invocation slow paths will load arguments
@@ -294,7 +296,7 @@
      * failure.
      */
     .extern artHandleFillArrayDataFromCode
-ENTRY art_quick_handle_fill_data_from_code
+ENTRY art_quick_handle_fill_data
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case exception allocation triggers GC
     mov    r2, r9                          @ pass Thread::Current
     mov    r3, sp                          @ pass SP
@@ -303,25 +305,25 @@
     cmp    r0, #0                          @ success?
     bxeq   lr                              @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_handle_fill_data_from_code
+END art_quick_handle_fill_data
 
     /*
      * Entry from managed code that calls artLockObjectFromCode, may block for GC.
      */
     .extern artLockObjectFromCode
-ENTRY art_quick_lock_object_from_code
+ENTRY art_quick_lock_object
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case we block
     mov    r1, r9                     @ pass Thread::Current
     mov    r2, sp                     @ pass SP
     bl     artLockObjectFromCode      @ (Object* obj, Thread*, SP)
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
-END art_quick_lock_object_from_code
+END art_quick_lock_object
 
     /*
      * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
      */
     .extern artUnlockObjectFromCode
-ENTRY art_quick_unlock_object_from_code
+ENTRY art_quick_unlock_object
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case exception allocation triggers GC
     mov    r1, r9                   @ pass Thread::Current
     mov    r2, sp                   @ pass SP
@@ -330,13 +332,13 @@
     cmp    r0, #0                   @ success?
     bxeq   lr                       @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_unlock_object_from_code
+END art_quick_unlock_object
 
     /*
      * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure.
      */
     .extern artCheckCastFromCode
-ENTRY art_quick_check_cast_from_code
+ENTRY art_quick_check_cast
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME    @ save callee saves in case exception allocation triggers GC
     mov    r2, r9                       @ pass Thread::Current
     mov    r3, sp                       @ pass SP
@@ -345,14 +347,14 @@
     cmp    r0, #0                       @ success?
     bxeq   lr                           @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_check_cast_from_code
+END art_quick_check_cast
 
     /*
      * Entry from managed code that calls artCanPutArrayElementFromCode and delivers exception on
      * failure.
      */
     .extern artCanPutArrayElementFromCode
-ENTRY art_quick_can_put_array_element_from_code
+ENTRY art_quick_can_put_array_element
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME    @ save callee saves in case exception allocation triggers GC
     mov    r2, r9                         @ pass Thread::Current
     mov    r3, sp                         @ pass SP
@@ -361,7 +363,7 @@
     cmp    r0, #0                         @ success?
     bxeq   lr                             @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_can_put_array_element_from_code
+END art_quick_can_put_array_element
 
     /*
      * Entry from managed code when uninitialized static storage, this stub will run the class
@@ -369,7 +371,7 @@
      * returned.
      */
     .extern artInitializeStaticStorageFromCode
-ENTRY art_quick_initialize_static_storage_from_code
+ENTRY art_quick_initialize_static_storage
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME           @ save callee saves in case of GC
     mov    r2, r9                              @ pass Thread::Current
     mov    r3, sp                              @ pass SP
@@ -379,13 +381,13 @@
     cmp    r0, #0                              @ success if result is non-null
     bxne   lr                                  @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_initialize_static_storage_from_code
+END art_quick_initialize_static_storage
 
     /*
      * Entry from managed code when dex cache misses for a type_idx
      */
     .extern artInitializeTypeFromCode
-ENTRY art_quick_initialize_type_from_code
+ENTRY art_quick_initialize_type
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME           @ save callee saves in case of GC
     mov    r2, r9                              @ pass Thread::Current
     mov    r3, sp                              @ pass SP
@@ -395,14 +397,14 @@
     cmp    r0, #0                              @ success if result is non-null
     bxne   lr                                  @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_initialize_type_from_code
+END art_quick_initialize_type
 
     /*
      * Entry from managed code when type_idx needs to be checked for access and dex cache may also
      * miss.
      */
     .extern artInitializeTypeAndVerifyAccessFromCode
-ENTRY art_quick_initialize_type_and_verify_access_from_code
+ENTRY art_quick_initialize_type_and_verify_access
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME           @ save callee saves in case of GC
     mov    r2, r9                              @ pass Thread::Current
     mov    r3, sp                              @ pass SP
@@ -412,13 +414,13 @@
     cmp    r0, #0                              @ success if result is non-null
     bxne   lr                                  @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_initialize_type_and_verify_access_from_code
+END art_quick_initialize_type_and_verify_access
 
     /*
      * Called by managed code to resolve a static field and load a 32-bit primitive value.
      */
     .extern artGet32StaticFromCode
-ENTRY art_quick_get32_static_from_code
+ENTRY art_quick_get32_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r1, [sp, #32]                 @ pass referrer
     mov    r2, r9                        @ pass Thread::Current
@@ -429,13 +431,13 @@
     cmp    r12, #0                       @ success if no exception is pending
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_get32_static_from_code
+END art_quick_get32_static
 
     /*
      * Called by managed code to resolve a static field and load a 64-bit primitive value.
      */
     .extern artGet64StaticFromCode
-ENTRY art_quick_get64_static_from_code
+ENTRY art_quick_get64_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r1, [sp, #32]                 @ pass referrer
     mov    r2, r9                        @ pass Thread::Current
@@ -446,13 +448,13 @@
     cmp    r12, #0                       @ success if no exception is pending
     bxeq    lr                           @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_get64_static_from_code
+END art_quick_get64_static
 
     /*
      * Called by managed code to resolve a static field and load an object reference.
      */
     .extern artGetObjStaticFromCode
-ENTRY art_quick_get_obj_static_from_code
+ENTRY art_quick_get_obj_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r1, [sp, #32]                 @ pass referrer
     mov    r2, r9                        @ pass Thread::Current
@@ -463,13 +465,13 @@
     cmp    r12, #0                       @ success if no exception is pending
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_get_obj_static_from_code
+END art_quick_get_obj_static
 
     /*
      * Called by managed code to resolve an instance field and load a 32-bit primitive value.
      */
     .extern artGet32InstanceFromCode
-ENTRY art_quick_get32_instance_from_code
+ENTRY art_quick_get32_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r2, [sp, #32]                 @ pass referrer
     mov    r3, r9                        @ pass Thread::Current
@@ -482,13 +484,13 @@
     cmp    r12, #0                       @ success if no exception is pending
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_get32_instance_from_code
+END art_quick_get32_instance
 
     /*
      * Called by managed code to resolve an instance field and load a 64-bit primitive value.
      */
     .extern artGet64InstanceFromCode
-ENTRY art_quick_get64_instance_from_code
+ENTRY art_quick_get64_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r2, [sp, #32]                 @ pass referrer
     mov    r3, r9                        @ pass Thread::Current
@@ -504,13 +506,13 @@
     cmp    r12, #0                       @ success if no exception is pending
     bxeq    lr                           @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_get64_instance_from_code
+END art_quick_get64_instance
 
     /*
      * Called by managed code to resolve an instance field and load an object reference.
      */
     .extern artGetObjInstanceFromCode
-ENTRY art_quick_get_obj_instance_from_code
+ENTRY art_quick_get_obj_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r2, [sp, #32]                 @ pass referrer
     mov    r3, r9                        @ pass Thread::Current
@@ -526,13 +528,13 @@
     cmp    r12, #0                       @ success if no exception is pending
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_get_obj_instance_from_code
+END art_quick_get_obj_instance
 
     /*
      * Called by managed code to resolve a static field and store a 32-bit primitive value.
      */
     .extern artSet32StaticFromCode
-ENTRY art_quick_set32_static_from_code
+ENTRY art_quick_set32_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r2, [sp, #32]                 @ pass referrer
     mov    r3, r9                        @ pass Thread::Current
@@ -547,14 +549,14 @@
     cmp    r0, #0                        @ success if result is 0
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_set32_static_from_code
+END art_quick_set32_static
 
     /*
      * Called by managed code to resolve a static field and store a 64-bit primitive value.
      * On entry r0 holds field index, r1:r2 hold new_val
      */
     .extern artSet64StaticFromCode
-ENTRY art_quick_set64_static_from_code
+ENTRY art_quick_set64_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     mov    r3, r2                        @ pass one half of wide argument
     mov    r2, r1                        @ pass other half of wide argument
@@ -573,13 +575,13 @@
     cmp    r0, #0                        @ success if result is 0
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_set64_static_from_code
+END art_quick_set64_static
 
     /*
      * Called by managed code to resolve a static field and store an object reference.
      */
     .extern artSetObjStaticFromCode
-ENTRY art_quick_set_obj_static_from_code
+ENTRY art_quick_set_obj_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r2, [sp, #32]                 @ pass referrer
     mov    r3, r9                        @ pass Thread::Current
@@ -594,13 +596,13 @@
     cmp    r0, #0                        @ success if result is 0
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_set_obj_static_from_code
+END art_quick_set_obj_static
 
     /*
      * Called by managed code to resolve an instance field and store a 32-bit primitive value.
      */
     .extern artSet32InstanceFromCode
-ENTRY art_quick_set32_instance_from_code
+ENTRY art_quick_set32_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r3, [sp, #32]                 @ pass referrer
     mov    r12, sp                       @ save SP
@@ -619,13 +621,13 @@
     cmp    r0, #0                        @ success if result is 0
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_set32_instance_from_code
+END art_quick_set32_instance
 
     /*
      * Called by managed code to resolve an instance field and store a 64-bit primitive value.
      */
     .extern artSet64InstanceFromCode
-ENTRY art_quick_set64_instance_from_code
+ENTRY art_quick_set64_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     mov    r12, sp                       @ save SP
     sub    sp, #8                        @ grow frame for alignment with stack args
@@ -642,13 +644,13 @@
     cmp    r0, #0                        @ success if result is 0
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_set64_instance_from_code
+END art_quick_set64_instance
 
     /*
      * Called by managed code to resolve an instance field and store an object reference.
      */
     .extern artSetObjInstanceFromCode
-ENTRY art_quick_set_obj_instance_from_code
+ENTRY art_quick_set_obj_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     ldr    r3, [sp, #32]                 @ pass referrer
     mov    r12, sp                       @ save SP
@@ -666,7 +668,7 @@
     cmp    r0, #0                        @ success if result is 0
     bxeq   lr                            @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_set_obj_instance_from_code
+END art_quick_set_obj_instance
 
     /*
      * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
@@ -675,7 +677,7 @@
      * performed.
      */
     .extern artResolveStringFromCode
-ENTRY art_quick_resolve_string_from_code
+ENTRY art_quick_resolve_string
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
     mov    r2, r9                     @ pass Thread::Current
     mov    r3, sp                     @ pass SP
@@ -685,13 +687,13 @@
     cmp    r0, #0                     @ success if result is non-null
     bxne   lr                         @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_resolve_string_from_code
+END art_quick_resolve_string
 
     /*
      * Called by managed code to allocate an object
      */
     .extern artAllocObjectFromCode
-ENTRY art_quick_alloc_object_from_code
+ENTRY art_quick_alloc_object
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
     mov    r2, r9                     @ pass Thread::Current
     mov    r3, sp                     @ pass SP
@@ -700,14 +702,14 @@
     cmp    r0, #0                     @ success if result is non-null
     bxne   lr                         @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_alloc_object_from_code
+END art_quick_alloc_object
 
     /*
      * Called by managed code to allocate an object when the caller doesn't know whether it has
      * access to the created type.
      */
     .extern artAllocObjectFromCodeWithAccessCheck
-ENTRY art_quick_alloc_object_from_code_with_access_check
+ENTRY art_quick_alloc_object_with_access_check
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
     mov    r2, r9                     @ pass Thread::Current
     mov    r3, sp                     @ pass SP
@@ -716,13 +718,13 @@
     cmp    r0, #0                     @ success if result is non-null
     bxne   lr                         @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_alloc_object_from_code_with_access_check
+END art_quick_alloc_object_with_access_check
 
     /*
      * Called by managed code to allocate an array.
      */
     .extern artAllocArrayFromCode
-ENTRY art_quick_alloc_array_from_code
+ENTRY art_quick_alloc_array
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
     mov    r3, r9                     @ pass Thread::Current
     mov    r12, sp
@@ -737,14 +739,14 @@
     cmp    r0, #0                     @ success if result is non-null
     bxne   lr                         @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_alloc_array_from_code
+END art_quick_alloc_array
 
     /*
      * Called by managed code to allocate an array when the caller doesn't know whether it has
      * access to the created type.
      */
     .extern artAllocArrayFromCodeWithAccessCheck
-ENTRY art_quick_alloc_array_from_code_with_access_check
+ENTRY art_quick_alloc_array_with_access_check
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
     mov    r3, r9                     @ pass Thread::Current
     mov    r12, sp
@@ -759,13 +761,13 @@
     cmp    r0, #0                     @ success if result is non-null
     bxne   lr                         @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_alloc_array_from_code_with_access_check
+END art_quick_alloc_array_with_access_check
 
     /*
      * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
      */
     .extern artCheckAndAllocArrayFromCode
-ENTRY art_quick_check_and_alloc_array_from_code
+ENTRY art_quick_check_and_alloc_array
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
     mov    r3, r9                     @ pass Thread::Current
     mov    r12, sp
@@ -780,13 +782,13 @@
     cmp    r0, #0                     @ success if result is non-null
     bxne   lr                         @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_check_and_alloc_array_from_code
+END art_quick_check_and_alloc_array
 
     /*
      * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
      */
     .extern artCheckAndAllocArrayFromCodeWithAccessCheck
-ENTRY art_quick_check_and_alloc_array_from_code_with_access_check
+ENTRY art_quick_check_and_alloc_array_with_access_check
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
     mov    r3, r9                     @ pass Thread::Current
     mov    r12, sp
@@ -801,7 +803,7 @@
     cmp    r0, #0                     @ success if result is non-null
     bxne   lr                         @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_check_and_alloc_array_from_code_with_access_check
+END art_quick_check_and_alloc_array_with_access_check
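
FILLED_NEW_ARRAY gets its own check_and_alloc stubs because the runtime must validate the request before the compiled code stores its operands into the new array. A loose C++ sketch of that check-then-allocate split (semantics simplified, all names hypothetical):

    #include <cstdio>
    #include <vector>

    // Simplified stand-in: the real artCheckAndAllocArrayFromCode also performs
    // access checks and rejects component types FILLED_NEW_ARRAY cannot handle.
    std::vector<int>* CheckAndAllocArray(int component_count) {
      if (component_count < 0) {
        std::puts("throw NegativeArraySizeException");
        return nullptr;  // the stub then falls into DELIVER_PENDING_EXCEPTION
      }
      return new std::vector<int>(component_count);
    }

    int main() {
      if (std::vector<int>* array = CheckAndAllocArray(3)) {
        (*array)[0] = 7;  // compiled code fills the operands after the call
        std::printf("allocated %zu slots\n", array->size());
        delete array;
      }
    }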
 
     /*
      * Called by managed code when the value in rSUSPEND has been decremented to 0.
@@ -840,13 +842,33 @@
     DELIVER_PENDING_EXCEPTION
 END art_quick_proxy_invoke_handler
 
-    .extern artInterpreterEntry
-ENTRY art_quick_interpreter_entry
+    .extern artQuickResolutionTrampoline
+ENTRY art_quick_resolution_trampoline
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
-    str     r0, [sp, #0]           @ place proxy method at bottom of frame
+    mov     r2, r9                 @ pass Thread::Current
+    mov     r3, sp                 @ pass SP
+    blx     artQuickResolutionTrampoline  @ (Method* called, receiver, Thread*, SP)
+    cmp     r0, #0                 @ is code pointer null?
+    beq     1f                     @ goto exception
+    mov     r12, r0
+    ldr  r0, [sp, #0]              @ load resolved method in r0
+    ldr  r1, [sp, #8]              @ restore non-callee save r1
+    ldrd r2, [sp, #12]             @ restore non-callee saves r2-r3
+    ldr  lr, [sp, #44]             @ restore lr
+    add  sp, #48                   @ rewind sp
+    .cfi_adjust_cfa_offset -48
+    bx      r12                    @ tail-call into actual code
+1:
+    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+    DELIVER_PENDING_EXCEPTION
+END art_quick_resolution_trampoline
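
The resolution trampoline above relies on a simple contract: artQuickResolutionTrampoline either returns the resolved code pointer, or returns null with an exception pending on the thread. A minimal C++ sketch of that control flow, using hypothetical stand-ins for the runtime types:

    #include <cstdio>

    using CodePointer = void (*)();

    // Models artQuickResolutionTrampoline: null means an exception is pending.
    CodePointer ResolveCallee(bool resolvable) {
      return resolvable ? +[] { std::puts("resolved code runs"); } : nullptr;
    }

    void DeliverPendingException() { std::puts("pending exception delivered"); }

    void ResolutionTrampoline(bool resolvable) {
      if (CodePointer code = ResolveCallee(resolvable)) {
        code();  // the stub does this as a tail call (bx r12) after restoring args
      } else {
        DeliverPendingException();  // the 1: label in the assembly
      }
    }

    int main() {
      ResolutionTrampoline(true);   // tail-call path
      ResolutionTrampoline(false);  // exception path
    }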
+
+    .extern artQuickToInterpreterBridge
+ENTRY art_quick_to_interpreter_bridge
+    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
     mov     r1, r9                 @ pass Thread::Current
     mov     r2, sp                 @ pass SP
-    blx     artInterpreterEntry    @ (Method* method, Thread*, SP)
+    blx     artQuickToInterpreterBridge    @ (Method* method, Thread*, SP)
     ldr     r12, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
     ldr     lr,  [sp, #44]         @ restore lr
     add     sp,  #48               @ pop frame
@@ -854,14 +876,14 @@
     cmp     r12, #0                @ success if no exception is pending
     bxeq    lr                     @ return on success
     DELIVER_PENDING_EXCEPTION
-END art_quick_interpreter_entry
+END art_quick_to_interpreter_bridge
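
Unlike the resolution trampoline, the interpreter bridge cannot judge success from its return value, since the interpreter's result legitimately travels back in r0/r1; the stub instead inspects Thread::Current()->exception_. A small sketch of that check (Thread here is a hypothetical miniature):

    #include <cstdint>
    #include <cstdio>

    // Miniature of the thread state the stub inspects; the real assembly
    // loads exception_ via THREAD_EXCEPTION_OFFSET.
    struct Thread {
      const void* exception_ = nullptr;
    };

    // Stands in for artQuickToInterpreterBridge: produces a result and may
    // leave an exception pending on the thread.
    int64_t InterpretMethod(Thread* self, bool throws) {
      if (throws) self->exception_ = "pending";
      return 42;
    }

    int main() {
      Thread self;
      int64_t result = InterpretMethod(&self, /*throws=*/false);
      if (self.exception_ == nullptr) {
        std::printf("return result %lld\n", static_cast<long long>(result));  // bxeq lr
      } else {
        std::puts("deliver pending exception");  // DELIVER_PENDING_EXCEPTION
      }
    }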
 
     /*
      * Routine that intercepts method calls and returns.
      */
     .extern artInstrumentationMethodEntryFromCode
     .extern artInstrumentationMethodExitFromCode
-ENTRY art_quick_instrumentation_entry_from_code
+ENTRY art_quick_instrumentation_entry
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
     str   r0, [sp, #4]     @ preserve r0
     mov   r12, sp          @ remember sp
@@ -877,11 +899,11 @@
     mov   r12, r0        @ r12 holds reference to code
     ldr   r0, [sp, #4]   @ restore r0
     RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
-    blx   r12            @ call method with lr set to art_quick_instrumentation_exit_from_code
-END art_quick_instrumentation_entry_from_code
-    .type art_quick_instrumentation_exit_from_code, #function
-    .global art_quick_instrumentation_exit_from_code
-art_quick_instrumentation_exit_from_code:
+    blx   r12            @ call method with lr set to art_quick_instrumentation_exit
+END art_quick_instrumentation_entry
+    .type art_quick_instrumentation_exit, #function
+    .global art_quick_instrumentation_exit
+art_quick_instrumentation_exit:
     .cfi_startproc
     .fnstart
     mov   lr, #0         @ link register is to here, so clobber with 0 for later checks
@@ -910,7 +932,7 @@
     add sp, #32          @ remove callee save frame
     .cfi_adjust_cfa_offset -32
     bx    r2             @ return
-END art_quick_instrumentation_exit_from_code
+END art_quick_instrumentation_exit
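
The instrumentation pair brackets an intercepted call: the entry hook hands back the real code to execute with lr redirected at the exit stub, and the exit stub later recovers the original return address. A rough C++ analogue (hook names and signatures are illustrative, not the runtime's):

    #include <cstdio>

    using CodePointer = void (*)();

    // Illustrative hooks; the runtime's artInstrumentationMethodEntryFromCode /
    // ...ExitFromCode also receive Method*, Thread*, SP and the caller's lr.
    CodePointer EntryHook(CodePointer real_code) {
      std::puts("instrumentation: method entered");
      return real_code;  // the stub then calls it with lr aimed at the exit stub
    }

    void ExitHook() {
      std::puts("instrumentation: method exited");
    }

    void InstrumentedCall(CodePointer method) {
      CodePointer code = EntryHook(method);
      code();      // blx r12 in the assembly
      ExitHook();  // reached because lr was redirected to the exit stub
    }

    int main() {
      InstrumentedCall(+[] { std::puts("method body"); });
    }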
 
     /*
      * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
@@ -925,25 +947,6 @@
 END art_quick_deoptimize
 
     /*
-     * Portable abstract method error stub. r0 contains method* on entry. SP unused in portable.
-     */
-    .extern artThrowAbstractMethodErrorFromCode
-ENTRY art_portable_abstract_method_error_stub
-    mov    r1, r9         @ pass Thread::Current
-    b      artThrowAbstractMethodErrorFromCode  @ (Method*, Thread*, SP)
-END art_portable_abstract_method_error_stub
-
-    /*
-     * Quick abstract method error stub. r0 contains method* on entry.
-     */
-ENTRY art_quick_abstract_method_error_stub
-    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
-    mov    r1, r9         @ pass Thread::Current
-    mov    r2, sp         @ pass SP
-    b      artThrowAbstractMethodErrorFromCode  @ (Method*, Thread*, SP)
-END art_quick_abstract_method_error_stub
-
-    /*
      * Signed 64-bit integer multiply.
      *
      * Consider WXxYZ (r1r0 x r3r2) with a long multiply:
diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S
index 8a34b9d..fe932d2 100644
--- a/runtime/arch/mips/asm_support_mips.S
+++ b/runtime/arch/mips/asm_support_mips.S
@@ -38,4 +38,12 @@
     .cpload $t9
 .endm
 
+.macro UNIMPLEMENTED name
+    ENTRY \name
+    break
+    break
+    END \name
+.endm
+
+
 #endif  // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
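
UNIMPLEMENTED gives each not-yet-ported entrypoint a body that traps on entry (break here, int3 in the x86 counterpart below), so a stray call fails loudly at the stub rather than running off into garbage. A hedged C++ analogue of the same idea:

    #include <cstdlib>

    // Analogue only: the assembly macro emits trap instructions; the closest
    // portable C++ equivalent is a stub that aborts on entry.
    #define UNIMPLEMENTED_STUB(name) \
      extern "C" void name() { std::abort(); /* like `break` / `int3` */ }

    UNIMPLEMENTED_STUB(example_portable_resolution_trampoline)  // hypothetical

    int main() {
      // Calling example_portable_resolution_trampoline() would abort the process.
      return 0;
    }
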
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 0a62a40..a18079b 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -21,49 +21,61 @@
 
 namespace art {
 
+// Interpreter entrypoints.
+extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
+                                                 const DexFile::CodeItem* code_item,
+                                                 ShadowFrame* shadow_frame, JValue* result);
+extern "C" void artInterperterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
+                                           const DexFile::CodeItem* code_item,
+                                           ShadowFrame* shadow_frame, JValue* result);
+
+// Portable entrypoints.
+extern "C" void art_portable_resolution_trampoline(mirror::AbstractMethod*);
+extern "C" void art_portable_to_interpreter_bridge(mirror::AbstractMethod*);
+
 // Alloc entrypoints.
-extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method);
-extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method);
-extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t);
-extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_object(uint32_t type_idx, void* method);
+extern "C" void* art_quick_alloc_object_with_access_check(uint32_t type_idx, void* method);
+extern "C" void* art_quick_check_and_alloc_array(uint32_t, void*, int32_t);
+extern "C" void* art_quick_check_and_alloc_array_with_access_check(uint32_t, void*, int32_t);
 
 // Cast entrypoints.
 extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
                                             const mirror::Class* ref_class);
-extern "C" void art_quick_can_put_array_element_from_code(void*, void*);
-extern "C" void art_quick_check_cast_from_code(void*, void*);
+extern "C" void art_quick_can_put_array_element(void*, void*);
+extern "C" void art_quick_check_cast(void*, void*);
 
 // DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*);
-extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t);
+extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
+extern "C" void* art_quick_initialize_type(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
+extern "C" void* art_quick_resolve_string(void*, uint32_t);
 
 // Exception entrypoints.
 extern "C" void* GetAndClearException(Thread*);
 
 // Field entrypoints.
-extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*);
-extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static_from_code(uint32_t);
-extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static_from_code(uint32_t);
-extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static_from_code(uint32_t);
+extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
+extern "C" int art_quick_set32_static(uint32_t, int32_t);
+extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
+extern "C" int art_quick_set64_static(uint32_t, int64_t);
+extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
+extern "C" int art_quick_set_obj_static(uint32_t, void*);
+extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
+extern "C" int32_t art_quick_get32_static(uint32_t);
+extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
+extern "C" int64_t art_quick_get64_static(uint32_t);
+extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
+extern "C" void* art_quick_get_obj_static(uint32_t);
 
 // FillArray entrypoint.
-extern "C" void art_quick_handle_fill_data_from_code(void*, void*);
+extern "C" void art_quick_handle_fill_data(void*, void*);
 
 // Lock entrypoints.
-extern "C" void art_quick_lock_object_from_code(void*);
-extern "C" void art_quick_unlock_object_from_code(void*);
+extern "C" void art_quick_lock_object(void*);
+extern "C" void art_quick_unlock_object(void*);
 
 // Math entrypoints.
 extern int32_t CmpgDouble(double a, double b);
@@ -95,26 +107,14 @@
 extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
 extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
 
-// Interpreter entrypoints.
-extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
-                                                 const DexFile::CodeItem* code_item,
-                                                 ShadowFrame* shadow_frame, JValue* result);
-extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
-                                           const DexFile::CodeItem* code_item,
-                                           ShadowFrame* shadow_frame, JValue* result);
-
 // Intrinsic entrypoints.
 extern "C" int32_t __memcmp16(void*, void*, int32_t);
 extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
 extern "C" int32_t art_quick_string_compareto(void*, void*);
 
 // Invoke entrypoints.
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
-                                                       mirror::Object* receiver,
-                                                       mirror::AbstractMethod** sp, Thread* thread);
-extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
-                                                    mirror::Object* receiver,
-                                                    mirror::AbstractMethod** sp, Thread* thread);
+extern "C" void art_quick_resolution_trampoline(mirror::AbstractMethod*);
+extern "C" void art_quick_to_interpreter_bridge(mirror::AbstractMethod*);
 extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
 extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
 extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
@@ -127,49 +127,61 @@
 extern "C" void art_quick_test_suspend();
 
 // Throw entrypoints.
-extern "C" void art_quick_deliver_exception_from_code(void*);
-extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero_from_code();
-extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception_from_code();
-extern "C" void art_quick_throw_stack_overflow_from_code(void*);
+extern "C" void art_quick_deliver_exception(void*);
+extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
+extern "C" void art_quick_throw_div_zero();
+extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
+extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_stack_overflow(void*);
 
-void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) {
+void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
+                     PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
+  // Interpreter
+  ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
+  ipoints->pInterpreterToCompiledCodeBridge = artInterperterToCompiledCodeBridge;
+
+  // JNI
+  jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
+
+  // Portable
+  ppoints->pPortableResolutionTrampoline = art_portable_resolution_trampoline;
+  ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge;
+
   // Alloc
-  qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code;
-  qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
-  qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code;
-  qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
-  qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
-  qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
+  qpoints->pAllocArray = art_quick_alloc_array;
+  qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check;
+  qpoints->pAllocObject = art_quick_alloc_object;
+  qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check;
+  qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array;
+  qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check;
 
   // Cast
-  qpoints->pInstanceofNonTrivialFromCode = artIsAssignableFromCode;
-  qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
-  qpoints->pCheckCastFromCode = art_quick_check_cast_from_code;
+  qpoints->pInstanceofNonTrivial = artIsAssignableFromCode;
+  qpoints->pCanPutArrayElement = art_quick_can_put_array_element;
+  qpoints->pCheckCast = art_quick_check_cast;
 
   // DexCache
-  qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
-  qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
-  qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
-  qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code;
+  qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
+  qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
+  qpoints->pInitializeType = art_quick_initialize_type;
+  qpoints->pResolveString = art_quick_resolve_string;
 
   // Field
-  qpoints->pSet32Instance = art_quick_set32_instance_from_code;
-  qpoints->pSet32Static = art_quick_set32_static_from_code;
-  qpoints->pSet64Instance = art_quick_set64_instance_from_code;
-  qpoints->pSet64Static = art_quick_set64_static_from_code;
-  qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code;
-  qpoints->pSetObjStatic = art_quick_set_obj_static_from_code;
-  qpoints->pGet32Instance = art_quick_get32_instance_from_code;
-  qpoints->pGet64Instance = art_quick_get64_instance_from_code;
-  qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code;
-  qpoints->pGet32Static = art_quick_get32_static_from_code;
-  qpoints->pGet64Static = art_quick_get64_static_from_code;
-  qpoints->pGetObjStatic = art_quick_get_obj_static_from_code;
+  qpoints->pSet32Instance = art_quick_set32_instance;
+  qpoints->pSet32Static = art_quick_set32_static;
+  qpoints->pSet64Instance = art_quick_set64_instance;
+  qpoints->pSet64Static = art_quick_set64_static;
+  qpoints->pSetObjInstance = art_quick_set_obj_instance;
+  qpoints->pSetObjStatic = art_quick_set_obj_static;
+  qpoints->pGet32Instance = art_quick_get32_instance;
+  qpoints->pGet64Instance = art_quick_get64_instance;
+  qpoints->pGetObjInstance = art_quick_get_obj_instance;
+  qpoints->pGet32Static = art_quick_get32_static;
+  qpoints->pGet64Static = art_quick_get64_static;
+  qpoints->pGetObjStatic = art_quick_get_obj_static;
 
   // FillArray
-  qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
+  qpoints->pHandleFillArrayData = art_quick_handle_fill_data;
 
   // JNI
   qpoints->pJniMethodStart = JniMethodStart;
@@ -180,8 +192,8 @@
   qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
 
   // Locks
-  qpoints->pLockObjectFromCode = art_quick_lock_object_from_code;
-  qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
+  qpoints->pLockObject = art_quick_lock_object;
+  qpoints->pUnlockObject = art_quick_unlock_object;
 
   // Math
   qpoints->pCmpgDouble = CmpgDouble;
@@ -204,10 +216,6 @@
   qpoints->pShrLong = art_quick_shr_long;
   qpoints->pUshrLong = art_quick_ushr_long;
 
-  // Interpreter
-  qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
-  qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
-
   // Intrinsics
   qpoints->pIndexOf = art_quick_indexof;
   qpoints->pMemcmp16 = __memcmp16;
@@ -215,7 +223,8 @@
   qpoints->pMemcpy = memcpy;
 
   // Invocation
-  qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
+  qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
+  qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
   qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
   qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
   qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
@@ -224,19 +233,16 @@
   qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
 
   // Thread
-  qpoints->pCheckSuspendFromCode = CheckSuspendFromCode;
-  qpoints->pTestSuspendFromCode = art_quick_test_suspend;
+  qpoints->pCheckSuspend = CheckSuspendFromCode;
+  qpoints->pTestSuspend = art_quick_test_suspend;
 
   // Throws
-  qpoints->pDeliverException = art_quick_deliver_exception_from_code;
-  qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
-  qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
-  qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
-  qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
-  qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
-
-  // Portable
-  ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
+  qpoints->pDeliverException = art_quick_deliver_exception;
+  qpoints->pThrowArrayBounds = art_quick_throw_array_bounds;
+  qpoints->pThrowDivZero = art_quick_throw_div_zero;
+  qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
+  qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
+  qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
 };
 
 }  // namespace art
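
The reworked InitEntryPoints splits the old two-table interface into four tables, one per dispatch flavor: interpreter, JNI, portable and quick. A compressed sketch of the new shape, with a single token field per table (the real structs carry many more slots):

    #include <cstdio>

    // Token single-field versions of the four tables; the real definitions
    // live in the runtime's entrypoint headers.
    struct InterpreterEntryPoints { void (*pInterpreterToInterpreterBridge)(); };
    struct JniEntryPoints         { void (*pDlsymLookup)(); };
    struct PortableEntryPoints    { void (*pPortableResolutionTrampoline)(); };
    struct QuickEntryPoints       { void (*pAllocObject)(); };

    void Stub() { std::puts("stub"); }

    // Mirrors the new signature: each architecture fills in all four tables.
    void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
                         PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
      ipoints->pInterpreterToInterpreterBridge = Stub;
      jpoints->pDlsymLookup = Stub;
      ppoints->pPortableResolutionTrampoline = Stub;
      qpoints->pAllocObject = Stub;
    }

    int main() {
      InterpreterEntryPoints i; JniEntryPoints j; PortableEntryPoints p; QuickEntryPoints q;
      InitEntryPoints(&i, &j, &p, &q);
      q.pAllocObject();  // a thread reaches these through its entrypoint tables
    }
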
diff --git a/runtime/arch/mips/jni_entrypoints_mips.S b/runtime/arch/mips/jni_entrypoints_mips.S
index fca6d77..ad7c021 100644
--- a/runtime/arch/mips/jni_entrypoints_mips.S
+++ b/runtime/arch/mips/jni_entrypoints_mips.S
@@ -59,7 +59,7 @@
      * Entry point of native methods when JNI bug compatibility is enabled.
      */
     .extern artWorkAroundAppJniBugs
-ENTRY art_quick_work_around_app_jni_bugs
+ENTRY art_work_around_app_jni_bugs
     GENERATE_GLOBAL_POINTER
     # save registers that may contain arguments and LR that will be crushed by a call
     addiu    $sp, $sp, -32
@@ -86,4 +86,4 @@
     jr       $t9              # tail call into JNI routine
     addiu    $sp, $sp, 32
     .cfi_adjust_cfa_offset -32
-END art_quick_work_around_app_jni_bugs
+END art_work_around_app_jni_bugs
diff --git a/runtime/arch/mips/portable_entrypoints_mips.S b/runtime/arch/mips/portable_entrypoints_mips.S
index e7a9b0f..9208a8a 100644
--- a/runtime/arch/mips/portable_entrypoints_mips.S
+++ b/runtime/arch/mips/portable_entrypoints_mips.S
@@ -61,13 +61,5 @@
     .cfi_adjust_cfa_offset -64
 END art_portable_proxy_invoke_handler
 
-    /*
-     * Portable abstract method error stub. $a0 contains method* on entry. SP unused in portable.
-     */
-    .extern artThrowAbstractMethodErrorFromCode
-ENTRY art_portable_abstract_method_error_stub
-    GENERATE_GLOBAL_POINTER
-    la       $t9, artThrowAbstractMethodErrorFromCode
-    jr       $t9            # (Method*, Thread*, SP)
-    move     $a1, $s1       # pass Thread::Current
-END art_portable_abstract_method_error_stub
+UNIMPLEMENTED art_portable_resolution_trampoline
+UNIMPLEMENTED art_portable_to_interpreter_bridge
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index d32a2b4..004fda6 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -143,7 +143,7 @@
     lw     $a1, 4($sp)            # restore non-callee save $a1
     lw     $a2, 8($sp)            # restore non-callee save $a2
     lw     $a3, 12($sp)           # restore non-callee save $a3
-    addiu  $sp, $sp, 64           # strip frame
+    addiu  $sp, $sp, 64           # pop frame
     .cfi_adjust_cfa_offset -64
 .endm
 
@@ -268,79 +268,79 @@
      * the bottom of the stack. artDeliverExceptionFromCode will place the callee save Method* at
      * the bottom of the thread. On entry $a0 holds Throwable*
      */
-ENTRY art_quick_deliver_exception_from_code
+ENTRY art_quick_deliver_exception
     GENERATE_GLOBAL_POINTER
     SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
     move $a1, rSELF                 # pass Thread::Current
     la   $t9, artDeliverExceptionFromCode
     jr   $t9                        # artDeliverExceptionFromCode(Throwable*, Thread*, $sp)
     move $a2, $sp                   # pass $sp
-END art_quick_deliver_exception_from_code
+END art_quick_deliver_exception
 
     /*
      * Called by managed code to create and deliver a NullPointerException
      */
     .extern artThrowNullPointerExceptionFromCode
-ENTRY art_quick_throw_null_pointer_exception_from_code
+ENTRY art_quick_throw_null_pointer_exception
     GENERATE_GLOBAL_POINTER
     SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
     move $a0, rSELF                 # pass Thread::Current
     la   $t9, artThrowNullPointerExceptionFromCode
     jr   $t9                        # artThrowNullPointerExceptionFromCode(Thread*, $sp)
     move $a1, $sp                   # pass $sp
-END art_quick_throw_null_pointer_exception_from_code
+END art_quick_throw_null_pointer_exception
 
     /*
      * Called by managed code to create and deliver an ArithmeticException
      */
     .extern artThrowDivZeroFromCode
-ENTRY art_quick_throw_div_zero_from_code
+ENTRY art_quick_throw_div_zero
     GENERATE_GLOBAL_POINTER
     SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
     move $a0, rSELF                 # pass Thread::Current
     la   $t9, artThrowDivZeroFromCode
     jr   $t9                        # artThrowDivZeroFromCode(Thread*, $sp)
     move $a1, $sp                   # pass $sp
-END art_quick_throw_div_zero_from_code
+END art_quick_throw_div_zero
 
     /*
      * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException
      */
     .extern artThrowArrayBoundsFromCode
-ENTRY art_quick_throw_array_bounds_from_code
+ENTRY art_quick_throw_array_bounds
     GENERATE_GLOBAL_POINTER
     SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
     move $a2, rSELF                 # pass Thread::Current
     la   $t9, artThrowArrayBoundsFromCode
     jr   $t9                        # artThrowArrayBoundsFromCode(index, limit, Thread*, $sp)
     move $a3, $sp                   # pass $sp
-END art_quick_throw_array_bounds_from_code
+END art_quick_throw_array_bounds
 
     /*
      * Called by managed code to create and deliver a StackOverflowError.
      */
     .extern artThrowStackOverflowFromCode
-ENTRY art_quick_throw_stack_overflow_from_code
+ENTRY art_quick_throw_stack_overflow
     GENERATE_GLOBAL_POINTER
     SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
     move $a0, rSELF                 # pass Thread::Current
     la   $t9, artThrowStackOverflowFromCode
     jr   $t9                        # artThrowStackOverflowFromCode(Thread*, $sp)
     move $a1, $sp                   # pass $sp
-END art_quick_throw_stack_overflow_from_code
+END art_quick_throw_stack_overflow
 
     /*
      * Called by managed code to create and deliver a NoSuchMethodError.
      */
     .extern artThrowNoSuchMethodFromCode
-ENTRY art_quick_throw_no_such_method_from_code
+ENTRY art_quick_throw_no_such_method
     GENERATE_GLOBAL_POINTER
     SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
     move $a1, rSELF                 # pass Thread::Current
     la   $t9, artThrowNoSuchMethodFromCode
     jr   $t9                        # artThrowNoSuchMethodFromCode(method_idx, Thread*, $sp)
     move $a2, $sp                   # pass $sp
-END art_quick_throw_no_such_method_from_code
+END art_quick_throw_no_such_method
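
All of the throw stubs above share one shape: save everything, pass Thread::Current and SP, and jump into a C++ helper that never returns because it unwinds into exception delivery. In C++ terms the helpers behave like [[noreturn]] functions; a small hedged sketch:

    #include <cstdio>
    #include <cstdlib>

    // Illustrative only: models the artThrow*FromCode helpers, which hand
    // control to exception delivery and never return to the stub.
    [[noreturn]] void ThrowDivZero() {
      std::puts("ArithmeticException: divide by zero");
      std::exit(1);  // stands in for unwinding into the catch handler
    }

    int CheckedDiv(int num, int denom) {
      if (denom == 0) {
        ThrowDivZero();  // the stub's `jr $t9` into artThrowDivZeroFromCode
      }
      return num / denom;
    }

    int main() {
      std::printf("%d\n", CheckedDiv(10, 2));
      std::printf("%d\n", CheckedDiv(1, 0));  // never prints
    }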
 
     /*
      * All generated callsites for interface invokes and invocation slow paths will load arguments
@@ -466,67 +466,67 @@
      * failure.
      */
     .extern artHandleFillArrayDataFromCode
-ENTRY art_quick_handle_fill_data_from_code
+ENTRY art_quick_handle_fill_data
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case exception allocation triggers GC
     move    $a2, rSELF                         # pass Thread::Current
     jal     artHandleFillArrayDataFromCode     # (Array*, const DexFile::Payload*, Thread*, $sp)
     move    $a3, $sp                           # pass $sp
     RETURN_IF_ZERO
-END art_quick_handle_fill_data_from_code
+END art_quick_handle_fill_data
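
The MIPS stubs here and below funnel their epilogues through three macros keyed to what the helper returns: RETURN_IF_ZERO when zero means success, RETURN_IF_NONZERO when a non-null result means success, and RETURN_IF_NO_EXCEPTION when any value is valid and only a pending exception signals failure. A sketch of the three conventions (helper shapes hypothetical):

    #include <cstdio>

    struct Thread { const void* exception_ = nullptr; };

    // RETURN_IF_ZERO style: helper returns 0 on success, non-zero means throw.
    bool SucceededChecked(int status) { return status == 0; }

    // RETURN_IF_NONZERO style: helper returns a pointer, null means throw.
    bool SucceededLookup(void* result) { return result != nullptr; }

    // RETURN_IF_NO_EXCEPTION style: any value is valid; only the thread's
    // pending exception decides, since a field can legitimately hold 0.
    bool SucceededAccessor(Thread* self) { return self->exception_ == nullptr; }

    int main() {
      Thread self;
      std::printf("checked: %d lookup: %d accessor: %d\n",
                  SucceededChecked(0), SucceededLookup(&self),
                  SucceededAccessor(&self));
    }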
 
     /*
      * Entry from managed code that calls artLockObjectFromCode, may block for GC.
      */
     .extern artLockObjectFromCode
-ENTRY art_quick_lock_object_from_code
+ENTRY art_quick_lock_object
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME      # save callee saves in case we block
     move    $a1, rSELF                    # pass Thread::Current
     jal     artLockObjectFromCode         # (Object* obj, Thread*, $sp)
     move    $a2, $sp                      # pass $sp
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
-END art_quick_lock_object_from_code
+END art_quick_lock_object
 
     /*
      * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
      */
     .extern artUnlockObjectFromCode
-ENTRY art_quick_unlock_object_from_code
+ENTRY art_quick_unlock_object
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case exception allocation triggers GC
     move    $a1, rSELF                # pass Thread::Current
     jal     artUnlockObjectFromCode   # (Object* obj, Thread*, $sp)
     move    $a2, $sp                  # pass $sp
     RETURN_IF_ZERO
-END art_quick_unlock_object_from_code
+END art_quick_unlock_object
 
     /*
      * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure.
      */
     .extern artCheckCastFromCode
-ENTRY art_quick_check_cast_from_code
+ENTRY art_quick_check_cast
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case exception allocation triggers GC
     move    $a2, rSELF                # pass Thread::Current
     jal     artCheckCastFromCode      # (Class* a, Class* b, Thread*, $sp)
     move    $a3, $sp                  # pass $sp
     RETURN_IF_ZERO
-END art_quick_check_cast_from_code
+END art_quick_check_cast
 
     /*
      * Entry from managed code that calls artCanPutArrayElementFromCode and delivers exception on
      * failure.
      */
     .extern artCanPutArrayElementFromCode
-ENTRY art_quick_can_put_array_element_from_code
+ENTRY art_quick_can_put_array_element
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME    # save callee saves in case exception allocation triggers GC
     move    $a2, rSELF                     # pass Thread::Current
     jal     artCanPutArrayElementFromCode  # (Object* element, Class* array_class, Thread*, $sp)
     move    $a3, $sp                       # pass $sp
     RETURN_IF_ZERO
-END art_quick_can_put_array_element_from_code
+END art_quick_can_put_array_element
 
     /*
      * Entry from managed code when uninitialized static storage, this stub will run the class
@@ -534,7 +534,7 @@
      * returned.
      */
     .extern artInitializeStaticStorageFromCode
-ENTRY art_quick_initialize_static_storage_from_code
+ENTRY art_quick_initialize_static_storage
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME            # save callee saves in case of GC
     move    $a2, rSELF                          # pass Thread::Current
@@ -542,13 +542,13 @@
     jal     artInitializeStaticStorageFromCode
     move    $a3, $sp                            # pass $sp
     RETURN_IF_NONZERO
-END art_quick_initialize_static_storage_from_code
+END art_quick_initialize_static_storage
 
     /*
      * Entry from managed code when dex cache misses for a type_idx.
      */
     .extern artInitializeTypeFromCode
-ENTRY art_quick_initialize_type_from_code
+ENTRY art_quick_initialize_type
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME           # save callee saves in case of GC
     move    $a2, rSELF                         # pass Thread::Current
@@ -556,14 +556,14 @@
     jal     artInitializeTypeFromCode
     move    $a3, $sp                           # pass $sp
     RETURN_IF_NONZERO
-END art_quick_initialize_type_from_code
+END art_quick_initialize_type
 
     /*
      * Entry from managed code when type_idx needs to be checked for access and dex cache may also
      * miss.
      */
     .extern artInitializeTypeAndVerifyAccessFromCode
-ENTRY art_quick_initialize_type_and_verify_access_from_code
+ENTRY art_quick_initialize_type_and_verify_access
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME           # save callee saves in case of GC
     move    $a2, rSELF                         # pass Thread::Current
@@ -571,13 +571,13 @@
     jal     artInitializeTypeAndVerifyAccessFromCode
     move    $a3, $sp                           # pass $sp
     RETURN_IF_NONZERO
-END art_quick_initialize_type_and_verify_access_from_code
+END art_quick_initialize_type_and_verify_access
 
     /*
      * Called by managed code to resolve a static field and load a 32-bit primitive value.
      */
     .extern artGet32StaticFromCode
-ENTRY art_quick_get32_static_from_code
+ENTRY art_quick_get32_static
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a1, 64($sp)                  # pass referrer's Method*
@@ -585,13 +585,13 @@
     jal    artGet32StaticFromCode        # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
     move   $a3, $sp                      # pass $sp
     RETURN_IF_NO_EXCEPTION
-END art_quick_get32_static_from_code
+END art_quick_get32_static
 
     /*
      * Called by managed code to resolve a static field and load a 64-bit primitive value.
      */
     .extern artGet64StaticFromCode
-ENTRY art_quick_get64_static_from_code
+ENTRY art_quick_get64_static
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a1, 64($sp)                  # pass referrer's Method*
@@ -599,13 +599,13 @@
     jal    artGet64StaticFromCode        # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
     move   $a3, $sp                      # pass $sp
     RETURN_IF_NO_EXCEPTION
-END art_quick_get64_static_from_code
+END art_quick_get64_static
 
     /*
      * Called by managed code to resolve a static field and load an object reference.
      */
     .extern artGetObjStaticFromCode
-ENTRY art_quick_get_obj_static_from_code
+ENTRY art_quick_get_obj_static
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a1, 64($sp)                  # pass referrer's Method*
@@ -613,13 +613,13 @@
     jal    artGetObjStaticFromCode       # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
     move   $a3, $sp                      # pass $sp
     RETURN_IF_NO_EXCEPTION
-END art_quick_get_obj_static_from_code
+END art_quick_get_obj_static
 
     /*
      * Called by managed code to resolve an instance field and load a 32-bit primitive value.
      */
     .extern artGet32InstanceFromCode
-ENTRY art_quick_get32_instance_from_code
+ENTRY art_quick_get32_instance
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a2, 64($sp)                  # pass referrer's Method*
@@ -627,13 +627,13 @@
     jal    artGet32InstanceFromCode      # (field_idx, Object*, referrer, Thread*, $sp)
     sw     $sp, 16($sp)                  # pass $sp
     RETURN_IF_NO_EXCEPTION
-END art_quick_get32_instance_from_code
+END art_quick_get32_instance
 
     /*
      * Called by managed code to resolve an instance field and load a 64-bit primitive value.
      */
     .extern artGet64InstanceFromCode
-ENTRY art_quick_get64_instance_from_code
+ENTRY art_quick_get64_instance
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a2, 64($sp)                  # pass referrer's Method*
@@ -641,13 +641,13 @@
     jal    artGet64InstanceFromCode      # (field_idx, Object*, referrer, Thread*, $sp)
     sw     $sp, 16($sp)                  # pass $sp
     RETURN_IF_NO_EXCEPTION
-END art_quick_get64_instance_from_code
+END art_quick_get64_instance
 
     /*
      * Called by managed code to resolve an instance field and load an object reference.
      */
     .extern artGetObjInstanceFromCode
-ENTRY art_quick_get_obj_instance_from_code
+ENTRY art_quick_get_obj_instance
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a2, 64($sp)                  # pass referrer's Method*
@@ -655,13 +655,13 @@
     jal    artGetObjInstanceFromCode     # (field_idx, Object*, referrer, Thread*, $sp)
     sw     $sp, 16($sp)                  # pass $sp
     RETURN_IF_NO_EXCEPTION
-END art_quick_get_obj_instance_from_code
+END art_quick_get_obj_instance
 
     /*
      * Called by managed code to resolve a static field and store a 32-bit primitive value.
      */
     .extern artSet32StaticFromCode
-ENTRY art_quick_set32_static_from_code
+ENTRY art_quick_set32_static
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a2, 64($sp)                  # pass referrer's Method*
@@ -669,13 +669,13 @@
     jal    artSet32StaticFromCode        # (field_idx, new_val, referrer, Thread*, $sp)
     sw     $sp, 16($sp)                  # pass $sp
     RETURN_IF_ZERO
-END art_quick_set32_static_from_code
+END art_quick_set32_static
 
     /*
      * Called by managed code to resolve a static field and store a 64-bit primitive value.
      */
     .extern artSet64StaticFromCode
-ENTRY art_quick_set64_static_from_code
+ENTRY art_quick_set64_static
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a1, 64($sp)                  # pass referrer's Method*
@@ -683,13 +683,13 @@
     jal    artSet64StaticFromCode        # (field_idx, referrer, new_val, Thread*, $sp)
     sw     $sp, 20($sp)                  # pass $sp
     RETURN_IF_ZERO
-END art_quick_set64_static_from_code
+END art_quick_set64_static
 
     /*
      * Called by managed code to resolve a static field and store an object reference.
      */
     .extern artSetObjStaticFromCode
-ENTRY art_quick_set_obj_static_from_code
+ENTRY art_quick_set_obj_static
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a2, 64($sp)                  # pass referrer's Method*
@@ -697,13 +697,13 @@
     jal    artSetObjStaticFromCode       # (field_idx, new_val, referrer, Thread*, $sp)
     sw     $sp, 16($sp)                  # pass $sp
     RETURN_IF_ZERO
-END art_quick_set_obj_static_from_code
+END art_quick_set_obj_static
 
     /*
      * Called by managed code to resolve an instance field and store a 32-bit primitive value.
      */
     .extern artSet32InstanceFromCode
-ENTRY art_quick_set32_instance_from_code
+ENTRY art_quick_set32_instance
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a3, 64($sp)                  # pass referrer's Method*
@@ -711,26 +711,26 @@
     jal    artSet32InstanceFromCode      # (field_idx, Object*, new_val, referrer, Thread*, $sp)
     sw     $sp, 20($sp)                  # pass $sp
     RETURN_IF_ZERO
-END art_quick_set32_instance_from_code
+END art_quick_set32_instance
 
     /*
      * Called by managed code to resolve an instance field and store a 64-bit primitive value.
      */
     .extern artSet64InstanceFromCode
-ENTRY art_quick_set64_instance_from_code
+ENTRY art_quick_set64_instance
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     sw     rSELF, 16($sp)                # pass Thread::Current
     jal    artSet64InstanceFromCode      # (field_idx, Object*, new_val, Thread*, $sp)
     sw     $sp, 20($sp)                  # pass $sp
     RETURN_IF_ZERO
-END art_quick_set64_instance_from_code
+END art_quick_set64_instance
 
     /*
      * Called by managed code to resolve an instance field and store an object reference.
      */
     .extern artSetObjInstanceFromCode
-ENTRY art_quick_set_obj_instance_from_code
+ENTRY art_quick_set_obj_instance
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
     lw     $a3, 64($sp)                  # pass referrer's Method*
@@ -738,7 +738,7 @@
     jal    artSetObjInstanceFromCode     # (field_idx, Object*, new_val, referrer, Thread*, $sp)
     sw     $sp, 20($sp)                  # pass $sp
     RETURN_IF_ZERO
-END art_quick_set_obj_instance_from_code
+END art_quick_set_obj_instance
 
     /*
      * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
@@ -747,7 +747,7 @@
      * performed.
      */
     .extern artResolveStringFromCode
-ENTRY art_quick_resolve_string_from_code
+ENTRY art_quick_resolve_string
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
     move    $a2, rSELF                # pass Thread::Current
@@ -755,40 +755,40 @@
     jal     artResolveStringFromCode
     move    $a3, $sp                  # pass $sp
     RETURN_IF_NONZERO
-END art_quick_resolve_string_from_code
+END art_quick_resolve_string
 
     /*
      * Called by managed code to allocate an object.
      */
     .extern artAllocObjectFromCode
-ENTRY art_quick_alloc_object_from_code
+ENTRY art_quick_alloc_object
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
     move    $a2, rSELF                # pass Thread::Current
     jal     artAllocObjectFromCode    # (uint32_t type_idx, Method* method, Thread*, $sp)
     move    $a3, $sp                  # pass $sp
     RETURN_IF_NONZERO
-END art_quick_alloc_object_from_code
+END art_quick_alloc_object
 
     /*
      * Called by managed code to allocate an object when the caller doesn't know whether it has
      * access to the created type.
      */
     .extern artAllocObjectFromCodeWithAccessCheck
-ENTRY art_quick_alloc_object_from_code_with_access_check
+ENTRY art_quick_alloc_object_with_access_check
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
     move    $a2, rSELF                # pass Thread::Current
     jal     artAllocObjectFromCodeWithAccessCheck  # (uint32_t type_idx, Method* method, Thread*, $sp)
     move    $a3, $sp                  # pass $sp
     RETURN_IF_NONZERO
-END art_quick_alloc_object_from_code_with_access_check
+END art_quick_alloc_object_with_access_check
 
     /*
      * Called by managed code to allocate an array.
      */
     .extern artAllocArrayFromCode
-ENTRY art_quick_alloc_array_from_code
+ENTRY art_quick_alloc_array
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
     move    $a3, rSELF                # pass Thread::Current
@@ -796,14 +796,14 @@
     jal     artAllocArrayFromCode
     sw      $sp, 16($sp)              # pass $sp
     RETURN_IF_NONZERO
-END art_quick_alloc_array_from_code
+END art_quick_alloc_array
 
     /*
      * Called by managed code to allocate an array when the caller doesn't know whether it has
      * access to the created type.
      */
     .extern artAllocArrayFromCodeWithAccessCheck
-ENTRY art_quick_alloc_array_from_code_with_access_check
+ENTRY art_quick_alloc_array_with_access_check
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
     move    $a3, rSELF                # pass Thread::Current
@@ -811,13 +811,13 @@
     jal     artAllocArrayFromCodeWithAccessCheck
     sw      $sp, 16($sp)              # pass $sp
     RETURN_IF_NONZERO
-END art_quick_alloc_array_from_code_with_access_check
+END art_quick_alloc_array_with_access_check
 
     /*
      * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
      */
     .extern artCheckAndAllocArrayFromCode
-ENTRY art_quick_check_and_alloc_array_from_code
+ENTRY art_quick_check_and_alloc_array
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
     move    $a3, rSELF                # pass Thread::Current
@@ -825,13 +825,13 @@
     jal     artCheckAndAllocArrayFromCode
     sw      $sp, 16($sp)              # pass $sp
     RETURN_IF_NONZERO
-END art_quick_check_and_alloc_array_from_code
+END art_quick_check_and_alloc_array
 
     /*
      * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
      */
     .extern artCheckAndAllocArrayFromCodeWithAccessCheck
-ENTRY art_quick_check_and_alloc_array_from_code_with_access_check
+ENTRY art_quick_check_and_alloc_array_with_access_check
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  # save callee saves in case of GC
     move    $a3, rSELF                # pass Thread::Current
@@ -839,7 +839,7 @@
     jal     artCheckAndAllocArrayFromCodeWithAccessCheck
     sw      $sp, 16($sp)              # pass $sp
     RETURN_IF_NONZERO
-END art_quick_check_and_alloc_array_from_code_with_access_check
+END art_quick_check_and_alloc_array_with_access_check
 
     /*
      * Called by managed code when the value in rSUSPEND has been decremented to 0.
@@ -884,13 +884,33 @@
     DELIVER_PENDING_EXCEPTION
 END art_quick_proxy_invoke_handler
 
-    .extern artInterpreterEntry
-ENTRY art_quick_interpreter_entry
+    .extern artQuickResolutionTrampoline
+ENTRY art_quick_resolution_trampoline
     GENERATE_GLOBAL_POINTER
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
-    sw      $a0, 0($sp)            # place proxy method at bottom of frame
+    move    $a2, rSELF             # pass Thread::Current
+    jal     artQuickResolutionTrampoline  # (Method* called, receiver, Thread*, SP)
+    move    $a3, $sp               # pass $sp
+    lw      $gp, 52($sp)           # restore $gp
+    lw      $ra, 60($sp)           # restore $ra
+    beqz    $v0, 1f
+    lw      $a0, 0($sp)            # load resolved method to $a0
+    lw      $a1, 4($sp)            # restore non-callee save $a1
+    lw      $a2, 8($sp)            # restore non-callee save $a2
+    lw      $a3, 12($sp)           # restore non-callee save $a3
+    jr      $v0                    # tail call to method
+1:
+    addiu   $sp, $sp, 64           # pop frame
+    .cfi_adjust_cfa_offset -64
+    DELIVER_PENDING_EXCEPTION
+END art_quick_resolution_trampoline
+
+    .extern artQuickToInterpreterBridge
+ENTRY art_quick_to_interpreter_bridge
+    GENERATE_GLOBAL_POINTER
+    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
     move    $a1, rSELF             # pass Thread::Current
-    jal     artInterpreterEntry    # (Method* method, Thread*, SP)
+    jal     artQuickToInterpreterBridge    # (Method* method, Thread*, SP)
     move    $a2, $sp               # pass $sp
     lw      $t0, THREAD_EXCEPTION_OFFSET(rSELF) # load Thread::Current()->exception_
     lw      $gp, 52($sp)           # restore $gp
@@ -902,14 +922,14 @@
     nop
 1:
     DELIVER_PENDING_EXCEPTION
-END art_quick_interpreter_entry
+END art_quick_to_interpreter_bridge
 
     /*
      * Routine that intercepts method calls and returns.
      */
     .extern artInstrumentationMethodEntryFromCode
     .extern artInstrumentationMethodExitFromCode
-ENTRY art_quick_instrumentation_entry_from_code
+ENTRY art_quick_instrumentation_entry
     GENERATE_GLOBAL_POINTER
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
     move     $t0, $sp       # remember bottom of caller's frame
@@ -927,10 +947,10 @@
     RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
     jalr     $t9            # call method
     nop
-END art_quick_instrumentation_entry_from_code
+END art_quick_instrumentation_entry
     /* intentional fallthrough */
-    .global art_quick_instrumentation_exit_from_code
-art_quick_instrumentation_exit_from_code:
+    .global art_quick_instrumentation_exit
+art_quick_instrumentation_exit:
     .cfi_startproc
     addiu    $t9, $ra, 4    # put current address into $t9 to rebuild $gp
     GENERATE_GLOBAL_POINTER
@@ -960,7 +980,7 @@
     jr       $t0            # return
     addiu    $sp, $sp, 112  # 48 bytes of args + 64 bytes of callee save frame
     .cfi_adjust_cfa_offset -112
-END art_quick_instrumentation_exit_from_code
+END art_quick_instrumentation_exit
 
     /*
      * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
@@ -978,18 +998,6 @@
 END art_quick_deoptimize
 
     /*
-     * Quick abstract method error stub. $a0 contains method* on entry.
-     */
-ENTRY art_quick_abstract_method_error_stub
-    GENERATE_GLOBAL_POINTER
-    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
-    move     $a1, $s1       # pass Thread::Current
-    la       $t9, artThrowAbstractMethodErrorFromCode
-    jr       $t9            # (Method*, Thread*, SP)
-    move     $a2, $sp       # pass SP
-END art_quick_abstract_method_error_stub
-
-    /*
      * Long integer shift.  This is different from the generic 32/64-bit
      * binary operations because vAA/vBB are 64-bit but vCC (the shift
      * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index 7e6dce9..7a3fdfa 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -88,4 +88,16 @@
   .cfi_restore REG_VAR(reg,0)
 END_MACRO
 
+MACRO1(UNIMPLEMENTED,name)
+    .type VAR(name, 0), @function
+    .globl VAR(name, 0)
+    ALIGN_FUNCTION_ENTRY
+VAR(name, 0):
+    .cfi_startproc
+    int3
+    int3
+    .cfi_endproc
+    .size VAR(name, 0), .-VAR(name, 0)
+END_MACRO
+
 #endif  // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index d47dfef..9152674 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -20,70 +20,74 @@
 
 namespace art {
 
-// Alloc entrypoints.
-extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method);
-extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method);
-extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t);
-extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
-
-// Cast entrypoints.
-extern "C" uint32_t art_quick_is_assignable_from_code(const mirror::Class* klass,
-                                                const mirror::Class* ref_class);
-extern "C" void art_quick_can_put_array_element_from_code(void*, void*);
-extern "C" void art_quick_check_cast_from_code(void*, void*);
-
-// DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*);
-extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t);
-
-// Field entrypoints.
-extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*);
-extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static_from_code(uint32_t);
-extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static_from_code(uint32_t);
-extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static_from_code(uint32_t);
-
-// FillArray entrypoint.
-extern "C" void art_quick_handle_fill_data_from_code(void*, void*);
-
-// Lock entrypoints.
-extern "C" void art_quick_lock_object_from_code(void*);
-extern "C" void art_quick_unlock_object_from_code(void*);
-
-// Math entrypoints.
-extern "C" double art_quick_fmod_from_code(double, double);
-extern "C" float art_quick_fmodf_from_code(float, float);
-extern "C" double art_quick_l2d_from_code(int64_t);
-extern "C" float art_quick_l2f_from_code(int64_t);
-extern "C" int64_t art_quick_d2l_from_code(double);
-extern "C" int64_t art_quick_f2l_from_code(float);
-extern "C" int32_t art_quick_idivmod_from_code(int32_t, int32_t);
-extern "C" int64_t art_quick_ldiv_from_code(int64_t, int64_t);
-extern "C" int64_t art_quick_ldivmod_from_code(int64_t, int64_t);
-extern "C" int64_t art_quick_lmul_from_code(int64_t, int64_t);
-extern "C" uint64_t art_quick_lshl_from_code(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_lshr_from_code(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_lushr_from_code(uint64_t, uint32_t);
-
 // Interpreter entrypoints.
-extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
-                                                 const DexFile::CodeItem* code_item,
-                                                 ShadowFrame* shadow_frame, JValue* result);
-extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
+extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
+                                                  const DexFile::CodeItem* code_item,
+                                                  ShadowFrame* shadow_frame, JValue* result);
+extern "C" void artInterperterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
                                            const DexFile::CodeItem* code_item,
                                            ShadowFrame* shadow_frame, JValue* result);
 
+// Portable entrypoints.
+extern "C" void art_portable_resolution_trampoline(mirror::AbstractMethod*);
+extern "C" void art_portable_to_interpreter_bridge(mirror::AbstractMethod*);
+
+// Alloc entrypoints.
+extern "C" void* art_quick_alloc_array(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_object(uint32_t type_idx, void* method);
+extern "C" void* art_quick_alloc_object_with_access_check(uint32_t type_idx, void* method);
+extern "C" void* art_quick_check_and_alloc_array(uint32_t, void*, int32_t);
+extern "C" void* art_quick_check_and_alloc_array_with_access_check(uint32_t, void*, int32_t);
+
+// Cast entrypoints.
+extern "C" uint32_t art_quick_is_assignable(const mirror::Class* klass,
+                                                const mirror::Class* ref_class);
+extern "C" void art_quick_can_put_array_element(void*, void*);
+extern "C" void art_quick_check_cast(void*, void*);
+
+// DexCache entrypoints.
+extern "C" void* art_quick_initialize_static_storage(uint32_t, void*);
+extern "C" void* art_quick_initialize_type(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
+extern "C" void* art_quick_resolve_string(void*, uint32_t);
+
+// Field entrypoints.
+extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
+extern "C" int art_quick_set32_static(uint32_t, int32_t);
+extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
+extern "C" int art_quick_set64_static(uint32_t, int64_t);
+extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
+extern "C" int art_quick_set_obj_static(uint32_t, void*);
+extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
+extern "C" int32_t art_quick_get32_static(uint32_t);
+extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
+extern "C" int64_t art_quick_get64_static(uint32_t);
+extern "C" void* art_quick_get_obj_instance(uint32_t, void*);
+extern "C" void* art_quick_get_obj_static(uint32_t);
+
+// FillArray entrypoint.
+extern "C" void art_quick_handle_fill_data(void*, void*);
+
+// Lock entrypoints.
+extern "C" void art_quick_lock_object(void*);
+extern "C" void art_quick_unlock_object(void*);
+
+// Math entrypoints.
+extern "C" double art_quick_fmod(double, double);
+extern "C" float art_quick_fmodf(float, float);
+extern "C" double art_quick_l2d(int64_t);
+extern "C" float art_quick_l2f(int64_t);
+extern "C" int64_t art_quick_d2l(double);
+extern "C" int64_t art_quick_f2l(float);
+extern "C" int32_t art_quick_idivmod(int32_t, int32_t);
+extern "C" int64_t art_quick_ldiv(int64_t, int64_t);
+extern "C" int64_t art_quick_ldivmod(int64_t, int64_t);
+extern "C" int64_t art_quick_lmul(int64_t, int64_t);
+extern "C" uint64_t art_quick_lshl(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_lshr(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_lushr(uint64_t, uint32_t);
+
 // Intrinsic entrypoints.
 extern "C" int32_t art_quick_memcmp16(void*, void*, int32_t);
 extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
@@ -91,12 +95,8 @@
 extern "C" void* art_quick_memcpy(void*, const void*, size_t);
 
 // Invoke entrypoints.
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
-                                                       mirror::Object* receiver,
-                                                       mirror::AbstractMethod** sp, Thread* thread);
-extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
-                                                    mirror::Object* receiver,
-                                                    mirror::AbstractMethod** sp, Thread* thread);
+extern "C" void art_quick_resolution_trampoline(mirror::AbstractMethod*);
+extern "C" void art_quick_to_interpreter_bridge(mirror::AbstractMethod*);
 extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
 extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
 extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
@@ -109,49 +109,61 @@
 extern "C" void art_quick_test_suspend();
 
 // Throw entrypoints.
-extern "C" void art_quick_deliver_exception_from_code(void*);
-extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero_from_code();
-extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception_from_code();
-extern "C" void art_quick_throw_stack_overflow_from_code(void*);
+extern "C" void art_quick_deliver_exception(void*);
+extern "C" void art_quick_throw_array_bounds(int32_t index, int32_t limit);
+extern "C" void art_quick_throw_div_zero();
+extern "C" void art_quick_throw_no_such_method(int32_t method_idx);
+extern "C" void art_quick_throw_null_pointer_exception();
+extern "C" void art_quick_throw_stack_overflow(void*);
 
-void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) {
+void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
+                     PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) {
+  // Interpreter
+  ipoints->pInterpreterToInterpreterBridge = artInterpreterToInterpreterBridge;
+  ipoints->pInterpreterToCompiledCodeBridge = artInterpreterToCompiledCodeBridge;
+
+  // JNI
+  jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;
+
+  // Portable
+  ppoints->pPortableResolutionTrampoline = art_portable_resolution_trampoline;
+  ppoints->pPortableToInterpreterBridge = art_portable_to_interpreter_bridge;
+
   // Alloc
-  qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code;
-  qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
-  qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code;
-  qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
-  qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
-  qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
+  qpoints->pAllocArray = art_quick_alloc_array;
+  qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check;
+  qpoints->pAllocObject = art_quick_alloc_object;
+  qpoints->pAllocObjectWithAccessCheck = art_quick_alloc_object_with_access_check;
+  qpoints->pCheckAndAllocArray = art_quick_check_and_alloc_array;
+  qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check;
 
   // Cast
-  qpoints->pInstanceofNonTrivialFromCode = art_quick_is_assignable_from_code;
-  qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
-  qpoints->pCheckCastFromCode = art_quick_check_cast_from_code;
+  qpoints->pInstanceofNonTrivial = art_quick_is_assignable;
+  qpoints->pCanPutArrayElement = art_quick_can_put_array_element;
+  qpoints->pCheckCast = art_quick_check_cast;
 
   // DexCache
-  qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
-  qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
-  qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
-  qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code;
+  qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage;
+  qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access;
+  qpoints->pInitializeType = art_quick_initialize_type;
+  qpoints->pResolveString = art_quick_resolve_string;
 
   // Field
-  qpoints->pSet32Instance = art_quick_set32_instance_from_code;
-  qpoints->pSet32Static = art_quick_set32_static_from_code;
-  qpoints->pSet64Instance = art_quick_set64_instance_from_code;
-  qpoints->pSet64Static = art_quick_set64_static_from_code;
-  qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code;
-  qpoints->pSetObjStatic = art_quick_set_obj_static_from_code;
-  qpoints->pGet32Instance = art_quick_get32_instance_from_code;
-  qpoints->pGet64Instance = art_quick_get64_instance_from_code;
-  qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code;
-  qpoints->pGet32Static = art_quick_get32_static_from_code;
-  qpoints->pGet64Static = art_quick_get64_static_from_code;
-  qpoints->pGetObjStatic = art_quick_get_obj_static_from_code;
+  qpoints->pSet32Instance = art_quick_set32_instance;
+  qpoints->pSet32Static = art_quick_set32_static;
+  qpoints->pSet64Instance = art_quick_set64_instance;
+  qpoints->pSet64Static = art_quick_set64_static;
+  qpoints->pSetObjInstance = art_quick_set_obj_instance;
+  qpoints->pSetObjStatic = art_quick_set_obj_static;
+  qpoints->pGet32Instance = art_quick_get32_instance;
+  qpoints->pGet64Instance = art_quick_get64_instance;
+  qpoints->pGetObjInstance = art_quick_get_obj_instance;
+  qpoints->pGet32Static = art_quick_get32_static;
+  qpoints->pGet64Static = art_quick_get64_static;
+  qpoints->pGetObjStatic = art_quick_get_obj_static;
 
   // FillArray
-  qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
+  qpoints->pHandleFillArrayData = art_quick_handle_fill_data;
 
   // JNI
   qpoints->pJniMethodStart = JniMethodStart;
@@ -162,33 +174,29 @@
   qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
 
   // Locks
-  qpoints->pLockObjectFromCode = art_quick_lock_object_from_code;
-  qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
+  qpoints->pLockObject = art_quick_lock_object;
+  qpoints->pUnlockObject = art_quick_unlock_object;
 
   // Math
   // points->pCmpgDouble = NULL;  // Not needed on x86.
   // points->pCmpgFloat = NULL;  // Not needed on x86.
   // points->pCmplDouble = NULL;  // Not needed on x86.
   // points->pCmplFloat = NULL;  // Not needed on x86.
-  qpoints->pFmod = art_quick_fmod_from_code;
-  qpoints->pL2d = art_quick_l2d_from_code;
-  qpoints->pFmodf = art_quick_fmodf_from_code;
-  qpoints->pL2f = art_quick_l2f_from_code;
+  qpoints->pFmod = art_quick_fmod;
+  qpoints->pL2d = art_quick_l2d;
+  qpoints->pFmodf = art_quick_fmodf;
+  qpoints->pL2f = art_quick_l2f;
   // points->pD2iz = NULL;  // Not needed on x86.
   // points->pF2iz = NULL;  // Not needed on x86.
-  qpoints->pIdivmod = art_quick_idivmod_from_code;
-  qpoints->pD2l = art_quick_d2l_from_code;
-  qpoints->pF2l = art_quick_f2l_from_code;
-  qpoints->pLdiv = art_quick_ldiv_from_code;
-  qpoints->pLdivmod = art_quick_ldivmod_from_code;
-  qpoints->pLmul = art_quick_lmul_from_code;
-  qpoints->pShlLong = art_quick_lshl_from_code;
-  qpoints->pShrLong = art_quick_lshr_from_code;
-  qpoints->pUshrLong = art_quick_lushr_from_code;
-
-  // Interpreter
-  qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
-  qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
+  qpoints->pIdivmod = art_quick_idivmod;
+  qpoints->pD2l = art_quick_d2l;
+  qpoints->pF2l = art_quick_f2l;
+  qpoints->pLdiv = art_quick_ldiv;
+  qpoints->pLdivmod = art_quick_ldivmod;
+  qpoints->pLmul = art_quick_lmul;
+  qpoints->pShlLong = art_quick_lshl;
+  qpoints->pShrLong = art_quick_lshr;
+  qpoints->pUshrLong = art_quick_lushr;
 
   // Intrinsics
   qpoints->pIndexOf = art_quick_indexof;
@@ -197,7 +205,8 @@
   qpoints->pMemcpy = art_quick_memcpy;
 
   // Invocation
-  qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
+  qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline;
+  qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge;
   qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
   qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
   qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
@@ -206,19 +215,16 @@
   qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
 
   // Thread
-  qpoints->pCheckSuspendFromCode = CheckSuspendFromCode;
-  qpoints->pTestSuspendFromCode = art_quick_test_suspend;
+  qpoints->pCheckSuspend = CheckSuspendFromCode;
+  qpoints->pTestSuspend = art_quick_test_suspend;
 
   // Throws
-  qpoints->pDeliverException = art_quick_deliver_exception_from_code;
-  qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
-  qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
-  qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
-  qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
-  qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
-
-  // Portable
-  ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
+  qpoints->pDeliverException = art_quick_deliver_exception;
+  qpoints->pThrowArrayBounds = art_quick_throw_array_bounds;
+  qpoints->pThrowDivZero = art_quick_throw_div_zero;
+  qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method;
+  qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception;
+  qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow;
 }
 
 }  // namespace art
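
The InitEntryPoints change above is the heart of the patch: the single quick table grows into separate interpreter, JNI, portable, and quick tables, each a plain struct of function pointers filled in once per architecture. A minimal sketch of the pattern, with simplified types that are not ART's actual definitions:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the assembly stubs; the real ones live in
// files like quick_entrypoints_x86.S.
static void* DummyAllocObject(uint32_t type_idx, void* /*method*/) {
  std::printf("alloc object of type %u\n", type_idx);
  return nullptr;
}
static void DummyLockObject(void* /*obj*/) {}

// Each entrypoint table is just a struct of function pointers.
struct QuickEntryPointsSketch {
  void* (*pAllocObject)(uint32_t type_idx, void* method);
  void (*pLockObject)(void* obj);
};

// One assignment per slot, mirroring InitEntryPoints in the hunk above.
static void InitQuickEntryPointsSketch(QuickEntryPointsSketch* qpoints) {
  qpoints->pAllocObject = DummyAllocObject;
  qpoints->pLockObject = DummyLockObject;
}

int main() {
  QuickEntryPointsSketch qpoints;
  InitQuickEntryPointsSketch(&qpoints);
  qpoints.pAllocObject(42, nullptr);  // managed code calls through the table
  return 0;
}

Because callers go through the table rather than naming symbols directly, dropping the _from_code suffix only touches the stub definitions, their declarations, and one init function per architecture.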
diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S
index a0fca6c..0313d4b 100644
--- a/runtime/arch/x86/portable_entrypoints_x86.S
+++ b/runtime/arch/x86/portable_entrypoints_x86.S
@@ -90,20 +90,5 @@
     ret
 END_FUNCTION art_portable_proxy_invoke_handler
 
-    /*
-     * Portable abstract method error stub. method* is at %esp + 4 on entry.
-     */
-DEFINE_FUNCTION art_portable_abstract_method_error_stub
-    PUSH ebp
-    movl %esp, %ebp               // Remember SP.
-    .cfi_def_cfa_register ebp
-    subl LITERAL(12), %esp        // Align stack.
-    PUSH esp                      // Pass sp (not used).
-    pushl %fs:THREAD_SELF_OFFSET  // Pass Thread::Current().
-    pushl 8(%ebp)                 // Pass Method*.
-    call SYMBOL(artThrowAbstractMethodErrorFromCode)  // (Method*, Thread*, SP)
-    leave                         // Restore the stack and %ebp.
-    .cfi_def_cfa esp, 4
-    .cfi_restore ebp
-    ret                           // Return to caller to handle pending exception.
-END_FUNCTION art_portable_abstract_method_error_stub
+UNIMPLEMENTED art_portable_resolution_trampoline
+UNIMPLEMENTED art_portable_to_interpreter_bridge
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 89ea71a..dbf552f 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -135,34 +135,34 @@
     /*
      * Called by managed code to create and deliver a NullPointerException.
      */
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception_from_code, artThrowNullPointerExceptionFromCode
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
 
     /*
      * Called by managed code to create and deliver an ArithmeticException.
      */
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero_from_code, artThrowDivZeroFromCode
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode
 
     /*
      * Called by managed code to create and deliver a StackOverflowError.
      */
-NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow_from_code, artThrowStackOverflowFromCode
+NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode
 
     /*
      * Called by managed code, saves callee saves and then calls artThrowException
      * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
      */
-ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception_from_code, artDeliverExceptionFromCode
+ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode
 
     /*
      * Called by managed code to create and deliver a NoSuchMethodError.
      */
-ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method_from_code, artThrowNoSuchMethodFromCode
+ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode
 
     /*
      * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
      * index, arg2 holds limit.
      */
-TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds_from_code, artThrowArrayBoundsFromCode
+TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
 
     /*
      * All generated callsites for interface invokes and invocation slow paths will load arguments
@@ -382,24 +382,24 @@
     DELIVER_PENDING_EXCEPTION
 END_MACRO
 
-TWO_ARG_DOWNCALL art_quick_alloc_object_from_code, artAllocObjectFromCode, RETURN_IF_EAX_NOT_ZERO
-TWO_ARG_DOWNCALL art_quick_alloc_object_from_code_with_access_check, artAllocObjectFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
-THREE_ARG_DOWNCALL art_quick_alloc_array_from_code, artAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO
-THREE_ARG_DOWNCALL art_quick_alloc_array_from_code_with_access_check, artAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
-THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_from_code, artCheckAndAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO
-THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_from_code_with_access_check, artCheckAndAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
+TWO_ARG_DOWNCALL art_quick_alloc_object, artAllocObjectFromCode, RETURN_IF_EAX_NOT_ZERO
+TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check, artAllocObjectFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
+THREE_ARG_DOWNCALL art_quick_alloc_array, artAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO
+THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check, artAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
+THREE_ARG_DOWNCALL art_quick_check_and_alloc_array, artCheckAndAllocArrayFromCode, RETURN_IF_EAX_NOT_ZERO
+THREE_ARG_DOWNCALL art_quick_check_and_alloc_array_with_access_check, artCheckAndAllocArrayFromCodeWithAccessCheck, RETURN_IF_EAX_NOT_ZERO
 
-TWO_ARG_DOWNCALL art_quick_resolve_string_from_code, artResolveStringFromCode, RETURN_IF_EAX_NOT_ZERO
-TWO_ARG_DOWNCALL art_quick_initialize_static_storage_from_code, artInitializeStaticStorageFromCode, RETURN_IF_EAX_NOT_ZERO
-TWO_ARG_DOWNCALL art_quick_initialize_type_from_code, artInitializeTypeFromCode, RETURN_IF_EAX_NOT_ZERO
-TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access_from_code, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_EAX_NOT_ZERO
+TWO_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_EAX_NOT_ZERO
+TWO_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_EAX_NOT_ZERO
+TWO_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_EAX_NOT_ZERO
+TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_EAX_NOT_ZERO
 
-ONE_ARG_DOWNCALL art_quick_lock_object_from_code, artLockObjectFromCode, ret
-ONE_ARG_DOWNCALL art_quick_unlock_object_from_code, artUnlockObjectFromCode, RETURN_IF_EAX_ZERO
+ONE_ARG_DOWNCALL art_quick_lock_object, artLockObjectFromCode, ret
+ONE_ARG_DOWNCALL art_quick_unlock_object, artUnlockObjectFromCode, RETURN_IF_EAX_ZERO
 
-TWO_ARG_DOWNCALL art_quick_handle_fill_data_from_code, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO
+TWO_ARG_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_EAX_ZERO
 
-DEFINE_FUNCTION art_quick_is_assignable_from_code
+DEFINE_FUNCTION art_quick_is_assignable
     PUSH eax                     // alignment padding
     PUSH ecx                     // pass arg2
     PUSH eax                     // pass arg1
@@ -407,7 +407,7 @@
     addl LITERAL(12), %esp        // pop arguments
     .cfi_adjust_cfa_offset -12
     ret
-END_FUNCTION art_quick_is_assignable_from_code
+END_FUNCTION art_quick_is_assignable
 
 DEFINE_FUNCTION art_quick_memcpy
     PUSH edx                      // pass arg3
@@ -419,12 +419,12 @@
     ret
 END_FUNCTION art_quick_memcpy
 
-TWO_ARG_DOWNCALL art_quick_check_cast_from_code, artCheckCastFromCode, RETURN_IF_EAX_ZERO
-TWO_ARG_DOWNCALL art_quick_can_put_array_element_from_code, artCanPutArrayElementFromCode, RETURN_IF_EAX_ZERO
+TWO_ARG_DOWNCALL art_quick_check_cast, artCheckCastFromCode, RETURN_IF_EAX_ZERO
+TWO_ARG_DOWNCALL art_quick_can_put_array_element, artCanPutArrayElementFromCode, RETURN_IF_EAX_ZERO
 
 NO_ARG_DOWNCALL art_quick_test_suspend, artTestSuspendFromCode, ret
 
-DEFINE_FUNCTION art_quick_fmod_from_code
+DEFINE_FUNCTION art_quick_fmod
     subl LITERAL(12), %esp        // alignment padding
     .cfi_adjust_cfa_offset 12
     PUSH ebx                      // pass arg4 b.hi
@@ -437,9 +437,9 @@
     addl LITERAL(28), %esp        // pop arguments
     .cfi_adjust_cfa_offset -28
     ret
-END_FUNCTION art_quick_fmod_from_code
+END_FUNCTION art_quick_fmod
 
-DEFINE_FUNCTION art_quick_fmodf_from_code
+DEFINE_FUNCTION art_quick_fmodf
     PUSH eax                      // alignment padding
     PUSH ecx                      // pass arg2 b
     PUSH eax                      // pass arg1 a
@@ -449,9 +449,9 @@
     addl LITERAL(12), %esp        // pop arguments
     .cfi_adjust_cfa_offset -12
     ret
-END_FUNCTION art_quick_fmodf_from_code
+END_FUNCTION art_quick_fmodf
 
-DEFINE_FUNCTION art_quick_l2d_from_code
+DEFINE_FUNCTION art_quick_l2d
     PUSH ecx                      // push arg2 a.hi
     PUSH eax                      // push arg1 a.lo
     fildll (%esp)                 // load as integer and push into st0
@@ -460,9 +460,9 @@
     addl LITERAL(8), %esp         // pop arguments
     .cfi_adjust_cfa_offset -8
     ret
-END_FUNCTION art_quick_l2d_from_code
+END_FUNCTION art_quick_l2d
 
-DEFINE_FUNCTION art_quick_l2f_from_code
+DEFINE_FUNCTION art_quick_l2f
     PUSH ecx                      // push arg2 a.hi
     PUSH eax                      // push arg1 a.lo
     fildll (%esp)                 // load as integer and push into st0
@@ -471,9 +471,9 @@
     addl LITERAL(8), %esp         // pop argument
     .cfi_adjust_cfa_offset -8
     ret
-END_FUNCTION art_quick_l2f_from_code
+END_FUNCTION art_quick_l2f
 
-DEFINE_FUNCTION art_quick_d2l_from_code
+DEFINE_FUNCTION art_quick_d2l
     PUSH eax                      // alignment padding
     PUSH ecx                      // pass arg2 a.hi
     PUSH eax                      // pass arg1 a.lo
@@ -481,9 +481,9 @@
     addl LITERAL(12), %esp        // pop arguments
     .cfi_adjust_cfa_offset -12
     ret
-END_FUNCTION art_quick_d2l_from_code
+END_FUNCTION art_quick_d2l
 
-DEFINE_FUNCTION art_quick_f2l_from_code
+DEFINE_FUNCTION art_quick_f2l
     subl LITERAL(8), %esp         // alignment padding
     .cfi_adjust_cfa_offset 8
     PUSH eax                      // pass arg1 a
@@ -491,9 +491,9 @@
     addl LITERAL(12), %esp        // pop arguments
     .cfi_adjust_cfa_offset -12
     ret
-END_FUNCTION art_quick_f2l_from_code
+END_FUNCTION art_quick_f2l
 
-DEFINE_FUNCTION art_quick_idivmod_from_code
+DEFINE_FUNCTION art_quick_idivmod
     cmpl LITERAL(0x80000000), %eax
     je check_arg2  // special case
 args_ok:
@@ -505,9 +505,9 @@
     jne args_ok
     xorl %edx, %edx
     ret         // eax already holds min int
-END_FUNCTION art_quick_idivmod_from_code
+END_FUNCTION art_quick_idivmod
 
-DEFINE_FUNCTION art_quick_ldiv_from_code
+DEFINE_FUNCTION art_quick_ldiv
     subl LITERAL(12), %esp        // alignment padding
     .cfi_adjust_cfa_offset 12
     PUSH ebx                     // pass arg4 b.hi
@@ -518,9 +518,9 @@
     addl LITERAL(28), %esp        // pop arguments
     .cfi_adjust_cfa_offset -28
     ret
-END_FUNCTION art_quick_ldiv_from_code
+END_FUNCTION art_quick_ldiv
 
-DEFINE_FUNCTION art_quick_ldivmod_from_code
+DEFINE_FUNCTION art_quick_ldivmod
     subl LITERAL(12), %esp        // alignment padding
     .cfi_adjust_cfa_offset 12
     PUSH ebx                     // pass arg4 b.hi
@@ -531,18 +531,18 @@
     addl LITERAL(28), %esp        // pop arguments
     .cfi_adjust_cfa_offset -28
     ret
-END_FUNCTION art_quick_ldivmod_from_code
+END_FUNCTION art_quick_ldivmod
 
-DEFINE_FUNCTION art_quick_lmul_from_code
+DEFINE_FUNCTION art_quick_lmul
     imul %eax, %ebx              // ebx = a.lo(eax) * b.hi(ebx)
     imul %edx, %ecx              // ecx = b.lo(edx) * a.hi(ecx)
     mul  %edx                    // edx:eax = a.lo(eax) * b.lo(edx)
     add  %ebx, %ecx
     add  %ecx, %edx              // edx += (a.lo * b.hi) + (b.lo * a.hi)
     ret
-END_FUNCTION art_quick_lmul_from_code
+END_FUNCTION art_quick_lmul
 
-DEFINE_FUNCTION art_quick_lshl_from_code
+DEFINE_FUNCTION art_quick_lshl
     // ecx:eax << edx
     xchg %edx, %ecx
     shld %cl,%eax,%edx
@@ -553,9 +553,9 @@
     xor %eax, %eax
 1:
     ret
-END_FUNCTION art_quick_lshl_from_code
+END_FUNCTION art_quick_lshl
 
-DEFINE_FUNCTION art_quick_lshr_from_code
+DEFINE_FUNCTION art_quick_lshr
     // ecx:eax >> edx
     xchg %edx, %ecx
     shrd %cl,%edx,%eax
@@ -566,9 +566,9 @@
     sar LITERAL(31), %edx
 1:
     ret
-END_FUNCTION art_quick_lshr_from_code
+END_FUNCTION art_quick_lshr
 
-DEFINE_FUNCTION art_quick_lushr_from_code
+DEFINE_FUNCTION art_quick_lushr
     // ecx:eax >>> edx
     xchg %edx, %ecx
     shrd %cl,%edx,%eax
@@ -579,9 +579,9 @@
     xor %edx, %edx
 1:
     ret
-END_FUNCTION art_quick_lushr_from_code
+END_FUNCTION art_quick_lushr
 
-DEFINE_FUNCTION art_quick_set32_instance_from_code
+DEFINE_FUNCTION art_quick_set32_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     subl LITERAL(8), %esp         // alignment padding
@@ -599,9 +599,9 @@
     .cfi_adjust_cfa_offset -32
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_IF_EAX_ZERO            // return or deliver exception
-END_FUNCTION art_quick_set32_instance_from_code
+END_FUNCTION art_quick_set32_instance
 
-DEFINE_FUNCTION art_quick_set64_instance_from_code
+DEFINE_FUNCTION art_quick_set64_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
     subl LITERAL(8), %esp         // alignment padding
     .cfi_adjust_cfa_offset 8
@@ -618,9 +618,9 @@
     .cfi_adjust_cfa_offset -32
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME  // restore frame up to return address
     RETURN_IF_EAX_ZERO            // return or deliver exception
-END_FUNCTION art_quick_set64_instance_from_code
+END_FUNCTION art_quick_set64_instance
 
-DEFINE_FUNCTION art_quick_set_obj_instance_from_code
+DEFINE_FUNCTION art_quick_set_obj_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     subl LITERAL(8), %esp         // alignment padding
@@ -638,9 +638,9 @@
     .cfi_adjust_cfa_offset -32
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_IF_EAX_ZERO            // return or deliver exception
-END_FUNCTION art_quick_set_obj_instance_from_code
+END_FUNCTION art_quick_set_obj_instance
 
-DEFINE_FUNCTION art_quick_get32_instance_from_code
+DEFINE_FUNCTION art_quick_get32_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     mov 32(%esp), %edx            // get referrer
@@ -657,9 +657,9 @@
     .cfi_adjust_cfa_offset -32
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_get32_instance_from_code
+END_FUNCTION art_quick_get32_instance
 
-DEFINE_FUNCTION art_quick_get64_instance_from_code
+DEFINE_FUNCTION art_quick_get64_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     mov 32(%esp), %edx            // get referrer
@@ -676,9 +676,9 @@
     .cfi_adjust_cfa_offset -32
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_get64_instance_from_code
+END_FUNCTION art_quick_get64_instance
 
-DEFINE_FUNCTION art_quick_get_obj_instance_from_code
+DEFINE_FUNCTION art_quick_get_obj_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     mov 32(%esp), %edx            // get referrer
@@ -695,9 +695,9 @@
     .cfi_adjust_cfa_offset -32
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_get_obj_instance_from_code
+END_FUNCTION art_quick_get_obj_instance
 
-DEFINE_FUNCTION art_quick_set32_static_from_code
+DEFINE_FUNCTION art_quick_set32_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     mov 32(%esp), %edx            // get referrer
@@ -714,9 +714,9 @@
     .cfi_adjust_cfa_offset -32
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_IF_EAX_ZERO            // return or deliver exception
-END_FUNCTION art_quick_set32_static_from_code
+END_FUNCTION art_quick_set32_static
 
-DEFINE_FUNCTION art_quick_set64_static_from_code
+DEFINE_FUNCTION art_quick_set64_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     subl LITERAL(8), %esp         // alignment padding
@@ -734,9 +734,9 @@
     .cfi_adjust_cfa_offset -32
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME  // restore frame up to return address
     RETURN_IF_EAX_ZERO            // return or deliver exception
-END_FUNCTION art_quick_set64_static_from_code
+END_FUNCTION art_quick_set64_static
 
-DEFINE_FUNCTION art_quick_set_obj_static_from_code
+DEFINE_FUNCTION art_quick_set_obj_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     mov 32(%esp), %edx            // get referrer
@@ -752,9 +752,9 @@
     addl LITERAL(32), %esp        // pop arguments
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME  // restore frame up to return address
     RETURN_IF_EAX_ZERO            // return or deliver exception
-END_FUNCTION art_quick_set_obj_static_from_code
+END_FUNCTION art_quick_set_obj_static
 
-DEFINE_FUNCTION art_quick_get32_static_from_code
+DEFINE_FUNCTION art_quick_get32_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
     mov %esp, %edx                // remember SP
     mov 32(%esp), %ecx            // get referrer
@@ -768,9 +768,9 @@
     .cfi_adjust_cfa_offset -16
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_get32_static_from_code
+END_FUNCTION art_quick_get32_static
 
-DEFINE_FUNCTION art_quick_get64_static_from_code
+DEFINE_FUNCTION art_quick_get64_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
     mov %esp, %edx                // remember SP
     mov 32(%esp), %ecx            // get referrer
@@ -784,9 +784,9 @@
     .cfi_adjust_cfa_offset -16
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_get64_static_from_code
+END_FUNCTION art_quick_get64_static
 
-DEFINE_FUNCTION art_quick_get_obj_static_from_code
+DEFINE_FUNCTION art_quick_get_obj_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
     mov %esp, %edx                // remember SP
     mov 32(%esp), %ecx            // get referrer
@@ -800,7 +800,7 @@
     .cfi_adjust_cfa_offset -16
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_get_obj_static_from_code
+END_FUNCTION art_quick_get_obj_static
 
 DEFINE_FUNCTION art_quick_proxy_invoke_handler
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME   // save frame and Method*
@@ -818,7 +818,32 @@
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
 END_FUNCTION art_quick_proxy_invoke_handler
 
-DEFINE_FUNCTION art_quick_interpreter_entry
+DEFINE_FUNCTION art_quick_resolution_trampoline
+    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+    PUSH esp                      // pass SP
+    pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
+    .cfi_adjust_cfa_offset 4
+    PUSH ecx                      // pass receiver
+    PUSH eax                      // pass method
+    call SYMBOL(artQuickResolutionTrampoline) // (Method* called, receiver, Thread*, SP)
+    movl %eax, %edi               // remember code pointer in EDI
+    addl LITERAL(16), %esp        // pop arguments
+    test %eax, %eax               // if code pointer is NULL goto deliver pending exception
+    jz 1f
+    POP eax                       // called method
+    POP ecx                       // restore args
+    POP edx
+    POP ebx
+    POP ebp                       // restore callee saves except EDI
+    POP esi
+    xchgl 0(%esp),%edi            // restore EDI and place code pointer as only value on stack
+    ret                           // tail call into method
+1:
+    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+    DELIVER_PENDING_EXCEPTION
+END_FUNCTION art_quick_resolution_trampoline
+
+DEFINE_FUNCTION art_quick_to_interpreter_bridge
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME   // save frame
     mov %esp, %edx                // remember SP
     PUSH eax                      // alignment padding
@@ -826,19 +851,19 @@
     pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
     .cfi_adjust_cfa_offset 4
     PUSH eax                      // pass method
-    call SYMBOL(artInterpreterEntry)  // (method, Thread*, SP)
+    call SYMBOL(artQuickToInterpreterBridge)  // (method, Thread*, SP)
     movd %eax, %xmm0              // place return value also into floating point return value
     movd %edx, %xmm1
     punpckldq %xmm1, %xmm0
     addl LITERAL(44), %esp        // pop arguments
     .cfi_adjust_cfa_offset -44
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_interpreter_entry
+END_FUNCTION art_quick_to_interpreter_bridge
 
     /*
      * Routine that intercepts method calls and returns.
      */
-DEFINE_FUNCTION art_quick_instrumentation_entry_from_code
+DEFINE_FUNCTION art_quick_instrumentation_entry
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
     movl  %esp, %edx              // Save SP.
     PUSH eax                      // Save eax which will be clobbered by the callee-save method.
@@ -855,7 +880,7 @@
     addl  LITERAL(28), %esp       // Pop arguments up to saved Method*.
     movl 28(%esp), %edi           // Restore edi.
     movl %eax, 28(%esp)           // Place code* over edi, just under return pc.
-    movl LITERAL(SYMBOL(art_quick_instrumentation_exit_from_code)), 32(%esp)
+    movl LITERAL(SYMBOL(art_quick_instrumentation_exit)), 32(%esp)
                                   // Place instrumentation exit as return pc.
     movl (%esp), %eax             // Restore eax.
     movl 8(%esp), %ecx            // Restore ecx.
@@ -865,9 +890,9 @@
     movl 24(%esp), %esi           // Restore esi.
     addl LITERAL(28), %esp        // Wind stack back up to code*.
     ret                           // Call method (and pop).
-END_FUNCTION art_quick_instrumentation_entry_from_code
+END_FUNCTION art_quick_instrumentation_entry
 
-DEFINE_FUNCTION art_quick_instrumentation_exit_from_code
+DEFINE_FUNCTION art_quick_instrumentation_exit
     pushl LITERAL(0)              // Push a fake return PC as there will be none on the stack.
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME
     mov  %esp, %ecx               // Remember SP
@@ -900,7 +925,7 @@
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
     addl LITERAL(4), %esp         // Remove fake return pc.
     jmp   *%ecx                   // Return.
-END_FUNCTION art_quick_instrumentation_exit_from_code
+END_FUNCTION art_quick_instrumentation_exit
 
     /*
      * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
@@ -920,21 +945,6 @@
 END_FUNCTION art_quick_deoptimize
 
     /*
-     * Quick abstract method error stub. %eax contains method* on entry.
-     */
-DEFINE_FUNCTION art_quick_abstract_method_error_stub
-    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
-    movl %esp, %ecx               // Remember SP.
-    PUSH eax                      // Align frame.
-    PUSH ecx                      // Pass SP for Method*.
-    pushl %fs:THREAD_SELF_OFFSET  // Pass Thread::Current().
-    .cfi_adjust_cfa_offset 4
-    PUSH eax                      // Pass Method*.
-    call SYMBOL(artThrowAbstractMethodErrorFromCode)  // (Method*, Thread*, SP)
-    int3                          // Unreachable.
-END_FUNCTION art_quick_abstract_method_error_stub
-
-    /*
      * String's indexOf.
      *
      * On entry:
@@ -1030,12 +1040,5 @@
     ret
 END_FUNCTION art_quick_string_compareto
 
-MACRO1(UNIMPLEMENTED,name)
-    .globl VAR(name, 0)
-    ALIGN_FUNCTION_ENTRY
-VAR(name, 0):
-    int3
-END_MACRO
-
     // TODO: implement these!
 UNIMPLEMENTED art_quick_memcmp16
diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc
index 83ecca8..7d54baf 100644
--- a/runtime/base/logging.cc
+++ b/runtime/base/logging.cc
@@ -156,8 +156,6 @@
   if (data_->severity == FATAL) {
     Runtime::Abort();
   }
-
-  delete data_;
 }
 
 HexDump::HexDump(const void* address, size_t byte_count, bool show_actual_addresses)
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index d641ae4..eafa050 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -24,6 +24,7 @@
 #include <signal.h>
 #include "base/macros.h"
 #include "log_severity.h"
+#include "UniquePtr.h"
 
 #define CHECK(x) \
   if (UNLIKELY(!(x))) \
@@ -194,7 +195,7 @@
  private:
   static void LogLine(const LogMessageData& data, const char*);
 
-  LogMessageData* const data_;
+  const UniquePtr<LogMessageData> data_;
 
   friend void HandleUnexpectedSignal(int signal_number, siginfo_t* info, void* raw_context);
   friend class Mutex;
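
The logging.cc and logging.h hunks belong together: once data_ becomes a smart pointer, the destructor's manual delete data_ is redundant and is removed. A minimal sketch of the same move, using std::unique_ptr as a stand-in for the UniquePtr type the header now includes:

#include <memory>

struct LogMessageData { int severity; /* buffer, file, line, ... */ };

class LogMessage {
 public:
  LogMessage() : data_(new LogMessageData) {}
  // No manual delete in the destructor: the unique_ptr member releases
  // LogMessageData automatically, on every path out of the object.
  ~LogMessage() {}

 private:
  const std::unique_ptr<LogMessageData> data_;  // was: LogMessageData* const
};

int main() {
  LogMessage m;  // data_ allocated here, reclaimed when m goes out of scope
  return 0;
}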
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index b924798..2f41bcd 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -179,7 +179,6 @@
   const bool recursive_;  // Can the lock be recursively held?
   unsigned int recursion_count_;
   friend class ConditionVariable;
-  friend class MutexTester;
   DISALLOW_COPY_AND_ASSIGN(Mutex);
 };
 
@@ -238,7 +237,7 @@
 
   // Assert the current thread has exclusive access to the ReaderWriterMutex.
   void AssertExclusiveHeld(const Thread* self) {
-    if (kDebugLocking & (gAborting == 0)) {
+    if (kDebugLocking && (gAborting == 0)) {
       CHECK(IsExclusiveHeld(self)) << *this;
     }
   }
@@ -246,7 +245,7 @@
 
   // Assert the current thread doesn't have exclusive access to the ReaderWriterMutex.
   void AssertNotExclusiveHeld(const Thread* self) {
-    if (kDebugLocking & (gAborting == 0)) {
+    if (kDebugLocking && (gAborting == 0)) {
       CHECK(!IsExclusiveHeld(self)) << *this;
     }
   }
@@ -257,7 +256,7 @@
 
   // Assert the current thread has shared access to the ReaderWriterMutex.
   void AssertSharedHeld(const Thread* self) {
-    if (kDebugLocking  & (gAborting == 0)) {
+    if (kDebugLocking && (gAborting == 0)) {
       // TODO: we can only assert this well when self != NULL.
       CHECK(IsSharedHeld(self) || self == NULL) << *this;
     }
@@ -290,7 +289,6 @@
 #else
   pthread_rwlock_t rwlock_;
 #endif
-  friend class MutexTester;
   DISALLOW_COPY_AND_ASSIGN(ReaderWriterMutex);
 };
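
The three assertion fixes above replace a bitwise & with the logical && that was plainly intended. On bool operands both yield the same truth value, so the bug was latent rather than misbehaving, but && also short-circuits, which matters once the right-hand side has a cost or a side effect. A small demonstration:

#include <cassert>

static int calls = 0;
static bool Rhs() { ++calls; return true; }

int main() {
  bool lhs = false;
  bool a = lhs & Rhs();   // bitwise: Rhs() runs even though lhs is false
  bool b = lhs && Rhs();  // logical: Rhs() is skipped entirely
  assert(!a && !b);       // identical truth values...
  assert(calls == 1);     // ...but only the bitwise form evaluated Rhs()
  return 0;
}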
 
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 6052993..71959c6 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -71,7 +71,7 @@
 
 namespace art {
 
-extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
+extern "C" void artInterperterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
                                            const DexFile::CodeItem* code_item,
                                            ShadowFrame* shadow_frame, JValue* result);
 
@@ -944,6 +944,43 @@
   return oat_file;
 }
 
+static void InitFromImageCallbackCommon(mirror::Object* obj, ClassLinker* class_linker,
+                                        bool interpret_only_mode)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  DCHECK(obj != NULL);
+  DCHECK(class_linker != NULL);
+
+  if (obj->GetClass()->IsStringClass()) {
+    class_linker->GetInternTable()->RegisterStrong(obj->AsString());
+  } else if (obj->IsClass()) {
+    // Restore class to ClassLinker::classes_ table.
+    mirror::Class* klass = obj->AsClass();
+    ClassHelper kh(klass, class_linker);
+    mirror::Class* existing = class_linker->InsertClass(kh.GetDescriptor(), klass, true);
+    DCHECK(existing == NULL) << kh.GetDescriptor();
+  } else if (interpret_only_mode && obj->IsMethod()) {
+    mirror::AbstractMethod* method = obj->AsMethod();
+    if (!method->IsNative()) {
+      method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterBridge);
+      if (method != Runtime::Current()->GetResolutionMethod()) {
+        method->SetEntryPointFromCompiledCode(GetCompiledCodeToInterpreterBridge());
+      }
+    }
+  }
+}
+
+static void InitFromImageCallback(mirror::Object* obj, void* arg)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ClassLinker* class_linker = reinterpret_cast<ClassLinker*>(arg);
+  InitFromImageCallbackCommon(obj, class_linker, false);
+}
+
+static void InitFromImageInterpretOnlyCallback(mirror::Object* obj, void* arg)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ClassLinker* class_linker = reinterpret_cast<ClassLinker*>(arg);
+  InitFromImageCallbackCommon(obj, class_linker, true);
+}
+
 void ClassLinker::InitFromImage() {
   VLOG(startup) << "ClassLinker::InitFromImage entering";
   CHECK(!init_done_);
@@ -997,7 +1034,11 @@
   {
     ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
     heap->FlushAllocStack();
-    heap->GetLiveBitmap()->Walk(InitFromImageCallback, this);
+    if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
+      heap->GetLiveBitmap()->Walk(InitFromImageInterpretOnlyCallback, this);
+    } else {
+      heap->GetLiveBitmap()->Walk(InitFromImageCallback, this);
+    }
   }
 
   // reinit class_roots_
@@ -1025,40 +1066,6 @@
   VLOG(startup) << "ClassLinker::InitFromImage exiting";
 }
 
-void ClassLinker::InitFromImageCallback(mirror::Object* obj, void* arg) {
-  DCHECK(obj != NULL);
-  DCHECK(arg != NULL);
-  ClassLinker* class_linker = reinterpret_cast<ClassLinker*>(arg);
-
-  if (obj->GetClass()->IsStringClass()) {
-    class_linker->intern_table_->RegisterStrong(obj->AsString());
-    return;
-  }
-  if (obj->IsClass()) {
-    // restore class to ClassLinker::classes_ table
-    mirror::Class* klass = obj->AsClass();
-    ClassHelper kh(klass, class_linker);
-    mirror::Class* existing = class_linker->InsertClass(kh.GetDescriptor(), klass, true);
-    DCHECK(existing == NULL) << kh.GetDescriptor();
-    return;
-  }
-
-  if (obj->IsMethod()) {
-    mirror::AbstractMethod* method = obj->AsMethod();
-    // Set entry points to interpreter for methods in interpreter only mode.
-    if (Runtime::Current()->GetInstrumentation()->InterpretOnly() && !method->IsNative()) {
-      method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterEntry);
-      if (method != Runtime::Current()->GetResolutionMethod()) {
-        method->SetEntryPointFromCompiledCode(GetInterpreterEntryPoint());
-      }
-    }
-    // Populate native method pointer with jni lookup stub.
-    if (method->IsNative()) {
-      method->UnregisterNative(Thread::Current());
-    }
-  }
-}
-
 // Keep in sync with InitCallback. Anything we visit, we need to
 // reinit references to when reinitializing a ClassLinker from a
 // mapped image.
@@ -1558,7 +1565,7 @@
   const void* result = GetOatMethodFor(method).GetCode();
   if (result == NULL) {
     // No code? You must mean to go into the interpreter.
-    result = GetInterpreterEntryPoint();
+    result = GetCompiledCodeToInterpreterBridge();
   }
   return result;
 }
@@ -1619,7 +1626,7 @@
     const bool enter_interpreter = NeedsInterpreter(method, code);
     if (enter_interpreter) {
       // Use interpreter entry point.
-      code = GetInterpreterEntryPoint();
+      code = GetCompiledCodeToInterpreterBridge();
     }
     runtime->GetInstrumentation()->UpdateMethodsCode(method, code);
   }
@@ -1640,13 +1647,13 @@
   Runtime* runtime = Runtime::Current();
   bool enter_interpreter = NeedsInterpreter(method.get(), method->GetEntryPointFromCompiledCode());
   if (enter_interpreter) {
-    method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterEntry);
+    method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterBridge);
   } else {
-    method->SetEntryPointFromInterpreter(artInterpreterToQuickEntry);
+    method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
   }
 
   if (method->IsAbstract()) {
-    method->SetEntryPointFromCompiledCode(GetAbstractMethodErrorStub());
+    method->SetEntryPointFromCompiledCode(GetCompiledCodeToInterpreterBridge());
     return;
   }
 
@@ -1657,7 +1664,7 @@
     method->SetEntryPointFromCompiledCode(GetResolutionTrampoline(runtime->GetClassLinker()));
   } else if (enter_interpreter) {
     // Set entry point from compiled code if there's no code or in interpreter only mode.
-    method->SetEntryPointFromCompiledCode(GetInterpreterEntryPoint());
+    method->SetEntryPointFromCompiledCode(GetCompiledCodeToInterpreterBridge());
   }
 
   if (method->IsNative()) {
@@ -2625,12 +2632,8 @@
   method->SetCoreSpillMask(refs_and_args->GetCoreSpillMask());
   method->SetFpSpillMask(refs_and_args->GetFpSpillMask());
   method->SetFrameSizeInBytes(refs_and_args->GetFrameSizeInBytes());
-#if !defined(ART_USE_PORTABLE_COMPILER)
-  method->SetEntryPointFromCompiledCode(reinterpret_cast<void*>(art_quick_proxy_invoke_handler));
-#else
-  method->SetEntryPointFromCompiledCode(reinterpret_cast<void*>(art_portable_proxy_invoke_handler));
-#endif
-  method->SetEntryPointFromInterpreter(artInterpreterToQuickEntry);
+  method->SetEntryPointFromCompiledCode(GetProxyInvokeHandler());
+  method->SetEntryPointFromInterpreter(artInterpreterToCompiledCodeBridge);
 
   return method;
 }
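
A note on the callback refactor above: heap->GetLiveBitmap()->Walk() takes a C-style callback plus an opaque void* argument, so the interpret-only decision is hoisted out of the per-object path by choosing between two thin wrappers once, before the walk begins. The shape of that pattern, with placeholder types rather than ART's:

#include <cstdio>
#include <vector>

struct Object { int id; };
struct Linker { const char* name; };

// Walker with a C-style callback and an opaque context pointer, mirroring
// heap->GetLiveBitmap()->Walk(callback, this).
static void Walk(const std::vector<Object*>& objects,
                 void (*callback)(Object*, void*), void* arg) {
  for (Object* obj : objects) callback(obj, arg);
}

static void VisitCommon(Object* obj, Linker* linker, bool interpret_only) {
  std::printf("%s visits %d (interpret_only=%d)\n",
              linker->name, obj->id, static_cast<int>(interpret_only));
}

// Two thin wrappers bake the flag in, so the hot loop never rechecks it.
static void VisitDefault(Object* obj, void* arg) {
  VisitCommon(obj, static_cast<Linker*>(arg), false);
}
static void VisitInterpretOnly(Object* obj, void* arg) {
  VisitCommon(obj, static_cast<Linker*>(arg), true);
}

int main() {
  Object a{1}, b{2};
  std::vector<Object*> heap{&a, &b};
  Linker linker{"class_linker"};
  bool interpret_only = true;  // decided once, outside the walk
  Walk(heap, interpret_only ? VisitInterpretOnly : VisitDefault, &linker);
  return 0;
}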
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index fdf75c2..67be2ff 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -347,6 +347,17 @@
     return quick_resolution_trampoline_;
   }
 
+  InternTable* GetInternTable() const {
+    return intern_table_;
+  }
+
+  // Attempts to insert a class into a class table.  Returns NULL if
+  // the class was inserted, otherwise returns an existing class with
+  // the same descriptor and ClassLoader.
+  mirror::Class* InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool image_class)
+      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   explicit ClassLinker(InternTable*);
 
@@ -362,8 +373,6 @@
   OatFile& GetImageOatFile(gc::space::ImageSpace* space)
       LOCKS_EXCLUDED(dex_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static void InitFromImageCallback(mirror::Object* obj, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void FinishInit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -423,13 +432,6 @@
   const OatFile::OatClass* GetOatClass(const DexFile& dex_file, const char* descriptor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Attempts to insert a class into a class table.  Returns NULL if
-  // the class was inserted, otherwise returns an existing class with
-  // the same descriptor and ClassLoader.
-  mirror::Class* InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool image_class)
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
   void RegisterDexFileLocked(const DexFile& dex_file, SirtRef<mirror::DexCache>& dex_cache)
       EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -615,9 +617,7 @@
   const void* portable_resolution_trampoline_;
   const void* quick_resolution_trampoline_;
 
-  friend class CommonTest;
   friend class ImageWriter;  // for GetClassRoots
-  friend class ObjectTest;
   FRIEND_TEST(ClassLinkerTest, ClassRootDescriptors);
   FRIEND_TEST(mirror::DexCacheTest, Open);
   FRIEND_TEST(ExceptionTest, FindExceptionHandler);
diff --git a/runtime/common_test.h b/runtime/common_test.h
index 7ee6fe2..a543617 100644
--- a/runtime/common_test.h
+++ b/runtime/common_test.h
@@ -192,10 +192,7 @@
       compiled_method =
           compiler_driver_->GetCompiledMethod(MethodReference(&dex_file,
                                                               method->GetDexMethodIndex()));
-
-#ifndef ART_LIGHT_MODE
       CHECK(compiled_method != NULL) << PrettyMethod(method);
-#endif
     }
     if (compiled_method != NULL) {
       const std::vector<uint8_t>& code = compiled_method->GetCode();
@@ -213,12 +210,8 @@
       oat_method.LinkMethod(method);
     } else {
       const void* method_code;
-      if (method->IsAbstract()) {
-        method_code = GetAbstractMethodErrorStub();
-      } else {
-        // No code? You must mean to go into the interpreter.
-        method_code = GetInterpreterEntryPoint();
-      }
+      // No code? You must mean to go into the interpreter.
+      method_code = GetCompiledCodeToInterpreterBridge();
       LOG(INFO) << "MakeExecutable " << PrettyMethod(method) << " code=" << method_code;
       OatFile::OatMethod oat_method = CreateOatMethod(method_code,
                                                       kStackAlignment,
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index cbdc430..aaff0fc 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -313,7 +313,6 @@
   method_ids_ = reinterpret_cast<const MethodId*>(b + h->method_ids_off_);
   proto_ids_ = reinterpret_cast<const ProtoId*>(b + h->proto_ids_off_);
   class_defs_ = reinterpret_cast<const ClassDef*>(b + h->class_defs_off_);
-  DCHECK_EQ(size_, header_->file_size_) << GetLocation();
 }
 
 bool DexFile::CheckMagicAndVersion() const {
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 76947e5..a60a139 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -209,7 +209,7 @@
     }
 
     const TypeItem& GetTypeItem(uint32_t idx) const {
-      CHECK_LT(idx, this->size_);
+      DCHECK_LT(idx, this->size_);
       return this->list_[idx];
     }
 
@@ -494,7 +494,7 @@
 
   // Returns the FieldId at the specified index.
   const FieldId& GetFieldId(uint32_t idx) const {
-    CHECK_LT(idx, NumFieldIds()) << GetLocation();
+    DCHECK_LT(idx, NumFieldIds()) << GetLocation();
     return field_ids_[idx];
   }
 
@@ -585,7 +585,7 @@
 
   // Returns the ClassDef at the specified index.
   const ClassDef& GetClassDef(uint32_t idx) const {
-    CHECK_LT(idx, NumClassDefs()) << GetLocation();
+    DCHECK_LT(idx, NumClassDefs()) << GetLocation();
     return class_defs_[idx];
   }
 
@@ -1025,7 +1025,7 @@
     if (pos_ < EndOfInstanceFieldsPos()) {
       return last_idx_ + field_.field_idx_delta_;
     } else {
-      CHECK_LT(pos_, EndOfVirtualMethodsPos());
+      DCHECK_LT(pos_, EndOfVirtualMethodsPos());
       return last_idx_ + method_.method_idx_delta_;
     }
   }
@@ -1033,7 +1033,7 @@
     if (pos_ < EndOfInstanceFieldsPos()) {
       return field_.access_flags_;
     } else {
-      CHECK_LT(pos_, EndOfVirtualMethodsPos());
+      DCHECK_LT(pos_, EndOfVirtualMethodsPos());
       return method_.access_flags_;
     }
   }
@@ -1045,7 +1045,7 @@
         return kDirect;
       }
     } else {
-      CHECK_EQ(GetMemberAccessFlags() & kAccStatic, 0U);
+      DCHECK_EQ(GetMemberAccessFlags() & kAccStatic, 0U);
       if ((class_def.access_flags_ & kAccInterface) != 0) {
         return kInterface;
       } else if ((GetMemberAccessFlags() & kAccConstructor) != 0) {
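
The CHECK_LT to DCHECK_LT conversions above move bounds checks on hot dex-file index paths from every build to debug builds only. A sketch of the distinction with simplified macros (not ART's logging machinery):

#include <cstdio>
#include <cstdlib>

// Simplified stand-ins: CHECK fires in every build, DCHECK only in debug.
#define CHECK_LT(a, b) \
  do { if (!((a) < (b))) { std::fprintf(stderr, "check failed\n"); std::abort(); } } while (0)
#ifdef NDEBUG
#define DCHECK_LT(a, b) do {} while (0)  // compiled out of release builds
#else
#define DCHECK_LT(a, b) CHECK_LT(a, b)   // debug builds still catch bad indices
#endif

static int GetId(const int* ids, unsigned idx, unsigned size) {
  DCHECK_LT(idx, size);  // release callers pay nothing per lookup
  return ids[idx];
}

int main() {
  int ids[3] = {10, 20, 30};
  std::printf("%d\n", GetId(ids, 1, 3));
  return 0;
}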
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index 3f28b5e..b6781c0 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -30,24 +30,13 @@
 #include "object_utils.h"
 #include "thread.h"
 
-extern "C" void art_interpreter_invoke_handler();
-extern "C" void art_jni_dlsym_lookup_stub();
-extern "C" void art_portable_abstract_method_error_stub();
-extern "C" void art_portable_proxy_invoke_handler();
-extern "C" void art_quick_abstract_method_error_stub();
-extern "C" void art_quick_deoptimize();
-extern "C" void art_quick_instrumentation_entry_from_code(void*);
-extern "C" void art_quick_instrumentation_exit_from_code();
-extern "C" void art_quick_interpreter_entry(void*);
-extern "C" void art_quick_proxy_invoke_handler();
-extern "C" void art_work_around_app_jni_bugs();
-
 namespace art {
+
 namespace mirror {
-class Class;
-class Field;
-class Object;
-}
+  class Class;
+  class Field;
+  class Object;
+}  // namespace mirror
 
 // Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
 // cannot be resolved, throw an error. If it can, use it to create an instance.
@@ -350,25 +339,43 @@
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 // Entry point for deoptimization.
-static inline uintptr_t GetDeoptimizationEntryPoint() {
+extern "C" void art_quick_deoptimize();
+static inline uintptr_t GetQuickDeoptimizationEntryPoint() {
   return reinterpret_cast<uintptr_t>(art_quick_deoptimize);
 }
 
 // Return address of instrumentation stub.
-static inline void* GetInstrumentationEntryPoint() {
-  return reinterpret_cast<void*>(art_quick_instrumentation_entry_from_code);
+extern "C" void art_quick_instrumentation_entry(void*);
+static inline void* GetQuickInstrumentationEntryPoint() {
+  return reinterpret_cast<void*>(art_quick_instrumentation_entry);
 }
 
 // The return_pc of instrumentation exit stub.
-static inline uintptr_t GetInstrumentationExitPc() {
-  return reinterpret_cast<uintptr_t>(art_quick_instrumentation_exit_from_code);
+extern "C" void art_quick_instrumentation_exit();
+static inline uintptr_t GetQuickInstrumentationExitPc() {
+  return reinterpret_cast<uintptr_t>(art_quick_instrumentation_exit);
+}
+
+extern "C" void art_portable_to_interpreter_bridge(mirror::AbstractMethod*);
+static inline const void* GetPortableToInterpreterBridge() {
+  return reinterpret_cast<void*>(art_portable_to_interpreter_bridge);
+}
+
+extern "C" void art_quick_to_interpreter_bridge(mirror::AbstractMethod*);
+static inline const void* GetQuickToInterpreterBridge() {
+  return reinterpret_cast<void*>(art_quick_to_interpreter_bridge);
 }
 
 // Return address of interpreter stub.
-static inline void* GetInterpreterEntryPoint() {
-  return reinterpret_cast<void*>(art_quick_interpreter_entry);
+static inline const void* GetCompiledCodeToInterpreterBridge() {
+#if defined(ART_USE_PORTABLE_COMPILER)
+  return GetPortableToInterpreterBridge();
+#else
+  return GetQuickToInterpreterBridge();
+#endif
 }
 
+
 static inline const void* GetPortableResolutionTrampoline(ClassLinker* class_linker) {
   return class_linker->GetPortableResolutionTrampoline();
 }
@@ -386,23 +393,25 @@
 #endif
 }
 
-static inline void* GetPortableAbstractMethodErrorStub() {
-  return reinterpret_cast<void*>(art_portable_abstract_method_error_stub);
+extern "C" void art_portable_proxy_invoke_handler();
+static inline const void* GetPortableProxyInvokeHandler() {
+  return reinterpret_cast<void*>(art_portable_proxy_invoke_handler);
 }
 
-static inline void* GetQuickAbstractMethodErrorStub() {
-  return reinterpret_cast<void*>(art_quick_abstract_method_error_stub);
+extern "C" void art_quick_proxy_invoke_handler();
+static inline const void* GetQuickProxyInvokeHandler() {
+  return reinterpret_cast<void*>(art_quick_proxy_invoke_handler);
 }
 
-// Return address of abstract method error stub for defined compiler.
-static inline void* GetAbstractMethodErrorStub() {
+static inline const void* GetProxyInvokeHandler() {
 #if defined(ART_USE_PORTABLE_COMPILER)
-  return GetPortableAbstractMethodErrorStub();
+  return GetPortableProxyInvokeHandler();
 #else
-  return GetQuickAbstractMethodErrorStub();
+  return GetQuickProxyInvokeHandler();
 #endif
 }
 
+extern "C" void* art_jni_dlsym_lookup_stub(JNIEnv*, jobject);
 static inline void* GetJniDlsymLookupStub() {
   return reinterpret_cast<void*>(art_jni_dlsym_lookup_stub);
 }
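
Every portable/quick pair in this header now follows the same shape: two trivially inlined getters plus one wrapper that picks between them at compile time, so callers never repeat the #if. A condensed sketch of that dispatch, with hypothetical stub names:

#include <cstdio>

static void PortableBridgeSketch() { std::puts("portable"); }
static void QuickBridgeSketch() { std::puts("quick"); }

using Bridge = void (*)();

// One compile-time decision; every caller gets the stub matching the
// compiler the runtime was built against.
static inline Bridge GetBridgeSketch() {
#if defined(ART_USE_PORTABLE_COMPILER)
  return PortableBridgeSketch;
#else
  return QuickBridgeSketch;
#endif
}

int main() {
  GetBridgeSketch()();  // prints "quick" unless built with the portable flag
  return 0;
}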
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.cc b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
new file mode 100644
index 0000000..d99c43e
--- /dev/null
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "class_linker.h"
+#include "interpreter/interpreter.h"
+#include "invoke_arg_array_builder.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "object_utils.h"
+#include "runtime.h"
+#include "stack.h"
+
+namespace art {
+
+extern "C" void artInterperterToCompiledCodeBridge(Thread* self, MethodHelper& mh,
+                                                   const DexFile::CodeItem* code_item,
+                                                   ShadowFrame* shadow_frame, JValue* result)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::AbstractMethod* method = shadow_frame->GetMethod();
+  // Ensure static methods are initialized.
+  if (method->IsStatic()) {
+    Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(), true, true);
+  }
+  uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
+  ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
+  arg_array.BuildArgArray(shadow_frame, arg_offset);
+  method->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), result, mh.GetShorty()[0]);
+}
+
+}  // namespace art
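
The arg_offset line in the new bridge encodes a dex calling convention worth spelling out: a method's incoming arguments occupy the last ins_size_ of its registers_size_ virtual registers, so the first argument lives at register registers_size_ - ins_size_. A worked example with made-up sizes:

#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical code item: 5 virtual registers (v0-v4), of which the
  // last 2 are ins.
  uint16_t registers_size = 5;
  uint16_t ins_size = 2;
  uint16_t arg_offset = registers_size - ins_size;
  assert(arg_offset == 3);  // the arguments sit in v3 and v4
  // When there is no code item (code_item == NULL above), there are no
  // locals to skip, which is why the bridge falls back to an offset of 0.
  return 0;
}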
diff --git a/runtime/entrypoints/interpreter/interpreter_entrypoints.h b/runtime/entrypoints/interpreter/interpreter_entrypoints.h
new file mode 100644
index 0000000..c7df4e6
--- /dev/null
+++ b/runtime/entrypoints/interpreter/interpreter_entrypoints.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_INTERPRETER_INTERPRETER_ENTRYPOINTS_H_
+#define ART_RUNTIME_ENTRYPOINTS_INTERPRETER_INTERPRETER_ENTRYPOINTS_H_
+
+#include "base/macros.h"
+#include "dex_file.h"
+#include "offsets.h"
+
+#define INTERPRETER_ENTRYPOINT_OFFSET(x) \
+    ThreadOffset(static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, interpreter_entrypoints_)) + \
+                 static_cast<uintptr_t>(OFFSETOF_MEMBER(InterpreterEntryPoints, x)))
+
+namespace art {
+
+union JValue;
+class MethodHelper;
+class ShadowFrame;
+class Thread;
+
+// Pointers to functions that are called by interpreter trampolines via thread-local storage.
+struct PACKED(4) InterpreterEntryPoints {
+  void (*pInterpreterToInterpreterBridge)(Thread* self, MethodHelper& mh,
+                                          const DexFile::CodeItem* code_item,
+                                          ShadowFrame* shadow_frame, JValue* result);
+  void (*pInterpreterToCompiledCodeBridge)(Thread* self, MethodHelper& mh,
+                                           const DexFile::CodeItem* code_item,
+                                           ShadowFrame* shadow_frame, JValue* result);
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_ENTRYPOINTS_INTERPRETER_INTERPRETER_ENTRYPOINTS_H_
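
INTERPRETER_ENTRYPOINT_OFFSET composes two member offsets: where the interpreter table sits inside Thread, plus where the requested slot sits inside the table. The sum is the displacement that generated code adds to the thread register to load the function pointer. A minimal sketch of that arithmetic with mock types (the real structs are PACKED(4) and live in ART's headers):

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct MockInterpreterEntryPoints {
  void (*pInterpreterToInterpreterBridge)();
  void (*pInterpreterToCompiledCodeBridge)();
};

struct MockThread {
  uint32_t state_;  // some unrelated leading field
  MockInterpreterEntryPoints interpreter_entrypoints_;
};

// Mirrors the macro: table offset within the thread plus slot offset within
// the table.
#define MOCK_INTERPRETER_ENTRYPOINT_OFFSET(x) \
    (offsetof(MockThread, interpreter_entrypoints_) + \
     offsetof(MockInterpreterEntryPoints, x))

int main() {
  printf("bridge slot at thread offset %zu\n",
         MOCK_INTERPRETER_ENTRYPOINT_OFFSET(pInterpreterToCompiledCodeBridge));
  return 0;
}
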
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
index 98f7b12..88b4936 100644
--- a/runtime/entrypoints/jni/jni_entrypoints.cc
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -15,23 +15,26 @@
  */
 
 #include "base/logging.h"
-#include "mirror/abstract_method.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "object_utils.h"
 #include "scoped_thread_state_change.h"
 #include "thread.h"
 
 namespace art {
 
 // Used by the JNI dlsym stub to find the native method to invoke if none is registered.
-extern "C" void* artFindNativeMethod(Thread* self) {
+extern "C" void* artFindNativeMethod() {
+  Thread* self = Thread::Current();
   Locks::mutator_lock_->AssertNotHeld(self);  // We come here as Native.
-  DCHECK(Thread::Current() == self);
   ScopedObjectAccess soa(self);
 
   mirror::AbstractMethod* method = self->GetCurrentMethod(NULL);
   DCHECK(method != NULL);
 
-  // Lookup symbol address for method, on failure we'll return NULL with an
-  // exception set, otherwise we return the address of the method we found.
+  // Look up the symbol address for the method; on failure we'll return NULL with an exception
+  // set, otherwise we return the address of the method we found.
   void* native_code = soa.Vm()->FindCodeForNativeMethod(method);
   if (native_code == NULL) {
     DCHECK(self->IsExceptionPending());
@@ -43,4 +46,78 @@
   }
 }
 
+static void WorkAroundJniBugsForJobject(intptr_t* arg_ptr) {
+  intptr_t value = *arg_ptr;
+  mirror::Object** value_as_jni_rep = reinterpret_cast<mirror::Object**>(value);
+  mirror::Object* value_as_work_around_rep = value_as_jni_rep != NULL ? *value_as_jni_rep : NULL;
+  CHECK(Runtime::Current()->GetHeap()->IsHeapAddress(value_as_work_around_rep))
+      << value_as_work_around_rep;
+  *arg_ptr = reinterpret_cast<intptr_t>(value_as_work_around_rep);
+}
+
+extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  DCHECK(Thread::Current() == self);
+  // TODO: this code is specific to ARM
+  // On entry the stack pointed by sp is:
+  // | arg3   | <- Calling JNI method's frame (and extra bit for out args)
+  // | LR     |
+  // | R3     |    arg2
+  // | R2     |    arg1
+  // | R1     |    jclass/jobject
+  // | R0     |    JNIEnv
+  // | unused |
+  // | unused |
+  // | unused | <- sp
+  mirror::AbstractMethod* jni_method = self->GetCurrentMethod(NULL);
+  DCHECK(jni_method->IsNative()) << PrettyMethod(jni_method);
+  intptr_t* arg_ptr = sp + 4;  // pointer to r1 on stack
+  // Fix up this/jclass argument
+  WorkAroundJniBugsForJobject(arg_ptr);
+  arg_ptr++;
+  // Fix up jobject arguments
+  MethodHelper mh(jni_method);
+  int reg_num = 2;  // Current register being processed, -1 for stack arguments.
+  for (uint32_t i = 1; i < mh.GetShortyLength(); i++) {
+    char shorty_char = mh.GetShorty()[i];
+    if (shorty_char == 'L') {
+      WorkAroundJniBugsForJobject(arg_ptr);
+    }
+    if (shorty_char == 'J' || shorty_char == 'D') {
+      if (reg_num == 2) {
+        arg_ptr = sp + 8;  // skip to out arguments
+        reg_num = -1;
+      } else if (reg_num == 3) {
+        arg_ptr = sp + 10;  // skip to out arguments plus 2 slots as long must be aligned
+        reg_num = -1;
+      } else {
+        DCHECK_EQ(reg_num, -1);
+        if ((reinterpret_cast<intptr_t>(arg_ptr) & 7) == 4) {
+          arg_ptr += 3;  // unaligned, pad and move through stack arguments
+        } else {
+          arg_ptr += 2;  // aligned, move through stack arguments
+        }
+      }
+    } else {
+      if (reg_num == 2) {
+        arg_ptr++;  // move through register arguments
+        reg_num++;
+      } else if (reg_num == 3) {
+        arg_ptr = sp + 8;  // skip to outgoing stack arguments
+        reg_num = -1;
+      } else {
+        DCHECK_EQ(reg_num, -1);
+        arg_ptr++;  // move through stack arguments
+      }
+    }
+  }
+  // Load expected destination, see Method::RegisterNative
+  const void* code = reinterpret_cast<const void*>(jni_method->GetNativeGcMap());
+  if (UNLIKELY(code == NULL)) {
+    code = GetJniDlsymLookupStub();
+    jni_method->RegisterNative(self, code);
+  }
+  return code;
+}
+
 }  // namespace art
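
artWorkAroundAppJniBugs walks the ARM argument area by shorty: the r1-r3 spill slots first, then the caller's out-args, with longs and doubles kept 8-byte aligned. The following standalone simulation reproduces only the cursor arithmetic, printing the word slot (relative to sp) at which each argument starts; it assumes sp itself is 8-byte aligned and uses a made-up shorty:

#include <cstdio>
#include <cstring>

int main() {
  const char* shorty = "VLJI";  // made-up: void f(Object, long, int)
  int slot = 4;     // r1's spill slot is 4 words above sp (see layout above)
  int reg_num = 2;  // register holding the next argument; -1 once on the stack
  printf("this/jclass -> slot %d\n", slot);
  slot++;  // step past r1
  for (size_t i = 1; i < strlen(shorty); ++i) {
    char c = shorty[i];
    printf("arg '%c' -> slot %d\n", c, slot);
    if (c == 'J' || c == 'D') {  // wide argument: register use ends here
      if (reg_num == 2) { slot = 8; reg_num = -1; }        // skip to out args
      else if (reg_num == 3) { slot = 10; reg_num = -1; }  // out args plus alignment pad
      else { slot += (slot % 2 != 0) ? 3 : 2; }            // pad odd slots to 8 bytes
    } else {
      if (reg_num == 2) { slot++; reg_num = 3; }
      else if (reg_num == 3) { slot = 8; reg_num = -1; }   // r3 was the last register
      else { slot++; }
    }
  }
  return 0;
}
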
diff --git a/runtime/entrypoints/jni/jni_entrypoints.h b/runtime/entrypoints/jni/jni_entrypoints.h
new file mode 100644
index 0000000..0a53447
--- /dev/null
+++ b/runtime/entrypoints/jni/jni_entrypoints.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_JNI_JNI_ENTRYPOINTS_H_
+#define ART_RUNTIME_ENTRYPOINTS_JNI_JNI_ENTRYPOINTS_H_
+
+#include "base/macros.h"
+#include "offsets.h"
+
+#define JNI_ENTRYPOINT_OFFSET(x) \
+    ThreadOffset(static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, jni_entrypoints_)) + \
+                 static_cast<uintptr_t>(OFFSETOF_MEMBER(JniEntryPoints, x)))
+
+namespace art {
+
+// Pointers to functions that are called by JNI trampolines via thread-local storage.
+struct PACKED(4) JniEntryPoints {
+  // Called when the JNI method isn't registered.
+  void* (*pDlsymLookup)(JNIEnv* env, jobject);
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_ENTRYPOINTS_JNI_JNI_ENTRYPOINTS_H_
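
Like the interpreter table, JniEntryPoints is reached through the Thread object: the dlsym stub loads pDlsymLookup from thread-local storage and branches to it. A toy model of that table dispatch in plain C++ (illustrative names, an ordinary call instead of real TLS and assembly):

#include <cstdio>

struct ToyJniTable {
  void* (*pDlsymLookup)(const char* sym);
};

static void* ToyLookup(const char* sym) {
  printf("resolving %s\n", sym);
  return nullptr;
}

struct ToyThread {
  ToyJniTable jni_entrypoints_;
};

int main() {
  ToyThread self{{ToyLookup}};
  // A stub would load this slot via the thread register and jump to it; a
  // plain call through the table shows the same indirection.
  self.jni_entrypoints_.pDlsymLookup("Java_Foo_bar");
  return 0;
}
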
diff --git a/runtime/entrypoints/portable/portable_entrypoints.h b/runtime/entrypoints/portable/portable_entrypoints.h
index a229c76..ec9e4f8 100644
--- a/runtime/entrypoints/portable/portable_entrypoints.h
+++ b/runtime/entrypoints/portable/portable_entrypoints.h
@@ -28,15 +28,15 @@
 class Thread;
 
 #define PORTABLE_ENTRYPOINT_OFFSET(x) \
-    (static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, portable_entrypoints_)) + \
-        static_cast<uintptr_t>(OFFSETOF_MEMBER(PortableEntryPoints, x)))
+    ThreadOffset(static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, portable_entrypoints_)) + \
+                 static_cast<uintptr_t>(OFFSETOF_MEMBER(PortableEntryPoints, x)))
 
 // Pointers to functions that are called by code generated by compilers adhering to the portable
 // compiler ABI.
 struct PACKED(4) PortableEntryPoints {
   // Invocation
-  const void* (*pPortableResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*,
-                                                       mirror::AbstractMethod**, Thread*);
+  void (*pPortableResolutionTrampoline)(mirror::AbstractMethod*);
+  void (*pPortableToInterpreterBridge)(mirror::AbstractMethod*);
 };
 
 }  // namespace art
diff --git a/runtime/entrypoints/quick/quick_argument_visitor.h b/runtime/entrypoints/quick/quick_argument_visitor.h
deleted file mode 100644
index 35fa972..0000000
--- a/runtime/entrypoints/quick/quick_argument_visitor.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ARGUMENT_VISITOR_H_
-#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ARGUMENT_VISITOR_H_
-
-#include "object_utils.h"
-
-namespace art {
-
-// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
-class QuickArgumentVisitor {
- public:
-// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame.
-// Size of Runtime::kRefAndArgs callee save frame.
-// Size of Method* and register parameters in out stack arguments.
-#if defined(__arm__)
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48
-#define QUICK_STACK_ARG_SKIP 16
-#elif defined(__mips__)
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64
-#define QUICK_STACK_ARG_SKIP 16
-#elif defined(__i386__)
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32
-#define QUICK_STACK_ARG_SKIP 16
-#else
-#error "Unsupported architecture"
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0
-#define QUICK_STACK_ARG_SKIP 0
-#endif
-
-  QuickArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
-    caller_mh_(caller_mh),
-    args_in_regs_(ComputeArgsInRegs(caller_mh)),
-    num_params_(caller_mh.NumArgs()),
-    reg_args_(reinterpret_cast<byte*>(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET),
-    stack_args_(reinterpret_cast<byte*>(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE
-                + QUICK_STACK_ARG_SKIP),
-    cur_args_(reg_args_),
-    cur_arg_index_(0),
-    param_index_(0),
-    is_split_long_or_double_(false) {
-  }
-
-  virtual ~QuickArgumentVisitor() {}
-
-  virtual void Visit() = 0;
-
-  bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return caller_mh_.IsParamAReference(param_index_);
-  }
-
-  bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return caller_mh_.IsParamALongOrDouble(param_index_);
-  }
-
-  Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return caller_mh_.GetParamPrimitiveType(param_index_);
-  }
-
-  byte* GetParamAddress() const {
-    return cur_args_ + (cur_arg_index_ * kPointerSize);
-  }
-
-  bool IsSplitLongOrDouble() const {
-    return is_split_long_or_double_;
-  }
-
-  uint64_t ReadSplitLongParam() const {
-    DCHECK(IsSplitLongOrDouble());
-    uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
-    uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
-    return (low_half & 0xffffffffULL) | (high_half << 32);
-  }
-
-  void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    for (cur_arg_index_ = 0;  cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) {
-      is_split_long_or_double_ = (cur_arg_index_ == 2) && IsParamALongOrDouble();
-      Visit();
-      cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
-      param_index_++;
-    }
-    cur_args_ = stack_args_;
-    cur_arg_index_ = is_split_long_or_double_ ? 1 : 0;
-    is_split_long_or_double_ = false;
-    while (param_index_ < num_params_) {
-      Visit();
-      cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
-      param_index_++;
-    }
-  }
-
- private:
-  static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    size_t args_in_regs = 0;
-    size_t num_params = mh.NumArgs();
-    for (size_t i = 0; i < num_params; i++) {
-      args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 2 : 1);
-      if (args_in_regs > 3) {
-        args_in_regs = 3;
-        break;
-      }
-    }
-    return args_in_regs;
-  }
-  MethodHelper& caller_mh_;
-  const size_t args_in_regs_;
-  const size_t num_params_;
-  byte* const reg_args_;
-  byte* const stack_args_;
-  byte* cur_args_;
-  size_t cur_arg_index_;
-  size_t param_index_;
-  // Does a 64bit parameter straddle the register and stack arguments?
-  bool is_split_long_or_double_;
-};
-
-}  // namespace art
-
-#endif  // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ARGUMENT_VISITOR_H_
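
This visitor is not gone for good: the diff re-creates it later, generalized to work from a shorty rather than a MethodHelper, inside the new quick_trampoline_entrypoints.cc. Its subtlest detail is ReadSplitLongParam, which reassembles a 64-bit argument whose low word landed in the last register slot while its high word spilled to the first stack slot. The arithmetic in isolation, with made-up slot values:

#include <cstdint>
#include <cstdio>

static uint64_t ReadSplitLong(uint32_t low_half, uint32_t high_half) {
  // Same expression as ReadSplitLongParam: the low word or'd with the high
  // word shifted into the upper 32 bits.
  return static_cast<uint64_t>(low_half) | (static_cast<uint64_t>(high_half) << 32);
}

int main() {
  uint32_t reg_slot = 0x89abcdefu;    // pretend: value left in the last arg register
  uint32_t stack_slot = 0x01234567u;  // pretend: first out-arg slot on the stack
  printf("%016llx\n",
         static_cast<unsigned long long>(ReadSplitLong(reg_slot, stack_slot)));
  return 0;
}
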
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 74b8cfd..e76679b 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -17,44 +17,45 @@
 #ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_
 #define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_
 
-#include "dex_file-inl.h"
-#include "runtime.h"
+#include <jni.h>
+
+#include "base/macros.h"
+#include "offsets.h"
 
 #define QUICK_ENTRYPOINT_OFFSET(x) \
-    (static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, quick_entrypoints_)) + \
-        static_cast<uintptr_t>(OFFSETOF_MEMBER(QuickEntryPoints, x)))
+    ThreadOffset(static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, quick_entrypoints_)) + \
+                 static_cast<uintptr_t>(OFFSETOF_MEMBER(QuickEntryPoints, x)))
 
 namespace art {
+
 namespace mirror {
   class AbstractMethod;
   class Class;
   class Object;
 }  // namespace mirror
-class DvmDex;
-class MethodHelper;
-class ShadowFrame;
+
 class Thread;
 
 // Pointers to functions that are called by quick compiler generated code via thread-local storage.
 struct PACKED(4) QuickEntryPoints {
   // Alloc
-  void* (*pAllocArrayFromCode)(uint32_t, void*, int32_t);
-  void* (*pAllocArrayFromCodeWithAccessCheck)(uint32_t, void*, int32_t);
-  void* (*pAllocObjectFromCode)(uint32_t, void*);
-  void* (*pAllocObjectFromCodeWithAccessCheck)(uint32_t, void*);
-  void* (*pCheckAndAllocArrayFromCode)(uint32_t, void*, int32_t);
-  void* (*pCheckAndAllocArrayFromCodeWithAccessCheck)(uint32_t, void*, int32_t);
+  void* (*pAllocArray)(uint32_t, void*, int32_t);
+  void* (*pAllocArrayWithAccessCheck)(uint32_t, void*, int32_t);
+  void* (*pAllocObject)(uint32_t, void*);
+  void* (*pAllocObjectWithAccessCheck)(uint32_t, void*);
+  void* (*pCheckAndAllocArray)(uint32_t, void*, int32_t);
+  void* (*pCheckAndAllocArrayWithAccessCheck)(uint32_t, void*, int32_t);
 
   // Cast
-  uint32_t (*pInstanceofNonTrivialFromCode)(const mirror::Class*, const mirror::Class*);
-  void (*pCanPutArrayElementFromCode)(void*, void*);
-  void (*pCheckCastFromCode)(void*, void*);
+  uint32_t (*pInstanceofNonTrivial)(const mirror::Class*, const mirror::Class*);
+  void (*pCanPutArrayElement)(void*, void*);
+  void (*pCheckCast)(void*, void*);
 
   // DexCache
   void* (*pInitializeStaticStorage)(uint32_t, void*);
-  void* (*pInitializeTypeAndVerifyAccessFromCode)(uint32_t, void*);
-  void* (*pInitializeTypeFromCode)(uint32_t, void*);
-  void* (*pResolveStringFromCode)(void*, uint32_t);
+  void* (*pInitializeTypeAndVerifyAccess)(uint32_t, void*);
+  void* (*pInitializeType)(uint32_t, void*);
+  void* (*pResolveString)(void*, uint32_t);
 
   // Field
   int (*pSet32Instance)(uint32_t, void*, int32_t);  // field_idx, obj, src
@@ -71,7 +72,7 @@
   void* (*pGetObjStatic)(uint32_t);
 
   // FillArray
-  void (*pHandleFillArrayDataFromCode)(void*, void*);
+  void (*pHandleFillArrayData)(void*, void*);
 
   // JNI
   uint32_t (*pJniMethodStart)(Thread*);
@@ -83,8 +84,8 @@
                                                     jobject locked, Thread* self);
 
   // Locks
-  void (*pLockObjectFromCode)(void*);
-  void (*pUnlockObjectFromCode)(void*);
+  void (*pLockObject)(void*);
+  void (*pUnlockObject)(void*);
 
   // Math
   int32_t (*pCmpgDouble)(double, double);
@@ -108,14 +109,6 @@
   uint64_t (*pShrLong)(uint64_t, uint32_t);
   uint64_t (*pUshrLong)(uint64_t, uint32_t);
 
-  // Interpreter
-  void (*pInterpreterToInterpreterEntry)(Thread* self, MethodHelper& mh,
-                                         const DexFile::CodeItem* code_item,
-                                         ShadowFrame* shadow_frame, JValue* result);
-  void (*pInterpreterToQuickEntry)(Thread* self, MethodHelper& mh,
-                                   const DexFile::CodeItem* code_item,
-                                   ShadowFrame* shadow_frame, JValue* result);
-
   // Intrinsics
   int32_t (*pIndexOf)(void*, uint32_t, uint32_t, uint32_t);
   int32_t (*pMemcmp16)(void*, void*, int32_t);
@@ -123,8 +116,8 @@
   void* (*pMemcpy)(void*, const void*, size_t);
 
   // Invocation
-  const void* (*pQuickResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*,
-                                                    mirror::AbstractMethod**, Thread*);
+  void (*pQuickResolutionTrampoline)(mirror::AbstractMethod*);
+  void (*pQuickToInterpreterBridge)(mirror::AbstractMethod*);
   void (*pInvokeDirectTrampolineWithAccessCheck)(uint32_t, void*);
   void (*pInvokeInterfaceTrampoline)(uint32_t, void*);
   void (*pInvokeInterfaceTrampolineWithAccessCheck)(uint32_t, void*);
@@ -133,22 +126,21 @@
   void (*pInvokeVirtualTrampolineWithAccessCheck)(uint32_t, void*);
 
   // Thread
-  void (*pCheckSuspendFromCode)(Thread*);  // Stub that is called when the suspend count is non-zero
-  void (*pTestSuspendFromCode)();  // Stub that is periodically called to test the suspend count
+  void (*pCheckSuspend)(Thread*);  // Stub that is called when the suspend count is non-zero
+  void (*pTestSuspend)();  // Stub that is periodically called to test the suspend count
 
   // Throws
   void (*pDeliverException)(void*);
-  void (*pThrowArrayBoundsFromCode)(int32_t, int32_t);
-  void (*pThrowDivZeroFromCode)();
-  void (*pThrowNoSuchMethodFromCode)(int32_t);
-  void (*pThrowNullPointerFromCode)();
-  void (*pThrowStackOverflowFromCode)(void*);
+  void (*pThrowArrayBounds)(int32_t, int32_t);
+  void (*pThrowDivZero)();
+  void (*pThrowNoSuchMethod)(int32_t);
+  void (*pThrowNullPointer)();
+  void (*pThrowStackOverflow)(void*);
 };
 
 
 // JNI entrypoints.
-extern uint32_t JniMethodStart(Thread* self)
-    UNLOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
+extern uint32_t JniMethodStart(Thread* self) UNLOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
 extern uint32_t JniMethodStartSynchronized(jobject to_lock, Thread* self)
     UNLOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
 extern void JniMethodEnd(uint32_t saved_local_ref_cookie, Thread* self)
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 7ecd296..0e61942 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -32,7 +32,7 @@
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
   const void* result = instrumentation->GetQuickCodeFor(method);
-  bool interpreter_entry = (result == GetInterpreterEntryPoint());
+  bool interpreter_entry = (result == GetQuickToInterpreterBridge());
   instrumentation->PushInstrumentationStackFrame(self, method->IsStatic() ? NULL : this_object,
                                                  method, lr, interpreter_entry);
   CHECK(result != NULL) << PrettyMethod(method);
diff --git a/runtime/entrypoints/quick/quick_interpreter_entrypoints.cc b/runtime/entrypoints/quick/quick_interpreter_entrypoints.cc
deleted file mode 100644
index 656df8d..0000000
--- a/runtime/entrypoints/quick/quick_interpreter_entrypoints.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "quick_argument_visitor.h"
-#include "callee_save_frame.h"
-#include "dex_file-inl.h"
-#include "interpreter/interpreter.h"
-#include "invoke_arg_array_builder.h"
-#include "mirror/abstract_method-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/object-inl.h"
-#include "mirror/object_array-inl.h"
-#include "object_utils.h"
-
-namespace art {
-
-// Visits arguments on the stack placing them into the shadow frame.
-class BuildShadowFrameVisitor : public QuickArgumentVisitor {
- public:
-  BuildShadowFrameVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp,
-                          ShadowFrame& sf, size_t first_arg_reg) :
-    QuickArgumentVisitor(caller_mh, sp), sf_(sf), cur_reg_(first_arg_reg) {}
-
-  virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    Primitive::Type type = GetParamPrimitiveType();
-    switch (type) {
-      case Primitive::kPrimLong:  // Fall-through.
-      case Primitive::kPrimDouble:
-        if (IsSplitLongOrDouble()) {
-          sf_.SetVRegLong(cur_reg_, ReadSplitLongParam());
-        } else {
-          sf_.SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
-        }
-        ++cur_reg_;
-        break;
-      case Primitive::kPrimNot:
-        sf_.SetVRegReference(cur_reg_, *reinterpret_cast<mirror::Object**>(GetParamAddress()));
-        break;
-      case Primitive::kPrimBoolean:  // Fall-through.
-      case Primitive::kPrimByte:     // Fall-through.
-      case Primitive::kPrimChar:     // Fall-through.
-      case Primitive::kPrimShort:    // Fall-through.
-      case Primitive::kPrimInt:      // Fall-through.
-      case Primitive::kPrimFloat:
-        sf_.SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
-        break;
-      case Primitive::kPrimVoid:
-        LOG(FATAL) << "UNREACHABLE";
-        break;
-    }
-    ++cur_reg_;
-  }
-
- private:
-  ShadowFrame& sf_;
-  size_t cur_reg_;
-
-  DISALLOW_COPY_AND_ASSIGN(BuildShadowFrameVisitor);
-};
-
-extern "C" uint64_t artInterpreterEntry(mirror::AbstractMethod* method, Thread* self,
-                                        mirror::AbstractMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
-  // frame.
-  const char* old_cause = self->StartAssertNoThreadSuspension("Building interpreter shadow frame");
-  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
-
-  MethodHelper mh(method);
-  const DexFile::CodeItem* code_item = mh.GetCodeItem();
-  uint16_t num_regs = code_item->registers_size_;
-  void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
-  ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, NULL,  // No last shadow coming from quick.
-                                                method, 0, memory));
-  size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
-  BuildShadowFrameVisitor shadow_frame_builder(mh, sp, *shadow_frame, first_arg_reg);
-  shadow_frame_builder.VisitArguments();
-  // Push a transition back into managed code onto the linked list in thread.
-  ManagedStack fragment;
-  self->PushManagedStackFragment(&fragment);
-  self->PushShadowFrame(shadow_frame);
-  self->EndAssertNoThreadSuspension(old_cause);
-
-  if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) {
-    // Ensure static method's class is initialized.
-    if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(),
-                                                                 true, true)) {
-      DCHECK(Thread::Current()->IsExceptionPending());
-      self->PopManagedStackFragment(fragment);
-      return 0;
-    }
-  }
-
-  JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame);
-  // Pop transition.
-  self->PopManagedStackFragment(fragment);
-  return result.GetJ();
-}
-
-extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
-                                           const DexFile::CodeItem* code_item,
-                                           ShadowFrame* shadow_frame, JValue* result)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  mirror::AbstractMethod* method = shadow_frame->GetMethod();
-  // Ensure static methods are initialized.
-  if (method->IsStatic()) {
-    Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(), true, true);
-  }
-  uint16_t arg_offset = (code_item == NULL) ? 0 : code_item->registers_size_ - code_item->ins_size_;
-  ArgArray arg_array(mh.GetShorty(), mh.GetShortyLength());
-  arg_array.BuildArgArray(shadow_frame, arg_offset);
-  method->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), result, mh.GetShorty()[0]);
-}
-
-}  // namespace art
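
The artInterpreterEntry logic deleted here survives as artQuickToInterpreterBridge in the new trampoline file later in this diff. Its shadow-frame setup leans on the dex calling convention, in which incoming arguments occupy the highest-numbered virtual registers, hence first_arg_reg = registers_size_ - ins_size_. A worked example with made-up frame sizes:

#include <cstdio>

int main() {
  unsigned registers_size = 5;  // made-up method frame: v0..v4
  unsigned ins_size = 2;        // two incoming args, e.g. this plus one int
  unsigned first_arg_reg = registers_size - ins_size;  // = 3
  for (unsigned i = 0; i < ins_size; ++i) {
    printf("incoming arg %u -> v%u\n", i, first_arg_reg + i);  // v3, v4
  }
  return 0;
}
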
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 23a28f9..9907c04 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -94,78 +94,4 @@
   return o;
 }
 
-static void WorkAroundJniBugsForJobject(intptr_t* arg_ptr) {
-  intptr_t value = *arg_ptr;
-  mirror::Object** value_as_jni_rep = reinterpret_cast<mirror::Object**>(value);
-  mirror::Object* value_as_work_around_rep = value_as_jni_rep != NULL ? *value_as_jni_rep : NULL;
-  CHECK(Runtime::Current()->GetHeap()->IsHeapAddress(value_as_work_around_rep))
-      << value_as_work_around_rep;
-  *arg_ptr = reinterpret_cast<intptr_t>(value_as_work_around_rep);
-}
-
-extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  DCHECK(Thread::Current() == self);
-  // TODO: this code is specific to ARM
-  // On entry the stack pointed by sp is:
-  // | arg3   | <- Calling JNI method's frame (and extra bit for out args)
-  // | LR     |
-  // | R3     |    arg2
-  // | R2     |    arg1
-  // | R1     |    jclass/jobject
-  // | R0     |    JNIEnv
-  // | unused |
-  // | unused |
-  // | unused | <- sp
-  mirror::AbstractMethod* jni_method = self->GetCurrentMethod(NULL);
-  DCHECK(jni_method->IsNative()) << PrettyMethod(jni_method);
-  intptr_t* arg_ptr = sp + 4;  // pointer to r1 on stack
-  // Fix up this/jclass argument
-  WorkAroundJniBugsForJobject(arg_ptr);
-  arg_ptr++;
-  // Fix up jobject arguments
-  MethodHelper mh(jni_method);
-  int reg_num = 2;  // Current register being processed, -1 for stack arguments.
-  for (uint32_t i = 1; i < mh.GetShortyLength(); i++) {
-    char shorty_char = mh.GetShorty()[i];
-    if (shorty_char == 'L') {
-      WorkAroundJniBugsForJobject(arg_ptr);
-    }
-    if (shorty_char == 'J' || shorty_char == 'D') {
-      if (reg_num == 2) {
-        arg_ptr = sp + 8;  // skip to out arguments
-        reg_num = -1;
-      } else if (reg_num == 3) {
-        arg_ptr = sp + 10;  // skip to out arguments plus 2 slots as long must be aligned
-        reg_num = -1;
-      } else {
-        DCHECK_EQ(reg_num, -1);
-        if ((reinterpret_cast<intptr_t>(arg_ptr) & 7) == 4) {
-          arg_ptr += 3;  // unaligned, pad and move through stack arguments
-        } else {
-          arg_ptr += 2;  // aligned, move through stack arguments
-        }
-      }
-    } else {
-      if (reg_num == 2) {
-        arg_ptr++;  // move through register arguments
-        reg_num++;
-      } else if (reg_num == 3) {
-        arg_ptr = sp + 8;  // skip to outgoing stack arguments
-        reg_num = -1;
-      } else {
-        DCHECK_EQ(reg_num, -1);
-        arg_ptr++;  // move through stack arguments
-      }
-    }
-  }
-  // Load expected destination, see Method::RegisterNative
-  const void* code = reinterpret_cast<const void*>(jni_method->GetNativeGcMap());
-  if (UNLIKELY(code == NULL)) {
-    code = GetJniDlsymLookupStub();
-    jni_method->RegisterNative(self, code);
-  }
-  return code;
-}
-
 }  // namespace art
diff --git a/runtime/entrypoints/quick/quick_proxy_entrypoints.cc b/runtime/entrypoints/quick/quick_proxy_entrypoints.cc
deleted file mode 100644
index 4e3d749..0000000
--- a/runtime/entrypoints/quick/quick_proxy_entrypoints.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "quick_argument_visitor.h"
-#include "dex_file-inl.h"
-#include "entrypoints/entrypoint_utils.h"
-#include "mirror/abstract_method-inl.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/object-inl.h"
-#include "object_utils.h"
-#include "reflection.h"
-#include "scoped_thread_state_change.h"
-#include "thread.h"
-#include "well_known_classes.h"
-
-#include "ScopedLocalRef.h"
-
-namespace art {
-
-// Visits arguments on the stack placing them into the args vector, Object* arguments are converted
-// to jobjects.
-class BuildQuickArgumentVisitor : public QuickArgumentVisitor {
- public:
-  BuildQuickArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp,
-                            ScopedObjectAccessUnchecked& soa, std::vector<jvalue>& args) :
-    QuickArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {}
-
-  virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    jvalue val;
-    Primitive::Type type = GetParamPrimitiveType();
-    switch (type) {
-      case Primitive::kPrimNot: {
-        mirror::Object* obj = *reinterpret_cast<mirror::Object**>(GetParamAddress());
-        val.l = soa_.AddLocalReference<jobject>(obj);
-        break;
-      }
-      case Primitive::kPrimLong:  // Fall-through.
-      case Primitive::kPrimDouble:
-        if (IsSplitLongOrDouble()) {
-          val.j = ReadSplitLongParam();
-        } else {
-          val.j = *reinterpret_cast<jlong*>(GetParamAddress());
-        }
-        break;
-      case Primitive::kPrimBoolean:  // Fall-through.
-      case Primitive::kPrimByte:     // Fall-through.
-      case Primitive::kPrimChar:     // Fall-through.
-      case Primitive::kPrimShort:    // Fall-through.
-      case Primitive::kPrimInt:      // Fall-through.
-      case Primitive::kPrimFloat:
-        val.i =  *reinterpret_cast<jint*>(GetParamAddress());
-        break;
-      case Primitive::kPrimVoid:
-        LOG(FATAL) << "UNREACHABLE";
-        val.j = 0;
-        break;
-    }
-    args_.push_back(val);
-  }
-
- private:
-  ScopedObjectAccessUnchecked& soa_;
-  std::vector<jvalue>& args_;
-
-  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
-};
-
-// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
-// which is responsible for recording callee save registers. We explicitly place into jobjects the
-// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
-// field within the proxy object, which will box the primitive arguments and deal with error cases.
-extern "C" uint64_t artQuickProxyInvokeHandler(mirror::AbstractMethod* proxy_method,
-                                          mirror::Object* receiver,
-                                          Thread* self, mirror::AbstractMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
-  const char* old_cause =
-      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
-  // Register the top of the managed stack, making stack crawlable.
-  DCHECK_EQ(*sp, proxy_method);
-  self->SetTopOfStack(sp, 0);
-  DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
-            Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
-  self->VerifyStack();
-  // Start new JNI local reference state.
-  JNIEnvExt* env = self->GetJniEnv();
-  ScopedObjectAccessUnchecked soa(env);
-  ScopedJniEnvLocalRefState env_state(env);
-  // Create local ref. copies of proxy method and the receiver.
-  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
-
-  // Placing arguments into args vector and remove the receiver.
-  MethodHelper proxy_mh(proxy_method);
-  std::vector<jvalue> args;
-  BuildQuickArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args);
-  local_ref_visitor.VisitArguments();
-  args.erase(args.begin());
-
-  // Convert proxy method into expected interface method.
-  mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod();
-  DCHECK(interface_method != NULL);
-  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
-  jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
-
-  // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code
-  // that performs allocations.
-  self->EndAssertNoThreadSuspension(old_cause);
-  JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
-                                               rcvr_jobj, interface_method_jobj, args);
-  return result.GetJ();
-}
-
-}  // namespace art
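
The proxy handler likewise moves into the new quick_trampoline_entrypoints.cc later in this diff. Its core marshalling step is shorty-driven packing of raw argument words into jvalue slots. A simplified standalone version, using a stand-in union and a made-up shorty instead of the JNI headers:

#include <cstdint>
#include <cstdio>
#include <vector>

union Value {  // stand-in for JNI's jvalue
  int32_t i;
  int64_t j;
  void*   l;
};

int main() {
  const char* shorty = "VLI";  // made-up: void handler(Object, int)
  std::vector<Value> args;
  for (const char* p = shorty + 1; *p != '\0'; ++p) {  // skip the return type
    Value v;
    switch (*p) {
      case 'L':  // reference: the real code stores a jobject local reference
        v.l = nullptr;
        break;
      case 'J':  // wide values fill the 64-bit slot
      case 'D':
        v.j = 0;
        break;
      default:   // Z, B, C, S, I and F are all read as 32-bit words
        v.i = 42;
        break;
    }
    args.push_back(v);
  }
  printf("packed %zu args\n", args.size());
  return 0;
}
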
diff --git a/runtime/entrypoints/quick/quick_stub_entrypoints.cc b/runtime/entrypoints/quick/quick_stub_entrypoints.cc
deleted file mode 100644
index d78bbf3..0000000
--- a/runtime/entrypoints/quick/quick_stub_entrypoints.cc
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "callee_save_frame.h"
-#include "class_linker-inl.h"
-#include "dex_file-inl.h"
-#include "dex_instruction-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/abstract_method-inl.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/object-inl.h"
-#include "object_utils.h"
-#include "scoped_thread_state_change.h"
-
-// Architecture specific assembler helper to deliver exception.
-extern "C" void art_quick_deliver_exception_from_code(void*);
-
-namespace art {
-
-// Lazily resolve a method for quick. Called by stub code.
-extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
-                                                    mirror::Object* receiver,
-                                                    mirror::AbstractMethod** sp, Thread* thread)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if defined(__arm__)
-  // On entry the stack pointed by sp is:
-  // | argN       |  |
-  // | ...        |  |
-  // | arg4       |  |
-  // | arg3 spill |  |  Caller's frame
-  // | arg2 spill |  |
-  // | arg1 spill |  |
-  // | Method*    | ---
-  // | LR         |
-  // | ...        |    callee saves
-  // | R3         |    arg3
-  // | R2         |    arg2
-  // | R1         |    arg1
-  // | R0         |
-  // | Method*    |  <- sp
-  DCHECK_EQ(48U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
-  mirror::AbstractMethod** caller_sp = reinterpret_cast<mirror::AbstractMethod**>(reinterpret_cast<byte*>(sp) + 48);
-  uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + kPointerSize);
-  uint32_t pc_offset = 10;
-  uintptr_t caller_pc = regs[pc_offset];
-#elif defined(__i386__)
-  // On entry the stack pointed by sp is:
-  // | argN        |  |
-  // | ...         |  |
-  // | arg4        |  |
-  // | arg3 spill  |  |  Caller's frame
-  // | arg2 spill  |  |
-  // | arg1 spill  |  |
-  // | Method*     | ---
-  // | Return      |
-  // | EBP,ESI,EDI |    callee saves
-  // | EBX         |    arg3
-  // | EDX         |    arg2
-  // | ECX         |    arg1
-  // | EAX/Method* |  <- sp
-  DCHECK_EQ(32U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
-  mirror::AbstractMethod** caller_sp = reinterpret_cast<mirror::AbstractMethod**>(reinterpret_cast<byte*>(sp) + 32);
-  uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
-  uintptr_t caller_pc = regs[7];
-#elif defined(__mips__)
-  // On entry the stack pointed by sp is:
-  // | argN       |  |
-  // | ...        |  |
-  // | arg4       |  |
-  // | arg3 spill |  |  Caller's frame
-  // | arg2 spill |  |
-  // | arg1 spill |  |
-  // | Method*    | ---
-  // | RA         |
-  // | ...        |    callee saves
-  // | A3         |    arg3
-  // | A2         |    arg2
-  // | A1         |    arg1
-  // | A0/Method* |  <- sp
-  DCHECK_EQ(64U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
-  mirror::AbstractMethod** caller_sp = reinterpret_cast<mirror::AbstractMethod**>(reinterpret_cast<byte*>(sp) + 64);
-  uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp));
-  uint32_t pc_offset = 15;
-  uintptr_t caller_pc = regs[pc_offset];
-#else
-  UNIMPLEMENTED(FATAL);
-  mirror::AbstractMethod** caller_sp = NULL;
-  uintptr_t* regs = NULL;
-  uintptr_t caller_pc = 0;
-#endif
-  FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsAndArgs);
-  // Start new JNI local reference state
-  JNIEnvExt* env = thread->GetJniEnv();
-  ScopedObjectAccessUnchecked soa(env);
-  ScopedJniEnvLocalRefState env_state(env);
-
-  // Compute details about the called method (avoid GCs)
-  ClassLinker* linker = Runtime::Current()->GetClassLinker();
-  mirror::AbstractMethod* caller = *caller_sp;
-  InvokeType invoke_type;
-  uint32_t dex_method_idx;
-#if !defined(__i386__)
-  const char* shorty;
-  uint32_t shorty_len;
-#endif
-  if (called->IsRuntimeMethod()) {
-    uint32_t dex_pc = caller->ToDexPc(caller_pc);
-    const DexFile::CodeItem* code = MethodHelper(caller).GetCodeItem();
-    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
-    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
-    Instruction::Code instr_code = instr->Opcode();
-    bool is_range;
-    switch (instr_code) {
-      case Instruction::INVOKE_DIRECT:
-        invoke_type = kDirect;
-        is_range = false;
-        break;
-      case Instruction::INVOKE_DIRECT_RANGE:
-        invoke_type = kDirect;
-        is_range = true;
-        break;
-      case Instruction::INVOKE_STATIC:
-        invoke_type = kStatic;
-        is_range = false;
-        break;
-      case Instruction::INVOKE_STATIC_RANGE:
-        invoke_type = kStatic;
-        is_range = true;
-        break;
-      case Instruction::INVOKE_SUPER:
-        invoke_type = kSuper;
-        is_range = false;
-        break;
-      case Instruction::INVOKE_SUPER_RANGE:
-        invoke_type = kSuper;
-        is_range = true;
-        break;
-      case Instruction::INVOKE_VIRTUAL:
-        invoke_type = kVirtual;
-        is_range = false;
-        break;
-      case Instruction::INVOKE_VIRTUAL_RANGE:
-        invoke_type = kVirtual;
-        is_range = true;
-        break;
-      case Instruction::INVOKE_INTERFACE:
-        invoke_type = kInterface;
-        is_range = false;
-        break;
-      case Instruction::INVOKE_INTERFACE_RANGE:
-        invoke_type = kInterface;
-        is_range = true;
-        break;
-      default:
-        LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
-        // Avoid used uninitialized warnings.
-        invoke_type = kDirect;
-        is_range = false;
-    }
-    dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
-#if !defined(__i386__)
-    shorty = linker->MethodShorty(dex_method_idx, caller, &shorty_len);
-#endif
-  } else {
-    invoke_type = kStatic;
-    dex_method_idx = called->GetDexMethodIndex();
-#if !defined(__i386__)
-    MethodHelper mh(called);
-    shorty = mh.GetShorty();
-    shorty_len = mh.GetShortyLength();
-#endif
-  }
-#if !defined(__i386__)
-  // Discover shorty (avoid GCs)
-  size_t args_in_regs = 0;
-  for (size_t i = 1; i < shorty_len; i++) {
-    char c = shorty[i];
-    args_in_regs = args_in_regs + (c == 'J' || c == 'D' ? 2 : 1);
-    if (args_in_regs > 3) {
-      args_in_regs = 3;
-      break;
-    }
-  }
-  // Place into local references incoming arguments from the caller's register arguments
-  size_t cur_arg = 1;   // skip method_idx in R0, first arg is in R1
-  if (invoke_type != kStatic) {
-    mirror::Object* obj = reinterpret_cast<mirror::Object*>(regs[cur_arg]);
-    cur_arg++;
-    if (args_in_regs < 3) {
-      // If we thought we had fewer than 3 arguments in registers, account for the receiver
-      args_in_regs++;
-    }
-    soa.AddLocalReference<jobject>(obj);
-  }
-  size_t shorty_index = 1;  // skip return value
-  // Iterate while arguments and arguments in registers (less 1 from cur_arg which is offset to skip
-  // R0)
-  while ((cur_arg - 1) < args_in_regs && shorty_index < shorty_len) {
-    char c = shorty[shorty_index];
-    shorty_index++;
-    if (c == 'L') {
-      mirror::Object* obj = reinterpret_cast<mirror::Object*>(regs[cur_arg]);
-      soa.AddLocalReference<jobject>(obj);
-    }
-    cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1);
-  }
-  // Place into local references incoming arguments from the caller's stack arguments
-  cur_arg += pc_offset + 1;  // skip LR/RA, Method* and spills for R1-R3/A1-A3 and callee saves
-  while (shorty_index < shorty_len) {
-    char c = shorty[shorty_index];
-    shorty_index++;
-    if (c == 'L') {
-      mirror::Object* obj = reinterpret_cast<mirror::Object*>(regs[cur_arg]);
-      soa.AddLocalReference<jobject>(obj);
-    }
-    cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1);
-  }
-#endif
-  // Resolve method filling in dex cache
-  if (called->IsRuntimeMethod()) {
-    called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
-  }
-  const void* code = NULL;
-  if (LIKELY(!thread->IsExceptionPending())) {
-    // Incompatible class change should have been handled in resolve method.
-    CHECK(!called->CheckIncompatibleClassChange(invoke_type));
-    // Refine called method based on receiver.
-    if (invoke_type == kVirtual) {
-      called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
-    } else if (invoke_type == kInterface) {
-      called = receiver->GetClass()->FindVirtualMethodForInterface(called);
-    }
-    // Ensure that the called method's class is initialized.
-    mirror::Class* called_class = called->GetDeclaringClass();
-    linker->EnsureInitialized(called_class, true, true);
-    if (LIKELY(called_class->IsInitialized())) {
-      code = called->GetEntryPointFromCompiledCode();
-    } else if (called_class->IsInitializing()) {
-      if (invoke_type == kStatic) {
-        // Class is still initializing, go to oat and grab code (trampoline must be left in place
-        // until class is initialized to stop races between threads).
-        code = linker->GetOatCodeFor(called);
-      } else {
-        // No trampoline for non-static methods.
-        code = called->GetEntryPointFromCompiledCode();
-      }
-    } else {
-      DCHECK(called_class->IsErroneous());
-    }
-  }
-  if (UNLIKELY(code == NULL)) {
-    // Something went wrong in ResolveMethod or EnsureInitialized,
-    // go into deliver exception with the pending exception in r0
-    CHECK(thread->IsExceptionPending());
-    code = reinterpret_cast<void*>(art_quick_deliver_exception_from_code);
-    regs[0] = reinterpret_cast<uintptr_t>(thread->GetException(NULL));
-    thread->ClearException();
-  } else {
-    // Expect class to at least be initializing.
-    DCHECK(called->GetDeclaringClass()->IsInitializing());
-    // Don't want infinite recursion.
-    DCHECK(code != GetResolutionTrampoline(linker));
-    // Set up entry into main method
-    regs[0] = reinterpret_cast<uintptr_t>(called);
-  }
-  return code;
-}
-
-// Called by the abstract method error stub.
-extern "C" void artThrowAbstractMethodErrorFromCode(mirror::AbstractMethod* method, Thread* self,
-                                                    mirror::AbstractMethod** sp)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if !defined(ART_USE_PORTABLE_COMPILER)
-  FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
-#else
-  UNUSED(sp);
-#endif
-  ThrowAbstractMethodError(method);
-  self->QuickDeliverException();
-}
-
-}  // namespace art
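
The resolution logic deleted above is rebuilt inside quick_trampoline_entrypoints.cc. Its opcode switch recovers the invoke type and range flag from the call site; since the five dex invoke opcodes are contiguous (0x6e-0x72), with the /range forms in a second contiguous run (0x74-0x78), the same decoding can be table-driven, as this compact standalone sketch shows (enum names mirror the ones above):

#include <cstdio>

enum InvokeType { kStatic, kDirect, kVirtual, kSuper, kInterface };

struct DecodedInvoke {
  InvokeType type;
  bool is_range;
};

static bool DecodeInvoke(int opcode, DecodedInvoke* out) {
  // Order matches the dex opcode layout: virtual, super, direct, static,
  // interface.
  static const InvokeType kKinds[] = {kVirtual, kSuper, kDirect, kStatic, kInterface};
  if (opcode >= 0x6e && opcode <= 0x72) {
    *out = {kKinds[opcode - 0x6e], false};
    return true;
  }
  if (opcode >= 0x74 && opcode <= 0x78) {
    *out = {kKinds[opcode - 0x74], true};
    return true;
  }
  return false;  // not an invoke; the trampoline treats this as fatal
}

int main() {
  DecodedInvoke d;
  if (DecodeInvoke(0x77, &d)) {  // invoke-static/range
    printf("type=%d range=%d\n", d.type, d.is_range);
  }
  return 0;
}
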
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
new file mode 100644
index 0000000..9bf02e8
--- /dev/null
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -0,0 +1,558 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "dex_file-inl.h"
+#include "dex_instruction-inl.h"
+#include "interpreter/interpreter.h"
+#include "invoke_arg_array_builder.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "object_utils.h"
+#include "runtime.h"
+
+namespace art {
+
+// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
+class QuickArgumentVisitor {
+ public:
+// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame.
+// Size of Runtime::kRefAndArgs callee save frame.
+// Size of Method* and register parameters in out stack arguments.
+#if defined(__arm__)
+  // The callee save frame is pointed to by SP.
+  // | argN       |  |
+  // | ...        |  |
+  // | arg4       |  |
+  // | arg3 spill |  |  Caller's frame
+  // | arg2 spill |  |
+  // | arg1 spill |  |
+  // | Method*    | ---
+  // | LR         |
+  // | ...        |    callee saves
+  // | R3         |    arg3
+  // | R2         |    arg2
+  // | R1         |    arg1
+  // | R0         |
+  // | Method*    |  <- sp
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__LR_OFFSET 44
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48
+#define QUICK_STACK_ARG_SKIP 16
+#elif defined(__mips__)
+  // The callee save frame is pointed to by SP.
+  // | argN       |  |
+  // | ...        |  |
+  // | arg4       |  |
+  // | arg3 spill |  |  Caller's frame
+  // | arg2 spill |  |
+  // | arg1 spill |  |
+  // | Method*    | ---
+  // | RA         |
+  // | ...        |    callee saves
+  // | A3         |    arg3
+  // | A2         |    arg2
+  // | A1         |    arg1
+  // | A0/Method* |  <- sp
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__LR_OFFSET 60
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64
+#define QUICK_STACK_ARG_SKIP 16
+#elif defined(__i386__)
+  // The callee save frame is pointed to by SP.
+  // | argN        |  |
+  // | ...         |  |
+  // | arg4        |  |
+  // | arg3 spill  |  |  Caller's frame
+  // | arg2 spill  |  |
+  // | arg1 spill  |  |
+  // | Method*     | ---
+  // | Return      |
+  // | EBP,ESI,EDI |    callee saves
+  // | EBX         |    arg3
+  // | EDX         |    arg2
+  // | ECX         |    arg1
+  // | EAX/Method* |  <- sp
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__LR_OFFSET 28
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32
+#define QUICK_STACK_ARG_SKIP 16
+#else
+#error "Unsupported architecture"
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__LR_OFFSET 0
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0
+#define QUICK_STACK_ARG_SKIP 0
+#endif
+
+  static mirror::AbstractMethod* GetCallingMethod(mirror::AbstractMethod** sp) {
+    byte* previous_sp = reinterpret_cast<byte*>(sp) +
+        QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE;
+    return *reinterpret_cast<mirror::AbstractMethod**>(previous_sp);
+  }
+
+  static uintptr_t GetCallingPc(mirror::AbstractMethod** sp) {
+    byte* lr = reinterpret_cast<byte*>(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__LR_OFFSET;
+    return *reinterpret_cast<uintptr_t*>(lr);
+  }
+
+  QuickArgumentVisitor(mirror::AbstractMethod** sp, bool is_static,
+                       const char* shorty, uint32_t shorty_len)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+    is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
+    args_in_regs_(ComputeArgsInRegs(is_static, shorty, shorty_len)),
+    num_params_((is_static ? 0 : 1) + shorty_len - 1),  // +1 for this, -1 for return type
+    reg_args_(reinterpret_cast<byte*>(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET),
+    stack_args_(reinterpret_cast<byte*>(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE
+                + QUICK_STACK_ARG_SKIP),
+    cur_args_(reg_args_),
+    cur_arg_index_(0),
+    param_index_(0),
+    is_split_long_or_double_(false) {
+    DCHECK_EQ(static_cast<size_t>(QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE),
+              Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
+  }
+
+  virtual ~QuickArgumentVisitor() {}
+
+  virtual void Visit() = 0;
+
+  Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    size_t index = param_index_;
+    if (is_static_) {
+      index++;  // 0th argument must skip return value at start of the shorty
+    } else if (index == 0) {
+      return Primitive::kPrimNot;
+    }
+    CHECK_LT(index, shorty_len_);
+    return Primitive::GetType(shorty_[index]);
+  }
+
+  byte* GetParamAddress() const {
+    return cur_args_ + (cur_arg_index_ * kPointerSize);
+  }
+
+  bool IsSplitLongOrDouble() const {
+    return is_split_long_or_double_;
+  }
+
+  bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetParamPrimitiveType() == Primitive::kPrimNot;
+  }
+
+  bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    Primitive::Type type = GetParamPrimitiveType();
+    return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
+  }
+
+  uint64_t ReadSplitLongParam() const {
+    DCHECK(IsSplitLongOrDouble());
+    uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
+    uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
+    return (low_half & 0xffffffffULL) | (high_half << 32);
+  }
+
+  void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    for (cur_arg_index_ = 0;  cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) {
+      is_split_long_or_double_ = (cur_arg_index_ == 2) && IsParamALongOrDouble();
+      Visit();
+      cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
+      param_index_++;
+    }
+    cur_args_ = stack_args_;
+    cur_arg_index_ = is_split_long_or_double_ ? 1 : 0;
+    is_split_long_or_double_ = false;
+    while (param_index_ < num_params_) {
+      Visit();
+      cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
+      param_index_++;
+    }
+  }
+
+ private:
+  static size_t ComputeArgsInRegs(bool is_static, const char* shorty, uint32_t shorty_len)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    size_t args_in_regs = (is_static ? 0 : 1);
+    for (size_t i = 0; i < shorty_len; i++) {
+      char s = shorty[i];
+      if (s == 'J' || s == 'D') {
+        args_in_regs += 2;
+      } else {
+        args_in_regs++;
+      }
+      if (args_in_regs > 3) {
+        args_in_regs = 3;
+        break;
+      }
+    }
+    return args_in_regs;
+  }
+
+  const bool is_static_;
+  const char* const shorty_;
+  const uint32_t shorty_len_;
+  const size_t args_in_regs_;
+  const size_t num_params_;
+  byte* const reg_args_;
+  byte* const stack_args_;
+  byte* cur_args_;
+  size_t cur_arg_index_;
+  size_t param_index_;
+  // Does a 64bit parameter straddle the register and stack arguments?
+  bool is_split_long_or_double_;
+};
+
+// Visits arguments on the stack placing them into the shadow frame.
+class BuildShadowFrameVisitor : public QuickArgumentVisitor {
+ public:
+  BuildShadowFrameVisitor(mirror::AbstractMethod** sp, bool is_static, const char* shorty,
+                          uint32_t shorty_len, ShadowFrame& sf, size_t first_arg_reg) :
+    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
+
+  virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    Primitive::Type type = GetParamPrimitiveType();
+    switch (type) {
+      case Primitive::kPrimLong:  // Fall-through.
+      case Primitive::kPrimDouble:
+        if (IsSplitLongOrDouble()) {
+          sf_.SetVRegLong(cur_reg_, ReadSplitLongParam());
+        } else {
+          sf_.SetVRegLong(cur_reg_, *reinterpret_cast<jlong*>(GetParamAddress()));
+        }
+        ++cur_reg_;
+        break;
+      case Primitive::kPrimNot:
+        sf_.SetVRegReference(cur_reg_, *reinterpret_cast<mirror::Object**>(GetParamAddress()));
+        break;
+      case Primitive::kPrimBoolean:  // Fall-through.
+      case Primitive::kPrimByte:     // Fall-through.
+      case Primitive::kPrimChar:     // Fall-through.
+      case Primitive::kPrimShort:    // Fall-through.
+      case Primitive::kPrimInt:      // Fall-through.
+      case Primitive::kPrimFloat:
+        sf_.SetVReg(cur_reg_, *reinterpret_cast<jint*>(GetParamAddress()));
+        break;
+      case Primitive::kPrimVoid:
+        LOG(FATAL) << "UNREACHABLE";
+        break;
+    }
+    ++cur_reg_;
+  }
+
+ private:
+  ShadowFrame& sf_;
+  size_t cur_reg_;
+
+  DISALLOW_COPY_AND_ASSIGN(BuildShadowFrameVisitor);
+};
+
+extern "C" uint64_t artQuickToInterpreterBridge(mirror::AbstractMethod* method, Thread* self,
+                                                mirror::AbstractMethod** sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // Ensure we don't get thread suspension until the object arguments are safely in the shadow
+  // frame.
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
+
+  if (method->IsAbstract()) {
+    ThrowAbstractMethodError(method);
+    return 0;
+  } else {
+    const char* old_cause = self->StartAssertNoThreadSuspension("Building interpreter shadow frame");
+    MethodHelper mh(method);
+    const DexFile::CodeItem* code_item = mh.GetCodeItem();
+    uint16_t num_regs = code_item->registers_size_;
+    void* memory = alloca(ShadowFrame::ComputeSize(num_regs));
+    ShadowFrame* shadow_frame(ShadowFrame::Create(num_regs, NULL,  // No last shadow coming from quick.
+                                                  method, 0, memory));
+    size_t first_arg_reg = code_item->registers_size_ - code_item->ins_size_;
+    BuildShadowFrameVisitor shadow_frame_builder(sp, mh.IsStatic(), mh.GetShorty(),
+                                                 mh.GetShortyLength(),
+                                                 *shadow_frame, first_arg_reg);
+    shadow_frame_builder.VisitArguments();
+    // Push a transition back into managed code onto the linked list in thread.
+    ManagedStack fragment;
+    self->PushManagedStackFragment(&fragment);
+    self->PushShadowFrame(shadow_frame);
+    self->EndAssertNoThreadSuspension(old_cause);
+
+    if (method->IsStatic() && !method->GetDeclaringClass()->IsInitializing()) {
+      // Ensure static method's class is initialized.
+      if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(method->GetDeclaringClass(),
+                                                                   true, true)) {
+        DCHECK(Thread::Current()->IsExceptionPending());
+        self->PopManagedStackFragment(fragment);
+        return 0;
+      }
+    }
+
+    JValue result = interpreter::EnterInterpreterFromStub(self, mh, code_item, *shadow_frame);
+    // Pop transition.
+    self->PopManagedStackFragment(fragment);
+    return result.GetJ();
+  }
+}
+
+// Visits arguments on the stack, placing them into the args vector; Object* arguments are
+// converted to jobjects.
+class BuildQuickArgumentVisitor : public QuickArgumentVisitor {
+ public:
+  BuildQuickArgumentVisitor(mirror::AbstractMethod** sp, bool is_static, const char* shorty,
+                            uint32_t shorty_len, ScopedObjectAccessUnchecked* soa,
+                            std::vector<jvalue>* args) :
+    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
+
+  virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    jvalue val;
+    Primitive::Type type = GetParamPrimitiveType();
+    switch (type) {
+      case Primitive::kPrimNot: {
+        mirror::Object* obj = *reinterpret_cast<mirror::Object**>(GetParamAddress());
+        val.l = soa_->AddLocalReference<jobject>(obj);
+        break;
+      }
+      case Primitive::kPrimLong:  // Fall-through.
+      case Primitive::kPrimDouble:
+        if (IsSplitLongOrDouble()) {
+          val.j = ReadSplitLongParam();
+        } else {
+          val.j = *reinterpret_cast<jlong*>(GetParamAddress());
+        }
+        break;
+      case Primitive::kPrimBoolean:  // Fall-through.
+      case Primitive::kPrimByte:     // Fall-through.
+      case Primitive::kPrimChar:     // Fall-through.
+      case Primitive::kPrimShort:    // Fall-through.
+      case Primitive::kPrimInt:      // Fall-through.
+      case Primitive::kPrimFloat:
+        val.i = *reinterpret_cast<jint*>(GetParamAddress());
+        break;
+      case Primitive::kPrimVoid:
+        LOG(FATAL) << "UNREACHABLE";
+        val.j = 0;
+        break;
+    }
+    args_->push_back(val);
+  }
+
+ private:
+  ScopedObjectAccessUnchecked* soa_;
+  std::vector<jvalue>* args_;
+
+  DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
+};
+
+// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
+// which is responsible for recording callee save registers. We explicitly place the incoming
+// reference arguments into jobjects (so they survive GC), then invoke the invocation handler, a
+// field within the proxy object, which boxes the primitive arguments and deals with error cases.
+extern "C" uint64_t artQuickProxyInvokeHandler(mirror::AbstractMethod* proxy_method,
+                                               mirror::Object* receiver,
+                                               Thread* self, mirror::AbstractMethod** sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
+  const char* old_cause =
+      self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
+  // Register the top of the managed stack, making the stack crawlable.
+  DCHECK_EQ(*sp, proxy_method);
+  self->SetTopOfStack(sp, 0);
+  DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
+            Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
+  self->VerifyStack();
+  // Start new JNI local reference state.
+  JNIEnvExt* env = self->GetJniEnv();
+  ScopedObjectAccessUnchecked soa(env);
+  ScopedJniEnvLocalRefState env_state(env);
+  // Create a local reference copy of the receiver.
+  jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
+
+  // Place the arguments into the args vector, then remove the receiver.
+  MethodHelper proxy_mh(proxy_method);
+  std::vector<jvalue> args;
+  BuildQuickArgumentVisitor local_ref_visitor(sp, proxy_mh.IsStatic(), proxy_mh.GetShorty(),
+                                              proxy_mh.GetShortyLength(), &soa, &args);
+  local_ref_visitor.VisitArguments();
+  args.erase(args.begin());
+
+  // Convert proxy method into expected interface method.
+  mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod();
+  DCHECK(interface_method != NULL);
+  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
+  jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
+
+  // All naked Object*s should now be in jobjects, so it's safe to go into the main invoke code
+  // that performs allocations.
+  self->EndAssertNoThreadSuspension(old_cause);
+  JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
+                                               rcvr_jobj, interface_method_jobj, args);
+  return result.GetJ();
+}
+
+// Read object references held in arguments from quick frames and place them in JNI local
+// references so they don't get garbage collected.
+class RememberFoGcArgumentVisitor : public QuickArgumentVisitor {
+ public:
+  RememberFoGcArgumentVisitor(mirror::AbstractMethod** sp, bool is_static, const char* shorty,
+                              uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
+    QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
+
+  virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    if (IsParamAReference()) {
+      soa_->AddLocalReference<jobject>(*reinterpret_cast<mirror::Object**>(GetParamAddress()));
+    }
+  }
+
+ private:
+  ScopedObjectAccessUnchecked* soa_;
+
+  DISALLOW_COPY_AND_ASSIGN(RememberFoGcArgumentVisitor);
+};
+
+// Lazily resolve a method for quick. Called by stub code.
+extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
+                                                    mirror::Object* receiver,
+                                                    Thread* thread, mirror::AbstractMethod** sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsAndArgs);
+  // Start new JNI local reference state
+  JNIEnvExt* env = thread->GetJniEnv();
+  ScopedObjectAccessUnchecked soa(env);
+  ScopedJniEnvLocalRefState env_state(env);
+  const char* old_cause = thread->StartAssertNoThreadSuspension("Quick method resolution set up");
+
+  // Compute details about the called method (avoid GCs)
+  ClassLinker* linker = Runtime::Current()->GetClassLinker();
+  mirror::AbstractMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
+  InvokeType invoke_type;
+  const DexFile* dex_file;
+  uint32_t dex_method_idx;
+  if (called->IsRuntimeMethod()) {
+    uint32_t dex_pc = caller->ToDexPc(QuickArgumentVisitor::GetCallingPc(sp));
+    const DexFile::CodeItem* code;
+    {
+      MethodHelper mh(caller);
+      dex_file = &mh.GetDexFile();
+      code = mh.GetCodeItem();
+    }
+    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
+    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
+    Instruction::Code instr_code = instr->Opcode();
+    bool is_range;
+    switch (instr_code) {
+      case Instruction::INVOKE_DIRECT:
+        invoke_type = kDirect;
+        is_range = false;
+        break;
+      case Instruction::INVOKE_DIRECT_RANGE:
+        invoke_type = kDirect;
+        is_range = true;
+        break;
+      case Instruction::INVOKE_STATIC:
+        invoke_type = kStatic;
+        is_range = false;
+        break;
+      case Instruction::INVOKE_STATIC_RANGE:
+        invoke_type = kStatic;
+        is_range = true;
+        break;
+      case Instruction::INVOKE_SUPER:
+        invoke_type = kSuper;
+        is_range = false;
+        break;
+      case Instruction::INVOKE_SUPER_RANGE:
+        invoke_type = kSuper;
+        is_range = true;
+        break;
+      case Instruction::INVOKE_VIRTUAL:
+        invoke_type = kVirtual;
+        is_range = false;
+        break;
+      case Instruction::INVOKE_VIRTUAL_RANGE:
+        invoke_type = kVirtual;
+        is_range = true;
+        break;
+      case Instruction::INVOKE_INTERFACE:
+        invoke_type = kInterface;
+        is_range = false;
+        break;
+      case Instruction::INVOKE_INTERFACE_RANGE:
+        invoke_type = kInterface;
+        is_range = true;
+        break;
+      default:
+        LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
+        // Avoid "used uninitialized" compiler warnings.
+        invoke_type = kDirect;
+        is_range = false;
+    }
+    dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
+  } else {
+    invoke_type = kStatic;
+    dex_file = &MethodHelper(called).GetDexFile();
+    dex_method_idx = called->GetDexMethodIndex();
+  }
+  uint32_t shorty_len;
+  const char* shorty =
+      dex_file->GetMethodShorty(dex_file->GetMethodId(dex_method_idx), &shorty_len);
+  RememberFoGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
+  visitor.VisitArguments();
+  thread->EndAssertNoThreadSuspension(old_cause);
+  // Resolve method filling in dex cache.
+  if (called->IsRuntimeMethod()) {
+    called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
+  }
+  const void* code = NULL;
+  if (LIKELY(!thread->IsExceptionPending())) {
+    // Incompatible class change should have been handled in resolve method.
+    CHECK(!called->CheckIncompatibleClassChange(invoke_type));
+    // Refine called method based on receiver.
+    if (invoke_type == kVirtual) {
+      called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
+    } else if (invoke_type == kInterface) {
+      called = receiver->GetClass()->FindVirtualMethodForInterface(called);
+    }
+    // Ensure that the called method's class is initialized.
+    mirror::Class* called_class = called->GetDeclaringClass();
+    linker->EnsureInitialized(called_class, true, true);
+    if (LIKELY(called_class->IsInitialized())) {
+      code = called->GetEntryPointFromCompiledCode();
+    } else if (called_class->IsInitializing()) {
+      if (invoke_type == kStatic) {
+        // Class is still initializing, go to oat and grab code (trampoline must be left in place
+        // until class is initialized to stop races between threads).
+        code = linker->GetOatCodeFor(called);
+      } else {
+        // No trampoline for non-static methods.
+        code = called->GetEntryPointFromCompiledCode();
+      }
+    } else {
+      DCHECK(called_class->IsErroneous());
+    }
+  }
+  CHECK_EQ(code == NULL, thread->IsExceptionPending());
+#ifdef MOVING_GARBAGE_COLLECTOR
+  // TODO: locally saved objects may have moved during a GC during resolution. Need to update the
+  //       registers so that the stale objects aren't passed to the method we've resolved.
+  UNIMPLEMENTED(WARNING);
+#endif
+  // Place the called method in the callee-save frame to serve as the quick method's first argument.
+  *sp = called;
+  return code;
+}
+
+}  // namespace art
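
An aside on the invoke switch in artQuickResolutionTrampoline: dex packs the invoke opcodes contiguously (0x6e..0x72 for the 35c forms, 0x74..0x78 for the /range forms, both ordered virtual, super, direct, static, interface), so the mapping could equally be table-driven. A sketch relying on that layout, assuming instr_code has already been verified to be one of the ten invoke opcodes:

// Table-driven equivalent of the opcode switch (illustrative; the explicit
// switch above is what ships and is friendlier to switch-coverage warnings).
static const InvokeType kInvokeTypes[] = { kVirtual, kSuper, kDirect, kStatic, kInterface };

bool is_range = (instr_code >= Instruction::INVOKE_VIRTUAL_RANGE);
int base = is_range ? Instruction::INVOKE_VIRTUAL_RANGE : Instruction::INVOKE_VIRTUAL;
InvokeType invoke_type = kInvokeTypes[instr_code - base];
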
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index f8f2773..fb0f61b 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -41,10 +41,9 @@
   return success;
 }
 
-template <typename Visitor, typename FingerVisitor>
+template <typename Visitor>
 inline void CardTable::Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end,
-                            const Visitor& visitor, const FingerVisitor& finger_visitor,
-                            const byte minimum_age) const {
+                            const Visitor& visitor, const byte minimum_age) const {
   DCHECK(bitmap->HasAddress(scan_begin));
   DCHECK(bitmap->HasAddress(scan_end - 1));  // scan_end is the byte after the last byte we scan.
   byte* card_cur = CardFromAddr(scan_begin);
@@ -57,7 +56,7 @@
     if (*card_cur >= minimum_age) {
       uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
       uintptr_t end = start + kCardSize;
-      bitmap->VisitMarkedRange(start, end, visitor, finger_visitor);
+      bitmap->VisitMarkedRange(start, end, visitor);
     }
     ++card_cur;
   }
@@ -87,7 +86,7 @@
         << "card " << static_cast<size_t>(card_byte) << " word " << (start_word & 0xFF);
         uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card));
         uintptr_t end = start + kCardSize;
-        bitmap->VisitMarkedRange(start, end, visitor, finger_visitor);
+        bitmap->VisitMarkedRange(start, end, visitor);
       }
       start_word >>= 8;
     }
@@ -100,7 +99,7 @@
     if (*card_cur >= minimum_age) {
       uintptr_t start = reinterpret_cast<uintptr_t>(AddrFromCard(card_cur));
       uintptr_t end = start + kCardSize;
-      bitmap->VisitMarkedRange(start, end, visitor, finger_visitor);
+      bitmap->VisitMarkedRange(start, end, visitor);
     }
     ++card_cur;
   }
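
For orientation before the header change below: Scan() walks one card-table byte per kCardSize-byte window of the heap, and the card/address mapping is plain shift arithmetic. A sketch consistent with CardTable's implementation (kCardShift and biased_begin_ do not appear in this patch and are assumed from the surrounding sources; kCardSize == 1 << kCardShift):

// Each card-table byte covers one kCardSize-byte window of the heap.
// biased_begin_ is pre-offset so the write barrier is a single shift and add.
byte* CardFromAddr(const void* addr) const {
  return biased_begin_ + (reinterpret_cast<uintptr_t>(addr) >> kCardShift);
}
void* AddrFromCard(const byte* card) const {
  uintptr_t offset = card - biased_begin_;
  return reinterpret_cast<void*>(offset << kCardShift);
}
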
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 1acaf5b..f030626 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -101,9 +101,8 @@
 
   // For every dirty card of at least minimum age between begin and end, invoke the visitor with
   // the specified argument.
-  template <typename Visitor, typename FingerVisitor>
-  void Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end,
-            const Visitor& visitor, const FingerVisitor& finger_visitor,
+  template <typename Visitor>
+  void Scan(SpaceBitmap* bitmap, byte* scan_begin, byte* scan_end, const Visitor& visitor,
             const byte minimum_age = kCardDirty) const
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index f6cf2b5..0524ccb 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -30,7 +30,7 @@
   for (It it = continuous_space_bitmaps_.begin(), end = continuous_space_bitmaps_.end();
       it != end; ++it) {
     SpaceBitmap* bitmap = *it;
-    bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor, VoidFunctor());
+    bitmap->VisitMarkedRange(bitmap->HeapBegin(), bitmap->HeapLimit(), visitor);
   }
   // TODO: C++0x auto
   typedef SpaceSetMapVector::iterator It2;
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 718dcf0..0363acb 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -261,7 +261,7 @@
       space::ContinuousSpace* space =
           heap->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
       SpaceBitmap* live_bitmap = space->GetLiveBitmap();
-      live_bitmap->VisitMarkedRange(start, end, visitor, VoidFunctor());
+      live_bitmap->VisitMarkedRange(start, end, visitor);
     }
   }
 }
@@ -307,12 +307,11 @@
     uintptr_t end = start + CardTable::kCardSize;
     SpaceBitmap* live_bitmap =
         heap->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false)->GetLiveBitmap();
-    live_bitmap->VisitMarkedRange(start, end, visitor, VoidFunctor());
+    live_bitmap->VisitMarkedRange(start, end, visitor);
 
     // Update the corresponding references for the card.
     // TODO: C++0x auto
-    SafeMap<const byte*, std::vector<const Object*> >::iterator
-        found = references_.find(card);
+    SafeMap<const byte*, std::vector<const Object*> >::iterator found = references_.find(card);
     if (found == references_.end()) {
       if (cards_references.empty()) {
         // No reason to add empty array.
@@ -364,7 +363,7 @@
     space::ContinuousSpace* cur_space =
         heap_->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
     accounting::SpaceBitmap* cur_live_bitmap = cur_space->GetLiveBitmap();
-    cur_live_bitmap->VisitMarkedRange(start, end, visitor, VoidFunctor());
+    cur_live_bitmap->VisitMarkedRange(start, end, visitor);
     for (++it; it != cc_end; ++it) {
       card = *it;
       start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
@@ -373,7 +372,7 @@
         cur_space = heap_->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
         cur_live_bitmap = cur_space->GetLiveBitmap();
       }
-      cur_live_bitmap->VisitMarkedRange(start, end, visitor, VoidFunctor());
+      cur_live_bitmap->VisitMarkedRange(start, end, visitor);
     }
   }
 }
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index a4cfe3c..1dde18d 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -53,13 +53,10 @@
   return (bitmap_begin_[OffsetToIndex(offset)] & OffsetToMask(offset)) != 0;
 }
 
-template <typename Visitor, typename FingerVisitor>
+template <typename Visitor>
 void SpaceBitmap::VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
-                                   const Visitor& visitor,
-                                   const FingerVisitor& finger_visitor) const {
+                                   const Visitor& visitor) const {
   DCHECK_LT(visit_begin, visit_end);
-
-  const size_t word_span = kAlignment * kBitsPerWord;  // Equals IndexToOffset(1).
   const size_t bit_index_start = (visit_begin - heap_begin_) / kAlignment;
   const size_t bit_index_end = (visit_end - heap_begin_ - 1) / kAlignment;
 
@@ -79,7 +76,6 @@
   // If word_start == word_end then handle this case at the same place we handle the right edge.
   if (edge_word != 0 && word_start < word_end) {
     uintptr_t ptr_base = IndexToOffset(word_start) + heap_begin_;
-    finger_visitor(reinterpret_cast<void*>(ptr_base + word_span));
     do {
       const size_t shift = CLZ(edge_word);
       mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
@@ -93,7 +89,6 @@
     size_t w = bitmap_begin_[i];
     if (w != 0) {
       uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
-      finger_visitor(reinterpret_cast<void*>(ptr_base + word_span));
       do {
         const size_t shift = CLZ(w);
         mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
@@ -114,7 +109,6 @@
   // Bits that we trim off the right.
   edge_word &= ~((static_cast<size_t>(kWordHighBitMask) >> right_bits) - 1);
   uintptr_t ptr_base = IndexToOffset(word_end) + heap_begin_;
-  finger_visitor(reinterpret_cast<void*>(ptr_base + word_span));
   while (edge_word != 0) {
     const size_t shift = CLZ(edge_word);
     mirror::Object* obj = reinterpret_cast<mirror::Object*>(ptr_base + shift * kAlignment);
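
All three loops in VisitMarkedRange share one idiom: take the leading set bit with CLZ, visit the object that bit denotes, clear the bit, repeat. A self-contained rendering of the idiom (standard C++ plus the GCC builtin; the word type is unsigned long to match __builtin_clzl):

// Visit the set bits of a word from the most-significant end down, as the
// bitmap scan above does. 'visit' receives the bit index counted from the MSB.
template <typename Fn>
void ForEachSetBitMsbFirst(unsigned long word, Fn visit) {
  const int kBits = static_cast<int>(sizeof(unsigned long)) * 8;
  while (word != 0) {                        // CLZ(0) is undefined, so guard.
    const int shift = __builtin_clzl(word);  // Zeros before the leading set bit.
    visit(shift);
    word ^= 1UL << (kBits - 1 - shift);      // Clear the bit just visited.
  }
}
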
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 674c262..26ab1de 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -118,9 +118,8 @@
     }
   }
 
-  template <typename Visitor, typename FingerVisitor>
-  void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end,
-                        const Visitor& visitor, const FingerVisitor& finger_visitor) const
+  template <typename Visitor>
+  void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -130,10 +129,8 @@
   void InOrderWalk(Callback* callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
-  static void SweepWalk(const SpaceBitmap& live,
-                        const SpaceBitmap& mark,
-                        uintptr_t base, uintptr_t max,
-                        SweepCallback* thunk, void* arg);
+  static void SweepWalk(const SpaceBitmap& live, const SpaceBitmap& mark, uintptr_t base,
+                        uintptr_t max, SweepCallback* thunk, void* arg);
 
   void CopyFrom(SpaceBitmap* source_bitmap);
 
@@ -179,7 +176,8 @@
  private:
   // TODO: heap_end_ is initialized so that the heap bitmap is empty; this doesn't require the -1.
   // However, we document that this is expected on heap_end_.
-  SpaceBitmap(const std::string& name, MemMap* mem_map, word* bitmap_begin, size_t bitmap_size, const void* heap_begin)
+  SpaceBitmap(const std::string& name, MemMap* mem_map, word* bitmap_begin, size_t bitmap_size,
+              const void* heap_begin)
       : mem_map_(mem_map), bitmap_begin_(bitmap_begin), bitmap_size_(bitmap_size),
         heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
         name_(name) {}
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 89c768a..8a08f08 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -71,18 +71,6 @@
 static const bool kCountTasks = false;
 static const bool kCountJavaLangRefs = false;
 
-class SetFingerVisitor {
- public:
-  explicit SetFingerVisitor(MarkSweep* const mark_sweep) : mark_sweep_(mark_sweep) {}
-
-  void operator()(void* finger) const {
-    mark_sweep_->SetFinger(reinterpret_cast<Object*>(finger));
-  }
-
- private:
-  MarkSweep* const mark_sweep_;
-};
-
 void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) {
   // Bind live to mark bitmap if necessary.
   if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
@@ -139,7 +127,6 @@
       current_mark_bitmap_(NULL),
       java_lang_Class_(NULL),
       mark_stack_(NULL),
-      finger_(NULL),
       immune_begin_(NULL),
       immune_end_(NULL),
       soft_reference_list_(NULL),
@@ -159,7 +146,6 @@
   timings_.StartSplit("InitializePhase");
   mark_stack_ = GetHeap()->mark_stack_.get();
   DCHECK(mark_stack_ != NULL);
-  finger_ = NULL;
   SetImmuneRange(NULL, NULL);
   soft_reference_list_ = NULL;
   weak_reference_list_ = NULL;
@@ -216,16 +202,6 @@
     // This second sweep makes sure that we don't have any objects in the live stack which point to
     // freed objects. These cause problems since their references may be previously freed objects.
     SweepArray(allocation_stack, false);
-  } else {
-    timings_.NewSplit("UnMarkAllocStack");
-    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-    // The allocation stack contains things allocated since the start of the GC. These may have been
-    // marked during this GC meaning they won't be eligible for reclaiming in the next sticky GC.
-    // Remove these objects from the mark bitmaps so that they will be eligible for sticky
-    // collection.
-    heap_->UnMarkAllocStack(GetHeap()->alloc_space_->GetMarkBitmap(),
-                            GetHeap()->large_object_space_->GetMarkObjects(),
-                            allocation_stack);
   }
   return true;
 }
@@ -278,7 +254,6 @@
   live_stack->Reset();
   // Recursively mark all the non-image bits set in the mark bitmap.
   RecursiveMark();
-  DisableFinger();
 }
 
 void MarkSweep::ReclaimPhase() {
@@ -286,6 +261,26 @@
 
   if (!IsConcurrent()) {
     ProcessReferences(self);
+  } else {
+    timings_.NewSplit("UnMarkAllocStack");
+    accounting::ObjectStack* allocation_stack = GetHeap()->allocation_stack_.get();
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    // The allocation stack contains things allocated since the start of the GC. These may have been
+    // marked during this GC meaning they won't be eligible for reclaiming in the next sticky GC.
+    // Remove these objects from the mark bitmaps so that they will be eligible for sticky
+    // collection.
+    // There is a race here which is safely handled. Another thread, such as hprof, could have
+    // flushed the alloc stack after we resumed the threads. This is safe, however, since
+    // resetting the allocation stack zeros it out with madvise. This means that we will either
+    // read NULLs or attempt to unmark a newly allocated object, which will not be marked in the
+    // first place.
+    mirror::Object** end = allocation_stack->End();
+    for (mirror::Object** it = allocation_stack->Begin(); it != end; ++it) {
+      Object* obj = *it;
+      if (obj != NULL) {
+        UnMarkObjectNonNull(obj);
+      }
+    }
   }
 
   // Before freeing anything, let's verify the heap.
@@ -348,22 +343,39 @@
   }
 }
 
-inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj, bool check_finger) {
+inline void MarkSweep::MarkObjectNonNullParallel(const Object* obj) {
   DCHECK(obj != NULL);
   if (MarkObjectParallel(obj)) {
-    if (kDisableFinger || (check_finger && obj < finger_)) {
-      while (UNLIKELY(!mark_stack_->AtomicPushBack(const_cast<Object*>(obj)))) {
-        // Only reason a push can fail is that the mark stack is full.
-        ExpandMarkStack();
-      }
+    while (UNLIKELY(!mark_stack_->AtomicPushBack(const_cast<Object*>(obj)))) {
+      // Only reason a push can fail is that the mark stack is full.
+      ExpandMarkStack();
     }
   }
 }
 
-inline void MarkSweep::MarkObjectNonNull(const Object* obj, bool check_finger) {
+inline void MarkSweep::UnMarkObjectNonNull(const Object* obj) {
+  DCHECK(!IsImmune(obj));
+  // Try to take advantage of locality of references within a space, failing this find the space
+  // the hard way.
+  accounting::SpaceBitmap* object_bitmap = current_mark_bitmap_;
+  if (UNLIKELY(!object_bitmap->HasAddress(obj))) {
+    accounting::SpaceBitmap* new_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
+    if (LIKELY(new_bitmap != NULL)) {
+      object_bitmap = new_bitmap;
+    } else {
+      MarkLargeObject(obj, false);
+      return;
+    }
+  }
+
+  DCHECK(object_bitmap->HasAddress(obj));
+  object_bitmap->Clear(obj);
+}
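
The Clear(obj) above reduces to index/mask arithmetic over the bitmap words. A sketch in terms of the SpaceBitmap fields visible in space_bitmap-inl.h (kAlignment bytes of heap map to one bit; masks run from the high bit down, which is what makes the CLZ-based scan in VisitMarkedRange work):

uintptr_t offset = reinterpret_cast<uintptr_t>(obj) - heap_begin_;
size_t index = (offset / kAlignment) / kBitsPerWord;  // Which bitmap word.
word mask = static_cast<word>(kWordHighBitMask) >> ((offset / kAlignment) % kBitsPerWord);
bitmap_begin_[index] &= ~mask;  // Clear; Set would OR the mask in instead.
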
+
+inline void MarkSweep::MarkObjectNonNull(const Object* obj) {
   DCHECK(obj != NULL);
 
-  if (obj >= immune_begin_ && obj < immune_end_) {
+  if (IsImmune(obj)) {
     DCHECK(IsMarked(obj));
     return;
   }
@@ -376,7 +388,7 @@
     if (LIKELY(new_bitmap != NULL)) {
       object_bitmap = new_bitmap;
     } else {
-      MarkLargeObject(obj);
+      MarkLargeObject(obj, true);
       return;
     }
   }
@@ -384,19 +396,17 @@
   // This object was not previously marked.
   if (!object_bitmap->Test(obj)) {
     object_bitmap->Set(obj);
-    if (kDisableFinger || (check_finger && obj < finger_)) {
-      // Do we need to expand the mark stack?
-      if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
-        ExpandMarkStack();
-      }
-      // The object must be pushed on to the mark stack.
-      mark_stack_->PushBack(const_cast<Object*>(obj));
+    // Do we need to expand the mark stack?
+    if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
+      ExpandMarkStack();
     }
+    // The object must be pushed onto the mark stack.
+    mark_stack_->PushBack(const_cast<Object*>(obj));
   }
 }
 
 // Rare case, probably not worth inlining since it will increase instruction cache miss rate.
-bool MarkSweep::MarkLargeObject(const Object* obj) {
+bool MarkSweep::MarkLargeObject(const Object* obj, bool set) {
   // TODO: support >1 discontinuous space.
   space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
   accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
@@ -413,8 +423,11 @@
     if (kProfileLargeObjects) {
       ++large_object_mark_;
     }
-    large_objects->Set(obj);
-    // Don't need to check finger since large objects never have any object references.
+    if (set) {
+      large_objects->Set(obj);
+    } else {
+      large_objects->Clear(obj);
+    }
     return true;
   }
   return false;
@@ -423,7 +436,7 @@
 inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
   DCHECK(obj != NULL);
 
-  if (obj >= immune_begin_ && obj < immune_end_) {
+  if (IsImmune(obj)) {
     DCHECK(IsMarked(obj));
     return false;
   }
@@ -439,7 +452,7 @@
       // TODO: Remove the Thread::Current here?
       // TODO: Convert this to some kind of atomic marking?
       MutexLock mu(Thread::Current(), large_object_lock_);
-      return MarkLargeObject(obj);
+      return MarkLargeObject(obj, true);
     }
   }
 
@@ -454,13 +467,13 @@
 // need to be added to the mark stack.
 void MarkSweep::MarkObject(const Object* obj) {
   if (obj != NULL) {
-    MarkObjectNonNull(obj, true);
+    MarkObjectNonNull(obj);
   }
 }
 
 void MarkSweep::MarkRoot(const Object* obj) {
   if (obj != NULL) {
-    MarkObjectNonNull(obj, false);
+    MarkObjectNonNull(obj);
   }
 }
 
@@ -468,21 +481,21 @@
   DCHECK(root != NULL);
   DCHECK(arg != NULL);
   MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
-  mark_sweep->MarkObjectNonNullParallel(root, false);
+  mark_sweep->MarkObjectNonNullParallel(root);
 }
 
 void MarkSweep::MarkObjectCallback(const Object* root, void* arg) {
   DCHECK(root != NULL);
   DCHECK(arg != NULL);
   MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
-  mark_sweep->MarkObjectNonNull(root, false);
+  mark_sweep->MarkObjectNonNull(root);
 }
 
 void MarkSweep::ReMarkObjectVisitor(const Object* root, void* arg) {
   DCHECK(root != NULL);
   DCHECK(arg != NULL);
   MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
-  mark_sweep->MarkObjectNonNull(root, true);
+  mark_sweep->MarkObjectNonNull(root);
 }
 
 void MarkSweep::VerifyRootCallback(const Object* root, void* arg, size_t vreg,
@@ -582,7 +595,6 @@
   accounting::CardTable* card_table = GetHeap()->GetCardTable();
   const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
   ScanObjectVisitor visitor(this);
-  SetFingerVisitor finger_visitor(this);
   // TODO: C++0x
   typedef std::vector<space::ContinuousSpace*>::const_iterator It;
   for (It it = spaces.begin(), space_end = spaces.end(); it != space_end; ++it) {
@@ -602,7 +614,7 @@
     byte* end = space->End();
     // Image spaces are handled properly since live == marked for them.
     accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
-    card_table->Scan(mark_bitmap, begin, end, visitor, finger_visitor, minimum_age);
+    card_table->Scan(mark_bitmap, begin, end, visitor, minimum_age);
   }
 }
 
@@ -637,7 +649,7 @@
       uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
       accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
       DCHECK(live_bitmap != NULL);
-      live_bitmap->VisitMarkedRange(begin, end, visitor, VoidFunctor());
+      live_bitmap->VisitMarkedRange(begin, end, visitor);
     }
   }
 }
@@ -655,10 +667,8 @@
   CHECK(cleared_reference_list_ == NULL);
 
   const bool partial = GetGcType() == kGcTypePartial;
-  SetFingerVisitor set_finger_visitor(this);
   ScanObjectVisitor scan_visitor(this);
   if (!kDisableFinger) {
-    finger_ = NULL;
     const std::vector<space::ContinuousSpace*>& spaces = GetHeap()->GetContinuousSpaces();
     // TODO: C++0x
     typedef std::vector<space::ContinuousSpace*>::const_iterator It;
@@ -674,11 +684,10 @@
         // This function does not handle heap end increasing, so we must use the space end.
         uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
         uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
-        current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor, set_finger_visitor);
+        current_mark_bitmap_->VisitMarkedRange(begin, end, scan_visitor);
       }
     }
   }
-  DisableFinger();
   timings_.NewSplit("ProcessMarkStack");
   ProcessMarkStack();
 }
@@ -1342,7 +1351,7 @@
 
 inline bool MarkSweep::IsMarked(const Object* object) const
     SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
-  if (object >= immune_begin_ && object < immune_end_) {
+  if (IsImmune(object)) {
     return true;
   }
   DCHECK(current_mark_bitmap_ != NULL);
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index d386fd6..e39e2f7 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -167,14 +167,6 @@
   void ScanObjectVisit(const mirror::Object* obj, const MarkVisitor& visitor)
       NO_THREAD_SAFETY_ANALYSIS;
 
-  void SetFinger(mirror::Object* new_finger) {
-    finger_ = new_finger;
-  }
-
-  void DisableFinger() {
-    SetFinger(reinterpret_cast<mirror::Object*>(~static_cast<uintptr_t>(0)));
-  }
-
   size_t GetFreedBytes() const {
     return freed_bytes_;
   }
@@ -261,13 +253,22 @@
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
                             Locks::mutator_lock_);
 
-  void MarkObjectNonNull(const mirror::Object* obj, bool check_finger)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  void MarkObjectNonNull(const mirror::Object* obj)
+        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  void MarkObjectNonNullParallel(const mirror::Object* obj, bool check_finger);
+  // Unmarks an object by clearing the bit inside of the corresponding bitmap, or if it is in a
+  // space set, removing the object from the set.
+  void UnMarkObjectNonNull(const mirror::Object* obj)
+        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+        EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  bool MarkLargeObject(const mirror::Object* obj)
+  // Marks an object atomically, safe to use from multiple threads.
+  void MarkObjectNonNullParallel(const mirror::Object* obj);
+
+  // Marks or unmarks a large object based on whether or not set is true: if set is true we mark,
+  // otherwise we unmark.
+  bool MarkLargeObject(const mirror::Object* obj, bool set)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Returns true if we need to add obj to a mark stack.
@@ -295,6 +296,11 @@
   // Expand mark stack to 2x its current size. Thread safe.
   void ExpandMarkStack();
 
+  // Returns true if an object is inside the immune region (assumed to be marked).
+  bool IsImmune(const mirror::Object* obj) const {
+    return obj >= immune_begin_ && obj < immune_end_;
+  }
+
   static void VerifyRootCallback(const mirror::Object* root, void* arg, size_t vreg,
                                  const StackVisitor *visitor);
 
@@ -386,8 +392,6 @@
 
   accounting::ObjectStack* mark_stack_;
 
-  mirror::Object* finger_;
-
   // Immune range; every object inside the immune range is assumed to be marked.
   mirror::Object* immune_begin_;
   mirror::Object* immune_end_;
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 71e580d..5505336 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -51,7 +51,6 @@
 }
 
 void StickyMarkSweep::MarkReachableObjects() {
-  DisableFinger();
   RecursiveMarkDirtyObjects(accounting::CardTable::kCardDirty - 1);
 }
 
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 47e9b75..4a894be 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -21,6 +21,7 @@
 
 #include <limits>
 #include <vector>
+#include <valgrind.h>
 
 #include "base/stl_util.h"
 #include "common_throws.h"
@@ -34,6 +35,7 @@
 #include "gc/collector/mark_sweep-inl.h"
 #include "gc/collector/partial_mark_sweep.h"
 #include "gc/collector/sticky_mark_sweep.h"
+#include "gc/space/dlmalloc_space-inl.h"
 #include "gc/space/image_space.h"
 #include "gc/space/large_object_space.h"
 #include "gc/space/space-inl.h"
@@ -66,6 +68,8 @@
 // Minimum amount of remaining bytes before a concurrent GC is triggered.
 static const size_t kMinConcurrentRemainingBytes = 128 * KB;
 const double Heap::kDefaultTargetUtilization = 0.5;
+// If true, measure the total allocation time.
+static const bool kMeasureAllocationTime = false;
 
 Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
            double target_utilization, size_t capacity, const std::string& original_image_file_name,
@@ -119,9 +123,9 @@
       max_free_(max_free),
       target_utilization_(target_utilization),
       total_wait_time_(0),
-      measure_allocation_time_(false),
       total_allocation_time_(0),
-      verify_object_mode_(kHeapVerificationNotPermitted) {
+      verify_object_mode_(kHeapVerificationNotPermitted),
+      running_on_valgrind_(RUNNING_ON_VALGRIND) {
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Heap() entering";
   }
@@ -147,16 +151,6 @@
     }
   }
 
-  // Allocate the large object space.
-  const bool kUseFreeListSpaceForLOS  = false;
-  if (kUseFreeListSpaceForLOS) {
-    large_object_space_ = space::FreeListSpace::Create("large object space", NULL, capacity);
-  } else {
-    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
-  }
-  CHECK(large_object_space_ != NULL) << "Failed to create large object space";
-  AddDiscontinuousSpace(large_object_space_);
-
   alloc_space_ = space::DlMallocSpace::Create(Runtime::Current()->IsZygote() ? "zygote space" : "alloc space",
                                               initial_size,
                                               growth_limit, capacity,
@@ -165,6 +159,16 @@
   alloc_space_->SetFootprintLimit(alloc_space_->Capacity());
   AddContinuousSpace(alloc_space_);
 
+  // Allocate the large object space.
+  const bool kUseFreeListSpaceForLOS = false;
+  if (kUseFreeListSpaceForLOS) {
+    large_object_space_ = space::FreeListSpace::Create("large object space", NULL, capacity);
+  } else {
+    large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
+  }
+  CHECK(large_object_space_ != NULL) << "Failed to create large object space";
+  AddDiscontinuousSpace(large_object_space_);
+
   // Compute heap capacity. Continuous spaces are sorted in order of Begin().
   byte* heap_begin = continuous_spaces_.front()->Begin();
   size_t heap_capacity = continuous_spaces_.back()->End() - continuous_spaces_.front()->Begin();
@@ -348,7 +352,7 @@
   }
   os << "Total number of allocations: " << total_objects_allocated << "\n";
   os << "Total bytes allocated " << PrettySize(total_bytes_allocated) << "\n";
-  if (measure_allocation_time_) {
+  if (kMeasureAllocationTime) {
     os << "Total time spent allocating: " << PrettyDuration(allocation_time) << "\n";
     os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
        << "\n";
@@ -445,32 +449,29 @@
   DCHECK_GE(byte_count, sizeof(mirror::Object));
 
   mirror::Object* obj = NULL;
-  size_t size = 0;
+  size_t bytes_allocated = 0;
   uint64_t allocation_start = 0;
-  if (UNLIKELY(measure_allocation_time_)) {
+  if (UNLIKELY(kMeasureAllocationTime)) {
     allocation_start = NanoTime() / kTimeAdjust;
   }
 
   // We need to have a zygote space or else our newly allocated large object can end up in the
   // Zygote resulting in it being prematurely freed.
-  // We can only do this for primive objects since large objects will not be within the card table
+  // We can only do this for primitive objects since large objects will not be within the card table
   // range. This also means that we rely on SetClass not dirtying the object's card.
   bool large_object_allocation =
       byte_count >= large_object_threshold_ && have_zygote_space_ && c->IsPrimitiveArray();
   if (UNLIKELY(large_object_allocation)) {
-    size = RoundUp(byte_count, kPageSize);
-    obj = Allocate(self, large_object_space_, size);
+    obj = Allocate(self, large_object_space_, byte_count, &bytes_allocated);
     // Make sure that our large object didn't get placed anywhere within the space interval or else
     // it breaks the immune range.
     DCHECK(obj == NULL ||
            reinterpret_cast<byte*>(obj) < continuous_spaces_.front()->Begin() ||
            reinterpret_cast<byte*>(obj) >= continuous_spaces_.back()->End());
   } else {
-    obj = Allocate(self, alloc_space_, byte_count);
-
+    obj = Allocate(self, alloc_space_, byte_count, &bytes_allocated);
     // Ensure that we did not allocate into a zygote space.
     DCHECK(obj == NULL || !have_zygote_space_ || !FindSpaceFromObject(obj, false)->IsZygoteSpace());
-    size = alloc_space_->AllocationSize(obj);
   }
 
   if (LIKELY(obj != NULL)) {
@@ -478,19 +479,21 @@
 
     // Record the allocation after the SetClass so that the atomic add acts as a fence guarding
     // the SetClass, since we do not want the class to appear NULL in another thread.
-    RecordAllocation(size, obj);
+    RecordAllocation(bytes_allocated, obj);
 
     if (Dbg::IsAllocTrackingEnabled()) {
       Dbg::RecordAllocation(c, byte_count);
     }
-    if (static_cast<size_t>(num_bytes_allocated_) >= concurrent_start_bytes_) {
+    if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_) >= concurrent_start_bytes_)) {
       // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
       SirtRef<mirror::Object> ref(self, obj);
       RequestConcurrentGC(self);
     }
-    VerifyObject(obj);
+    if (kDesiredHeapVerification > kNoHeapVerification) {
+      VerifyObject(obj);
+    }
 
-    if (UNLIKELY(measure_allocation_time_)) {
+    if (UNLIKELY(kMeasureAllocationTime)) {
       total_allocation_time_.fetch_add(NanoTime() / kTimeAdjust - allocation_start);
     }
 
@@ -646,7 +649,7 @@
   GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
 }
 
-void Heap::RecordAllocation(size_t size, mirror::Object* obj) {
+inline void Heap::RecordAllocation(size_t size, mirror::Object* obj) {
   DCHECK(obj != NULL);
   DCHECK_GT(size, 0u);
   num_bytes_allocated_.fetch_add(size);
@@ -685,37 +688,55 @@
   }
 }
 
-mirror::Object* Heap::TryToAllocate(Thread* self, space::AllocSpace* space, size_t alloc_size,
-                                    bool grow) {
-  // Should we try to use a CAS here and fix up num_bytes_allocated_ later with AllocationSize?
-  if (num_bytes_allocated_ + alloc_size > max_allowed_footprint_) {
-    // max_allowed_footprint_ <= growth_limit_ so it is safe to check in here.
-    if (num_bytes_allocated_ + alloc_size > growth_limit_) {
-      // Completely out of memory.
-      return NULL;
-    }
-  }
-
-  return space->Alloc(self, alloc_size);
+inline bool Heap::IsOutOfMemoryOnAllocation(size_t alloc_size) {
+  return num_bytes_allocated_ + alloc_size > growth_limit_;
 }
 
-mirror::Object* Heap::Allocate(Thread* self, space::AllocSpace* space, size_t alloc_size) {
+inline mirror::Object* Heap::TryToAllocate(Thread* self, space::AllocSpace* space, size_t alloc_size,
+                                           bool grow, size_t* bytes_allocated) {
+  if (IsOutOfMemoryOnAllocation(alloc_size)) {
+    return NULL;
+  }
+  return space->Alloc(self, alloc_size, bytes_allocated);
+}
+
+// DlMallocSpace-specific version.
+inline mirror::Object* Heap::TryToAllocate(Thread* self, space::DlMallocSpace* space, size_t alloc_size,
+                                           bool grow, size_t* bytes_allocated) {
+  if (IsOutOfMemoryOnAllocation(alloc_size)) {
+    return NULL;
+  }
+  if (!running_on_valgrind_) {
+    return space->AllocNonvirtual(self, alloc_size, bytes_allocated);
+  } else {
+    return space->Alloc(self, alloc_size, bytes_allocated);
+  }
+}
+
+template <class T>
+inline mirror::Object* Heap::Allocate(Thread* self, T* space, size_t alloc_size, size_t* bytes_allocated) {
   // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
   // done in the runnable state where suspension is expected.
   DCHECK_EQ(self->GetState(), kRunnable);
   self->AssertThreadSuspensionIsAllowable();
 
-  mirror::Object* ptr = TryToAllocate(self, space, alloc_size, false);
+  mirror::Object* ptr = TryToAllocate(self, space, alloc_size, false, bytes_allocated);
   if (ptr != NULL) {
     return ptr;
   }
+  return AllocateInternalWithGc(self, space, alloc_size, bytes_allocated);
+}
+
+mirror::Object* Heap::AllocateInternalWithGc(Thread* self, space::AllocSpace* space, size_t alloc_size,
+                                             size_t* bytes_allocated) {
+  mirror::Object* ptr;
 
   // The allocation failed. If the GC is running, block until it completes, and then retry the
   // allocation.
   collector::GcType last_gc = WaitForConcurrentGcToComplete(self);
   if (last_gc != collector::kGcTypeNone) {
     // A GC was in progress and we blocked, retry allocation now that memory has been freed.
-    ptr = TryToAllocate(self, space, alloc_size, false);
+    ptr = TryToAllocate(self, space, alloc_size, false, bytes_allocated);
     if (ptr != NULL) {
       return ptr;
     }
@@ -750,7 +771,7 @@
       i = static_cast<size_t>(gc_type_ran);
 
       // Did we free sufficient memory for the allocation to succeed?
-      ptr = TryToAllocate(self, space, alloc_size, false);
+      ptr = TryToAllocate(self, space, alloc_size, false, bytes_allocated);
       if (ptr != NULL) {
         return ptr;
       }
@@ -759,7 +780,7 @@
 
   // Allocations have failed after GCs; this is an exceptional state.
   // Try harder, growing the heap if necessary.
-  ptr = TryToAllocate(self, space, alloc_size, true);
+  ptr = TryToAllocate(self, space, alloc_size, true, bytes_allocated);
   if (ptr != NULL) {
     return ptr;
   }
@@ -774,7 +795,7 @@
 
   // We don't need a WaitForConcurrentGcToComplete here either.
   CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, true);
-  return TryToAllocate(self, space, alloc_size, true);
+  return TryToAllocate(self, space, alloc_size, true, bytes_allocated);
 }
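
Stepping back, AllocateInternalWithGc is an escalation ladder: wait out any running GC and retry, run progressively more thorough collections and retry after each, retry once more with grow == true, and finally run a full GC that clears soft references before the last attempt. The shape as a stand-alone sketch (the function-pointer plumbing is illustrative, not ART API):

#include <cstddef>

typedef void* (*TryAllocFn)(void* ctx);  // One allocation attempt; NULL on failure.
typedef void (*RemedyFn)(void* ctx);     // E.g. wait for GC, stronger GC, grow heap.

void* AllocateWithEscalation(TryAllocFn try_alloc, RemedyFn* remedies,
                             size_t num_remedies, void* ctx) {
  void* p = try_alloc(ctx);
  for (size_t i = 0; p == NULL && i < num_remedies; ++i) {
    remedies[i](ctx);    // Apply the next, more expensive remedy...
    p = try_alloc(ctx);  // ...and retry the allocation.
  }
  return p;  // NULL after the last rung ends in an OutOfMemoryError.
}
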
 
 void Heap::SetTargetHeapUtilization(float target) {
@@ -1029,20 +1050,6 @@
   }
 }
 
-void Heap::UnMarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
-                            accounting::ObjectStack* stack) {
-  mirror::Object** limit = stack->End();
-  for (mirror::Object** it = stack->Begin(); it != limit; ++it) {
-    const mirror::Object* obj = *it;
-    DCHECK(obj != NULL);
-    if (LIKELY(bitmap->HasAddress(obj))) {
-      bitmap->Clear(obj);
-    } else {
-      large_objects->Clear(obj);
-    }
-  }
-}
-
 collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCause gc_cause,
                                                bool clear_soft_references) {
   Thread* self = Thread::Current();
@@ -1259,8 +1266,7 @@
         ScanVisitor scan_visitor;
         byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
         card_table->Scan(bitmap, byte_cover_begin,
-                         byte_cover_begin + accounting::CardTable::kCardSize,
-                         scan_visitor, VoidFunctor());
+                         byte_cover_begin + accounting::CardTable::kCardSize, scan_visitor);
 
         // Search to see if any of the roots reference our object.
         void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 3f91553..54e905b 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -377,11 +377,6 @@
                       accounting::ObjectStack* stack)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  // Unmark all the objects in the allocation stack in the specified bitmap.
-  void UnMarkAllocStack(accounting::SpaceBitmap* bitmap, accounting::SpaceSetMap* large_objects,
-                        accounting::ObjectStack* stack)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
   // Update and mark mod union table based on gc type.
   void UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::TimingLogger& timings,
                              collector::GcType gc_type)
@@ -417,15 +412,31 @@
  private:
   // Allocates uninitialized storage. Passing in a null space tries to place the object in the
   // large object space.
-  mirror::Object* Allocate(Thread* self, space::AllocSpace* space, size_t num_bytes)
+  template <class T> mirror::Object* Allocate(Thread* self, T* space, size_t num_bytes, size_t* bytes_allocated)
+      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  // Handles Allocate()'s slow allocation path with GC involved after
+  // an initial allocation attempt failed.
+  mirror::Object* AllocateInternalWithGc(Thread* self, space::AllocSpace* space, size_t num_bytes,
+                                         size_t* bytes_allocated)
       LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Try to allocate a number of bytes; this function never does any GCs.
-  mirror::Object* TryToAllocate(Thread* self, space::AllocSpace* space, size_t alloc_size, bool grow)
+  mirror::Object* TryToAllocate(Thread* self, space::AllocSpace* space, size_t alloc_size, bool grow,
+                                size_t* bytes_allocated)
       LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Try to allocate a number of bytes; this function never does any GCs.
+  // DlMallocSpace-specialized version.
+  mirror::Object* TryToAllocate(Thread* self, space::DlMallocSpace* space, size_t alloc_size, bool grow,
+                                size_t* bytes_allocated)
+      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  bool IsOutOfMemoryOnAllocation(size_t alloc_size);
+
   // Pushes a list of cleared references out to the managed heap.
   void EnqueueClearedReferences(mirror::Object** cleared_references);
 
@@ -643,7 +654,6 @@
   uint64_t total_wait_time_;
 
   // Total time spent allocating, in microseconds.
-  const bool measure_allocation_time_;
   AtomicInteger total_allocation_time_;
 
   // The current state of heap verification, may be enabled or disabled.
@@ -651,6 +661,8 @@
 
   std::vector<collector::MarkSweep*> mark_sweep_collectors_;
 
+  const bool running_on_valgrind_;
+
   friend class collector::MarkSweep;
   friend class VerifyReferenceCardVisitor;
   friend class VerifyReferenceVisitor;
diff --git a/runtime/gc/space/dlmalloc_space-inl.h b/runtime/gc/space/dlmalloc_space-inl.h
new file mode 100644
index 0000000..5481141
--- /dev/null
+++ b/runtime/gc/space/dlmalloc_space-inl.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_INL_H_
+#define ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_INL_H_
+
+#include "dlmalloc_space.h"
+
+namespace art {
+namespace gc {
+namespace space {
+
+inline mirror::Object* DlMallocSpace::AllocNonvirtual(Thread* self, size_t num_bytes,
+                                                      size_t* bytes_allocated) {
+  mirror::Object* obj;
+  {
+    MutexLock mu(self, lock_);
+    obj = AllocWithoutGrowthLocked(num_bytes, bytes_allocated);
+  }
+  if (obj != NULL) {
+    // Zero freshly allocated memory, done while not holding the space's lock.
+    memset(obj, 0, num_bytes);
+  }
+  return obj;
+}
+
+inline mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(size_t num_bytes, size_t* bytes_allocated) {
+  mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_malloc(mspace_, num_bytes));
+  if (result != NULL) {
+    if (kDebugSpaces) {
+      CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
+            << ") not in bounds of allocation space " << *this;
+    }
+    size_t allocation_size = AllocationSizeNonvirtual(result);
+    DCHECK(bytes_allocated != NULL);
+    *bytes_allocated = allocation_size;
+    num_bytes_allocated_ += allocation_size;
+    total_bytes_allocated_ += allocation_size;
+    ++total_objects_allocated_;
+    ++num_objects_allocated_;
+  }
+  return result;
+}
+
+}  // namespace space
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_SPACE_DLMALLOC_SPACE_INL_H_
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index d539aa2..8b99e96 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 #include "dlmalloc_space.h"
+#include "dlmalloc_space-inl.h"
 #include "gc/accounting/card_table.h"
 #include "gc/heap.h"
 #include "runtime.h"
@@ -46,8 +47,9 @@
 // A specialization of DlMallocSpace that provides information to valgrind wrt allocations.
 class ValgrindDlMallocSpace : public DlMallocSpace {
  public:
-  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes) {
-    void* obj_with_rdz = DlMallocSpace::AllocWithGrowth(self, num_bytes + 2 * kValgrindRedZoneBytes);
+  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
+    void* obj_with_rdz = DlMallocSpace::AllocWithGrowth(self, num_bytes + 2 * kValgrindRedZoneBytes,
+                                                        bytes_allocated);
     if (obj_with_rdz == NULL) {
       return NULL;
     }
@@ -59,8 +61,9 @@
     return result;
   }
 
-  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes) {
-    void* obj_with_rdz = DlMallocSpace::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes);
+  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
+    void* obj_with_rdz = DlMallocSpace::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes,
+                                              bytes_allocated);
     if (obj_with_rdz == NULL) {
      return NULL;
     }
@@ -234,37 +237,27 @@
   mark_bitmap_->SetName(temp_name);
 }
 
-mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(size_t num_bytes) {
-  mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_calloc(mspace_, 1, num_bytes));
-  if (result != NULL) {
-    if (kDebugSpaces) {
-      CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
-            << ") not in bounds of allocation space " << *this;
-    }
-    size_t allocation_size = InternalAllocationSize(result);
-    num_bytes_allocated_ += allocation_size;
-    total_bytes_allocated_ += allocation_size;
-    ++total_objects_allocated_;
-    ++num_objects_allocated_;
+mirror::Object* DlMallocSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
+  return AllocNonvirtual(self, num_bytes, bytes_allocated);
+}
+
+mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
+  mirror::Object* result;
+  {
+    MutexLock mu(self, lock_);
+    // Grow as much as possible within the mspace.
+    size_t max_allowed = Capacity();
+    mspace_set_footprint_limit(mspace_, max_allowed);
+    // Try the allocation.
+    result = AllocWithoutGrowthLocked(num_bytes, bytes_allocated);
+    // Shrink back down as small as possible.
+    size_t footprint = mspace_footprint(mspace_);
+    mspace_set_footprint_limit(mspace_, footprint);
   }
-  return result;
-}
-
-mirror::Object* DlMallocSpace::Alloc(Thread* self, size_t num_bytes) {
-  MutexLock mu(self, lock_);
-  return AllocWithoutGrowthLocked(num_bytes);
-}
-
-mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes) {
-  MutexLock mu(self, lock_);
-  // Grow as much as possible within the mspace.
-  size_t max_allowed = Capacity();
-  mspace_set_footprint_limit(mspace_, max_allowed);
-  // Try the allocation.
-  mirror::Object* result = AllocWithoutGrowthLocked(num_bytes);
-  // Shrink back down as small as possible.
-  size_t footprint = mspace_footprint(mspace_);
-  mspace_set_footprint_limit(mspace_, footprint);
+  if (result != NULL) {
+    // Zero freshly allocated memory, done while not holding the space's lock.
+    memset(result, 0, num_bytes);
+  }
   // Return the new allocation or NULL.
   CHECK(!kDebugSpaces || result == NULL || Contains(result));
   return result;
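
AllocWithGrowth above temporarily raises the mspace footprint limit to the space's full capacity, attempts the allocation, then clamps the limit back down to the actual footprint so plain Alloc() calls still fail once the budget is used. A toy sketch of that grow/allocate/shrink dance, assuming a hypothetical bump allocator in place of dlmalloc's mspace:

  #include <cassert>
  #include <cstddef>
  #include <cstring>
  #include <mutex>

  // Toy stand-in for the mspace: a bump allocator with a footprint limit.
  // This sketches the grow/allocate/shrink pattern only; it is not dlmalloc.
  class ToyMspace {
   public:
    ToyMspace(char* mem, size_t capacity)
        : mem_(mem), capacity_(capacity), used_(0), limit_(0) {}
    void SetFootprintLimit(size_t bytes) {
      limit_ = bytes < capacity_ ? bytes : capacity_;
    }
    size_t Footprint() const { return used_; }
    void* TryAlloc(size_t n, size_t* bytes_allocated) {
      if (used_ + n > limit_) return nullptr;  // would exceed the limit
      void* p = mem_ + used_;
      used_ += n;
      *bytes_allocated = n;
      return p;
    }
   private:
    char* mem_;
    size_t capacity_, used_, limit_;
  };

  void* AllocWithGrowth(ToyMspace* space, std::mutex* lock, size_t capacity,
                        size_t num_bytes, size_t* bytes_allocated) {
    void* result;
    {
      std::lock_guard<std::mutex> mu(*lock);
      space->SetFootprintLimit(capacity);  // grow to the maximum allowed
      result = space->TryAlloc(num_bytes, bytes_allocated);
      space->SetFootprintLimit(space->Footprint());  // shrink back down
    }
    if (result != nullptr) {
      memset(result, 0, num_bytes);  // zero outside the lock, as in the diff
    }
    return result;
  }

  int main() {
    static char arena[1024];
    ToyMspace space(arena, sizeof(arena));
    std::mutex lock;
    size_t out = 0;
    void* p = AllocWithGrowth(&space, &lock, sizeof(arena), 128, &out);
    assert(p != nullptr && out == 128);
    // A plain TryAlloc now fails: the limit was shrunk back to the footprint,
    // mirroring the tests below where Alloc fails until AllocWithGrowth runs.
    size_t dummy = 0;
    assert(space.TryAlloc(1, &dummy) == nullptr);
    return 0;
  }
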
@@ -415,8 +408,7 @@
 
 // Virtual functions can't get inlined.
 inline size_t DlMallocSpace::InternalAllocationSize(const mirror::Object* obj) {
-  return mspace_usable_size(const_cast<void*>(reinterpret_cast<const void*>(obj))) +
-      kChunkOverhead;
+  return AllocationSizeNonvirtual(obj);
 }
 
 size_t DlMallocSpace::AllocationSize(const mirror::Object* obj) {
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index c15d0ba..6d52c26 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -50,16 +50,24 @@
                                size_t capacity, byte* requested_begin);
 
   // Allocate num_bytes allowing the underlying mspace to grow.
-  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes);
+  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
+                                          size_t* bytes_allocated) LOCKS_EXCLUDED(lock_);
 
   // Allocate num_bytes without allowing the underlying mspace to grow.
-  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes);
+  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
 
   // Return the storage space required by obj.
   virtual size_t AllocationSize(const mirror::Object* obj);
   virtual size_t Free(Thread* self, mirror::Object* ptr);
   virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
 
+  mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated);
+
+  size_t AllocationSizeNonvirtual(const mirror::Object* obj) {
+    return mspace_usable_size(const_cast<void*>(reinterpret_cast<const void*>(obj))) +
+        kChunkOverhead;
+  }
+
   void* MoreCore(intptr_t increment);
 
   void* GetMspace() const {
@@ -71,7 +79,7 @@
 
   // Perform a mspace_inspect_all which calls back for each allocation chunk. The chunk may not be
   // in use, indicated by num_bytes equaling zero.
-  void Walk(WalkCallback callback, void* arg);
+  void Walk(WalkCallback callback, void* arg) LOCKS_EXCLUDED(lock_);
 
   // Returns the number of bytes that the space has currently obtained from the system. This is
   // greater or equal to the amount of live data in the space.
@@ -141,7 +149,8 @@
 
  private:
   size_t InternalAllocationSize(const mirror::Object* obj);
-  mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  mirror::Object* AllocWithoutGrowthLocked(size_t num_bytes, size_t* bytes_allocated)
+      EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
   bool Init(size_t initial_size, size_t maximum_size, size_t growth_size, byte* requested_base);
 
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index d7db561..a174c0a 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -55,7 +55,7 @@
   return new LargeObjectMapSpace(name);
 }
 
-mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes) {
+mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
   MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", NULL, num_bytes,
                                          PROT_READ | PROT_WRITE);
   if (mem_map == NULL) {
@@ -66,6 +66,8 @@
   large_objects_.push_back(obj);
   mem_maps_.Put(obj, mem_map);
   size_t allocation_size = mem_map->Size();
+  DCHECK(bytes_allocated != NULL);
+  *bytes_allocated = allocation_size;
   num_bytes_allocated_ += allocation_size;
   total_bytes_allocated_ += allocation_size;
   ++num_objects_allocated_;
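
LargeObjectMapSpace::Alloc gives every large object its own anonymous mapping, so the bytes actually reserved are page-rounded and can exceed num_bytes; that rounded size is what the new bytes_allocated out-parameter reports. A minimal sketch of the same contract using raw mmap (the helper name is hypothetical):

  #include <sys/mman.h>
  #include <unistd.h>
  #include <cassert>
  #include <cstddef>

  // Hypothetical helper: one anonymous mapping per object, page-rounded.
  void* AllocLargeObject(size_t num_bytes, size_t* bytes_allocated) {
    size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    size_t size = (num_bytes + page - 1) & ~(page - 1);  // round up to pages
    void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (mem == MAP_FAILED) {
      return nullptr;
    }
    *bytes_allocated = size;  // what the caller must account for (and free)
    return mem;
  }

  int main() {
    size_t allocated = 0;
    void* obj = AllocLargeObject(100000, &allocated);
    assert(obj != nullptr && allocated >= 100000);  // rounded up, never down
    munmap(obj, allocated);
    return 0;
  }
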
@@ -138,89 +140,97 @@
       end_(end),
       mem_map_(mem_map),
       lock_("free list space lock", kAllocSpaceLock) {
-  chunks_.resize(Size() / kAlignment + 1);
-  // Add a dummy chunk so we don't need to handle chunks having no next chunk.
-  chunks_.back().SetSize(kAlignment, false);
-  // Start out with one large free chunk.
-  AddFreeChunk(begin_, end_ - begin_, NULL);
+  free_end_ = end - begin;
 }
 
 FreeListSpace::~FreeListSpace() {}
 
-void FreeListSpace::AddFreeChunk(void* address, size_t size, Chunk* previous) {
-  Chunk* chunk = ChunkFromAddr(address);
-  chunk->SetSize(size, true);
-  chunk->SetPrevious(previous);
-  Chunk* next_chunk = GetNextChunk(chunk);
-  next_chunk->SetPrevious(chunk);
-  free_chunks_.insert(chunk);
-}
-
-FreeListSpace::Chunk* FreeListSpace::ChunkFromAddr(void* address) {
-  size_t offset = reinterpret_cast<byte*>(address) - Begin();
-  DCHECK(IsAligned<kAlignment>(offset));
-  DCHECK_LT(offset, Size());
-  return &chunks_[offset / kAlignment];
-}
-
-void* FreeListSpace::AddrFromChunk(Chunk* chunk) {
-  return reinterpret_cast<void*>(Begin() + (chunk - &chunks_.front()) * kAlignment);
-}
-
-void FreeListSpace::RemoveFreeChunk(Chunk* chunk) {
-  // TODO: C++0x
-  // TODO: Improve performance, this might be slow.
-  std::pair<FreeChunks::iterator, FreeChunks::iterator> range = free_chunks_.equal_range(chunk);
-  for (FreeChunks::iterator it = range.first; it != range.second; ++it) {
-    if (*it == chunk) {
-      free_chunks_.erase(it);
-      return;
-    }
+void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
+  MutexLock mu(Thread::Current(), lock_);
+  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
+  AllocationHeader* cur_header = reinterpret_cast<AllocationHeader*>(Begin());
+  while (reinterpret_cast<uintptr_t>(cur_header) < free_end_start) {
+    cur_header = cur_header->GetNextNonFree();
+    size_t alloc_size = cur_header->AllocationSize();
+    byte* byte_start = reinterpret_cast<byte*>(cur_header->GetObjectAddress());
+    byte* byte_end = byte_start + alloc_size - sizeof(AllocationHeader);
+    callback(byte_start, byte_end, alloc_size, arg);
+    callback(NULL, NULL, 0, arg);
+    cur_header = reinterpret_cast<AllocationHeader*>(byte_end);
   }
 }
 
-void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
-  MutexLock mu(Thread::Current(), lock_);
-  for (Chunk* chunk = &chunks_.front(); chunk < &chunks_.back(); ) {
-    if (!chunk->IsFree()) {
-      size_t size = chunk->GetSize();
-      void* begin = AddrFromChunk(chunk);
-      void* end = reinterpret_cast<void*>(reinterpret_cast<byte*>(begin) + size);
-      callback(begin, end, size, arg);
-      callback(NULL, NULL, 0, arg);
-    }
-    chunk = GetNextChunk(chunk);
+void FreeListSpace::RemoveFreePrev(AllocationHeader* header) {
+  CHECK(!header->IsFree());
+  CHECK_GT(header->GetPrevFree(), size_t(0));
+  FreeBlocks::iterator found = free_blocks_.lower_bound(header);
+  CHECK(found != free_blocks_.end());
+  CHECK_EQ(*found, header);
+  free_blocks_.erase(found);
+}
+
+FreeListSpace::AllocationHeader* FreeListSpace::GetAllocationHeader(const mirror::Object* obj) {
+  DCHECK(Contains(obj));
+  return reinterpret_cast<AllocationHeader*>(reinterpret_cast<uintptr_t>(obj) -
+      sizeof(AllocationHeader));
+}
+
+FreeListSpace::AllocationHeader* FreeListSpace::AllocationHeader::GetNextNonFree() {
+  // We know that there has to be at least one object after us or else we would have
+  // coalesced with the free end region. May be worth investigating a better way to do this
+  // as it may be expensive for large allocations.
+  for (uintptr_t pos = reinterpret_cast<uintptr_t>(this);; pos += kAlignment) {
+    AllocationHeader* cur = reinterpret_cast<AllocationHeader*>(pos);
+    if (!cur->IsFree()) return cur;
   }
 }
 
 size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
   MutexLock mu(self, lock_);
-  CHECK(Contains(obj));
-  // Check adjacent chunks to see if we need to combine.
-  Chunk* chunk = ChunkFromAddr(obj);
-  CHECK(!chunk->IsFree());
-
-  size_t allocation_size = chunk->GetSize();
-  if (kIsDebugBuild) {
-    memset(obj, 0xEB, allocation_size);
+  DCHECK(Contains(obj));
+  AllocationHeader* header = GetAllocationHeader(obj);
+  CHECK(IsAligned<kAlignment>(header));
+  size_t allocation_size = header->AllocationSize();
+  DCHECK_GT(allocation_size, size_t(0));
+  DCHECK(IsAligned<kAlignment>(allocation_size));
+  // Look at the next chunk.
+  AllocationHeader* next_header = header->GetNextAllocationHeader();
+  // Calculate the start of the end free block.
+  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
+  size_t header_prev_free = header->GetPrevFree();
+  size_t new_free_size = allocation_size;
+  if (header_prev_free) {
+    new_free_size += header_prev_free;
+    RemoveFreePrev(header);
   }
-  madvise(obj, allocation_size, MADV_DONTNEED);
-  num_objects_allocated_--;
-  num_bytes_allocated_ -= allocation_size;
-  Chunk* prev = chunk->GetPrevious();
-  Chunk* next = GetNextChunk(chunk);
-
-  // Combine any adjacent free chunks
-  size_t extra_size = chunk->GetSize();
-  if (next->IsFree()) {
-    extra_size += next->GetSize();
-    RemoveFreeChunk(next);
-  }
-  if (prev != NULL && prev->IsFree()) {
-    RemoveFreeChunk(prev);
-    AddFreeChunk(AddrFromChunk(prev), prev->GetSize() + extra_size, prev->GetPrevious());
+  if (reinterpret_cast<uintptr_t>(next_header) >= free_end_start) {
+    // Easy case: the next chunk is the end free region.
+    CHECK_EQ(reinterpret_cast<uintptr_t>(next_header), free_end_start);
+    free_end_ += new_free_size;
   } else {
-    AddFreeChunk(AddrFromChunk(chunk), extra_size, prev);
+    AllocationHeader* new_free_header;
+    DCHECK(IsAligned<kAlignment>(next_header));
+    if (next_header->IsFree()) {
+      // Find the next chunk by reading each page until we hit one with a non-zero allocation size.
+      AllocationHeader* next_next_header = next_header->GetNextNonFree();
+      DCHECK(IsAligned<kAlignment>(next_next_header));
+      DCHECK(IsAligned<kAlignment>(next_next_header->AllocationSize()));
+      RemoveFreePrev(next_next_header);
+      new_free_header = next_next_header;
+      new_free_size += next_next_header->GetPrevFree();
+    } else {
+      new_free_header = next_header;
+    }
+    new_free_header->prev_free_ = new_free_size;
+    free_blocks_.insert(new_free_header);
+  }
+  --num_objects_allocated_;
+  DCHECK_LE(allocation_size, num_bytes_allocated_);
+  num_bytes_allocated_ -= allocation_size;
+  madvise(header, allocation_size, MADV_DONTNEED);
+  if (kIsDebugBuild) {
+    // Can't disallow reads since we use them to find next chunks during coalescing.
+    mprotect(header, allocation_size, PROT_READ);
   }
   return allocation_size;
 }
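
The tail of Free() above pairs madvise(MADV_DONTNEED) with mprotect(PROT_READ): the pages go back to the kernel, but the range stays readable because the coalescing scan (GetNextNonFree) must still read headers out of freed memory, while stray writes to freed objects will fault in debug builds. A small runnable demonstration of that combination:

  #include <sys/mman.h>
  #include <unistd.h>
  #include <cassert>

  int main() {
    size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    void* mem = mmap(nullptr, 4 * page, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(mem != MAP_FAILED);
    char* freed = static_cast<char*>(mem) + page;
    freed[0] = 42;                            // dirty the "allocation"
    madvise(freed, 2 * page, MADV_DONTNEED);  // pages returned to the kernel
    mprotect(freed, 2 * page, PROT_READ);     // writes now fault, reads don't
    assert(freed[0] == 0);                    // reads see fresh zero pages
    munmap(mem, 4 * page);
    return 0;
  }
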
@@ -229,50 +239,91 @@
   return mem_map_->HasAddress(obj);
 }
 
-FreeListSpace::Chunk* FreeListSpace::GetNextChunk(Chunk* chunk) {
-  return chunk + chunk->GetSize() / kAlignment;
-}
-
 size_t FreeListSpace::AllocationSize(const mirror::Object* obj) {
-  Chunk* chunk = ChunkFromAddr(const_cast<mirror::Object*>(obj));
-  CHECK(!chunk->IsFree());
-  return chunk->GetSize();
+  AllocationHeader* header = GetAllocationHeader(obj);
+  DCHECK(Contains(obj));
+  DCHECK(!header->IsFree());
+  return header->AllocationSize();
 }
 
-mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) {
+mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
   MutexLock mu(self, lock_);
-  num_bytes = RoundUp(num_bytes, kAlignment);
-  Chunk temp;
-  temp.SetSize(num_bytes);
+  size_t allocation_size = RoundUp(num_bytes + sizeof(AllocationHeader), kAlignment);
+  AllocationHeader temp;
+  temp.SetPrevFree(allocation_size);
+  temp.SetAllocationSize(0);
+  AllocationHeader* new_header;
   // Find the smallest chunk at least num_bytes in size.
-  FreeChunks::iterator found = free_chunks_.lower_bound(&temp);
-  if (found == free_chunks_.end()) {
-    // Out of memory, or too much fragmentation.
-    return NULL;
-  }
-  Chunk* chunk = *found;
-  free_chunks_.erase(found);
-  CHECK(chunk->IsFree());
-  void* addr = AddrFromChunk(chunk);
-  size_t chunk_size = chunk->GetSize();
-  chunk->SetSize(num_bytes);
-  if (chunk_size > num_bytes) {
-    // Split the chunk into two chunks.
-    Chunk* new_chunk = GetNextChunk(chunk);
-    AddFreeChunk(AddrFromChunk(new_chunk), chunk_size - num_bytes, chunk);
+  FreeBlocks::iterator found = free_blocks_.lower_bound(&temp);
+  if (found != free_blocks_.end()) {
+    AllocationHeader* header = *found;
+    free_blocks_.erase(found);
+
+    // Fit our object at the start of the preceding free block.
+    new_header = header->GetPrevFreeAllocationHeader();
+
+    // Deduct the newly allocated block from the free run recorded on header and update prev_free_.
+    header->prev_free_ -= allocation_size;
+    if (header->prev_free_ > 0) {
+      // If there is remaining space, insert back into the free set.
+      free_blocks_.insert(header);
+    }
+  } else {
+    // Try to steal some memory from the free space at the end of the space.
+    if (LIKELY(free_end_ >= allocation_size)) {
+      // Fit our object at the start of the end free block.
+      new_header = reinterpret_cast<AllocationHeader*>(end_ - free_end_);
+      free_end_ -= allocation_size;
+    } else {
+      return NULL;
+    }
   }
 
-  num_objects_allocated_++;
-  total_objects_allocated_++;
-  num_bytes_allocated_ += num_bytes;
-  total_bytes_allocated_ += num_bytes;
-  return reinterpret_cast<mirror::Object*>(addr);
+  DCHECK(bytes_allocated != NULL);
+  *bytes_allocated = allocation_size;
+
+  // Need to update these counters while still holding the lock.
+  ++num_objects_allocated_;
+  ++total_objects_allocated_;
+  num_bytes_allocated_ += allocation_size;
+  total_bytes_allocated_ += allocation_size;
+
+  // We always put our object at the start of the free block; there cannot be another free block
+  // before it.
+  if (kIsDebugBuild) {
+    mprotect(new_header, allocation_size, PROT_READ | PROT_WRITE);
+  }
+  new_header->SetPrevFree(0);
+  new_header->SetAllocationSize(allocation_size);
+  return new_header->GetObjectAddress();
 }
 
 void FreeListSpace::Dump(std::ostream& os) const {
+  MutexLock mu(Thread::Current(), const_cast<Mutex&>(lock_));
   os << GetName() << " -"
      << " begin: " << reinterpret_cast<void*>(Begin())
-     << " end: " << reinterpret_cast<void*>(End());
+     << " end: " << reinterpret_cast<void*>(End()) << "\n";
+  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
+  AllocationHeader* cur_header = reinterpret_cast<AllocationHeader*>(Begin());
+  while (reinterpret_cast<uintptr_t>(cur_header) < free_end_start) {
+    byte* free_start = reinterpret_cast<byte*>(cur_header);
+    cur_header = cur_header->GetNextNonFree();
+    byte* free_end = reinterpret_cast<byte*>(cur_header);
+    if (free_start != free_end) {
+      os << "Free block at address: " << reinterpret_cast<const void*>(free_start)
+         << " of length " << free_end - free_start << " bytes\n";
+    }
+    size_t alloc_size = cur_header->AllocationSize();
+    byte* byte_start = reinterpret_cast<byte*>(cur_header->GetObjectAddress());
+    byte* byte_end = byte_start + alloc_size - sizeof(AllocationHeader);
+    os << "Large object at address: " << reinterpret_cast<const void*>(free_start)
+       << " of length " << byte_end - byte_start << " bytes\n";
+    cur_header = reinterpret_cast<AllocationHeader*>(byte_end);
+  }
+  if (free_end_) {
+    os << "Free block at address: " << reinterpret_cast<const void*>(free_end_start)
+       << " of length " << free_end_ << " bytes\n";
+  }
 }
 
 }  // namespace space
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 8cd5088..a703e86 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -83,9 +83,9 @@
 
   // Return the storage space required by obj.
   size_t AllocationSize(const mirror::Object* obj);
-  mirror::Object* Alloc(Thread* self, size_t num_bytes);
+  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
   size_t Free(Thread* self, mirror::Object* ptr);
-  void Walk(DlMallocSpace::WalkCallback, void* arg);
+  void Walk(DlMallocSpace::WalkCallback, void* arg) LOCKS_EXCLUDED(lock_);
   // TODO: disabling thread safety analysis as this may be called when we already hold lock_.
   bool Contains(const mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS;
 
@@ -103,17 +103,16 @@
 };
 
 // A continuous large object space with a free-list to handle holes.
-// TODO: this implementation is buggy.
 class FreeListSpace : public LargeObjectSpace {
  public:
   virtual ~FreeListSpace();
   static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);
 
   size_t AllocationSize(const mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(lock_);
-  mirror::Object* Alloc(Thread* self, size_t num_bytes);
+  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated);
   size_t Free(Thread* self, mirror::Object* obj);
   bool Contains(const mirror::Object* obj) const;
-  void Walk(DlMallocSpace::WalkCallback callback, void* arg);
+  void Walk(DlMallocSpace::WalkCallback callback, void* arg) LOCKS_EXCLUDED(lock_);
 
   // Address at which the space begins.
   byte* Begin() const {
@@ -135,57 +134,100 @@
  private:
   static const size_t kAlignment = kPageSize;
 
-  class Chunk {
+  class AllocationHeader {
    public:
-    static const size_t kFreeFlag = 0x80000000;
+    // Returns the allocation size, including the header.
+    size_t AllocationSize() const {
+      return alloc_size_;
+    }
 
-    struct SortBySize {
-      bool operator()(const Chunk* a, const Chunk* b) const {
-        return a->GetSize() < b->GetSize();
+    // Updates the allocation size in the header; the size includes the header itself.
+    void SetAllocationSize(size_t size) {
+      DCHECK(IsAligned<kPageSize>(size));
+      alloc_size_ = size;
+    }
+
+    bool IsFree() const {
+      return AllocationSize() == 0;
+    }
+
+    // Returns the previous free allocation header by using the prev_free_ member to figure out
+    // where it is. If prev_free_ is 0 then we just return this header.
+    AllocationHeader* GetPrevFreeAllocationHeader() {
+      return reinterpret_cast<AllocationHeader*>(reinterpret_cast<uintptr_t>(this) - prev_free_);
+    }
+
+    // Returns the address of the object associated with this allocation header.
+    mirror::Object* GetObjectAddress() {
+      return reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(this) + sizeof(*this));
+    }
+
+    // Returns the next allocation header after the object associated with this allocation header.
+    AllocationHeader* GetNextAllocationHeader() {
+      DCHECK_NE(alloc_size_, 0U);
+      return reinterpret_cast<AllocationHeader*>(reinterpret_cast<uintptr_t>(this) + alloc_size_);
+    }
+
+    // Returns how many free bytes there are before this block.
+    size_t GetPrevFree() const {
+      return prev_free_;
+    }
+
+    // Update the size of the free block prior to the allocation.
+    void SetPrevFree(size_t prev_free) {
+      DCHECK(IsAligned<kPageSize>(prev_free));
+      prev_free_ = prev_free;
+    }
+
+    // Finds and returns the next non-free allocation header after this one.
+    // TODO: Optimize, currently O(n) for n free following pages.
+    AllocationHeader* GetNextNonFree();
+
+    // Used to implement best-fit object allocation. Each allocation has an AllocationHeader
+    // which contains the size of the free block preceding it. Implemented in such a way that
+    // we can also find the iterator for any allocation header pointer.
+    class SortByPrevFree {
+     public:
+      bool operator()(const AllocationHeader* a, const AllocationHeader* b) const {
+        if (a->GetPrevFree() < b->GetPrevFree()) return true;
+        if (a->GetPrevFree() > b->GetPrevFree()) return false;
+        if (a->AllocationSize() < b->AllocationSize()) return true;
+        if (a->AllocationSize() > b->AllocationSize()) return false;
+        return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
       }
     };
 
-    bool IsFree() const {
-      return (m_size & kFreeFlag) != 0;
-    }
-
-    void SetSize(size_t size, bool is_free = false) {
-      m_size = size | (is_free ? kFreeFlag : 0);
-    }
-
-    size_t GetSize() const {
-      return m_size & (kFreeFlag - 1);
-    }
-
-    Chunk* GetPrevious() {
-      return m_previous;
-    }
-
-    void SetPrevious(Chunk* previous) {
-      m_previous = previous;
-      DCHECK(m_previous == NULL ||
-            (m_previous != NULL && m_previous + m_previous->GetSize() / kAlignment == this));
-    }
-
    private:
-    size_t m_size;
-    Chunk* m_previous;
+    // Contains the size of the previous free block; if it is 0, the memory preceding us is an
+    // allocation.
+    size_t prev_free_;
+
+    // Allocation size of this object, 0 means that the allocation header is free memory.
+    size_t alloc_size_;
+
+    friend class FreeListSpace;
   };
 
   FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end);
-  void AddFreeChunk(void* address, size_t size, Chunk* previous) EXCLUSIVE_LOCKS_REQUIRED(lock_);
-  Chunk* ChunkFromAddr(void* address) EXCLUSIVE_LOCKS_REQUIRED(lock_);
-  void* AddrFromChunk(Chunk* chunk) EXCLUSIVE_LOCKS_REQUIRED(lock_);
-  void RemoveFreeChunk(Chunk* chunk) EXCLUSIVE_LOCKS_REQUIRED(lock_);
-  Chunk* GetNextChunk(Chunk* chunk);
 
-  typedef std::multiset<Chunk*, Chunk::SortBySize> FreeChunks;
+  // Removes header from the free blocks set by finding the corresponding iterator and erasing it.
+  void RemoveFreePrev(AllocationHeader* header) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+  // Finds the allocation header corresponding to obj.
+  AllocationHeader* GetAllocationHeader(const mirror::Object* obj);
+
+  typedef std::set<AllocationHeader*, AllocationHeader::SortByPrevFree,
+                   accounting::GCAllocator<AllocationHeader*> > FreeBlocks;
+
   byte* const begin_;
   byte* const end_;
+
+  // There is no footer for any allocations at the end of the space, so we keep track of how much
+  // free space there is at the end manually.
   UniquePtr<MemMap> mem_map_;
   Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  std::vector<Chunk> chunks_ GUARDED_BY(lock_);
-  FreeChunks free_chunks_ GUARDED_BY(lock_);
+  size_t free_end_ GUARDED_BY(lock_);
+  FreeBlocks free_blocks_ GUARDED_BY(lock_);
 };
 
 }  // namespace space
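
Putting the pieces of the new FreeListSpace together: allocations carry an AllocationHeader; a free run is recorded as prev_free_ on the first allocated header after it; and best-fit lookup is a lower_bound on a std::set keyed by prev_free_, using a stack dummy whose alloc_size of 0 sorts it before real headers on ties. A self-contained miniature under simplifying assumptions (zeroed arena instead of madvise, a sentinel header instead of free_end_; all names local to this sketch):

  #include <cassert>
  #include <cstddef>
  #include <cstdlib>
  #include <cstring>
  #include <set>

  namespace sketch {

  static const size_t kAlign = 16;

  static size_t RoundUp(size_t x, size_t a) { return (x + a - 1) & ~(a - 1); }

  struct Header {
    size_t prev_free;   // bytes of free space immediately before this header
    size_t alloc_size;  // size of this allocation, header included; 0 == free

    struct ByPrevFree {  // best-fit ordering, mirroring SortByPrevFree
      bool operator()(const Header* a, const Header* b) const {
        if (a->prev_free != b->prev_free) return a->prev_free < b->prev_free;
        if (a->alloc_size != b->alloc_size) return a->alloc_size < b->alloc_size;
        return a < b;
      }
    };
  };

  class MiniFreeList {
   public:
    // mem must be kAlign-aligned, zero-filled, with size a multiple of kAlign.
    MiniFreeList(void* mem, size_t size) {
      end_ = reinterpret_cast<Header*>(static_cast<char*>(mem) + size - kAlign);
      end_->prev_free = size - kAlign;  // the whole arena is one free run
      end_->alloc_size = kAlign;        // sentinel: never free, never returned
      free_.insert(end_);
    }

    void* Alloc(size_t bytes) {
      size_t need = RoundUp(bytes + sizeof(Header), kAlign);
      Header key = {need, 0};  // alloc_size 0 sorts before real headers on ties
      std::set<Header*, Header::ByPrevFree>::iterator it = free_.lower_bound(&key);
      if (it == free_.end()) return nullptr;  // no free run is large enough
      Header* anchor = *it;                   // header *after* the chosen run
      free_.erase(it);
      // Carve our block from the front of the run, as Alloc() does above.
      Header* h = reinterpret_cast<Header*>(
          reinterpret_cast<char*>(anchor) - anchor->prev_free);
      anchor->prev_free -= need;
      if (anchor->prev_free > 0) free_.insert(anchor);  // remainder stays free
      h->prev_free = 0;
      h->alloc_size = need;
      return h + 1;  // the object lives just past its header
    }

    void Free(void* obj) {
      Header* h = reinterpret_cast<Header*>(obj) - 1;
      size_t run = h->alloc_size + h->prev_free;  // absorb the run before us
      if (h->prev_free > 0) free_.erase(h);       // that run was anchored on us
      char* next_addr = reinterpret_cast<char*>(h) + h->alloc_size;
      memset(h, 0, h->alloc_size);  // freed memory must read as zeros again
      // Scan aligned slots over zeroed free memory, like GetNextNonFree().
      Header* next = reinterpret_cast<Header*>(next_addr);
      while (next->alloc_size == 0) {
        next = reinterpret_cast<Header*>(reinterpret_cast<char*>(next) + kAlign);
      }
      if (next->prev_free > 0) free_.erase(next);  // absorb the run after us
      next->prev_free += run;                      // record the merged run
      free_.insert(next);
    }

   private:
    Header* end_;
    std::set<Header*, Header::ByPrevFree> free_;
  };

  }  // namespace sketch

  int main() {
    const size_t kArena = 1 << 16;
    void* mem = aligned_alloc(sketch::kAlign, kArena);
    assert(mem != nullptr);
    memset(mem, 0, kArena);
    sketch::MiniFreeList space(mem, kArena);
    void* a = space.Alloc(100);
    void* b = space.Alloc(200);
    space.Free(a);    // leaves a hole in front of b
    void* c = space.Alloc(50);
    assert(c == a);   // best fit reuses the hole, not the big end run
    space.Free(c);
    space.Free(b);    // coalesces with the free space on both sides
    free(mem);
    return 0;
  }
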
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index bc6e818..231cabc 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -154,8 +154,10 @@
   // Number of objects allocated since the space was created.
   virtual uint64_t GetTotalObjectsAllocated() const = 0;
 
-  // Allocate num_bytes without allowing growth.
-  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes) = 0;
+  // Allocate num_bytes without allowing growth. If the allocation
+  // succeeds, the output parameter bytes_allocated will be set to the
+  // number of bytes actually allocated, which is >= num_bytes.
+  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) = 0;
 
   // Return the storage space required by obj.
   virtual size_t AllocationSize(const mirror::Object* obj) = 0;
diff --git a/runtime/gc/space/space_test.cc b/runtime/gc/space/space_test.cc
index 3003140..455168c 100644
--- a/runtime/gc/space/space_test.cc
+++ b/runtime/gc/space/space_test.cc
@@ -15,6 +15,7 @@
  */
 
 #include "dlmalloc_space.h"
+#include "large_object_space.h"
 
 #include "common_test.h"
 #include "globals.h"
@@ -37,6 +38,11 @@
   }
 };
 
+static size_t test_rand(size_t* seed) {
+  *seed = *seed * 1103515245 + 12345;
+  return *seed;
+}
+
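
test_rand replaces the library rand() (removed further down) with an explicitly seeded linear congruential generator using the classic C-library constants, so the allocation sequence is deterministic and any failure reproduces exactly. A small check of that property:

  #include <cassert>
  #include <cstddef>

  static size_t test_rand(size_t* seed) {
    *seed = *seed * 1103515245 + 12345;  // classic C-library LCG constants
    return *seed;
  }

  int main() {
    size_t s1 = 0, s2 = 0;
    for (int i = 0; i < 1000; ++i) {
      assert(test_rand(&s1) == test_rand(&s2));  // identical streams per seed
    }
    return 0;
  }
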
 TEST_F(SpaceTest, Init) {
   {
     // Init < max == growth
@@ -80,6 +86,7 @@
 // allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that
 // the GC works with the ZygoteSpace.
 TEST_F(SpaceTest, ZygoteSpace) {
+    size_t dummy = 0;
     DlMallocSpace* space(DlMallocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
     ASSERT_TRUE(space != NULL);
 
@@ -88,32 +95,35 @@
     Thread* self = Thread::Current();
 
     // Succeeds, fits without adjusting the footprint limit.
-    mirror::Object* ptr1 = space->Alloc(self, 1 * MB);
+    mirror::Object* ptr1 = space->Alloc(self, 1 * MB, &dummy);
     EXPECT_TRUE(ptr1 != NULL);
 
     // Fails, requires a higher footprint limit.
-    mirror::Object* ptr2 = space->Alloc(self, 8 * MB);
+    mirror::Object* ptr2 = space->Alloc(self, 8 * MB, &dummy);
     EXPECT_TRUE(ptr2 == NULL);
 
     // Succeeds, adjusts the footprint.
-    mirror::Object* ptr3 = space->AllocWithGrowth(self, 8 * MB);
+    size_t ptr3_bytes_allocated;
+    mirror::Object* ptr3 = space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated);
     EXPECT_TRUE(ptr3 != NULL);
+    EXPECT_LE(8U * MB, ptr3_bytes_allocated);
 
     // Fails, requires a higher footprint limit.
-    mirror::Object* ptr4 = space->Alloc(self, 8 * MB);
+    mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy);
     EXPECT_TRUE(ptr4 == NULL);
 
     // Also fails, requires a higher allowed footprint.
-    mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB);
+    mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy);
     EXPECT_TRUE(ptr5 == NULL);
 
     // Release some memory.
     size_t free3 = space->AllocationSize(ptr3);
+    EXPECT_EQ(free3, ptr3_bytes_allocated);
     EXPECT_EQ(free3, space->Free(self, ptr3));
     EXPECT_LE(8U * MB, free3);
 
     // Succeeds, now that memory has been freed.
-    void* ptr6 = space->AllocWithGrowth(self, 9 * MB);
+    void* ptr6 = space->AllocWithGrowth(self, 9 * MB, &dummy);
     EXPECT_TRUE(ptr6 != NULL);
 
     // Final clean up.
@@ -122,22 +132,22 @@
     EXPECT_LE(1U * MB, free1);
 
     // Make sure that the zygote space isn't directly at the start of the space.
-    space->Alloc(self, 1U * MB);
+    space->Alloc(self, 1U * MB, &dummy);
     space = space->CreateZygoteSpace("alloc space");
 
     // Make space findable to the heap, will also delete space when runtime is cleaned up
     AddContinuousSpace(space);
 
     // Succeeds, fits without adjusting the footprint limit.
-    ptr1 = space->Alloc(self, 1 * MB);
+    ptr1 = space->Alloc(self, 1 * MB, &dummy);
     EXPECT_TRUE(ptr1 != NULL);
 
     // Fails, requires a higher footprint limit.
-    ptr2 = space->Alloc(self, 8 * MB);
+    ptr2 = space->Alloc(self, 8 * MB, &dummy);
     EXPECT_TRUE(ptr2 == NULL);
 
     // Succeeds, adjusts the footprint.
-    ptr3 = space->AllocWithGrowth(self, 2 * MB);
+    ptr3 = space->AllocWithGrowth(self, 2 * MB, &dummy);
     EXPECT_TRUE(ptr3 != NULL);
     space->Free(self, ptr3);
 
@@ -148,6 +158,7 @@
 }
 
 TEST_F(SpaceTest, AllocAndFree) {
+  size_t dummy = 0;
   DlMallocSpace* space(DlMallocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
   ASSERT_TRUE(space != NULL);
   Thread* self = Thread::Current();
@@ -156,32 +167,35 @@
   AddContinuousSpace(space);
 
   // Succeeds, fits without adjusting the footprint limit.
-  mirror::Object* ptr1 = space->Alloc(self, 1 * MB);
+  mirror::Object* ptr1 = space->Alloc(self, 1 * MB, &dummy);
   EXPECT_TRUE(ptr1 != NULL);
 
   // Fails, requires a higher footprint limit.
-  mirror::Object* ptr2 = space->Alloc(self, 8 * MB);
+  mirror::Object* ptr2 = space->Alloc(self, 8 * MB, &dummy);
   EXPECT_TRUE(ptr2 == NULL);
 
   // Succeeds, adjusts the footprint.
-  mirror::Object* ptr3 = space->AllocWithGrowth(self, 8 * MB);
+  size_t ptr3_bytes_allocated;
+  mirror::Object* ptr3 = space->AllocWithGrowth(self, 8 * MB, &ptr3_bytes_allocated);
   EXPECT_TRUE(ptr3 != NULL);
+  EXPECT_LE(8U * MB, ptr3_bytes_allocated);
 
   // Fails, requires a higher footprint limit.
-  mirror::Object* ptr4 = space->Alloc(self, 8 * MB);
+  mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy);
   EXPECT_TRUE(ptr4 == NULL);
 
   // Also fails, requires a higher allowed footprint.
-  mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB);
+  mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy);
   EXPECT_TRUE(ptr5 == NULL);
 
   // Release some memory.
   size_t free3 = space->AllocationSize(ptr3);
+  EXPECT_EQ(free3, ptr3_bytes_allocated);
   space->Free(self, ptr3);
   EXPECT_LE(8U * MB, free3);
 
   // Succeeds, now that memory has been freed.
-  void* ptr6 = space->AllocWithGrowth(self, 9 * MB);
+  void* ptr6 = space->AllocWithGrowth(self, 9 * MB, &dummy);
   EXPECT_TRUE(ptr6 != NULL);
 
   // Final clean up.
@@ -190,6 +204,67 @@
   EXPECT_LE(1U * MB, free1);
 }
 
+TEST_F(SpaceTest, LargeObjectTest) {
+  size_t rand_seed = 0;
+  for (size_t i = 0; i < 2; ++i) {
+    LargeObjectSpace* los = NULL;
+    if (i == 0) {
+      los = space::LargeObjectMapSpace::Create("large object space");
+    } else {
+      los = space::FreeListSpace::Create("large object space", NULL, 128 * MB);
+    }
+
+    static const size_t num_allocations = 64;
+    static const size_t max_allocation_size = 0x100000;
+    std::vector<std::pair<mirror::Object*, size_t> > requests;
+
+    for (size_t phase = 0; phase < 2; ++phase) {
+      while (requests.size() < num_allocations) {
+        size_t request_size = test_rand(&rand_seed) % max_allocation_size;
+        size_t allocation_size = 0;
+        mirror::Object* obj = los->Alloc(Thread::Current(), request_size, &allocation_size);
+        ASSERT_TRUE(obj != NULL);
+        ASSERT_EQ(allocation_size, los->AllocationSize(obj));
+        ASSERT_GE(allocation_size, request_size);
+        // Fill in our magic value.
+        byte magic = (request_size & 0xFF) | 1;
+        memset(obj, magic, request_size);
+        requests.push_back(std::make_pair(obj, request_size));
+      }
+
+      // "Randomly" shuffle the requests.
+      for (size_t k = 0; k < 10; ++k) {
+        for (size_t j = 0; j < requests.size(); ++j) {
+          std::swap(requests[j], requests[test_rand(&rand_seed) % requests.size()]);
+        }
+      }
+
+      // Free half of the allocations in the first phase, and all of them in the second.
+      size_t limit = !phase ? requests.size() / 2 : 0;
+      while (requests.size() > limit) {
+        mirror::Object* obj = requests.back().first;
+        size_t request_size = requests.back().second;
+        requests.pop_back();
+        byte magic = (request_size & 0xFF) | 1;
+        for (size_t k = 0; k < request_size; ++k) {
+          ASSERT_EQ(reinterpret_cast<const byte*>(obj)[k], magic);
+        }
+        ASSERT_GE(los->Free(Thread::Current(), obj), request_size);
+      }
+    }
+
+    size_t bytes_allocated = 0;
+    // Checks that the coalescing works.
+    mirror::Object* obj = los->Alloc(Thread::Current(), 100 * MB, &bytes_allocated);
+    EXPECT_TRUE(obj != NULL);
+    los->Free(Thread::Current(), obj);
+
+    EXPECT_EQ(0U, los->GetBytesAllocated());
+    EXPECT_EQ(0U, los->GetObjectsAllocated());
+    delete los;
+  }
+}
+
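
The test's magic-value discipline is worth calling out: each block is filled with a nonzero marker derived from its request size and re-verified byte by byte just before being freed, which catches overlapping allocations and premature reuse by the space under test. The pattern in isolation, as a hypothetical standalone sketch:

  #include <cassert>
  #include <cstdlib>
  #include <cstring>

  typedef unsigned char byte;

  int main() {
    size_t request_size = 4096;
    byte* obj = static_cast<byte*>(malloc(request_size));
    byte magic = (request_size & 0xFF) | 1;  // | 1 keeps the marker nonzero
    memset(obj, magic, request_size);
    // ... other allocations and frees would happen here; obj must survive ...
    for (size_t k = 0; k < request_size; ++k) {
      assert(obj[k] == magic);  // any corruption of the block fails here
    }
    free(obj);
    return 0;
  }
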
 TEST_F(SpaceTest, AllocAndFreeList) {
   DlMallocSpace* space(DlMallocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
   ASSERT_TRUE(space != NULL);
@@ -201,7 +276,9 @@
   // Succeeds, fits without adjusting the max allowed footprint.
   mirror::Object* lots_of_objects[1024];
   for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
-    lots_of_objects[i] = space->Alloc(self, 16);
+    size_t allocation_size = 0;
+    lots_of_objects[i] = space->Alloc(self, 16, &allocation_size);
+    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i]));
     EXPECT_TRUE(lots_of_objects[i] != NULL);
   }
 
@@ -213,7 +290,9 @@
 
   // Succeeds, fits by adjusting the max allowed footprint.
   for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
-    lots_of_objects[i] = space->AllocWithGrowth(self, 1024);
+    size_t allocation_size = 0;
+    lots_of_objects[i] = space->AllocWithGrowth(self, 1024, &allocation_size);
+    EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i]));
     EXPECT_TRUE(lots_of_objects[i] != NULL);
   }
 
@@ -224,11 +303,6 @@
   }
 }
 
-static size_t test_rand() {
-  // TODO: replace this with something random yet deterministic
-  return rand();
-}
-
 void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(DlMallocSpace* space, intptr_t object_size,
                                                     int round, size_t growth_limit) {
   if (((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit))) ||
@@ -261,6 +335,7 @@
   size_t last_object = 0;  // last object for which allocation succeeded
   size_t amount_allocated = 0;  // amount of space allocated
   Thread* self = Thread::Current();
+  size_t rand_seed = 123456789;
   for (size_t i = 0; i < max_objects; i++) {
     size_t alloc_fails = 0;  // number of failed allocations
     size_t max_fails = 30;  // number of times we fail allocation before giving up
@@ -269,22 +344,24 @@
       if (object_size > 0) {
         alloc_size = object_size;
       } else {
-        alloc_size = test_rand() % static_cast<size_t>(-object_size);
+        alloc_size = test_rand(&rand_seed) % static_cast<size_t>(-object_size);
         if (alloc_size < 8) {
           alloc_size = 8;
         }
       }
       mirror::Object* object;
+      size_t bytes_allocated = 0;
       if (round <= 1) {
-        object = space->Alloc(self, alloc_size);
+        object = space->Alloc(self, alloc_size, &bytes_allocated);
       } else {
-        object = space->AllocWithGrowth(self, alloc_size);
+        object = space->AllocWithGrowth(self, alloc_size, &bytes_allocated);
       }
       footprint = mspace_footprint(mspace);
       EXPECT_GE(space->Size(), footprint);  // invariant
       if (object != NULL) {  // allocation succeeded
         lots_of_objects.get()[i] = object;
         size_t allocation_size = space->AllocationSize(object);
+        EXPECT_EQ(bytes_allocated, allocation_size);
         if (object_size > 0) {
           EXPECT_GE(allocation_size, static_cast<size_t>(object_size));
         } else {
@@ -354,10 +431,11 @@
   // All memory was released, try a large allocation to check freed memory is being coalesced
   mirror::Object* large_object;
   size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
+  size_t bytes_allocated = 0;
   if (round <= 1) {
-    large_object = space->Alloc(self, three_quarters_space);
+    large_object = space->Alloc(self, three_quarters_space, &bytes_allocated);
   } else {
-    large_object = space->AllocWithGrowth(self, three_quarters_space);
+    large_object = space->AllocWithGrowth(self, three_quarters_space, &bytes_allocated);
   }
   EXPECT_TRUE(large_object != NULL);
 
diff --git a/runtime/image_test.cc b/runtime/image_test.cc
index 22bed2e..7f90505 100644
--- a/runtime/image_test.cc
+++ b/runtime/image_test.cc
@@ -46,6 +46,11 @@
       ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
       base::TimingLogger timings("ImageTest::WriteRead", false, false);
       timings.StartSplit("CompileAll");
+#if defined(ART_USE_PORTABLE_COMPILER)
+      // TODO: we disable this for portable so the test executes in a reasonable amount of time.
+      //       We shouldn't need to do this.
+      runtime_->SetSmallMode(true);
+#endif
       compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings);
 
       ScopedObjectAccess soa(Thread::Current());
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index c0b85f4..c3b66b3 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -60,7 +60,7 @@
       const void* new_code;
       if (uninstall) {
         if (forced_interpret_only_ && !method->IsNative() && !method->IsProxyMethod()) {
-          new_code = GetInterpreterEntryPoint();
+          new_code = GetCompiledCodeToInterpreterBridge();
         } else if (is_initialized || !method->IsStatic() || method->IsConstructor()) {
           new_code = class_linker->GetOatCodeFor(method);
         } else {
@@ -68,9 +68,9 @@
         }
       } else {  // !uninstall
         if (!interpreter_stubs_installed_ || method->IsNative()) {
-          new_code = GetInstrumentationEntryPoint();
+          new_code = GetQuickInstrumentationEntryPoint();
         } else {
-          new_code = GetInterpreterEntryPoint();
+          new_code = GetCompiledCodeToInterpreterBridge();
         }
       }
       method->SetEntryPointFromCompiledCode(new_code);
@@ -82,15 +82,15 @@
       const void* new_code;
       if (uninstall) {
         if (forced_interpret_only_ && !method->IsNative() && !method->IsProxyMethod()) {
-          new_code = GetInterpreterEntryPoint();
+          new_code = GetCompiledCodeToInterpreterBridge();
         } else {
           new_code = class_linker->GetOatCodeFor(method);
         }
       } else {  // !uninstall
         if (!interpreter_stubs_installed_ || method->IsNative()) {
-          new_code = GetInstrumentationEntryPoint();
+          new_code = GetQuickInstrumentationEntryPoint();
         } else {
-          new_code = GetInterpreterEntryPoint();
+          new_code = GetCompiledCodeToInterpreterBridge();
         }
       }
       method->SetEntryPointFromCompiledCode(new_code);
@@ -159,7 +159,7 @@
     LOG(INFO) << "Installing exit stubs in " << thread_name;
   }
   UniquePtr<Context> context(Context::Create());
-  uintptr_t instrumentation_exit_pc = GetInstrumentationExitPc();
+  uintptr_t instrumentation_exit_pc = GetQuickInstrumentationExitPc();
   InstallStackVisitor visitor(thread, context.get(), instrumentation_exit_pc);
   visitor.WalkStack(true);
 
@@ -251,7 +251,7 @@
   std::deque<instrumentation::InstrumentationStackFrame>* stack = thread->GetInstrumentationStack();
   if (stack->size() > 0) {
     Instrumentation* instrumentation = reinterpret_cast<Instrumentation*>(arg);
-    uintptr_t instrumentation_exit_pc = GetInstrumentationExitPc();
+    uintptr_t instrumentation_exit_pc = GetQuickInstrumentationExitPc();
     RestoreStackVisitor visitor(thread, instrumentation_exit_pc, instrumentation);
     visitor.WalkStack(true);
     CHECK_EQ(visitor.frames_removed_, stack->size());
@@ -384,9 +384,9 @@
     method->SetEntryPointFromCompiledCode(code);
   } else {
     if (!interpreter_stubs_installed_ || method->IsNative()) {
-      method->SetEntryPointFromCompiledCode(GetInstrumentationEntryPoint());
+      method->SetEntryPointFromCompiledCode(GetQuickInstrumentationEntryPoint());
     } else {
-      method->SetEntryPointFromCompiledCode(GetInterpreterEntryPoint());
+      method->SetEntryPointFromCompiledCode(GetCompiledCodeToInterpreterBridge());
     }
   }
 }
@@ -396,8 +396,8 @@
   if (LIKELY(!instrumentation_stubs_installed_)) {
     const void* code = method->GetEntryPointFromCompiledCode();
     DCHECK(code != NULL);
-    if (LIKELY(code != GetResolutionTrampoline(runtime->GetClassLinker()) &&
-               code != GetInterpreterEntryPoint())) {
+    if (LIKELY(code != GetQuickResolutionTrampoline(runtime->GetClassLinker()) &&
+               code != GetQuickToInterpreterBridge())) {
       return code;
     }
   }
@@ -548,7 +548,7 @@
           << " result is " << std::hex << return_value.GetJ();
     }
     self->SetDeoptimizationReturnValue(return_value);
-    return static_cast<uint64_t>(GetDeoptimizationEntryPoint()) |
+    return static_cast<uint64_t>(GetQuickDeoptimizationEntryPoint()) |
         (static_cast<uint64_t>(*return_pc) << 32);
   } else {
     if (kVerboseInstrumentation) {
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index ef4b95c..6e35d93 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -148,7 +148,7 @@
     }
   } else {
     // Not special, continue with regular interpreter execution.
-    artInterpreterToInterpreterEntry(self, mh, code_item, shadow_frame, result);
+    artInterpreterToInterpreterBridge(self, mh, code_item, shadow_frame, result);
   }
 }
 
@@ -3039,6 +3039,10 @@
 
 static inline JValue Execute(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
                              ShadowFrame& shadow_frame, JValue result_register) {
+  DCHECK(shadow_frame.GetMethod() == mh.GetMethod() ||
+         shadow_frame.GetMethod()->GetDeclaringClass()->IsProxyClass());
+  DCHECK(!shadow_frame.GetMethod()->IsAbstract());
+  DCHECK(!shadow_frame.GetMethod()->IsNative());
   if (shadow_frame.GetMethod()->IsPreverified()) {
     // Enter the "without access check" interpreter.
     return ExecuteImpl<false>(self, mh, code_item, shadow_frame, result_register);
@@ -3150,8 +3154,7 @@
 }
 
 JValue EnterInterpreterFromStub(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
-                                ShadowFrame& shadow_frame)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+                                ShadowFrame& shadow_frame) {
   DCHECK_EQ(self, Thread::Current());
   if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
     ThrowStackOverflowError(self);
@@ -3161,10 +3164,9 @@
   return Execute(self, mh, code_item, shadow_frame, JValue());
 }
 
-void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
-                                      const DexFile::CodeItem* code_item,
-                                      ShadowFrame* shadow_frame, JValue* result)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
+                                                  const DexFile::CodeItem* code_item,
+                                                  ShadowFrame* shadow_frame, JValue* result) {
   if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
     ThrowStackOverflowError(self);
     return;
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index 17884b9..af4a147 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -47,9 +47,9 @@
                                        ShadowFrame& shadow_frame)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
-                                                const DexFile::CodeItem* code_item,
-                                                ShadowFrame* shadow_frame, JValue* result)
+extern "C" void artInterpreterToInterpreterBridge(Thread* self, MethodHelper& mh,
+                                                  const DexFile::CodeItem* code_item,
+                                                  ShadowFrame* shadow_frame, JValue* result)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 }  // namespace interpreter
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 6681d56..d1de6e6 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -2853,10 +2853,11 @@
 
   VLOG(jni) << "[Added shared library \"" << path << "\" for ClassLoader " << class_loader << "]";
 
-  bool result = true;
+  bool was_successful = false;
   void* sym = dlsym(handle, "JNI_OnLoad");
   if (sym == NULL) {
     VLOG(jni) << "[No JNI_OnLoad found in \"" << path << "\"]";
+    was_successful = true;
   } else {
     // Call JNI_OnLoad.  We have to override the current class
     // loader, which will always be "null" since the stuff at the
@@ -2876,7 +2877,9 @@
 
     self->SetClassLoaderOverride(old_class_loader);
 
-    if (IsBadJniVersion(version)) {
+    if (version == JNI_ERR) {
+      StringAppendF(&detail, "JNI_ERR returned from JNI_OnLoad in \"%s\"", path.c_str());
+    } else if (IsBadJniVersion(version)) {
       StringAppendF(&detail, "Bad JNI version returned from JNI_OnLoad in \"%s\": %d",
                     path.c_str(), version);
       // It's unwise to call dlclose() here, but we can mark it
@@ -2885,14 +2888,15 @@
       // be some partially-initialized stuff accessible through
       // newly-registered native method calls.  We could try to
       // unregister them, but that doesn't seem worthwhile.
-      result = false;
+    } else {
+      was_successful = true;
     }
-    VLOG(jni) << "[Returned " << (result ? "successfully" : "failure")
+    VLOG(jni) << "[Returned " << (was_successful ? "successfully" : "failure")
               << " from JNI_OnLoad in \"" << path << "\"]";
   }
 
-  library->SetResult(result);
-  return result;
+  library->SetResult(was_successful);
+  return was_successful;
 }
 
 void* JavaVMExt::FindCodeForNativeMethod(AbstractMethod* m) {
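
The JNI_OnLoad handling now distinguishes three outcomes: a missing symbol counts as success, an explicit JNI_ERR is reported as such, and an unrecognized version is a separate failure. A compile-only sketch of that decision tree (the helper name and the inlined version check are stand-ins for ART's IsBadJniVersion):

  #include <dlfcn.h>
  #include <jni.h>
  #include <string>

  typedef jint (*JNI_OnLoadFn)(JavaVM*, void*);

  static bool CallJniOnLoad(void* handle, JavaVM* vm, std::string* detail) {
    void* sym = dlsym(handle, "JNI_OnLoad");
    if (sym == nullptr) {
      return true;  // no JNI_OnLoad: nothing to call, treated as success
    }
    jint version = reinterpret_cast<JNI_OnLoadFn>(sym)(vm, nullptr);
    if (version == JNI_ERR) {
      *detail = "JNI_ERR returned from JNI_OnLoad";
      return false;
    }
    if (version != JNI_VERSION_1_2 && version != JNI_VERSION_1_4 &&
        version != JNI_VERSION_1_6) {  // stand-in for IsBadJniVersion()
      *detail = "Bad JNI version returned from JNI_OnLoad";
      return false;
    }
    return true;  // a recognized version: the library is usable
  }
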
diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h
index ad66ada..fcac481 100644
--- a/runtime/jni_internal.h
+++ b/runtime/jni_internal.h
@@ -144,6 +144,10 @@
     return Offset(OFFSETOF_MEMBER(JNIEnvExt, local_ref_cookie));
   }
 
+  static Offset SelfOffset() {
+    return Offset(OFFSETOF_MEMBER(JNIEnvExt, self));
+  }
+
   Thread* const self;
   JavaVMExt* vm;
 
diff --git a/runtime/mirror/abstract_method-inl.h b/runtime/mirror/abstract_method-inl.h
index d235e3e..8fde99b 100644
--- a/runtime/mirror/abstract_method-inl.h
+++ b/runtime/mirror/abstract_method-inl.h
@@ -114,11 +114,11 @@
   if (IsNative() || IsRuntimeMethod() || IsProxyMethod()) {
     return;
   }
-  if (pc == GetInstrumentationExitPc()) {
+  if (pc == GetQuickInstrumentationExitPc()) {
     return;
   }
   const void* code = GetEntryPointFromCompiledCode();
-  if (code == GetInterpreterEntryPoint() || code == GetInstrumentationEntryPoint()) {
+  if (code == GetCompiledCodeToInterpreterBridge() || code == GetQuickInstrumentationEntryPoint()) {
     return;
   }
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/abstract_method.cc
index 4d7f99e..93065e7 100644
--- a/runtime/mirror/abstract_method.cc
+++ b/runtime/mirror/abstract_method.cc
@@ -321,6 +321,7 @@
   return native_method != jni_stub;
 }
 
+extern "C" void art_work_around_app_jni_bugs(JNIEnv*, jobject);
 void AbstractMethod::RegisterNative(Thread* self, const void* native_method) {
   DCHECK(Thread::Current() == self);
   CHECK(IsNative()) << PrettyMethod(this);
@@ -332,10 +333,10 @@
     // around JNI bugs, that include not giving Object** SIRT references to native methods. Direct
     // the native method to runtime support and store the target somewhere runtime support will
     // find it.
-#if defined(__arm__) && !defined(ART_USE_PORTABLE_COMPILER)
-    SetNativeMethod(native_method);
-#else
+#if defined(__i386__)
     UNIMPLEMENTED(FATAL);
+#else
+    SetNativeMethod(reinterpret_cast<void*>(art_work_around_app_jni_bugs));
 #endif
     SetFieldPtr<const uint8_t*>(OFFSET_OF_OBJECT_MEMBER(AbstractMethod, gc_map_),
         reinterpret_cast<const uint8_t*>(native_method), false);
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 97126cb..c64caa8 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -101,7 +101,8 @@
 uint16_t String::CharAt(int32_t index) const {
   // TODO: do we need this? Equals is the only caller, and could
   // bounds check itself.
-  if (index < 0 || index >= count_) {
+  DCHECK_GE(count_, 0);  // ensures the unsigned comparison is safe.
+  if (UNLIKELY(static_cast<uint32_t>(index) >= static_cast<uint32_t>(count_))) {
     Thread* self = Thread::Current();
     ThrowLocation throw_location = self->GetCurrentLocationForThrow();
     self->ThrowNewExceptionF(throw_location, "Ljava/lang/StringIndexOutOfBoundsException;",
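
The CharAt change folds the two-sided bounds check into one unsigned comparison: casting a possibly negative index to uint32_t maps negatives to very large values, so a single compare rejects both underflow and overflow, relying on the DCHECK that count_ is non-negative. The trick in isolation:

  #include <cassert>
  #include <cstdint>

  // One unsigned compare covers index < 0 and index >= count, given count >= 0.
  static bool InBounds(int32_t index, int32_t count) {
    return static_cast<uint32_t>(index) < static_cast<uint32_t>(count);
  }

  int main() {
    assert(!InBounds(-1, 10));  // negative maps to a huge unsigned value
    assert(InBounds(0, 10) && InBounds(9, 10));
    assert(!InBounds(10, 10));
    return 0;
  }
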
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index b352d08..dab9cda 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -131,11 +131,9 @@
   return env->NewStringUTF(kIsDebugBuild ? "libartd.so" : "libart.so");
 }
 
-#if !defined(ART_USE_PORTABLE_COMPILER)
 static void DisableCheckJniCallback(Thread* t, void*) {
   t->GetJniEnv()->SetCheckJniEnabled(false);
 }
-#endif
 
 static void VMRuntime_setTargetSdkVersion(JNIEnv* env, jobject, jint targetSdkVersion) {
   // This is the target SDK version of the app we're about to run.
@@ -144,8 +142,6 @@
   if (targetSdkVersion > 0 && targetSdkVersion <= 13 /* honeycomb-mr2 */) {
     Runtime* runtime = Runtime::Current();
     JavaVMExt* vm = runtime->GetJavaVM();
-
-#if !defined(ART_USE_PORTABLE_COMPILER)
     if (vm->check_jni) {
       LOG(WARNING) << "Turning off CheckJNI so we can turn on JNI app bug workarounds...";
       Thread* self = static_cast<JNIEnvExt*>(env)->self;
@@ -158,11 +154,6 @@
               << targetSdkVersion << "...";
 
     vm->work_around_app_jni_bugs = true;
-#else
-    UNUSED(env);
-    LOG(WARNING) << "LLVM does not work-around app jni bugs.";
-    vm->work_around_app_jni_bugs = false;
-#endif
   }
 }
 
diff --git a/runtime/oat.cc b/runtime/oat.cc
index e606953..c01f77c 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -22,7 +22,7 @@
 namespace art {
 
 const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '0', '6', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '0', '7', '\0' };
 
 OatHeader::OatHeader() {
   memset(this, 0, sizeof(*this));
@@ -57,10 +57,13 @@
   UpdateChecksum(image_file_location.data(), image_file_location_size_);
 
   executable_offset_ = 0;
-  interpreter_to_interpreter_entry_offset_ = 0;
-  interpreter_to_quick_entry_offset_ = 0;
+  interpreter_to_interpreter_bridge_offset_ = 0;
+  interpreter_to_compiled_code_bridge_offset_ = 0;
+  jni_dlsym_lookup_offset_ = 0;
   portable_resolution_trampoline_offset_ = 0;
+  portable_to_interpreter_bridge_offset_ = 0;
   quick_resolution_trampoline_offset_ = 0;
+  quick_to_interpreter_bridge_offset_ = 0;
 }
 
 bool OatHeader::IsValid() const {
@@ -111,42 +114,61 @@
   UpdateChecksum(&executable_offset_, sizeof(executable_offset));
 }
 
-const void* OatHeader::GetInterpreterToInterpreterEntry() const {
-  return reinterpret_cast<const uint8_t*>(this) + GetInterpreterToInterpreterEntryOffset();
+const void* OatHeader::GetInterpreterToInterpreterBridge() const {
+  return reinterpret_cast<const uint8_t*>(this) + GetInterpreterToInterpreterBridgeOffset();
 }
 
-uint32_t OatHeader::GetInterpreterToInterpreterEntryOffset() const {
+uint32_t OatHeader::GetInterpreterToInterpreterBridgeOffset() const {
   DCHECK(IsValid());
-  CHECK_GE(interpreter_to_interpreter_entry_offset_, executable_offset_);
-  return interpreter_to_interpreter_entry_offset_;
+  CHECK_GE(interpreter_to_interpreter_bridge_offset_, executable_offset_);
+  return interpreter_to_interpreter_bridge_offset_;
 }
 
-void OatHeader::SetInterpreterToInterpreterEntryOffset(uint32_t offset) {
+void OatHeader::SetInterpreterToInterpreterBridgeOffset(uint32_t offset) {
   CHECK(offset == 0 || offset >= executable_offset_);
   DCHECK(IsValid());
-  DCHECK_EQ(interpreter_to_interpreter_entry_offset_, 0U) << offset;
+  DCHECK_EQ(interpreter_to_interpreter_bridge_offset_, 0U) << offset;
 
-  interpreter_to_interpreter_entry_offset_ = offset;
-  UpdateChecksum(&interpreter_to_interpreter_entry_offset_, sizeof(offset));
+  interpreter_to_interpreter_bridge_offset_ = offset;
+  UpdateChecksum(&interpreter_to_interpreter_bridge_offset_, sizeof(offset));
 }
 
-const void* OatHeader::GetInterpreterToQuickEntry() const {
-  return reinterpret_cast<const uint8_t*>(this) + GetInterpreterToQuickEntryOffset();
+const void* OatHeader::GetInterpreterToCompiledCodeBridge() const {
+  return reinterpret_cast<const uint8_t*>(this) + GetInterpreterToCompiledCodeBridgeOffset();
 }
 
-uint32_t OatHeader::GetInterpreterToQuickEntryOffset() const {
+uint32_t OatHeader::GetInterpreterToCompiledCodeBridgeOffset() const {
   DCHECK(IsValid());
-  CHECK_GE(interpreter_to_quick_entry_offset_, interpreter_to_interpreter_entry_offset_);
-  return interpreter_to_quick_entry_offset_;
+  CHECK_GE(interpreter_to_compiled_code_bridge_offset_, interpreter_to_interpreter_bridge_offset_);
+  return interpreter_to_compiled_code_bridge_offset_;
 }
 
-void OatHeader::SetInterpreterToQuickEntryOffset(uint32_t offset) {
-  CHECK(offset == 0 || offset >= interpreter_to_interpreter_entry_offset_);
+void OatHeader::SetInterpreterToCompiledCodeBridgeOffset(uint32_t offset) {
+  CHECK(offset == 0 || offset >= interpreter_to_interpreter_bridge_offset_);
   DCHECK(IsValid());
-  DCHECK_EQ(interpreter_to_quick_entry_offset_, 0U) << offset;
+  DCHECK_EQ(interpreter_to_compiled_code_bridge_offset_, 0U) << offset;
 
-  interpreter_to_quick_entry_offset_ = offset;
-  UpdateChecksum(&interpreter_to_quick_entry_offset_, sizeof(offset));
+  interpreter_to_compiled_code_bridge_offset_ = offset;
+  UpdateChecksum(&interpreter_to_compiled_code_bridge_offset_, sizeof(offset));
+}
+
+const void* OatHeader::GetJniDlsymLookup() const {
+  return reinterpret_cast<const uint8_t*>(this) + GetJniDlsymLookupOffset();
+}
+
+uint32_t OatHeader::GetJniDlsymLookupOffset() const {
+  DCHECK(IsValid());
+  CHECK_GE(jni_dlsym_lookup_offset_, interpreter_to_compiled_code_bridge_offset_);
+  return jni_dlsym_lookup_offset_;
+}
+
+void OatHeader::SetJniDlsymLookupOffset(uint32_t offset) {
+  CHECK(offset == 0 || offset >= interpreter_to_compiled_code_bridge_offset_);
+  DCHECK(IsValid());
+  DCHECK_EQ(jni_dlsym_lookup_offset_, 0U) << offset;
+
+  jni_dlsym_lookup_offset_ = offset;
+  UpdateChecksum(&jni_dlsym_lookup_offset_, sizeof(offset));
 }
 
 const void* OatHeader::GetPortableResolutionTrampoline() const {
@@ -155,12 +177,12 @@
 
 uint32_t OatHeader::GetPortableResolutionTrampolineOffset() const {
   DCHECK(IsValid());
-  CHECK_GE(portable_resolution_trampoline_offset_, interpreter_to_quick_entry_offset_);
+  CHECK_GE(portable_resolution_trampoline_offset_, jni_dlsym_lookup_offset_);
   return portable_resolution_trampoline_offset_;
 }
 
 void OatHeader::SetPortableResolutionTrampolineOffset(uint32_t offset) {
-  CHECK(offset == 0 || offset >= interpreter_to_quick_entry_offset_);
+  CHECK(offset == 0 || offset >= jni_dlsym_lookup_offset_);
   DCHECK(IsValid());
   DCHECK_EQ(portable_resolution_trampoline_offset_, 0U) << offset;
 
@@ -168,18 +190,37 @@
   UpdateChecksum(&portable_resolution_trampoline_offset_, sizeof(offset));
 }
 
+const void* OatHeader::GetPortableToInterpreterBridge() const {
+  return reinterpret_cast<const uint8_t*>(this) + GetPortableToInterpreterBridgeOffset();
+}
+
+uint32_t OatHeader::GetPortableToInterpreterBridgeOffset() const {
+  DCHECK(IsValid());
+  CHECK_GE(portable_to_interpreter_bridge_offset_, portable_resolution_trampoline_offset_);
+  return portable_to_interpreter_bridge_offset_;
+}
+
+void OatHeader::SetPortableToInterpreterBridgeOffset(uint32_t offset) {
+  CHECK(offset == 0 || offset >= portable_resolution_trampoline_offset_);
+  DCHECK(IsValid());
+  DCHECK_EQ(portable_to_interpreter_bridge_offset_, 0U) << offset;
+
+  portable_to_interpreter_bridge_offset_ = offset;
+  UpdateChecksum(&portable_to_interpreter_bridge_offset_, sizeof(offset));
+}
+
 const void* OatHeader::GetQuickResolutionTrampoline() const {
   return reinterpret_cast<const uint8_t*>(this) + GetQuickResolutionTrampolineOffset();
 }
 
 uint32_t OatHeader::GetQuickResolutionTrampolineOffset() const {
   DCHECK(IsValid());
-  CHECK_GE(quick_resolution_trampoline_offset_, portable_resolution_trampoline_offset_);
+  CHECK_GE(quick_resolution_trampoline_offset_, portable_to_interpreter_bridge_offset_);
   return quick_resolution_trampoline_offset_;
 }
 
 void OatHeader::SetQuickResolutionTrampolineOffset(uint32_t offset) {
-  CHECK(offset == 0 || offset >= portable_resolution_trampoline_offset_);
+  CHECK(offset == 0 || offset >= portable_to_interpreter_bridge_offset_);
   DCHECK(IsValid());
   DCHECK_EQ(quick_resolution_trampoline_offset_, 0U) << offset;
 
@@ -187,6 +228,25 @@
   UpdateChecksum(&quick_resolution_trampoline_offset_, sizeof(offset));
 }
 
+const void* OatHeader::GetQuickToInterpreterBridge() const {
+  return reinterpret_cast<const uint8_t*>(this) + GetQuickToInterpreterBridgeOffset();
+}
+
+uint32_t OatHeader::GetQuickToInterpreterBridgeOffset() const {
+  DCHECK(IsValid());
+  CHECK_GE(quick_to_interpreter_bridge_offset_, quick_resolution_trampoline_offset_);
+  return quick_to_interpreter_bridge_offset_;
+}
+
+void OatHeader::SetQuickToInterpreterBridgeOffset(uint32_t offset) {
+  CHECK(offset == 0 || offset >= quick_resolution_trampoline_offset_);
+  DCHECK(IsValid());
+  DCHECK_EQ(quick_to_interpreter_bridge_offset_, 0U) << offset;
+
+  quick_to_interpreter_bridge_offset_ = offset;
+  UpdateChecksum(&quick_to_interpreter_bridge_offset_, sizeof(offset));
+}
+
 uint32_t OatHeader::GetImageFileLocationOatChecksum() const {
   CHECK(IsValid());
   return image_file_location_oat_checksum_;
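
A minimal self-contained sketch of the write-once, monotonically ordered offset invariant the setters above all enforce (toy names, not ART code):

    #include <cassert>
    #include <cstdint>

    // Toy header with two consecutive stub offsets. Each setter mirrors the
    // OatHeader pattern: the field is write-once, and a non-zero offset may
    // not precede the field declared before it, so stubs land in a fixed order.
    struct ToyHeader {
      uint32_t first_offset_;
      uint32_t second_offset_;

      ToyHeader() : first_offset_(0), second_offset_(0) {}

      void SetFirstOffset(uint32_t offset) {
        assert(first_offset_ == 0);  // write-once
        first_offset_ = offset;
      }

      void SetSecondOffset(uint32_t offset) {
        assert(offset == 0 || offset >= first_offset_);  // fixed layout order
        assert(second_offset_ == 0);  // write-once
        second_offset_ = offset;
      }
    };

    int main() {
      ToyHeader h;
      h.SetFirstOffset(128);
      h.SetSecondOffset(192);  // OK: 192 >= 128
      return 0;
    }
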
diff --git a/runtime/oat.h b/runtime/oat.h
index 4bd1871..a5c6bed 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -44,18 +44,32 @@
   }
   uint32_t GetExecutableOffset() const;
   void SetExecutableOffset(uint32_t executable_offset);
-  const void* GetInterpreterToInterpreterEntry() const;
-  uint32_t GetInterpreterToInterpreterEntryOffset() const;
-  void SetInterpreterToInterpreterEntryOffset(uint32_t offset);
-  const void* GetInterpreterToQuickEntry() const;
-  uint32_t GetInterpreterToQuickEntryOffset() const;
-  void SetInterpreterToQuickEntryOffset(uint32_t offset);
+
+  const void* GetInterpreterToInterpreterBridge() const;
+  uint32_t GetInterpreterToInterpreterBridgeOffset() const;
+  void SetInterpreterToInterpreterBridgeOffset(uint32_t offset);
+  const void* GetInterpreterToCompiledCodeBridge() const;
+  uint32_t GetInterpreterToCompiledCodeBridgeOffset() const;
+  void SetInterpreterToCompiledCodeBridgeOffset(uint32_t offset);
+
+  const void* GetJniDlsymLookup() const;
+  uint32_t GetJniDlsymLookupOffset() const;
+  void SetJniDlsymLookupOffset(uint32_t offset);
+
   const void* GetPortableResolutionTrampoline() const;
   uint32_t GetPortableResolutionTrampolineOffset() const;
   void SetPortableResolutionTrampolineOffset(uint32_t offset);
+  const void* GetPortableToInterpreterBridge() const;
+  uint32_t GetPortableToInterpreterBridgeOffset() const;
+  void SetPortableToInterpreterBridgeOffset(uint32_t offset);
+
   const void* GetQuickResolutionTrampoline() const;
   uint32_t GetQuickResolutionTrampolineOffset() const;
   void SetQuickResolutionTrampolineOffset(uint32_t offset);
+  const void* GetQuickToInterpreterBridge() const;
+  uint32_t GetQuickToInterpreterBridgeOffset() const;
+  void SetQuickToInterpreterBridgeOffset(uint32_t offset);
+
   InstructionSet GetInstructionSet() const;
   uint32_t GetImageFileLocationOatChecksum() const;
   uint32_t GetImageFileLocationOatDataBegin() const;
@@ -74,10 +88,13 @@
   InstructionSet instruction_set_;
   uint32_t dex_file_count_;
   uint32_t executable_offset_;
-  uint32_t interpreter_to_interpreter_entry_offset_;
-  uint32_t interpreter_to_quick_entry_offset_;
+  uint32_t interpreter_to_interpreter_bridge_offset_;
+  uint32_t interpreter_to_compiled_code_bridge_offset_;
+  uint32_t jni_dlsym_lookup_offset_;
   uint32_t portable_resolution_trampoline_offset_;
+  uint32_t portable_to_interpreter_bridge_offset_;
   uint32_t quick_resolution_trampoline_offset_;
+  uint32_t quick_to_interpreter_bridge_offset_;
 
   uint32_t image_file_location_oat_checksum_;
   uint32_t image_file_location_oat_data_begin_;
diff --git a/runtime/oat_test.cc b/runtime/oat_test.cc
index 5d0dca9..68595c8 100644
--- a/runtime/oat_test.cc
+++ b/runtime/oat_test.cc
@@ -141,7 +141,7 @@
 TEST_F(OatTest, OatHeaderSizeCheck) {
   // If this test is failing and you have to update these constants,
   // it is time to update OatHeader::kOatVersion
-  EXPECT_EQ(52U, sizeof(OatHeader));
+  EXPECT_EQ(64U, sizeof(OatHeader));
   EXPECT_EQ(28U, sizeof(OatMethodOffsets));
 }
 
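The 52 -> 64 bump follows directly from the field changes in runtime/oat.h above: the two renamed fields keep their size, and three new uint32_t offsets (jni_dlsym_lookup_offset_, portable_to_interpreter_bridge_offset_, quick_to_interpreter_bridge_offset_) add 3 * 4 = 12 bytes. A one-line compile-time restatement, for illustration only (static_assert is C++11):

    #include <cstdint>

    // 52-byte header + three new 4-byte offset fields = 64 bytes.
    static_assert(52 + 3 * sizeof(uint32_t) == 64, "OatHeader grows by 12 bytes");

    int main() { return 0; }
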
diff --git a/runtime/object_utils.h b/runtime/object_utils.h
index fa7763e..3639a80 100644
--- a/runtime/object_utils.h
+++ b/runtime/object_utils.h
@@ -411,6 +411,10 @@
     shorty_ = NULL;
   }
 
+  const mirror::AbstractMethod* GetMethod() const {
+    return method_;
+  }
+
   const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     const DexFile& dex_file = GetDexFile();
     uint32_t dex_method_idx = method_->GetDexMethodIndex();
diff --git a/runtime/stack.cc b/runtime/stack.cc
index aeb15f0..7f3f40c 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -266,7 +266,11 @@
     // Frame sanity.
     size_t frame_size = method->GetFrameSizeInBytes();
     CHECK_NE(frame_size, 0u);
-    CHECK_LT(frame_size, 1024u);
+    // A rough guess at an upper bound on the frame sizes we expect to see.
+    // The 256 is the dex register limit; the 16 incorporates callee-save
+    // spills and outgoing argument setup.
+    const size_t kMaxExpectedFrameSize = 256 * sizeof(word) + 16;
+    CHECK_LE(frame_size, kMaxExpectedFrameSize);
     size_t return_pc_offset = method->GetReturnPcOffsetInBytes();
     CHECK_LT(return_pc_offset, frame_size);
   }
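
Worked out for a 32-bit target, where sizeof(word) is 4, the new bound is 256 * 4 + 16 = 1040 bytes. The comparison also changes from CHECK_LT to CHECK_LE, so a frame of exactly 1024 bytes, rejected by the old check, now passes.
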
@@ -304,7 +308,7 @@
         if (UNLIKELY(exit_stubs_installed)) {
           // While profiling, the return pc is restored from the side stack, except when walking
           // the stack for an exception where the side stack will be unwound in VisitFrame.
-          if (GetInstrumentationExitPc() == return_pc) {
+          if (GetQuickInstrumentationExitPc() == return_pc) {
             instrumentation::InstrumentationStackFrame instrumentation_frame =
                 GetInstrumentationStackFrame(instrumentation_stack_depth);
             instrumentation_stack_depth++;
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 97a1410..c79caa2 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -86,23 +86,25 @@
 }
 #endif
 
-void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints);
+void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
+                     PortableEntryPoints* ppoints, QuickEntryPoints* qpoints);
 
-void Thread::InitFunctionPointers() {
+void Thread::InitTlsEntryPoints() {
 #if !defined(__APPLE__)  // The Mac GCC is too old to accept this code.
   // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
-  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&quick_entrypoints_);
+  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&interpreter_entrypoints_);
   uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(quick_entrypoints_));
   for (uintptr_t* it = begin; it != end; ++it) {
     *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
   }
-  begin = reinterpret_cast<uintptr_t*>(&portable_entrypoints_);
+  begin = reinterpret_cast<uintptr_t*>(&interpreter_entrypoints_);
   end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(portable_entrypoints_));
   for (uintptr_t* it = begin; it != end; ++it) {
     *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
   }
 #endif
-  InitEntryPoints(&quick_entrypoints_, &portable_entrypoints_);
+  InitEntryPoints(&interpreter_entrypoints_, &jni_entrypoints_, &portable_entrypoints_,
+                  &quick_entrypoints_);
 }
 
 void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) {
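
The placeholder-filling pattern used in InitTlsEntryPoints above, reduced to a self-contained sketch (toy table type and names; a single pass over the whole struct; assumes function pointers are pointer-sized, as on ART's targets):

    #include <cstdint>

    extern "C" void UnimplementedEntryPoint() {
      // In ART this aborts recognizably; a trap is enough for the sketch.
      __builtin_trap();
    }

    // Toy entry-point table: every member is a function pointer, so the whole
    // struct can be treated as an array of pointer-sized slots.
    struct ToyEntryPoints {
      void (*alloc)();
      void (*lock)();
      void (*unlock)();
    };

    // Fill each slot with a sentinel so a call through an uninitialized slot
    // faults in an easily identified place.
    void Poison(ToyEntryPoints* table) {
      uintptr_t* begin = reinterpret_cast<uintptr_t*>(table);
      uintptr_t* end = begin + sizeof(*table) / sizeof(uintptr_t);
      for (uintptr_t* it = begin; it != end; ++it) {
        *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
      }
    }

    int main() {
      ToyEntryPoints t;
      Poison(&t);
      return 0;
    }
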
@@ -292,7 +294,7 @@
   CHECK(Thread::Current() == NULL);
   SetUpAlternateSignalStack();
   InitCpu();
-  InitFunctionPointers();
+  InitTlsEntryPoints();
   InitCardTable();
   InitTid();
   // Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this
@@ -1589,22 +1591,29 @@
   uint32_t offset;
   const char* name;
 };
-#define QUICK_ENTRY_POINT_INFO(x) { QUICK_ENTRYPOINT_OFFSET(x), #x }
-#define PORTABLE_ENTRY_POINT_INFO(x) { PORTABLE_ENTRYPOINT_OFFSET(x), #x }
+#define INTERPRETER_ENTRY_POINT_INFO(x) { INTERPRETER_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
+#define JNI_ENTRY_POINT_INFO(x)         { JNI_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
+#define PORTABLE_ENTRY_POINT_INFO(x)    { PORTABLE_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
+#define QUICK_ENTRY_POINT_INFO(x)       { QUICK_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
 static const EntryPointInfo gThreadEntryPointInfo[] = {
-  QUICK_ENTRY_POINT_INFO(pAllocArrayFromCode),
-  QUICK_ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck),
-  QUICK_ENTRY_POINT_INFO(pAllocObjectFromCode),
-  QUICK_ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck),
-  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode),
-  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck),
-  QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode),
-  QUICK_ENTRY_POINT_INFO(pCanPutArrayElementFromCode),
-  QUICK_ENTRY_POINT_INFO(pCheckCastFromCode),
+  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToInterpreterBridge),
+  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToCompiledCodeBridge),
+  JNI_ENTRY_POINT_INFO(pDlsymLookup),
+  PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampoline),
+  PORTABLE_ENTRY_POINT_INFO(pPortableToInterpreterBridge),
+  QUICK_ENTRY_POINT_INFO(pAllocArray),
+  QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck),
+  QUICK_ENTRY_POINT_INFO(pAllocObject),
+  QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck),
+  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray),
+  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck),
+  QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial),
+  QUICK_ENTRY_POINT_INFO(pCanPutArrayElement),
+  QUICK_ENTRY_POINT_INFO(pCheckCast),
   QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage),
-  QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode),
-  QUICK_ENTRY_POINT_INFO(pInitializeTypeFromCode),
-  QUICK_ENTRY_POINT_INFO(pResolveStringFromCode),
+  QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess),
+  QUICK_ENTRY_POINT_INFO(pInitializeType),
+  QUICK_ENTRY_POINT_INFO(pResolveString),
   QUICK_ENTRY_POINT_INFO(pSet32Instance),
   QUICK_ENTRY_POINT_INFO(pSet32Static),
   QUICK_ENTRY_POINT_INFO(pSet64Instance),
@@ -1617,15 +1626,15 @@
   QUICK_ENTRY_POINT_INFO(pGet64Static),
   QUICK_ENTRY_POINT_INFO(pGetObjInstance),
   QUICK_ENTRY_POINT_INFO(pGetObjStatic),
-  QUICK_ENTRY_POINT_INFO(pHandleFillArrayDataFromCode),
+  QUICK_ENTRY_POINT_INFO(pHandleFillArrayData),
   QUICK_ENTRY_POINT_INFO(pJniMethodStart),
   QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized),
   QUICK_ENTRY_POINT_INFO(pJniMethodEnd),
   QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized),
   QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference),
   QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized),
-  QUICK_ENTRY_POINT_INFO(pLockObjectFromCode),
-  QUICK_ENTRY_POINT_INFO(pUnlockObjectFromCode),
+  QUICK_ENTRY_POINT_INFO(pLockObject),
+  QUICK_ENTRY_POINT_INFO(pUnlockObject),
   QUICK_ENTRY_POINT_INFO(pCmpgDouble),
   QUICK_ENTRY_POINT_INFO(pCmpgFloat),
   QUICK_ENTRY_POINT_INFO(pCmplDouble),
@@ -1646,28 +1655,26 @@
   QUICK_ENTRY_POINT_INFO(pShlLong),
   QUICK_ENTRY_POINT_INFO(pShrLong),
   QUICK_ENTRY_POINT_INFO(pUshrLong),
-  QUICK_ENTRY_POINT_INFO(pInterpreterToInterpreterEntry),
-  QUICK_ENTRY_POINT_INFO(pInterpreterToQuickEntry),
   QUICK_ENTRY_POINT_INFO(pIndexOf),
   QUICK_ENTRY_POINT_INFO(pMemcmp16),
   QUICK_ENTRY_POINT_INFO(pStringCompareTo),
   QUICK_ENTRY_POINT_INFO(pMemcpy),
-  QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampolineFromCode),
+  QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline),
+  QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge),
   QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck),
   QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampoline),
   QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck),
   QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck),
   QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck),
   QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck),
-  QUICK_ENTRY_POINT_INFO(pCheckSuspendFromCode),
-  QUICK_ENTRY_POINT_INFO(pTestSuspendFromCode),
+  QUICK_ENTRY_POINT_INFO(pCheckSuspend),
+  QUICK_ENTRY_POINT_INFO(pTestSuspend),
   QUICK_ENTRY_POINT_INFO(pDeliverException),
-  QUICK_ENTRY_POINT_INFO(pThrowArrayBoundsFromCode),
-  QUICK_ENTRY_POINT_INFO(pThrowDivZeroFromCode),
-  QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode),
-  QUICK_ENTRY_POINT_INFO(pThrowNullPointerFromCode),
-  QUICK_ENTRY_POINT_INFO(pThrowStackOverflowFromCode),
-  PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampolineFromCode),
+  QUICK_ENTRY_POINT_INFO(pThrowArrayBounds),
+  QUICK_ENTRY_POINT_INFO(pThrowDivZero),
+  QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod),
+  QUICK_ENTRY_POINT_INFO(pThrowNullPointer),
+  QUICK_ENTRY_POINT_INFO(pThrowStackOverflow),
 };
 #undef QUICK_ENTRY_POINT_INFO
 
@@ -1695,8 +1702,9 @@
 
   size_t entry_point_count = arraysize(gThreadEntryPointInfo);
   CHECK_EQ(entry_point_count * size_of_pointers,
-           sizeof(QuickEntryPoints) + sizeof(PortableEntryPoints));
-  uint32_t expected_offset = OFFSETOF_MEMBER(Thread, quick_entrypoints_);
+           sizeof(InterpreterEntryPoints) + sizeof(JniEntryPoints) + sizeof(PortableEntryPoints) +
+           sizeof(QuickEntryPoints));
+  uint32_t expected_offset = OFFSETOF_MEMBER(Thread, interpreter_entrypoints_);
   for (size_t i = 0; i < entry_point_count; ++i) {
     CHECK_EQ(gThreadEntryPointInfo[i].offset, expected_offset) << gThreadEntryPointInfo[i].name;
     expected_offset += size_of_pointers;
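
The loop above relies on two layout facts: gThreadEntryPointInfo lists every slot of the four tables in declaration order, and the tables sit back-to-back in Thread starting at interpreter_entrypoints_. A reduced sketch of the contiguity half (illustrative pointer-only structs, so there is no padding between them; static_assert is C++11 and used here only for illustration):

    #include <cstddef>

    struct InterpreterLike { void* p[2]; };
    struct JniLike { void* p[1]; };

    // Stand-in for Thread: the tables must be adjacent, in declaration order,
    // for a pointer-sized walk starting at the first table to visit them all.
    struct HolderLike {
      InterpreterLike interpreter;
      JniLike jni;
    };

    static_assert(offsetof(HolderLike, jni) ==
                      offsetof(HolderLike, interpreter) + sizeof(InterpreterLike),
                  "entry-point tables must be back-to-back");

    int main() { return 0; }
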
@@ -1739,7 +1747,7 @@
       return false;  // End stack walk.
     } else {
       if (UNLIKELY(method_tracing_active_ &&
-                   GetInstrumentationExitPc() == GetReturnPc())) {
+                   GetQuickInstrumentationExitPc() == GetReturnPc())) {
         // Keep count of the number of unwinds during instrumentation.
         instrumentation_frames_to_pop_++;
       }
diff --git a/runtime/thread.h b/runtime/thread.h
index ff0fe22..8b6771e 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -26,6 +26,8 @@
 #include <string>
 
 #include "base/macros.h"
+#include "entrypoints/interpreter/interpreter_entrypoints.h"
+#include "entrypoints/jni/jni_entrypoints.h"
 #include "entrypoints/portable/portable_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "globals.h"
@@ -43,17 +45,17 @@
 namespace art {
 
 namespace mirror {
-class AbstractMethod;
-class Array;
-class Class;
-class ClassLoader;
-class Object;
-template<class T> class ObjectArray;
-template<class T> class PrimitiveArray;
-typedef PrimitiveArray<int32_t> IntArray;
-class StackTraceElement;
-class StaticStorageBase;
-class Throwable;
+  class AbstractMethod;
+  class Array;
+  class Class;
+  class ClassLoader;
+  class Object;
+  template<class T> class ObjectArray;
+  template<class T> class PrimitiveArray;
+  typedef PrimitiveArray<int32_t> IntArray;
+  class StackTraceElement;
+  class StaticStorageBase;
+  class Throwable;
 }  // namespace mirror
 class BaseMutex;
 class ClassLinker;
@@ -614,7 +616,7 @@
   void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
   void InitCardTable();
   void InitCpu();
-  void InitFunctionPointers();
+  void InitTlsEntryPoints();
   void InitTid();
   void InitPthreadKeySelf();
   void InitStackHwm();
@@ -776,8 +778,10 @@
  public:
   // Entrypoint function pointers
   // TODO: move this near the top, since changing its offset requires all oats to be recompiled!
-  QuickEntryPoints quick_entrypoints_;
+  InterpreterEntryPoints interpreter_entrypoints_;
+  JniEntryPoints jni_entrypoints_;
   PortableEntryPoints portable_entrypoints_;
+  QuickEntryPoints quick_entrypoints_;
 
  private:
   // How many times has our pthread key's destructor been called?
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index f1de565..eb6e3c3 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -2507,8 +2507,8 @@
 
     // Special instructions.
     case Instruction::RETURN_VOID_BARRIER:
-      DCHECK(Runtime::Current()->IsStarted());
-      if (!IsConstructor()) {
+      DCHECK(Runtime::Current()->IsStarted()) << PrettyMethod(dex_method_idx_, *dex_file_);
+      if (!IsConstructor() || IsStatic()) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "return-void-barrier not expected";
       }
       break;
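
Restated, return-void-barrier is now accepted only in instance constructors. A tiny truth-table check of the equivalent positive predicate (hypothetical helper, not verifier code):

    #include <cassert>

    // return-void-barrier is legal exactly in <init>: a constructor that is
    // not static. The verifier's guard above is the negation of this predicate.
    static bool BarrierAllowed(bool is_constructor, bool is_static) {
      return is_constructor && !is_static;
    }

    int main() {
      assert(BarrierAllowed(true, false));    // <init>
      assert(!BarrierAllowed(true, true));    // <clinit>
      assert(!BarrierAllowed(false, false));  // plain method
      return 0;
    }
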
@@ -2819,8 +2819,10 @@
     dex_cache_->SetResolvedType(class_idx, result.GetClass());
   }
   // Check if access is allowed. Unresolved types use xxxWithAccessCheck to
-  // check at runtime if access is allowed and so pass here.
-  if (!result.IsUnresolvedTypes() && !referrer.IsUnresolvedTypes() && !referrer.CanAccess(result)) {
+  // check at runtime if access is allowed and so pass here. If result is
+  // primitive, skip the access check.
+  if (result.IsNonZeroReferenceTypes() && !result.IsUnresolvedTypes() &&
+      !referrer.IsUnresolvedTypes() && !referrer.CanAccess(result)) {
     Fail(VERIFY_ERROR_ACCESS_CLASS) << "illegal class access: '"
                                     << referrer << "' -> '" << result << "'";
   }
@@ -3297,6 +3299,43 @@
   }
 }
 
+void MethodVerifier::VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
+                                        const uint32_t vregA) {
+  // Primitive assignability rules are weaker than regular assignability rules.
+  bool instruction_compatible;
+  bool value_compatible;
+  const RegType& value_type = work_line_->GetRegisterType(vregA);
+  if (target_type.IsIntegralTypes()) {
+    instruction_compatible = target_type.Equals(insn_type);
+    value_compatible = value_type.IsIntegralTypes();
+  } else if (target_type.IsFloat()) {
+    instruction_compatible = insn_type.IsInteger();  // no put-float, so expect put-int
+    value_compatible = value_type.IsFloatTypes();
+  } else if (target_type.IsLong()) {
+    instruction_compatible = insn_type.IsLong();
+    value_compatible = value_type.IsLongTypes();
+  } else if (target_type.IsDouble()) {
+    instruction_compatible = insn_type.IsLong();  // no put-double, so expect put-long
+    value_compatible = value_type.IsDoubleTypes();
+  } else {
+    instruction_compatible = false;  // reference with primitive store
+    value_compatible = false;  // unused
+  }
+  if (!instruction_compatible) {
+    // This is a global failure rather than a class change failure as the instructions and
+    // the descriptors for the type should have been consistent within the same file at
+    // compile time.
+    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "put insn has type '" << insn_type
+        << "' but expected type '" << target_type << "'";
+    return;
+  }
+  if (!value_compatible) {
+    Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected value in v" << vregA
+        << " of type " << value_type << " but expected " << target_type << " for put";
+    return;
+  }
+}
+
 void MethodVerifier::VerifyAPut(const Instruction* inst,
                              const RegType& insn_type, bool is_primitive) {
   const RegType& index_type = work_line_->GetRegisterType(inst->VRegC_23x());
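
The new VerifyPrimitivePut helper above applies the weaker bit-pattern rules for primitive stores. A toy model of its instruction-compatibility table (simplified type enum, not the real RegType lattice; the integral cases are collapsed into one here):

    #include <cassert>

    enum ToyType { kInt, kFloat, kLong, kDouble };

    // Which put instruction is expected for a given target type. Dex has no
    // put-float/put-double variants, so float targets expect the int put and
    // double targets expect the long put; the value check is done separately.
    static bool InstructionCompatible(ToyType target, ToyType insn) {
      switch (target) {
        case kInt:    return insn == kInt;
        case kFloat:  return insn == kInt;
        case kLong:   return insn == kLong;
        case kDouble: return insn == kLong;
      }
      return false;
    }

    int main() {
      assert(InstructionCompatible(kFloat, kInt));    // put-int into float target
      assert(InstructionCompatible(kDouble, kLong));  // put-long into double target
      assert(!InstructionCompatible(kLong, kInt));    // wrong width
      return 0;
    }
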
@@ -3310,25 +3349,20 @@
     } else if (!array_type.IsArrayTypes()) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aput";
     } else {
-      /* verify the class */
       const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_);
-      if (!component_type.IsReferenceTypes() && !is_primitive) {
-        Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "primitive array type " << array_type
-            << " source for aput-object";
-      } else if (component_type.IsNonZeroReferenceTypes() && is_primitive) {
-        Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "reference array type " << array_type
-            << " source for category 1 aput";
-      } else if (is_primitive && !insn_type.Equals(component_type) &&
-                 !((insn_type.IsInteger() && component_type.IsFloat()) ||
-                   (insn_type.IsLong() && component_type.IsDouble()))) {
-        Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array type " << array_type
-            << " incompatible with aput of type " << insn_type;
+      const uint32_t vregA = inst->VRegA_23x();
+      if (is_primitive) {
+        VerifyPrimitivePut(component_type, insn_type, vregA);
       } else {
-        // The instruction agrees with the type of array, confirm the value to be stored does too
-        // Note: we use the instruction type (rather than the component type) for aput-object as
-        // incompatible classes will be caught at runtime as an array store exception
-        work_line_->VerifyRegisterType(inst->VRegA_23x(),
-                                       is_primitive ? component_type : insn_type);
+        if (!component_type.IsReferenceTypes()) {
+          Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "primitive array type " << array_type
+              << " source for aput-object";
+        } else {
+          // The instruction agrees with the type of the array; confirm the value to be stored does too.
+          // Note: we use the instruction type (rather than the component type) for aput-object as
+          // incompatible classes will be caught at runtime as an array store exception
+          work_line_->VerifyRegisterType(vregA, insn_type);
+        }
       }
     }
   }
@@ -3458,8 +3492,8 @@
   const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
   if (is_primitive) {
     if (field_type.Equals(insn_type) ||
-        (field_type.IsFloat() && insn_type.IsIntegralTypes()) ||
-        (field_type.IsDouble() && insn_type.IsLongTypes())) {
+        (field_type.IsFloat() && insn_type.IsInteger()) ||
+        (field_type.IsDouble() && insn_type.IsLong())) {
       // expected that read is of the correct primitive type or that int reads are reading
       // floats or long reads are reading doubles
     } else {
@@ -3518,43 +3552,7 @@
   }
   const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
   if (is_primitive) {
-    // Primitive field assignability rules are weaker than regular assignability rules
-    bool instruction_compatible;
-    bool value_compatible;
-    const RegType& value_type = work_line_->GetRegisterType(vregA);
-    if (field_type.IsIntegralTypes()) {
-      instruction_compatible = insn_type.IsIntegralTypes();
-      value_compatible = value_type.IsIntegralTypes();
-    } else if (field_type.IsFloat()) {
-      instruction_compatible = insn_type.IsInteger();  // no [is]put-float, so expect [is]put-int
-      value_compatible = value_type.IsFloatTypes();
-    } else if (field_type.IsLong()) {
-      instruction_compatible = insn_type.IsLong();
-      value_compatible = value_type.IsLongTypes();
-    } else if (field_type.IsDouble()) {
-      instruction_compatible = insn_type.IsLong();  // no [is]put-double, so expect [is]put-long
-      value_compatible = value_type.IsDoubleTypes();
-    } else {
-      instruction_compatible = false;  // reference field with primitive store
-      value_compatible = false;  // unused
-    }
-    if (!instruction_compatible) {
-      // This is a global failure rather than a class change failure as the instructions and
-      // the descriptors for the type should have been consistent within the same file at
-      // compile time
-      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << PrettyField(field)
-                                        << " to be of type '" << insn_type
-                                        << "' but found type '" << field_type
-                                        << "' in put";
-      return;
-    }
-    if (!value_compatible) {
-      Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected value in v" << vregA
-          << " of type " << value_type
-          << " but expected " << field_type
-          << " for store to " << PrettyField(field) << " in put";
-      return;
-    }
+    VerifyPrimitivePut(field_type, insn_type, vregA);
   } else {
     if (!insn_type.IsAssignableFrom(field_type)) {
       Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << PrettyField(field)
@@ -3756,6 +3754,10 @@
     if (!insn_flags_[next_insn].IsReturn()) {
       target_line->CopyFromLine(merge_line);
     } else {
+      // Verify that the monitor stack is empty on return.
+      if (!merge_line->VerifyMonitorStackEmpty()) {
+        return false;
+      }
       // For returns we only care about the operand to the return; all other registers are dead.
       // Initialize them as conflicts so they don't add to GC and deoptimization information.
       const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn);
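
The added VerifyMonitorStackEmpty call makes the merge into a return target fail if any monitor-enter is still outstanding, before the registers are collapsed to conflicts. A toy model of the bookkeeping (illustrative class, not RegisterLine):

    #include <vector>

    // Per-path monitor bookkeeping, roughly what RegisterLine tracks: depth
    // grows on monitor-enter and shrinks on monitor-exit; a return reached
    // with non-zero depth is a hard verification failure.
    class ToyMonitorStack {
     public:
      void Enter(int vreg) { stack_.push_back(vreg); }
      bool Exit() {
        if (stack_.empty()) return false;  // unbalanced monitor-exit
        stack_.pop_back();
        return true;
      }
      bool VerifyEmptyOnReturn() const { return stack_.empty(); }

     private:
      std::vector<int> stack_;  // dex registers holding entered monitors
    };

    int main() {
      ToyMonitorStack s;
      s.Enter(0);
      bool ok = s.VerifyEmptyOnReturn();  // false: returning here would fail
      s.Exit();
      ok = s.VerifyEmptyOnReturn();       // true: balanced, return is legal
      return ok ? 0 : 1;
    }
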
@@ -4061,20 +4063,19 @@
 
 void  MethodVerifier::SetSafeCastMap(MethodReference ref, const MethodSafeCastSet* cast_set) {
   DCHECK(Runtime::Current()->IsCompiler());
-  MutexLock mu(Thread::Current(), *safecast_map_lock_);
+  WriterMutexLock mu(Thread::Current(), *safecast_map_lock_);
   SafeCastMap::iterator it = safecast_map_->find(ref);
   if (it != safecast_map_->end()) {
     delete it->second;
     safecast_map_->erase(it);
   }
-
   safecast_map_->Put(ref, cast_set);
   DCHECK(safecast_map_->find(ref) != safecast_map_->end());
 }
 
 bool MethodVerifier::IsSafeCast(MethodReference ref, uint32_t pc) {
   DCHECK(Runtime::Current()->IsCompiler());
-  MutexLock mu(Thread::Current(), *safecast_map_lock_);
+  ReaderMutexLock mu(Thread::Current(), *safecast_map_lock_);
   SafeCastMap::const_iterator it = safecast_map_->find(ref);
   if (it == safecast_map_->end()) {
     return false;
@@ -4186,7 +4187,7 @@
 ReaderWriterMutex* MethodVerifier::dex_gc_maps_lock_ = NULL;
 MethodVerifier::DexGcMapTable* MethodVerifier::dex_gc_maps_ = NULL;
 
-Mutex* MethodVerifier::safecast_map_lock_ = NULL;
+ReaderWriterMutex* MethodVerifier::safecast_map_lock_ = NULL;
 MethodVerifier::SafeCastMap* MethodVerifier::safecast_map_ = NULL;
 
 ReaderWriterMutex* MethodVerifier::devirt_maps_lock_ = NULL;
@@ -4204,9 +4205,9 @@
       dex_gc_maps_ = new MethodVerifier::DexGcMapTable;
     }
 
-    safecast_map_lock_ = new Mutex("verifier Cast Elision lock");
+    safecast_map_lock_ = new ReaderWriterMutex("verifier Cast Elision lock");
     {
-      MutexLock mu(self, *safecast_map_lock_);
+      WriterMutexLock mu(self, *safecast_map_lock_);
       safecast_map_ = new MethodVerifier::SafeCastMap();
     }
 
@@ -4239,7 +4240,7 @@
     dex_gc_maps_lock_ = NULL;
 
     {
-      MutexLock mu(self, *safecast_map_lock_);
+      WriterMutexLock mu(self, *safecast_map_lock_);
       STLDeleteValues(safecast_map_);
       delete safecast_map_;
       safecast_map_ = NULL;
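
The Mutex to ReaderWriterMutex switch lets IsSafeCast readers run concurrently while SetSafeCastMap and teardown keep exclusive access. The same shape with standard primitives, as a sketch only (std::shared_mutex, C++17, standing in for ART's ReaderWriterMutex):

    #include <map>
    #include <mutex>
    #include <shared_mutex>

    // Hypothetical stand-ins for the verifier's map: lookups take the shared
    // lock so compiler threads don't serialize; the rare updates take the
    // exclusive lock.
    static std::shared_mutex g_safecast_lock;
    static std::map<int, bool> g_safecast_map;

    bool IsSafeCast(int key) {
      std::shared_lock<std::shared_mutex> lock(g_safecast_lock);  // reader
      std::map<int, bool>::const_iterator it = g_safecast_map.find(key);
      return it != g_safecast_map.end() && it->second;
    }

    void SetSafeCast(int key, bool safe) {
      std::unique_lock<std::shared_mutex> lock(g_safecast_lock);  // writer
      g_safecast_map[key] = safe;
    }

    int main() {
      SetSafeCast(42, true);
      return IsSafeCast(42) ? 0 : 1;
    }
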
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 3f98a00..e01f2c0 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -230,6 +230,10 @@
                  uint32_t access_flags, bool can_load_classes, bool allow_soft_failures)
           SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  ~MethodVerifier() {
+    STLDeleteElements(&failure_messages_);
+  }
+
   // Run verification on the method. Returns true if verification completes and false if the input
   // has an irrecoverable corruption.
   bool Verify() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -476,6 +480,10 @@
   void VerifyNewArray(const Instruction* inst, bool is_filled, bool is_range)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Helper to perform verification on puts of primitive type.
+  void VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
+                          const uint32_t vregA) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Perform verification of an aget instruction. The destination register's type will be set to
   // be that of component type of the array unless the array type is unknown, in which case a
   // bottom type inferred from the type of instruction is used. is_primitive is false for an
@@ -640,7 +648,7 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static void SetSafeCastMap(MethodReference ref, const MethodSafeCastSet* mscs)
       LOCKS_EXCLUDED(safecast_map_lock_);
-  static Mutex* safecast_map_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  static ReaderWriterMutex* safecast_map_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   static SafeCastMap* safecast_map_ GUARDED_BY(safecast_map_lock_);
 
   // Devirtualization map.
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 7965c06..24a626b 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -38,7 +38,7 @@
 bool RegisterLine::SetRegisterType(uint32_t vdst, const RegType& new_type) {
   DCHECK_LT(vdst, num_regs_);
   if (new_type.IsLowHalf() || new_type.IsHighHalf()) {
-    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "Expected category1 register type not '"
+    verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Expected category1 register type not '"
         << new_type << "'";
     return false;
   } else if (new_type.IsConflict()) {  // should only be set during a merge
@@ -448,7 +448,7 @@
   }
 }
 
-bool RegisterLine::VerifyMonitorStackEmpty() {
+bool RegisterLine::VerifyMonitorStackEmpty() const {
   if (MonitorStackDepth() != 0) {
     verifier_->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected empty monitor stack";
     return false;
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index f380877..f19dcca 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -268,7 +268,7 @@
 
   // We expect no monitors to be held at certain points, such as when a method returns. Verify the
   // stack is empty, failing and returning false if not.
-  bool VerifyMonitorStackEmpty();
+  bool VerifyMonitorStackEmpty() const;
 
   bool MergeRegisters(const RegisterLine* incoming_line)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);