Change 64-bit ArtMethod fields to be pointer-sized

Changed the 64-bit entrypoint and gc map fields in ArtMethod to be
pointer-sized. This saves a large amount of memory on 32-bit systems,
reducing each ArtMethod by 16 bytes.
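
A minimal standalone sketch of the resulting size arithmetic, for
illustration only (the change itself packs these fields into the
PtrSizedFields struct at the end of ArtMethod; the 40-byte size of the
fixed part is an assumption inferred from the asm_support.h offsets
below):

  #include <cstddef>

  // Pointer-sized fields in the default (non-portable) build:
  // interpreter entrypoint, JNI entrypoint, quick-code entrypoint and
  // the native GC map. The portable entrypoint only exists when
  // ART_USE_PORTABLE_COMPILER is defined.
  constexpr size_t kNumPtrSizedFields = 4;
  constexpr size_t kSizeWithoutPtrFields = 40;  // Assumed fixed part.

  constexpr size_t ArtMethodInstanceSize(size_t pointer_size) {
    return kSizeWithoutPtrFields + kNumPtrSizedFields * pointer_size;
  }

  static_assert(ArtMethodInstanceSize(8) - ArtMethodInstanceSize(4) == 16,
                "16 bytes saved per ArtMethod on a 32-bit target");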

Total number of ArtMethods on a low-memory mako: 169957 (16 bytes
saved per method).
Image: 49203 methods -> 787248 bytes of image size reduction.
Zygote space: 1070 methods -> 17120 bytes of reduction.
App methods: ~120k -> ~2 MB of savings.

Savings per app on a low-memory mako: 125K+ (fewer active apps ->
more image methods attributable to each app).

The actual savings depend on how many of the shared methods sit on
dirty (private) pages versus remaining on shared, clean pages.

TODO (in another CL): delete the gc map field from ArtMethod, since we
should be able to get it from the oat method header.
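
For reference, a similar sketch of how the pointer-size-aware offsets
behave (the real helpers compute this with OFFSETOF_MEMBER on
PtrSizedFields; field order and the 40-byte start offset are the same
assumptions as above):

  #include <cstddef>

  // Index within the pointer-sized block, default (non-portable) build:
  // 0 = interpreter, 1 = JNI, 2 = quick code, 3 = GC map.
  constexpr size_t PtrSizedFieldOffset(size_t field_index,
                                       size_t pointer_size) {
    return 40 + field_index * pointer_size;
  }

  // Consistent with the METHOD_QUICK_CODE_OFFSET_{32,64} constants
  // added to asm_support.h by this change.
  static_assert(PtrSizedFieldOffset(2, 4) == 48, "32-bit quick entrypoint");
  static_assert(PtrSizedFieldOffset(2, 8) == 56, "64-bit quick entrypoint");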

Bug: 17643507

Change-Id: Ie9508f05907a9f693882d4d32a564460bf273ee8
diff --git a/compiler/compilers.cc b/compiler/compilers.cc
index 250924a..2481128 100644
--- a/compiler/compilers.cc
+++ b/compiler/compilers.cc
@@ -84,7 +84,9 @@
 }
 
 uintptr_t QuickCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
-  return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode());
+  size_t pointer_size = InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet());
+  return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
+      pointer_size));
 }
 
 bool QuickCompiler::WriteElf(art::File* file,
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 7958886..7d89e19 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -477,9 +477,10 @@
                                                        const RegStorage* alt_from,
                                                        const CompilationUnit* cu, Mir2Lir* cg) {
   if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
+    int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+        InstructionSetPointerSize(cu->instruction_set)).Int32Value();
     // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
-    cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from,
-                     mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
+    cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from, offset,
                      cg->TargetPtrReg(kInvokeTgt));
     return true;
   }
@@ -1802,8 +1803,9 @@
         call_inst =
           reinterpret_cast<X86Mir2Lir*>(this)->CallWithLinkerFixup(target_method, info->type);
       } else {
-        call_inst = OpMem(kOpBlx, TargetReg(kArg0, kRef),
-                          mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+        int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+            InstructionSetPointerSize(cu_->instruction_set)).Int32Value();
+        call_inst = OpMem(kOpBlx, TargetReg(kArg0, kRef), offset);
       }
     } else {
       call_inst = GenInvokeNoInlineCall(this, info->type);
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 871889f..1b66adb 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -81,6 +81,7 @@
 
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
 
+  target_ptr_size_ = InstructionSetPointerSize(compiler_driver_.GetInstructionSet());
   std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
   if (oat_file.get() == NULL) {
     LOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
@@ -212,7 +213,14 @@
 void ImageWriter::AssignImageOffset(mirror::Object* object) {
   DCHECK(object != nullptr);
   SetImageOffset(object, image_end_);
-  image_end_ += RoundUp(object->SizeOf(), 8);  // 64-bit alignment
+  size_t object_size;
+  if (object->IsArtMethod()) {
+    // Methods are sized based on the target pointer size.
+    object_size = mirror::ArtMethod::InstanceSize(target_ptr_size_);
+  } else {
+    object_size = object->SizeOf();
+  }
+  image_end_ += RoundUp(object_size, 8);  // 64-bit alignment
   DCHECK_LT(image_end_, image_->Size());
 }
 
@@ -609,7 +617,14 @@
   size_t offset = image_writer->GetImageOffset(obj);
   byte* dst = image_writer->image_->Begin() + offset;
   const byte* src = reinterpret_cast<const byte*>(obj);
-  size_t n = obj->SizeOf();
+  size_t n;
+  if (obj->IsArtMethod()) {
+    // Size without pointer fields since we don't want to overrun the buffer if target art method
+    // is 32 bits but source is 64 bits.
+    n = mirror::ArtMethod::SizeWithoutPointerFields();
+  } else {
+    n = obj->SizeOf();
+  }
   DCHECK_LT(offset + n, image_writer->image_->Size());
   memcpy(dst, src, n);
   Object* copy = reinterpret_cast<Object*>(dst);
@@ -688,6 +703,10 @@
   }
   if (orig->IsArtMethod<kVerifyNone>()) {
     FixupMethod(orig->AsArtMethod<kVerifyNone>(), down_cast<ArtMethod*>(copy));
+  } else if (orig->IsClass() && orig->AsClass()->IsArtMethodClass()) {
+    // Set the right size for the target.
+    size_t size = mirror::ArtMethod::InstanceSize(target_ptr_size_);
+    down_cast<mirror::Class*>(copy)->SetObjectSizeWithoutChecks(size);
   }
 }
 
@@ -746,42 +765,63 @@
 void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
   // OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
   // oat_begin_
+  // For 64 bit targets we need to repack the current runtime pointer sized fields to the right
+  // locations.
+  // Copy all of the fields from the runtime methods to the target methods first since we did a
+  // bytewise copy earlier.
+#if defined(ART_USE_PORTABLE_COMPILER)
+  copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+      orig->GetEntryPointFromPortableCompiledCode(), target_ptr_size_);
+#endif
+  copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(orig->GetEntryPointFromInterpreter(),
+                                                         target_ptr_size_);
+  copy->SetEntryPointFromJniPtrSize<kVerifyNone>(orig->GetEntryPointFromJni(), target_ptr_size_);
+  copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+      orig->GetEntryPointFromQuickCompiledCode(), target_ptr_size_);
+  copy->SetNativeGcMapPtrSize<kVerifyNone>(orig->GetNativeGcMap(), target_ptr_size_);
 
   // The resolution method has a special trampoline to call.
   Runtime* runtime = Runtime::Current();
   if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
 #if defined(ART_USE_PORTABLE_COMPILER)
-    copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_resolution_trampoline_offset_));
+    copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+        GetOatAddress(portable_resolution_trampoline_offset_), target_ptr_size_);
 #endif
-    copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_resolution_trampoline_offset_));
+    copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+        GetOatAddress(quick_resolution_trampoline_offset_), target_ptr_size_);
   } else if (UNLIKELY(orig == runtime->GetImtConflictMethod() ||
                       orig == runtime->GetImtUnimplementedMethod())) {
 #if defined(ART_USE_PORTABLE_COMPILER)
-    copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_imt_conflict_trampoline_offset_));
+    copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+        GetOatAddress(portable_imt_conflict_trampoline_offset_), target_ptr_size_);
 #endif
-    copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_imt_conflict_trampoline_offset_));
+    copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+        GetOatAddress(quick_imt_conflict_trampoline_offset_), target_ptr_size_);
   } else {
     // We assume all methods have code. If they don't currently then we set them to the use the
     // resolution trampoline. Abstract methods never have code and so we need to make sure their
     // use results in an AbstractMethodError. We use the interpreter to achieve this.
     if (UNLIKELY(orig->IsAbstract())) {
 #if defined(ART_USE_PORTABLE_COMPILER)
-      copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(GetOatAddress(portable_to_interpreter_bridge_offset_));
+      copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+          GetOatAddress(portable_to_interpreter_bridge_offset_), target_ptr_size_);
 #endif
-      copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(GetOatAddress(quick_to_interpreter_bridge_offset_));
-      copy->SetEntryPointFromInterpreter<kVerifyNone>(reinterpret_cast<EntryPointFromInterpreter*>
-          (const_cast<byte*>(GetOatAddress(interpreter_to_interpreter_bridge_offset_))));
+      copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(
+          GetOatAddress(quick_to_interpreter_bridge_offset_), target_ptr_size_);
+      copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
+          reinterpret_cast<EntryPointFromInterpreter*>(const_cast<byte*>(
+              GetOatAddress(interpreter_to_interpreter_bridge_offset_))), target_ptr_size_);
     } else {
       bool quick_is_interpreted;
       const byte* quick_code = GetQuickCode(orig, &quick_is_interpreted);
-      copy->SetEntryPointFromQuickCompiledCode<kVerifyNone>(quick_code);
+      copy->SetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(quick_code, target_ptr_size_);
 
       // Portable entrypoint:
       bool portable_is_interpreted = false;
 #if defined(ART_USE_PORTABLE_COMPILER)
       const byte* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset());
-      if (portable_code != nullptr &&
-          (!orig->IsStatic() || orig->IsConstructor() || orig->GetDeclaringClass()->IsInitialized())) {
+      if (portable_code != nullptr && (!orig->IsStatic() || orig->IsConstructor() ||
+          orig->GetDeclaringClass()->IsInitialized())) {
         // We have code for a non-static or initialized method, just use the code.
       } else if (portable_code == nullptr && orig->IsNative() &&
           (!orig->IsStatic() || orig->GetDeclaringClass()->IsInitialized())) {
@@ -798,18 +838,20 @@
         // initialization.
         portable_code = GetOatAddress(portable_resolution_trampoline_offset_);
       }
-      copy->SetEntryPointFromPortableCompiledCode<kVerifyNone>(portable_code);
+      copy->SetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(
+          portable_code, target_ptr_size_);
 #endif
       // JNI entrypoint:
       if (orig->IsNative()) {
         // The native method's pointer is set to a stub to lookup via dlsym.
         // Note this is not the code_ pointer, that is handled above.
-        copy->SetNativeMethod<kVerifyNone>(GetOatAddress(jni_dlsym_lookup_offset_));
+        copy->SetEntryPointFromJniPtrSize<kVerifyNone>(GetOatAddress(jni_dlsym_lookup_offset_),
+                                                       target_ptr_size_);
       } else {
         // Normal (non-abstract non-native) methods have various tables to relocate.
         uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset();
-        const byte* native_gc_map = GetOatAddress(native_gc_map_offset);
-        copy->SetNativeGcMap<kVerifyNone>(reinterpret_cast<const uint8_t*>(native_gc_map));
+        const uint8_t* native_gc_map = GetOatAddress(native_gc_map_offset);
+        copy->SetNativeGcMapPtrSize<kVerifyNone>(native_gc_map, target_ptr_size_);
       }
 
       // Interpreter entrypoint:
@@ -817,9 +859,11 @@
       uint32_t interpreter_code = (quick_is_interpreted && portable_is_interpreted)
           ? interpreter_to_interpreter_bridge_offset_
           : interpreter_to_compiled_code_bridge_offset_;
-      copy->SetEntryPointFromInterpreter<kVerifyNone>(
+      EntryPointFromInterpreter* interpreter_entrypoint =
           reinterpret_cast<EntryPointFromInterpreter*>(
-              const_cast<byte*>(GetOatAddress(interpreter_code))));
+              const_cast<byte*>(GetOatAddress(interpreter_code)));
+      copy->SetEntryPointFromInterpreterPtrSize<kVerifyNone>(
+          interpreter_entrypoint, target_ptr_size_);
     }
   }
 }
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 61365fe..6a9df10 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -204,6 +204,9 @@
   uint32_t quick_to_interpreter_bridge_offset_;
   bool compile_pic_;
 
+  // Size of pointers on the target architecture.
+  size_t target_ptr_size_;
+
   friend class FixupVisitor;
   friend class FixupClassVisitor;
   DISALLOW_COPY_AND_ASSIGN(ImageWriter);
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index c38cfaf..35b7294 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -308,7 +308,9 @@
   }
 
   // 9. Plant call to native code associated with method.
-  __ Call(main_jni_conv->MethodStackOffset(), mirror::ArtMethod::NativeMethodOffset(),
+  MemberOffset jni_entrypoint_offset = mirror::ArtMethod::EntryPointFromJniOffset(
+      InstructionSetPointerSize(instruction_set));
+  __ Call(main_jni_conv->MethodStackOffset(), jni_entrypoint_offset,
           mr_conv->InterproceduralScratchRegister());
 
   // 10. Fix differences in result widths.
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 2c954a0..7822ee5 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -854,8 +854,8 @@
   // temp = temp[index_in_cache]
   __ ldr(temp, Address(temp, index_in_cache));
   // LR = temp[offset_of_quick_compiled_code]
-  __ ldr(LR, Address(temp,
-                     mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+  __ ldr(LR, Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+      kArmPointerSize).Int32Value()));
   // LR()
   __ blx(LR);
 
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f544d47..1b6fb6b 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -796,7 +796,8 @@
   // temp = temp[index_in_cache]
   __ movl(temp, Address(temp, index_in_cache));
   // (temp + offset_of_quick_compiled_code)()
-  __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+  __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+                  kX86PointerSize).Int32Value()));
 
   DCHECK(!codegen_->IsLeafMethod());
   codegen_->RecordPcInfo(invoke->GetDexPc());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e1807dc..1ee8271 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -738,7 +738,8 @@
   // temp = temp[index_in_cache]
   __ movl(temp, Address(temp, index_in_cache));
   // (temp + offset_of_quick_compiled_code)()
-  __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue()));
+  __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+                  kX86_64PointerSize).SizeValue()));
 
   DCHECK(!codegen_->IsLeafMethod());
   codegen_->RecordPcInfo(invoke->GetDexPc());
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index b9637d0..9f55c94 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -176,7 +176,7 @@
   }
   gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetImageSpace();
 
-  PatchOat p(image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
+  PatchOat p(isa, image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
              delta, timings);
   t.NewTiming("Patching files");
   if (!p.PatchImage()) {
@@ -298,7 +298,7 @@
     CHECK(is_oat_pic == NOT_PIC);
   }
 
-  PatchOat p(elf.release(), image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
+  PatchOat p(isa, elf.release(), image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(),
              delta, timings);
   t.NewTiming("Patching files");
   if (!skip_patching_oat && !p.PatchElf()) {
@@ -523,41 +523,46 @@
   PatchOat::PatchVisitor visitor(this, copy);
   object->VisitReferences<true, kVerifyNone>(visitor, visitor);
   if (object->IsArtMethod<kVerifyNone>()) {
-    FixupMethod(static_cast<mirror::ArtMethod*>(object),
-                static_cast<mirror::ArtMethod*>(copy));
+    FixupMethod(down_cast<mirror::ArtMethod*>(object), down_cast<mirror::ArtMethod*>(copy));
   }
 }
 
 void PatchOat::FixupMethod(mirror::ArtMethod* object, mirror::ArtMethod* copy) {
+  const size_t pointer_size = InstructionSetPointerSize(isa_);
   // Just update the entry points if it looks like we should.
   // TODO: sanity check all the pointers' values
 #if defined(ART_USE_PORTABLE_COMPILER)
   uintptr_t portable = reinterpret_cast<uintptr_t>(
-      object->GetEntryPointFromPortableCompiledCode<kVerifyNone>());
+      object->GetEntryPointFromPortableCompiledCodePtrSize<kVerifyNone>(pointer_size));
   if (portable != 0) {
-    copy->SetEntryPointFromPortableCompiledCode(reinterpret_cast<void*>(portable + delta_));
+    copy->SetEntryPointFromPortableCompiledCodePtrSize(reinterpret_cast<void*>(portable + delta_),
+                                                       pointer_size);
   }
 #endif
   uintptr_t quick= reinterpret_cast<uintptr_t>(
-      object->GetEntryPointFromQuickCompiledCode<kVerifyNone>());
+      object->GetEntryPointFromQuickCompiledCodePtrSize<kVerifyNone>(pointer_size));
   if (quick != 0) {
-    copy->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(quick + delta_));
+    copy->SetEntryPointFromQuickCompiledCodePtrSize(reinterpret_cast<void*>(quick + delta_),
+                                                    pointer_size);
   }
   uintptr_t interpreter = reinterpret_cast<uintptr_t>(
-      object->GetEntryPointFromInterpreter<kVerifyNone>());
+      object->GetEntryPointFromInterpreterPtrSize<kVerifyNone>(pointer_size));
   if (interpreter != 0) {
-    copy->SetEntryPointFromInterpreter(
-        reinterpret_cast<mirror::EntryPointFromInterpreter*>(interpreter + delta_));
+    copy->SetEntryPointFromInterpreterPtrSize(
+        reinterpret_cast<mirror::EntryPointFromInterpreter*>(interpreter + delta_), pointer_size);
   }
 
-  uintptr_t native_method = reinterpret_cast<uintptr_t>(object->GetNativeMethod());
+  uintptr_t native_method = reinterpret_cast<uintptr_t>(
+      object->GetEntryPointFromJniPtrSize(pointer_size));
   if (native_method != 0) {
-    copy->SetNativeMethod(reinterpret_cast<void*>(native_method + delta_));
+    copy->SetEntryPointFromJniPtrSize(reinterpret_cast<void*>(native_method + delta_),
+                                      pointer_size);
   }
 
-  uintptr_t native_gc_map = reinterpret_cast<uintptr_t>(object->GetNativeGcMap());
+  uintptr_t native_gc_map = reinterpret_cast<uintptr_t>(
+      object->GetNativeGcMapPtrSize(pointer_size));
   if (native_gc_map != 0) {
-    copy->SetNativeGcMap(reinterpret_cast<uint8_t*>(native_gc_map + delta_));
+    copy->SetNativeGcMapPtrSize(reinterpret_cast<uint8_t*>(native_gc_map + delta_), pointer_size);
   }
 }
 
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 21041fb..03d915a 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -60,15 +60,16 @@
  private:
   // Takes ownership only of the ElfFile. All other pointers are only borrowed.
   PatchOat(ElfFile* oat_file, off_t delta, TimingLogger* timings)
-      : oat_file_(oat_file), delta_(delta), timings_(timings) {}
-  PatchOat(MemMap* image, gc::accounting::ContinuousSpaceBitmap* bitmap,
+      : oat_file_(oat_file), delta_(delta), isa_(kNone), timings_(timings) {}
+  PatchOat(InstructionSet isa, MemMap* image, gc::accounting::ContinuousSpaceBitmap* bitmap,
            MemMap* heap, off_t delta, TimingLogger* timings)
       : image_(image), bitmap_(bitmap), heap_(heap),
-        delta_(delta), timings_(timings) {}
-  PatchOat(ElfFile* oat_file, MemMap* image, gc::accounting::ContinuousSpaceBitmap* bitmap,
-           MemMap* heap, off_t delta, TimingLogger* timings)
+        delta_(delta), isa_(isa), timings_(timings) {}
+  PatchOat(InstructionSet isa, ElfFile* oat_file, MemMap* image,
+           gc::accounting::ContinuousSpaceBitmap* bitmap, MemMap* heap, off_t delta,
+           TimingLogger* timings)
       : oat_file_(oat_file), image_(image), bitmap_(bitmap), heap_(heap),
-        delta_(delta), timings_(timings) {}
+        delta_(delta), isa_(isa), timings_(timings) {}
   ~PatchOat() {}
 
   // Was the .art image at image_path made with --compile-pic ?
@@ -147,6 +148,9 @@
   const MemMap* heap_;
   // The amount we are changing the offset by.
   off_t delta_;
+  // Active instruction set, used to know the entrypoint size.
+  const InstructionSet isa_;
+
   TimingLogger* timings_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(PatchOat);
diff --git a/runtime/arch/arm/portable_entrypoints_arm.S b/runtime/arch/arm/portable_entrypoints_arm.S
index 3491c18..a714bca 100644
--- a/runtime/arch/arm/portable_entrypoints_arm.S
+++ b/runtime/arch/arm/portable_entrypoints_arm.S
@@ -53,7 +53,7 @@
     mov    ip, #0                          @ set ip to 0
     str    ip, [sp]                        @ store NULL for method* at bottom of frame
     add    sp, #16                         @ first 4 args are not passed on stack for portable
-    ldr    ip, [r0, #METHOD_PORTABLE_CODE_OFFSET]  @ get pointer to the code
+    ldr    ip, [r0, #METHOD_PORTABLE_CODE_OFFSET_32]  @ get pointer to the code
     blx    ip                              @ call the method
     mov    sp, r11                         @ restore the stack pointer
     ldr    ip, [sp, #24]                   @ load the result pointer
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 1b30c9c..26e6937 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -322,7 +322,7 @@
     ldr    r3, [sp, #12]                   @ copy arg value for r3
     mov    ip, #0                          @ set ip to 0
     str    ip, [sp]                        @ store NULL for method* at bottom of frame
-    ldr    ip, [r0, #METHOD_QUICK_CODE_OFFSET]  @ get pointer to the code
+    ldr    ip, [r0, #METHOD_QUICK_CODE_OFFSET_32]  @ get pointer to the code
     blx    ip                              @ call the method
     mov    sp, r11                         @ restore the stack pointer
     ldr    ip, [sp, #24]                   @ load the result pointer
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 2a19e27..3c5db50 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -551,7 +551,7 @@
 .macro INVOKE_STUB_CALL_AND_RETURN
 
     // load method-> METHOD_QUICK_CODE_OFFSET
-    ldr x9, [x0 , #METHOD_QUICK_CODE_OFFSET]
+    ldr x9, [x0 , #METHOD_QUICK_CODE_OFFSET_64]
     // Branch to method.
     blr x9
 
diff --git a/runtime/arch/mips/portable_entrypoints_mips.S b/runtime/arch/mips/portable_entrypoints_mips.S
index 7545ce0..1e9fe33 100644
--- a/runtime/arch/mips/portable_entrypoints_mips.S
+++ b/runtime/arch/mips/portable_entrypoints_mips.S
@@ -100,7 +100,7 @@
     lw    $a1, 4($sp)           # copy arg value for a1
     lw    $a2, 8($sp)           # copy arg value for a2
     lw    $a3, 12($sp)          # copy arg value for a3
-    lw    $t9, METHOD_PORTABLE_CODE_OFFSET($a0)  # get pointer to the code
+    lw    $t9, METHOD_PORTABLE_CODE_OFFSET_32($a0)  # get pointer to the code
     jalr  $t9                   # call the method
     sw    $zero, 0($sp)         # store NULL for method* at bottom of frame
     move  $sp, $fp              # restore the stack
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 8786222..08b74c6 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -507,7 +507,7 @@
     lw    $a1, 4($sp)           # copy arg value for a1
     lw    $a2, 8($sp)           # copy arg value for a2
     lw    $a3, 12($sp)          # copy arg value for a3
-    lw    $t9, METHOD_QUICK_CODE_OFFSET($a0)  # get pointer to the code
+    lw    $t9, METHOD_QUICK_CODE_OFFSET_32($a0)  # get pointer to the code
     jalr  $t9                   # call the method
     sw    $zero, 0($sp)         # store NULL for method* at bottom of frame
     move  $sp, $fp              # restore the stack
diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S
index 9365795..f8e05dd 100644
--- a/runtime/arch/x86/portable_entrypoints_x86.S
+++ b/runtime/arch/x86/portable_entrypoints_x86.S
@@ -46,7 +46,7 @@
     addl LITERAL(12), %esp        // pop arguments to memcpy
     mov 12(%ebp), %eax            // move method pointer into eax
     mov %eax, (%esp)              // push method pointer onto stack
-    call *METHOD_PORTABLE_CODE_OFFSET(%eax) // call the method
+    call *METHOD_PORTABLE_CODE_OFFSET_32(%eax) // call the method
     mov %ebp, %esp                // restore stack pointer
     POP ebx                       // pop ebx
     POP ebp                       // pop ebp
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 75c8646..6a10755 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -292,7 +292,7 @@
     mov 4(%esp), %ecx             // copy arg1 into ecx
     mov 8(%esp), %edx             // copy arg2 into edx
     mov 12(%esp), %ebx            // copy arg3 into ebx
-    call *METHOD_QUICK_CODE_OFFSET(%eax) // call the method
+    call *METHOD_QUICK_CODE_OFFSET_32(%eax) // call the method
     mov %ebp, %esp                // restore stack pointer
     CFI_DEF_CFA_REGISTER(esp)
     POP ebx                       // pop ebx
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 5798092..0de8dfd 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -481,7 +481,7 @@
     LOOP_OVER_SHORTY_LOADING_GPRS r8, r8d, .Lgpr_setup_finished
     LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, .Lgpr_setup_finished
 .Lgpr_setup_finished:
-    call *METHOD_QUICK_CODE_OFFSET(%rdi) // Call the method.
+    call *METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
     movq %rbp, %rsp               // Restore stack pointer.
     CFI_DEF_CFA_REGISTER(rsp)
     POP r9                        // Pop r9 - shorty*.
@@ -564,7 +564,7 @@
     LOOP_OVER_SHORTY_LOADING_GPRS r8, r8d, .Lgpr_setup_finished2
     LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, .Lgpr_setup_finished2
 .Lgpr_setup_finished2:
-    call *METHOD_QUICK_CODE_OFFSET(%rdi) // Call the method.
+    call *METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method.
     movq %rbp, %rsp               // Restore stack pointer.
     CFI_DEF_CFA_REGISTER(rsp)
     POP r9                        // Pop r9 - shorty*.
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index b36b1b7..701f44f 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -44,13 +44,12 @@
 
 // Offsets within java.lang.Method.
 #define METHOD_DEX_CACHE_METHODS_OFFSET 12
-#if defined(ART_USE_PORTABLE_COMPILER)
-#define METHOD_PORTABLE_CODE_OFFSET 40
-#define METHOD_QUICK_CODE_OFFSET 48
-#else
-#define METHOD_PORTABLE_CODE_OFFSET 40
-#define METHOD_QUICK_CODE_OFFSET 40
-#endif  // ART_USE_PORTABLE_COMPILER
+
+// Verified by object_test.
+#define METHOD_QUICK_CODE_OFFSET_32 48
+#define METHOD_QUICK_CODE_OFFSET_64 56
+#define METHOD_PORTABLE_CODE_OFFSET_32 56
+#define METHOD_PORTABLE_CODE_OFFSET_64 72
 
 #else
 
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index b80aa5a..1541093 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -183,7 +183,8 @@
       portable_imt_conflict_trampoline_(nullptr),
       quick_imt_conflict_trampoline_(nullptr),
       quick_generic_jni_trampoline_(nullptr),
-      quick_to_interpreter_bridge_trampoline_(nullptr) {
+      quick_to_interpreter_bridge_trampoline_(nullptr),
+      image_pointer_size_(sizeof(void*)) {
   CHECK_EQ(arraysize(class_roots_descriptors_), size_t(kClassRootsMax));
   memset(find_array_class_cache_, 0, kFindArrayCacheSize * sizeof(mirror::Class*));
 }
@@ -318,10 +319,9 @@
   Handle<mirror::Class> java_lang_reflect_ArtMethod(hs.NewHandle(
     AllocClass(self, java_lang_Class.Get(), mirror::ArtMethod::ClassSize())));
   CHECK(java_lang_reflect_ArtMethod.Get() != nullptr);
-  java_lang_reflect_ArtMethod->SetObjectSize(mirror::ArtMethod::InstanceSize());
+  java_lang_reflect_ArtMethod->SetObjectSize(mirror::ArtMethod::InstanceSize(sizeof(void*)));
   SetClassRoot(kJavaLangReflectArtMethod, java_lang_reflect_ArtMethod.Get());
   java_lang_reflect_ArtMethod->SetStatus(mirror::Class::kStatusResolved, self);
-
   mirror::ArtMethod::SetClass(java_lang_reflect_ArtMethod.Get());
 
   // Set up array classes for string, field, method
@@ -347,8 +347,7 @@
   // DexCache instances. Needs to be after String, Field, Method arrays since AllocDexCache uses
   // these roots.
   CHECK_NE(0U, boot_class_path.size());
-  for (size_t i = 0; i != boot_class_path.size(); ++i) {
-    const DexFile* dex_file = boot_class_path[i];
+  for (const DexFile* dex_file : boot_class_path) {
     CHECK(dex_file != nullptr);
     AppendToBootClassPath(*dex_file);
   }
@@ -1631,6 +1630,20 @@
   // Set classes on AbstractMethod early so that IsMethod tests can be performed during the live
   // bitmap walk.
   mirror::ArtMethod::SetClass(GetClassRoot(kJavaLangReflectArtMethod));
+  size_t art_method_object_size = mirror::ArtMethod::GetJavaLangReflectArtMethod()->GetObjectSize();
+  if (!Runtime::Current()->IsCompiler()) {
+    // Compiler supports having an image with a different pointer size than the runtime. This
+    // happens on the host for compile 32 bit tests since we use a 64 bit libart compiler. We may
+    // also use 32 bit dex2oat on a system with 64 bit apps.
+    CHECK_EQ(art_method_object_size, mirror::ArtMethod::InstanceSize(sizeof(void*)))
+        << sizeof(void*);
+  }
+  if (art_method_object_size == mirror::ArtMethod::InstanceSize(4)) {
+    image_pointer_size_ = 4;
+  } else {
+    CHECK_EQ(art_method_object_size, mirror::ArtMethod::InstanceSize(8));
+    image_pointer_size_ = 8;
+  }
 
   // Set entry point to interpreter if in InterpretOnly mode.
   if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
@@ -1644,7 +1657,7 @@
 
   // reinit array_iftable_ from any array class instance, they should be ==
   array_iftable_ = GcRoot<mirror::IfTable>(GetClassRoot(kObjectArrayClass)->GetIfTable());
-  DCHECK(array_iftable_.Read() == GetClassRoot(kBooleanArrayClass)->GetIfTable());
+  DCHECK_EQ(array_iftable_.Read(), GetClassRoot(kBooleanArrayClass)->GetIfTable());
   // String class root was set above
   mirror::Reference::SetClass(GetClassRoot(kJavaLangRefReference));
   mirror::ArtField::SetClass(GetClassRoot(kJavaLangReflectArtField));
@@ -5266,14 +5279,18 @@
   } else {
     klass->SetNumReferenceInstanceFields(num_reference_fields);
     if (!klass->IsVariableSize()) {
-      std::string temp;
-      DCHECK_GE(size, sizeof(mirror::Object)) << klass->GetDescriptor(&temp);
-      size_t previous_size = klass->GetObjectSize();
-      if (previous_size != 0) {
-        // Make sure that we didn't originally have an incorrect size.
-        CHECK_EQ(previous_size, size) << klass->GetDescriptor(&temp);
+      if (klass->DescriptorEquals("Ljava/lang/reflect/ArtMethod;")) {
+        klass->SetObjectSize(mirror::ArtMethod::InstanceSize(sizeof(void*)));
+      } else {
+        std::string temp;
+        DCHECK_GE(size, sizeof(mirror::Object)) << klass->GetDescriptor(&temp);
+        size_t previous_size = klass->GetObjectSize();
+        if (previous_size != 0) {
+          // Make sure that we didn't originally have an incorrect size.
+          CHECK_EQ(previous_size, size) << klass->GetDescriptor(&temp);
+        }
+        klass->SetObjectSize(size);
       }
-      klass->SetObjectSize(size);
     }
   }
 
@@ -5324,7 +5341,6 @@
     }
     CHECK_EQ(current_ref_offset.Uint32Value(), end_ref_offset.Uint32Value());
   }
-
   return true;
 }
 
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 7fc394a..a5c1c24 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -813,6 +813,9 @@
   const void* quick_generic_jni_trampoline_;
   const void* quick_to_interpreter_bridge_trampoline_;
 
+  // Image pointer size.
+  size_t image_pointer_size_;
+
   friend class ImageWriter;  // for GetClassRoots
   friend class ImageDumper;  // for FindOpenedOatFileFromOatLocation
   friend class ElfPatcher;  // for FindOpenedOatFileForDexFile & FindOpenedOatFileFromOatLocation
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index c170b8d..384a2bf 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -492,13 +492,6 @@
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_strings_),                    "dexCacheStrings"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_code_item_offset_),           "dexCodeItemOffset"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_method_index_),               "dexMethodIndex"));
-    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_interpreter_),            "entryPointFromInterpreter"));
-    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_jni_),                    "entryPointFromJni"));
-#if defined(ART_USE_PORTABLE_COMPILER)
-    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_portable_compiled_code_), "entryPointFromPortableCompiledCode"));
-#endif
-    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_quick_compiled_code_),    "entryPointFromQuickCompiledCode"));
-    offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, gc_map_),                                  "gcMap"));
     offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, method_index_),                   "methodIndex"));
   };
 };
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index dfd2e11..d4f9b49 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1637,7 +1637,7 @@
   *(sp32 - 1) = cookie;
 
   // Retrieve the stored native code.
-  const void* nativeCode = called->GetNativeMethod();
+  void* nativeCode = called->GetEntryPointFromJni();
 
   // There are two cases for the content of nativeCode:
   // 1) Pointer to the native function.
diff --git a/runtime/image.cc b/runtime/image.cc
index 81044df..b4496e3 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
 namespace art {
 
 const byte ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const byte ImageHeader::kImageVersion[] = { '0', '1', '1', '\0' };
+const byte ImageHeader::kImageVersion[] = { '0', '1', '2', '\0' };
 
 ImageHeader::ImageHeader(uint32_t image_begin,
                          uint32_t image_size,
diff --git a/runtime/instruction_set.h b/runtime/instruction_set.h
index ae8eeac..da7d153 100644
--- a/runtime/instruction_set.h
+++ b/runtime/instruction_set.h
@@ -125,6 +125,10 @@
   }
 }
 
+static inline size_t InstructionSetPointerSize(InstructionSet isa) {
+  return Is64BitInstructionSet(isa) ? 8U : 4U;
+}
+
 static inline size_t GetBytesPerGprSpillLocation(InstructionSet isa) {
   switch (isa) {
     case kArm:
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 47a7f0d..26c7099 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -138,7 +138,7 @@
   if (method->IsStatic()) {
     if (shorty == "L") {
       typedef jobject (fntype)(JNIEnv*, jclass);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       jobject jresult;
@@ -149,35 +149,35 @@
       result->SetL(soa.Decode<Object*>(jresult));
     } else if (shorty == "V") {
       typedef void (fntype)(JNIEnv*, jclass);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       fn(soa.Env(), klass.get());
     } else if (shorty == "Z") {
       typedef jboolean (fntype)(JNIEnv*, jclass);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetZ(fn(soa.Env(), klass.get()));
     } else if (shorty == "BI") {
       typedef jbyte (fntype)(JNIEnv*, jclass, jint);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetB(fn(soa.Env(), klass.get(), args[0]));
     } else if (shorty == "II") {
       typedef jint (fntype)(JNIEnv*, jclass, jint);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetI(fn(soa.Env(), klass.get(), args[0]));
     } else if (shorty == "LL") {
       typedef jobject (fntype)(JNIEnv*, jclass, jobject);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -190,14 +190,15 @@
       result->SetL(soa.Decode<Object*>(jresult));
     } else if (shorty == "IIZ") {
       typedef jint (fntype)(JNIEnv*, jclass, jint, jboolean);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetI(fn(soa.Env(), klass.get(), args[0], args[1]));
     } else if (shorty == "ILI") {
       typedef jint (fntype)(JNIEnv*, jclass, jobject, jint);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(
+          method->GetEntryPointFromJni()));
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -206,21 +207,21 @@
       result->SetI(fn(soa.Env(), klass.get(), arg0.get(), args[1]));
     } else if (shorty == "SIZ") {
       typedef jshort (fntype)(JNIEnv*, jclass, jint, jboolean);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetS(fn(soa.Env(), klass.get(), args[0], args[1]));
     } else if (shorty == "VIZ") {
       typedef void (fntype)(JNIEnv*, jclass, jint, jboolean);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       fn(soa.Env(), klass.get(), args[0], args[1]);
     } else if (shorty == "ZLL") {
       typedef jboolean (fntype)(JNIEnv*, jclass, jobject, jobject);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -231,7 +232,7 @@
       result->SetZ(fn(soa.Env(), klass.get(), arg0.get(), arg1.get()));
     } else if (shorty == "ZILL") {
       typedef jboolean (fntype)(JNIEnv*, jclass, jint, jobject, jobject);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg1(soa.Env(),
@@ -242,7 +243,7 @@
       result->SetZ(fn(soa.Env(), klass.get(), args[0], arg1.get(), arg2.get()));
     } else if (shorty == "VILII") {
       typedef void (fntype)(JNIEnv*, jclass, jint, jobject, jint, jint);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg1(soa.Env(),
@@ -251,7 +252,7 @@
       fn(soa.Env(), klass.get(), args[0], arg1.get(), args[2], args[3]);
     } else if (shorty == "VLILII") {
       typedef void (fntype)(JNIEnv*, jclass, jobject, jint, jobject, jint, jint);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -267,7 +268,7 @@
   } else {
     if (shorty == "L") {
       typedef jobject (fntype)(JNIEnv*, jobject);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jobject> rcvr(soa.Env(),
                                    soa.AddLocalReference<jobject>(receiver));
       jobject jresult;
@@ -278,14 +279,14 @@
       result->SetL(soa.Decode<Object*>(jresult));
     } else if (shorty == "V") {
       typedef void (fntype)(JNIEnv*, jobject);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jobject> rcvr(soa.Env(),
                                    soa.AddLocalReference<jobject>(receiver));
       ScopedThreadStateChange tsc(self, kNative);
       fn(soa.Env(), rcvr.get());
     } else if (shorty == "LL") {
       typedef jobject (fntype)(JNIEnv*, jobject, jobject);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jobject> rcvr(soa.Env(),
                                    soa.AddLocalReference<jobject>(receiver));
       ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -299,7 +300,7 @@
       ScopedThreadStateChange tsc(self, kNative);
     } else if (shorty == "III") {
       typedef jint (fntype)(JNIEnv*, jobject, jint, jint);
-      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
+      fntype* const fn = reinterpret_cast<fntype*>(method->GetEntryPointFromJni());
       ScopedLocalRef<jobject> rcvr(soa.Env(),
                                    soa.AddLocalReference<jobject>(receiver));
       ScopedThreadStateChange tsc(self, kNative);
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index b02f456..33dd19f 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -352,12 +352,6 @@
   return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
 }
 
-template<VerifyObjectFlags kVerifyFlags>
-inline void ArtMethod::SetNativeMethod(const void* native_method) {
-  SetFieldPtr<false, true, kVerifyFlags>(
-      OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_), native_method);
-}
-
 inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() {
   if (UNLIKELY(IsPortableCompiled())) {
     // Portable compiled dex bytecode or jni stub.
@@ -562,6 +556,13 @@
                         new_dex_cache_classes);
 }
 
+inline void ArtMethod::CheckObjectSizeEqualsMirrorSize() {
+  // Using the default, check the class object size to make sure it matches the size of the
+  // object.
+  DCHECK_EQ(GetClass()->GetObjectSize(), sizeof(*this));
+}
+
+
 }  // namespace mirror
 }  // namespace art
 
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 5b833b9..5ed7f2e 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -360,7 +360,7 @@
   if (is_fast) {
     SetAccessFlags(GetAccessFlags() | kAccFastNative);
   }
-  SetNativeMethod(native_method);
+  SetEntryPointFromJni(native_method);
 }
 
 void ArtMethod::UnregisterNative(Thread* self) {
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 36bb9e0..894c3ce 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -47,11 +47,6 @@
   // Size of java.lang.reflect.ArtMethod.class.
   static uint32_t ClassSize();
 
-  // Size of an instance of java.lang.reflect.ArtMethod not including its value array.
-  static constexpr uint32_t InstanceSize() {
-    return sizeof(ArtMethod);
-  }
-
   static ArtMethod* FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
                                         jobject jlr_method)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -248,51 +243,94 @@
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   EntryPointFromInterpreter* GetEntryPointFromInterpreter()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return GetFieldPtr<EntryPointFromInterpreter*, kVerifyFlags>(
-        OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_));
+    CheckObjectSizeEqualsMirrorSize();
+    return GetEntryPointFromInterpreterPtrSize(sizeof(void*));
+  }
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  EntryPointFromInterpreter* GetEntryPointFromInterpreterPtrSize(size_t pointer_size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldPtrWithSize<EntryPointFromInterpreter*, kVerifyFlags>(
+        EntryPointFromInterpreterOffset(pointer_size), pointer_size);
   }
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   void SetEntryPointFromInterpreter(EntryPointFromInterpreter* entry_point_from_interpreter)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    SetFieldPtr<false, true, kVerifyFlags>(
-        OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_),
-        entry_point_from_interpreter);
+    CheckObjectSizeEqualsMirrorSize();
+    SetEntryPointFromInterpreterPtrSize(entry_point_from_interpreter, sizeof(void*));
+  }
+  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  void SetEntryPointFromInterpreterPtrSize(EntryPointFromInterpreter* entry_point_from_interpreter,
+                                           size_t pointer_size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SetFieldPtrWithSize<false, true, kVerifyFlags>(
+        EntryPointFromInterpreterOffset(pointer_size), entry_point_from_interpreter, pointer_size);
   }
 
 #if defined(ART_USE_PORTABLE_COMPILER)
-  static MemberOffset EntryPointFromPortableCompiledCodeOffset() {
-    return MemberOffset(OFFSETOF_MEMBER(ArtMethod, entry_point_from_portable_compiled_code_));
+  ALWAYS_INLINE static MemberOffset EntryPointFromPortableCompiledCodeOffset(size_t pointer_size) {
+    return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+        PtrSizedFields, entry_point_from_portable_compiled_code_) / sizeof(void*) * pointer_size);
   }
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  const void* GetEntryPointFromPortableCompiledCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return GetFieldPtr<const void*, kVerifyFlags>(
-        EntryPointFromPortableCompiledCodeOffset());
+  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  const void* GetEntryPointFromPortableCompiledCode()
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    CheckObjectSizeEqualsMirrorSize();
+    return GetEntryPointFromPortableCompiledCodePtrSize(sizeof(void*));
   }
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE const void* GetEntryPointFromPortableCompiledCodePtrSize(size_t pointer_size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldPtrWithSize<const void*, kVerifyFlags>(
+        EntryPointFromPortableCompiledCodeOffset(pointer_size), pointer_size);
+  }
+
+  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   void SetEntryPointFromPortableCompiledCode(const void* entry_point_from_portable_compiled_code)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    SetFieldPtr<false, true, kVerifyFlags>(
-        EntryPointFromPortableCompiledCodeOffset(), entry_point_from_portable_compiled_code);
+    CheckObjectSizeEqualsMirrorSize();
+    return SetEntryPointFromPortableCompiledCodePtrSize(entry_point_from_portable_compiled_code,
+                                                        sizeof(void*));
+  }
+
+  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  void SetEntryPointFromPortableCompiledCodePtrSize(
+      const void* entry_point_from_portable_compiled_code, size_t pointer_size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SetFieldPtrWithSize<false, true, kVerifyFlags>(
+        EntryPointFromPortableCompiledCodeOffset(pointer_size),
+        entry_point_from_portable_compiled_code, pointer_size);
   }
 #endif
 
-  static MemberOffset EntryPointFromQuickCompiledCodeOffset() {
-    return MemberOffset(OFFSETOF_MEMBER(ArtMethod, entry_point_from_quick_compiled_code_));
-  }
-
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   const void* GetEntryPointFromQuickCompiledCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return GetFieldPtr<const void*, kVerifyFlags>(EntryPointFromQuickCompiledCodeOffset());
+    CheckObjectSizeEqualsMirrorSize();
+    return GetEntryPointFromQuickCompiledCodePtrSize(sizeof(void*));
+  }
+  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE const void* GetEntryPointFromQuickCompiledCodePtrSize(size_t pointer_size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldPtrWithSize<const void*, kVerifyFlags>(
+        EntryPointFromQuickCompiledCodeOffset(pointer_size), pointer_size);
   }
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    SetFieldPtr<false, true, kVerifyFlags>(
-        EntryPointFromQuickCompiledCodeOffset(), entry_point_from_quick_compiled_code);
+    CheckObjectSizeEqualsMirrorSize();
+    SetEntryPointFromQuickCompiledCodePtrSize(entry_point_from_quick_compiled_code,
+                                              sizeof(void*));
+  }
+  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE void SetEntryPointFromQuickCompiledCodePtrSize(
+      const void* entry_point_from_quick_compiled_code, size_t pointer_size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SetFieldPtrWithSize<false, true, kVerifyFlags>(
+        EntryPointFromQuickCompiledCodeOffset(pointer_size), entry_point_from_quick_compiled_code,
+        pointer_size);
   }
 
   uint32_t GetCodeSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -321,7 +359,7 @@
   uint32_t GetQuickOatCodeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void SetQuickOatCodeOffset(uint32_t code_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  static const void* EntryPointToCodePointer(const void* entry_point) ALWAYS_INLINE {
+  ALWAYS_INLINE static const void* EntryPointToCodePointer(const void* entry_point) {
     uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
     code &= ~0x1;  // TODO: Make this Thumb2 specific.
     return reinterpret_cast<const void*>(code);
@@ -343,11 +381,23 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   const uint8_t* GetNativeGcMap() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return GetFieldPtr<uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_));
+    CheckObjectSizeEqualsMirrorSize();
+    return GetNativeGcMapPtrSize(sizeof(void*));
   }
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE const uint8_t* GetNativeGcMapPtrSize(size_t pointer_size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldPtrWithSize<uint8_t*>(GcMapOffset(pointer_size), pointer_size);
+  }
+  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   void SetNativeGcMap(const uint8_t* data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    SetFieldPtr<false, true, kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_), data);
+    CheckObjectSizeEqualsMirrorSize();
+    SetNativeGcMapPtrSize(data, sizeof(void*));
+  }
+  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE void SetNativeGcMapPtrSize(const uint8_t* data, size_t pointer_size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SetFieldPtrWithSize<false, true, kVerifyFlags>(GcMapOffset(pointer_size), data,
+                                                   pointer_size);
   }
 
   // When building the oat need a convenient place to stuff the offset of the native GC map.
@@ -386,16 +436,46 @@
 
   void UnregisterNative(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  static MemberOffset NativeMethodOffset() {
-    return OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_);
+  static MemberOffset EntryPointFromInterpreterOffset(size_t pointer_size) {
+    return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+        PtrSizedFields, entry_point_from_interpreter_) / sizeof(void*) * pointer_size);
   }
 
-  const void* GetNativeMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return GetFieldPtr<const void*>(NativeMethodOffset());
+  static MemberOffset EntryPointFromJniOffset(size_t pointer_size) {
+    return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+        PtrSizedFields, entry_point_from_jni_) / sizeof(void*) * pointer_size);
   }
 
-  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
-  void SetNativeMethod(const void*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  static MemberOffset EntryPointFromQuickCompiledCodeOffset(size_t pointer_size) {
+    return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+        PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*) * pointer_size);
+  }
+
+  static MemberOffset GcMapOffset(size_t pointer_size) {
+    return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER(
+        PtrSizedFields, gc_map_) / sizeof(void*) * pointer_size);
+  }
+
+  void* GetEntryPointFromJni() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    CheckObjectSizeEqualsMirrorSize();
+    return GetEntryPointFromJniPtrSize(sizeof(void*));
+  }
+  ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(size_t pointer_size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return GetFieldPtrWithSize<void*>(EntryPointFromJniOffset(pointer_size), pointer_size);
+  }
+
+  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  void SetEntryPointFromJni(const void* entrypoint) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    CheckObjectSizeEqualsMirrorSize();
+    SetEntryPointFromJniPtrSize<kVerifyFlags>(entrypoint, sizeof(void*));
+  }
+  template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, size_t pointer_size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    SetFieldPtrWithSize<false, true, kVerifyFlags>(
+        EntryPointFromJniOffset(pointer_size), entrypoint, pointer_size);
+  }
 
   static MemberOffset GetMethodIndexOffset() {
     return OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_);
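
Every offset helper above follows one recipe: OFFSETOF_MEMBER yields the
field's offset inside PtrSizedFields in terms of the host pointer width,
dividing by sizeof(void*) turns that into a slot index, and multiplying
by pointer_size rescales the index for the target. A worked example for
entry_point_from_jni_, the second slot:

    // Host-independent: offset / sizeof(void*) == 1 whether the host is
    // 32-bit (4 / 4) or 64-bit (8 / 8).
    MemberOffset off32 = mirror::ArtMethod::EntryPointFromJniOffset(4);  // base + 1 * 4
    MemberOffset off64 = mirror::ArtMethod::EntryPointFromJniOffset(8);  // base + 1 * 8

where base is PtrSizedFieldsOffset(). Note the recipe relies on every
member of PtrSizedFields being exactly one pointer wide.
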
@@ -484,6 +564,15 @@
 
   ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  static size_t SizeWithoutPointerFields() {
+    return sizeof(ArtMethod) - sizeof(PtrSizedFields);
+  }
+
+  // Size of an instance of java.lang.reflect.ArtMethod when using the given pointer size.
+  static size_t InstanceSize(size_t pointer_size) {
+    return SizeWithoutPointerFields() + (sizeof(PtrSizedFields) / sizeof(void*)) * pointer_size;
+  }
+
  protected:
   // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
   // The class we are a part of.
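
InstanceSize applies the same scaling to the whole object: the fixed
fields are counted once and each slot of PtrSizedFields contributes the
target's pointer width. With the portable compiler disabled there are
four slots, so:

    // 32-bit target: SizeWithoutPointerFields() + 4 * 4
    // 64-bit target: SizeWithoutPointerFields() + 4 * 8
    size_t size32 = mirror::ArtMethod::InstanceSize(4);
    size_t size64 = mirror::ArtMethod::InstanceSize(8);
    // size64 - size32 == 16 with four pointer slots.
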
@@ -498,28 +587,6 @@
   // Short cuts to declaring_class_->dex_cache_ member for fast compiled code access.
   HeapReference<ObjectArray<String>> dex_cache_strings_;
 
-  // Method dispatch from the interpreter invokes this pointer which may cause a bridge into
-  // compiled code.
-  uint64_t entry_point_from_interpreter_;
-
-  // Pointer to JNI function registered to this method, or a function to resolve the JNI function.
-  uint64_t entry_point_from_jni_;
-
-  // Method dispatch from portable compiled code invokes this pointer which may cause bridging into
-  // quick compiled code or the interpreter.
-#if defined(ART_USE_PORTABLE_COMPILER)
-  uint64_t entry_point_from_portable_compiled_code_;
-#endif
-
-  // Method dispatch from quick compiled code invokes this pointer which may cause bridging into
-  // portable compiled code or the interpreter.
-  uint64_t entry_point_from_quick_compiled_code_;
-
-  // Pointer to a data structure created by the compiler and used by the garbage collector to
-  // determine which registers hold live references to objects within the heap. Keyed by native PC
-  // offsets for the quick compiler and dex PCs for the portable.
-  uint64_t gc_map_;
-
   // Access flags; low 16 bits are defined by spec.
   uint32_t access_flags_;
 
@@ -538,15 +605,48 @@
   // ifTable.
   uint32_t method_index_;
 
+  // Add alignment word here if necessary.
+
+  // Must be the last fields in the method so that only the object's tail varies with pointer size.
+  struct PACKED(4) PtrSizedFields {
+    // Method dispatch from the interpreter invokes this pointer which may cause a bridge into
+    // compiled code.
+    void* entry_point_from_interpreter_;
+
+    // Pointer to JNI function registered to this method, or a function to resolve the JNI function.
+    void* entry_point_from_jni_;
+
+    // Method dispatch from quick compiled code invokes this pointer which may cause bridging into
+    // portable compiled code or the interpreter.
+    void* entry_point_from_quick_compiled_code_;
+
+    // Pointer to a data structure created by the compiler and used by the garbage collector to
+    // determine which registers hold live references to objects within the heap. Keyed by native PC
+    // offsets for the quick compiler and dex PCs for the portable compiler.
+    void* gc_map_;
+
+    // Method dispatch from portable compiled code invokes this pointer which may cause bridging
+    // into quick compiled code or the interpreter. Last to simplify entrypoint logic.
+#if defined(ART_USE_PORTABLE_COMPILER)
+    void* entry_point_from_portable_compiled_code_;
+#endif
+  } ptr_sized_fields_;
+
   static GcRoot<Class> java_lang_reflect_ArtMethod_;
 
  private:
+  ALWAYS_INLINE void CheckObjectSizeEqualsMirrorSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   ALWAYS_INLINE ObjectArray<ArtMethod>* GetDexCacheResolvedMethods()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   ALWAYS_INLINE ObjectArray<Class>* GetDexCacheResolvedTypes()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  static size_t PtrSizedFieldsOffset() {
+    return OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_);
+  }
+
   friend struct art::ArtMethodOffsets;  // for verifying offset information
   DISALLOW_IMPLICIT_CONSTRUCTORS(ArtMethod);
 };
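
CheckObjectSizeEqualsMirrorSize is only declared here; it guards the
unqualified accessors, which are safe only while the class object's size
agrees with the C++ mirror struct, i.e. with the runtime's own pointer
width. A plausible sketch of the inline definition (the real body lives
in art_method-inl.h and may differ in detail):

    inline void ArtMethod::CheckObjectSizeEqualsMirrorSize() {
      // If these diverge, callers must use the PtrSize accessor variants
      // with an explicit pointer width instead.
      DCHECK_EQ(static_cast<size_t>(GetClass()->GetObjectSize()), sizeof(ArtMethod));
    }
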
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index d00c3ef..63aa675 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -518,6 +518,13 @@
     return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
   }
 
+  void SetObjectSizeWithoutChecks(uint32_t new_object_size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    // Not called within a transaction.
+    return SetField32<false, false, kVerifyNone>(
+        OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
+  }
+
   // Returns true if this class is in the same package as that class.
   bool IsInSamePackage(Class* that) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
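
SetObjectSizeWithoutChecks exists because the object size of the
ArtMethod class is now a function of the image's pointer width and may
be rewritten while the runtime is still bootstrapping, before
verification is possible. A hypothetical call site, assuming the usual
static accessor for the java_lang_reflect_ArtMethod_ root:

    mirror::Class* method_class = mirror::ArtMethod::GetJavaLangReflectArtMethod();
    method_class->SetObjectSizeWithoutChecks(
        mirror::ArtMethod::InstanceSize(sizeof(void*)));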
 
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 97a8ee7..7a383e4 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -402,8 +402,7 @@
   }
   DCHECK_GE(result, sizeof(Object))
       << " class=" << PrettyTypeOf(GetClass<kNewFlags, kReadBarrierOption>());
-  DCHECK(!(IsArtField<kNewFlags, kReadBarrierOption>())  || result == sizeof(ArtField));
-  DCHECK(!(IsArtMethod<kNewFlags, kReadBarrierOption>()) || result == sizeof(ArtMethod));
+  DCHECK(!(IsArtField<kNewFlags, kReadBarrierOption>()) || result == sizeof(ArtField));
   return result;
 }
 
@@ -817,7 +816,6 @@
     }
   }
 }
-
 }  // namespace mirror
 }  // namespace art
 
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index a6b6227..ae1aeb5 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -316,15 +316,26 @@
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
   void SetFieldPtr(MemberOffset field_offset, T new_value)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#ifndef __LP64__
-    SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>(
-        field_offset, reinterpret_cast<int32_t>(new_value));
-#else
-    SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
-        field_offset, reinterpret_cast<int64_t>(new_value));
-#endif
+    SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+        field_offset, new_value, sizeof(void*));
   }
 
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
+  ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset, T new_value,
+                                         size_t pointer_size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
+    if (pointer_size == 4) {
+      intptr_t ptr = reinterpret_cast<intptr_t>(new_value);
+      DCHECK_EQ(static_cast<int32_t>(ptr), ptr);  // Check that we don't lose any non-zero bits.
+      SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+          field_offset, static_cast<int32_t>(ptr));
+    } else {
+      SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+          field_offset, static_cast<int64_t>(reinterpret_cast<intptr_t>(new_value)));
+    }
+  }
   // TODO fix thread safety analysis broken by the use of template. This should be
   // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
   template <const bool kVisitClass, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
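
SetFieldPtrWithSize makes any narrowing explicit: a 4-byte store
squeezes the pointer through int32_t and DCHECKs that the value
round-trips, which is what lets a 64-bit dex2oat safely write fields for
a 32-bit target. An illustration with made-up values, where obj is any
mirror::Object* and offset is a pointer field's MemberOffset:

    void* target_code = reinterpret_cast<void*>(0x70001234);  // Fits in 32 bits.
    obj->SetFieldPtrWithSize<false>(offset, target_code, 4u);
    // A pointer that does not round-trip through int32_t trips the DCHECK.
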
@@ -337,11 +348,21 @@
   template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   T GetFieldPtr(MemberOffset field_offset)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#ifndef __LP64__
-    return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
-#else
-    return reinterpret_cast<T>(GetField64<kVerifyFlags, kIsVolatile>(field_offset));
-#endif
+    return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, sizeof(void*));
+  }
+
+  template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+  ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, size_t pointer_size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
+    if (pointer_size == 4) {
+      return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
+    } else {
+      int64_t v = GetField64<kVerifyFlags, kIsVolatile>(field_offset);
+      // Check that we don't lose any non-zero bits.
+      DCHECK_EQ(reinterpret_cast<int64_t>(reinterpret_cast<T>(v)), v);
+      return reinterpret_cast<T>(v);
+    }
   }
 
   // TODO: Fixme when annotalysis works with visitors.
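
GetFieldPtrWithSize is the read-side mirror: a 4-byte field is loaded
via GetField32 and cast back to T, while an 8-byte load DCHECKs that the
value survives conversion to the (possibly narrower) host pointer type
T. Continuing the sketch above:

    void* read_back = obj->GetFieldPtrWithSize<void*>(offset, 4u);
    // read_back == target_code: the 32-bit store and load round-trip.
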
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index ede3b64..3e29e5f 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -89,11 +89,18 @@
   EXPECT_EQ(STRING_OFFSET_OFFSET, String::OffsetOffset().Int32Value());
   EXPECT_EQ(STRING_DATA_OFFSET, Array::DataOffset(sizeof(uint16_t)).Int32Value());
 
-  EXPECT_EQ(METHOD_DEX_CACHE_METHODS_OFFSET, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
+  EXPECT_EQ(METHOD_DEX_CACHE_METHODS_OFFSET,
+            ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
 #if defined(ART_USE_PORTABLE_COMPILER)
-  EXPECT_EQ(METHOD_PORTABLE_CODE_OFFSET, ArtMethod::EntryPointFromPortableCompiledCodeOffset().Int32Value());
+  EXPECT_EQ(METHOD_PORTABLE_CODE_OFFSET_32,
+            ArtMethod::EntryPointFromPortableCompiledCodeOffset(4).Int32Value());
+  EXPECT_EQ(METHOD_PORTABLE_CODE_OFFSET_64,
+            ArtMethod::EntryPointFromPortableCompiledCodeOffset(8).Int32Value());
 #endif
-  EXPECT_EQ(METHOD_QUICK_CODE_OFFSET, ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+  EXPECT_EQ(METHOD_QUICK_CODE_OFFSET_32,
+            ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value());
+  EXPECT_EQ(METHOD_QUICK_CODE_OFFSET_64,
+            ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value());
 }
 
 TEST_F(ObjectTest, IsInSamePackage) {
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index 9f77e55..809381b 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -71,7 +71,7 @@
       if (count < method_count) {
         methods[count].name = m->GetName();
         methods[count].signature = m->GetShorty();
-        methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+        methods[count].fnPtr = m->GetEntryPointFromJni();
         count++;
       } else {
         LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
@@ -84,7 +84,7 @@
       if (count < method_count) {
         methods[count].name = m->GetName();
         methods[count].signature = m->GetShorty();
-        methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+        methods[count].fnPtr = m->GetEntryPointFromJni();
         count++;
       } else {
         LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);