Refactor runtime support.

Change-Id: Id7470a4105838150d5ceb73ab2c8c83e739660df
diff --git a/src/oat/jni/arm/calling_convention_arm.cc b/src/oat/jni/arm/calling_convention_arm.cc
new file mode 100644
index 0000000..75c0380
--- /dev/null
+++ b/src/oat/jni/arm/calling_convention_arm.cc
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention_arm.h"
+#include "logging.h"
+#include "oat/utils/arm/managed_register_arm.h"
+
+namespace art {
+namespace arm {
+
+// Calling convention
+
+ManagedRegister ArmManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+  return ArmManagedRegister::FromCoreRegister(IP);  // R12
+}
+
+ManagedRegister ArmJniCallingConvention::InterproceduralScratchRegister() {
+  return ArmManagedRegister::FromCoreRegister(IP);  // R12
+}
+
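+// Note: results, including float ('F') and double ('D') values, come back in core registers
+// here (soft-float style): R0 for 32-bit values and the R0/R1 pair for 64-bit ones.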
+static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
+  if (shorty[0] == 'F') {
+    return ArmManagedRegister::FromCoreRegister(R0);
+  } else if (shorty[0] == 'D') {
+    return ArmManagedRegister::FromRegisterPair(R0_R1);
+  } else if (shorty[0] == 'J') {
+    return ArmManagedRegister::FromRegisterPair(R0_R1);
+  } else if (shorty[0] == 'V') {
+    return ArmManagedRegister::NoRegister();
+  } else {
+    return ArmManagedRegister::FromCoreRegister(R0);
+  }
+}
+
+ManagedRegister ArmManagedRuntimeCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister ArmJniCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty());
+}
+
+// Managed runtime calling convention
+
+std::vector<ManagedRegister> ArmManagedRuntimeCallingConvention::entry_spills_;
+
+ManagedRegister ArmManagedRuntimeCallingConvention::MethodRegister() {
+  return ArmManagedRegister::FromCoreRegister(R0);
+}
+
+bool ArmManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+  return itr_slots_ < 3;
+}
+
+bool ArmManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+  if (itr_slots_ < 2) {
+    return false;
+  } else if (itr_slots_ > 2) {
+    return true;
+  } else {
+    // handle funny case of a long/double straddling registers and the stack
+    return IsParamALongOrDouble(itr_args_);
+  }
+}
+
+static const Register kManagedArgumentRegisters[] = {
+  R1, R2, R3
+};
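+// For illustration: for a non-static method taking (int, long), 'this' is slot 0 in R1 and the
+// int is slot 1 in R2; the long (slots 2 and 3) then straddles R3 and the stack, the spanning
+// case handled below and in CurrentParamStackOffset().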
+ManagedRegister ArmManagedRuntimeCallingConvention::CurrentParamRegister() {
+  CHECK(IsCurrentParamInRegister());
+  if (IsParamALongOrDouble(itr_args_)) {
+    if (itr_slots_ == 0) {
+      return ArmManagedRegister::FromRegisterPair(R1_R2);
+    } else if (itr_slots_ == 1) {
+      return ArmManagedRegister::FromRegisterPair(R2_R3);
+    } else {
+      // This is a long/double split between registers and the stack
+      return ArmManagedRegister::FromCoreRegister(
+        kManagedArgumentRegisters[itr_slots_]);
+    }
+  } else {
+    return
+      ArmManagedRegister::FromCoreRegister(kManagedArgumentRegisters[itr_slots_]);
+  }
+}
+
+FrameOffset ArmManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+  CHECK(IsCurrentParamOnStack());
+  FrameOffset result =
+      FrameOffset(displacement_.Int32Value() +   // displacement
+                  kPointerSize +                 // Method*
+                  (itr_slots_ * kPointerSize));  // offset into in args
+  if (itr_slots_ == 2) {
+    // the odd spanning case, bump the offset to skip the first half of the
+    // input which is in a register
+    CHECK(IsCurrentParamInRegister());
+    result = FrameOffset(result.Int32Value() + 4);
+  }
+  return result;
+}
+
+// JNI calling convention
+
+ArmJniCallingConvention::ArmJniCallingConvention(bool is_static, bool is_synchronized,
+                                                 const char* shorty)
+    : JniCallingConvention(is_static, is_synchronized, shorty) {
+  // Compute padding to ensure longs and doubles are not split in AAPCS. Ignore the 'this' jobject
+  // or jclass for static methods and the JNIEnv. We start at the aligned register r2.
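+  // For illustration: a static native method with shorty "VIJ" (void f(int, long)) is called as
+  // (JNIEnv*, jclass, jint, jlong); r0/r1 hold JNIEnv* and jclass and r2 the jint, which would
+  // leave the jlong starting at the odd register r3, so the loop below records 4 bytes of padding
+  // to push it onto an even (8-byte aligned) boundary.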
+  size_t padding = 0;
+  for (size_t cur_arg = IsStatic() ? 0 : 1, cur_reg = 2; cur_arg < NumArgs(); cur_arg++) {
+    if (IsParamALongOrDouble(cur_arg)) {
+      if ((cur_reg & 1) != 0) {
+        padding += 4;
+        cur_reg++;  // additional bump to ensure alignment
+      }
+      cur_reg++;  // additional bump to skip extra long word
+    }
+    cur_reg++;  // bump the iterator for every argument
+  }
+  padding_ = padding;
+
+  callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R5));
+  callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R6));
+  callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R7));
+  callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R8));
+  callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R10));
+  callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R11));
+}
+
+uint32_t ArmJniCallingConvention::CoreSpillMask() const {
+  // Compute spill mask to agree with callee saves initialized in the constructor
+  uint32_t result = 1 << R5 | 1 << R6 | 1 << R7 | 1 << R8 | 1 << R10 | 1 << R11 | 1 << LR;
+  return result;
+}
+
+ManagedRegister ArmJniCallingConvention::ReturnScratchRegister() const {
+  return ArmManagedRegister::FromCoreRegister(R2);
+}
+
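+// For example (illustrative only): a non-static native int method taking one Object has a
+// reference count of 2 ('this' plus the Object), so frame_data_size is (3 + 6) * 4 = 36 bytes,
+// the SIRT (2 + 2) * 4 = 16 bytes and the return value spill 4 bytes, all rounded up together
+// to kStackAlignment.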
+size_t ArmJniCallingConvention::FrameSize() {
+  // Method*, LR and callee save area size, local reference segment state
+  size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kPointerSize;
+  // References plus 2 words for SIRT header
+  size_t sirt_size = (ReferenceCount() + 2) * kPointerSize;
+  // Plus return value spill area size
+  return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+}
+
+size_t ArmJniCallingConvention::OutArgSize() {
+  return RoundUp(NumberOfOutgoingStackArgs() * kPointerSize + padding_,
+                 kStackAlignment);
+}
+
+// Will the method register be clobbered by an outgoing argument?
+bool ArmJniCallingConvention::IsMethodRegisterClobberedPreCall() {
+  return true;  // The method register R0 is always clobbered by the outgoing JNIEnv* argument
+}
+
+// JniCallingConvention ABI follows AAPCS where longs and doubles must occur
+// in even register numbers and stack slots
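+// For example (illustrative only): for a non-static method with shorty "VIJ" (void f(int, long)),
+// iteration places the JNIEnv* in slot 0, 'this' in slot 1 and the int in slot 2; the long would
+// then start at the odd slot 3, so itr_slots_ is bumped to 4 and the long is passed on the stack.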
+void ArmJniCallingConvention::Next() {
+  JniCallingConvention::Next();
+  size_t arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+  if ((itr_args_ >= 2) &&
+      (arg_pos < NumArgs()) &&
+      IsParamALongOrDouble(arg_pos)) {
+    // itr_slots_ needs to be an even number, according to AAPCS.
+    if ((itr_slots_ & 0x1u) != 0) {
+      itr_slots_++;
+    }
+  }
+}
+
+bool ArmJniCallingConvention::IsCurrentParamInRegister() {
+  return itr_slots_ < 4;
+}
+
+bool ArmJniCallingConvention::IsCurrentParamOnStack() {
+  return !IsCurrentParamInRegister();
+}
+
+static const Register kJniArgumentRegisters[] = {
+  R0, R1, R2, R3
+};
+ManagedRegister ArmJniCallingConvention::CurrentParamRegister() {
+  CHECK_LT(itr_slots_, 4u);
+  int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+  if ((itr_args_ >= 2) && IsParamALongOrDouble(arg_pos)) {
+    CHECK_EQ(itr_slots_, 2u);
+    return ArmManagedRegister::FromRegisterPair(R2_R3);
+  } else {
+    return
+      ArmManagedRegister::FromCoreRegister(kJniArgumentRegisters[itr_slots_]);
+  }
+}
+
+FrameOffset ArmJniCallingConvention::CurrentParamStackOffset() {
+  CHECK_GE(itr_slots_, 4u);
+  size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_slots_ - 4) * kPointerSize);
+  CHECK_LT(offset, OutArgSize());
+  return FrameOffset(offset);
+}
+
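+// For example (illustrative only): a non-static native method with shorty "VIJ" (void f(int, long))
+// has NumArgs() == 3 (this, int, long) and one long, giving 0 + (3 + 1) + 1 - 4 = 1; the extra
+// word the long needs on the stack is accounted for by padding_ in OutArgSize().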
+size_t ArmJniCallingConvention::NumberOfOutgoingStackArgs() {
+  size_t static_args = IsStatic() ? 1 : 0;  // count jclass
+  // regular argument parameters and this
+  size_t param_args = NumArgs() + NumLongOrDoubleArgs();
+  // count JNIEnv* less arguments in registers
+  return static_args + param_args + 1 - 4;
+}
+
+}  // namespace arm
+}  // namespace art
diff --git a/src/oat/jni/arm/calling_convention_arm.h b/src/oat/jni/arm/calling_convention_arm.h
new file mode 100644
index 0000000..cb1d6a9
--- /dev/null
+++ b/src/oat/jni/arm/calling_convention_arm.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_JNI_ARM_CALLING_CONVENTION_ARM_H_
+#define ART_SRC_OAT_JNI_ARM_CALLING_CONVENTION_ARM_H_
+
+#include "oat/jni/calling_convention.h"
+
+namespace art {
+namespace arm {
+
+class ArmManagedRuntimeCallingConvention : public ManagedRuntimeCallingConvention {
+ public:
+  ArmManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty) :
+      ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty) {}
+  virtual ~ArmManagedRuntimeCallingConvention() {}
+  // Calling convention
+  virtual ManagedRegister ReturnRegister();
+  virtual ManagedRegister InterproceduralScratchRegister();
+  // Managed runtime calling convention
+  virtual ManagedRegister MethodRegister();
+  virtual bool IsCurrentParamInRegister();
+  virtual bool IsCurrentParamOnStack();
+  virtual ManagedRegister CurrentParamRegister();
+  virtual FrameOffset CurrentParamStackOffset();
+  virtual const std::vector<ManagedRegister>& EntrySpills() {
+    DCHECK(entry_spills_.empty());
+    return entry_spills_;
+  }
+ private:
+  static std::vector<ManagedRegister> entry_spills_;
+
+  DISALLOW_COPY_AND_ASSIGN(ArmManagedRuntimeCallingConvention);
+};
+
+class ArmJniCallingConvention : public JniCallingConvention {
+ public:
+  explicit ArmJniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+  virtual ~ArmJniCallingConvention() {}
+  // Calling convention
+  virtual ManagedRegister ReturnRegister();
+  virtual ManagedRegister InterproceduralScratchRegister();
+  // JNI calling convention
+  virtual void Next();  // Override default behavior for AAPCS
+  virtual size_t FrameSize();
+  virtual size_t OutArgSize();
+  virtual const std::vector<ManagedRegister>& CalleeSaveRegisters() const {
+    return callee_save_regs_;
+  }
+  virtual ManagedRegister ReturnScratchRegister() const;
+  virtual uint32_t CoreSpillMask() const;
+  virtual uint32_t FpSpillMask() const {
+    return 0;  // Floats aren't spilled in JNI down call
+  }
+  virtual bool IsMethodRegisterClobberedPreCall();
+  virtual bool IsCurrentParamInRegister();
+  virtual bool IsCurrentParamOnStack();
+  virtual ManagedRegister CurrentParamRegister();
+  virtual FrameOffset CurrentParamStackOffset();
+
+ protected:
+  virtual size_t NumberOfOutgoingStackArgs();
+
+ private:
+  // TODO: these values aren't unique and can be shared amongst instances
+  std::vector<ManagedRegister> callee_save_regs_;
+
+  // Padding to ensure longs and doubles are not split in AAPCS
+  size_t padding_;
+
+  DISALLOW_COPY_AND_ASSIGN(ArmJniCallingConvention);
+};
+
+}  // namespace arm
+}  // namespace art
+
+#endif  // ART_SRC_OAT_JNI_ARM_CALLING_CONVENTION_ARM_H_
diff --git a/src/oat/jni/arm/jni_internal_arm.cc b/src/oat/jni/arm/jni_internal_arm.cc
new file mode 100644
index 0000000..2227742
--- /dev/null
+++ b/src/oat/jni/arm/jni_internal_arm.cc
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include <algorithm>
+
+#include "asm_support.h"
+#include "compiled_method.h"
+#include "compiler.h"
+#include "oat/utils/arm/assembler_arm.h"
+#include "oat/utils/assembler.h"
+#include "object.h"
+
+namespace art {
+namespace arm {
+
+// Creates a function which invokes a managed method with an array of
+// arguments.
+//
+// At the time of call, the environment looks something like this:
+//
+// R0 = method pointer
+// R1 = receiver pointer or NULL for static methods
+// R2 = (managed) thread pointer
+// R3 = argument array or NULL for no argument methods
+// [SP] = JValue* result or NULL for void returns
+//
+// As the JNI call has already transitioned the thread into the
+// "running" state the remaining responsibilities of this routine are
+// to save the native register value and restore the managed thread
+// register and transfer arguments from the array into register and on
+// the stack, if needed.  On return, the thread register must be
+// shuffled and the return value must be store into the result JValue.
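+//
+// The stub therefore builds a frame that looks roughly like this (offsets from the lowered SP,
+// illustrative only):
+//
+//   [SP + frame_size]      JValue* result slot of the caller ([SP] at entry)
+//   [SP + frame_size - 4]  saved LR
+//   [SP + frame_size - 8]  saved R9
+//   [SP + frame_size - 12] saved R4
+//   [SP + 4]               space for the receiver (if any) followed by the copied argument words
+//   [SP]                   NULL Method*, terminating managed stack walks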
+CompiledInvokeStub* CreateInvokeStub(bool is_static, const char* shorty, uint32_t shorty_len) {
+  UniquePtr<ArmAssembler> assembler(down_cast<ArmAssembler*>(Assembler::Create(kArm)));
+#define __ assembler->
+  size_t num_arg_array_bytes = NumArgArrayBytes(shorty, shorty_len);
+  // Size of frame = spills of R4, R9 and LR + Method* + possible receiver + arg array size
+  // Note, space is left in the frame to flush arguments in registers back to out locations.
+  size_t unpadded_frame_size = (4 * kPointerSize) +
+                               (is_static ? 0 : kPointerSize) +
+                               num_arg_array_bytes;
+  size_t frame_size = RoundUp(unpadded_frame_size, kStackAlignment);
+
+  // Spill R4,R9 and LR
+  RegList save = (1 << R9) | (1 << R4);
+  __ PushList(save | (1 << LR));
+
+  // Move the managed thread pointer into R9.
+  __ mov(R9, ShifterOperand(R2));
+
+  // Reset R4 to suspend check interval
+  __ LoadImmediate(R4, SUSPEND_CHECK_INTERVAL);
+
+  // Move the frame down to make room for the arguments, less the 3 values pushed above
+  __ AddConstant(SP, -frame_size + (3 * kPointerSize));
+
+  // Can either get 3 or 2 arguments into registers
+  size_t reg_bytes = (is_static ? 3 : 2) * kPointerSize;
+  // Bytes passed by stack
+  size_t stack_bytes;
+  if (num_arg_array_bytes > reg_bytes) {
+    stack_bytes = num_arg_array_bytes - reg_bytes;
+  } else {
+    stack_bytes = 0;
+    reg_bytes = num_arg_array_bytes;
+  }
+
+  // The Method* at the bottom of the frame is null, thereby terminating managed stack crawls
+  __ LoadImmediate(IP, 0, AL);
+  __ StoreToOffset(kStoreWord, IP, SP, 0);
+
+  // Copy values onto the stack.
+  size_t src_offset = 0;
+  size_t dst_offset = (is_static ? 1 : 2) * kPointerSize;
+  for (size_t i = 1; i < shorty_len; ++i) {
+    switch (shorty[i]) {
+      case 'D':
+      case 'J':
+        // Move both pointers 64 bits.
+        __ LoadFromOffset(kLoadWord, IP, R3, src_offset);
+        src_offset += kPointerSize;
+        __ StoreToOffset(kStoreWord, IP, SP, dst_offset);
+        dst_offset += kPointerSize;
+
+        __ LoadFromOffset(kLoadWord, IP, R3, src_offset);
+        src_offset += kPointerSize;
+        __ StoreToOffset(kStoreWord, IP, SP, dst_offset);
+        dst_offset += kPointerSize;
+        break;
+      default:
+        // Move the source pointer sizeof(JValue) and the destination pointer 32 bits.
+        __ LoadFromOffset(kLoadWord, IP, R3, src_offset);
+        src_offset += sizeof(JValue);
+        __ StoreToOffset(kStoreWord, IP, SP, dst_offset);
+        dst_offset += kPointerSize;
+        break;
+    }
+  }
+
+  // Move all the register arguments into place.
+  dst_offset = (is_static ? 1 : 2) * kPointerSize;
+  if (is_static) {
+    if (reg_bytes > 0 && num_arg_array_bytes > 0) {
+      __ LoadFromOffset(kLoadWord, R1, SP, dst_offset + 0);
+      if (reg_bytes > 4 && num_arg_array_bytes > 4) {
+        __ LoadFromOffset(kLoadWord, R2, SP, dst_offset + 4);
+        if (reg_bytes > 8 && num_arg_array_bytes > 8) {
+          __ LoadFromOffset(kLoadWord, R3, SP, dst_offset + 8);
+        }
+      }
+    }
+  } else {
+    if (reg_bytes > 0 && num_arg_array_bytes > 0) {
+      __ LoadFromOffset(kLoadWord, R2, SP, dst_offset + 0);
+      if (reg_bytes > 4 && num_arg_array_bytes > 4) {
+        __ LoadFromOffset(kLoadWord, R3, SP, dst_offset + 4);
+      }
+    }
+  }
+
+  // Load the code pointer we are about to call.
+  __ LoadFromOffset(kLoadWord, IP, R0, Method::GetCodeOffset().Int32Value());
+
+  // Do the call.
+  __ blx(IP);
+
+  // If the method returns a value, store it to the result pointer.
+  if (shorty[0] != 'V') {
+    // Load the result JValue pointer of the stub caller's out args.
+    __ LoadFromOffset(kLoadWord, IP, SP, frame_size);
+    StoreOperandType type = (shorty[0] == 'J' || shorty[0] == 'D') ? kStoreWordPair : kStoreWord;
+    __ StoreToOffset(type, R0, IP, 0);
+  }
+
+  // Remove the frame less the spilled R4, R9 and LR
+  __ AddConstant(SP, frame_size - (3 * kPointerSize));
+
+  // Pop R4, R9 and the LR into PC
+  __ PopList(save | (1 << PC));
+  // TODO: store native_entry in the stub table
+  std::vector<uint8_t> code(assembler->CodeSize());
+  MemoryRegion region(&code[0], code.size());
+  assembler->FinalizeInstructions(region);
+  return new CompiledInvokeStub(code);
+#undef __
+}
+
+}  // namespace arm
+}  // namespace art
+
+extern "C" art::CompiledInvokeStub* ArtCreateInvokeStub(art::Compiler& /*compiler*/, bool is_static,
+                                                        const char* shorty, uint32_t shorty_len) {
+  return art::arm::CreateInvokeStub(is_static, shorty, shorty_len);
+}
diff --git a/src/oat/jni/calling_convention.cc b/src/oat/jni/calling_convention.cc
new file mode 100644
index 0000000..e7b9cc8
--- /dev/null
+++ b/src/oat/jni/calling_convention.cc
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention.h"
+
+#include "oat/jni/arm/calling_convention_arm.h"
+#include "oat/jni/x86/calling_convention_x86.h"
+#include "logging.h"
+#include "utils.h"
+
+namespace art {
+
+// Offset of Method within the frame
+FrameOffset CallingConvention::MethodStackOffset() {
+  return displacement_;
+}
+
+// Managed runtime calling convention
+
+ManagedRuntimeCallingConvention* ManagedRuntimeCallingConvention::Create(
+    bool is_static, bool is_synchronized, const char* shorty, InstructionSet instruction_set) {
+  if (instruction_set == kX86) {
+    return new x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+  } else {
+    CHECK(instruction_set == kArm || instruction_set == kThumb2);
+    return new arm::ArmManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+  }
+}
+
+bool ManagedRuntimeCallingConvention::HasNext() {
+  return itr_args_ < NumArgs();
+}
+
+void ManagedRuntimeCallingConvention::Next() {
+  CHECK(HasNext());
+  if (IsCurrentArgExplicit() &&  // don't query parameter type of implicit args
+      IsParamALongOrDouble(itr_args_)) {
+    itr_longs_and_doubles_++;
+    itr_slots_++;
+  }
+  if (IsCurrentParamAReference()) {
+    itr_refs_++;
+  }
+  itr_args_++;
+  itr_slots_++;
+}
+
+bool ManagedRuntimeCallingConvention::IsCurrentArgExplicit() {
+  // Static methods have no implicit arguments; others implicitly pass 'this'
+  return IsStatic() || (itr_args_ != 0);
+}
+
+bool ManagedRuntimeCallingConvention::IsCurrentArgPossiblyNull() {
+  return IsCurrentArgExplicit();  // any user parameter may be null
+}
+
+size_t ManagedRuntimeCallingConvention::CurrentParamSize() {
+  return ParamSize(itr_args_);
+}
+
+bool ManagedRuntimeCallingConvention::IsCurrentParamAReference() {
+  return IsParamAReference(itr_args_);
+}
+
+// JNI calling convention
+
+JniCallingConvention* JniCallingConvention::Create(bool is_static, bool is_synchronized,
+                                                   const char* shorty,
+                                                   InstructionSet instruction_set) {
+  if (instruction_set == kX86) {
+    return new x86::X86JniCallingConvention(is_static, is_synchronized, shorty);
+  } else {
+    CHECK(instruction_set == kArm || instruction_set == kThumb2);
+    return new arm::ArmJniCallingConvention(is_static, is_synchronized, shorty);
+  }
+}
+
+size_t JniCallingConvention::ReferenceCount() const {
+  return NumReferenceArgs() + (IsStatic() ? 1 : 0);
+}
+
+FrameOffset JniCallingConvention::SavedLocalReferenceCookieOffset() const {
+  size_t start_of_sirt = SirtLinkOffset().Int32Value() +  kPointerSize;
+  size_t references_size = kPointerSize * ReferenceCount();  // size excluding header
+  return FrameOffset(start_of_sirt + references_size);
+}
+
+FrameOffset JniCallingConvention::ReturnValueSaveLocation() const {
+  // Segment state is 4 bytes long
+  return FrameOffset(SavedLocalReferenceCookieOffset().Int32Value() + 4);
+}
+
+bool JniCallingConvention::HasNext() {
+  if (itr_args_ <= kObjectOrClass) {
+    return true;
+  } else {
+    unsigned int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+    return arg_pos < NumArgs();
+  }
+}
+
+void JniCallingConvention::Next() {
+  CHECK(HasNext());
+  if (itr_args_ > kObjectOrClass) {
+    int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+    if (IsParamALongOrDouble(arg_pos)) {
+      itr_longs_and_doubles_++;
+      itr_slots_++;
+    }
+  }
+  if (IsCurrentParamAReference()) {
+    itr_refs_++;
+  }
+  itr_args_++;
+  itr_slots_++;
+}
+
+bool JniCallingConvention::IsCurrentParamAReference() {
+  switch (itr_args_) {
+    case kJniEnv:
+      return false;  // JNIEnv*
+    case kObjectOrClass:
+      return true;   // jobject or jclass
+    default: {
+      int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+      return IsParamAReference(arg_pos);
+    }
+  }
+}
+
+// Return position of SIRT entry holding reference at the current iterator
+// position
+FrameOffset JniCallingConvention::CurrentParamSirtEntryOffset() {
+  CHECK(IsCurrentParamAReference());
+  CHECK_GT(SirtLinkOffset(), SirtNumRefsOffset());
+  // Address of 1st SIRT entry
+  int result = SirtLinkOffset().Int32Value() + kPointerSize;
+  result += itr_refs_ * kPointerSize;
+  CHECK_GT(result, SirtLinkOffset().Int32Value());
+  return FrameOffset(result);
+}
+
+size_t JniCallingConvention::CurrentParamSize() {
+  if (itr_args_ <= kObjectOrClass) {
+    return kPointerSize;  // JNIEnv or jobject/jclass
+  } else {
+    int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+    return ParamSize(arg_pos);
+  }
+}
+
+size_t JniCallingConvention::NumberOfExtraArgumentsForJni() {
+  // The first argument is the JNIEnv*.
+  // Static methods have an extra argument which is the jclass.
+  return IsStatic() ? 2 : 1;
+}
+
+}  // namespace art
diff --git a/src/oat/jni/calling_convention.h b/src/oat/jni/calling_convention.h
new file mode 100644
index 0000000..7e42904
--- /dev/null
+++ b/src/oat/jni/calling_convention.h
@@ -0,0 +1,287 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_JNI_CALLING_CONVENTION_H_
+#define ART_SRC_OAT_JNI_CALLING_CONVENTION_H_
+
+#include <vector>
+#include "oat/utils/managed_register.h"
+#include "stack_indirect_reference_table.h"
+#include "thread.h"
+
+namespace art {
+
+// Top-level abstraction for different calling conventions
+class CallingConvention {
+ public:
+  bool IsReturnAReference() const { return shorty_[0] == 'L'; }
+
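+  // Note: sub-word results (e.g. a byte or char, shorty 'B'/'C') are widened to a full 32-bit
+  // slot below; only 'V' yields 0 and 'J'/'D' yield 8.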
+  size_t SizeOfReturnValue() const {
+    size_t result = Primitive::ComponentSize(Primitive::GetType(shorty_[0]));
+    if (result >= 1 && result < 4) {
+      result = 4;
+    }
+    return result;
+  }
+
+  // Register that holds result of this method
+  virtual ManagedRegister ReturnRegister() = 0;
+  // Register reserved for scratch usage during procedure calls
+  virtual ManagedRegister InterproceduralScratchRegister() = 0;
+
+  // Offset of Method within the frame
+  FrameOffset MethodStackOffset();
+
+  // Iterator interface
+
+  // Place iterator at start of arguments. The displacement is applied to
+  // frame offset methods to account for frames which may be on the stack
+  // below the one being iterated over.
+  void ResetIterator(FrameOffset displacement) {
+    displacement_ = displacement;
+    itr_slots_ = 0;
+    itr_args_ = 0;
+    itr_refs_ = 0;
+    itr_longs_and_doubles_ = 0;
+  }
+
+  virtual ~CallingConvention() {}
+
+ protected:
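+  // For example (illustrative only): for a non-static method with shorty "ILJ"
+  // (int f(Object, long)) the constructor computes num_args_ == 3 (this, Object, long),
+  // num_ref_args_ == 2 ('this' and the Object) and num_long_or_double_args_ == 1.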
+  CallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : displacement_(0), is_static_(is_static), is_synchronized_(is_synchronized),
+        shorty_(shorty) {
+    num_args_ = (is_static ? 0 : 1) + strlen(shorty) - 1;
+    num_ref_args_ = is_static ? 0 : 1;  // The implicit this pointer.
+    num_long_or_double_args_ = 0;
+    for (size_t i = 1; i < strlen(shorty); i++) {
+      char ch = shorty_[i];
+      if (ch == 'L') {
+        num_ref_args_++;
+      } else if ((ch == 'D') || (ch == 'J')) {
+        num_long_or_double_args_++;
+      }
+    }
+  }
+
+  bool IsStatic() const {
+    return is_static_;
+  }
+  bool IsSynchronized() const {
+    return is_synchronized_;
+  }
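+  // Note on parameter indexing (illustrative): for non-static methods param 0 is the implicit
+  // 'this'; for static methods param 0 maps to shorty_[1], the first declared argument, since
+  // the shorty's leading character is the return type.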
+  bool IsParamALongOrDouble(unsigned int param) const {
+    DCHECK_LT(param, NumArgs());
+    if (IsStatic()) {
+      param++;  // 0th argument must skip return value at start of the shorty
+    } else if (param == 0) {
+      return false;  // this argument
+    }
+    char ch = shorty_[param];
+    return (ch == 'J' || ch == 'D');
+  }
+  bool IsParamAReference(unsigned int param) const {
+    DCHECK_LT(param, NumArgs());
+    if (IsStatic()) {
+      param++;  // 0th argument must skip return value at start of the shorty
+    } else if (param == 0) {
+      return true;  // this argument
+    }
+    return shorty_[param] == 'L';
+  }
+  size_t NumArgs() const {
+    return num_args_;
+  }
+  size_t NumLongOrDoubleArgs() const {
+    return num_long_or_double_args_;
+  }
+  size_t NumReferenceArgs() const {
+    return num_ref_args_;
+  }
+  size_t ParamSize(unsigned int param) const {
+    DCHECK_LT(param, NumArgs());
+    if (IsStatic()) {
+      param++;  // 0th argument must skip return value at start of the shorty
+    } else if (param == 0) {
+      return kPointerSize;  // this argument
+    }
+    size_t result = Primitive::ComponentSize(Primitive::GetType(shorty_[param]));
+    if (result >= 1 && result < 4) {
+      result = 4;
+    }
+    return result;
+  }
+  const char* GetShorty() const {
+    return shorty_.c_str();
+  }
+  // The slot number for the current calling convention argument.
+  // Note that each slot is 32-bit. When the current argument is bigger
+  // than 32 bits, return the first slot number for this argument.
+  unsigned int itr_slots_;
+  // The number of references iterated past
+  unsigned int itr_refs_;
+  // The argument number along argument list for current argument
+  unsigned int itr_args_;
+  // Number of longs and doubles seen along argument list
+  unsigned int itr_longs_and_doubles_;
+  // Space for frames below this on the stack
+  FrameOffset displacement_;
+
+ private:
+  const bool is_static_;
+  const bool is_synchronized_;
+  std::string shorty_;
+  size_t num_args_;
+  size_t num_ref_args_;
+  size_t num_long_or_double_args_;
+};
+
+// Abstraction for managed code's calling conventions
+// | { Incoming stack args } |
+// | { Prior Method* }       | <-- Prior SP
+// | { Return address }      |
+// | { Callee saves }        |
+// | { Spills ... }          |
+// | { Outgoing stack args } |
+// | { Method* }             | <-- SP
+class ManagedRuntimeCallingConvention : public CallingConvention {
+ public:
+  static ManagedRuntimeCallingConvention* Create(bool is_static, bool is_synchronized,
+                                                 const char* shorty,
+                                                 InstructionSet instruction_set);
+
+  // Register that holds the incoming method argument
+  virtual ManagedRegister MethodRegister() = 0;
+
+  // Iterator interface
+  bool HasNext();
+  void Next();
+  bool IsCurrentParamAReference();
+  bool IsCurrentArgExplicit();  // i.e. not an implicit argument such as 'this'
+  bool IsCurrentArgPossiblyNull();
+  size_t CurrentParamSize();
+  virtual bool IsCurrentParamInRegister() = 0;
+  virtual bool IsCurrentParamOnStack() = 0;
+  virtual ManagedRegister CurrentParamRegister() = 0;
+  virtual FrameOffset CurrentParamStackOffset() = 0;
+
+  virtual ~ManagedRuntimeCallingConvention() {}
+
+  // Registers to spill to caller's out registers on entry.
+  virtual const std::vector<ManagedRegister>& EntrySpills() = 0;
+
+ protected:
+  ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty) :
+      CallingConvention(is_static, is_synchronized, shorty) {}
+};
+
+// Abstraction for JNI calling conventions
+// | { Incoming stack args }         | <-- Prior SP
+// | { Return address }              |
+// | { Callee saves }                |     ([1])
+// | { Return value spill }          |     (live on return slow paths)
+// | { Local Ref. Table State }      |
+// | { Stack Indirect Ref. Table     |
+// |   num. refs./link }             |     (here to prior SP is frame size)
+// | { Method* }                     | <-- Anchor SP written to thread
+// | { Outgoing stack args }         | <-- SP at point of call
+// | Native frame                    |
+//
+// [1] We must save all callee saves here to enable any exception throws to restore
+// callee saves for frames above this one.
+class JniCallingConvention : public CallingConvention {
+ public:
+  static JniCallingConvention* Create(bool is_static, bool is_synchronized, const char* shorty,
+                                      InstructionSet instruction_set);
+
+  // Size of frame excluding space for outgoing args (it's assumed the Method* is
+  // always at the bottom of a frame, but this doesn't work for outgoing
+  // native args). Includes alignment.
+  virtual size_t FrameSize() = 0;
+  // Size of outgoing arguments, including alignment
+  virtual size_t OutArgSize() = 0;
+  // Number of references in stack indirect reference table
+  size_t ReferenceCount() const;
+  // Location where the segment state of the local indirect reference table is saved
+  FrameOffset SavedLocalReferenceCookieOffset() const;
+  // Location where the return value of a call can be squirreled if another
+  // call is made following the native call
+  FrameOffset ReturnValueSaveLocation() const;
+
+  // Callee save registers to spill prior to native code (which may clobber)
+  virtual const std::vector<ManagedRegister>& CalleeSaveRegisters() const = 0;
+
+  // Spill mask values
+  virtual uint32_t CoreSpillMask() const = 0;
+  virtual uint32_t FpSpillMask() const = 0;
+
+  // Returns true if the method register will have been clobbered during argument
+  // set up
+  virtual bool IsMethodRegisterClobberedPreCall() = 0;
+
+  // An extra scratch register live after the call
+  virtual ManagedRegister ReturnScratchRegister() const = 0;
+
+  // Iterator interface
+  bool HasNext();
+  virtual void Next();
+  bool IsCurrentParamAReference();
+  size_t CurrentParamSize();
+  virtual bool IsCurrentParamInRegister() = 0;
+  virtual bool IsCurrentParamOnStack() = 0;
+  virtual ManagedRegister CurrentParamRegister() = 0;
+  virtual FrameOffset CurrentParamStackOffset() = 0;
+
+  // Iterator interface extension for JNI
+  FrameOffset CurrentParamSirtEntryOffset();
+
+  // Position of SIRT and interior fields
+  FrameOffset SirtOffset() const {
+    return FrameOffset(displacement_.Int32Value() +
+                       kPointerSize);  // above Method*
+  }
+  FrameOffset SirtNumRefsOffset() const {
+    return FrameOffset(SirtOffset().Int32Value() +
+                       StackIndirectReferenceTable::NumberOfReferencesOffset());
+  }
+  FrameOffset SirtLinkOffset() const {
+    return FrameOffset(SirtOffset().Int32Value() +
+                       StackIndirectReferenceTable::LinkOffset());
+  }
+
+  virtual ~JniCallingConvention() {}
+
+ protected:
+  // Named iterator positions
+  enum IteratorPos {
+    kJniEnv = 0,
+    kObjectOrClass = 1
+  };
+
+  explicit JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty) :
+      CallingConvention(is_static, is_synchronized, shorty) {}
+
+  // Number of stack slots for outgoing arguments, above which the SIRT is
+  // located
+  virtual size_t NumberOfOutgoingStackArgs() = 0;
+
+  size_t NumberOfExtraArgumentsForJni();
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_OAT_JNI_CALLING_CONVENTION_H_
diff --git a/src/oat/jni/jni_compiler.cc b/src/oat/jni/jni_compiler.cc
new file mode 100644
index 0000000..30341c2
--- /dev/null
+++ b/src/oat/jni/jni_compiler.cc
@@ -0,0 +1,570 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <sys/mman.h>
+#include <vector>
+
+#include "calling_convention.h"
+#include "class_linker.h"
+#include "compiled_method.h"
+#include "compiler.h"
+#include "constants.h"
+#include "jni_internal.h"
+#include "logging.h"
+#include "macros.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "oat/utils/assembler.h"
+#include "oat/utils/managed_register.h"
+#include "thread.h"
+#include "UniquePtr.h"
+
+#define __ jni_asm->
+
+namespace art {
+
+static void ChangeThreadState(Assembler* jni_asm, Thread::State new_state,
+                              ManagedRegister scratch, ManagedRegister return_reg,
+                              FrameOffset return_save_location,
+                              size_t return_size) {
+  /*
+   * This code mirrors that of Thread::SetState where detail is given on why
+   * barriers occur when they do.
+   */
+  if (new_state == Thread::kRunnable) {
+    /*
+     * Change our status to Thread::kRunnable.  The transition requires
+     * that we check for pending suspension, because the runtime considers
+     * us to be "asleep" in all other states, and another thread could
+     * be performing a GC now.
+     */
+    __ StoreImmediateToThread(Thread::StateOffset(), Thread::kRunnable, scratch);
+    __ MemoryBarrier(scratch);
+    __ SuspendPoll(scratch, return_reg, return_save_location, return_size);
+  } else {
+    /*
+     * Not changing to Thread::kRunnable. No additional work required.
+     */
+    __ MemoryBarrier(scratch);
+    __ StoreImmediateToThread(Thread::StateOffset(), new_state, scratch);
+  }
+}
+
+// Copy a single parameter from the managed to the JNI calling convention
+static void CopyParameter(Assembler* jni_asm,
+                          ManagedRuntimeCallingConvention* mr_conv,
+                          JniCallingConvention* jni_conv,
+                          size_t frame_size, size_t out_arg_size) {
+  bool input_in_reg = mr_conv->IsCurrentParamInRegister();
+  bool output_in_reg = jni_conv->IsCurrentParamInRegister();
+  FrameOffset sirt_offset(0);
+  bool null_allowed = false;
+  bool ref_param = jni_conv->IsCurrentParamAReference();
+  CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
+  // input may be in register, on stack or both - but not none!
+  CHECK(input_in_reg || mr_conv->IsCurrentParamOnStack());
+  if (output_in_reg) {  // output shouldn't straddle registers and stack
+    CHECK(!jni_conv->IsCurrentParamOnStack());
+  } else {
+    CHECK(jni_conv->IsCurrentParamOnStack());
+  }
+  // References need placing in the SIRT, with the SIRT entry address passed instead
+  if (ref_param) {
+    null_allowed = mr_conv->IsCurrentArgPossiblyNull();
+    // Compute SIRT offset. Note null is placed in the SIRT but the jobject
+    // passed to the native code must be null (not a pointer into the SIRT
+    // as with regular references).
+    sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
+    // Check SIRT offset is within frame.
+    CHECK_LT(sirt_offset.Uint32Value(), (frame_size + out_arg_size));
+  }
+  if (input_in_reg && output_in_reg) {
+    ManagedRegister in_reg = mr_conv->CurrentParamRegister();
+    ManagedRegister out_reg = jni_conv->CurrentParamRegister();
+    if (ref_param) {
+      __ CreateSirtEntry(out_reg, sirt_offset, in_reg, null_allowed);
+    } else {
+      if (!mr_conv->IsCurrentParamOnStack()) {
+        // regular non-straddling move
+        __ Move(out_reg, in_reg, mr_conv->CurrentParamSize());
+      } else {
+        UNIMPLEMENTED(FATAL);  // we currently don't expect to see this case
+      }
+    }
+  } else if (!input_in_reg && !output_in_reg) {
+    FrameOffset out_off = jni_conv->CurrentParamStackOffset();
+    if (ref_param) {
+      __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(),
+                         null_allowed);
+    } else {
+      FrameOffset in_off = mr_conv->CurrentParamStackOffset();
+      size_t param_size = mr_conv->CurrentParamSize();
+      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
+      __ Copy(out_off, in_off, mr_conv->InterproceduralScratchRegister(), param_size);
+    }
+  } else if (!input_in_reg && output_in_reg) {
+    FrameOffset in_off = mr_conv->CurrentParamStackOffset();
+    ManagedRegister out_reg = jni_conv->CurrentParamRegister();
+    // Check that incoming stack arguments are above the current stack frame.
+    CHECK_GT(in_off.Uint32Value(), frame_size);
+    if (ref_param) {
+      __ CreateSirtEntry(out_reg, sirt_offset, ManagedRegister::NoRegister(), null_allowed);
+    } else {
+      size_t param_size = mr_conv->CurrentParamSize();
+      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
+      __ Load(out_reg, in_off, param_size);
+    }
+  } else {
+    CHECK(input_in_reg && !output_in_reg);
+    ManagedRegister in_reg = mr_conv->CurrentParamRegister();
+    FrameOffset out_off = jni_conv->CurrentParamStackOffset();
+    // Check outgoing argument is within frame
+    CHECK_LT(out_off.Uint32Value(), frame_size);
+    if (ref_param) {
+      // TODO: recycle value in in_reg rather than reload from SIRT
+      __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(),
+                         null_allowed);
+    } else {
+      size_t param_size = mr_conv->CurrentParamSize();
+      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
+      if (!mr_conv->IsCurrentParamOnStack()) {
+        // regular non-straddling store
+        __ Store(out_off, in_reg, param_size);
+      } else {
+        // store where input straddles registers and stack
+        CHECK_EQ(param_size, 8u);
+        FrameOffset in_off = mr_conv->CurrentParamStackOffset();
+        __ StoreSpanning(out_off, in_reg, in_off, mr_conv->InterproceduralScratchRegister());
+      }
+    }
+  }
+}
+
+static void SetNativeParameter(Assembler* jni_asm,
+                               JniCallingConvention* jni_conv,
+                               ManagedRegister in_reg) {
+  if (jni_conv->IsCurrentParamOnStack()) {
+    FrameOffset dest = jni_conv->CurrentParamStackOffset();
+    __ StoreRawPtr(dest, in_reg);
+  } else {
+    if (!jni_conv->CurrentParamRegister().Equals(in_reg)) {
+      __ Move(jni_conv->CurrentParamRegister(), in_reg, jni_conv->CurrentParamSize());
+    }
+  }
+}
+
+// Generate the JNI bridge for the given method, general contract:
+// - Arguments are in the managed runtime format, either on stack or in
+//   registers; a reference to the method object is supplied as part of this
+//   convention.
+//
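+// Sketching the generated bridge for, e.g., a static synchronized native method (illustrative
+// only): build the frame and SIRT, spilling callee saves; store the jclass and reference
+// arguments into the SIRT; transition the thread to kNative; MonitorEnter on the jclass;
+// shuffle arguments into their native positions and materialize the JNIEnv* and jclass
+// pointers; call the native code; MonitorExit; transition back to kRunnable (with a suspend
+// poll); decode a reference result if any; restore the local reference state and unlink the
+// SIRT; poll for exceptions; and return.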
+CompiledMethod* ArtJniCompileMethodInternal(Compiler& compiler,
+                                            uint32_t access_flags, uint32_t method_idx,
+                                            const DexFile& dex_file) {
+  CHECK((access_flags & kAccNative) != 0);
+  const bool is_static = (access_flags & kAccStatic) != 0;
+  const bool is_synchronized = (access_flags & kAccSynchronized) != 0;
+  const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
+  InstructionSet instruction_set = compiler.GetInstructionSet();
+  if (instruction_set == kThumb2) {
+    instruction_set = kArm;
+  }
+  // Calling conventions used to iterate over parameters to method
+  UniquePtr<JniCallingConvention> jni_conv(
+      JniCallingConvention::Create(is_static, is_synchronized, shorty, instruction_set));
+  UniquePtr<ManagedRuntimeCallingConvention> mr_conv(
+      ManagedRuntimeCallingConvention::Create(is_static, is_synchronized, shorty, instruction_set));
+
+  // Assembler that holds generated instructions
+  UniquePtr<Assembler> jni_asm(Assembler::Create(instruction_set));
+
+  // Offsets into data structures
+  // TODO: if cross compiling these offsets are for the host not the target
+  const Offset functions(OFFSETOF_MEMBER(JNIEnvExt, functions));
+  const Offset monitor_enter(OFFSETOF_MEMBER(JNINativeInterface, MonitorEnter));
+  const Offset monitor_exit(OFFSETOF_MEMBER(JNINativeInterface, MonitorExit));
+
+  // 1. Build the frame saving all callee saves
+  const size_t frame_size(jni_conv->FrameSize());
+  const std::vector<ManagedRegister>& callee_save_regs = jni_conv->CalleeSaveRegisters();
+  __ BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills());
+
+  // 2. Set up the StackIndirectReferenceTable
+  mr_conv->ResetIterator(FrameOffset(frame_size));
+  jni_conv->ResetIterator(FrameOffset(0));
+  __ StoreImmediateToFrame(jni_conv->SirtNumRefsOffset(),
+                           jni_conv->ReferenceCount(),
+                           mr_conv->InterproceduralScratchRegister());
+  __ CopyRawPtrFromThread(jni_conv->SirtLinkOffset(),
+                          Thread::TopSirtOffset(),
+                          mr_conv->InterproceduralScratchRegister());
+  __ StoreStackOffsetToThread(Thread::TopSirtOffset(),
+                              jni_conv->SirtOffset(),
+                              mr_conv->InterproceduralScratchRegister());
+
+  // 3. Place incoming reference arguments into SIRT
+  jni_conv->Next();  // Skip JNIEnv*
+  // 3.5. Create Class argument for static methods out of passed method
+  if (is_static) {
+    FrameOffset sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
+    // Check sirt offset is within frame
+    CHECK_LT(sirt_offset.Uint32Value(), frame_size);
+    __ LoadRef(jni_conv->InterproceduralScratchRegister(),
+               mr_conv->MethodRegister(), Method::DeclaringClassOffset());
+    __ VerifyObject(jni_conv->InterproceduralScratchRegister(), false);
+    __ StoreRef(sirt_offset, jni_conv->InterproceduralScratchRegister());
+    jni_conv->Next();  // in SIRT so move to next argument
+  }
+  while (mr_conv->HasNext()) {
+    CHECK(jni_conv->HasNext());
+    bool ref_param = jni_conv->IsCurrentParamAReference();
+    CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
+    // References need placing in the SIRT; the SIRT entry is what is passed later
+    if (ref_param) {
+      // Compute SIRT entry, note null is placed in the SIRT but its boxed value
+      // must be NULL
+      FrameOffset sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
+      // Check SIRT offset is within frame and doesn't run into the saved segment state
+      CHECK_LT(sirt_offset.Uint32Value(), frame_size);
+      CHECK_NE(sirt_offset.Uint32Value(),
+               jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
+      bool input_in_reg = mr_conv->IsCurrentParamInRegister();
+      bool input_on_stack = mr_conv->IsCurrentParamOnStack();
+      CHECK(input_in_reg || input_on_stack);
+
+      if (input_in_reg) {
+        ManagedRegister in_reg  =  mr_conv->CurrentParamRegister();
+        __ VerifyObject(in_reg, mr_conv->IsCurrentArgPossiblyNull());
+        __ StoreRef(sirt_offset, in_reg);
+      } else if (input_on_stack) {
+        FrameOffset in_off  = mr_conv->CurrentParamStackOffset();
+        __ VerifyObject(in_off, mr_conv->IsCurrentArgPossiblyNull());
+        __ CopyRef(sirt_offset, in_off,
+                   mr_conv->InterproceduralScratchRegister());
+      }
+    }
+    mr_conv->Next();
+    jni_conv->Next();
+  }
+
+  // 4. Transition from being in managed to native code. Save the top_of_managed_stack_
+  // so that the managed stack can be crawled while in native code. Clear the corresponding
+  // PC value that has no meaning for this frame.
+  __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset());
+  __ StoreImmediateToThread(Thread::TopOfManagedStackPcOffset(), 0,
+                            mr_conv->InterproceduralScratchRegister());
+  ChangeThreadState(jni_asm.get(), Thread::kNative,
+                    mr_conv->InterproceduralScratchRegister(),
+                    ManagedRegister::NoRegister(), FrameOffset(0), 0);
+
+  // 5. Move frame down to allow space for outgoing args. Do this for as short a
+  //    time as possible to aid profiling.
+  const size_t out_arg_size = jni_conv->OutArgSize();
+  __ IncreaseFrameSize(out_arg_size);
+
+  // 6. Acquire lock for synchronized methods.
+  if (is_synchronized) {
+    // Compute arguments in registers to preserve
+    mr_conv->ResetIterator(FrameOffset(frame_size + out_arg_size));
+    std::vector<ManagedRegister> live_argument_regs;
+    std::vector<size_t> live_argument_regs_size;
+    while (mr_conv->HasNext()) {
+      if (mr_conv->IsCurrentParamInRegister()) {
+        live_argument_regs.push_back(mr_conv->CurrentParamRegister());
+        live_argument_regs_size.push_back(mr_conv->CurrentParamSize());
+      }
+      mr_conv->Next();
+    }
+
+    // Copy arguments to preserve to callee save registers
+    CHECK_LE(live_argument_regs.size(), callee_save_regs.size());
+    for (size_t i = 0; i < live_argument_regs.size(); i++) {
+      __ Move(callee_save_regs.at(i), live_argument_regs.at(i), live_argument_regs_size.at(i));
+    }
+
+    // Get SIRT entry for 1st argument (jclass or this) to be 1st argument to
+    // monitor enter
+    mr_conv->ResetIterator(FrameOffset(frame_size + out_arg_size));
+    jni_conv->ResetIterator(FrameOffset(out_arg_size));
+    jni_conv->Next();  // Skip JNIEnv*
+    if (is_static) {
+      FrameOffset sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
+      if (jni_conv->IsCurrentParamOnStack()) {
+        FrameOffset out_off = jni_conv->CurrentParamStackOffset();
+        __ CreateSirtEntry(out_off, sirt_offset,
+                           mr_conv->InterproceduralScratchRegister(),
+                           false);
+      } else {
+        ManagedRegister out_reg = jni_conv->CurrentParamRegister();
+        __ CreateSirtEntry(out_reg, sirt_offset,
+                           ManagedRegister::NoRegister(), false);
+      }
+    } else {
+      CopyParameter(jni_asm.get(), mr_conv.get(), jni_conv.get(), frame_size,
+                    out_arg_size);
+    }
+
+    // Generate JNIEnv* in place and leave a copy in jni_fns_register
+    jni_conv->ResetIterator(FrameOffset(out_arg_size));
+    ManagedRegister jni_fns_register =
+        jni_conv->InterproceduralScratchRegister();
+    __ LoadRawPtrFromThread(jni_fns_register, Thread::JniEnvOffset());
+    SetNativeParameter(jni_asm.get(), jni_conv.get(), jni_fns_register);
+
+    // Call JNIEnv->MonitorEnter(object)
+    __ LoadRawPtr(jni_fns_register, jni_fns_register, functions);
+    __ Call(jni_fns_register, monitor_enter,
+                  jni_conv->InterproceduralScratchRegister());
+
+    // Check for exceptions
+    __ ExceptionPoll(jni_conv->InterproceduralScratchRegister());
+
+    // Restore live arguments
+    for (size_t i = 0; i < live_argument_regs.size(); i++) {
+      __ Move(live_argument_regs.at(i), callee_save_regs.at(i), live_argument_regs_size.at(i));
+    }
+  }
+
+  // 7. Iterate over arguments, placing values from the managed calling convention
+  //    into the convention required for a native call (shuffling). For references,
+  //    pass a pointer to the SIRT entry, checking first whether the reference is
+  //    NULL (a null reference must be passed to native code as NULL).
+  //    Note: we do this prior to materializing the JNIEnv* and static's jclass to
+  //    give as many free registers for the shuffle as possible
+  mr_conv->ResetIterator(FrameOffset(frame_size + out_arg_size));
+  uint32_t args_count = 0;
+  while (mr_conv->HasNext()) {
+    args_count++;
+    mr_conv->Next();
+  }
+
+  // Do a backward pass over arguments, so that the generated code will be "mov
+  // R2, R3; mov R1, R2" instead of "mov R1, R2; mov R2, R3."
+  // TODO: A reverse iterator to improve readability.
+  for (uint32_t i = 0; i < args_count; ++i) {
+    mr_conv->ResetIterator(FrameOffset(frame_size + out_arg_size));
+    jni_conv->ResetIterator(FrameOffset(out_arg_size));
+    jni_conv->Next();  // Skip JNIEnv*
+    if (is_static) {
+      jni_conv->Next();  // Skip Class for now
+    }
+    for (uint32_t j = 0; j < args_count - i - 1; ++j) {
+      mr_conv->Next();
+      jni_conv->Next();
+    }
+    CopyParameter(jni_asm.get(), mr_conv.get(), jni_conv.get(), frame_size, out_arg_size);
+  }
+
+  if (is_static) {
+    // Create argument for Class
+    mr_conv->ResetIterator(FrameOffset(frame_size + out_arg_size));
+    jni_conv->ResetIterator(FrameOffset(out_arg_size));
+    jni_conv->Next();  // Skip JNIEnv*
+    FrameOffset sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
+    if (jni_conv->IsCurrentParamOnStack()) {
+      FrameOffset out_off = jni_conv->CurrentParamStackOffset();
+      __ CreateSirtEntry(out_off, sirt_offset,
+                         mr_conv->InterproceduralScratchRegister(),
+                         false);
+    } else {
+      ManagedRegister out_reg = jni_conv->CurrentParamRegister();
+      __ CreateSirtEntry(out_reg, sirt_offset,
+                         ManagedRegister::NoRegister(), false);
+    }
+  }
+  // 8. Create 1st argument, the JNI environment ptr and save the top of the local reference table
+  jni_conv->ResetIterator(FrameOffset(out_arg_size));
+  // Register that will hold local indirect reference table
+  if (jni_conv->IsCurrentParamInRegister()) {
+    ManagedRegister jni_env = jni_conv->CurrentParamRegister();
+    DCHECK(!jni_env.Equals(jni_conv->InterproceduralScratchRegister()));
+    __ LoadRawPtrFromThread(jni_env, Thread::JniEnvOffset());
+    // Frame[saved_local_ref_cookie_offset] = env->local_ref_cookie
+    __ Copy(jni_conv->SavedLocalReferenceCookieOffset(),
+            jni_env, JNIEnvExt::LocalRefCookieOffset(),
+            jni_conv->InterproceduralScratchRegister(), 4);
+    // env->local_ref_cookie = env->locals.segment_state
+    __ Copy(jni_env, JNIEnvExt::LocalRefCookieOffset(),
+            jni_env, JNIEnvExt::SegmentStateOffset(),
+            jni_conv->InterproceduralScratchRegister(), 4);
+  } else {
+    FrameOffset jni_env = jni_conv->CurrentParamStackOffset();
+    __ CopyRawPtrFromThread(jni_env, Thread::JniEnvOffset(),
+                            jni_conv->InterproceduralScratchRegister());
+    // Frame[saved_local_ref_cookie_offset] = env->local_ref_cookie
+    __ Copy(jni_conv->SavedLocalReferenceCookieOffset(),
+            jni_env, JNIEnvExt::LocalRefCookieOffset(),
+            jni_conv->InterproceduralScratchRegister(), 4);
+    // env->local_ref_cookie = env->locals.segment_state
+    __ Copy(jni_env, JNIEnvExt::LocalRefCookieOffset(),
+            jni_env, JNIEnvExt::SegmentStateOffset(),
+            jni_conv->InterproceduralScratchRegister(), 4);
+  }
+
+  // 9. Plant call to native code associated with method
+  if (!jni_conv->IsMethodRegisterClobberedPreCall()) {
+    // Method register shouldn't have been clobbered by setting up outgoing
+    // arguments
+    __ Call(mr_conv->MethodRegister(), Method::NativeMethodOffset(),
+            mr_conv->InterproceduralScratchRegister());
+  } else {
+    __ Call(jni_conv->MethodStackOffset(), Method::NativeMethodOffset(),
+            mr_conv->InterproceduralScratchRegister());
+  }
+
+  // 10. Release lock for synchronized methods.
+  if (is_synchronized) {
+    mr_conv->ResetIterator(FrameOffset(frame_size + out_arg_size));
+    jni_conv->ResetIterator(FrameOffset(out_arg_size));
+    jni_conv->Next();  // Skip JNIEnv*
+    // Save return value
+    FrameOffset return_save_location = jni_conv->ReturnValueSaveLocation();
+    if (jni_conv->SizeOfReturnValue() != 0) {
+      CHECK_LT(return_save_location.Uint32Value(), frame_size + out_arg_size);
+      __ Store(return_save_location, jni_conv->ReturnRegister(),
+               jni_conv->SizeOfReturnValue());
+    }
+    // Get SIRT entry for 1st argument
+    if (is_static) {
+      FrameOffset sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
+      if (jni_conv->IsCurrentParamOnStack()) {
+        FrameOffset out_off = jni_conv->CurrentParamStackOffset();
+        __ CreateSirtEntry(out_off, sirt_offset,
+                           mr_conv->InterproceduralScratchRegister(),
+                           false);
+      } else {
+        ManagedRegister out_reg = jni_conv->CurrentParamRegister();
+        __ CreateSirtEntry(out_reg, sirt_offset,
+                           ManagedRegister::NoRegister(), false);
+      }
+    } else {
+      CopyParameter(jni_asm.get(), mr_conv.get(), jni_conv.get(), frame_size,
+                    out_arg_size);
+    }
+    // Generate JNIEnv* in place and leave a copy in jni_env_register
+    jni_conv->ResetIterator(FrameOffset(out_arg_size));
+    ManagedRegister jni_env_register =
+        jni_conv->InterproceduralScratchRegister();
+    __ LoadRawPtrFromThread(jni_env_register, Thread::JniEnvOffset());
+    SetNativeParameter(jni_asm.get(), jni_conv.get(), jni_env_register);
+    // Call JNIEnv->MonitorExit(object)
+    __ LoadRawPtr(jni_env_register, jni_env_register, functions);
+    __ Call(jni_env_register, monitor_exit,
+            jni_conv->InterproceduralScratchRegister());
+    // Reload return value
+    if (jni_conv->SizeOfReturnValue() != 0) {
+      __ Load(jni_conv->ReturnRegister(), return_save_location,
+              jni_conv->SizeOfReturnValue());
+    }
+  }
+
+  // 11. Release outgoing argument area
+  __ DecreaseFrameSize(out_arg_size);
+  mr_conv->ResetIterator(FrameOffset(frame_size));
+  jni_conv->ResetIterator(FrameOffset(0));
+
+  // 12. Transition from being in native to managed code, possibly entering a
+  //     safepoint
+  // Don't clobber result
+  CHECK(!jni_conv->InterproceduralScratchRegister().Equals(jni_conv->ReturnRegister()));
+  // Location to preserve result on slow path, ensuring it's within the frame
+  FrameOffset return_save_location = jni_conv->ReturnValueSaveLocation();
+  CHECK(return_save_location.Uint32Value() < frame_size ||
+        jni_conv->SizeOfReturnValue() == 0);
+  ChangeThreadState(jni_asm.get(), Thread::kRunnable,
+                    jni_conv->InterproceduralScratchRegister(),
+                    jni_conv->ReturnRegister(), return_save_location,
+                    jni_conv->SizeOfReturnValue());
+
+  // 13. Place result in the correct register, possibly loading from the indirect
+  //     reference table
+  if (jni_conv->IsReturnAReference()) {
+    __ IncreaseFrameSize(out_arg_size);
+    jni_conv->ResetIterator(FrameOffset(out_arg_size));
+
+    jni_conv->Next();  // Skip Thread* argument
+    // Pass result as arg2
+    SetNativeParameter(jni_asm.get(), jni_conv.get(),
+                       jni_conv->ReturnRegister());
+
+    // Pass Thread*
+    jni_conv->ResetIterator(FrameOffset(out_arg_size));
+    if (jni_conv->IsCurrentParamInRegister()) {
+      __ GetCurrentThread(jni_conv->CurrentParamRegister());
+      __ Call(jni_conv->CurrentParamRegister(),
+              Offset(ENTRYPOINT_OFFSET(pDecodeJObjectInThread)),
+              jni_conv->InterproceduralScratchRegister());
+    } else {
+      __ GetCurrentThread(jni_conv->CurrentParamStackOffset(),
+                          jni_conv->InterproceduralScratchRegister());
+      __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pDecodeJObjectInThread)),
+              jni_conv->InterproceduralScratchRegister());
+    }
+
+    __ DecreaseFrameSize(out_arg_size);
+    jni_conv->ResetIterator(FrameOffset(0));
+  }
+  DCHECK_EQ(mr_conv->SizeOfReturnValue(), jni_conv->SizeOfReturnValue());
+  __ Move(mr_conv->ReturnRegister(), jni_conv->ReturnRegister(), mr_conv->SizeOfReturnValue());
+
+  // 14. Restore segment state and remove SIRT from thread
+  {
+    ManagedRegister jni_env = jni_conv->InterproceduralScratchRegister();
+    __ LoadRawPtrFromThread(jni_env, Thread::JniEnvOffset());
+    // env->locals.segment_state = env->local_ref_cookie
+    __ Copy(jni_env, JNIEnvExt::SegmentStateOffset(),
+            jni_env, JNIEnvExt::LocalRefCookieOffset(),
+            jni_conv->ReturnScratchRegister(), 4);
+    // env->local_ref_cookie = Frame[saved_local_ref_cookie_offset]
+    __ Copy(jni_env, JNIEnvExt::LocalRefCookieOffset(),
+            jni_conv->SavedLocalReferenceCookieOffset(),
+            jni_conv->ReturnScratchRegister(), 4);
+  }
+  __ CopyRawPtrToThread(Thread::TopSirtOffset(), jni_conv->SirtLinkOffset(),
+                        jni_conv->InterproceduralScratchRegister());
+
+  // 15. Check for a pending exception and deliver it if there is one
+  __ ExceptionPoll(jni_conv->InterproceduralScratchRegister());
+
+  // 16. Remove activation
+  if (is_synchronized) {
+    __ RemoveFrame(frame_size, callee_save_regs);
+  } else {
+    // No need to restore callee save registers: without a monitor to lock,
+    // nothing has clobbered them.
+    __ RemoveFrame(frame_size, std::vector<ManagedRegister>());
+  }
+
+  // 17. Finalize code generation
+  __ EmitSlowPaths();
+  size_t cs = __ CodeSize();
+  std::vector<uint8_t> managed_code(cs);
+  MemoryRegion code(&managed_code[0], managed_code.size());
+  __ FinalizeInstructions(code);
+  return new CompiledMethod(instruction_set,
+                            managed_code,
+                            frame_size,
+                            jni_conv->CoreSpillMask(),
+                            jni_conv->FpSpillMask());
+}
+
+}  // namespace art
+
+extern "C" art::CompiledMethod* ArtJniCompileMethod(art::Compiler& compiler,
+                                                    uint32_t access_flags, uint32_t method_idx,
+                                                    const art::DexFile& dex_file) {
+  return ArtJniCompileMethodInternal(compiler, access_flags, method_idx, dex_file);
+}
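The local reference cookie handling in the stub above (steps 8 and 14) is easier to follow outside of assembler calls. Below is a minimal C++ sketch of what the emitted code does around the native call; the LocalRefEnv struct and its field names are stand-ins suggested by the comments in the generator, not ART's actual JNIEnvExt declaration.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the parts of JNIEnvExt the stub touches.
struct LocalRefEnv {
  uint32_t local_ref_cookie;  // caller's saved segment state
  uint32_t segment_state;     // current top of the local reference table
};

// What the generated code does around the native call, expressed in C++.
uint32_t CallNative(LocalRefEnv* env, uint32_t (*native)(LocalRefEnv*)) {
  uint32_t saved_cookie = env->local_ref_cookie;       // step 8: spill cookie to the frame
  env->local_ref_cookie = env->segment_state;          // cookie now marks this call's base
  uint32_t result = native(env);                       // step 9: native code may grow the table
  env->segment_state = env->local_ref_cookie;          // step 14: pop this call's local refs
  env->local_ref_cookie = saved_cookie;                // restore the caller's cookie
  return result;
}

int main() {
  LocalRefEnv env = {0, 10};
  uint32_t r = CallNative(&env, [](LocalRefEnv* e) {
    e->segment_state += 3;  // pretend the native code created three local references
    return 42u;
  });
  std::printf("result=%u segment_state=%u cookie=%u\n", r, env.segment_state, env.local_ref_cookie);
  return 0;
}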
diff --git a/src/oat/jni/x86/calling_convention_x86.cc b/src/oat/jni/x86/calling_convention_x86.cc
new file mode 100644
index 0000000..1f66d71
--- /dev/null
+++ b/src/oat/jni/x86/calling_convention_x86.cc
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention_x86.h"
+
+#include "logging.h"
+#include "oat/utils/x86/managed_register_x86.h"
+#include "utils.h"
+
+namespace art {
+namespace x86 {
+
+// Calling convention
+
+ManagedRegister X86ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+  return X86ManagedRegister::FromCpuRegister(ECX);
+}
+
+ManagedRegister X86JniCallingConvention::InterproceduralScratchRegister() {
+  return X86ManagedRegister::FromCpuRegister(ECX);
+}
+
+ManagedRegister X86JniCallingConvention::ReturnScratchRegister() const {
+  return ManagedRegister::NoRegister();  // No free regs, so assembler uses push/pop
+}
+
+static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
+  if (shorty[0] == 'F' || shorty[0] == 'D') {
+    if (jni) {
+      return X86ManagedRegister::FromX87Register(ST0);
+    } else {
+      return X86ManagedRegister::FromXmmRegister(XMM0);
+    }
+  } else if (shorty[0] == 'J') {
+    return X86ManagedRegister::FromRegisterPair(EAX_EDX);
+  } else if (shorty[0] == 'V') {
+    return ManagedRegister::NoRegister();
+  } else {
+    return X86ManagedRegister::FromCpuRegister(EAX);
+  }
+}
+
+ManagedRegister X86ManagedRuntimeCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty(), false);
+}
+
+ManagedRegister X86JniCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty(), true);
+}
+
+// Managed runtime calling convention
+
+ManagedRegister X86ManagedRuntimeCallingConvention::MethodRegister() {
+  return X86ManagedRegister::FromCpuRegister(EAX);
+}
+
+bool X86ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+  return false;  // Everything is passed by stack
+}
+
+bool X86ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+  return true;  // Everything is passed by stack
+}
+
+ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamRegister() {
+  LOG(FATAL) << "Should not reach here";
+  return ManagedRegister::NoRegister();
+}
+
+FrameOffset X86ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+  return FrameOffset(displacement_.Int32Value() +   // displacement
+                     kPointerSize +                 // Method*
+                     (itr_slots_ * kPointerSize));  // offset into in args
+}
+
+const std::vector<ManagedRegister>& X86ManagedRuntimeCallingConvention::EntrySpills() {
+  // We spill the argument registers on X86 to free them up for scratch use; thereafter we
+  // assume all arguments are passed on the stack.
+  if (entry_spills_.size() == 0) {
+    size_t num_spills = NumArgs() + NumLongOrDoubleArgs();
+    if (num_spills > 0) {
+      entry_spills_.push_back(X86ManagedRegister::FromCpuRegister(ECX));
+      if (num_spills > 1) {
+        entry_spills_.push_back(X86ManagedRegister::FromCpuRegister(EDX));
+        if (num_spills > 2) {
+          entry_spills_.push_back(X86ManagedRegister::FromCpuRegister(EBX));
+        }
+      }
+    }
+  }
+  return entry_spills_;
+}
+
+// JNI calling convention
+
+std::vector<ManagedRegister> X86JniCallingConvention::callee_save_regs_;
+
+size_t X86JniCallingConvention::FrameSize() {
+  // Return address, Method* and local reference segment state
+  size_t frame_data_size = 3 * kPointerSize;
+  // References plus 2 words for SIRT header
+  size_t sirt_size = (ReferenceCount() + 2) * kPointerSize;
+  // Plus return value spill area size
+  return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+}
+
+size_t X86JniCallingConvention::OutArgSize() {
+  return RoundUp(NumberOfOutgoingStackArgs() * kPointerSize, kStackAlignment);
+}
+
+bool X86JniCallingConvention::IsMethodRegisterClobberedPreCall() {
+  return IsSynchronized();  // Monitor enter crushes the method register
+}
+
+bool X86JniCallingConvention::IsCurrentParamInRegister() {
+  return false;  // Everything is passed by stack.
+}
+
+bool X86JniCallingConvention::IsCurrentParamOnStack() {
+  return true;  // Everything is passed by stack.
+}
+
+ManagedRegister X86JniCallingConvention::CurrentParamRegister() {
+  LOG(FATAL) << "Should not reach here";
+  return ManagedRegister::NoRegister();
+}
+
+FrameOffset X86JniCallingConvention::CurrentParamStackOffset() {
+  return FrameOffset(displacement_.Int32Value() - OutArgSize() +
+                     (itr_slots_ * kPointerSize));
+}
+
+size_t X86JniCallingConvention::NumberOfOutgoingStackArgs() {
+  size_t static_args = IsStatic() ? 1 : 0;  // count jclass
+  // regular argument parameters and this
+  size_t param_args = NumArgs() + NumLongOrDoubleArgs();
+  return static_args + param_args + 2;  // count JNIEnv* and return pc (pushed after Method*)
+}
+
+}  // namespace x86
+}  // namespace art
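The two size computations above are simple rounded sums; a short standalone sketch makes the arithmetic concrete. The constants (kPointerSize = 4, kStackAlignment = 16) and the reference/argument counts below are assumptions chosen for illustration, not values taken from the build.

#include <cstddef>
#include <cstdio>

// Minimal stand-in for art's RoundUp (power-of-two alignment assumed).
static size_t RoundUp(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

int main() {
  const size_t kPointerSize = 4;       // assumed: 32-bit x86 target
  const size_t kStackAlignment = 16;   // assumed: x86 stack alignment

  // Example inputs: a static JNI method with 3 reference arguments and a jlong return.
  const size_t reference_count = 3;       // stands in for ReferenceCount()
  const size_t size_of_return_value = 8;  // stands in for SizeOfReturnValue()

  // FrameSize(): return address, Method*, local ref segment state, SIRT, return spill.
  size_t frame_data_size = 3 * kPointerSize;
  size_t sirt_size = (reference_count + 2) * kPointerSize;
  size_t frame_size =
      RoundUp(frame_data_size + sirt_size + size_of_return_value, kStackAlignment);

  // NumberOfOutgoingStackArgs(): jclass + 3 args (no longs/doubles) + JNIEnv* + return pc.
  size_t out_args = 1 + 3 + 2;
  size_t out_arg_size = RoundUp(out_args * kPointerSize, kStackAlignment);

  std::printf("frame_size=%zu out_arg_size=%zu\n", frame_size, out_arg_size);
  return 0;
}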
diff --git a/src/oat/jni/x86/calling_convention_x86.h b/src/oat/jni/x86/calling_convention_x86.h
new file mode 100644
index 0000000..e32b8fd
--- /dev/null
+++ b/src/oat/jni/x86/calling_convention_x86.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_JNI_X86_CALLING_CONVENTION_X86_H_
+#define ART_SRC_OAT_JNI_X86_CALLING_CONVENTION_X86_H_
+
+#include "oat/jni/calling_convention.h"
+
+namespace art {
+namespace x86 {
+
+class X86ManagedRuntimeCallingConvention : public ManagedRuntimeCallingConvention {
+ public:
+  explicit X86ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized,
+                                              const char* shorty) :
+      ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty) {}
+  virtual ~X86ManagedRuntimeCallingConvention() {}
+  // Calling convention
+  virtual ManagedRegister ReturnRegister();
+  virtual ManagedRegister InterproceduralScratchRegister();
+  // Managed runtime calling convention
+  virtual ManagedRegister MethodRegister();
+  virtual bool IsCurrentParamInRegister();
+  virtual bool IsCurrentParamOnStack();
+  virtual ManagedRegister CurrentParamRegister();
+  virtual FrameOffset CurrentParamStackOffset();
+  virtual const std::vector<ManagedRegister>& EntrySpills();
+ private:
+  std::vector<ManagedRegister> entry_spills_;
+  DISALLOW_COPY_AND_ASSIGN(X86ManagedRuntimeCallingConvention);
+};
+
+class X86JniCallingConvention : public JniCallingConvention {
+ public:
+  X86JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty) :
+      JniCallingConvention(is_static, is_synchronized, shorty) {}
+  virtual ~X86JniCallingConvention() {}
+  // Calling convention
+  virtual ManagedRegister ReturnRegister();
+  virtual ManagedRegister InterproceduralScratchRegister();
+  // JNI calling convention
+  virtual size_t FrameSize();
+  virtual size_t OutArgSize();
+  virtual const std::vector<ManagedRegister>& CalleeSaveRegisters() const {
+    DCHECK(callee_save_regs_.empty());
+    return callee_save_regs_;
+  }
+  virtual ManagedRegister ReturnScratchRegister() const;
+  virtual uint32_t CoreSpillMask() const {
+    return 0;
+  }
+  virtual uint32_t FpSpillMask() const {
+    return 0;
+  }
+  virtual bool IsMethodRegisterClobberedPreCall();
+  virtual bool IsCurrentParamInRegister();
+  virtual bool IsCurrentParamOnStack();
+  virtual ManagedRegister CurrentParamRegister();
+  virtual FrameOffset CurrentParamStackOffset();
+
+ protected:
+  virtual size_t NumberOfOutgoingStackArgs();
+
+ private:
+  static std::vector<ManagedRegister> callee_save_regs_;
+
+  DISALLOW_COPY_AND_ASSIGN(X86JniCallingConvention);
+};
+
+}  // namespace x86
+}  // namespace art
+
+#endif  // ART_SRC_OAT_JNI_X86_CALLING_CONVENTION_X86_H_
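The JNI stub generator drives this interface through its iterator protocol (ResetIterator, Next, IsCurrentParamOnStack, CurrentParamStackOffset). The toy class below is only a sketch of that protocol under assumed values (a 3-argument method, 4-byte slots); on x86 every parameter reports "on stack", as in the real class.

#include <cstddef>
#include <cstdio>

class ToyX86JniConvention {
 public:
  explicit ToyX86JniConvention(size_t num_args) : num_args_(num_args) {}
  void ResetIterator() { slot_ = 0; }
  bool HasNext() const { return slot_ < num_args_; }
  void Next() { ++slot_; }
  bool IsCurrentParamOnStack() const { return true; }           // x86: always on the stack
  size_t CurrentParamStackOffset() const { return slot_ * 4; }  // kPointerSize == 4 assumed
 private:
  size_t num_args_;
  size_t slot_ = 0;
};

int main() {
  ToyX86JniConvention conv(/*num_args=*/3);  // e.g. JNIEnv*, jclass, one int argument
  for (conv.ResetIterator(); conv.HasNext(); conv.Next()) {
    if (conv.IsCurrentParamOnStack()) {
      std::printf("copy arg to [ESP + %zu]\n", conv.CurrentParamStackOffset());
    }
  }
  return 0;
}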
diff --git a/src/oat/jni/x86/jni_internal_x86.cc b/src/oat/jni/x86/jni_internal_x86.cc
new file mode 100644
index 0000000..6abeb49
--- /dev/null
+++ b/src/oat/jni/x86/jni_internal_x86.cc
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiled_method.h"
+#include "compiler.h"
+#include "oat/utils/assembler.h"
+#include "oat/utils/x86/assembler_x86.h"
+#include "object.h"
+
+namespace art {
+namespace x86 {
+
+// Creates a function which invokes a managed method with an array of
+// arguments.
+//
+// Immediately after the call on X86, the environment looks like this:
+//
+// [SP+0 ] = Return address
+// [SP+4 ] = method pointer
+// [SP+8 ] = receiver pointer or NULL for static methods
+// [SP+12] = (managed) thread pointer
+// [SP+16] = argument array or NULL for no argument methods
+// [SP+20] = JValue* result or NULL for void returns
+//
+// As the JNI call has already transitioned the thread into the
+// "running" state, the remaining responsibilities of this routine are
+// to save the native registers and set up the managed registers. On
+// return, the return value must be stored into the result JValue.
+CompiledInvokeStub* CreateInvokeStub(bool is_static, const char* shorty, uint32_t shorty_len) {
+  UniquePtr<X86Assembler> assembler(down_cast<X86Assembler*>(Assembler::Create(kX86)));
+#define __ assembler->
+  size_t num_arg_array_bytes = NumArgArrayBytes(shorty, shorty_len);
+  // Size of frame = return address + saved EBX + Method* + possible receiver + arg array size
+  // Note, space is left in the frame to flush arguments in registers back to out locations.
+  size_t frame_size = 3 * kPointerSize + (is_static ? 0 : kPointerSize) + num_arg_array_bytes;
+  size_t pad_size = RoundUp(frame_size, kStackAlignment) - frame_size;
+
+  Register rMethod = EAX;
+  __ movl(rMethod,   Address(ESP, 4));     // EAX = method
+  Register rReceiver = ECX;
+  if (!is_static) {
+    __ movl(rReceiver, Address(ESP, 8));   // ECX = receiver
+  }
+  // Save EBX
+  __ pushl(EBX);
+  Register rArgArray = EBX;
+  __ movl(rArgArray, Address(ESP, 20));    // EBX = arg array
+
+  // TODO: optimize the frame set up to avoid excessive SP math
+  // Push padding
+  if (pad_size != 0) {
+    __ subl(ESP, Immediate(pad_size));
+  }
+  // Push/copy arguments.
+  size_t arg_count = (shorty_len - 1);
+  size_t dst_offset = num_arg_array_bytes;
+  size_t src_offset = arg_count * sizeof(JValue);
+  for (size_t i = shorty_len - 1; i > 0; --i) {
+    switch (shorty[i]) {
+      case 'D':
+      case 'J':
+        // Move both pointers 64 bits.
+        dst_offset -= kPointerSize;
+        src_offset -= sizeof(JValue) / 2;
+        __ pushl(Address(rArgArray, src_offset));
+        dst_offset -= kPointerSize;
+        src_offset -= sizeof(JValue) / 2;
+        __ pushl(Address(rArgArray, src_offset));
+        break;
+      default:
+        // Move the source pointer sizeof(JValue) and the destination pointer 32 bits.
+        dst_offset -= kPointerSize;
+        src_offset -= sizeof(JValue);
+        __ pushl(Address(rArgArray, src_offset));
+        break;
+    }
+  }
+
+  // Backing space for receiver.
+  if (!is_static) {
+    __ pushl(Immediate(0));
+  }
+  // Push 0 as NULL Method* thereby terminating managed stack crawls.
+  __ pushl(Immediate(0));
+  if (!is_static) {
+    if (shorty_len > 1) {
+      // Receiver already in ECX, pass remaining 2 args in EDX and EBX.
+      __ movl(EDX, Address(rArgArray, 0));
+      if (shorty[1] == 'D' || shorty[1] == 'J') {
+        __ movl(EBX, Address(rArgArray, sizeof(JValue) / 2));
+      } else if (shorty_len > 2) {
+        __ movl(EBX, Address(rArgArray, sizeof(JValue)));
+      }
+    }
+  } else {
+    if (shorty_len > 1) {
+      // Pass remaining 3 args in ECX, EDX and EBX.
+      __ movl(ECX, Address(rArgArray, 0));
+      if (shorty[1] == 'D' || shorty[1] == 'J') {
+        __ movl(EDX, Address(rArgArray, sizeof(JValue) / 2));
+        if (shorty_len > 2) {
+           __ movl(EBX, Address(rArgArray, sizeof(JValue)));
+        }
+      } else if (shorty_len > 2) {
+        __ movl(EDX, Address(rArgArray, sizeof(JValue)));
+        if (shorty[2] == 'D' || shorty[2] == 'J') {
+          __ movl(EBX, Address(rArgArray, sizeof(JValue) + (sizeof(JValue) / 2)));
+        } else {
+          __ movl(EBX, Address(rArgArray, sizeof(JValue) + sizeof(JValue)));
+        }
+      }
+    }
+  }
+
+  __ call(Address(EAX, Method::GetCodeOffset()));  // Call code off of method
+
+  // Pop arguments up to EBX and the return address.
+  __ addl(ESP, Immediate(frame_size + pad_size - (2 * kPointerSize)));
+  // Restore EBX.
+  __ popl(EBX);
+  char ch = shorty[0];
+  if (ch != 'V') {
+    // Load the result JValue pointer.
+    __ movl(ECX, Address(ESP, 20));
+    switch (ch) {
+      case 'D':
+        __ movsd(Address(ECX, 0), XMM0);
+        break;
+      case 'F':
+        __ movss(Address(ECX, 0), XMM0);
+        break;
+      case 'J':
+        __ movl(Address(ECX, 0), EAX);
+        __ movl(Address(ECX, 4), EDX);
+        break;
+      default:
+        __ movl(Address(ECX, 0), EAX);
+        break;
+    }
+  }
+  __ ret();
+  // TODO: store native_entry in the stub table
+  std::vector<uint8_t> code(assembler->CodeSize());
+  MemoryRegion region(&code[0], code.size());
+  assembler->FinalizeInstructions(region);
+  return new CompiledInvokeStub(code);
+#undef __
+}
+
+}  // namespace x86
+}  // namespace art
+
+extern "C" art::CompiledInvokeStub* ArtCreateInvokeStub(art::Compiler& /*compiler*/, bool is_static,
+                                                        const char* shorty, uint32_t shorty_len) {
+  return art::x86::CreateInvokeStub(is_static, shorty, shorty_len);
+}
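From the caller's side, the stack layout documented at the top of the file amounts to an ordinary cdecl call with five word-sized arguments. The sketch below shows that inferred contract; the InvokeStub typedef, the FakeStub body, and the Method/Thread/JValue types are illustrative assumptions, not declarations from this patch.

#include <cstdint>
#include <cstdio>
#include <cstring>

union JValue { int32_t i; int64_t j; float f; double d; };
struct Method;
struct Thread;

// Inferred signature of the generated stub: method, receiver (NULL for static),
// thread, argument array, and result slot, pushed right-to-left by the caller.
using InvokeStub = void (*)(Method* method, void* receiver, Thread* self,
                            JValue* args, JValue* result);

// Hypothetical call site: invoking a static int method that takes one int argument.
void CallThroughStub(InvokeStub stub, Method* m, Thread* self) {
  JValue args[1];
  args[0].i = 7;
  JValue result;
  std::memset(&result, 0, sizeof(result));
  stub(m, /*receiver=*/nullptr, self, args, &result);
  std::printf("returned %d\n", result.i);
}

// Stand-in for the generated machine code, so the sketch runs on a host.
static void FakeStub(Method*, void*, Thread*, JValue* args, JValue* result) {
  result->i = args[0].i + 1;
}

int main() {
  CallThroughStub(&FakeStub, nullptr, nullptr);
  return 0;
}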
diff --git a/src/oat/runtime/arm/context_arm.cc b/src/oat/runtime/arm/context_arm.cc
new file mode 100644
index 0000000..28f1db9
--- /dev/null
+++ b/src/oat/runtime/arm/context_arm.cc
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "context_arm.h"
+
+#include "object.h"
+
+namespace art {
+namespace arm {
+
+ArmContext::ArmContext() {
+#ifndef NDEBUG
+  // Initialize registers with easy to spot debug values
+  for (int i = 0; i < 16; i++) {
+    gprs_[i] = 0xEBAD6070+i;
+  }
+  for (int i = 0; i < 32; i++) {
+    fprs_[i] = 0xEBAD8070+i;
+  }
+#endif
+}
+
+void ArmContext::FillCalleeSaves(const Frame& fr) {
+  Method* method = fr.GetMethod();
+  uint32_t core_spills = method->GetCoreSpillMask();
+  uint32_t fp_spills = method->GetFpSpillMask();
+  size_t spill_count = __builtin_popcount(core_spills);
+  size_t fp_spill_count = __builtin_popcount(fp_spills);
+  if (spill_count > 0) {
+    // Lowest number spill is furthest away, walk registers and fill into context
+    int j = 1;
+    for (int i = 0; i < 16; i++) {
+      if (((core_spills >> i) & 1) != 0) {
+        gprs_[i] = fr.LoadCalleeSave(spill_count - j);
+        j++;
+      }
+    }
+  }
+  if (fp_spill_count > 0) {
+    // Lowest number spill is furthest away, walk registers and fill into context
+    int j = 1;
+    for (int i = 0; i < 32; i++) {
+      if (((fp_spills >> i) & 1) != 0) {
+        fprs_[i] = fr.LoadCalleeSave(spill_count + fp_spill_count - j);
+        j++;
+      }
+    }
+  }
+}
+
+extern "C" void art_do_long_jump(uint32_t*, uint32_t*);
+
+void ArmContext::DoLongJump() {
+  art_do_long_jump(&gprs_[0], &fprs_[S0]);
+}
+
+}  // namespace arm
+}  // namespace art
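FillCalleeSaves maps spill-mask bits to frame slots with the lowest-numbered spilled register living furthest away. The standalone sketch below reproduces just that mapping; the example mask {r4, r5, r11, lr} and the saved slot values are made up for illustration.

#include <cstdint>
#include <cstdio>

int main() {
  // Assumed example: the method spilled r4, r5, r11 and lr (r14).
  uint32_t core_spills = (1u << 4) | (1u << 5) | (1u << 11) | (1u << 14);
  // Saved values as LoadCalleeSave(0..3) would return them (values made up).
  uint32_t saved_slots[4] = {0xAAAA0001, 0xAAAA0002, 0xAAAA0003, 0xAAAA0004};

  uint32_t gprs[16] = {0};
  int spill_count = __builtin_popcount(core_spills);
  int j = 1;
  for (int i = 0; i < 16; ++i) {
    if (((core_spills >> i) & 1) != 0) {
      // Lowest-numbered register is furthest away, i.e. the highest slot index.
      gprs[i] = saved_slots[spill_count - j];
      ++j;
    }
  }
  for (int i = 0; i < 16; ++i) {
    if (gprs[i] != 0) {
      std::printf("r%d = 0x%08X\n", i, static_cast<unsigned>(gprs[i]));
    }
  }
  return 0;
}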
diff --git a/src/oat/runtime/arm/context_arm.h b/src/oat/runtime/arm/context_arm.h
new file mode 100644
index 0000000..216d282
--- /dev/null
+++ b/src/oat/runtime/arm/context_arm.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_RUNTIME_ARM_CONTEXT_ARM_H_
+#define ART_SRC_OAT_RUNTIME_ARM_CONTEXT_ARM_H_
+
+#include "constants_arm.h"
+#include "oat/runtime/context.h"
+
+namespace art {
+namespace arm {
+
+class ArmContext : public Context {
+ public:
+  ArmContext();
+  virtual ~ArmContext() {}
+
+  virtual void FillCalleeSaves(const Frame& fr);
+
+  virtual void SetSP(uintptr_t new_sp) {
+    gprs_[SP] = new_sp;
+  }
+
+  virtual void SetPC(uintptr_t new_pc) {
+    gprs_[PC] = new_pc;
+  }
+
+  virtual uintptr_t GetGPR(uint32_t reg) {
+    CHECK_GE(reg, 0u);
+    CHECK_LT(reg, 16u);
+    return gprs_[reg];
+  }
+
+  virtual void DoLongJump();
+
+ private:
+  uintptr_t gprs_[16];
+  uint32_t fprs_[32];
+};
+
+}  // namespace arm
+}  // namespace art
+
+#endif  // ART_SRC_OAT_RUNTIME_ARM_CONTEXT_ARM_H_
diff --git a/src/oat/runtime/arm/oat_support_entrypoints_arm.cc b/src/oat/runtime/arm/oat_support_entrypoints_arm.cc
new file mode 100644
index 0000000..fcff424
--- /dev/null
+++ b/src/oat/runtime/arm/oat_support_entrypoints_arm.cc
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "oat/runtime/oat_support_entrypoints.h"
+
+namespace art {
+
+// Alloc entrypoints.
+extern "C" void* art_alloc_array_from_code(uint32_t, void*, int32_t);
+extern "C" void* art_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_alloc_object_from_code(uint32_t type_idx, void* method);
+extern "C" void* art_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method);
+extern "C" void* art_check_and_alloc_array_from_code(uint32_t, void*, int32_t);
+extern "C" void* art_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+
+// Cast entrypoints.
+extern uint32_t IsAssignableFromCode(const Class* klass, const Class* ref_class);
+extern "C" void art_can_put_array_element_from_code(void*, void*);
+extern "C" void art_check_cast_from_code(void*, void*);
+
+// Debug entrypoints.
+extern void DebugMe(Method* method, uint32_t info);
+extern "C" void art_update_debugger(void*, void*, int32_t, void*);
+
+// DexCache entrypoints.
+extern "C" void* art_initialize_static_storage_from_code(uint32_t, void*);
+extern "C" void* art_initialize_type_from_code(uint32_t, void*);
+extern "C" void* art_initialize_type_and_verify_access_from_code(uint32_t, void*);
+extern "C" void* art_resolve_string_from_code(void*, uint32_t);
+
+// Field entrypoints.
+extern "C" int art_set32_instance_from_code(uint32_t, void*, int32_t);
+extern "C" int art_set32_static_from_code(uint32_t, int32_t);
+extern "C" int art_set64_instance_from_code(uint32_t, void*, int64_t);
+extern "C" int art_set64_static_from_code(uint32_t, int64_t);
+extern "C" int art_set_obj_instance_from_code(uint32_t, void*, void*);
+extern "C" int art_set_obj_static_from_code(uint32_t, void*);
+extern "C" int32_t art_get32_instance_from_code(uint32_t, void*);
+extern "C" int32_t art_get32_static_from_code(uint32_t);
+extern "C" int64_t art_get64_instance_from_code(uint32_t, void*);
+extern "C" int64_t art_get64_static_from_code(uint32_t);
+extern "C" void* art_get_obj_instance_from_code(uint32_t, void*);
+extern "C" void* art_get_obj_static_from_code(uint32_t);
+
+// FillArray entrypoint.
+extern "C" void art_handle_fill_data_from_code(void*, void*);
+
+// JNI entrypoints.
+extern Object* DecodeJObjectInThread(Thread* thread, jobject obj);
+extern void* FindNativeMethod(Thread* thread);
+
+// Lock entrypoints.
+extern "C" void art_lock_object_from_code(void*);
+extern "C" void art_unlock_object_from_code(void*);
+
+// Math entrypoints.
+extern int32_t CmpgDouble(double a, double b);
+extern int32_t CmplDouble(double a, double b);
+extern int32_t CmpgFloat(float a, float b);
+extern int32_t CmplFloat(float a, float b);
+
+// Math conversions.
+extern "C" float __aeabi_i2f(int32_t op1);         // INT_TO_FLOAT
+extern "C" int32_t __aeabi_f2iz(float op1);        // FLOAT_TO_INT
+extern "C" float __aeabi_d2f(double op1);          // DOUBLE_TO_FLOAT
+extern "C" double __aeabi_f2d(float op1);          // FLOAT_TO_DOUBLE
+extern "C" double __aeabi_i2d(int32_t op1);        // INT_TO_DOUBLE
+extern "C" int32_t __aeabi_d2iz(double op1);       // DOUBLE_TO_INT
+extern "C" float __aeabi_l2f(int64_t op1);         // LONG_TO_FLOAT
+extern "C" double __aeabi_l2d(int64_t op1);        // LONG_TO_DOUBLE
+extern int64_t D2L(double d);
+extern int64_t F2L(float f);
+
+// Single-precision FP arithmetic.
+extern "C" float __aeabi_fadd(float a, float b);   // ADD_FLOAT[_2ADDR]
+extern "C" float __aeabi_fsub(float a, float b);   // SUB_FLOAT[_2ADDR]
+extern "C" float __aeabi_fdiv(float a, float b);   // DIV_FLOAT[_2ADDR]
+extern "C" float __aeabi_fmul(float a, float b);   // MUL_FLOAT[_2ADDR]
+extern "C" float fmodf(float a, float b);          // REM_FLOAT[_2ADDR]
+
+// Double-precision FP arithmetic.
+extern "C" double __aeabi_dadd(double a, double b); // ADD_DOUBLE[_2ADDR]
+extern "C" double __aeabi_dsub(double a, double b); // SUB_DOUBLE[_2ADDR]
+extern "C" double __aeabi_ddiv(double a, double b); // DIV_DOUBLE[_2ADDR]
+extern "C" double __aeabi_dmul(double a, double b); // MUL_DOUBLE[_2ADDR]
+extern "C" double fmod(double a, double b);         // REM_DOUBLE[_2ADDR]
+
+// Integer arithmetic.
+extern "C" int __aeabi_idivmod(int32_t op1, int32_t op2);  // REM_INT[_2ADDR|_LIT8|_LIT16]
+extern "C" int __aeabi_idiv(int32_t op1, int32_t op2);     // DIV_INT[_2ADDR|_LIT8|_LIT16]
+
+// Long long arithmetic - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
+extern "C" long long __aeabi_ldivmod(long long op1, long long op2);
+extern "C" long long __aeabi_lmul(long long op1, long long op2);
+extern "C" uint64_t art_shl_long(uint64_t, uint32_t);
+extern "C" uint64_t art_shr_long(uint64_t, uint32_t);
+extern "C" uint64_t art_ushr_long(uint64_t, uint32_t);
+
+// Intrinsic entrypoints.
+extern "C" int32_t __memcmp16(void*, void*, int32_t);
+extern "C" int32_t art_indexof(void*, uint32_t, uint32_t, uint32_t);
+extern "C" int32_t art_string_compareto(void*, void*);
+
+// Invoke entrypoints.
+const void* UnresolvedDirectMethodTrampolineFromCode(Method*, Method**, Thread*,
+                                                     Runtime::TrampolineType);
+extern "C" void art_invoke_direct_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_invoke_interface_trampoline(uint32_t, void*);
+extern "C" void art_invoke_interface_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_invoke_static_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_invoke_super_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
+
+// Thread entrypoints.
+extern void CheckSuspendFromCode(Thread* thread);
+extern "C" void art_test_suspend();
+
+// Throw entrypoints.
+extern void ThrowAbstractMethodErrorFromCode(Method* method, Thread* thread, Method** sp);
+extern "C" void art_deliver_exception_from_code(void*);
+extern "C" void art_throw_array_bounds_from_code(int32_t index, int32_t limit);
+extern "C" void art_throw_div_zero_from_code();
+extern "C" void art_throw_no_such_method_from_code(int32_t method_idx);
+extern "C" void art_throw_null_pointer_exception_from_code();
+extern "C" void art_throw_stack_overflow_from_code(void*);
+extern "C" void art_throw_verification_error_from_code(int32_t src1, int32_t ref);
+
+// Trace entrypoints.
+extern "C" void art_trace_entry_from_code(void*);
+extern "C" void art_trace_exit_from_code();
+
+void InitEntryPoints(EntryPoints* points) {
+  // Alloc
+  points->pAllocArrayFromCode = art_alloc_array_from_code;
+  points->pAllocArrayFromCodeWithAccessCheck = art_alloc_array_from_code_with_access_check;
+  points->pAllocObjectFromCode = art_alloc_object_from_code;
+  points->pAllocObjectFromCodeWithAccessCheck = art_alloc_object_from_code_with_access_check;
+  points->pCheckAndAllocArrayFromCode = art_check_and_alloc_array_from_code;
+  points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_check_and_alloc_array_from_code_with_access_check;
+
+  // Cast
+  points->pInstanceofNonTrivialFromCode = IsAssignableFromCode;
+  points->pCanPutArrayElementFromCode = art_can_put_array_element_from_code;
+  points->pCheckCastFromCode = art_check_cast_from_code;
+
+  // Debug
+  points->pDebugMe = DebugMe;
+  points->pUpdateDebuggerFromCode = NULL; // Controlled by SetDebuggerUpdatesEnabled.
+
+  // DexCache
+  points->pInitializeStaticStorage = art_initialize_static_storage_from_code;
+  points->pInitializeTypeAndVerifyAccessFromCode = art_initialize_type_and_verify_access_from_code;
+  points->pInitializeTypeFromCode = art_initialize_type_from_code;
+  points->pResolveStringFromCode = art_resolve_string_from_code;
+
+  // Field
+  points->pSet32Instance = art_set32_instance_from_code;
+  points->pSet32Static = art_set32_static_from_code;
+  points->pSet64Instance = art_set64_instance_from_code;
+  points->pSet64Static = art_set64_static_from_code;
+  points->pSetObjInstance = art_set_obj_instance_from_code;
+  points->pSetObjStatic = art_set_obj_static_from_code;
+  points->pGet32Instance = art_get32_instance_from_code;
+  points->pGet64Instance = art_get64_instance_from_code;
+  points->pGetObjInstance = art_get_obj_instance_from_code;
+  points->pGet32Static = art_get32_static_from_code;
+  points->pGet64Static = art_get64_static_from_code;
+  points->pGetObjStatic = art_get_obj_static_from_code;
+
+  // FillArray
+  points->pHandleFillArrayDataFromCode = art_handle_fill_data_from_code;
+
+  // JNI
+  points->pDecodeJObjectInThread = DecodeJObjectInThread;
+  points->pFindNativeMethod = FindNativeMethod;
+
+  // Locks
+  points->pLockObjectFromCode = art_lock_object_from_code;
+  points->pUnlockObjectFromCode = art_unlock_object_from_code;
+
+  // Math
+  points->pCmpgDouble = CmpgDouble;
+  points->pCmpgFloat = CmpgFloat;
+  points->pCmplDouble = CmplDouble;
+  points->pCmplFloat = CmplFloat;
+  points->pDadd = __aeabi_dadd;
+  points->pDdiv = __aeabi_ddiv;
+  points->pDmul = __aeabi_dmul;
+  points->pDsub = __aeabi_dsub;
+  points->pF2d = __aeabi_f2d;
+  points->pFmod = fmod;
+  points->pI2d = __aeabi_i2d;
+  points->pL2d = __aeabi_l2d;
+  points->pD2f = __aeabi_d2f;
+  points->pFadd = __aeabi_fadd;
+  points->pFdiv = __aeabi_fdiv;
+  points->pFmodf = fmodf;
+  points->pFmul = __aeabi_fmul;
+  points->pFsub = __aeabi_fsub;
+  points->pI2f = __aeabi_i2f;
+  points->pL2f = __aeabi_l2f;
+  points->pD2iz = __aeabi_d2iz;
+  points->pF2iz = __aeabi_f2iz;
+  points->pIdiv = __aeabi_idivmod;
+  points->pIdivmod = __aeabi_idivmod;
+  points->pD2l = D2L;
+  points->pF2l = F2L;
+  points->pLadd = NULL;
+  points->pLand = NULL;
+  points->pLdivmod = __aeabi_ldivmod;
+  points->pLmul = __aeabi_lmul;
+  points->pLor = NULL;
+  points->pLsub = NULL;
+  points->pLxor = NULL;
+  points->pShlLong = art_shl_long;
+  points->pShrLong = art_shr_long;
+  points->pUshrLong = art_ushr_long;
+
+  // Intrinsics
+  points->pIndexOf = art_indexof;
+  points->pMemcmp16 = __memcmp16;
+  points->pStringCompareTo = art_string_compareto;
+  points->pMemcpy = memcpy;
+
+  // Invocation
+  points->pUnresolvedDirectMethodTrampolineFromCode = UnresolvedDirectMethodTrampolineFromCode;
+  points->pInvokeDirectTrampolineWithAccessCheck = art_invoke_direct_trampoline_with_access_check;
+  points->pInvokeInterfaceTrampoline = art_invoke_interface_trampoline;
+  points->pInvokeInterfaceTrampolineWithAccessCheck = art_invoke_interface_trampoline_with_access_check;
+  points->pInvokeStaticTrampolineWithAccessCheck = art_invoke_static_trampoline_with_access_check;
+  points->pInvokeSuperTrampolineWithAccessCheck = art_invoke_super_trampoline_with_access_check;
+  points->pInvokeVirtualTrampolineWithAccessCheck = art_invoke_virtual_trampoline_with_access_check;
+
+  // Thread
+  points->pCheckSuspendFromCode = CheckSuspendFromCode;
+  points->pTestSuspendFromCode = art_test_suspend;
+
+  // Throws
+  points->pDeliverException = art_deliver_exception_from_code;
+  points->pThrowAbstractMethodErrorFromCode = ThrowAbstractMethodErrorFromCode;
+  points->pThrowArrayBoundsFromCode = art_throw_array_bounds_from_code;
+  points->pThrowDivZeroFromCode = art_throw_div_zero_from_code;
+  points->pThrowNoSuchMethodFromCode = art_throw_no_such_method_from_code;
+  points->pThrowNullPointerFromCode = art_throw_null_pointer_exception_from_code;
+  points->pThrowStackOverflowFromCode = art_throw_stack_overflow_from_code;
+  points->pThrowVerificationErrorFromCode = art_throw_verification_error_from_code;
+}
+
+void ChangeDebuggerEntryPoint(EntryPoints* points, bool enabled) {
+  points->pUpdateDebuggerFromCode = (enabled ? art_update_debugger : NULL);
+}
+
+bool IsTraceExitPc(uintptr_t pc) {
+  uintptr_t trace_exit = reinterpret_cast<uintptr_t>(art_trace_exit_from_code);
+  return pc == trace_exit;
+}
+
+void* GetLogTraceEntryPoint() {
+  return reinterpret_cast<void*>(art_trace_entry_from_code);
+}
+
+}  // namespace art
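The file above follows a simple pattern: a table of function pointers is filled in once and installed per thread, and compiled code reaches each helper by loading the pointer at a fixed offset from the thread register (r9 on ARM). The sketch below is a compressed two-entry model of that pattern; the toy struct, its layout, and the host stand-ins are illustrative, not ART's actual EntryPoints definition.

#include <cstdint>
#include <cstdio>

struct ToyEntryPoints {
  int32_t (*pIdivmod)(int32_t, int32_t);
  uint64_t (*pShlLong)(uint64_t, uint32_t);
};

// Host stand-ins for __aeabi_idivmod and art_shl_long.
static int32_t HostIdivmod(int32_t a, int32_t b) { return a / b; }
static uint64_t HostShlLong(uint64_t v, uint32_t shift) { return v << (shift & 63); }

void InitToyEntryPoints(ToyEntryPoints* points) {
  points->pIdivmod = HostIdivmod;
  points->pShlLong = HostShlLong;
}

int main() {
  // Each Thread would carry one of these; compiled code calls through it indirectly.
  ToyEntryPoints points;
  InitToyEntryPoints(&points);
  std::printf("7/2 via table = %d\n", points.pIdivmod(7, 2));
  std::printf("1<<40 via table = %llu\n",
              static_cast<unsigned long long>(points.pShlLong(1, 40)));
  return 0;
}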
diff --git a/src/oat/runtime/arm/runtime_support_arm.S b/src/oat/runtime/arm/runtime_support_arm.S
new file mode 100644
index 0000000..5446919
--- /dev/null
+++ b/src/oat/runtime/arm/runtime_support_arm.S
@@ -0,0 +1,1069 @@
+#include "asm_support.h"
+
+    /* Deliver the given exception */
+    .extern artDeliverExceptionFromCode
+    /* Deliver an exception pending on a thread */
+    .extern artDeliverPendingExceptionFromCode
+
+    /* Cache alignment for function entry */
+.macro ALIGN_FUNCTION_ENTRY
+    .balign 16
+.endm
+
+    /*
+     * Macro that sets up the callee save frame to conform with
+     * Runtime::CreateCalleeSaveMethod(kSaveAll)
+     */
+.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    push {r4-r11, lr} @ 9 words of callee saves
+    vpush {s0-s31}
+    sub sp, #12       @ 3 words of space, bottom word will hold Method*
+.endm
+
+    /*
+     * Macro that sets up the callee save frame to conform with
+     * Runtime::CreateCalleeSaveMethod(kRefsOnly). Restoration assumes non-moving GC.
+     */
+.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+    push {r5-r8, r10-r11, lr} @ 7 words of callee saves
+    sub sp, #4                @ bottom word will hold Method*
+.endm
+
+.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    ldr lr, [sp, #28]  @ restore lr for return
+    add sp, #32        @ unwind stack
+.endm
+
+.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+    ldr lr, [sp, #28]  @ restore lr for return
+    add sp, #32        @ unwind stack
+    bx  lr             @ return
+.endm
+
+    /*
+     * Macro that sets up the callee save frame to conform with
+     * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes non-moving GC.
+     */
+.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+    push {r1-r3, r5-r8, r10-r11, lr}  @ 10 words of callee saves
+    sub sp, #8                        @ 2 words of space, bottom word will hold Method*
+.endm
+
+.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+    ldr  r1, [sp, #8]          @ restore non-callee save r1
+    ldrd r2, [sp, #12]         @ restore non-callee saves r2-r3
+    ldr  lr, [sp, #44]         @ restore lr
+    add  sp, #48               @ rewind sp
+.endm
+
+    /*
+     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+     * exception is Thread::Current()->exception_
+     */
+.macro DELIVER_PENDING_EXCEPTION
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME           @ save callee saves for throw
+    mov    r0, r9                              @ pass Thread::Current
+    mov    r1, sp                              @ pass SP
+    b      artDeliverPendingExceptionFromCode  @ artDeliverPendingExceptionFromCode(Thread*, SP)
+.endm
+
+.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
+    .global \c_name
+    .extern \cxx_name
+    ALIGN_FUNCTION_ENTRY
+\c_name:
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context
+    mov r0, r9                      @ pass Thread::Current
+    mov r1, sp                      @ pass SP
+    b   \cxx_name                   @ \cxx_name(Thread*, SP)
+.endm
+
+.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
+    .global \c_name
+    .extern \cxx_name
+    ALIGN_FUNCTION_ENTRY
+\c_name:
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context
+    mov r1, r9                      @ pass Thread::Current
+    mov r2, sp                      @ pass SP
+    b   \cxx_name                   @ \cxx_name(Thread*, SP)
+.endm
+
+.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
+    .global \c_name
+    .extern \cxx_name
+    ALIGN_FUNCTION_ENTRY
+\c_name:
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context
+    mov r2, r9                      @ pass Thread::Current
+    mov r3, sp                      @ pass SP
+    b   \cxx_name                   @ \cxx_name(Thread*, SP)
+.endm
+
+    /*
+     * Called by managed code; saves callee saves and then calls artDeliverExceptionFromCode,
+     * which will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
+     */
+ONE_ARG_RUNTIME_EXCEPTION art_deliver_exception_from_code, artDeliverExceptionFromCode
+
+    /*
+     * Called by managed code to create and deliver a NullPointerException.
+     */
+NO_ARG_RUNTIME_EXCEPTION art_throw_null_pointer_exception_from_code, artThrowNullPointerExceptionFromCode
+
+    /*
+     * Called by managed code to create and deliver an ArithmeticException.
+     */
+NO_ARG_RUNTIME_EXCEPTION art_throw_div_zero_from_code, artThrowDivZeroFromCode
+
+    /*
+     * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
+     * index, arg2 holds limit.
+     */
+TWO_ARG_RUNTIME_EXCEPTION art_throw_array_bounds_from_code, artThrowArrayBoundsFromCode
+
+    /*
+     * Called by managed code to create and deliver a StackOverflowError.
+     */
+NO_ARG_RUNTIME_EXCEPTION art_throw_stack_overflow_from_code, artThrowStackOverflowFromCode
+
+    /*
+     * Called by managed code to create and deliver a NoSuchMethodError.
+     */
+ONE_ARG_RUNTIME_EXCEPTION art_throw_no_such_method_from_code, artThrowNoSuchMethodFromCode
+
+    /*
+     * Called by managed code to create and deliver verification errors. Arg1 is kind, arg2 is ref.
+     */
+TWO_ARG_RUNTIME_EXCEPTION art_throw_verification_error_from_code, artThrowVerificationErrorFromCode
+
+    /*
+     * All generated callsites for interface invokes and invocation slow paths will load arguments
+     * as usual - except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
+     * the method_idx.  This wrapper will save arg1-arg3, load the caller's Method*, align the
+     * stack and call the appropriate C helper.
+     * NOTE: "this" is first visible argument of the target, and so can be found in arg1/r1.
+     *
+     * The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting
+     * of the target Method* in r0 and method->code_ in r1.
+     *
+     * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+     * thread and we branch to another stub to deliver it.
+     *
+     * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
+     * pointing back to the original caller.
+     */
+.macro INVOKE_TRAMPOLINE c_name, cxx_name
+    .global \c_name
+    .extern \cxx_name
+    ALIGN_FUNCTION_ENTRY
+\c_name:
+    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME  @ save callee saves in case allocation triggers GC
+    ldr    r2, [sp, #48]                  @ pass caller Method*
+    mov    r3, r9                         @ pass Thread::Current
+    str    sp, [sp, #0]                   @ pass SP
+    bl     \cxx_name                      @ (method_idx, this, caller, Thread*, SP)
+    mov    r12, r1                        @ save Method*->code_
+    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+    cmp    r0, #0                         @ did we find the target?
+    bxne   r12                            @ tail call to target if so
+    DELIVER_PENDING_EXCEPTION
+.endm
+
+INVOKE_TRAMPOLINE art_invoke_interface_trampoline, artInvokeInterfaceTrampoline
+INVOKE_TRAMPOLINE art_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
+
+INVOKE_TRAMPOLINE art_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
+
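The r0/r1 pair described in the comment above is easiest to picture as a two-word struct returned by the resolution helper. The C++ model below is only a sketch of that contract; none of its types or names exist in this patch.

#include <cstdint>
#include <cstdio>

struct Method { const char* name; };

struct ResolveResult {      // maps onto the r0/r1 pair
  Method* method;           // r0: target Method*, or NULL on failure
  void (*code)(Method*);    // r1: method->code_, or NULL on failure
};

static void TargetCode(Method* m) { std::printf("invoked %s\n", m->name); }

// Stand-in for the C helper the trampoline branches to.
static ResolveResult ResolveTarget(uint32_t method_idx) {
  static Method resolved = {"Foo.bar"};
  if (method_idx == 42) return {&resolved, &TargetCode};  // success
  return {nullptr, nullptr};                              // failure: exception is pending
}

int main() {
  ResolveResult r = ResolveTarget(42);
  if (r.method != nullptr) {
    r.code(r.method);   // the assembly does "bxne r12" here: a tail call to the target
  } else {
    std::puts("deliver pending exception");
  }
  return 0;
}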
+    .global art_update_debugger
+    .extern artUpdateDebuggerFromCode
+    /*
+     * On entry, r0 and r1 must be preserved, r2 is dex PC
+     */
+     ALIGN_FUNCTION_ENTRY
+art_update_debugger:
+    mov    r3, r0         @ stash away r0 so that it's saved as if it were an argument
+    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+    mov    r0, r2         @ arg0 is dex PC
+    mov    r1, rSELF      @ arg1 is Thread*
+    mov    r2, sp         @ arg2 is sp
+    bl     artUpdateDebuggerFromCode      @ artUpdateDebuggerFromCode(int32_t, Thread*, Method**)
+    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+    mov    r0, r3         @ restore original r0
+    bx     lr
+
+    .global art_do_long_jump
+    /*
+     * On entry r0 is uint32_t* gprs_ and r1 is uint32_t* fprs_
+     */
+     ALIGN_FUNCTION_ENTRY
+art_do_long_jump:
+    vldm r1, {s0-s31}     @ load all fprs from argument fprs_
+    ldr  r2, [r0, #60]    @ r2 = r15 (PC from gprs_ 60=4*15)
+    add  r0, r0, #12      @ increment r0 to skip gprs_[0..2] 12=4*3
+    ldm  r0, {r3-r14}     @ load remaining gprs from argument gprs_
+    mov  r0, #0           @ clear result registers r0 and r1
+    mov  r1, #0
+    bx   r2               @ do long jump
+
+    .global art_work_around_app_jni_bugs
+    .extern artWorkAroundAppJniBugs
+    /*
+     * Entry point of native methods when JNI bug compatibility is enabled.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_work_around_app_jni_bugs:
+    @ save registers that may contain arguments and LR that will be crushed by a call
+    push {r0-r3, lr}
+    sub sp, #12      @ 3 words of space for alignment
+    mov r0, r9       @ pass Thread::Current
+    mov r1, sp       @ pass SP
+    bl  artWorkAroundAppJniBugs  @ (Thread*, SP)
+    add sp, #12      @ rewind stack
+    mov r12, r0      @ save target address
+    pop {r0-r3, lr}  @ restore possibly modified argument registers
+    bx  r12          @ tail call into JNI routine
+
+    .global art_handle_fill_data_from_code
+    .extern artHandleFillArrayDataFromCode
+    /*
+     * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
+     * failure.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_handle_fill_data_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case exception allocation triggers GC
+    mov    r2, r9                          @ pass Thread::Current
+    mov    r3, sp                          @ pass SP
+    bl     artHandleFillArrayDataFromCode  @ (Array* array, const uint16_t* table, Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r0, #0                          @ success?
+    bxeq   lr                              @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_lock_object_from_code
+    .extern artLockObjectFromCode
+    /*
+     * Entry from managed code that calls artLockObjectFromCode, may block for GC.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_lock_object_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case we block
+    mov    r1, r9                     @ pass Thread::Current
+    mov    r2, sp                     @ pass SP
+    bl     artLockObjectFromCode      @ (Object* obj, Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+
+    .global art_unlock_object_from_code
+    .extern artUnlockObjectFromCode
+    /*
+     * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_unlock_object_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case exception allocation triggers GC
+    mov    r1, r9                   @ pass Thread::Current
+    mov    r2, sp                   @ pass SP
+    bl     artUnlockObjectFromCode  @ (Object* obj, Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r0, #0                   @ success?
+    bxeq   lr                       @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_check_cast_from_code
+    .extern artCheckCastFromCode
+    /*
+     * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_check_cast_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME    @ save callee saves in case exception allocation triggers GC
+    mov    r2, r9                       @ pass Thread::Current
+    mov    r3, sp                       @ pass SP
+    bl     artCheckCastFromCode         @ (Class* a, Class* b, Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r0, #0                       @ success?
+    bxeq   lr                           @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_can_put_array_element_from_code
+    .extern artCanPutArrayElementFromCode
+    /*
+     * Entry from managed code that calls artCanPutArrayElementFromCode and delivers exception on
+     * failure.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_can_put_array_element_from_code:
+    cmp    r0, #0                         @ return if element == NULL
+    bxeq   lr
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME    @ save callee saves in case exception allocation triggers GC
+    mov    r2, r9                         @ pass Thread::Current
+    mov    r3, sp                         @ pass SP
+    bl     artCanPutArrayElementFromCode  @ (Object* element, Class* array_class, Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r0, #0                         @ success?
+    bxeq   lr                             @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_initialize_static_storage_from_code
+    .extern artInitializeStaticStorageFromCode
+    /*
+     * Entry from managed code when static storage is uninitialized; this stub will run the class
+     * initializer and deliver an exception on error. On success the static storage base is
+     * returned.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_initialize_static_storage_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME           @ save callee saves in case of GC
+    mov    r2, r9                              @ pass Thread::Current
+    mov    r3, sp                              @ pass SP
+    @ artInitializeStaticStorageFromCode(uint32_t type_idx, Method* referrer, Thread*, SP)
+    bl     artInitializeStaticStorageFromCode
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r0, #0                              @ success if result is non-null
+    bxne   lr                                  @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_initialize_type_from_code
+    .extern artInitializeTypeFromCode
+    /*
+     * Entry from managed code when dex cache misses for a type_idx
+     */
+    ALIGN_FUNCTION_ENTRY
+art_initialize_type_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME           @ save callee saves in case of GC
+    mov    r2, r9                              @ pass Thread::Current
+    mov    r3, sp                              @ pass SP
+    @ artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*, SP)
+    bl     artInitializeTypeFromCode
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r0, #0                              @ success if result is non-null
+    bxne   lr                                  @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_initialize_type_and_verify_access_from_code
+    .extern artInitializeTypeAndVerifyAccessFromCode
+    /*
+     * Entry from managed code when type_idx needs to be checked for access and dex cache may also
+     * miss.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_initialize_type_and_verify_access_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME           @ save callee saves in case of GC
+    mov    r2, r9                              @ pass Thread::Current
+    mov    r3, sp                              @ pass SP
+    @ artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Method* referrer, Thread*, SP)
+    bl     artInitializeTypeAndVerifyAccessFromCode
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r0, #0                              @ success if result is non-null
+    bxne   lr                                  @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_get32_static_from_code
+    .extern artGet32StaticFromCode
+    /*
+     * Called by managed code to resolve a static field and load a 32-bit primitive value.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_get32_static_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    ldr    r1, [sp, #32]                 @ pass referrer
+    mov    r2, r9                        @ pass Thread::Current
+    mov    r3, sp                        @ pass SP
+    bl     artGet32StaticFromCode        @ (uint32_t field_idx, const Method* referrer, Thread*, SP)
+    ldr    r12, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r12, #0                       @ success if no exception is pending
+    bxeq   lr                            @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_get64_static_from_code
+    .extern artGet64StaticFromCode
+    /*
+     * Called by managed code to resolve a static field and load a 64-bit primitive value.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_get64_static_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    ldr    r1, [sp, #32]                 @ pass referrer
+    mov    r2, r9                        @ pass Thread::Current
+    mov    r3, sp                        @ pass SP
+    bl     artGet64StaticFromCode        @ (uint32_t field_idx, const Method* referrer, Thread*, SP)
+    ldr    r12, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r12, #0                       @ success if no exception is pending
+    bxeq    lr                           @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_get_obj_static_from_code
+    .extern artGetObjStaticFromCode
+    /*
+     * Called by managed code to resolve a static field and load an object reference.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_get_obj_static_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    ldr    r1, [sp, #32]                 @ pass referrer
+    mov    r2, r9                        @ pass Thread::Current
+    mov    r3, sp                        @ pass SP
+    bl     artGetObjStaticFromCode       @ (uint32_t field_idx, const Method* referrer, Thread*, SP)
+    ldr    r12, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r12, #0                       @ success if no exception is pending
+    bxeq   lr                            @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_get32_instance_from_code
+    .extern artGet32InstanceFromCode
+    /*
+     * Called by managed code to resolve an instance field and load a 32-bit primitive value.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_get32_instance_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    ldr    r2, [sp, #32]                 @ pass referrer
+    mov    r3, r9                        @ pass Thread::Current
+    str    sp, [sp, #0]                  @ pass SP
+    bl     artGet32InstanceFromCode      @ (field_idx, Object*, referrer, Thread*, SP)
+    ldr    r12, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r12, #0                       @ success if no exception is pending
+    bxeq   lr                            @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_get64_instance_from_code
+    .extern artGet64InstanceFromCode
+    /*
+     * Called by managed code to resolve an instance field and load a 64-bit primitive value.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_get64_instance_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    ldr    r2, [sp, #32]                 @ pass referrer
+    mov    r3, r9                        @ pass Thread::Current
+    str    sp, [sp, #0]                  @ pass SP
+    bl     artGet64InstanceFromCode      @ (field_idx, Object*, referrer, Thread*, SP)
+    ldr    r12, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r12, #0                       @ success if no exception is pending
+    bxeq    lr                           @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_get_obj_instance_from_code
+    .extern artGetObjInstanceFromCode
+    /*
+     * Called by managed code to resolve an instance field and load an object reference.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_get_obj_instance_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    ldr    r2, [sp, #32]                 @ pass referrer
+    mov    r3, r9                        @ pass Thread::Current
+    str    sp, [sp, #0]                  @ pass SP
+    bl     artGetObjInstanceFromCode     @ (field_idx, Object*, referrer, Thread*, SP)
+    ldr    r12, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r12, #0                       @ success if no exception is pending
+    bxeq   lr                            @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_set32_static_from_code
+    .extern artSet32StaticFromCode
+    /*
+     * Called by managed code to resolve a static field and store a 32-bit primitive value.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_set32_static_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    ldr    r2, [sp, #32]                 @ pass referrer
+    mov    r3, r9                        @ pass Thread::Current
+    str    sp, [sp, #0]                  @ pass SP
+    bl     artSet32StaticFromCode        @ (field_idx, new_val, referrer, Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r0, #0                        @ success if result is 0
+    bxeq   lr                            @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_set64_static_from_code
+    .extern artSet64StaticFromCode
+    /*
+     * Called by managed code to resolve a static field and store a 64-bit primitive value.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_set64_static_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    ldr    r1, [sp, #32]                 @ pass referrer
+    mov    r12, sp                       @ save SP
+    sub    sp, #8                        @ grow frame for alignment with stack args
+    push   {r9, r12}                     @ pass Thread::Current and SP
+    bl     artSet64StaticFromCode        @ (field_idx, referrer, new_val, Thread*, SP)
+    add    sp, #16                       @ release out args
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME   @ TODO: we can clearly save an add here
+    cmp    r0, #0                        @ success if result is 0
+    bxeq   lr                            @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_set_obj_static_from_code
+    .extern artSetObjStaticFromCode
+    /*
+     * Called by managed code to resolve a static field and store an object reference.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_set_obj_static_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    ldr    r2, [sp, #32]                 @ pass referrer
+    mov    r3, r9                        @ pass Thread::Current
+    str    sp, [sp, #0]                  @ pass SP
+    bl     artSetObjStaticFromCode       @ (field_idx, new_val, referrer, Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r0, #0                        @ success if result is 0
+    bxeq   lr                            @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_set32_instance_from_code
+    .extern artSet32InstanceFromCode
+    /*
+     * Called by managed code to resolve an instance field and store a 32-bit primitive value.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_set32_instance_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    ldr    r3, [sp, #32]                 @ pass referrer
+    mov    r12, sp                       @ save SP
+    sub    sp, #8                        @ grow frame for alignment with stack args
+    push   {r9, r12}                     @ pass Thread::Current and SP
+    bl     artSet32InstanceFromCode      @ (field_idx, Object*, new_val, referrer, Thread*, SP)
+    add    sp, #16                       @ release out args
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME   @ TODO: we can clearly save an add here
+    cmp    r0, #0                        @ success if result is 0
+    bxeq   lr                            @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_set64_instance_from_code
+    .extern artSet64InstanceFromCode
+    /*
+     * Called by managed code to resolve an instance field and store a 64-bit primitive value.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_set64_instance_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    mov    r12, sp                       @ save SP
+    sub    sp, #8                        @ grow frame for alignment with stack args
+    push   {r9, r12}                     @ pass Thread::Current and SP
+    bl     artSet64InstanceFromCode      @ (field_idx, Object*, new_val, Thread*, SP)
+    add    sp, #16                       @ release out args
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME   @ TODO: we can clearly save an add here
+    cmp    r0, #0                        @ success if result is 0
+    bxeq   lr                            @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_set_obj_instance_from_code
+    .extern artSetObjInstanceFromCode
+    /*
+     * Called by managed code to resolve an instance field and store an object reference.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_set_obj_instance_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    ldr    r3, [sp, #32]                 @ pass referrer
+    mov    r12, sp                       @ save SP
+    sub    sp, #8                        @ grow frame for alignment with stack args
+    push   {r9, r12}                     @ pass Thread::Current and SP
+    bl     artSetObjInstanceFromCode     @ (field_idx, Object*, new_val, referrer, Thread*, SP)
+    add    sp, #16                       @ release out args
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME   @ TODO: we can clearly save an add here
+    cmp    r0, #0                        @ success if result is 0
+    bxeq   lr                            @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_resolve_string_from_code
+    .extern artResolveStringFromCode
+    /*
+     * Entry from managed code to resolve a string. This stub will allocate a String and deliver an
+     * exception on error. On success the String is returned. R0 holds the referring method,
+     * R1 holds the string index. The fast path check for hit in strings cache has already been
+     * performed.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_resolve_string_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
+    mov    r2, r9                     @ pass Thread::Current
+    mov    r3, sp                     @ pass SP
+    @ artResolveStringFromCode(Method* referrer, uint32_t string_idx, Thread*, SP)
+    bl     artResolveStringFromCode
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r0, #0                     @ success if result is non-null
+    bxne   lr                         @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_alloc_object_from_code
+    .extern artAllocObjectFromCode
+    /*
+     * Called by managed code to allocate an object
+     */
+    ALIGN_FUNCTION_ENTRY
+art_alloc_object_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
+    mov    r2, r9                     @ pass Thread::Current
+    mov    r3, sp                     @ pass SP
+    bl     artAllocObjectFromCode     @ (uint32_t type_idx, Method* method, Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r0, #0                     @ success if result is non-null
+    bxne   lr                         @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_alloc_object_from_code_with_access_check
+    .extern artAllocObjectFromCodeWithAccessCheck
+    /*
+     * Called by managed code to allocate an object when the caller doesn't know whether it has
+     * access to the created type.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_alloc_object_from_code_with_access_check:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
+    mov    r2, r9                     @ pass Thread::Current
+    mov    r3, sp                     @ pass SP
+    bl     artAllocObjectFromCodeWithAccessCheck  @ (uint32_t type_idx, Method* method, Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r0, #0                     @ success if result is non-null
+    bxne   lr                         @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_alloc_array_from_code
+    .extern artAllocArrayFromCode
+    /*
+     * Called by managed code to allocate an array.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_alloc_array_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
+    mov    r3, r9                     @ pass Thread::Current
+    str    sp, [sp, #0]               @ pass SP
+    @ artAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, Thread*, SP)
+    bl     artAllocArrayFromCode
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r0, #0                     @ success if result is non-null
+    bxne   lr                         @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_alloc_array_from_code_with_access_check
+    .extern artAllocArrayFromCodeWithAccessCheck
+    /*
+     * Called by managed code to allocate an array when the caller doesn't know whether it has
+     * access to the created type.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_alloc_array_from_code_with_access_check:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
+    mov    r3, r9                     @ pass Thread::Current
+    str    sp, [sp, #0]               @ pass SP
+    @ artAllocArrayFromCodeWithAccessCheck(type_idx, method, component_count, Thread*, SP)
+    bl     artAllocArrayFromCodeWithAccessCheck
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r0, #0                     @ success if result is non-null
+    bxne   lr                         @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_check_and_alloc_array_from_code
+    .extern artCheckAndAllocArrayFromCode
+    /*
+     * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_check_and_alloc_array_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
+    mov    r3, r9                     @ pass Thread::Current
+    str    sp, [sp, #0]               @ pass SP
+    @ artCheckAndAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t count, Thread* , SP)
+    bl     artCheckAndAllocArrayFromCode
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r0, #0                     @ success if result is non-null
+    bxne   lr                         @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_check_and_alloc_array_from_code_with_access_check
+    .extern artCheckAndAllocArrayFromCodeWithAccessCheck
+    /*
+     * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_check_and_alloc_array_from_code_with_access_check:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
+    mov    r3, r9                     @ pass Thread::Current
+    str    sp, [sp, #0]               @ pass SP
+    @ artCheckAndAllocArrayFromCodeWithAccessCheck(type_idx, method, count, Thread* , SP)
+    bl     artCheckAndAllocArrayFromCodeWithAccessCheck
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    cmp    r0, #0                     @ success if result is non-null
+    bxne   lr                         @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_test_suspend
+    .extern artTestSuspendFromCode
+    /*
+     * Called by managed code when the value in rSUSPEND has been decremented to 0.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_test_suspend:
+    ldr    r0, [rSELF, #THREAD_SUSPEND_COUNT_OFFSET]
+    mov    rSUSPEND, #SUSPEND_CHECK_INTERVAL  @ reset rSUSPEND to SUSPEND_CHECK_INTERVAL
+    cmp    r0, #0                             @ check Thread::Current()->suspend_count_ == 0
+    bxeq   rLR                                @ return if suspend_count_ == 0
+    mov    r0, rSELF
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME          @ save callee saves for stack crawl
+    mov    r1, sp
+    bl     artTestSuspendFromCode             @ (Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+
+    .global art_proxy_invoke_handler
+    .extern artProxyInvokeHandler
+    /*
+     * Called by managed code that is attempting to call a method on a proxy class. On entry
+     * r0 holds the proxy method; r1, r2 and r3 may contain arguments.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_proxy_invoke_handler:
+    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
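+    @ Sketch of the frame assumed below (layout produced by the macro above, 48 bytes,
+    @ reconstructed from the offsets used in this stub): [sp, #0] Method* slot,
+    @ [sp, #8..#16] saved r1-r3, [sp, #20..#40] callee saves, [sp, #44] saved LR.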
+    str     r0, [sp, #0]           @ place proxy method at bottom of frame
+    mov     r2, r9                 @ pass Thread::Current
+    add     r3, sp, #12            @ pointer to r2/r3/LR/caller's Method**/out-args as second arg
+    blx     artProxyInvokeHandler  @ (Method* proxy method, receiver, Thread*, args...)
+    ldr     r12, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
+    ldr     lr,  [sp, #44]         @ restore lr
+    ldrd    r0,  [sp, #12]         @ load r0/r1 from r2/r3 that were overwritten with the out args
+    add     sp,  #48               @ pop frame
+    cmp     r12, #0                @ success if no exception is pending
+    bxeq    lr                     @ return on success
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_trace_entry_from_code
+    .extern artTraceMethodEntryFromCode
+    /*
+     * Routine that intercepts method calls.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_trace_entry_from_code:
+    push  {r0-r3}        @ save arguments (4 words)
+    mov   r1, r9         @ pass Thread::Current
+    mov   r2, lr         @ pass LR
+    blx   artTraceMethodEntryFromCode  @ (Method*, Thread*, LR)
+    mov   r12, r0        @ r12 holds reference to code
+    pop   {r0-r3}        @ restore arguments
+    blx   r12            @ call method
+    /* intentional fallthrough */
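+    @ The blx r12 above set LR to the address that follows it, so when the traced
+    @ method returns it lands in art_trace_exit_from_code below, which obtains the
+    @ original caller's return address from the runtime.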
+
+    .global art_trace_exit_from_code
+    .extern artTraceMethodExitFromCode
+    /*
+     * Routine that intercepts method returns.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_trace_exit_from_code:
+    push  {r0-r1}        @ save return value
+    blx   artTraceMethodExitFromCode  @ ()
+    mov   lr, r0         @ restore link register
+    pop   {r0, r1}       @ restore return value
+    bx    lr             @ return
+
+    .global art_shl_long
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
+     * 6 bits.
+     * On entry:
+     *   r0: low word
+     *   r1: high word
+     *   r2: shift count
+     */
+    /* shl-long vAA, vBB, vCC */
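+    /*
+     * Illustrative example (not part of the original comment): for a shift count
+     * of 40 the masked count is 40, the movpl path yields r1 = r0 << 8 and the
+     * final mov clears r0, i.e. the 64-bit value shifted left by 40.
+     */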
+    ALIGN_FUNCTION_ENTRY
+art_shl_long:
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    mov     r1, r1, asl r2              @  r1<- r1 << r2
+    rsb     r3, r2, #32                 @  r3<- 32 - r2
+    orr     r1, r1, r0, lsr r3          @  r1<- r1 | (r0 >> (32-r2))
+    subs    ip, r2, #32                 @  ip<- r2 - 32
+    movpl   r1, r0, asl ip              @  if r2 >= 32, r1<- r0 << (r2-32)
+    mov     r0, r0, asl r2              @  r0<- r0 << r2
+    bx      lr
+
+    .global art_shr_long
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
+     * 6 bits.
+     * On entry:
+     *   r0: low word
+     *   r1: high word
+     *   r2: shift count
+     */
+    /* shr-long vAA, vBB, vCC */
+    ALIGN_FUNCTION_ENTRY
+art_shr_long:
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    mov     r0, r0, lsr r2              @  r0<- r0 >> r2
+    rsb     r3, r2, #32                 @  r3<- 32 - r2
+    orr     r0, r0, r1, asl r3          @  r0<- r0 | (r1 << (32-r2))
+    subs    ip, r2, #32                 @  ip<- r2 - 32
+    movpl   r0, r1, asr ip              @  if r2 >= 32, r0<-r1 >> (r2-32)
+    mov     r1, r1, asr r2              @  r1<- r1 >> r2
+    bx      lr
+
+    .global art_ushr_long
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
+     * 6 bits.
+     * On entry:
+     *   r0: low word
+     *   r1: high word
+     *   r2: shift count
+     */
+    /* ushr-long vAA, vBB, vCC */
+    ALIGN_FUNCTION_ENTRY
+art_ushr_long:
+    and     r2, r2, #63                 @ r2<- r2 & 0x3f
+    mov     r0, r0, lsr r2              @  r0<- r0 >>> r2
+    rsb     r3, r2, #32                 @  r3<- 32 - r2
+    orr     r0, r0, r1, asl r3          @  r0<- r0 | (r1 << (32-r2))
+    subs    ip, r2, #32                 @  ip<- r2 - 32
+    movpl   r0, r1, lsr ip              @  if r2 >= 32, r0<-r1 >>> (r2-32)
+    mov     r1, r1, lsr r2              @  r1<- r1 >>> r2
+    bx      lr
+
+    .balign 4
+    .global art_indexof
+art_indexof:
+    /*
+     * String's indexOf.
+     *
+     * On entry:
+     *    r0:   string object (known non-null)
+     *    r1:   char to match
+     *    r2:   Starting offset in string data
+     */
+
+    push {r4, r10-r11, lr} @ 4 words of callee saves
+    ldr   r3, [r0, #STRING_COUNT_OFFSET]
+    ldr   r12, [r0, #STRING_OFFSET_OFFSET]
+    ldr   r0, [r0, #STRING_VALUE_OFFSET]
+
+    /* Clamp start to [0..count] */
+    cmp   r2, #0
+    movlt r2, #0
+    cmp   r2, r3
+    movgt r2, r3
+
+    /* Build a pointer to the start of string data */
+    add   r0, #STRING_DATA_OFFSET
+    add   r0, r0, r12, lsl #1
+
+    /* Save a copy in r12 to later compute result */
+    mov   r12, r0
+
+    /* Build pointer to start of data to compare and pre-bias */
+    add   r0, r0, r2, lsl #1
+    sub   r0, #2
+
+    /* Compute iteration count */
+    sub   r2, r3, r2
+
+    /*
+     * At this point we have:
+     *   r0: start of data to test
+     *   r1: char to compare
+     *   r2: iteration count
+     *   r12: original start of string data
+     *   r3, r4, r10, r11 available for loading string data
+     */
+
+    subs  r2, #4
+    blt   indexof_remainder
+
+indexof_loop4:
+    ldrh  r3, [r0, #2]!
+    ldrh  r4, [r0, #2]!
+    ldrh  r10, [r0, #2]!
+    ldrh  r11, [r0, #2]!
+    cmp   r3, r1
+    beq   match_0
+    cmp   r4, r1
+    beq   match_1
+    cmp   r10, r1
+    beq   match_2
+    cmp   r11, r1
+    beq   match_3
+    subs  r2, #4
+    bge   indexof_loop4
+
+indexof_remainder:
+    adds    r2, #4
+    beq     indexof_nomatch
+
+indexof_loop1:
+    ldrh  r3, [r0, #2]!
+    cmp   r3, r1
+    beq   match_3
+    subs  r2, #1
+    bne   indexof_loop1
+
+indexof_nomatch:
+    mov   r0, #-1
+    pop {r4, r10-r11, pc}
+
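+    /*
+     * The match_N labels below compute the result as
+     * (address of the matching char - r12) / 2. The loop has advanced r0 past the
+     * matching halfword, so match_0..match_2 first subtract the over-advance
+     * (6, 4 or 2 bytes) before dividing by the char size.
+     */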
+match_0:
+    sub   r0, #6
+    sub   r0, r12
+    asr   r0, r0, #1
+    pop {r4, r10-r11, pc}
+match_1:
+    sub   r0, #4
+    sub   r0, r12
+    asr   r0, r0, #1
+    pop {r4, r10-r11, pc}
+match_2:
+    sub   r0, #2
+    sub   r0, r12
+    asr   r0, r0, #1
+    pop {r4, r10-r11, pc}
+match_3:
+    sub   r0, r12
+    asr   r0, r0, #1
+    pop {r4, r10-r11, pc}
+
+
+    /*
+     * String's compareTo.
+     *
+     * Requires rARG0/rARG1 to have been previously checked for null.  Will
+     * return negative if this's string is < comp, 0 if they are the
+     * same and positive if >.
+     *
+     * On entry:
+     *    r0:   this object pointer
+     *    r1:   comp object pointer
+     *
+     */
+
+    .balign 4
+    .global art_string_compareto
+    .extern __memcmp16
+art_string_compareto:
+    mov    r2, r0         @ this to r2, opening up r0 for return value
+    subs   r0, r2, r1     @ Same?
+    bxeq   lr
+
+    push {r4, r7-r12, lr} @ 8 words - keep alignment
+
+    ldr    r4, [r2, #STRING_OFFSET_OFFSET]
+    ldr    r9, [r1, #STRING_OFFSET_OFFSET]
+    ldr    r7, [r2, #STRING_COUNT_OFFSET]
+    ldr    r10, [r1, #STRING_COUNT_OFFSET]
+    ldr    r2, [r2, #STRING_VALUE_OFFSET]
+    ldr    r1, [r1, #STRING_VALUE_OFFSET]
+
+    /*
+     * At this point, we have:
+     *    value:  r2/r1
+     *    offset: r4/r9
+     *    count:  r7/r10
+     * We're going to compute
+     *    r11 <- countDiff
+     *    r10 <- minCount
+     */
+     subs  r11, r7, r10
+     movls r10, r7
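+     /*
+      * If the shorter string is a prefix of the longer one, the loops below fall
+      * through to returnDiff and the count difference in r11 becomes the result,
+      * matching String.compareTo semantics.
+      */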
+
+     /* Now, build pointers to the string data */
+     add   r2, r2, r4, lsl #1
+     add   r1, r1, r9, lsl #1
+     /*
+      * Note: data pointers point to previous element so we can use pre-index
+      * mode with base writeback.
+      */
+     add   r2, #STRING_DATA_OFFSET-2   @ offset to contents[-1]
+     add   r1, #STRING_DATA_OFFSET-2   @ offset to contents[-1]
+
+     /*
+      * At this point we have:
+      *   r2: *this string data
+      *   r1: *comp string data
+      *   r10: iteration count for comparison
+      *   r11: value to return if the first part of the string is equal
+      *   r0: reserved for result
+      *   r3, r4, r7, r8, r9, r12 available for loading string data
+      */
+
+    subs  r10, #2
+    blt   do_remainder2
+
+      /*
+       * Unroll the first two checks so we can quickly catch early mismatch
+       * on long strings (but preserve incoming alignment)
+       */
+
+    ldrh  r3, [r2, #2]!
+    ldrh  r4, [r1, #2]!
+    ldrh  r7, [r2, #2]!
+    ldrh  r8, [r1, #2]!
+    subs  r0, r3, r4
+    subeqs  r0, r7, r8
+    bne   done
+    cmp   r10, #28
+    bgt   do_memcmp16
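+    @ Heuristic: beyond this remaining length the bulk __memcmp16 routine is
+    @ assumed to be faster than the hand-rolled loops below.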
+    subs  r10, #3
+    blt   do_remainder
+
+loopback_triple:
+    ldrh  r3, [r2, #2]!
+    ldrh  r4, [r1, #2]!
+    ldrh  r7, [r2, #2]!
+    ldrh  r8, [r1, #2]!
+    ldrh  r9, [r2, #2]!
+    ldrh  r12,[r1, #2]!
+    subs  r0, r3, r4
+    subeqs  r0, r7, r8
+    subeqs  r0, r9, r12
+    bne   done
+    subs  r10, #3
+    bge   loopback_triple
+
+do_remainder:
+    adds  r10, #3
+    beq   returnDiff
+
+loopback_single:
+    ldrh  r3, [r2, #2]!
+    ldrh  r4, [r1, #2]!
+    subs  r0, r3, r4
+    bne   done
+    subs  r10, #1
+    bne     loopback_single
+
+returnDiff:
+    mov   r0, r11
+    pop   {r4, r7-r12, pc}
+
+do_remainder2:
+    adds  r10, #2
+    bne   loopback_single
+    mov   r0, r11
+    pop   {r4, r7-r12, pc}
+
+    /* Long string case */
+do_memcmp16:
+    mov   r7, r11
+    add   r0, r2, #2
+    add   r1, r1, #2
+    mov   r2, r10
+    bl    __memcmp16
+    cmp   r0, #0
+    moveq r0, r7
+done:
+    pop   {r4, r7-r12, pc}
diff --git a/src/oat/runtime/arm/stub_arm.cc b/src/oat/runtime/arm/stub_arm.cc
new file mode 100644
index 0000000..d6426e8
--- /dev/null
+++ b/src/oat/runtime/arm/stub_arm.cc
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_internal.h"
+#include "oat/utils/arm/assembler_arm.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "object.h"
+#include "stack_indirect_reference_table.h"
+
+#define __ assembler->
+
+namespace art {
+namespace arm {
+
+ByteArray* ArmCreateResolutionTrampoline(Runtime::TrampolineType type) {
+  UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
+  // | Out args |
+  // | Method*  | <- SP on entry
+  // | LR       |    return address into caller
+  // | ...      |    callee saves
+  // | R3       |    possible argument
+  // | R2       |    possible argument
+  // | R1       |    possible argument
+  // | R0       |    junk on call to UnresolvedDirectMethodTrampolineFromCode, holds result Method*
+  // | Method*  |    Callee save Method* set up by UnresolvedDirectMethodTrampolineFromCode
+  // Save callee saves and ready frame for exception delivery
+  RegList save = (1 << R1) | (1 << R2) | (1 << R3) | (1 << R5) | (1 << R6) | (1 << R7) | (1 << R8) |
+                 (1 << R10) | (1 << R11) | (1 << LR);
+  // TODO: enable when GetCalleeSaveMethod is available at stub generation time
+  // DCHECK_EQ(save, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetCoreSpillMask());
+  __ PushList(save);
+  __ LoadFromOffset(kLoadWord, R12, TR,
+                    ENTRYPOINT_OFFSET(pUnresolvedDirectMethodTrampolineFromCode));
+  __ mov(R2, ShifterOperand(TR));  // Pass Thread::Current() in R2
+  __ LoadImmediate(R3, type);
+  __ IncreaseFrameSize(8);         // 2 words of space for alignment
+  __ mov(R1, ShifterOperand(SP));  // Pass SP
+  // Call to unresolved direct method trampoline (method_idx, sp, Thread*, is_static)
+  __ blx(R12);
+  __ mov(R12, ShifterOperand(R0));  // Save code address returned into R12
+  // Restore registers which may have been modified by GC, "R0" will hold the Method*
+  __ DecreaseFrameSize(4);
+  __ PopList((1 << R0) | save);
+  __ bx(R12);  // Leaf call to method's code
+
+  __ bkpt(0);
+
+  assembler->EmitSlowPaths();
+  size_t cs = assembler->CodeSize();
+  SirtRef<ByteArray> resolution_trampoline(ByteArray::Alloc(cs));
+  CHECK(resolution_trampoline.get() != NULL);
+  MemoryRegion code(resolution_trampoline->GetData(), resolution_trampoline->GetLength());
+  assembler->FinalizeInstructions(code);
+
+  return resolution_trampoline.get();
+}
+
+typedef void (*ThrowAme)(Method*, Thread*);
+
+ByteArray* CreateAbstractMethodErrorStub() {
+  UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
+  // Save callee saves and ready frame for exception delivery
+  RegList save = (1 << R4) | (1 << R5) | (1 << R6) | (1 << R7) | (1 << R8) | (1 << R9) |
+                 (1 << R10) | (1 << R11) | (1 << LR);
+  // TODO: enable when GetCalleeSaveMethod is available at stub generation time
+  // DCHECK_EQ(save, Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveAll)->GetCoreSpillMask());
+  __ PushList(save);         // push {r4-r11, lr} - 9 words of callee saves
+  // TODO: enable when GetCalleeSaveMethod is available at stub generation time
+  // DCHECK_EQ(Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveAll)->GetFpSpillMask(), 0xFFFFU);
+  __ Emit(0xed2d0a20);       // vpush {s0-s31}
+
+  __ IncreaseFrameSize(12);  // 3 words of space, bottom word will hold callee save Method*
+
+  // R0 is the Method* already
+  __ mov(R1, ShifterOperand(R9));  // Pass Thread::Current() in R1
+  __ mov(R2, ShifterOperand(SP));  // Pass SP in R2
+  // Call to throw AbstractMethodError
+  __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pThrowAbstractMethodErrorFromCode));
+  __ mov(PC, ShifterOperand(R12));  // Leaf call to routine that never returns
+
+  __ bkpt(0);
+
+  assembler->EmitSlowPaths();
+
+  size_t cs = assembler->CodeSize();
+  SirtRef<ByteArray> abstract_stub(ByteArray::Alloc(cs));
+  CHECK(abstract_stub.get() != NULL);
+  MemoryRegion code(abstract_stub->GetData(), abstract_stub->GetLength());
+  assembler->FinalizeInstructions(code);
+
+  return abstract_stub.get();
+}
+
+ByteArray* CreateJniDlsymLookupStub() {
+  UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
+  // Build frame and save argument registers and LR.
+  RegList save = (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3) | (1 << LR);
+  __ PushList(save);
+  __ AddConstant(SP, -12);         // Ensure 16-byte alignment
+  __ mov(R0, ShifterOperand(R9));  // Pass Thread::Current() in R0
+  // Call FindNativeMethod
+  __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pFindNativeMethod));
+  __ blx(R12);
+  __ mov(R12, ShifterOperand(R0));  // Save result of FindNativeMethod in R12
+  __ AddConstant(SP, 12);           // Restore registers (including outgoing arguments)
+  __ PopList(save);
+  __ cmp(R12, ShifterOperand(0));
+  __ bx(R12, NE);                   // If R12 != 0 tail call into native code
+  __ bx(LR);                        // Return to caller to handle exception
+
+  assembler->EmitSlowPaths();
+
+  size_t cs = assembler->CodeSize();
+  SirtRef<ByteArray> jni_stub(ByteArray::Alloc(cs));
+  CHECK(jni_stub.get() != NULL);
+  MemoryRegion code(jni_stub->GetData(), jni_stub->GetLength());
+  assembler->FinalizeInstructions(code);
+
+  return jni_stub.get();
+}
+
+} // namespace arm
+} // namespace art
diff --git a/src/oat/runtime/callee_save_frame.h b/src/oat/runtime/callee_save_frame.h
new file mode 100644
index 0000000..96811ce
--- /dev/null
+++ b/src/oat/runtime/callee_save_frame.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_RUNTIME_CALLEE_SAVE_FRAME_H_
+#define ART_SRC_OAT_RUNTIME_CALLEE_SAVE_FRAME_H_
+
+#include "thread.h"
+
+namespace art {
+
+class Method;
+
+// Place a special frame at the TOS that will save the callee saves for the given type
+static void  FinishCalleeSaveFrameSetup(Thread* self, Method** sp, Runtime::CalleeSaveType type) {
+  // Be aware the store below may well stomp on an incoming argument
+  *sp = Runtime::Current()->GetCalleeSaveMethod(type);
+  self->SetTopOfStack(sp, 0);
+  self->VerifyStack();
+}
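+// Illustrative (hypothetical) use from a runtime entry point; artFooFromCode is not
+// a real entry point, it only sketches the expected calling pattern:
+//
+//   extern "C" void artFooFromCode(Thread* self, Method** sp) {
+//     FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+//     // ... work that may allocate, trigger GC or throw ...
+//   }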
+
+}  // namespace art
+
+#endif  // ART_SRC_OAT_RUNTIME_CALLEE_SAVE_FRAME_H_
diff --git a/src/oat/runtime/context.cc b/src/oat/runtime/context.cc
new file mode 100644
index 0000000..998e762
--- /dev/null
+++ b/src/oat/runtime/context.cc
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "context.h"
+
+#include "arm/context_arm.h"
+#include "x86/context_x86.h"
+
+namespace art {
+
+Context* Context::Create() {
+#if defined(__arm__)
+  return new arm::ArmContext();
+#else
+  return new x86::X86Context();
+#endif
+}
+
+}  // namespace art
diff --git a/src/oat/runtime/context.h b/src/oat/runtime/context.h
new file mode 100644
index 0000000..b8852d5
--- /dev/null
+++ b/src/oat/runtime/context.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_RUNTIME_CONTEXT_H_
+#define ART_SRC_OAT_RUNTIME_CONTEXT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace art {
+
+class Frame;
+
+// Representation of a thread's context on the executing machine
+class Context {
+ public:
+  // Creates a context for the running architecture
+  static Context* Create();
+
+  virtual ~Context() {}
+
+  // Read values from callee saves in the given frame. The frame also holds
+  // the method that holds the layout.
+  virtual void FillCalleeSaves(const Frame& fr) = 0;
+
+  // Set the stack pointer value
+  virtual void SetSP(uintptr_t new_sp) = 0;
+
+  // Set the program counter value
+  virtual void SetPC(uintptr_t new_pc) = 0;
+
+  // Read the given GPR
+  virtual uintptr_t GetGPR(uint32_t reg) = 0;
+
+  // Switch execution of the executing context to this context
+  virtual void DoLongJump() = 0;
+};
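+// Illustrative note, not part of this interface: exception delivery typically
+// creates a Context, fills callee saves from each frame walked, sets the catch
+// handler PC and frame SP, and then calls DoLongJump() to resume execution there.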
+
+class VmapTable {
+ public:
+  explicit VmapTable(const uint16_t* table) : table_(table) {
+  }
+
+  uint16_t operator[](size_t i) const {
+    return table_[i + 1];
+  }
+
+  size_t size() const {
+    return table_[0];
+  }
+
+  /*
+   * WARNING: This code should be changed or renamed.  The "reg"
+   * argument is a Dalvik virtual register number, but the way
+   * the vmap and register promotion work, a Dalvik vReg can have
+   * neither, one, or both of core register and floating point register
+   * identities. The "INVALID_VREG" marker of 0xffff below separates the
+   * core promoted registers from the floating point promoted registers,
+   * and thus terminates the search before reaching the fp section.
+   * This is likely the desired behavior for GC, as references won't
+   * ever be promoted to float registers - but we'll probably want to
+   * rework this shared code to make it useful for the debugger as well.
+   */
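+  // Illustrative example (assumed table contents): for a vmap table encoded as
+  // {3, 16, 1, 0xffff}, size() is 3 and IsInContext(1, off) succeeds with
+  // vmap_offset == 1, while a query for an fp-promoted vReg stops at the 0xffff
+  // marker and returns false.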
+  // Is register 'reg' in the context or on the stack?
+  bool IsInContext(size_t reg, uint32_t& vmap_offset) const {
+    vmap_offset = 0xEBAD0FF5;
+    // TODO: take advantage of the registers being ordered
+    for (size_t i = 0; i < size(); ++i) {
+      // Stop if we find what we are looking for...
+      if (table_[i + 1] == reg) {
+        vmap_offset = i;
+        return true;
+      }
+      // ...or the INVALID_VREG that marks lr.
+      // TODO: x86?
+      if (table_[i + 1] == 0xffff) {
+        break;
+      }
+    }
+    return false;
+  }
+
+ private:
+  const uint16_t* table_;
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_OAT_RUNTIME_CONTEXT_H_
diff --git a/src/oat/runtime/mips/oat_support_entrypoints_mips.cc b/src/oat/runtime/mips/oat_support_entrypoints_mips.cc
new file mode 100644
index 0000000..e20332a
--- /dev/null
+++ b/src/oat/runtime/mips/oat_support_entrypoints_mips.cc
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "oat/runtime/oat_support_entrypoints.h"
+
+namespace art {
+
+// Alloc entrypoints.
+extern "C" void* art_alloc_array_from_code(uint32_t, void*, int32_t);
+extern "C" void* art_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_alloc_object_from_code(uint32_t type_idx, void* method);
+extern "C" void* art_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method);
+extern "C" void* art_check_and_alloc_array_from_code(uint32_t, void*, int32_t);
+extern "C" void* art_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+
+// Cast entrypoints.
+extern uint32_t IsAssignableFromCode(const Class* klass, const Class* ref_class);
+extern "C" void art_can_put_array_element_from_code(void*, void*);
+extern "C" void art_check_cast_from_code(void*, void*);
+
+// Debug entrypoints.
+extern void DebugMe(Method* method, uint32_t info);
+extern "C" void art_update_debugger(void*, void*, int32_t, void*);
+
+// DexCache entrypoints.
+extern "C" void* art_initialize_static_storage_from_code(uint32_t, void*);
+extern "C" void* art_initialize_type_from_code(uint32_t, void*);
+extern "C" void* art_initialize_type_and_verify_access_from_code(uint32_t, void*);
+extern "C" void* art_resolve_string_from_code(void*, uint32_t);
+
+// Field entrypoints.
+extern "C" int art_set32_instance_from_code(uint32_t, void*, int32_t);
+extern "C" int art_set32_static_from_code(uint32_t, int32_t);
+extern "C" int art_set64_instance_from_code(uint32_t, void*, int64_t);
+extern "C" int art_set64_static_from_code(uint32_t, int64_t);
+extern "C" int art_set_obj_instance_from_code(uint32_t, void*, void*);
+extern "C" int art_set_obj_static_from_code(uint32_t, void*);
+extern "C" int32_t art_get32_instance_from_code(uint32_t, void*);
+extern "C" int32_t art_get32_static_from_code(uint32_t);
+extern "C" int64_t art_get64_instance_from_code(uint32_t, void*);
+extern "C" int64_t art_get64_static_from_code(uint32_t);
+extern "C" void* art_get_obj_instance_from_code(uint32_t, void*);
+extern "C" void* art_get_obj_static_from_code(uint32_t);
+
+// FillArray entrypoint.
+extern "C" void art_handle_fill_data_from_code(void*, void*);
+
+// JNI entrypoints.
+extern Object* DecodeJObjectInThread(Thread* thread, jobject obj);
+extern void* FindNativeMethod(Thread* thread);
+
+// Lock entrypoints.
+extern "C" void art_lock_object_from_code(void*);
+extern "C" void art_unlock_object_from_code(void*);
+
+// Math entrypoints.
+extern int32_t CmpgDouble(double a, double b);
+extern int32_t CmplDouble(double a, double b);
+extern int32_t CmpgFloat(float a, float b);
+extern int32_t CmplFloat(float a, float b);
+
+// Math conversions.
+extern "C" float __floatsisf(int op1);        // INT_TO_FLOAT
+extern "C" int32_t __fixsfsi(float op1);      // FLOAT_TO_INT
+extern "C" float __truncdfsf2(double op1);    // DOUBLE_TO_FLOAT
+extern "C" double __extendsfdf2(float op1);   // FLOAT_TO_DOUBLE
+extern "C" double __floatsidf(int op1);       // INT_TO_DOUBLE
+extern "C" int32_t __fixdfsi(double op1);     // DOUBLE_TO_INT
+extern "C" float __floatdisf(int64_t op1);    // LONG_TO_FLOAT
+extern "C" double __floatdidf(int64_t op1);   // LONG_TO_DOUBLE
+extern "C" int64_t __fixsfdi(float op1);      // FLOAT_TO_LONG
+extern "C" int64_t __fixdfdi(double op1);     // DOUBLE_TO_LONG
+extern int64_t D2L(double d);
+extern int64_t F2L(float f);
+
+// Single-precision FP arithmetic.
+extern "C" float __addsf3(float a, float b);   // ADD_FLOAT[_2ADDR]
+extern "C" float __subsf3(float a, float b);   // SUB_FLOAT[_2ADDR]
+extern "C" float __divsf3(float a, float b);   // DIV_FLOAT[_2ADDR]
+extern "C" float __mulsf3(float a, float b);   // MUL_FLOAT[_2ADDR]
+extern "C" float fmodf(float a, float b);      // REM_FLOAT[_2ADDR]
+
+// Double-precision FP arithmetic.
+extern "C" double __adddf3(double a, double b); // ADD_DOUBLE[_2ADDR]
+extern "C" double __subdf3(double a, double b); // SUB_DOUBLE[_2ADDR]
+extern "C" double __divdf3(double a, double b); // DIV_DOUBLE[_2ADDR]
+extern "C" double __muldf3(double a, double b); // MUL_DOUBLE[_2ADDR]
+extern "C" double fmod(double a, double b);     // REM_DOUBLE[_2ADDR]
+
+// Long long arithmetic - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
+extern "C" long long __divdi3(int64_t op1, int64_t op2);
+extern "C" long long __moddi3(int64_t op1, int64_t op2);
+extern "C" uint64_t art_shl_long(uint64_t, uint32_t);
+extern "C" uint64_t art_shr_long(uint64_t, uint32_t);
+extern "C" uint64_t art_ushr_long(uint64_t, uint32_t);
+
+// Intrinsic entrypoints.
+extern "C" int32_t __memcmp16(void*, void*, int32_t);
+extern "C" int32_t art_indexof(void*, uint32_t, uint32_t, uint32_t);
+extern "C" int32_t art_string_compareto(void*, void*);
+
+// Invoke entrypoints.
+const void* UnresolvedDirectMethodTrampolineFromCode(Method*, Method**, Thread*,
+                                                     Runtime::TrampolineType);
+extern "C" void art_invoke_direct_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_invoke_interface_trampoline(uint32_t, void*);
+extern "C" void art_invoke_interface_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_invoke_static_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_invoke_super_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
+
+// Thread entrypoints.
+extern void CheckSuspendFromCode(Thread* thread);
+extern "C" void art_test_suspend();
+
+// Throw entrypoints.
+extern void ThrowAbstractMethodErrorFromCode(Method* method, Thread* thread, Method** sp);
+extern "C" void art_deliver_exception_from_code(void*);
+extern "C" void art_throw_array_bounds_from_code(int32_t index, int32_t limit);
+extern "C" void art_throw_div_zero_from_code();
+extern "C" void art_throw_no_such_method_from_code(int32_t method_idx);
+extern "C" void art_throw_null_pointer_exception_from_code();
+extern "C" void art_throw_stack_overflow_from_code(void*);
+extern "C" void art_throw_verification_error_from_code(int32_t src1, int32_t ref);
+
+// Trace entrypoints.
+extern "C" void art_trace_entry_from_code(void*);
+extern "C" void art_trace_exit_from_code();
+
+void InitEntryPoints(EntryPoints* points) {
+  // Alloc
+  points->pAllocArrayFromCode = art_alloc_array_from_code;
+  points->pAllocArrayFromCodeWithAccessCheck = art_alloc_array_from_code_with_access_check;
+  points->pAllocObjectFromCode = art_alloc_object_from_code;
+  points->pAllocObjectFromCodeWithAccessCheck = art_alloc_object_from_code_with_access_check;
+  points->pCheckAndAllocArrayFromCode = art_check_and_alloc_array_from_code;
+  points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_check_and_alloc_array_from_code_with_access_check;
+
+  // Cast
+  points->pInstanceofNonTrivialFromCode = IsAssignableFromCode;
+  points->pCanPutArrayElementFromCode = art_can_put_array_element_from_code;
+  points->pCheckCastFromCode = art_check_cast_from_code;
+
+  // Debug
+  points->pDebugMe = DebugMe;
+  points->pUpdateDebuggerFromCode = NULL; // Controlled by SetDebuggerUpdatesEnabled.
+
+  // DexCache
+  points->pInitializeStaticStorage = art_initialize_static_storage_from_code;
+  points->pInitializeTypeAndVerifyAccessFromCode = art_initialize_type_and_verify_access_from_code;
+  points->pInitializeTypeFromCode = art_initialize_type_from_code;
+  points->pResolveStringFromCode = art_resolve_string_from_code;
+
+  // Field
+  points->pSet32Instance = art_set32_instance_from_code;
+  points->pSet32Static = art_set32_static_from_code;
+  points->pSet64Instance = art_set64_instance_from_code;
+  points->pSet64Static = art_set64_static_from_code;
+  points->pSetObjInstance = art_set_obj_instance_from_code;
+  points->pSetObjStatic = art_set_obj_static_from_code;
+  points->pGet32Instance = art_get32_instance_from_code;
+  points->pGet64Instance = art_get64_instance_from_code;
+  points->pGetObjInstance = art_get_obj_instance_from_code;
+  points->pGet32Static = art_get32_static_from_code;
+  points->pGet64Static = art_get64_static_from_code;
+  points->pGetObjStatic = art_get_obj_static_from_code;
+
+  // FillArray
+  points->pHandleFillArrayDataFromCode = art_handle_fill_data_from_code;
+
+  // JNI
+  points->pDecodeJObjectInThread = DecodeJObjectInThread;
+  points->pFindNativeMethod = FindNativeMethod;
+
+  // Locks
+  points->pLockObjectFromCode = art_lock_object_from_code;
+  points->pUnlockObjectFromCode = art_unlock_object_from_code;
+
+  // Math
+  points->pCmpgDouble = CmpgDouble;
+  points->pCmpgFloat = CmpgFloat;
+  points->pCmplDouble = CmplDouble;
+  points->pCmplFloat = CmplFloat;
+  points->pDadd = __adddf3;
+  points->pDdiv = __divdf3;
+  points->pDmul = __muldf3;
+  points->pDsub = __subdf3;
+  points->pF2d = __extendsfdf2;
+  points->pFmod = fmod;
+  points->pI2d = __floatsidf;
+  points->pL2d = __floatdidf;
+  points->pD2f = __truncdfsf2;
+  points->pFadd = __addsf3;
+  points->pFdiv = __divsf3;
+  points->pFmodf = fmodf;
+  points->pFmul = __mulsf3;
+  points->pFsub = __subsf3;
+  points->pI2f = __floatsisf;
+  points->pL2f = __floatdisf;
+  points->pD2iz = __fixdfsi;
+  points->pF2iz = __fixsfsi;
+  points->pIdiv = NULL;
+  points->pIdivmod = NULL;
+  points->pD2l = D2L;
+  points->pF2l = F2L;
+  points->pLadd = NULL;
+  points->pLand = NULL;
+  points->pLdivmod = NULL;
+  points->pLmul = NULL;
+  points->pLor = NULL;
+  points->pLsub = NULL;
+  points->pLxor = NULL;
+  points->pShlLong = art_shl_long;
+  points->pShrLong = art_shr_long;
+  points->pUshrLong = art_ushr_long;
+
+  // Intrinsics
+  points->pIndexOf = art_indexof;
+  points->pMemcmp16 = __memcmp16;
+  points->pStringCompareTo = art_string_compareto;
+  points->pMemcpy = memcpy;
+
+  // Invocation
+  points->pUnresolvedDirectMethodTrampolineFromCode = UnresolvedDirectMethodTrampolineFromCode;
+  points->pInvokeDirectTrampolineWithAccessCheck = art_invoke_direct_trampoline_with_access_check;
+  points->pInvokeInterfaceTrampoline = art_invoke_interface_trampoline;
+  points->pInvokeInterfaceTrampolineWithAccessCheck = art_invoke_interface_trampoline_with_access_check;
+  points->pInvokeStaticTrampolineWithAccessCheck = art_invoke_static_trampoline_with_access_check;
+  points->pInvokeSuperTrampolineWithAccessCheck = art_invoke_super_trampoline_with_access_check;
+  points->pInvokeVirtualTrampolineWithAccessCheck = art_invoke_virtual_trampoline_with_access_check;
+
+  // Thread
+  points->pCheckSuspendFromCode = CheckSuspendFromCode;
+  points->pTestSuspendFromCode = art_test_suspend;
+
+  // Throws
+  points->pDeliverException = art_deliver_exception_from_code;
+  points->pThrowAbstractMethodErrorFromCode = ThrowAbstractMethodErrorFromCode;
+  points->pThrowArrayBoundsFromCode = art_throw_array_bounds_from_code;
+  points->pThrowDivZeroFromCode = art_throw_div_zero_from_code;
+  points->pThrowNoSuchMethodFromCode = art_throw_no_such_method_from_code;
+  points->pThrowNullPointerFromCode = art_throw_null_pointer_exception_from_code;
+  points->pThrowStackOverflowFromCode = art_throw_stack_overflow_from_code;
+  points->pThrowVerificationErrorFromCode = art_throw_verification_error_from_code;
+}
+
+void ChangeDebuggerEntryPoint(EntryPoints* points, bool enabled) {
+  points->pUpdateDebuggerFromCode = (enabled ? art_update_debugger : NULL);
+}
+
+bool IsTraceExitPc(uintptr_t pc) {
+  UNIMPLEMENTED(FATAL);
+  return false;
+}
+
+void* GetLogTraceEntryPoint() {
+  UNIMPLEMENTED(FATAL);
+  return NULL;
+}
+
+}  // namespace art
diff --git a/src/oat/runtime/mips/runtime_support_mips.S b/src/oat/runtime/mips/runtime_support_mips.S
new file mode 100644
index 0000000..65431e3
--- /dev/null
+++ b/src/oat/runtime/mips/runtime_support_mips.S
@@ -0,0 +1,955 @@
+#include "asm_support.h"
+
+    .balign 4
+
+    /* Deliver the given exception */
+    .extern artDeliverExceptionFromCode
+    /* Deliver an exception pending on a thread */
+    .extern artDeliverPendingException
+
+    /* Cache alignment for function entry */
+.macro ALIGN_FUNCTION_ENTRY
+    .balign 16
+.endm
+
+    /*
+     * Macro that sets up the callee save frame to conform with
+     * Runtime::CreateCalleeSaveMethod(kSaveAll)
+     * callee-save: s0-s8 + ra, 10 total + 2 words
+     */
+.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    addiu  sp, sp, -48
+    sw     ra, 44(sp)
+    sw     s8, 40(sp)
+    sw     s7, 36(sp)
+    sw     s6, 32(sp)
+    sw     s5, 28(sp)
+    sw     s4, 24(sp)
+    sw     s3, 20(sp)
+    sw     s2, 16(sp)
+    sw     s1, 12(sp)
+    sw     s0, 8(sp)
+    @ 2 open words, bottom will hold Method*
+.endm
+
+    /*
+     * Macro that sets up the callee save frame to conform with
+     * Runtime::CreateCalleeSaveMethod(kRefsOnly). Restoration assumes non-moving GC.
+     * Does not include rSUSPEND or rSELF
+     * callee-save: s2-s8 + ra, 8 total + 4 words
+     */
+.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME
+    addiu  sp, sp, -48
+    sw     ra, 44(sp)
+    sw     s8, 40(sp)
+    sw     s7, 36(sp)
+    sw     s6, 32(sp)
+    sw     s5, 28(sp)
+    sw     s4, 24(sp)
+    sw     s3, 20(sp)
+    sw     s2, 16(sp)
+    @ 4 open words, bottom will hold Method*
+.endm
+
+.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    lw     ra, 44(sp)
+    addiu  sp, sp, 48
+.endm
+
+.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+    lw     ra, 44(sp)
+    jr     ra
+    addiu  sp, sp, 48
+.endm
+
+    /*
+     * Macro that sets up the callee save frame to conform with
+     * Runtime::CreateCalleeSaveMethod(kRefsAndArgs). Restoration assumes non-moving GC.
+     * a1-a3, s2-s8, ra, 11 total + 1
+     */
+.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+    addiu  sp, sp, -48
+    sw     ra, 44(sp)
+    sw     s8, 40(sp)
+    sw     s7, 36(sp)
+    sw     s6, 32(sp)
+    sw     s5, 28(sp)
+    sw     s4, 24(sp)
+    sw     s3, 20(sp)
+    sw     s2, 16(sp)
+    sw     a3, 12(sp)
+    sw     a2, 8(sp)
+    sw     a1, 4(sp)
+    @ 1 open word, bottom will hold Method*
+.endm
+
+.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+    lw     ra, 44(sp)           @ restore ra
+    lw     a1, 4(sp)            @ restore non-callee save a1
+    lw     a2, 8(sp)            @ restore non-callee save a2
+    lw     a3, 12(sp)           @ restore non-callee save a3
+    addiu  sp, sp, 48           @ strip frame
+.endm
+
+    /*
+     * Macro that sets up a call through to artDeliverPendingExceptionFromCode, where the pending
+     * exception is Thread::Current()->exception_
+     */
+.macro DELIVER_PENDING_EXCEPTION
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME            @ save callee saves for throw
+    move    a0, rSELF                           @ pass Thread::Current
+    b       artDeliverPendingExceptionFromCode  @ artDeliverPendingExceptionFromCode(Thread*, SP)
+    move    a1, sp                              @ pass SP
+.endm
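+
+    /*
+     * Note: the stubs in this file appear to rely on .set noreorder (presumably
+     * from asm_support.h), so the instruction immediately following a branch or
+     * jump sits in the MIPS delay slot and still executes; for example, the
+     * "move a1, sp" above fills the SP argument before control reaches
+     * artDeliverPendingExceptionFromCode.
+     */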
+
+.macro RETURN_IF_NO_EXCEPTION
+    lw     t0, THREAD_EXCEPTION_OFFSET(rSELF) @ load Thread::Current()->exception_
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    bnez   t0, 1f                        @ branch to 1 if an exception is pending
+    nop
+    jr     ra
+    nop
+1:
+    DELIVER_PENDING_EXCEPTION
+.endm
+
+.macro RETURN_IF_ZERO
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    bnez   v0, 1f                       @ success?
+    nop
+    jr     ra                           @ return on success
+    nop
+1:
+    DELIVER_PENDING_EXCEPTION
+.endm
+
+.macro RETURN_IF_NONZERO
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    beqz   v0, 1f                       @ success?
+    nop
+    jr     ra                           @ return on success
+    nop
+1:
+    DELIVER_PENDING_EXCEPTION
+.endm
+
+    .global art_update_debugger
+    .extern artUpdateDebuggerFromCode
+    /*
+     * On entry, a0 and a1 must be preserved, a2 is dex PC
+     */
+    ALIGN_FUNCTION_ENTRY
+art_update_debugger:
+    move    a3, a0         @ stash away a0 so that it's saved as if it were an argument
+    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+    move    a0, a2         @ arg0 is dex PC
+    move    a1, rSELF      @ arg1 is Thread*
+    move    a2, sp         @ arg2 is sp
+    jal     artUpdateDebuggerFromCode      @ artUpdateDebuggerFromCode(int32_t, Thread*, Method**)
+    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+    jr      ra
+    move    a0, a3         @ restore original a0
+
+    .global art_do_long_jump
+    /*
+     * On entry a0 is uint32_t* gprs_ and a1 is uint32_t* fprs_
+     * FIXME: just guessing about the shape of the jmpbuf.  Where will pc be?
+     */
+    ALIGN_FUNCTION_ENTRY
+art_do_long_jump:
+    l.s     f0, 0(a1)
+    l.s     f1, 4(a1)
+    l.s     f2, 8(a1)
+    l.s     f3, 12(a1)
+    l.s     f4, 16(a1)
+    l.s     f5, 20(a1)
+    l.s     f6, 24(a1)
+    l.s     f7, 28(a1)
+    l.s     f8, 32(a1)
+    l.s     f9, 36(a1)
+    l.s     f10, 40(a1)
+    l.s     f11, 44(a1)
+    l.s     f12, 48(a1)
+    l.s     f13, 52(a1)
+    l.s     f14, 56(a1)
+    l.s     f15, 60(a1)
+    l.s     f16, 64(a1)
+    l.s     f17, 68(a1)
+    l.s     f18, 72(a1)
+    l.s     f19, 76(a1)
+    l.s     f20, 80(a1)
+    l.s     f21, 84(a1)
+    l.s     f22, 88(a1)
+    l.s     f23, 92(a1)
+    l.s     f24, 96(a1)
+    l.s     f25, 100(a1)
+    l.s     f26, 104(a1)
+    l.s     f27, 108(a1)
+    l.s     f28, 112(a1)
+    l.s     f29, 116(a1)
+    l.s     f30, 120(a1)
+    l.s     f31, 124(a1)
+    lw      at, 4(a0)
+    lw      v0, 8(a0)
+    lw      v1, 12(a0)
+    lw      a1, 20(a0)
+    lw      a2, 24(a0)
+    lw      a3, 28(a0)
+    lw      t0, 32(a0)
+    lw      t1, 36(a0)
+    lw      t2, 40(a0)
+    lw      t3, 44(a0)
+    lw      t4, 48(a0)
+    lw      t5, 52(a0)
+    lw      t6, 56(a0)
+    lw      t7, 60(a0)
+    lw      s0, 64(a0)
+    lw      s1, 68(a0)
+    lw      s2, 72(a0)
+    lw      s3, 76(a0)
+    lw      s4, 80(a0)
+    lw      s5, 84(a0)
+    lw      s6, 88(a0)
+    lw      s7, 92(a0)
+    lw      t8, 96(a0)
+    lw      t9, 100(a0)
+    lw      k0, 104(a0)
+    lw      k1, 108(a0)
+    lw      gp, 112(a0)
+    lw      sp, 116(a0)
+    lw      fp, 120(a0)
+    lw      ra, 124(a0)
+    lw      a0, 16(a0)
+    move    v0, zero            @ clear result registers v0 and v1
+    jr      ra                  @ do long jump
+    move    v1, zero
+
+    .global art_deliver_exception_from_code
+    /*
+     * Called by managed code, saves most registers (forms basis of long jump context) and passes
+     * the bottom of the stack. artDeliverExceptionFromCode will place the callee save Method* at
+     * the bottom of the thread. On entry r0 holds Throwable*
+     */
+    ALIGN_FUNCTION_ENTRY
+art_deliver_exception_from_code:
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    move a1, rSELF                      @ pass Thread::Current
+    b    artDeliverExceptionFromCode    @ artDeliverExceptionFromCode(Throwable*, Thread*, SP)
+    move a2, sp                         @ pass SP
+
+    .global art_throw_null_pointer_exception_from_code
+    .extern artThrowNullPointerExceptionFromCode
+    /*
+     * Called by managed code to create and deliver a NullPointerException
+     */
+    ALIGN_FUNCTION_ENTRY
+art_throw_null_pointer_exception_from_code:
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    move a0, rSELF                            @ pass Thread::Current
+    b   artThrowNullPointerExceptionFromCode  @ artThrowNullPointerExceptionFromCode(Thread*, SP)
+    move a1, sp                               @ pass SP
+
+    .global art_throw_div_zero_from_code
+    .extern artThrowDivZeroFromCode
+    /*
+     * Called by managed code to create and deliver an ArithmeticException
+     */
+    ALIGN_FUNCTION_ENTRY
+art_throw_div_zero_from_code:
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    move a0, rSELF                  @ pass Thread::Current
+    b   artThrowDivZeroFromCode     @ artThrowDivZeroFromCode(Thread*, SP)
+    move a1, sp                     @ pass SP
+
+    .global art_throw_array_bounds_from_code
+    .extern artThrowArrayBoundsFromCode
+    /*
+     * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException
+     */
+    ALIGN_FUNCTION_ENTRY
+art_throw_array_bounds_from_code:
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    move a2, rSELF                   @ pass Thread::Current
+    b   artThrowArrayBoundsFromCode  @ artThrowArrayBoundsFromCode(index, limit, Thread*, SP)
+    move a3, sp                      @ pass SP
+
+    .global art_throw_stack_overflow_from_code
+    .extern artThrowStackOverflowFromCode
+    /*
+     * Called by managed code to create and deliver a StackOverflowError.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_throw_stack_overflow_from_code:
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    move a1, rSELF                     @ pass Thread::Current
+    b   artThrowStackOverflowFromCode  @ artThrowStackOverflowFromCode(method, Thread*, SP)
+    move a2, sp                        @ pass SP
+
+    .global art_throw_neg_array_size_from_code
+    .extern artThrowNegArraySizeFromCode
+    /*
+     * Called by managed code to create and deliver a NegativeArraySizeException.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_throw_neg_array_size_from_code:
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    move a1, rSELF                        @ pass Thread::Current
+    b   artThrowNegArraySizeFromCode      @ artThrowNegArraySizeFromCode(size, Thread*, SP)
+    move a2, sp                           @ pass SP
+
+    .global art_throw_no_such_method_from_code
+    .extern artThrowNoSuchMethodFromCode
+    /*
+     * Called by managed code to create and deliver a NoSuchMethodError.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_throw_no_such_method_from_code:
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    move a1, rSELF                        @ pass Thread::Current
+    b   artThrowNoSuchMethodFromCode      @ artThrowNoSuchMethodFromCode(method_idx, Thread*, SP)
+    move a2, sp                           @ pass SP
+
+    .global art_throw_verification_error_from_code
+    .extern artThrowVerificationErrorFromCode
+    /*
+     * Called by managed code to create and deliver verification errors.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_throw_verification_error_from_code:
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    move a2, rSELF                            @ pass Thread::Current
+    b   artThrowVerificationErrorFromCode     @ artThrowVerificationErrorFromCode(kind, ref, Thread*, SP)
+    move a3, sp                               @ pass SP
+
+    /*
+     * All generated callsites for interface invokes and invocation slow paths will load arguments
+     * as usual - except instead of loading arg0/a0 with the target Method*, arg0/a0 will contain
+     * the method_idx.  This wrapper will save arg1-arg3, load the caller's Method*, align the
+     * stack and call the appropriate C helper.
+     * NOTE: "this" is first visable argument of the target, and so can be found in arg1/a1.
+     *
+     * The helper will attempt to locate the target and return a 64-bit result in v0/v1 consisting
+     * of the target Method* in v0 and method->code_ in v1.
+     *
+     * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+     * thread and we branch to another stub to deliver it.
+     *
+     * On success this wrapper will restore arguments and *jump* to the target, leaving the return
+     * address (ra) pointing back to the original caller.
+     */
+.macro INVOKE_TRAMPOLINE c_name, cxx_name
+    .global \c_name
+    .extern \cxx_name
+\c_name:
+    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME  @ save callee saves in case allocation triggers GC
+    lw    a2, 48(sp)                      @ pass caller Method*
+    move  a3, rSELF                       @ pass Thread::Current
+    sw    sp, 0(sp)                       @ pass SP
+    jal   \cxx_name                       @ (method_idx, this, caller, Thread*, SP)
+    move   t0, v1                         @ save method->code_ returned in v1
+    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+    beqz   v0, 1f                         @ did we find the target method?
+    nop
+    jr     t0
+    nop
+1:
+    DELIVER_PENDING_EXCEPTION
+.endm
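+
+    /*
+     * Illustrative only (no code emitted here): a generated call site loads the method_idx into
+     * a0 and the receiver into a1, then does e.g.
+     *     jal art_invoke_virtual_trampoline_with_access_check
+     * and, on success, execution continues at the resolved method's code with ra still pointing
+     * back at the original caller.
+     */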
+
+INVOKE_TRAMPOLINE art_invoke_interface_trampoline, artInvokeInterfaceTrampoline
+INVOKE_TRAMPOLINE art_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
+
+INVOKE_TRAMPOLINE art_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
+
+    .global art_work_around_app_jni_bugs
+    .extern artWorkAroundAppJniBugs
+    /*
+     * Entry point of native methods when JNI bug compatibility is enabled.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_work_around_app_jni_bugs:
+    @ save registers that may contain arguments and LR that will be crushed by a call
+    addiu    sp, sp, -32
+    sw       a0, 28(sp)
+    sw       a1, 24(sp)
+    sw       a2, 20(sp)
+    sw       a3, 16(sp)
+    sw       ra, 12(sp)
+    move     a0, rSELF       @ pass Thread::Current
+    jal      artWorkAroundAppJniBugs  @ (Thread*, SP)
+    move     a1, sp          @ pass SP
+    move     t0, v0          @ save target address
+    lw       a0, 28(sp)
+    lw       a1, 24(sp)
+    lw       a2, 20(sp)
+    lw       a3, 16(sp)
+    lw       ra, 12(sp)
+    jr       t0              @ tail call into JNI routine
+    addiu    sp, sp, 32
+
+    .global art_handle_fill_data_from_code
+    .extern artHandleFillArrayDataFromCode
+    /*
+     * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
+     * failure.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_handle_fill_data_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case exception allocation triggers GC
+    move    a2, rSELF                          @ pass Thread::Current
+    jal     artHandleFillArrayDataFromCode     @ (Array* array, const uint16_t* table, Thread*, SP)
+    move    a3, sp                             @ pass SP
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    bnez    v0, 1f                             @ success?
+    nop
+    jr      ra                                 @ return on success
+    nop
+1:
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_lock_object_from_code
+    .extern artLockObjectFromCode
+    /*
+     * Entry from managed code that calls artLockObjectFromCode, may block for GC.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_lock_object_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME      @ save callee saves in case we block
+    move    a1, rSELF                     @ pass Thread::Current
+    jal     artLockObjectFromCode         @ (Object* obj, Thread*, SP)
+    move    a2, sp                        @ pass SP
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+
+    .global art_unlock_object_from_code
+    .extern artUnlockObjectFromCode
+    /*
+     * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_unlock_object_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case exception allocation triggers GC
+    move    a1, rSELF                 @ pass Thread::Current
+    jal     artUnlockObjectFromCode   @ (Object* obj, Thread*, SP)
+    move    a2, sp                    @ pass SP
+    RETURN_IF_ZERO
+
+    .global art_check_cast_from_code
+    .extern artCheckCastFromCode
+    /*
+     * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_check_cast_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case exception allocation triggers GC
+    move    a2, rSELF                 @ pass Thread::Current
+    jal     artCheckCastFromCode      @ (Class* a, Class* b, Thread*, SP)
+    move    a3, sp                    @ pass SP
+    RETURN_IF_ZERO
+
+    .global art_can_put_array_element_from_code
+    .extern artCanPutArrayElementFromCode
+    /*
+     * Entry from managed code that calls artCanPutArrayElementFromCode and delivers exception on
+     * failure.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_can_put_array_element_from_code:
+    bnez   a0, 1f                       @ non-null element: do the full check
+    nop
+    jr     ra                           @ storing null is always allowed, return
+    nop
+1:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME    @ save callee saves in case exception allocation triggers GC
+    move    a2, rSELF                      @ pass Thread::Current
+    jal     artCanPutArrayElementFromCode  @ (Object* element, Class* array_class, Thread*, SP)
+    move    a3, sp                         @ pass SP
+    RETURN_IF_ZERO
+
+    .global art_initialize_static_storage_from_code
+    .extern artInitializeStaticStorageFromCode
+    /*
+     * Entry from managed code when static storage is uninitialized; this stub will run the class
+     * initializer and deliver an exception on error. On success the static storage base is
+     * returned.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_initialize_static_storage_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME            @ save callee saves in case of GC
+    move    a2, rSELF                           @ pass Thread::Current
+    @ artInitializeStaticStorageFromCode(uint32_t type_idx, Method* referrer, Thread*, SP)
+    jal     artInitializeStaticStorageFromCode
+    move    a3, sp                              @ pass SP
+    RETURN_IF_NONZERO
+
+    .global art_initialize_type_from_code
+    .extern artInitializeTypeFromCode
+    /*
+     * Entry from managed code when dex cache misses for a type_idx.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_initialize_type_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME           @ save callee saves in case of GC
+    move    a2, rSELF                          @ pass Thread::Current
+    @ artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*, SP)
+    jal     artInitializeTypeFromCode
+    move    a3, sp                             @ pass SP
+    RETURN_IF_NONZERO
+
+    .global art_initialize_type_and_verify_access_from_code
+    .extern artInitializeTypeAndVerifyAccessFromCode
+    /*
+     * Entry from managed code when type_idx needs to be checked for access and dex cache may also
+     * miss.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_initialize_type_and_verify_access_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME           @ save callee saves in case of GC
+    move    a2, rSELF                           @ pass Thread::Current
+    @ artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Method* referrer, Thread*, SP)
+    jal     artInitializeTypeAndVerifyAccessFromCode
+    move    a3, sp                              @ pass SP
+    RETURN_IF_NONZERO
+
+    .global art_get32_static_from_code
+    .extern artGet32StaticFromCode
+    /*
+     * Called by managed code to resolve a static field and load a 32-bit primitive value.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_get32_static_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    lw     a1, 48(sp)                    @ pass referrer's Method*
+    move   a2, rSELF                     @ pass Thread::Current
+    jal     artGet32StaticFromCode       @ (uint32_t field_idx, const Method* referrer, Thread*, SP)
+    move   a3, sp                        @ pass SP
+    RETURN_IF_NO_EXCEPTION
+
+    .global art_get64_static_from_code
+    .extern artGet64StaticFromCode
+    /*
+     * Called by managed code to resolve a static field and load a 64-bit primitive value.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_get64_static_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    lw     a1, 48(sp)                    @ pass referrer's Method*
+    move   a2, rSELF                     @ pass Thread::Current
+    jal     artGet64StaticFromCode       @ (uint32_t field_idx, const Method* referrer, Thread*, SP)
+    move   a3, sp                        @ pass SP
+    RETURN_IF_NO_EXCEPTION
+
+    .global art_get_obj_static_from_code
+    .extern artGetObjStaticFromCode
+    /*
+     * Called by managed code to resolve a static field and load an object reference.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_get_obj_static_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    lw     a1, 48(sp)                    @ pass referrer's Method*
+    move   a2, rSELF                     @ pass Thread::Current
+    jal     artGetObjStaticFromCode      @ (uint32_t field_idx, const Method* referrer, Thread*, SP)
+    move   a3, sp                        @ pass SP
+    RETURN_IF_NO_EXCEPTION
+
+    .global art_get32_instance_from_code
+    .extern artGet32InstanceFromCode
+    /*
+     * Called by managed code to resolve an instance field and load a 32-bit primitive value.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_get32_instance_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    lw     a2, 48(sp)                    @ pass referrer's Method*
+    move   a3, rSELF                     @ pass Thread::Current
+    jal     artGet32InstanceFromCode     @ (field_idx, Object*, referrer, Thread*, SP)
+    sw     sp, 0(sp)                     @ pass SP
+    RETURN_IF_NO_EXCEPTION
+
+    .global art_get64_instance_from_code
+    .extern artGet64InstanceFromCode
+    /*
+     * Called by managed code to resolve an instance field and load a 64-bit primitive value.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_get64_instance_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    lw     a2, 48(sp)                    @ pass referrer's Method*
+    move   a3, rSELF                     @ pass Thread::Current
+    jal     artGet64InstanceFromCode     @ (field_idx, Object*, referrer, Thread*, SP)
+    sw     sp, 0(sp)                     @ pass SP
+    RETURN_IF_NO_EXCEPTION
+
+    .global art_get_obj_instance_from_code
+    .extern artGetObjInstanceFromCode
+    /*
+     * Called by managed code to resolve an instance field and load an object reference.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_get_obj_instance_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    lw     a2, 48(sp)                    @ pass referrer's Method*
+    move   a3, rSELF                     @ pass Thread::Current
+    jal     artGetObjInstanceFromCode    @ (field_idx, Object*, referrer, Thread*, SP)
+    sw     sp, 0(sp)                     @ pass SP
+    RETURN_IF_NO_EXCEPTION
+
+    .global art_set32_static_from_code
+    .extern artSet32StaticFromCode
+    /*
+     * Called by managed code to resolve a static field and store a 32-bit primitive value.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_set32_static_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    lw     a2, 48(sp)                    @ pass referrer's Method*
+    move   a3, rSELF                     @ pass Thread::Current
+    jal     artSet32StaticFromCode       @ (field_idx, new_val, referrer, Thread*, SP)
+    sw     sp, 0(sp)                     @ pass SP
+    RETURN_IF_ZERO
+
+    .global art_set64_static_from_code
+    .extern artSet64StaticFromCode
+    /*
+     * Called by managed code to resolve a static field and store a 64-bit primitive value.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_set64_static_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    lw     a1, 48(sp)                    @ pass referrer's Method*
+    move   t0, sp                        @ save SP
+    addiu  sp, sp, -16
+    sw     rSELF, 0(sp)                  @ pass Thread::Current and sp
+    jal    artSet64StaticFromCode        @ (field_idx, referrer, new_val, Thread*, SP)
+    sw     t0, 4(sp)
+    addiu  sp, sp, 16                    @ release out args
+    RETURN_IF_ZERO
+
+    .global art_set_obj_static_from_code
+    .extern artSetObjStaticFromCode
+    /*
+     * Called by managed code to resolve a static field and store an object reference.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_set_obj_static_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    lw     a2, 48(sp)                    @ pass referrer's Method*
+    move   a3, rSELF                     @ pass Thread::Current
+    jal     artSetObjStaticFromCode      @ (field_idx, new_val, referrer, Thread*, SP)
+    sw     sp, 0(sp)                     @ pass SP
+    RETURN_IF_ZERO
+
+    .global art_set32_instance_from_code
+    .extern artSet32InstanceFromCode
+    /*
+     * Called by managed code to resolve an instance field and store a 32-bit primitive value.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_set32_instance_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    lw     a3, 48(sp)                    @ pass referrer's Method*
+    move   t0, sp                        @ save SP
+    addiu  sp, sp, -16
+    sw     rSELF, 0(sp)                  @ pass Thread::Current and sp
+    jal    artSet32InstanceFromCode      @ (field_idx, Object*, new_val, referrer, Thread*, SP)
+    sw     t0, 4(sp)
+    addiu  sp, sp, 16                    @ release out args
+    RETURN_IF_ZERO
+
+    .global art_set64_instance_from_code
+    .extern artSet64InstanceFromCode
+    /*
+     * Called by managed code to resolve an instance field and store a 64-bit primitive value.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_set64_instance_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    move   t0, sp                        @ save SP
+    addiu  sp, sp, -16
+    sw     rSELF, 0(sp)                  @ pass Thread::Current and sp
+    jal     artSet64InstanceFromCode     @ (field_idx, Object*, new_val, Thread*, SP)
+    sw     t0, 4(sp)
+    addiu  sp, sp, 16                    @ release out args
+    RETURN_IF_ZERO
+
+    .global art_set_obj_instance_from_code
+    .extern artSetObjInstanceFromCode
+    /*
+     * Called by managed code to resolve an instance field and store an object reference.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_set_obj_instance_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    lw     a3, 48(sp)                    @ pass referrer's Method*
+    move   t0, sp                        @ save SP
+    addiu  sp, sp, -16
+    sw     rSELF, 0(sp)                  @ pass Thread::Current and sp
+    jal     artSetObjInstanceFromCode    @ (field_idx, Object*, new_val, referrer, Thread*, SP)
+    sw     t0, 4(sp)
+    addiu  sp, sp, 16                    @ release out args
+    RETURN_IF_ZERO
+
+    .global art_resolve_string_from_code
+    .extern artResolveStringFromCode
+    /*
+     * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
+     * exception on error. On success the String is returned. a0 holds the referring method,
+     * a1 holds the string index. The fast path check for a hit in the strings cache has already been
+     * performed.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_resolve_string_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
+    move    a2, rSELF                 @ pass Thread::Current
+    @ artResolveStringFromCode(Method* referrer, uint32_t string_idx, Thread*, SP)
+    jal     artResolveStringFromCode
+    move    a3, sp                    @ pass SP
+    RETURN_IF_NONZERO
+
+    .global art_alloc_object_from_code
+    .extern artAllocObjectFromCode
+    /*
+     * Called by managed code to allocate an object.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_alloc_object_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
+    move    a2, rSELF                 @ pass Thread::Current
+    jal     artAllocObjectFromCode    @ (uint32_t type_idx, Method* method, Thread*, SP)
+    move    a3, sp                    @ pass SP
+    RETURN_IF_NONZERO
+
+    .global art_alloc_object_from_code_with_access_check
+    .extern artAllocObjectFromCodeWithAccessCheck
+    /*
+     * Called by managed code to allocate an object when the caller doesn't know whether it has
+     * access to the created type.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_alloc_object_from_code_with_access_check:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
+    move    a2, rSELF                 @ pass Thread::Current
+    jal     artAllocObjectFromCodeWithAccessCheck  @ (uint32_t type_idx, Method* method, Thread*, SP)
+    move    a3, sp                    @ pass SP
+    RETURN_IF_NONZERO
+
+    .global art_alloc_array_from_code
+    .extern artAllocArrayFromCode
+    /*
+     * Called by managed code to allocate an array.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_alloc_array_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
+    move    a3, rSELF                 @ pass Thread::Current
+    @ artAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count, Thread*, SP)
+    jal     artAllocArrayFromCode
+    sw    sp, 0(sp)                   @ pass SP
+    RETURN_IF_NONZERO
+
+    .global art_alloc_array_from_code_with_access_check
+    .extern artAllocArrayFromCodeWithAccessCheck
+    /*
+     * Called by managed code to allocate an array when the caller doesn't know whether it has
+     * access to the created type.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_alloc_array_from_code_with_access_check:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
+    move    a3, rSELF                 @ pass Thread::Current
+    @ artAllocArrayFromCodeWithAccessCheck(type_idx, method, component_count, Thread*, SP)
+    jal     artAllocArrayFromCodeWithAccessCheck
+    sw      sp, 0(sp)                 @ pass SP
+    RETURN_IF_NONZERO
+
+    .global art_check_and_alloc_array_from_code
+    .extern artCheckAndAllocArrayFromCode
+    /*
+     * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_check_and_alloc_array_from_code:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
+    move    a3, rSELF                 @ pass Thread::Current
+    @ artCheckAndAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t count, Thread* , SP)
+    jal     artCheckAndAllocArrayFromCode
+    sw      sp, 0(sp)                 @ pass SP
+    RETURN_IF_NONZERO
+
+    .global art_check_and_alloc_array_from_code_with_access_check
+    .extern artCheckAndAllocArrayFromCodeWithAccessCheck
+    /*
+     * Called by managed code to allocate an array in a special case for FILLED_NEW_ARRAY.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_check_and_alloc_array_from_code_with_access_check:
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  @ save callee saves in case of GC
+    move   a3, rSELF                  @ pass Thread::Current
+    @ artCheckAndAllocArrayFromCodeWithAccessCheck(type_idx, method, count, Thread* , SP)
+    jal     artCheckAndAllocArrayFromCodeWithAccessCheck
+    sw     sp, 0(sp)                  @ pass SP
+    RETURN_IF_NONZERO
+
+    .global art_test_suspend
+    .extern artTestSuspendFromCode
+    /*
+     * Called by managed code when the value in rSUSPEND has been decremented to 0.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_test_suspend:
+    lw     a0, THREAD_SUSPEND_COUNT_OFFSET(rSELF)
+    bnez   a0, 1f
+    li     rSUSPEND, SUSPEND_CHECK_INTERVAL   @ reset rSUSPEND to SUSPEND_CHECK_INTERVAL
+    jr     ra
+    nop
+1:
+    move   a0, rSELF
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME          @ save callee saves for stack crawl
+    jal    artTestSuspendFromCode             @ (Thread*, SP)
+    move   a1, sp
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+
+    .global art_proxy_invoke_handler
+    .extern artProxyInvokeHandler
+    /*
+     * Called by managed code that is attempting to call a method on a proxy class. On entry
+     * a0 holds the proxy method; a1, a2 and a3 may contain arguments.
+     */
+    ALIGN_FUNCTION_ENTRY
+art_proxy_invoke_handler:
+    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
+    sw      a0, 0(sp)             @ place proxy method at bottom of frame
+    move    a2, rSELF             @ pass Thread::Current
+    jal     artProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, args...)
+    addiu   a3, sp, 12            @ pointer to a2/a3/ra/caller's Method**/out-args
+    lw      t0, THREAD_EXCEPTION_OFFSET(rSELF) @ load Thread::Current()->exception_
+@FIXME - offsets here are probably wrong
+    lw      ra, 44(sp)            @ restore ra
+    lw      v0, 12(sp)
+    lw      v1, 16(sp)
+    bnez    t0, 1f                @ branch if exception pending
+    addiu   sp, sp, 48            @ pop frame
+    jr      ra
+    nop
+1:
+    DELIVER_PENDING_EXCEPTION
+
+    .global art_trace_entry_from_code
+    .extern artTraceMethodEntryFromCode
+    /*
+     * Routine that intercepts method calls
+     */
+    ALIGN_FUNCTION_ENTRY
+art_trace_entry_from_code:
+    addiu    sp, sp, -16
+    sw       a0, 0(sp)
+    sw       a1, 4(sp)
+    sw       a2, 8(sp)
+    sw       a3, 12(sp)
+    move     a2, ra       @ pass ra
+    jal      artTraceMethodEntryFromCode  @ (Method*, Thread*, LR)
+    move     a1, rSELF    @ pass Thread::Current
+    move     t0, v0       @ t0 holds reference to code
+    lw       a0, 0(sp)
+    lw       a1, 4(sp)
+    lw       a2, 8(sp)
+    lw       a3, 12(sp)
+    jalr     t0           @ call method
+    addiu    sp, sp, 16
+    /* intentional fallthrough */
+
+    .global art_trace_exit_from_code
+    .extern artTraceMethodExitFromCode
+    /*
+     * Routine that intercepts method returns
+     */
+    ALIGN_FUNCTION_ENTRY
+art_trace_exit_from_code:
+    addiu    sp, sp, -16
+    sw       v0, 0(sp)
+    jal      artTraceMethodExitFromCode  @ ()
+    sw       v1, 4(sp)
+    move     ra, v0         @ restore link register
+    lw       v0, 0(sp)
+    lw       v1, 4(sp)
+    jr       ra             @ return
+    addiu    sp, sp, 16
+
+    .global art_shl_long
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
+     * 6 bits.
+     * On entry:
+     *   a0: low word
+     *   a1: high word
+     *   a2: shift count
+     */
+    ALIGN_FUNCTION_ENTRY
+art_shl_long:
+    /* shl-long vAA, vBB, vCC */
+    sll     v0, a0, a2                     @  rlo<- alo << (shift&31)
+    not     v1, a2                         @  rhi<- 31-shift  (shift is 5b)
+    srl     a0, 1
+    srl     a0, v1                         @  alo<- alo >> (32-(shift&31))
+    sll     v1, a1, a2                     @  rhi<- ahi << (shift&31)
+    or      v1, a0                         @  rhi<- rhi | alo
+    andi    a2, 0x20                       @  shift<- shift & 0x20
+    movn    v1, v0, a2                     @  rhi<- rlo (if shift&0x20)
+    jr      ra
+    movn    v0, zero, a2                   @  rlo<- 0  (if shift&0x20)
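+    @ Worked example (illustrative): for a1:a0 = 0x00000000:00000001 and a shift count of 33 the
+    @ result is v1:v0 = 0x00000002:00000000, i.e. the count is taken modulo 64.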
+
+    .global art_shr_long
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
+     * 6 bits.
+     * On entry:
+     *   a0: low word
+     *   a1: high word
+     *   a2: shift count
+     */
+    ALIGN_FUNCTION_ENTRY
+art_shr_long:
+    sra     v1, a1, a2                     @  rhi<- ahi >> (shift&31)
+    srl     v0, a0, a2                     @  rlo<- alo >> (shift&31)
+    sra     a3, a1, 31                     @  a3<- sign(ah)
+    not     a0, a2                         @  alo<- 31-shift (shift is 5b)
+    sll     a1, 1
+    sll     a1, a0                         @  ahi<- ahi << (32-(shift&31))
+    or      v0, a1                         @  rlo<- rlo | ahi
+    andi    a2, 0x20                       @  shift & 0x20
+    movn    v0, v1, a2                     @  rlo<- rhi (if shift&0x20)
+    jr      ra
+    movn    v1, a3, a2                     @  rhi<- sign(ahi) (if shift&0x20)
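+    @ Worked example (illustrative): a1:a0 = 0x80000000:00000000 shifted right by 33 gives
+    @ v1:v0 = 0xFFFFFFFF:C0000000, i.e. the sign is propagated and the count is taken modulo 64.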
+
+    .global art_ushr_long
+    /*
+     * Long integer shift.  This is different from the generic 32/64-bit
+     * binary operations because vAA/vBB are 64-bit but vCC (the shift
+     * distance) is 32-bit.  Also, Dalvik requires us to ignore all but the low
+     * 6 bits.
+     * On entry:
+     *   a0: low word
+     *   a1: high word
+     *   a2: shift count
+     */
+    /* ushr-long vAA, vBB, vCC */
+    ALIGN_FUNCTION_ENTRY
+art_ushr_long:
+    srl     v1, a1, a2                     @  rhi<- ahi >> (shift&31)
+    srl     v0, a0, a2                     @  rlo<- alo >> (shift&31)
+    not     a0, a2                         @  alo<- 31-shift (shift is 5b)
+    sll     a1, 1
+    sll     a1, a0                         @  ahi<- ahi << (32-(shift&31))
+    or      v0, a1                         @  rlo<- rlo | ahi
+    andi    a2, 0x20                       @  shift<- shift & 0x20
+    movn    v0, v1, a2                     @  rlo<- rhi (if shift&0x20)
+    jr      ra
+    movn    v1, zero, a2                   @  rhi<- 0 (if shift&0x20)
diff --git a/src/oat/runtime/oat_support_entrypoints.h b/src/oat/runtime/oat_support_entrypoints.h
new file mode 100644
index 0000000..0e59dd8
--- /dev/null
+++ b/src/oat/runtime/oat_support_entrypoints.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_RUNTIME_OAT_SUPPORT_ENTRYPOINTS_H_
+#define ART_SRC_OAT_RUNTIME_OAT_SUPPORT_ENTRYPOINTS_H_
+
+#include "runtime.h"
+
+#define ENTRYPOINT_OFFSET(x) \
+  (static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, entrypoints_)) + \
+   static_cast<uintptr_t>(OFFSETOF_MEMBER(EntryPoints, x)))
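+
+// Illustration only (an assumption about typical use, not part of this header's contract): a code
+// generator reaches one of these entry points by loading the function pointer out of the current
+// Thread and calling through it, conceptually:
+//   void** entry = reinterpret_cast<void**>(
+//       reinterpret_cast<uint8_t*>(self) + ENTRYPOINT_OFFSET(pAllocObjectFromCode));
+//   // ... emit an indirect call through *entry ...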
+
+namespace art {
+
+class Class;
+class Method;
+class Thread;
+
+struct PACKED EntryPoints {
+  // Alloc
+  void* (*pAllocArrayFromCode)(uint32_t, void*, int32_t);
+  void* (*pAllocArrayFromCodeWithAccessCheck)(uint32_t, void*, int32_t);
+  void* (*pAllocObjectFromCode)(uint32_t, void*);
+  void* (*pAllocObjectFromCodeWithAccessCheck)(uint32_t, void*);
+  void* (*pCheckAndAllocArrayFromCode)(uint32_t, void*, int32_t);
+  void* (*pCheckAndAllocArrayFromCodeWithAccessCheck)(uint32_t, void*, int32_t);
+
+  // Cast
+  uint32_t (*pInstanceofNonTrivialFromCode)(const Class*, const Class*);
+  void (*pCanPutArrayElementFromCode)(void*, void*);
+  void (*pCheckCastFromCode)(void*, void*);
+
+  // Debug
+  void (*pDebugMe)(Method*, uint32_t);
+  void (*pUpdateDebuggerFromCode)(void*, void*, int32_t, void*);
+
+  // DexCache
+  void* (*pInitializeStaticStorage)(uint32_t, void*);
+  void* (*pInitializeTypeAndVerifyAccessFromCode)(uint32_t, void*);
+  void* (*pInitializeTypeFromCode)(uint32_t, void*);
+  void* (*pResolveStringFromCode)(void*, uint32_t);
+
+  // Field
+  int (*pSet32Instance)(uint32_t, void*, int32_t);  // field_idx, obj, src
+  int (*pSet32Static)(uint32_t, int32_t);
+  int (*pSet64Instance)(uint32_t, void*, int64_t);
+  int (*pSet64Static)(uint32_t, int64_t);
+  int (*pSetObjInstance)(uint32_t, void*, void*);
+  int (*pSetObjStatic)(uint32_t, void*);
+  int32_t (*pGet32Instance)(uint32_t, void*);
+  int32_t (*pGet32Static)(uint32_t);
+  int64_t (*pGet64Instance)(uint32_t, void*);
+  int64_t (*pGet64Static)(uint32_t);
+  void* (*pGetObjInstance)(uint32_t, void*);
+  void* (*pGetObjStatic)(uint32_t);
+
+  // FillArray
+  void (*pHandleFillArrayDataFromCode)(void*, void*);
+
+  // JNI
+  Object* (*pDecodeJObjectInThread)(Thread* thread, jobject obj);
+  void* (*pFindNativeMethod)(Thread* thread);
+
+  // Locks
+  void (*pLockObjectFromCode)(void*);
+  void (*pUnlockObjectFromCode)(void*);
+
+  // Math
+  int32_t (*pCmpgDouble)(double, double);
+  int32_t (*pCmpgFloat)(float, float);
+  int32_t (*pCmplDouble)(double, double);
+  int32_t (*pCmplFloat)(float, float);
+  double (*pDadd)(double, double);
+  double (*pDdiv)(double, double);
+  double (*pDmul)(double, double);
+  double (*pDsub)(double, double);
+  double (*pF2d)(float);
+  double (*pFmod)(double, double);
+  double (*pI2d)(int);
+  double (*pL2d)(int64_t);
+  float (*pD2f)(double);
+  float (*pFadd)(float, float);
+  float (*pFdiv)(float, float);
+  float (*pFmodf)(float, float);
+  float (*pFmul)(float, float);
+  float (*pFsub)(float, float);
+  float (*pI2f)(int32_t);
+  float (*pL2f)(int64_t);
+  int32_t (*pD2iz)(double);
+  int32_t (*pF2iz)(float);
+  int32_t (*pIdiv)(int32_t, int32_t);
+  int32_t (*pIdivmod)(int32_t, int32_t);
+  int64_t (*pD2l)(double);
+  int64_t (*pF2l)(float);
+  int64_t (*pLadd)(int64_t, int64_t);
+  int64_t (*pLand)(int64_t, int64_t);
+  int64_t (*pLdivmod)(int64_t, int64_t);
+  int64_t (*pLmul)(int64_t, int64_t);
+  int64_t (*pLor)(int64_t, int64_t);
+  int64_t (*pLsub)(int64_t, int64_t);
+  int64_t (*pLxor)(int64_t, int64_t);
+  uint64_t (*pShlLong)(uint64_t, uint32_t);
+  uint64_t (*pShrLong)(uint64_t, uint32_t);
+  uint64_t (*pUshrLong)(uint64_t, uint32_t);
+
+  // Intrinsics
+  int32_t (*pIndexOf)(void*, uint32_t, uint32_t, uint32_t);
+  int32_t (*pMemcmp16)(void*, void*, int32_t);
+  int32_t (*pStringCompareTo)(void*, void*);
+  void* (*pMemcpy)(void*, const void*, size_t);
+
+  // Invocation
+  Method* (*pFindInterfaceMethodInCache)(Class*, uint32_t, const Method*, struct DvmDex*);
+  const void* (*pUnresolvedDirectMethodTrampolineFromCode)(Method*, Method**, Thread*,
+                                                           Runtime::TrampolineType);
+  void (*pInvokeDirectTrampolineWithAccessCheck)(uint32_t, void*);
+  void (*pInvokeInterfaceTrampoline)(uint32_t, void*);
+  void (*pInvokeInterfaceTrampolineWithAccessCheck)(uint32_t, void*);
+  void (*pInvokeStaticTrampolineWithAccessCheck)(uint32_t, void*);
+  void (*pInvokeSuperTrampolineWithAccessCheck)(uint32_t, void*);
+  void (*pInvokeVirtualTrampolineWithAccessCheck)(uint32_t, void*);
+
+  // Thread
+  void (*pCheckSuspendFromCode)(Thread*);  // Stub that is called when the suspend count is non-zero
+  void (*pTestSuspendFromCode)();  // Stub that is periodically called to test the suspend count
+
+  // Throws
+  void (*pDeliverException)(void*);
+  void (*pThrowAbstractMethodErrorFromCode)(Method* m, Thread* thread, Method** sp);
+  void (*pThrowArrayBoundsFromCode)(int32_t, int32_t);
+  void (*pThrowDivZeroFromCode)();
+  void (*pThrowNoSuchMethodFromCode)(int32_t);
+  void (*pThrowNullPointerFromCode)();
+  void (*pThrowStackOverflowFromCode)(void*);
+  void (*pThrowVerificationErrorFromCode)(int32_t, int32_t);
+};
+
+// Initialize an entry point data structure.
+void InitEntryPoints(EntryPoints* points);
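+// A minimal sketch (an assumption, not part of this change) of how a per-architecture
+// implementation might wire its assembly stubs into the table:
+//   extern "C" void* art_alloc_object_from_code(uint32_t, void*);
+//   void InitEntryPoints(EntryPoints* points) {
+//     points->pAllocObjectFromCode = art_alloc_object_from_code;
+//     // ... and so on for the remaining entries ...
+//   }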
+
+// Change the debugger entry point in the data structure.
+void ChangeDebuggerEntryPoint(EntryPoints* points, bool enabled);
+
+// Is the given return_pc the trace exit return pc?
+bool IsTraceExitPc(uintptr_t pc);
+
+// Return address of stub that logs method entries.
+void* GetLogTraceEntryPoint();
+
+}  // namespace art
+
+#endif  // ART_SRC_OAT_RUNTIME_OAT_SUPPORT_ENTRYPOINTS_H_
diff --git a/src/oat/runtime/support_alloc.cc b/src/oat/runtime/support_alloc.cc
new file mode 100644
index 0000000..d9394d2
--- /dev/null
+++ b/src/oat/runtime/support_alloc.cc
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "runtime_support.h"
+
+namespace art {
+
+extern "C" Object* artAllocObjectFromCode(uint32_t type_idx, Method* method,
+                                          Thread* self, Method** sp) {
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  return AllocObjectFromCode(type_idx, method, self, false);
+}
+
+extern "C" Object* artAllocObjectFromCodeWithAccessCheck(uint32_t type_idx, Method* method,
+                                                         Thread* self, Method** sp) {
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  return AllocObjectFromCode(type_idx, method, self, true);
+}
+
+extern "C" Array* artAllocArrayFromCode(uint32_t type_idx, Method* method, int32_t component_count,
+                                        Thread* self, Method** sp) {
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  return AllocArrayFromCode(type_idx, method, component_count, self, false);
+}
+
+extern "C" Array* artAllocArrayFromCodeWithAccessCheck(uint32_t type_idx, Method* method,
+                                                       int32_t component_count,
+                                                       Thread* self, Method** sp) {
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  return AllocArrayFromCode(type_idx, method, component_count, self, true);
+}
+
+extern "C" Array* artCheckAndAllocArrayFromCode(uint32_t type_idx, Method* method,
+                                               int32_t component_count, Thread* self, Method** sp) {
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, false);
+}
+
+extern "C" Array* artCheckAndAllocArrayFromCodeWithAccessCheck(uint32_t type_idx, Method* method,
+                                                               int32_t component_count,
+                                                               Thread* self, Method** sp) {
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  return CheckAndAllocArrayFromCode(type_idx, method, component_count, self, true);
+}
+
+}  // namespace art
diff --git a/src/oat/runtime/support_cast.cc b/src/oat/runtime/support_cast.cc
new file mode 100644
index 0000000..987e764
--- /dev/null
+++ b/src/oat/runtime/support_cast.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "runtime_support.h"
+
+namespace art {
+
+// Assignable test for code, won't throw.  Null and equality tests already performed
+uint32_t IsAssignableFromCode(const Class* klass, const Class* ref_class) {
+  DCHECK(klass != NULL);
+  DCHECK(ref_class != NULL);
+  return klass->IsAssignableFrom(ref_class) ? 1 : 0;
+}
+
+// Check whether it is safe to cast one class to the other, throw exception and return -1 on failure
+extern "C" int artCheckCastFromCode(const Class* a, const Class* b, Thread* self, Method** sp) {
+  DCHECK(a->IsClass()) << PrettyClass(a);
+  DCHECK(b->IsClass()) << PrettyClass(b);
+  if (LIKELY(b->IsAssignableFrom(a))) {
+    return 0;  // Success
+  } else {
+    FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+    Thread::Current()->ThrowNewExceptionF("Ljava/lang/ClassCastException;",
+        "%s cannot be cast to %s",
+        PrettyDescriptor(a).c_str(),
+        PrettyDescriptor(b).c_str());
+    return -1;  // Failure
+  }
+}
+
+// Tests whether 'element' can be assigned into an array of type 'array_class'.
+// Returns 0 on success and -1 if an exception is pending.
+extern "C" int artCanPutArrayElementFromCode(const Object* element, const Class* array_class,
+                                             Thread* self, Method** sp) {
+  DCHECK(array_class != NULL);
+  // element can't be NULL here; the NULL case is screened by the calling assembly stub
+  Class* element_class = element->GetClass();
+  Class* component_type = array_class->GetComponentType();
+  if (LIKELY(component_type->IsAssignableFrom(element_class))) {
+    return 0;  // Success
+  } else {
+    FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+    Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;",
+        "%s cannot be stored in an array of type %s",
+        PrettyDescriptor(element_class).c_str(),
+        PrettyDescriptor(array_class).c_str());
+    return -1;  // Failure
+  }
+}
+
+}  // namespace art
diff --git a/src/oat/runtime/support_debug.cc b/src/oat/runtime/support_debug.cc
new file mode 100644
index 0000000..2803e27
--- /dev/null
+++ b/src/oat/runtime/support_debug.cc
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "debugger.h"
+
+namespace art {
+
+/*
+ * Report location to debugger.  Note: dex_pc is the current offset within
+ * the method.  However, because the offset alone cannot distinguish between
+ * method entry and offset 0 within the method, we'll use an offset of -1
+ * to denote method entry.
+ */
+extern "C" void artUpdateDebuggerFromCode(int32_t dex_pc, Thread* self, Method** sp) {
+  FinishCalleeSaveFrameSetup(self, sp,  Runtime::kRefsAndArgs);
+  Dbg::UpdateDebugger(dex_pc, self, sp);
+}
+
+// Temporary debugging hook for compiler.
+extern void DebugMe(Method* method, uint32_t info) {
+  LOG(INFO) << "DebugMe";
+  if (method != NULL) {
+    LOG(INFO) << PrettyMethod(method);
+  }
+  LOG(INFO) << "Info: " << info;
+}
+
+}  // namespace art
diff --git a/src/oat/runtime/support_dexcache.cc b/src/oat/runtime/support_dexcache.cc
new file mode 100644
index 0000000..e5f2f82
--- /dev/null
+++ b/src/oat/runtime/support_dexcache.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "runtime_support.h"
+
+namespace art {
+
+extern "C" Class* artInitializeStaticStorageFromCode(uint32_t type_idx, const Method* referrer,
+                                                     Thread* self, Method** sp) {
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  return ResolveVerifyAndClinit(type_idx, referrer, self, true, true);
+}
+
+extern "C" Class* artInitializeTypeFromCode(uint32_t type_idx, const Method* referrer, Thread* self,
+                                            Method** sp) {
+  // Called when method->dex_cache_resolved_types_[] misses
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  return ResolveVerifyAndClinit(type_idx, referrer, self, false, false);
+}
+
+extern "C" Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx,
+                                                           const Method* referrer, Thread* self,
+                                                           Method** sp) {
+  // Called when caller isn't guaranteed to have access to a type and the dex cache may be
+  // unpopulated
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  return ResolveVerifyAndClinit(type_idx, referrer, self, false, true);
+}
+
+extern "C" String* artResolveStringFromCode(Method* referrer, int32_t string_idx,
+                                            Thread* self, Method** sp) {
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  return ResolveStringFromCode(referrer, string_idx);
+}
+
+}  // namespace art
diff --git a/src/oat/runtime/support_field.cc b/src/oat/runtime/support_field.cc
new file mode 100644
index 0000000..77fe618
--- /dev/null
+++ b/src/oat/runtime/support_field.cc
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "runtime_support.h"
+
+#include <stdint.h>
+
+namespace art {
+
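+// Each accessor below first tries FindFieldFast, a fast path that must not suspend, GC or throw
+// (it runs before the callee-save frame is finished); only on a miss does it finish the frame and
+// fall back to FindFieldFromCode, which may resolve the field, initialize its class and throw.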
+extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx, const Method* referrer,
+                                           Thread* self, Method** sp) {
+  Field* field = FindFieldFast(field_idx, referrer, true, false, sizeof(int32_t));
+  if (LIKELY(field != NULL)) {
+    return field->Get32(NULL);
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode(field_idx, referrer, self, true, true, false, sizeof(int32_t));
+  if (LIKELY(field != NULL)) {
+    return field->Get32(NULL);
+  }
+  return 0;  // Will throw exception by checking with Thread::Current
+}
+
+extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx, const Method* referrer,
+                                           Thread* self, Method** sp) {
+  Field* field = FindFieldFast(field_idx, referrer, true, false, sizeof(int64_t));
+  if (LIKELY(field != NULL)) {
+    return field->Get64(NULL);
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode(field_idx, referrer, self, true, true, false, sizeof(int64_t));
+  if (LIKELY(field != NULL)) {
+    return field->Get64(NULL);
+  }
+  return 0;  // Will throw exception by checking with Thread::Current
+}
+
+extern "C" Object* artGetObjStaticFromCode(uint32_t field_idx, const Method* referrer,
+                                           Thread* self, Method** sp) {
+  Field* field = FindFieldFast(field_idx, referrer, false, false, sizeof(Object*));
+  if (LIKELY(field != NULL)) {
+    return field->GetObj(NULL);
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode(field_idx, referrer, self, true, false, false, sizeof(Object*));
+  if (LIKELY(field != NULL)) {
+    return field->GetObj(NULL);
+  }
+  return NULL;  // Will throw exception by checking with Thread::Current
+}
+
+extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, Object* obj,
+                                             const Method* referrer, Thread* self, Method** sp) {
+  Field* field = FindFieldFast(field_idx, referrer, true, false, sizeof(int32_t));
+  if (LIKELY(field != NULL && obj != NULL)) {
+    return field->Get32(obj);
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode(field_idx, referrer, self, false, true, false, sizeof(int32_t));
+  if (LIKELY(field != NULL)) {
+    if (UNLIKELY(obj == NULL)) {
+      ThrowNullPointerExceptionForFieldAccess(self, field, true);
+    } else {
+      return field->Get32(obj);
+    }
+  }
+  return 0;  // Will throw exception by checking with Thread::Current
+}
+
+extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, Object* obj,
+                                             const Method* referrer, Thread* self, Method** sp) {
+  Field* field = FindFieldFast(field_idx, referrer, true, false, sizeof(int64_t));
+  if (LIKELY(field != NULL && obj != NULL)) {
+    return field->Get64(obj);
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode(field_idx, referrer, self, false, true, false, sizeof(int64_t));
+  if (LIKELY(field != NULL)) {
+    if (UNLIKELY(obj == NULL)) {
+      ThrowNullPointerExceptionForFieldAccess(self, field, true);
+    } else {
+      return field->Get64(obj);
+    }
+  }
+  return 0;  // Will throw exception by checking with Thread::Current
+}
+
+extern "C" Object* artGetObjInstanceFromCode(uint32_t field_idx, Object* obj,
+                                              const Method* referrer, Thread* self, Method** sp) {
+  Field* field = FindFieldFast(field_idx, referrer, false, false, sizeof(Object*));
+  if (LIKELY(field != NULL && obj != NULL)) {
+    return field->GetObj(obj);
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode(field_idx, referrer, self, false, false, false, sizeof(Object*));
+  if (LIKELY(field != NULL)) {
+    if (UNLIKELY(obj == NULL)) {
+      ThrowNullPointerExceptionForFieldAccess(self, field, true);
+    } else {
+      return field->GetObj(obj);
+    }
+  }
+  return NULL;  // Will throw exception by checking with Thread::Current
+}
+
+extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value,
+                                      const Method* referrer, Thread* self, Method** sp) {
+  Field* field = FindFieldFast(field_idx, referrer, true, true, sizeof(int32_t));
+  if (LIKELY(field != NULL)) {
+    field->Set32(NULL, new_value);
+    return 0;  // success
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode(field_idx, referrer, self, true, true, true, sizeof(int32_t));
+  if (LIKELY(field != NULL)) {
+    field->Set32(NULL, new_value);
+    return 0;  // success
+  }
+  return -1;  // failure
+}
+
+extern "C" int artSet64StaticFromCode(uint32_t field_idx, const Method* referrer,
+                                      uint64_t new_value, Thread* self, Method** sp) {
+  Field* field = FindFieldFast(field_idx, referrer, true, true, sizeof(int64_t));
+  if (LIKELY(field != NULL)) {
+    field->Set64(NULL, new_value);
+    return 0;  // success
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode(field_idx, referrer, self, true, true, true, sizeof(int64_t));
+  if (LIKELY(field != NULL)) {
+    field->Set64(NULL, new_value);
+    return 0;  // success
+  }
+  return -1;  // failure
+}
+
+extern "C" int artSetObjStaticFromCode(uint32_t field_idx, Object* new_value,
+                                       const Method* referrer, Thread* self, Method** sp) {
+  Field* field = FindFieldFast(field_idx, referrer, false, true, sizeof(Object*));
+  if (LIKELY(field != NULL)) {
+    if (LIKELY(!FieldHelper(field).IsPrimitiveType())) {
+      field->SetObj(NULL, new_value);
+      return 0;  // success
+    }
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode(field_idx, referrer, self, true, false, true, sizeof(Object*));
+  if (LIKELY(field != NULL)) {
+    field->SetObj(NULL, new_value);
+    return 0;  // success
+  }
+  return -1;  // failure
+}
+
+extern "C" int artSet32InstanceFromCode(uint32_t field_idx, Object* obj, uint32_t new_value,
+                                        const Method* referrer, Thread* self, Method** sp) {
+  Field* field = FindFieldFast(field_idx, referrer, true, true, sizeof(int32_t));
+  if (LIKELY(field != NULL && obj != NULL)) {
+    field->Set32(obj, new_value);
+    return 0;  // success
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode(field_idx, referrer, self, false, true, true, sizeof(int32_t));
+  if (LIKELY(field != NULL)) {
+    if (UNLIKELY(obj == NULL)) {
+      ThrowNullPointerExceptionForFieldAccess(self, field, false);
+    } else {
+      field->Set32(obj, new_value);
+      return 0;  // success
+    }
+  }
+  return -1;  // failure
+}
+
+extern "C" int artSet64InstanceFromCode(uint32_t field_idx, Object* obj, uint64_t new_value,
+                                        Thread* self, Method** sp) {
+  Method* callee_save = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsOnly);
+  Method* referrer = sp[callee_save->GetFrameSizeInBytes() / sizeof(Method*)];
+  Field* field = FindFieldFast(field_idx, referrer, true, true, sizeof(int64_t));
+  if (LIKELY(field != NULL  && obj != NULL)) {
+    field->Set64(obj, new_value);
+    return 0;  // success
+  }
+  *sp = callee_save;
+  self->SetTopOfStack(sp, 0);
+  field = FindFieldFromCode(field_idx, referrer, self, false, true, true, sizeof(int64_t));
+  if (LIKELY(field != NULL)) {
+    if (UNLIKELY(obj == NULL)) {
+      ThrowNullPointerExceptionForFieldAccess(self, field, false);
+    } else {
+      field->Set64(obj, new_value);
+      return 0;  // success
+    }
+  }
+  return -1;  // failure
+}
+
+extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, Object* obj, Object* new_value,
+                                         const Method* referrer, Thread* self, Method** sp) {
+  Field* field = FindFieldFast(field_idx, referrer, false, true, sizeof(Object*));
+  if (LIKELY(field != NULL && obj != NULL)) {
+    field->SetObj(obj, new_value);
+    return 0;  // success
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode(field_idx, referrer, self, false, false, true, sizeof(Object*));
+  if (LIKELY(field != NULL)) {
+    if (UNLIKELY(obj == NULL)) {
+      ThrowNullPointerExceptionForFieldAccess(self, field, false);
+    } else {
+      field->SetObj(obj, new_value);
+      return 0;  // success
+    }
+  }
+  return -1;  // failure
+}
+
+}  // namespace art
diff --git a/src/oat/runtime/support_fillarray.cc b/src/oat/runtime/support_fillarray.cc
new file mode 100644
index 0000000..eb1c46c
--- /dev/null
+++ b/src/oat/runtime/support_fillarray.cc
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "object.h"
+
+namespace art {
+
+/*
+ * Fill the array with predefined constant values, throwing exceptions if the array is null or
+ * not of sufficient length.
+ *
+ * NOTE: When dealing with a raw dex file, the data to be copied uses
+ * little-endian ordering.  Require that oat2dex do any required swapping
+ * so this routine can get by with a memcpy().
+ *
+ * Format of the data:
+ *  ushort ident = 0x0300   magic value
+ *  ushort width            width of each element in the table
+ *  uint   size             number of elements in the table
+ *  ubyte  data[size*width] table of data values (may contain a single-byte
+ *                          padding at the end)
+ */
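+/*
+ * Illustrative example: for new int[]{1, 2, 3} the payload would be ident=0x0300, width=4,
+ * size=3, followed by the little-endian 32-bit values 1, 2 and 3 (size_in_bytes below = 12).
+ */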
+extern "C" int artHandleFillArrayDataFromCode(Array* array, const uint16_t* table,
+                                              Thread* self, Method** sp) {
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  DCHECK_EQ(table[0], 0x0300);
+  if (UNLIKELY(array == NULL)) {
+    Thread::Current()->ThrowNewExceptionF("Ljava/lang/NullPointerException;",
+        "null array in fill array");
+    return -1;  // Error
+  }
+  DCHECK(array->IsArrayInstance() && !array->IsObjectArray());
+  uint32_t size = (uint32_t)table[2] | (((uint32_t)table[3]) << 16);
+  if (UNLIKELY(static_cast<int32_t>(size) > array->GetLength())) {
+    Thread::Current()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
+        "failed array fill. length=%d; index=%d", array->GetLength(), size);
+    return -1;  // Error
+  }
+  uint16_t width = table[1];
+  uint32_t size_in_bytes = size * width;
+  memcpy((char*)array + Array::DataOffset(width).Int32Value(), (char*)&table[4], size_in_bytes);
+  return 0;  // Success
+}
+
+}  // namespace art
diff --git a/src/oat/runtime/support_invoke.cc b/src/oat/runtime/support_invoke.cc
new file mode 100644
index 0000000..14040ce
--- /dev/null
+++ b/src/oat/runtime/support_invoke.cc
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "runtime_support.h"
+
+namespace art {
+
+static uint64_t artInvokeCommon(uint32_t method_idx, Object* this_object, Method* caller_method,
+                                Thread* self, Method** sp, bool access_check, InvokeType type){
+  Method* method = FindMethodFast(method_idx, this_object, caller_method, access_check, type);
+  if (UNLIKELY(method == NULL)) {
+    FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsAndArgs);
+    if (UNLIKELY(this_object == NULL && type != kDirect && type != kStatic)) {
+      ThrowNullPointerExceptionForMethodAccess(self, caller_method, method_idx, type);
+      return 0;  // failure
+    }
+    method = FindMethodFromCode(method_idx, this_object, caller_method, self, access_check, type);
+    if (UNLIKELY(method == NULL)) {
+      CHECK(self->IsExceptionPending());
+      return 0;  // failure
+    }
+  }
+  DCHECK(!self->IsExceptionPending());
+  const void* code = method->GetCode();
+
+  // When we return, the caller will branch to this address, so it had better not be 0!
+  CHECK(code != NULL) << PrettyMethod(method);
+
+  uint32_t method_uint = reinterpret_cast<uint32_t>(method);
+  uint64_t code_uint = reinterpret_cast<uint32_t>(code);
+  uint64_t result = ((code_uint << 32) | method_uint);
+  return result;
+}
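+
+// The packed return value is unpacked by the assembly callers (see
+// runtime_support_asm.S) roughly as follows (illustrative, 32-bit target):
+//   Method* method = reinterpret_cast<Method*>(static_cast<uintptr_t>(result & 0xffffffffULL));
+//   const void* code = reinterpret_cast<const void*>(static_cast<uintptr_t>(result >> 32));
+// A zero result signals that an exception is pending.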
+
+// See comments in runtime_support_asm.S
+extern "C" uint64_t artInvokeInterfaceTrampoline(uint32_t method_idx, Object* this_object,
+                                                 Method* caller_method, Thread* self,
+                                                 Method** sp) {
+  return artInvokeCommon(method_idx, this_object, caller_method, self, sp, false, kInterface);
+}
+
+extern "C" uint64_t artInvokeInterfaceTrampolineWithAccessCheck(uint32_t method_idx,
+                                                                Object* this_object,
+                                                                Method* caller_method, Thread* self,
+                                                                Method** sp) {
+  return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kInterface);
+}
+
+
+extern "C" uint64_t artInvokeDirectTrampolineWithAccessCheck(uint32_t method_idx,
+                                                             Object* this_object,
+                                                             Method* caller_method, Thread* self,
+                                                             Method** sp) {
+  return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kDirect);
+}
+
+extern "C" uint64_t artInvokeStaticTrampolineWithAccessCheck(uint32_t method_idx,
+                                                            Object* this_object,
+                                                            Method* caller_method, Thread* self,
+                                                            Method** sp) {
+  return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kStatic);
+}
+
+extern "C" uint64_t artInvokeSuperTrampolineWithAccessCheck(uint32_t method_idx,
+                                                            Object* this_object,
+                                                            Method* caller_method, Thread* self,
+                                                            Method** sp) {
+  return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kSuper);
+}
+
+extern "C" uint64_t artInvokeVirtualTrampolineWithAccessCheck(uint32_t method_idx,
+                                                              Object* this_object,
+                                                              Method* caller_method, Thread* self,
+                                                              Method** sp) {
+  return artInvokeCommon(method_idx, this_object, caller_method, self, sp, true, kVirtual);
+}
+
+}  // namespace art
diff --git a/src/oat/runtime/support_jni.cc b/src/oat/runtime/support_jni.cc
new file mode 100644
index 0000000..d74f78f
--- /dev/null
+++ b/src/oat/runtime/support_jni.cc
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2012 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "object.h"
+#include "object_utils.h"
+#include "thread.h"
+
+namespace art {
+
+// Used by the JNI dlsym stub to find the native method to invoke if none is registered.
+extern void* FindNativeMethod(Thread* self) {
+  DCHECK(Thread::Current() == self);
+
+  Method* method = const_cast<Method*>(self->GetCurrentMethod());
+  DCHECK(method != NULL);
+
+  // Lookup symbol address for method, on failure we'll return NULL with an
+  // exception set, otherwise we return the address of the method we found.
+  void* native_code = self->GetJniEnv()->vm->FindCodeForNativeMethod(method);
+  if (native_code == NULL) {
+    DCHECK(self->IsExceptionPending());
+    return NULL;
+  } else {
+    // Register so that future calls don't come here
+    method->RegisterNative(self, native_code);
+    return native_code;
+  }
+}
+
+// Return value helper for jobject return types, used for JNI return values.
+extern Object* DecodeJObjectInThread(Thread* thread, jobject obj) {
+  if (thread->IsExceptionPending()) {
+    return NULL;
+  }
+  return thread->DecodeJObject(obj);
+}
+
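+// Rewrite one argument slot in place: the slot holds an indirect reference
+// (an Object**), which is dereferenced and replaced with the raw Object* that
+// application JNI code with Dalvik-era pointer bugs expects.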
+static void WorkAroundJniBugsForJobject(intptr_t* arg_ptr) {
+  intptr_t value = *arg_ptr;
+  Object** value_as_jni_rep = reinterpret_cast<Object**>(value);
+  Object* value_as_work_around_rep = value_as_jni_rep != NULL ? *value_as_jni_rep : NULL;
+  CHECK(Runtime::Current()->GetHeap()->IsHeapAddress(value_as_work_around_rep)) << value_as_work_around_rep;
+  *arg_ptr = reinterpret_cast<intptr_t>(value_as_work_around_rep);
+}
+
+extern "C" const void* artWorkAroundAppJniBugs(Thread* self, intptr_t* sp) {
+  DCHECK(Thread::Current() == self);
+  // TODO: this code is specific to ARM
+  // On entry the stack pointed by sp is:
+  // | arg3   | <- Calling JNI method's frame (and extra bit for out args)
+  // | LR     |
+  // | R3     |    arg2
+  // | R2     |    arg1
+  // | R1     |    jclass/jobject
+  // | R0     |    JNIEnv
+  // | unused |
+  // | unused |
+  // | unused | <- sp
+  Method* jni_method = self->GetTopOfStack().GetMethod();
+  DCHECK(jni_method->IsNative()) << PrettyMethod(jni_method);
+  intptr_t* arg_ptr = sp + 4;  // pointer to r1 on stack
+  // Fix up this/jclass argument
+  WorkAroundJniBugsForJobject(arg_ptr);
+  arg_ptr++;
+  // Fix up jobject arguments
+  MethodHelper mh(jni_method);
+  int reg_num = 2;  // Current register being processed, -1 for stack arguments.
+  for (uint32_t i = 1; i < mh.GetShortyLength(); i++) {
+    char shorty_char = mh.GetShorty()[i];
+    if (shorty_char == 'L') {
+      WorkAroundJniBugsForJobject(arg_ptr);
+    }
+    if (shorty_char == 'J' || shorty_char == 'D') {
+      if (reg_num == 2) {
+        arg_ptr = sp + 8;  // skip to out arguments
+        reg_num = -1;
+      } else if (reg_num == 3) {
+        arg_ptr = sp + 10;  // skip to out arguments plus 2 slots as long must be aligned
+        reg_num = -1;
+      } else {
+        DCHECK(reg_num == -1);
+        if ((reinterpret_cast<intptr_t>(arg_ptr) & 7) == 4) {
+          arg_ptr += 3;  // unaligned, pad and move through stack arguments
+        } else {
+          arg_ptr += 2;  // aligned, move through stack arguments
+        }
+      }
+    } else {
+      if (reg_num == 2) {
+        arg_ptr++; // move through register arguments
+        reg_num++;
+      } else if (reg_num == 3) {
+        arg_ptr = sp + 8;  // skip to outgoing stack arguments
+        reg_num = -1;
+      } else {
+        DCHECK(reg_num == -1);
+        arg_ptr++;  // move through stack arguments
+      }
+    }
+  }
+  // Load expected destination, see Method::RegisterNative
+  const void* code = reinterpret_cast<const void*>(jni_method->GetGcMapRaw());
+  if (UNLIKELY(code == NULL)) {
+    code = Runtime::Current()->GetJniDlsymLookupStub()->GetData();
+    jni_method->RegisterNative(self, code);
+  }
+  return code;
+}
+
+}  // namespace art
diff --git a/src/oat/runtime/support_locks.cc b/src/oat/runtime/support_locks.cc
new file mode 100644
index 0000000..30fc567
--- /dev/null
+++ b/src/oat/runtime/support_locks.cc
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "object.h"
+
+namespace art {
+
+extern "C" int artUnlockObjectFromCode(Object* obj, Thread* self, Method** sp) {
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  DCHECK(obj != NULL);  // Assumed to have been checked before entry
+  // MonitorExit may throw exception
+  return obj->MonitorExit(self) ? 0 /* Success */ : -1 /* Failure */;
+}
+
+extern "C" void artLockObjectFromCode(Object* obj, Thread* thread, Method** sp) {
+  FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly);
+  DCHECK(obj != NULL);        // Assumed to have been checked before entry
+  obj->MonitorEnter(thread);  // May block
+  DCHECK(thread->HoldsLock(obj));
+  // The only possible exception is an NPE, and it is handled before entry
+  DCHECK(!thread->IsExceptionPending());
+}
+
+}  // namespace art
diff --git a/src/oat/runtime/support_math.cc b/src/oat/runtime/support_math.cc
new file mode 100644
index 0000000..cb5f705
--- /dev/null
+++ b/src/oat/runtime/support_math.cc
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2012 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+namespace art {
+
+int CmplFloat(float a, float b) {
+  if (a == b) {
+    return 0;
+  } else if (a < b) {
+    return -1;
+  } else if (a > b) {
+    return 1;
+  }
+  return -1;
+}
+
+int CmpgFloat(float a, float b) {
+  if (a == b) {
+    return 0;
+  } else if (a < b) {
+    return -1;
+  } else if (a > b) {
+    return 1;
+  }
+  return 1;
+}
+
+int CmpgDouble(double a, double b) {
+  if (a == b) {
+    return 0;
+  } else if (a < b) {
+    return -1;
+  } else if (a > b) {
+    return 1;
+  }
+  return 1;
+}
+
+int CmplDouble(double a, double b) {
+  if (a == b) {
+    return 0;
+  } else if (a < b) {
+    return -1;
+  } else if (a > b) {
+    return 1;
+  }
+  return -1;
+}
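+
+// The 'l' and 'g' variants above differ only in the NaN case: cmpl returns -1
+// when either operand is NaN while cmpg returns 1, matching the semantics of
+// the cmpl-/cmpg-float and -double dex instructions.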
+
+/*
+ * Float/double to long conversion requires clamping to the min and max of the
+ * integer form.  If the target doesn't do this natively, use these.
+ */
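+// For example (illustrative): D2L(1e300) yields the clamped value INT64_MAX,
+// D2L(-1e300) yields INT64_MIN, and D2L(NaN) yields 0, as dex double-to-long
+// conversion requires.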
+int64_t D2L(double d) {
+  static const double kMaxLong = (double) (int64_t) 0x7fffffffffffffffULL;
+  static const double kMinLong = (double) (int64_t) 0x8000000000000000ULL;
+  if (d >= kMaxLong) {
+    return (int64_t) 0x7fffffffffffffffULL;
+  } else if (d <= kMinLong) {
+    return (int64_t) 0x8000000000000000ULL;
+  } else if (d != d)  { // NaN case
+    return 0;
+  } else {
+    return (int64_t) d;
+  }
+}
+
+int64_t F2L(float f) {
+  static const float kMaxLong = (float) (int64_t) 0x7fffffffffffffffULL;
+  static const float kMinLong = (float) (int64_t) 0x8000000000000000ULL;
+  if (f >= kMaxLong) {
+    return (int64_t) 0x7fffffffffffffffULL;
+  } else if (f <= kMinLong) {
+    return (int64_t) 0x8000000000000000ULL;
+  } else if (f != f) { // NaN case
+    return 0;
+  } else {
+    return (int64_t) f;
+  }
+}
+
+}  // namespace art
diff --git a/src/oat/runtime/support_proxy.cc b/src/oat/runtime/support_proxy.cc
new file mode 100644
index 0000000..a9c7ebe
--- /dev/null
+++ b/src/oat/runtime/support_proxy.cc
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2012 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "object.h"
+#include "object_utils.h"
+#include "reflection.h"
+#include "thread.h"
+
+#include "ScopedLocalRef.h"
+
+namespace art {
+
+static void ThrowNewUndeclaredThrowableException(Thread* self, JNIEnv* env, Throwable* exception) {
+  ScopedLocalRef<jclass> jlr_UTE_class(env,
+      env->FindClass("java/lang/reflect/UndeclaredThrowableException"));
+  if (jlr_UTE_class.get() == NULL) {
+    LOG(ERROR) << "Couldn't throw new \"java/lang/reflect/UndeclaredThrowableException\"";
+  } else {
+    jmethodID jlre_UTE_constructor = env->GetMethodID(jlr_UTE_class.get(), "<init>",
+                                                      "(Ljava/lang/Throwable;)V");
+    jthrowable jexception = AddLocalReference<jthrowable>(env, exception);
+    ScopedLocalRef<jthrowable> jlr_UTE(env,
+        reinterpret_cast<jthrowable>(env->NewObject(jlr_UTE_class.get(), jlre_UTE_constructor,
+                                                    jexception)));
+    int rc = env->Throw(jlr_UTE.get());
+    if (rc != JNI_OK) {
+      LOG(ERROR) << "Couldn't throw new \"java/lang/reflect/UndeclaredThrowableException\"";
+    }
+  }
+  CHECK(self->IsExceptionPending());
+}
+
+// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
+// which is responsible for recording callee save registers. We explicitly handlerize incoming
+// reference arguments (so they survive GC) and create a boxed argument array. Finally we invoke
+// the invocation handler which is a field within the proxy object receiver.
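+// In Java terms the net effect is roughly:
+//   Object result = proxy.h.invoke(proxy, interfaceMethod, boxedArgs);
+// with primitive arguments boxed on the way in and the boxed result unboxed
+// into the return slot on the way out.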
+extern "C" void artProxyInvokeHandler(Method* proxy_method, Object* receiver,
+                                      Thread* self, byte* stack_args) {
+  // Register the top of the managed stack
+  Method** proxy_sp = reinterpret_cast<Method**>(stack_args - 12);
+  DCHECK_EQ(*proxy_sp, proxy_method);
+  self->SetTopOfStack(proxy_sp, 0);
+  // TODO: ARM specific
+  DCHECK_EQ(proxy_method->GetFrameSizeInBytes(), 48u);
+  // Start new JNI local reference state
+  JNIEnvExt* env = self->GetJniEnv();
+  ScopedJniEnvLocalRefState env_state(env);
+  // Create local ref. copies of proxy method and the receiver
+  jobject rcvr_jobj = AddLocalReference<jobject>(env, receiver);
+  jobject proxy_method_jobj = AddLocalReference<jobject>(env, proxy_method);
+
+  // Place incoming arguments from the caller's register arguments into local
+  // references, replacing each original Object* with a jobject
+  MethodHelper proxy_mh(proxy_method);
+  const size_t num_params = proxy_mh.NumArgs();
+  size_t args_in_regs = 0;
+  for (size_t i = 1; i < num_params; i++) {  // skip receiver
+    args_in_regs = args_in_regs + (proxy_mh.IsParamALongOrDouble(i) ? 2 : 1);
+    if (args_in_regs > 2) {
+      args_in_regs = 2;
+      break;
+    }
+  }
+  size_t cur_arg = 0;  // current stack location to read
+  size_t param_index = 1;  // skip receiver
+  while (cur_arg < args_in_regs && param_index < num_params) {
+    if (proxy_mh.IsParamAReference(param_index)) {
+      Object* obj = *reinterpret_cast<Object**>(stack_args + (cur_arg * kPointerSize));
+      jobject jobj = AddLocalReference<jobject>(env, obj);
+      *reinterpret_cast<jobject*>(stack_args + (cur_arg * kPointerSize)) = jobj;
+    }
+    cur_arg = cur_arg + (proxy_mh.IsParamALongOrDouble(param_index) ? 2 : 1);
+    param_index++;
+  }
+  // Place incoming arguments from the caller's stack into local references
+  cur_arg += 11;  // skip callee saves, LR, Method* and out arg spills for R1 to R3
+  while (param_index < num_params) {
+    if (proxy_mh.IsParamAReference(param_index)) {
+      Object* obj = *reinterpret_cast<Object**>(stack_args + (cur_arg * kPointerSize));
+      jobject jobj = AddLocalReference<jobject>(env, obj);
+      *reinterpret_cast<jobject*>(stack_args + (cur_arg * kPointerSize)) = jobj;
+    }
+    cur_arg = cur_arg + (proxy_mh.IsParamALongOrDouble(param_index) ? 2 : 1);
+    param_index++;
+  }
+  // Set up arguments array and place in local IRT during boxing (which may allocate/GC)
+  jvalue args_jobj[3];
+  args_jobj[0].l = rcvr_jobj;
+  args_jobj[1].l = proxy_method_jobj;
+  // Args array, if no arguments then NULL (don't include receiver in argument count)
+  args_jobj[2].l = NULL;
+  ObjectArray<Object>* args = NULL;
+  if ((num_params - 1) > 0) {
+    args = Runtime::Current()->GetClassLinker()->AllocObjectArray<Object>(num_params - 1);
+    if (args == NULL) {
+      CHECK(self->IsExceptionPending());
+      return;
+    }
+    args_jobj[2].l = AddLocalReference<jobjectArray>(env, args);
+  }
+  // Convert proxy method into expected interface method
+  Method* interface_method = proxy_method->FindOverriddenMethod();
+  DCHECK(interface_method != NULL);
+  DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
+  args_jobj[1].l = AddLocalReference<jobject>(env, interface_method);
+  // Box arguments
+  cur_arg = 0;  // reset stack location to read to start
+  // reset index, will index into param type array which doesn't include the receiver
+  param_index = 0;
+  ObjectArray<Class>* param_types = proxy_mh.GetParameterTypes();
+  if (param_types == NULL) {
+    CHECK(self->IsExceptionPending());
+    return;
+  }
+  // Check number of parameter types agrees with number from the Method - less 1 for the receiver.
+  DCHECK_EQ(static_cast<size_t>(param_types->GetLength()), num_params - 1);
+  while (cur_arg < args_in_regs && param_index < (num_params - 1)) {
+    Class* param_type = param_types->Get(param_index);
+    Object* obj;
+    if (!param_type->IsPrimitive()) {
+      obj = self->DecodeJObject(*reinterpret_cast<jobject*>(stack_args + (cur_arg * kPointerSize)));
+    } else {
+      JValue val = *reinterpret_cast<JValue*>(stack_args + (cur_arg * kPointerSize));
+      if (cur_arg == 1 && (param_type->IsPrimitiveLong() || param_type->IsPrimitiveDouble())) {
+        // long/double split over regs and stack, mask in high half from stack arguments
+        uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args + (13 * kPointerSize));
+        val.j = (val.j & 0xffffffffULL) | (high_half << 32);
+      }
+      BoxPrimitive(param_type->GetPrimitiveType(), val);
+      if (self->IsExceptionPending()) {
+        return;
+      }
+      obj = val.l;
+    }
+    args->Set(param_index, obj);
+    cur_arg = cur_arg + (param_type->IsPrimitiveLong() || param_type->IsPrimitiveDouble() ? 2 : 1);
+    param_index++;
+  }
+  // Box incoming arguments from the caller's stack arguments
+  cur_arg += 11;  // skip callee saves, LR, Method* and out arg spills for R1 to R3
+  while (param_index < (num_params - 1)) {
+    Class* param_type = param_types->Get(param_index);
+    Object* obj;
+    if (!param_type->IsPrimitive()) {
+      obj = self->DecodeJObject(*reinterpret_cast<jobject*>(stack_args + (cur_arg * kPointerSize)));
+    } else {
+      JValue val = *reinterpret_cast<JValue*>(stack_args + (cur_arg * kPointerSize));
+      BoxPrimitive(param_type->GetPrimitiveType(), val);
+      if (self->IsExceptionPending()) {
+        return;
+      }
+      obj = val.l;
+    }
+    args->Set(param_index, obj);
+    cur_arg = cur_arg + (param_type->IsPrimitiveLong() || param_type->IsPrimitiveDouble() ? 2 : 1);
+    param_index++;
+  }
+  // Get the InvocationHandler method and the field that holds it within the Proxy object
+  static jmethodID inv_hand_invoke_mid = NULL;
+  static jfieldID proxy_inv_hand_fid = NULL;
+  if (proxy_inv_hand_fid == NULL) {
+    ScopedLocalRef<jclass> proxy(env, env->FindClass("java/lang/reflect/Proxy"));
+    proxy_inv_hand_fid = env->GetFieldID(proxy.get(), "h", "Ljava/lang/reflect/InvocationHandler;");
+    ScopedLocalRef<jclass> inv_hand_class(env, env->FindClass("java/lang/reflect/InvocationHandler"));
+    inv_hand_invoke_mid = env->GetMethodID(inv_hand_class.get(), "invoke",
+        "(Ljava/lang/Object;Ljava/lang/reflect/Method;[Ljava/lang/Object;)Ljava/lang/Object;");
+  }
+  DCHECK(env->IsInstanceOf(rcvr_jobj, env->FindClass("java/lang/reflect/Proxy")));
+  jobject inv_hand = env->GetObjectField(rcvr_jobj, proxy_inv_hand_fid);
+  // Call InvocationHandler.invoke
+  jobject result = env->CallObjectMethodA(inv_hand, inv_hand_invoke_mid, args_jobj);
+  // Place result in stack args
+  if (!self->IsExceptionPending()) {
+    Object* result_ref = self->DecodeJObject(result);
+    if (result_ref != NULL) {
+      JValue result_unboxed;
+      bool unboxed_okay = UnboxPrimitive(result_ref, proxy_mh.GetReturnType(), result_unboxed, "result");
+      if (!unboxed_okay) {
+        self->ClearException();
+        self->ThrowNewExceptionF("Ljava/lang/ClassCastException;",
+                                 "Couldn't convert result of type %s to %s",
+                                 PrettyTypeOf(result_ref).c_str(),
+                                 PrettyDescriptor(proxy_mh.GetReturnType()).c_str());
+        return;
+      }
+      *reinterpret_cast<JValue*>(stack_args) = result_unboxed;
+    } else {
+      *reinterpret_cast<jobject*>(stack_args) = NULL;
+    }
+  } else {
+    // In the case of checked exceptions that aren't declared, the exception must be wrapped by
+    // a UndeclaredThrowableException.
+    Throwable* exception = self->GetException();
+    self->ClearException();
+    if (!exception->IsCheckedException()) {
+      self->SetException(exception);
+    } else {
+      SynthesizedProxyClass* proxy_class =
+          down_cast<SynthesizedProxyClass*>(proxy_method->GetDeclaringClass());
+      int throws_index = -1;
+      size_t num_virt_methods = proxy_class->NumVirtualMethods();
+      for (size_t i = 0; i < num_virt_methods; i++) {
+        if (proxy_class->GetVirtualMethod(i) == proxy_method) {
+          throws_index = i;
+          break;
+        }
+      }
+      CHECK_NE(throws_index, -1);
+      ObjectArray<Class>* declared_exceptions = proxy_class->GetThrows()->Get(throws_index);
+      Class* exception_class = exception->GetClass();
+      bool declares_exception = false;
+      for (int i = 0; i < declared_exceptions->GetLength() && !declares_exception; i++) {
+        Class* declared_exception = declared_exceptions->Get(i);
+        declares_exception = declared_exception->IsAssignableFrom(exception_class);
+      }
+      if (declares_exception) {
+        self->SetException(exception);
+      } else {
+        ThrowNewUndeclaredThrowableException(self, env, exception);
+      }
+    }
+  }
+}
+
+}  // namespace art
diff --git a/src/oat/runtime/support_stubs.cc b/src/oat/runtime/support_stubs.cc
new file mode 100644
index 0000000..cb224e8
--- /dev/null
+++ b/src/oat/runtime/support_stubs.cc
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2012 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "dex_instruction.h"
+#include "object.h"
+#include "object_utils.h"
+
+// Architecture specific assembler helper to deliver exception.
+extern "C" void art_deliver_exception_from_code(void*);
+
+namespace art {
+
+// Lazily resolve a method. Called by stub code.
+const void* UnresolvedDirectMethodTrampolineFromCode(Method* called, Method** sp, Thread* thread,
+                                                     Runtime::TrampolineType type) {
+  // TODO: this code is specific to ARM
+  // On entry the stack pointed by sp is:
+  // | argN       |  |
+  // | ...        |  |
+  // | arg4       |  |
+  // | arg3 spill |  |  Caller's frame
+  // | arg2 spill |  |
+  // | arg1 spill |  |
+  // | Method*    | ---
+  // | LR         |
+  // | ...        |    callee saves
+  // | R3         |    arg3
+  // | R2         |    arg2
+  // | R1         |    arg1
+  // | R0         |
+  // | Method*    |  <- sp
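+  // On exit the stub branches to the returned code pointer with R0 (regs[0])
+  // rewritten below to hold either the resolved Method*, or the pending
+  // exception when resolution fails and art_deliver_exception_from_code is
+  // returned instead.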
+  uintptr_t* regs = reinterpret_cast<uintptr_t*>(reinterpret_cast<byte*>(sp) + kPointerSize);
+  DCHECK_EQ(48U, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
+  Method** caller_sp = reinterpret_cast<Method**>(reinterpret_cast<byte*>(sp) + 48);
+  uintptr_t caller_pc = regs[10];
+  FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsAndArgs);
+  // Start new JNI local reference state
+  JNIEnvExt* env = thread->GetJniEnv();
+  ScopedJniEnvLocalRefState env_state(env);
+
+  // Compute details about the called method (avoid GCs)
+  ClassLinker* linker = Runtime::Current()->GetClassLinker();
+  Method* caller = *caller_sp;
+  bool is_static;
+  bool is_virtual;
+  uint32_t dex_method_idx;
+  const char* shorty;
+  uint32_t shorty_len;
+  if (type == Runtime::kUnknownMethod) {
+    DCHECK(called->IsRuntimeMethod());
+    // Subtract two because the return address may map into the next dex instruction
+    uint32_t dex_pc = caller->ToDexPC(caller_pc - 2);
+    const DexFile::CodeItem* code = MethodHelper(caller).GetCodeItem();
+    CHECK_LT(dex_pc, code->insns_size_in_code_units_);
+    const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
+    Instruction::Code instr_code = instr->Opcode();
+    is_static = (instr_code == Instruction::INVOKE_STATIC) ||
+                (instr_code == Instruction::INVOKE_STATIC_RANGE);
+    is_virtual = (instr_code == Instruction::INVOKE_VIRTUAL) ||
+                 (instr_code == Instruction::INVOKE_VIRTUAL_RANGE) ||
+                 (instr_code == Instruction::INVOKE_SUPER) ||
+                 (instr_code == Instruction::INVOKE_SUPER_RANGE);
+    DCHECK(is_static || is_virtual || (instr_code == Instruction::INVOKE_DIRECT) ||
+           (instr_code == Instruction::INVOKE_DIRECT_RANGE));
+    DecodedInstruction dec_insn(instr);
+    dex_method_idx = dec_insn.vB;
+    shorty = linker->MethodShorty(dex_method_idx, caller, &shorty_len);
+  } else {
+    DCHECK(!called->IsRuntimeMethod());
+    is_static = type == Runtime::kStaticMethod;
+    is_virtual = false;
+    dex_method_idx = called->GetDexMethodIndex();
+    MethodHelper mh(called);
+    shorty = mh.GetShorty();
+    shorty_len = mh.GetShortyLength();
+  }
+  // Discover shorty (avoid GCs)
+  size_t args_in_regs = 0;
+  for (size_t i = 1; i < shorty_len; i++) {
+    char c = shorty[i];
+    args_in_regs = args_in_regs + (c == 'J' || c == 'D' ? 2 : 1);
+    if (args_in_regs > 3) {
+      args_in_regs = 3;
+      break;
+    }
+  }
+  // Place incoming arguments from the caller's register arguments into local references
+  size_t cur_arg = 1;   // skip method_idx in R0, first arg is in R1
+  if (!is_static) {
+    Object* obj = reinterpret_cast<Object*>(regs[cur_arg]);
+    cur_arg++;
+    if (args_in_regs < 3) {
+      // If we thought we had fewer than 3 arguments in registers, account for the receiver
+      args_in_regs++;
+    }
+    AddLocalReference<jobject>(env, obj);
+  }
+  size_t shorty_index = 1;  // skip return value
+  // Iterate while there are both remaining arguments and remaining register slots
+  // (cur_arg is offset by 1 to skip R0)
+  while ((cur_arg - 1) < args_in_regs && shorty_index < shorty_len) {
+    char c = shorty[shorty_index];
+    shorty_index++;
+    if (c == 'L') {
+      Object* obj = reinterpret_cast<Object*>(regs[cur_arg]);
+      AddLocalReference<jobject>(env, obj);
+    }
+    cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1);
+  }
+  // Place incoming arguments from the caller's stack into local references
+  cur_arg += 11;  // skip LR, Method* and spills for R1 to R3 and callee saves
+  while (shorty_index < shorty_len) {
+    char c = shorty[shorty_index];
+    shorty_index++;
+    if (c == 'L') {
+      Object* obj = reinterpret_cast<Object*>(regs[cur_arg]);
+      AddLocalReference<jobject>(env, obj);
+    }
+    cur_arg = cur_arg + (c == 'J' || c == 'D' ? 2 : 1);
+  }
+  // Resolve method filling in dex cache
+  if (type == Runtime::kUnknownMethod) {
+    called = linker->ResolveMethod(dex_method_idx, caller, !is_virtual);
+  }
+  const void* code = NULL;
+  if (LIKELY(!thread->IsExceptionPending())) {
+    if (LIKELY(called->IsDirect() == !is_virtual)) {
+      // Ensure that the called method's class is initialized.
+      Class* called_class = called->GetDeclaringClass();
+      linker->EnsureInitialized(called_class, true);
+      if (LIKELY(called_class->IsInitialized())) {
+        code = called->GetCode();
+      } else if (called_class->IsInitializing()) {
+        if (is_static) {
+          // Class is still initializing, go to oat and grab code (trampoline must be left in place
+          // until class is initialized to stop races between threads).
+          code = linker->GetOatCodeFor(called);
+        } else {
+          // No trampoline for non-static methods.
+          code = called->GetCode();
+        }
+      } else {
+        DCHECK(called_class->IsErroneous());
+      }
+    } else {
+      // Direct method has been made virtual
+      thread->ThrowNewExceptionF("Ljava/lang/IncompatibleClassChangeError;",
+                                 "Expected direct method but found virtual: %s",
+                                 PrettyMethod(called, true).c_str());
+    }
+  }
+  if (UNLIKELY(code == NULL)) {
+    // Something went wrong in ResolveMethod or EnsureInitialized,
+    // go into deliver exception with the pending exception in r0
+    code = reinterpret_cast<void*>(art_deliver_exception_from_code);
+    regs[0] = reinterpret_cast<uintptr_t>(thread->GetException());
+    thread->ClearException();
+  } else {
+    // Expect class to at least be initializing.
+    DCHECK(called->GetDeclaringClass()->IsInitializing());
+    // Don't want infinite recursion.
+    DCHECK(code != Runtime::Current()->GetResolutionStubArray(Runtime::kUnknownMethod)->GetData());
+    // Set up entry into main method
+    regs[0] = reinterpret_cast<uintptr_t>(called);
+  }
+  return code;
+}
+
+// Throws an AbstractMethodError when an abstract method is invoked. Called by stub code.
+extern void ThrowAbstractMethodErrorFromCode(Method* method, Thread* thread, Method** sp) {
+  FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll);
+  thread->ThrowNewExceptionF("Ljava/lang/AbstractMethodError;",
+                             "abstract method \"%s\"", PrettyMethod(method).c_str());
+  thread->DeliverException();
+}
+
+}  // namespace art
diff --git a/src/oat/runtime/support_thread.cc b/src/oat/runtime/support_thread.cc
new file mode 100644
index 0000000..ed04673
--- /dev/null
+++ b/src/oat/runtime/support_thread.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "thread.h"
+#include "thread_list.h"
+
+namespace art {
+
+void CheckSuspendFromCode(Thread* thread) {
+  // Called when thread->suspend_count_ != 0
+  Runtime::Current()->GetThreadList()->FullSuspendCheck(thread);
+}
+
+extern "C" void artTestSuspendFromCode(Thread* thread, Method** sp) {
+  // Called when suspend count check value is 0 and thread->suspend_count_ != 0
+  FinishCalleeSaveFrameSetup(thread, sp, Runtime::kRefsOnly);
+  Runtime::Current()->GetThreadList()->FullSuspendCheck(thread);
+}
+
+}  // namespace art
diff --git a/src/oat/runtime/support_throw.cc b/src/oat/runtime/support_throw.cc
new file mode 100644
index 0000000..9f46b2b
--- /dev/null
+++ b/src/oat/runtime/support_throw.cc
@@ -0,0 +1,255 @@
+/*
+ * Copyright 2012 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "callee_save_frame.h"
+#include "dex_verifier.h"
+#include "object.h"
+#include "object_utils.h"
+#include "runtime_support.h"
+#include "thread.h"
+
+namespace art {
+
+// Deliver an exception that is pending on the thread, setting up a callee save frame on the way.
+extern "C" void artDeliverPendingExceptionFromCode(Thread* thread, Method** sp) {
+  FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll);
+  thread->DeliverException();
+}
+
+// Called by generated call to throw an exception.
+extern "C" void artDeliverExceptionFromCode(Throwable* exception, Thread* thread, Method** sp) {
+  /*
+   * exception may be NULL, in which case this routine should
+   * throw an NPE.  NOTE: this is a convenience for generated code,
+   * which previously did the null check inline and constructed
+   * and threw an NPE if NULL.  This routine is responsible for setting
+   * exception_ in the thread and delivering the exception.
+   */
+  FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll);
+  if (exception == NULL) {
+    thread->ThrowNewException("Ljava/lang/NullPointerException;", "throw with null exception");
+  } else {
+    thread->SetException(exception);
+  }
+  thread->DeliverException();
+}
+
+// Called by generated call to throw a NPE exception.
+extern "C" void artThrowNullPointerExceptionFromCode(Thread* self, Method** sp) {
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+  Frame fr = self->GetTopOfStack();
+  uintptr_t throw_native_pc = fr.GetReturnPC();
+  fr.Next();
+  Method* throw_method = fr.GetMethod();
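+  // Subtract two because the return address may map into the next dex instruction.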
+  uint32_t dex_pc = throw_method->ToDexPC(throw_native_pc - 2);
+  const DexFile::CodeItem* code = MethodHelper(throw_method).GetCodeItem();
+  CHECK_LT(dex_pc, code->insns_size_in_code_units_);
+  const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
+  DecodedInstruction dec_insn(instr);
+  switch (instr->Opcode()) {
+    case Instruction::INVOKE_DIRECT:
+    case Instruction::INVOKE_DIRECT_RANGE:
+      ThrowNullPointerExceptionForMethodAccess(self, throw_method, dec_insn.vB, kDirect);
+      break;
+    case Instruction::INVOKE_VIRTUAL:
+    case Instruction::INVOKE_VIRTUAL_RANGE:
+      ThrowNullPointerExceptionForMethodAccess(self, throw_method, dec_insn.vB, kVirtual);
+      break;
+    case Instruction::IGET:
+    case Instruction::IGET_WIDE:
+    case Instruction::IGET_OBJECT:
+    case Instruction::IGET_BOOLEAN:
+    case Instruction::IGET_BYTE:
+    case Instruction::IGET_CHAR:
+    case Instruction::IGET_SHORT: {
+      Field* field =
+          Runtime::Current()->GetClassLinker()->ResolveField(dec_insn.vC, throw_method, false);
+      ThrowNullPointerExceptionForFieldAccess(self, field, true /* read */);
+      break;
+    }
+    case Instruction::IPUT:
+    case Instruction::IPUT_WIDE:
+    case Instruction::IPUT_OBJECT:
+    case Instruction::IPUT_BOOLEAN:
+    case Instruction::IPUT_BYTE:
+    case Instruction::IPUT_CHAR:
+    case Instruction::IPUT_SHORT: {
+      Field* field =
+          Runtime::Current()->GetClassLinker()->ResolveField(dec_insn.vC, throw_method, false);
+      ThrowNullPointerExceptionForFieldAccess(self, field, false /* write */);
+      break;
+    }
+    case Instruction::AGET:
+    case Instruction::AGET_WIDE:
+    case Instruction::AGET_OBJECT:
+    case Instruction::AGET_BOOLEAN:
+    case Instruction::AGET_BYTE:
+    case Instruction::AGET_CHAR:
+    case Instruction::AGET_SHORT:
+      self->ThrowNewException("Ljava/lang/NullPointerException;",
+                              "Attempt to read from null array");
+      break;
+    case Instruction::APUT:
+    case Instruction::APUT_WIDE:
+    case Instruction::APUT_OBJECT:
+    case Instruction::APUT_BOOLEAN:
+    case Instruction::APUT_BYTE:
+    case Instruction::APUT_CHAR:
+    case Instruction::APUT_SHORT:
+      self->ThrowNewException("Ljava/lang/NullPointerException;",
+                              "Attempt to write to null array");
+      break;
+    default: {
+      const DexFile& dex_file = Runtime::Current()->GetClassLinker()
+          ->FindDexFile(throw_method->GetDeclaringClass()->GetDexCache());
+      std::string message("Null pointer exception during instruction '");
+      message += instr->DumpString(&dex_file);
+      message += "'";
+      self->ThrowNewException("Ljava/lang/NullPointerException;", message.c_str());
+      break;
+    }
+  }
+  self->DeliverException();
+}
+
+// Called by generated call to throw an arithmetic divide by zero exception.
+extern "C" void artThrowDivZeroFromCode(Thread* thread, Method** sp) {
+  FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll);
+  thread->ThrowNewException("Ljava/lang/ArithmeticException;", "divide by zero");
+  thread->DeliverException();
+}
+
+// Called by generated call to throw an array index out of bounds exception.
+extern "C" void artThrowArrayBoundsFromCode(int index, int limit, Thread* thread, Method** sp) {
+  FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll);
+  thread->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
+                             "length=%d; index=%d", limit, index);
+  thread->DeliverException();
+}
+
+extern "C" void artThrowStackOverflowFromCode(Thread* thread, Method** sp) {
+  FinishCalleeSaveFrameSetup(thread, sp, Runtime::kSaveAll);
+  // Remove extra entry pushed onto second stack during method tracing
+  if (Runtime::Current()->IsMethodTracingActive()) {
+    TraceMethodUnwindFromCode(thread);
+  }
+  thread->SetStackEndForStackOverflow();  // Allow space on the stack for constructor to execute
+  thread->ThrowNewExceptionF("Ljava/lang/StackOverflowError;",
+      "stack size %zdkb; default stack size: %zdkb",
+      thread->GetStackSize() / KB, Runtime::Current()->GetDefaultStackSize() / KB);
+  thread->ResetDefaultStackEnd();  // Return to default stack size
+  thread->DeliverException();
+}
+
+extern "C" void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self, Method** sp) {
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+  Frame frame = self->GetTopOfStack();  // We need the calling method as context for the method_idx
+  frame.Next();
+  Method* method = frame.GetMethod();
+  self->ThrowNewException("Ljava/lang/NoSuchMethodError;",
+      MethodNameFromIndex(method, method_idx, verifier::VERIFY_ERROR_REF_METHOD, false).c_str());
+  self->DeliverException();
+}
+
+static std::string ClassNameFromIndex(Method* method, uint32_t ref,
+                                      verifier::VerifyErrorRefType ref_type, bool access) {
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  const DexFile& dex_file = class_linker->FindDexFile(method->GetDeclaringClass()->GetDexCache());
+
+  uint16_t type_idx = 0;
+  if (ref_type == verifier::VERIFY_ERROR_REF_FIELD) {
+    const DexFile::FieldId& id = dex_file.GetFieldId(ref);
+    type_idx = id.class_idx_;
+  } else if (ref_type == verifier::VERIFY_ERROR_REF_METHOD) {
+    const DexFile::MethodId& id = dex_file.GetMethodId(ref);
+    type_idx = id.class_idx_;
+  } else if (ref_type == verifier::VERIFY_ERROR_REF_CLASS) {
+    type_idx = ref;
+  } else {
+    CHECK(false) << static_cast<int>(ref_type);
+  }
+
+  std::string class_name(PrettyDescriptor(dex_file.StringByTypeIdx(type_idx)));
+  if (!access) {
+    return class_name;
+  }
+
+  std::string result;
+  result += "tried to access class ";
+  result += class_name;
+  result += " from class ";
+  result += PrettyDescriptor(method->GetDeclaringClass());
+  return result;
+}
+
+extern "C" void artThrowVerificationErrorFromCode(int32_t kind, int32_t ref, Thread* self, Method** sp) {
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kSaveAll);
+  Frame frame = self->GetTopOfStack();  // We need the calling method as context to interpret 'ref'
+  frame.Next();
+  Method* method = frame.GetMethod();
+
+  verifier::VerifyErrorRefType ref_type =
+      static_cast<verifier::VerifyErrorRefType>(kind >> verifier::kVerifyErrorRefTypeShift);
+
+  const char* exception_class = "Ljava/lang/VerifyError;";
+  std::string msg;
+
+  switch (static_cast<verifier::VerifyError>(kind & ~(0xff << verifier::kVerifyErrorRefTypeShift))) {
+  case verifier::VERIFY_ERROR_NO_CLASS:
+    exception_class = "Ljava/lang/NoClassDefFoundError;";
+    msg = ClassNameFromIndex(method, ref, ref_type, false);
+    break;
+  case verifier::VERIFY_ERROR_NO_FIELD:
+    exception_class = "Ljava/lang/NoSuchFieldError;";
+    msg = FieldNameFromIndex(method, ref, ref_type, false);
+    break;
+  case verifier::VERIFY_ERROR_NO_METHOD:
+    exception_class = "Ljava/lang/NoSuchMethodError;";
+    msg = MethodNameFromIndex(method, ref, ref_type, false);
+    break;
+  case verifier::VERIFY_ERROR_ACCESS_CLASS:
+    exception_class = "Ljava/lang/IllegalAccessError;";
+    msg = ClassNameFromIndex(method, ref, ref_type, true);
+    break;
+  case verifier::VERIFY_ERROR_ACCESS_FIELD:
+    exception_class = "Ljava/lang/IllegalAccessError;";
+    msg = FieldNameFromIndex(method, ref, ref_type, true);
+    break;
+  case verifier::VERIFY_ERROR_ACCESS_METHOD:
+    exception_class = "Ljava/lang/IllegalAccessError;";
+    msg = MethodNameFromIndex(method, ref, ref_type, true);
+    break;
+  case verifier::VERIFY_ERROR_CLASS_CHANGE:
+    exception_class = "Ljava/lang/IncompatibleClassChangeError;";
+    msg = ClassNameFromIndex(method, ref, ref_type, false);
+    break;
+  case verifier::VERIFY_ERROR_INSTANTIATION:
+    exception_class = "Ljava/lang/InstantiationError;";
+    msg = ClassNameFromIndex(method, ref, ref_type, false);
+    break;
+  case verifier::VERIFY_ERROR_BAD_CLASS_SOFT:
+  case verifier::VERIFY_ERROR_BAD_CLASS_HARD:
+    // Generic VerifyError; use default exception, no message.
+    break;
+  case verifier::VERIFY_ERROR_NONE:
+    CHECK(false);
+    break;
+  }
+  self->ThrowNewException(exception_class, msg.c_str());
+  self->DeliverException();
+}
+
+}  // namespace art
diff --git a/src/oat/runtime/support_trace.cc b/src/oat/runtime/support_trace.cc
new file mode 100644
index 0000000..5c6a46e
--- /dev/null
+++ b/src/oat/runtime/support_trace.cc
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2012 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "runtime.h"
+#include "thread.h"
+#include "trace.h"
+
+namespace art {
+
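+// Entry hook for method tracing: logs the enter event, saves the real return
+// address on the thread's trace stack, and returns the method's original code
+// (saved when tracing was installed) so the stub can continue into the method.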
+extern "C" const void* artTraceMethodEntryFromCode(Method* method, Thread* self, uintptr_t lr) {
+  Trace* tracer = Runtime::Current()->GetTracer();
+  TraceStackFrame trace_frame = TraceStackFrame(method, lr);
+  self->PushTraceStackFrame(trace_frame);
+
+  tracer->LogMethodTraceEvent(self, method, Trace::kMethodTraceEnter);
+
+  return tracer->GetSavedCodeFromMap(method);
+}
+
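+// Exit hook for method tracing: logs the exit event and returns the original
+// return address recorded by the entry hook above.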
+extern "C" uintptr_t artTraceMethodExitFromCode() {
+  Trace* tracer = Runtime::Current()->GetTracer();
+  TraceStackFrame trace_frame = Thread::Current()->PopTraceStackFrame();
+  Method* method = trace_frame.method_;
+  uintptr_t lr = trace_frame.return_pc_;
+
+  tracer->LogMethodTraceEvent(Thread::Current(), method, Trace::kMethodTraceExit);
+
+  return lr;
+}
+
+}  // namespace art
diff --git a/src/oat/runtime/x86/context_x86.cc b/src/oat/runtime/x86/context_x86.cc
new file mode 100644
index 0000000..35bfd01
--- /dev/null
+++ b/src/oat/runtime/x86/context_x86.cc
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "context_x86.h"
+
+#include "object.h"
+
+namespace art {
+namespace x86 {
+
+X86Context::X86Context() {
+#ifndef NDEBUG
+  // Initialize registers with easy to spot debug values
+  for (int i = 0; i < 8; i++) {
+    gprs_[i] = 0xEBAD6070+i;
+  }
+  eip_ = 0xEBAD601F;
+#endif
+}
+
+void X86Context::FillCalleeSaves(const Frame& fr) {
+  Method* method = fr.GetMethod();
+  uint32_t core_spills = method->GetCoreSpillMask();
+  size_t spill_count = __builtin_popcount(core_spills);
+  CHECK_EQ(method->GetFpSpillMask(), 0u);
+  if (spill_count > 0) {
+    // Lowest number spill is furthest away, walk registers and fill into context
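+    // For example (illustrative), with EBP, ESI and EDI in core_spills,
+    // spill_count is 3: EBP (the lowest-numbered register) is loaded from
+    // LoadCalleeSave(2), ESI from LoadCalleeSave(1) and EDI from LoadCalleeSave(0).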
+    int j = 1;
+    for (int i = 0; i < 8; i++) {
+      if (((core_spills >> i) & 1) != 0) {
+        gprs_[i] = fr.LoadCalleeSave(spill_count - j);
+        j++;
+      }
+    }
+  }
+}
+
+void X86Context::DoLongJump() {
+#if defined(__i386__)
+  // Load ESP and EIP
+  gprs_[ESP] -= 4;  // push EIP for return
+  *(reinterpret_cast<uintptr_t*>(gprs_[ESP])) = eip_;
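+  // ESP is pushed first and again in popal's slot: popal restores the other
+  // GPRs (discarding its ESP image), "popl %%esp" then consumes the first push
+  // to switch stacks, and "ret" pops the eip_ value written above.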
+  asm volatile(
+      "pushl %4\n\t"
+      "pushl %0\n\t"
+      "pushl %1\n\t"
+      "pushl %2\n\t"
+      "pushl %3\n\t"
+      "pushl %4\n\t"
+      "pushl %5\n\t"
+      "pushl %6\n\t"
+      "pushl %7\n\t"
+      "popal\n\t"
+      "popl %%esp\n\t"
+      "ret\n\t"
+      :  //output
+      : "g"(gprs_[EAX]), "g"(gprs_[ECX]), "g"(gprs_[EDX]), "g"(gprs_[EBX]),
+        "g"(gprs_[ESP]), "g"(gprs_[EBP]), "g"(gprs_[ESI]), "g"(gprs_[EDI])
+      :);  // clobber
+#else
+    UNIMPLEMENTED(FATAL);
+#endif
+}
+
+}  // namespace x86
+}  // namespace art
diff --git a/src/oat/runtime/x86/context_x86.h b/src/oat/runtime/x86/context_x86.h
new file mode 100644
index 0000000..72dc719
--- /dev/null
+++ b/src/oat/runtime/x86/context_x86.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_RUNTIME_X86_CONTEXT_X86_H_
+#define ART_SRC_OAT_RUNTIME_X86_CONTEXT_X86_H_
+
+#include "constants_x86.h"
+#include "oat/runtime/context.h"
+
+namespace art {
+namespace x86 {
+
+class X86Context : public Context {
+ public:
+  X86Context();
+  virtual ~X86Context() {}
+
+  // No callee saves on X86
+  virtual void FillCalleeSaves(const Frame& fr);
+
+  virtual void SetSP(uintptr_t new_sp) {
+    gprs_[ESP] = new_sp;
+  }
+
+  virtual void SetPC(uintptr_t new_pc) {
+    eip_ = new_pc;
+  }
+
+  virtual uintptr_t GetGPR(uint32_t reg) {
+    CHECK_GE(reg, 0u);
+    CHECK_LT(reg, 8u);
+    return gprs_[reg];
+  }
+
+  virtual void DoLongJump();
+
+ private:
+  uintptr_t gprs_[8];
+  uintptr_t eip_;
+};
+}  // namespace x86
+}  // namespace art
+
+#endif  // ART_SRC_OAT_RUNTIME_X86_CONTEXT_X86_H_
diff --git a/src/oat/runtime/x86/oat_support_entrypoints_x86.cc b/src/oat/runtime/x86/oat_support_entrypoints_x86.cc
new file mode 100644
index 0000000..5d525a9
--- /dev/null
+++ b/src/oat/runtime/x86/oat_support_entrypoints_x86.cc
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "oat/runtime/oat_support_entrypoints.h"
+
+namespace art {
+
+// Alloc entrypoints.
+extern "C" void* art_alloc_array_from_code(uint32_t, void*, int32_t);
+extern "C" void* art_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_alloc_object_from_code(uint32_t type_idx, void* method);
+extern "C" void* art_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method);
+extern "C" void* art_check_and_alloc_array_from_code(uint32_t, void*, int32_t);
+extern "C" void* art_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+
+// Cast entrypoints.
+extern uint32_t IsAssignableFromCode(const Class* klass, const Class* ref_class);
+extern "C" void art_can_put_array_element_from_code(void*, void*);
+extern "C" void art_check_cast_from_code(void*, void*);
+
+// Debug entrypoints.
+extern void DebugMe(Method* method, uint32_t info);
+
+// DexCache entrypoints.
+extern "C" void* art_initialize_static_storage_from_code(uint32_t, void*);
+extern "C" void* art_initialize_type_from_code(uint32_t, void*);
+extern "C" void* art_initialize_type_and_verify_access_from_code(uint32_t, void*);
+extern "C" void* art_resolve_string_from_code(void*, uint32_t);
+
+// Field entrypoints.
+extern "C" int art_set32_instance_from_code(uint32_t, void*, int32_t);
+extern "C" int art_set32_static_from_code(uint32_t, int32_t);
+extern "C" int art_set64_instance_from_code(uint32_t, void*, int64_t);
+extern "C" int art_set64_static_from_code(uint32_t, int64_t);
+extern "C" int art_set_obj_instance_from_code(uint32_t, void*, void*);
+extern "C" int art_set_obj_static_from_code(uint32_t, void*);
+extern "C" int32_t art_get32_instance_from_code(uint32_t, void*);
+extern "C" int32_t art_get32_static_from_code(uint32_t);
+extern "C" int64_t art_get64_instance_from_code(uint32_t, void*);
+extern "C" int64_t art_get64_static_from_code(uint32_t);
+extern "C" void* art_get_obj_instance_from_code(uint32_t, void*);
+extern "C" void* art_get_obj_static_from_code(uint32_t);
+
+// FillArray entrypoint.
+extern "C" void art_handle_fill_data_from_code(void*, void*);
+
+// JNI entrypoints.
+extern Object* DecodeJObjectInThread(Thread* thread, jobject obj);
+extern void* FindNativeMethod(Thread* thread);
+
+// Lock entrypoints.
+extern "C" void art_lock_object_from_code(void*);
+extern "C" void art_unlock_object_from_code(void*);
+
+// Math entrypoints.
+extern int32_t CmpgDouble(double a, double b);
+extern int32_t CmplDouble(double a, double b);
+extern int32_t CmpgFloat(float a, float b);
+extern int32_t CmplFloat(float a, float b);
+extern int64_t D2L(double d);
+extern int64_t F2L(float f);
+
+// Intrinsic entrypoints.
+extern "C" int32_t __memcmp16(void*, void*, int32_t);
+extern "C" int32_t art_indexof(void*, uint32_t, uint32_t, uint32_t);
+extern "C" int32_t art_string_compareto(void*, void*);
+
+// Invoke entrypoints.
+const void* UnresolvedDirectMethodTrampolineFromCode(Method*, Method**, Thread*,
+                                                     Runtime::TrampolineType);
+extern "C" void art_invoke_direct_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_invoke_interface_trampoline(uint32_t, void*);
+extern "C" void art_invoke_interface_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_invoke_static_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_invoke_super_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
+
+// Thread entrypoints.
+extern void CheckSuspendFromCode(Thread* thread);
+extern "C" void art_test_suspend();
+
+// Throw entrypoints.
+extern void ThrowAbstractMethodErrorFromCode(Method* method, Thread* thread, Method** sp);
+extern "C" void art_deliver_exception_from_code(void*);
+extern "C" void art_throw_array_bounds_from_code(int32_t index, int32_t limit);
+extern "C" void art_throw_div_zero_from_code();
+extern "C" void art_throw_no_such_method_from_code(int32_t method_idx);
+extern "C" void art_throw_null_pointer_exception_from_code();
+extern "C" void art_throw_stack_overflow_from_code(void*);
+extern "C" void art_throw_verification_error_from_code(int32_t src1, int32_t ref);
+
+void InitEntryPoints(EntryPoints* points) {
+  // Alloc
+  points->pAllocArrayFromCode = art_alloc_array_from_code;
+  points->pAllocArrayFromCodeWithAccessCheck = art_alloc_array_from_code_with_access_check;
+  points->pAllocObjectFromCode = art_alloc_object_from_code;
+  points->pAllocObjectFromCodeWithAccessCheck = art_alloc_object_from_code_with_access_check;
+  points->pCheckAndAllocArrayFromCode = art_check_and_alloc_array_from_code;
+  points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_check_and_alloc_array_from_code_with_access_check;
+
+  // Cast
+  points->pInstanceofNonTrivialFromCode = IsAssignableFromCode;
+  points->pCanPutArrayElementFromCode = art_can_put_array_element_from_code;
+  points->pCheckCastFromCode = art_check_cast_from_code;
+
+  // Debug
+  points->pDebugMe = DebugMe;
+  points->pUpdateDebuggerFromCode = NULL; // Controlled by SetDebuggerUpdatesEnabled.
+
+  // DexCache
+  points->pInitializeStaticStorage = art_initialize_static_storage_from_code;
+  points->pInitializeTypeAndVerifyAccessFromCode = art_initialize_type_and_verify_access_from_code;
+  points->pInitializeTypeFromCode = art_initialize_type_from_code;
+  points->pResolveStringFromCode = art_resolve_string_from_code;
+
+  // Field
+  points->pSet32Instance = art_set32_instance_from_code;
+  points->pSet32Static = art_set32_static_from_code;
+  points->pSet64Instance = art_set64_instance_from_code;
+  points->pSet64Static = art_set64_static_from_code;
+  points->pSetObjInstance = art_set_obj_instance_from_code;
+  points->pSetObjStatic = art_set_obj_static_from_code;
+  points->pGet32Instance = art_get32_instance_from_code;
+  points->pGet64Instance = art_get64_instance_from_code;
+  points->pGetObjInstance = art_get_obj_instance_from_code;
+  points->pGet32Static = art_get32_static_from_code;
+  points->pGet64Static = art_get64_static_from_code;
+  points->pGetObjStatic = art_get_obj_static_from_code;
+
+  // FillArray
+  points->pHandleFillArrayDataFromCode = art_handle_fill_data_from_code;
+
+  // JNI
+  points->pDecodeJObjectInThread = DecodeJObjectInThread;
+  points->pFindNativeMethod = FindNativeMethod;
+
+  // Locks
+  points->pLockObjectFromCode = art_lock_object_from_code;
+  points->pUnlockObjectFromCode = art_unlock_object_from_code;
+
+  // Math
+  points->pCmpgDouble = CmpgDouble;
+  points->pCmpgFloat = CmpgFloat;
+  points->pCmplDouble = CmplDouble;
+  points->pCmplFloat = CmplFloat;
+  points->pDadd = NULL;
+  points->pDdiv = NULL;
+  points->pDmul = NULL;
+  points->pDsub = NULL;
+  points->pF2d = NULL;
+  points->pFmod = NULL;
+  points->pI2d = NULL;
+  points->pL2d = NULL;
+  points->pD2f = NULL;
+  points->pFadd = NULL;
+  points->pFdiv = NULL;
+  points->pFmodf = NULL;
+  points->pFmul = NULL;
+  points->pFsub = NULL;
+  points->pI2f = NULL;
+  points->pL2f = NULL;
+  points->pD2iz = NULL;
+  points->pF2iz = NULL;
+  points->pIdiv = NULL;
+  points->pIdivmod = NULL;
+  points->pD2l = D2L;
+  points->pF2l = F2L;
+  points->pLadd = NULL;
+  points->pLand = NULL;
+  points->pLdivmod = NULL;
+  points->pLmul = NULL;
+  points->pLor = NULL;
+  points->pLsub = NULL;
+  points->pLxor = NULL;
+  points->pShlLong = NULL;
+  points->pShrLong = NULL;
+  points->pUshrLong = NULL;
+
+  // Intrinsics
+  points->pIndexOf = art_indexof;
+  points->pMemcmp16 = __memcmp16;
+  points->pStringCompareTo = art_string_compareto;
+  points->pMemcpy = memcpy;
+
+  // Invocation
+  points->pUnresolvedDirectMethodTrampolineFromCode = UnresolvedDirectMethodTrampolineFromCode;
+  points->pInvokeDirectTrampolineWithAccessCheck = art_invoke_direct_trampoline_with_access_check;
+  points->pInvokeInterfaceTrampoline = art_invoke_interface_trampoline;
+  points->pInvokeInterfaceTrampolineWithAccessCheck = art_invoke_interface_trampoline_with_access_check;
+  points->pInvokeStaticTrampolineWithAccessCheck = art_invoke_static_trampoline_with_access_check;
+  points->pInvokeSuperTrampolineWithAccessCheck = art_invoke_super_trampoline_with_access_check;
+  points->pInvokeVirtualTrampolineWithAccessCheck = art_invoke_virtual_trampoline_with_access_check;
+
+  // Thread
+  points->pCheckSuspendFromCode = CheckSuspendFromCode;
+  points->pTestSuspendFromCode = art_test_suspend;
+
+  // Throws
+  points->pDeliverException = art_deliver_exception_from_code;
+  points->pThrowAbstractMethodErrorFromCode = ThrowAbstractMethodErrorFromCode;
+  points->pThrowArrayBoundsFromCode = art_throw_array_bounds_from_code;
+  points->pThrowDivZeroFromCode = art_throw_div_zero_from_code;
+  points->pThrowNoSuchMethodFromCode = art_throw_no_such_method_from_code;
+  points->pThrowNullPointerFromCode = art_throw_null_pointer_exception_from_code;
+  points->pThrowStackOverflowFromCode = art_throw_stack_overflow_from_code;
+  points->pThrowVerificationErrorFromCode = art_throw_verification_error_from_code;
+}
+
+void ChangeDebuggerEntryPoint(EntryPoints*, bool) {
+  UNIMPLEMENTED(FATAL);
+}
+
+bool IsTraceExitPc(uintptr_t) {
+  return false;
+}
+
+void* GetLogTraceEntryPoint() {
+  return NULL;
+}
+
+}  // namespace art
diff --git a/src/oat/runtime/x86/runtime_support_x86.S b/src/oat/runtime/x86/runtime_support_x86.S
new file mode 100644
index 0000000..d344c34
--- /dev/null
+++ b/src/oat/runtime/x86/runtime_support_x86.S
@@ -0,0 +1,321 @@
+#include "asm_support.h"
+
+#if defined(__APPLE__)
+    // Mac OS X mangles the functions with an underscore prefix
+    #define art_deliver_exception_from_code _art_deliver_exception_from_code
+    #define art_proxy_invoke_handler _art_proxy_invoke_handler
+    #define artDeliverExceptionFromCode _artDeliverExceptionFromCode
+#endif
+
+    /* Deliver the given exception */
+    .extern artDeliverExceptionFromCode
+    /* Deliver an exception pending on a thread */
+    .extern artDeliverPendingExceptionFromCode
+
+    /* Cache alignment for function entry */
+.macro ALIGN_FUNCTION_ENTRY
+    .balign 16
+.endm
+
+    /*
+     * Macro that sets up the callee save frame to conform with
+     * Runtime::CreateCalleeSaveMethod(...)
+     */
+.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+    pushl %edi  // Save callee saves (ebx is saved/restored by the upcall)
+    pushl %esi
+    pushl %ebp
+    subl  $16, %esp  // Grow stack by 4 words, bottom word will hold Method*
+.endm
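+
+    /*
+     * Illustrative frame layout after SETUP_SAVE_ALL_CALLEE_SAVE_FRAME (stack grows down):
+     * return address, EDI, ESI, EBP, three words of padding, then the bottom word at ESP
+     * that is reserved for the callee-save Method*.
+     */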
+
+.macro RESTORE_CALLEE_SAVE_FRAME
+    addl $16, %esp  // Remove the Method* slot and padding
+    popl %ebp  // Restore callee saves
+    popl %esi
+    popl %edi
+.endm
+
+    /*
+     * Macro that sets up the callee save frame to conform with
+     * Runtime::CreateCalleeSaveMethod(...)
+     */
+.macro SETUP_REF_AND_ARG_CALLEE_SAVE_FRAME
+    pushl %edi  // Save callee saves
+    pushl %esi
+    pushl %ebp
+    pushl %ebx  // Save args
+    pushl %edx
+    pushl %ecx
+    pushl %eax  // Align stack, eax will be clobbered by Method*
+.endm
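+
+    /*
+     * Sketch of the resulting frame (stack grows down): return address, EDI, ESI, EBP,
+     * EBX, EDX, ECX, then the EAX slot at ESP that the Method* will overwrite.
+     */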
+
+.macro RESTORE_REF_AND_ARG_CALLEE_SAVE_FRAME
+    addl $16, %esp  // Discard the EAX/Method* slot and the saved args (ECX, EDX, EBX)
+    popl %ebp  // Restore callee saves
+    popl %esi
+    popl %edi
+.endm
+
+    /*
+     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
+     * exception is Thread::Current()->exception_.
+     */
+.macro DELIVER_PENDING_EXCEPTION
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME         // save callee saves for throw
+    mov %esp, %ecx
+    // Outgoing argument set up
+    subl  $8, %esp                           // Alignment padding
+    pushl %ecx                               // pass SP
+    pushl %fs:THREAD_SELF_OFFSET             // pass Thread::Current()
+    call artDeliverPendingExceptionFromCode  // artDeliverPendingExceptionFromCode(Thread*, SP)
+    int3
+.endm
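+
+    /*
+     * Note: the 8 bytes of padding plus the two argument pushes keep the outgoing
+     * argument area at 16 bytes, matching the padding used by the other call
+     * set-ups in this file (presumably to preserve 16-byte stack alignment).
+     */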
+
+.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
+    .global \c_name
+    .extern \cxx_name
+    ALIGN_FUNCTION_ENTRY
+\c_name:
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context
+    mov %esp, %ecx
+    // Outgoing argument set up
+    subl  $8, %esp                // alignment padding
+    pushl %ecx                    // pass SP
+    pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
+    call \cxx_name                // \cxx_name(Thread*, SP)
+    int3                          // unreached
+.endm
+
+.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
+    .global \c_name
+    .extern \cxx_name
+    ALIGN_FUNCTION_ENTRY
+\c_name:
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context
+    mov %esp, %ecx
+    // Outgoing argument set up
+    pushl $0                      // alignment padding
+    pushl %ecx                    // pass SP
+    pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
+    pushl %eax                    // pass arg1
+    call \cxx_name                // \cxx_name(arg1, Thread*, SP)
+    int3                          // unreached
+.endm
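+
+    /*
+     * At the call above the outgoing arguments sit on the stack as, from ESP up:
+     * arg1, Thread*, SP, padding - i.e. the left-to-right cdecl order for
+     * \cxx_name(arg1, Thread*, SP).
+     */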
+
+.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
+    .global \c_name
+    .extern \cxx_name
+    ALIGN_FUNCTION_ENTRY
+\c_name:
+    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // save all registers as basis for long jump context
+    mov %esp, %edx
+    // Outgoing argument set up
+    pushl %edx                    // pass SP
+    pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
+    pushl %ecx                    // pass arg2
+    pushl %eax                    // pass arg1
+    call \cxx_name                // \cxx_name(arg1, arg2, Thread*, SP)
+    int3                          // unreached
+.endm
+
+    /*
+     * Called by managed code, saves callee saves and then calls artThrowException
+     * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
+     */
+ONE_ARG_RUNTIME_EXCEPTION art_deliver_exception_from_code, artDeliverExceptionFromCode
+
+    /*
+     * Called by managed code to create and deliver a NullPointerException.
+     */
+NO_ARG_RUNTIME_EXCEPTION art_throw_null_pointer_exception_from_code, artThrowNullPointerExceptionFromCode
+
+    /*
+     * Called by managed code to create and deliver an ArithmeticException.
+     */
+NO_ARG_RUNTIME_EXCEPTION art_throw_div_zero_from_code, artThrowDivZeroFromCode
+
+    /*
+     * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
+     * index, arg2 holds limit.
+     */
+TWO_ARG_RUNTIME_EXCEPTION art_throw_array_bounds_from_code, artThrowArrayBoundsFromCode
+
+    /*
+     * Called by managed code to create and deliver a StackOverflowError.
+     */
+NO_ARG_RUNTIME_EXCEPTION art_throw_stack_overflow_from_code, artThrowStackOverflowFromCode
+
+    /*
+     * Called by managed code to create and deliver a NoSuchMethodError.
+     */
+ONE_ARG_RUNTIME_EXCEPTION art_throw_no_such_method_from_code, artThrowNoSuchMethodFromCode
+
+    /*
+     * Called by managed code to create and deliver verification errors. Arg1 is kind, arg2 is ref.
+     */
+TWO_ARG_RUNTIME_EXCEPTION art_throw_verification_error_from_code, artThrowVerificationErrorFromCode
+
+    /*
+     * All generated callsites for interface invokes and invocation slow paths will load arguments
+     * as usual - except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
+     * the method_idx.  This wrapper will save arg1-arg3, load the caller's Method*, align the
+     * stack and call the appropriate C helper.
+     * NOTE: "this" is the first visible argument of the target, and so can be found in arg1/r1.
+     *
+     * The helper will attempt to locate the target and return a 64-bit result in r0/r1 consisting
+     * of the target Method* in r0 and method->code_ in r1.
+     *
+     * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+     * thread and we branch to another stub to deliver it.
+     *
+     * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
+     * pointing back to the original caller.
+     */
+.macro INVOKE_TRAMPOLINE c_name, cxx_name
+    .global \c_name
+    .extern \cxx_name
+    ALIGN_FUNCTION_ENTRY
+\c_name:
+    int3
+.endm
+
+INVOKE_TRAMPOLINE art_invoke_interface_trampoline, artInvokeInterfaceTrampoline
+INVOKE_TRAMPOLINE art_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
+
+INVOKE_TRAMPOLINE art_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
+
+    // TODO
+    .globl art_proxy_invoke_handler
+art_proxy_invoke_handler:
+    int3
+
+    .globl art_update_debugger
+art_update_debugger:
+    int3
+
+    .globl art_test_suspend
+art_test_suspend:
+    int3
+
+    .globl art_alloc_object_from_code
+art_alloc_object_from_code:
+    int3
+
+    .globl art_alloc_object_from_code_with_access_check
+art_alloc_object_from_code_with_access_check:
+    int3
+
+    .globl art_alloc_array_from_code
+art_alloc_array_from_code:
+    int3
+
+    .globl art_alloc_array_from_code_with_access_check
+art_alloc_array_from_code_with_access_check:
+    int3
+
+    .globl art_check_and_alloc_array_from_code
+art_check_and_alloc_array_from_code:
+    int3
+
+    .globl art_check_and_alloc_array_from_code_with_access_check
+art_check_and_alloc_array_from_code_with_access_check:
+    int3
+
+    .globl art_can_put_array_element_from_code
+art_can_put_array_element_from_code:
+    int3
+
+    .globl art_check_cast_from_code
+art_check_cast_from_code:
+    int3
+
+    .globl art_initialize_static_storage_from_code
+art_initialize_static_storage_from_code:
+    int3
+
+    .globl art_initialize_type_and_verify_access_from_code
+art_initialize_type_and_verify_access_from_code:
+    int3
+
+    .globl art_initialize_type_from_code
+art_initialize_type_from_code:
+    int3
+
+    .globl art_resolve_string_from_code
+art_resolve_string_from_code:
+    int3
+
+    .globl art_set32_instance_from_code
+art_set32_instance_from_code:
+    int3
+
+    .globl art_set64_instance_from_code
+art_set64_instance_from_code:
+    int3
+
+    .globl art_set_obj_instance_from_code
+art_set_obj_instance_from_code:
+    int3
+
+    .globl art_get32_instance_from_code
+art_get32_instance_from_code:
+    int3
+
+    .globl art_get64_instance_from_code
+art_get64_instance_from_code:
+    int3
+
+    .globl art_get_obj_instance_from_code
+art_get_obj_instance_from_code:
+    int3
+
+    .globl art_set32_static_from_code
+art_set32_static_from_code:
+    int3
+
+    .globl art_set64_static_from_code
+art_set64_static_from_code:
+    int3
+
+    .globl art_set_obj_static_from_code
+art_set_obj_static_from_code:
+    int3
+
+    .globl art_get32_static_from_code
+art_get32_static_from_code:
+    int3
+
+    .globl art_get64_static_from_code
+art_get64_static_from_code:
+    int3
+
+    .globl art_get_obj_static_from_code
+art_get_obj_static_from_code:
+    int3
+
+    .globl art_handle_fill_data_from_code
+art_handle_fill_data_from_code:
+    int3
+
+    .globl art_lock_object_from_code
+art_lock_object_from_code:
+    int3
+
+    .globl art_unlock_object_from_code
+art_unlock_object_from_code:
+    int3
+
+    .globl art_indexof
+art_indexof:
+    int3
+
+    .globl __memcmp16
+__memcmp16:
+    int3
+
+    .globl art_string_compareto
+art_string_compareto:
+    int3
diff --git a/src/oat/runtime/x86/stub_x86.cc b/src/oat/runtime/x86/stub_x86.cc
new file mode 100644
index 0000000..14e4f23
--- /dev/null
+++ b/src/oat/runtime/x86/stub_x86.cc
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_internal.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "oat/utils/x86/assembler_x86.h"
+#include "object.h"
+#include "stack_indirect_reference_table.h"
+
+#define __ assembler->
+
+namespace art {
+namespace x86 {
+
+ByteArray* X86CreateResolutionTrampoline(Runtime::TrampolineType) {
+  UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
+
+  // TODO: unimplemented
+  __ int3();
+
+  assembler->EmitSlowPaths();
+  size_t cs = assembler->CodeSize();
+  SirtRef<ByteArray> resolution_trampoline(ByteArray::Alloc(cs));
+  CHECK(resolution_trampoline.get() != NULL);
+  MemoryRegion code(resolution_trampoline->GetData(), resolution_trampoline->GetLength());
+  assembler->FinalizeInstructions(code);
+
+  return resolution_trampoline.get();
+}
+
+typedef void (*ThrowAme)(Method*, Thread*);
+
+ByteArray* CreateAbstractMethodErrorStub() {
+  UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
+
+  // The caller's return address is already on the stack; build the callee-save frame
+  // below it (same layout as SETUP_SAVE_ALL_CALLEE_SAVE_FRAME in runtime_support_x86.S).
+  __ pushl(EDI);
+  __ pushl(ESI);
+  __ pushl(EBP);
+  __ pushl(Immediate(0));
+  __ pushl(Immediate(0));
+  __ pushl(Immediate(0));
+  __ pushl(Immediate(0));  // <-- callee save Method* to go here
+  __ movl(ECX, ESP);       // save ESP
+  __ pushl(Immediate(0));  // align frame
+  __ pushl(ECX);           // pass ESP for Method*
+  __ fs()->pushl(Address::Absolute(Thread::SelfOffset()));  // Thread*
+  __ pushl(EAX);           // pass Method*
+
+  // Call to throw AbstractMethodError.
+  __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pThrowAbstractMethodErrorFromCode)),
+          X86ManagedRegister::FromCpuRegister(ECX));
+
+#if defined(ART_USE_LLVM_COMPILER)
+  // Return to caller who will handle pending exception.
+  __ addl(ESP, Immediate(32));
+  __ popl(EBP);
+  __ popl(ESI);
+  __ popl(EDI);
+  __ ret();
+#else
+  // Call never returns.
+  __ int3();
+#endif
+
+  assembler->EmitSlowPaths();
+
+  size_t cs = assembler->CodeSize();
+  SirtRef<ByteArray> abstract_stub(ByteArray::Alloc(cs));
+  CHECK(abstract_stub.get() != NULL);
+  MemoryRegion code(abstract_stub->GetData(), abstract_stub->GetLength());
+  assembler->FinalizeInstructions(code);
+
+  return abstract_stub.get();
+}
+
+ByteArray* CreateJniDlsymLookupStub() {
+  UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
+
+  // Pad stack to ensure 16-byte alignment
+  __ pushl(Immediate(0));
+  __ pushl(Immediate(0));
+  __ fs()->pushl(Address::Absolute(Thread::SelfOffset()));  // Thread*
+
+  __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pFindNativeMethod)),
+          X86ManagedRegister::FromCpuRegister(ECX));
+
+  __ addl(ESP, Immediate(12));  // Pop padding and the Thread* argument.
+
+  Label no_native_code_found;  // forward declaration
+  __ cmpl(EAX, Immediate(0));
+  __ j(kEqual, &no_native_code_found);
+
+  __ jmp(EAX);  // Tail call into native code
+
+  __ Bind(&no_native_code_found);
+  __ ret(); // return to caller to handle exception
+
+  assembler->EmitSlowPaths();
+
+  size_t cs = assembler->CodeSize();
+  SirtRef<ByteArray> jni_stub(ByteArray::Alloc(cs));
+  CHECK(jni_stub.get() != NULL);
+  MemoryRegion code(jni_stub->GetData(), jni_stub->GetLength());
+  assembler->FinalizeInstructions(code);
+
+  return jni_stub.get();
+}
+
+}  // namespace x86
+}  // namespace art
diff --git a/src/oat/utils/arm/assembler_arm.cc b/src/oat/utils/arm/assembler_arm.cc
new file mode 100644
index 0000000..2392c7d
--- /dev/null
+++ b/src/oat/utils/arm/assembler_arm.cc
@@ -0,0 +1,1925 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_arm.h"
+
+#include "logging.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "offsets.h"
+#include "thread.h"
+#include "utils.h"
+
+namespace art {
+namespace arm {
+
+// Instruction encoding bits.
+enum {
+  H   = 1 << 5,   // halfword (or byte)
+  L   = 1 << 20,  // load (or store)
+  S   = 1 << 20,  // set condition code (or leave unchanged)
+  W   = 1 << 21,  // writeback base register (or leave unchanged)
+  A   = 1 << 21,  // accumulate in multiply instruction (or not)
+  B   = 1 << 22,  // unsigned byte (or word)
+  N   = 1 << 22,  // long (or short)
+  U   = 1 << 23,  // positive (or negative) offset/index
+  P   = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
+  I   = 1 << 25,  // immediate shifter operand (or not)
+
+  B0 = 1,
+  B1 = 1 << 1,
+  B2 = 1 << 2,
+  B3 = 1 << 3,
+  B4 = 1 << 4,
+  B5 = 1 << 5,
+  B6 = 1 << 6,
+  B7 = 1 << 7,
+  B8 = 1 << 8,
+  B9 = 1 << 9,
+  B10 = 1 << 10,
+  B11 = 1 << 11,
+  B12 = 1 << 12,
+  B16 = 1 << 16,
+  B17 = 1 << 17,
+  B18 = 1 << 18,
+  B19 = 1 << 19,
+  B20 = 1 << 20,
+  B21 = 1 << 21,
+  B22 = 1 << 22,
+  B23 = 1 << 23,
+  B24 = 1 << 24,
+  B25 = 1 << 25,
+  B26 = 1 << 26,
+  B27 = 1 << 27,
+
+  // Instruction bit masks.
+  RdMask = 15 << 12,  // in str instruction
+  CondMask = 15 << 28,
+  CoprocessorMask = 15 << 8,
+  OpCodeMask = 15 << 21,  // in data-processing instructions
+  Imm24Mask = (1 << 24) - 1,
+  Off12Mask = (1 << 12) - 1,
+
+  // ldrex/strex register field encodings.
+  kLdExRnShift = 16,
+  kLdExRtShift = 12,
+  kStrExRnShift = 16,
+  kStrExRdShift = 12,
+  kStrExRtShift = 0,
+};
+
+
+static const char* kRegisterNames[] = {
+  "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+  "fp", "ip", "sp", "lr", "pc"
+};
+std::ostream& operator<<(std::ostream& os, const Register& rhs) {
+  if (rhs >= R0 && rhs <= PC) {
+    os << kRegisterNames[rhs];
+  } else {
+    os << "Register[" << static_cast<int>(rhs) << "]";
+  }
+  return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const SRegister& rhs) {
+  if (rhs >= S0 && rhs < kNumberOfSRegisters) {
+    os << "s" << static_cast<int>(rhs);
+  } else {
+    os << "SRegister[" << static_cast<int>(rhs) << "]";
+  }
+  return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
+  if (rhs >= D0 && rhs < kNumberOfDRegisters) {
+    os << "d" << static_cast<int>(rhs);
+  } else {
+    os << "DRegister[" << static_cast<int>(rhs) << "]";
+  }
+  return os;
+}
+
+
+static const char* kConditionNames[] = {
+  "EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC", "HI", "LS", "GE", "LT", "GT",
+  "LE", "AL",
+};
+std::ostream& operator<<(std::ostream& os, const Condition& rhs) {
+  if (rhs >= EQ && rhs <= AL) {
+    os << kConditionNames[rhs];
+  } else {
+    os << "Condition[" << static_cast<int>(rhs) << "]";
+  }
+  return os;
+}
+
+void ArmAssembler::Emit(int32_t value) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  buffer_.Emit<int32_t>(value);
+}
+
+
+void ArmAssembler::EmitType01(Condition cond,
+                              int type,
+                              Opcode opcode,
+                              int set_cc,
+                              Register rn,
+                              Register rd,
+                              ShifterOperand so) {
+  CHECK_NE(rd, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+                     type << kTypeShift |
+                     static_cast<int32_t>(opcode) << kOpcodeShift |
+                     set_cc << kSShift |
+                     static_cast<int32_t>(rn) << kRnShift |
+                     static_cast<int32_t>(rd) << kRdShift |
+                     so.encoding();
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitType5(Condition cond, int offset, bool link) {
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+                     5 << kTypeShift |
+                     (link ? 1 : 0) << kLinkShift;
+  Emit(ArmAssembler::EncodeBranchOffset(offset, encoding));
+}
+
+
+void ArmAssembler::EmitMemOp(Condition cond,
+                             bool load,
+                             bool byte,
+                             Register rd,
+                             Address ad) {
+  CHECK_NE(rd, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B26 |
+                     (load ? L : 0) |
+                     (byte ? B : 0) |
+                     (static_cast<int32_t>(rd) << kRdShift) |
+                     ad.encoding();
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitMemOpAddressMode3(Condition cond,
+                                         int32_t mode,
+                                         Register rd,
+                                         Address ad) {
+  CHECK_NE(rd, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B22  |
+                     mode |
+                     (static_cast<int32_t>(rd) << kRdShift) |
+                     ad.encoding3();
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitMultiMemOp(Condition cond,
+                                  BlockAddressMode am,
+                                  bool load,
+                                  Register base,
+                                  RegList regs) {
+  CHECK_NE(base, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 |
+                     am |
+                     (load ? L : 0) |
+                     (static_cast<int32_t>(base) << kRnShift) |
+                     regs;
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitShiftImmediate(Condition cond,
+                                      Shift opcode,
+                                      Register rd,
+                                      Register rm,
+                                      ShifterOperand so) {
+  CHECK_NE(cond, kNoCondition);
+  CHECK_EQ(so.type(), 1U);
+  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+                     static_cast<int32_t>(MOV) << kOpcodeShift |
+                     static_cast<int32_t>(rd) << kRdShift |
+                     so.encoding() << kShiftImmShift |
+                     static_cast<int32_t>(opcode) << kShiftShift |
+                     static_cast<int32_t>(rm);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitShiftRegister(Condition cond,
+                                     Shift opcode,
+                                     Register rd,
+                                     Register rm,
+                                     ShifterOperand so) {
+  CHECK_NE(cond, kNoCondition);
+  CHECK_EQ(so.type(), 0U);
+  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+                     static_cast<int32_t>(MOV) << kOpcodeShift |
+                     static_cast<int32_t>(rd) << kRdShift |
+                     so.encoding() << kShiftRegisterShift |
+                     static_cast<int32_t>(opcode) << kShiftShift |
+                     B4 |
+                     static_cast<int32_t>(rm);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitBranch(Condition cond, Label* label, bool link) {
+  if (label->IsBound()) {
+    EmitType5(cond, label->Position() - buffer_.Size(), link);
+  } else {
+    int position = buffer_.Size();
+    // Use the offset field of the branch instruction for linking the sites.
+    EmitType5(cond, label->position_, link);
+    label->LinkTo(position);
+  }
+}
+
+void ArmAssembler::and_(Register rd, Register rn, ShifterOperand so,
+                        Condition cond) {
+  EmitType01(cond, so.type(), AND, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::eor(Register rd, Register rn, ShifterOperand so,
+                       Condition cond) {
+  EmitType01(cond, so.type(), EOR, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::sub(Register rd, Register rn, ShifterOperand so,
+                       Condition cond) {
+  EmitType01(cond, so.type(), SUB, 0, rn, rd, so);
+}
+
+void ArmAssembler::rsb(Register rd, Register rn, ShifterOperand so,
+                       Condition cond) {
+  EmitType01(cond, so.type(), RSB, 0, rn, rd, so);
+}
+
+void ArmAssembler::rsbs(Register rd, Register rn, ShifterOperand so,
+                        Condition cond) {
+  EmitType01(cond, so.type(), RSB, 1, rn, rd, so);
+}
+
+
+void ArmAssembler::add(Register rd, Register rn, ShifterOperand so,
+                       Condition cond) {
+  EmitType01(cond, so.type(), ADD, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::adds(Register rd, Register rn, ShifterOperand so,
+                        Condition cond) {
+  EmitType01(cond, so.type(), ADD, 1, rn, rd, so);
+}
+
+
+void ArmAssembler::subs(Register rd, Register rn, ShifterOperand so,
+                        Condition cond) {
+  EmitType01(cond, so.type(), SUB, 1, rn, rd, so);
+}
+
+
+void ArmAssembler::adc(Register rd, Register rn, ShifterOperand so,
+                       Condition cond) {
+  EmitType01(cond, so.type(), ADC, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::sbc(Register rd, Register rn, ShifterOperand so,
+                       Condition cond) {
+  EmitType01(cond, so.type(), SBC, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::rsc(Register rd, Register rn, ShifterOperand so,
+                       Condition cond) {
+  EmitType01(cond, so.type(), RSC, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::tst(Register rn, ShifterOperand so, Condition cond) {
+  CHECK_NE(rn, PC);  // Reserve tst pc instruction for exception handler marker.
+  EmitType01(cond, so.type(), TST, 1, rn, R0, so);
+}
+
+
+void ArmAssembler::teq(Register rn, ShifterOperand so, Condition cond) {
+  CHECK_NE(rn, PC);  // Reserve teq pc instruction for exception handler marker.
+  EmitType01(cond, so.type(), TEQ, 1, rn, R0, so);
+}
+
+
+void ArmAssembler::cmp(Register rn, ShifterOperand so, Condition cond) {
+  EmitType01(cond, so.type(), CMP, 1, rn, R0, so);
+}
+
+
+void ArmAssembler::cmn(Register rn, ShifterOperand so, Condition cond) {
+  EmitType01(cond, so.type(), CMN, 1, rn, R0, so);
+}
+
+
+void ArmAssembler::orr(Register rd, Register rn,
+                    ShifterOperand so, Condition cond) {
+  EmitType01(cond, so.type(), ORR, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::orrs(Register rd, Register rn,
+                        ShifterOperand so, Condition cond) {
+  EmitType01(cond, so.type(), ORR, 1, rn, rd, so);
+}
+
+
+void ArmAssembler::mov(Register rd, ShifterOperand so, Condition cond) {
+  EmitType01(cond, so.type(), MOV, 0, R0, rd, so);
+}
+
+
+void ArmAssembler::movs(Register rd, ShifterOperand so, Condition cond) {
+  EmitType01(cond, so.type(), MOV, 1, R0, rd, so);
+}
+
+
+void ArmAssembler::bic(Register rd, Register rn, ShifterOperand so,
+                       Condition cond) {
+  EmitType01(cond, so.type(), BIC, 0, rn, rd, so);
+}
+
+
+void ArmAssembler::mvn(Register rd, ShifterOperand so, Condition cond) {
+  EmitType01(cond, so.type(), MVN, 0, R0, rd, so);
+}
+
+
+void ArmAssembler::mvns(Register rd, ShifterOperand so, Condition cond) {
+  EmitType01(cond, so.type(), MVN, 1, R0, rd, so);
+}
+
+
+void ArmAssembler::clz(Register rd, Register rm, Condition cond) {
+  CHECK_NE(rd, kNoRegister);
+  CHECK_NE(rm, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  CHECK_NE(rd, PC);
+  CHECK_NE(rm, PC);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B24 | B22 | B21 | (0xf << 16) |
+                     (static_cast<int32_t>(rd) << kRdShift) |
+                     (0xf << 8) | B4 | static_cast<int32_t>(rm);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::movw(Register rd, uint16_t imm16, Condition cond) {
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+                     B25 | B24 | ((imm16 >> 12) << 16) |
+                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::movt(Register rd, uint16_t imm16, Condition cond) {
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = static_cast<int32_t>(cond) << kConditionShift |
+                     B25 | B24 | B22 | ((imm16 >> 12) << 16) |
+                     static_cast<int32_t>(rd) << kRdShift | (imm16 & 0xfff);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitMulOp(Condition cond, int32_t opcode,
+                             Register rd, Register rn,
+                             Register rm, Register rs) {
+  CHECK_NE(rd, kNoRegister);
+  CHECK_NE(rn, kNoRegister);
+  CHECK_NE(rm, kNoRegister);
+  CHECK_NE(rs, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = opcode |
+      (static_cast<int32_t>(cond) << kConditionShift) |
+      (static_cast<int32_t>(rn) << kRnShift) |
+      (static_cast<int32_t>(rd) << kRdShift) |
+      (static_cast<int32_t>(rs) << kRsShift) |
+      B7 | B4 |
+      (static_cast<int32_t>(rm) << kRmShift);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::mul(Register rd, Register rn, Register rm, Condition cond) {
+  // Assembler registers rd, rn, rm are encoded as rn, rm, rs.
+  EmitMulOp(cond, 0, R0, rd, rn, rm);
+}
+
+
+void ArmAssembler::mla(Register rd, Register rn, Register rm, Register ra,
+                       Condition cond) {
+  // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
+  EmitMulOp(cond, B21, ra, rd, rn, rm);
+}
+
+
+void ArmAssembler::mls(Register rd, Register rn, Register rm, Register ra,
+                       Condition cond) {
+  // Assembler registers rd, rn, rm, ra are encoded as rn, rm, rs, rd.
+  EmitMulOp(cond, B22 | B21, ra, rd, rn, rm);
+}
+
+
+void ArmAssembler::umull(Register rd_lo, Register rd_hi, Register rn,
+                         Register rm, Condition cond) {
+  // Assembler registers rd_lo, rd_hi, rn, rm are encoded as rd, rn, rm, rs.
+  EmitMulOp(cond, B23, rd_lo, rd_hi, rn, rm);
+}
+
+
+void ArmAssembler::ldr(Register rd, Address ad, Condition cond) {
+  EmitMemOp(cond, true, false, rd, ad);
+}
+
+
+void ArmAssembler::str(Register rd, Address ad, Condition cond) {
+  EmitMemOp(cond, false, false, rd, ad);
+}
+
+
+void ArmAssembler::ldrb(Register rd, Address ad, Condition cond) {
+  EmitMemOp(cond, true, true, rd, ad);
+}
+
+
+void ArmAssembler::strb(Register rd, Address ad, Condition cond) {
+  EmitMemOp(cond, false, true, rd, ad);
+}
+
+
+void ArmAssembler::ldrh(Register rd, Address ad, Condition cond) {
+  EmitMemOpAddressMode3(cond, L | B7 | H | B4, rd, ad);
+}
+
+
+void ArmAssembler::strh(Register rd, Address ad, Condition cond) {
+  EmitMemOpAddressMode3(cond, B7 | H | B4, rd, ad);
+}
+
+
+void ArmAssembler::ldrsb(Register rd, Address ad, Condition cond) {
+  EmitMemOpAddressMode3(cond, L | B7 | B6 | B4, rd, ad);
+}
+
+
+void ArmAssembler::ldrsh(Register rd, Address ad, Condition cond) {
+  EmitMemOpAddressMode3(cond, L | B7 | B6 | H | B4, rd, ad);
+}
+
+
+void ArmAssembler::ldrd(Register rd, Address ad, Condition cond) {
+  CHECK_EQ(rd % 2, 0);
+  EmitMemOpAddressMode3(cond, B7 | B6 | B4, rd, ad);
+}
+
+
+void ArmAssembler::strd(Register rd, Address ad, Condition cond) {
+  CHECK_EQ(rd % 2, 0);
+  EmitMemOpAddressMode3(cond, B7 | B6 | B5 | B4, rd, ad);
+}
+
+
+void ArmAssembler::ldm(BlockAddressMode am,
+                       Register base,
+                       RegList regs,
+                       Condition cond) {
+  EmitMultiMemOp(cond, am, true, base, regs);
+}
+
+
+void ArmAssembler::stm(BlockAddressMode am,
+                       Register base,
+                       RegList regs,
+                       Condition cond) {
+  EmitMultiMemOp(cond, am, false, base, regs);
+}
+
+
+void ArmAssembler::ldrex(Register rt, Register rn, Condition cond) {
+  CHECK_NE(rn, kNoRegister);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B24 |
+                     B23 |
+                     L   |
+                     (static_cast<int32_t>(rn) << kLdExRnShift) |
+                     (static_cast<int32_t>(rt) << kLdExRtShift) |
+                     B11 | B10 | B9 | B8 | B7 | B4 | B3 | B2 | B1 | B0;
+  Emit(encoding);
+}
+
+
+void ArmAssembler::strex(Register rd,
+                         Register rt,
+                         Register rn,
+                         Condition cond) {
+  CHECK_NE(rn, kNoRegister);
+  CHECK_NE(rd, kNoRegister);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B24 |
+                     B23 |
+                     (static_cast<int32_t>(rn) << kStrExRnShift) |
+                     (static_cast<int32_t>(rd) << kStrExRdShift) |
+                     B11 | B10 | B9 | B8 | B7 | B4 |
+                     (static_cast<int32_t>(rt) << kStrExRtShift);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::clrex() {
+  int32_t encoding = (kSpecialCondition << kConditionShift) |
+                     B26 | B24 | B22 | B21 | B20 | (0xff << 12) | B4 | 0xf;
+  Emit(encoding);
+}
+
+
+void ArmAssembler::nop(Condition cond) {
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B25 | B24 | B21 | (0xf << 12);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vmovsr(SRegister sn, Register rt, Condition cond) {
+  CHECK_NE(sn, kNoSRegister);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(rt, SP);
+  CHECK_NE(rt, PC);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B25 |
+                     ((static_cast<int32_t>(sn) >> 1)*B16) |
+                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
+                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vmovrs(Register rt, SRegister sn, Condition cond) {
+  CHECK_NE(sn, kNoSRegister);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(rt, SP);
+  CHECK_NE(rt, PC);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B25 | B20 |
+                     ((static_cast<int32_t>(sn) >> 1)*B16) |
+                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
+                     ((static_cast<int32_t>(sn) & 1)*B7) | B4;
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vmovsrr(SRegister sm, Register rt, Register rt2,
+                           Condition cond) {
+  CHECK_NE(sm, kNoSRegister);
+  CHECK_NE(sm, S31);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(rt, SP);
+  CHECK_NE(rt, PC);
+  CHECK_NE(rt2, kNoRegister);
+  CHECK_NE(rt2, SP);
+  CHECK_NE(rt2, PC);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B22 |
+                     (static_cast<int32_t>(rt2)*B16) |
+                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
+                     ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
+                     (static_cast<int32_t>(sm) >> 1);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vmovrrs(Register rt, Register rt2, SRegister sm,
+                           Condition cond) {
+  CHECK_NE(sm, kNoSRegister);
+  CHECK_NE(sm, S31);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(rt, SP);
+  CHECK_NE(rt, PC);
+  CHECK_NE(rt2, kNoRegister);
+  CHECK_NE(rt2, SP);
+  CHECK_NE(rt2, PC);
+  CHECK_NE(rt, rt2);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B22 | B20 |
+                     (static_cast<int32_t>(rt2)*B16) |
+                     (static_cast<int32_t>(rt)*B12) | B11 | B9 |
+                     ((static_cast<int32_t>(sm) & 1)*B5) | B4 |
+                     (static_cast<int32_t>(sm) >> 1);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vmovdrr(DRegister dm, Register rt, Register rt2,
+                           Condition cond) {
+  CHECK_NE(dm, kNoDRegister);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(rt, SP);
+  CHECK_NE(rt, PC);
+  CHECK_NE(rt2, kNoRegister);
+  CHECK_NE(rt2, SP);
+  CHECK_NE(rt2, PC);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B22 |
+                     (static_cast<int32_t>(rt2)*B16) |
+                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
+                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
+                     (static_cast<int32_t>(dm) & 0xf);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vmovrrd(Register rt, Register rt2, DRegister dm,
+                           Condition cond) {
+  CHECK_NE(dm, kNoDRegister);
+  CHECK_NE(rt, kNoRegister);
+  CHECK_NE(rt, SP);
+  CHECK_NE(rt, PC);
+  CHECK_NE(rt2, kNoRegister);
+  CHECK_NE(rt2, SP);
+  CHECK_NE(rt2, PC);
+  CHECK_NE(rt, rt2);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B22 | B20 |
+                     (static_cast<int32_t>(rt2)*B16) |
+                     (static_cast<int32_t>(rt)*B12) | B11 | B9 | B8 |
+                     ((static_cast<int32_t>(dm) >> 4)*B5) | B4 |
+                     (static_cast<int32_t>(dm) & 0xf);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vldrs(SRegister sd, Address ad, Condition cond) {
+  CHECK_NE(sd, kNoSRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B24 | B20 |
+                     ((static_cast<int32_t>(sd) & 1)*B22) |
+                     ((static_cast<int32_t>(sd) >> 1)*B12) |
+                     B11 | B9 | ad.vencoding();
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vstrs(SRegister sd, Address ad, Condition cond) {
+  CHECK_NE(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)), PC);
+  CHECK_NE(sd, kNoSRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B24 |
+                     ((static_cast<int32_t>(sd) & 1)*B22) |
+                     ((static_cast<int32_t>(sd) >> 1)*B12) |
+                     B11 | B9 | ad.vencoding();
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vldrd(DRegister dd, Address ad, Condition cond) {
+  CHECK_NE(dd, kNoDRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B24 | B20 |
+                     ((static_cast<int32_t>(dd) >> 4)*B22) |
+                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
+                     B11 | B9 | B8 | ad.vencoding();
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vstrd(DRegister dd, Address ad, Condition cond) {
+  CHECK_NE(static_cast<Register>(ad.encoding_ & (0xf << kRnShift)), PC);
+  CHECK_NE(dd, kNoDRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B24 |
+                     ((static_cast<int32_t>(dd) >> 4)*B22) |
+                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
+                     B11 | B9 | B8 | ad.vencoding();
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitVFPsss(Condition cond, int32_t opcode,
+                              SRegister sd, SRegister sn, SRegister sm) {
+  CHECK_NE(sd, kNoSRegister);
+  CHECK_NE(sn, kNoSRegister);
+  CHECK_NE(sm, kNoSRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B25 | B11 | B9 | opcode |
+                     ((static_cast<int32_t>(sd) & 1)*B22) |
+                     ((static_cast<int32_t>(sn) >> 1)*B16) |
+                     ((static_cast<int32_t>(sd) >> 1)*B12) |
+                     ((static_cast<int32_t>(sn) & 1)*B7) |
+                     ((static_cast<int32_t>(sm) & 1)*B5) |
+                     (static_cast<int32_t>(sm) >> 1);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitVFPddd(Condition cond, int32_t opcode,
+                              DRegister dd, DRegister dn, DRegister dm) {
+  CHECK_NE(dd, kNoDRegister);
+  CHECK_NE(dn, kNoDRegister);
+  CHECK_NE(dm, kNoDRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B25 | B11 | B9 | B8 | opcode |
+                     ((static_cast<int32_t>(dd) >> 4)*B22) |
+                     ((static_cast<int32_t>(dn) & 0xf)*B16) |
+                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
+                     ((static_cast<int32_t>(dn) >> 4)*B7) |
+                     ((static_cast<int32_t>(dm) >> 4)*B5) |
+                     (static_cast<int32_t>(dm) & 0xf);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vmovs(SRegister sd, SRegister sm, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vmovd(DRegister dd, DRegister dm, Condition cond) {
+  EmitVFPddd(cond, B23 | B21 | B20 | B6, dd, D0, dm);
+}
+
+
+bool ArmAssembler::vmovs(SRegister sd, float s_imm, Condition cond) {
+  uint32_t imm32 = bit_cast<uint32_t, float>(s_imm);
+  if (((imm32 & ((1 << 19) - 1)) == 0) &&
+      ((((imm32 >> 25) & ((1 << 6) - 1)) == (1 << 5)) ||
+       (((imm32 >> 25) & ((1 << 6) - 1)) == ((1 << 5) -1)))) {
+    uint8_t imm8 = ((imm32 >> 31) << 7) | (((imm32 >> 29) & 1) << 6) |
+        ((imm32 >> 19) & ((1 << 6) -1));
+    EmitVFPsss(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | (imm8 & 0xf),
+               sd, S0, S0);
+    return true;
+  }
+  return false;
+}
+
+
+bool ArmAssembler::vmovd(DRegister dd, double d_imm, Condition cond) {
+  uint64_t imm64 = bit_cast<uint64_t, double>(d_imm);
+  if (((imm64 & ((1LL << 48) - 1)) == 0) &&
+      ((((imm64 >> 54) & ((1 << 9) - 1)) == (1 << 8)) ||
+       (((imm64 >> 54) & ((1 << 9) - 1)) == ((1 << 8) -1)))) {
+    uint8_t imm8 = ((imm64 >> 63) << 7) | (((imm64 >> 61) & 1) << 6) |
+        ((imm64 >> 48) & ((1 << 6) -1));
+    EmitVFPddd(cond, B23 | B21 | B20 | ((imm8 >> 4)*B16) | B8 | (imm8 & 0xf),
+               dd, D0, D0);
+    return true;
+  }
+  return false;
+}
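+
+// Example of the immediate checks above (an illustrative sketch, derived from the
+// encodings): 1.0f is 0x3F800000, whose low 19 bits are zero and whose exponent
+// field matches the required pattern, so vmovs(S0, 1.0f) encodes imm8 0x70 and
+// returns true; 0.1f is 0x3DCCCCCD, which has non-zero low mantissa bits, so the
+// check fails and the caller must materialize the constant another way.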
+
+
+void ArmAssembler::vadds(SRegister sd, SRegister sn, SRegister sm,
+                         Condition cond) {
+  EmitVFPsss(cond, B21 | B20, sd, sn, sm);
+}
+
+
+void ArmAssembler::vaddd(DRegister dd, DRegister dn, DRegister dm,
+                         Condition cond) {
+  EmitVFPddd(cond, B21 | B20, dd, dn, dm);
+}
+
+
+void ArmAssembler::vsubs(SRegister sd, SRegister sn, SRegister sm,
+                         Condition cond) {
+  EmitVFPsss(cond, B21 | B20 | B6, sd, sn, sm);
+}
+
+
+void ArmAssembler::vsubd(DRegister dd, DRegister dn, DRegister dm,
+                         Condition cond) {
+  EmitVFPddd(cond, B21 | B20 | B6, dd, dn, dm);
+}
+
+
+void ArmAssembler::vmuls(SRegister sd, SRegister sn, SRegister sm,
+                         Condition cond) {
+  EmitVFPsss(cond, B21, sd, sn, sm);
+}
+
+
+void ArmAssembler::vmuld(DRegister dd, DRegister dn, DRegister dm,
+                         Condition cond) {
+  EmitVFPddd(cond, B21, dd, dn, dm);
+}
+
+
+void ArmAssembler::vmlas(SRegister sd, SRegister sn, SRegister sm,
+                         Condition cond) {
+  EmitVFPsss(cond, 0, sd, sn, sm);
+}
+
+
+void ArmAssembler::vmlad(DRegister dd, DRegister dn, DRegister dm,
+                         Condition cond) {
+  EmitVFPddd(cond, 0, dd, dn, dm);
+}
+
+
+void ArmAssembler::vmlss(SRegister sd, SRegister sn, SRegister sm,
+                         Condition cond) {
+  EmitVFPsss(cond, B6, sd, sn, sm);
+}
+
+
+void ArmAssembler::vmlsd(DRegister dd, DRegister dn, DRegister dm,
+                         Condition cond) {
+  EmitVFPddd(cond, B6, dd, dn, dm);
+}
+
+
+void ArmAssembler::vdivs(SRegister sd, SRegister sn, SRegister sm,
+                         Condition cond) {
+  EmitVFPsss(cond, B23, sd, sn, sm);
+}
+
+
+void ArmAssembler::vdivd(DRegister dd, DRegister dn, DRegister dm,
+                         Condition cond) {
+  EmitVFPddd(cond, B23, dd, dn, dm);
+}
+
+
+void ArmAssembler::vabss(SRegister sd, SRegister sm, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B7 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vabsd(DRegister dd, DRegister dm, Condition cond) {
+  EmitVFPddd(cond, B23 | B21 | B20 | B7 | B6, dd, D0, dm);
+}
+
+
+void ArmAssembler::vnegs(SRegister sd, SRegister sm, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vnegd(DRegister dd, DRegister dm, Condition cond) {
+  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B6, dd, D0, dm);
+}
+
+
+void ArmAssembler::vsqrts(SRegister sd, SRegister sm, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B16 | B7 | B6, sd, S0, sm);
+}
+
+void ArmAssembler::vsqrtd(DRegister dd, DRegister dm, Condition cond) {
+  EmitVFPddd(cond, B23 | B21 | B20 | B16 | B7 | B6, dd, D0, dm);
+}
+
+
+void ArmAssembler::EmitVFPsd(Condition cond, int32_t opcode,
+                             SRegister sd, DRegister dm) {
+  CHECK_NE(sd, kNoSRegister);
+  CHECK_NE(dm, kNoDRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B25 | B11 | B9 | opcode |
+                     ((static_cast<int32_t>(sd) & 1)*B22) |
+                     ((static_cast<int32_t>(sd) >> 1)*B12) |
+                     ((static_cast<int32_t>(dm) >> 4)*B5) |
+                     (static_cast<int32_t>(dm) & 0xf);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::EmitVFPds(Condition cond, int32_t opcode,
+                             DRegister dd, SRegister sm) {
+  CHECK_NE(dd, kNoDRegister);
+  CHECK_NE(sm, kNoSRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B25 | B11 | B9 | opcode |
+                     ((static_cast<int32_t>(dd) >> 4)*B22) |
+                     ((static_cast<int32_t>(dd) & 0xf)*B12) |
+                     ((static_cast<int32_t>(sm) & 1)*B5) |
+                     (static_cast<int32_t>(sm) >> 1);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::vcvtsd(SRegister sd, DRegister dm, Condition cond) {
+  EmitVFPsd(cond, B23 | B21 | B20 | B18 | B17 | B16 | B8 | B7 | B6, sd, dm);
+}
+
+
+void ArmAssembler::vcvtds(DRegister dd, SRegister sm, Condition cond) {
+  EmitVFPds(cond, B23 | B21 | B20 | B18 | B17 | B16 | B7 | B6, dd, sm);
+}
+
+
+void ArmAssembler::vcvtis(SRegister sd, SRegister sm, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B16 | B7 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcvtid(SRegister sd, DRegister dm, Condition cond) {
+  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B16 | B8 | B7 | B6, sd, dm);
+}
+
+
+void ArmAssembler::vcvtsi(SRegister sd, SRegister sm, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B7 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcvtdi(DRegister dd, SRegister sm, Condition cond) {
+  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B7 | B6, dd, sm);
+}
+
+
+void ArmAssembler::vcvtus(SRegister sd, SRegister sm, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B18 | B7 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcvtud(SRegister sd, DRegister dm, Condition cond) {
+  EmitVFPsd(cond, B23 | B21 | B20 | B19 | B18 | B8 | B7 | B6, sd, dm);
+}
+
+
+void ArmAssembler::vcvtsu(SRegister sd, SRegister sm, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B19 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcvtdu(DRegister dd, SRegister sm, Condition cond) {
+  EmitVFPds(cond, B23 | B21 | B20 | B19 | B8 | B6, dd, sm);
+}
+
+
+void ArmAssembler::vcmps(SRegister sd, SRegister sm, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B6, sd, S0, sm);
+}
+
+
+void ArmAssembler::vcmpd(DRegister dd, DRegister dm, Condition cond) {
+  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B6, dd, D0, dm);
+}
+
+
+void ArmAssembler::vcmpsz(SRegister sd, Condition cond) {
+  EmitVFPsss(cond, B23 | B21 | B20 | B18 | B16 | B6, sd, S0, S0);
+}
+
+
+void ArmAssembler::vcmpdz(DRegister dd, Condition cond) {
+  EmitVFPddd(cond, B23 | B21 | B20 | B18 | B16 | B6, dd, D0, D0);
+}
+
+
+void ArmAssembler::vmstat(Condition cond) {  // VMRS APSR_nzcv, FPSCR
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B27 | B26 | B25 | B23 | B22 | B21 | B20 | B16 |
+                     (static_cast<int32_t>(PC)*B12) |
+                     B11 | B9 | B4;
+  Emit(encoding);
+}
+
+
+void ArmAssembler::svc(uint32_t imm24) {
+  CHECK(IsUint(24, imm24));
+  int32_t encoding = (AL << kConditionShift) | B27 | B26 | B25 | B24 | imm24;
+  Emit(encoding);
+}
+
+
+void ArmAssembler::bkpt(uint16_t imm16) {
+  int32_t encoding = (AL << kConditionShift) | B24 | B21 |
+                     ((imm16 >> 4) << 8) | B6 | B5 | B4 | (imm16 & 0xf);
+  Emit(encoding);
+}
+
+
+void ArmAssembler::b(Label* label, Condition cond) {
+  EmitBranch(cond, label, false);
+}
+
+
+void ArmAssembler::bl(Label* label, Condition cond) {
+  EmitBranch(cond, label, true);
+}
+
+
+void ArmAssembler::blx(Register rm, Condition cond) {
+  CHECK_NE(rm, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B24 | B21 | (0xfff << 8) | B5 | B4 |
+                     (static_cast<int32_t>(rm) << kRmShift);
+  Emit(encoding);
+}
+
+void ArmAssembler::bx(Register rm, Condition cond) {
+  CHECK_NE(rm, kNoRegister);
+  CHECK_NE(cond, kNoCondition);
+  int32_t encoding = (static_cast<int32_t>(cond) << kConditionShift) |
+                     B24 | B21 | (0xfff << 8) | B4 |
+                     (static_cast<int32_t>(rm) << kRmShift);
+  Emit(encoding);
+}
+
+void ArmAssembler::MarkExceptionHandler(Label* label) {
+  EmitType01(AL, 1, TST, 1, PC, R0, ShifterOperand(0));
+  Label l;
+  b(&l);
+  EmitBranch(AL, label, false);
+  Bind(&l);
+}
+
+
+void ArmAssembler::Bind(Label* label) {
+  CHECK(!label->IsBound());
+  int bound_pc = buffer_.Size();
+  while (label->IsLinked()) {
+    int32_t position = label->Position();
+    int32_t next = buffer_.Load<int32_t>(position);
+    int32_t encoded = ArmAssembler::EncodeBranchOffset(bound_pc - position, next);
+    buffer_.Store<int32_t>(position, encoded);
+    label->position_ = ArmAssembler::DecodeBranchOffset(next);
+  }
+  label->BindTo(bound_pc);
+}
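+
+// Note: Bind() walks the chain of forward branches threaded through the branch
+// instructions' offset fields (see EmitBranch, which links unbound sites there),
+// patching each site with the now-known target before following the stored link
+// to the next one.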
+
+
+void ArmAssembler::EncodeUint32InTstInstructions(uint32_t data) {
+  // TODO: Consider using movw ip, <16 bits>.
+  while (!IsUint(8, data)) {
+    tst(R0, ShifterOperand(data & 0xFF), VS);
+    data >>= 8;
+  }
+  tst(R0, ShifterOperand(data), MI);
+}
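+
+// For example (illustrative), EncodeUint32InTstInstructions(0x01020304) emits tst
+// instructions carrying 0x04, 0x03 and 0x02 under the VS condition and a final one
+// carrying 0x01 under MI, embedding the word a byte at a time in the instruction
+// stream.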
+
+
+int32_t ArmAssembler::EncodeBranchOffset(int offset, int32_t inst) {
+  // The offset is off by 8 due to the way the ARM CPUs read PC.
+  offset -= 8;
+  CHECK_ALIGNED(offset, 4);
+  CHECK(IsInt(CountOneBits(kBranchOffsetMask), offset));
+
+  // Properly preserve only the bits supported in the instruction.
+  offset >>= 2;
+  offset &= kBranchOffsetMask;
+  return (inst & ~kBranchOffsetMask) | offset;
+}
+
+
+int ArmAssembler::DecodeBranchOffset(int32_t inst) {
+  // Sign-extend, left-shift by 2, then add 8.
+  return ((((inst & kBranchOffsetMask) << 8) >> 6) + 8);
+}
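+
+// Worked example (illustrative): a branch emitted at buffer offset 0 to a label
+// bound at offset 16 has offset 16; EncodeBranchOffset subtracts the 8-byte PC
+// read-ahead and shifts right by 2, storing imm24 = 2, and DecodeBranchOffset
+// reverses this: (2 << 2) + 8 = 16.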
+
+void ArmAssembler::AddConstant(Register rd, int32_t value, Condition cond) {
+  AddConstant(rd, rd, value, cond);
+}
+
+
+void ArmAssembler::AddConstant(Register rd, Register rn, int32_t value,
+                               Condition cond) {
+  if (value == 0) {
+    if (rd != rn) {
+      mov(rd, ShifterOperand(rn), cond);
+    }
+    return;
+  }
+  // We prefer to select the shorter code sequence rather than selecting add for
+  // positive values and sub for negative ones, which would slightly improve
+  // the readability of generated code for some constants.
+  ShifterOperand shifter_op;
+  if (ShifterOperand::CanHold(value, &shifter_op)) {
+    add(rd, rn, shifter_op, cond);
+  } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
+    sub(rd, rn, shifter_op, cond);
+  } else {
+    CHECK(rn != IP);
+    if (ShifterOperand::CanHold(~value, &shifter_op)) {
+      mvn(IP, shifter_op, cond);
+      add(rd, rn, ShifterOperand(IP), cond);
+    } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
+      mvn(IP, shifter_op, cond);
+      sub(rd, rn, ShifterOperand(IP), cond);
+    } else {
+      movw(IP, Low16Bits(value), cond);
+      uint16_t value_high = High16Bits(value);
+      if (value_high != 0) {
+        movt(IP, value_high, cond);
+      }
+      add(rd, rn, ShifterOperand(IP), cond);
+    }
+  }
+}
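+
+// Example of the fallback path (a sketch, not exercised here): 0x12345 cannot be
+// encoded as a rotated 8-bit immediate, and neither can its negation or bitwise
+// complement, so AddConstant(R0, R1, 0x12345) loads the constant into IP with
+// movw/movt and then emits a register-register add.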
+
+
+void ArmAssembler::AddConstantSetFlags(Register rd, Register rn, int32_t value,
+                                       Condition cond) {
+  ShifterOperand shifter_op;
+  if (ShifterOperand::CanHold(value, &shifter_op)) {
+    adds(rd, rn, shifter_op, cond);
+  } else if (ShifterOperand::CanHold(-value, &shifter_op)) {
+    subs(rd, rn, shifter_op, cond);
+  } else {
+    CHECK(rn != IP);
+    if (ShifterOperand::CanHold(~value, &shifter_op)) {
+      mvn(IP, shifter_op, cond);
+      adds(rd, rn, ShifterOperand(IP), cond);
+    } else if (ShifterOperand::CanHold(~(-value), &shifter_op)) {
+      mvn(IP, shifter_op, cond);
+      subs(rd, rn, ShifterOperand(IP), cond);
+    } else {
+      movw(IP, Low16Bits(value), cond);
+      uint16_t value_high = High16Bits(value);
+      if (value_high != 0) {
+        movt(IP, value_high, cond);
+      }
+      adds(rd, rn, ShifterOperand(IP), cond);
+    }
+  }
+}
+
+
+void ArmAssembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
+  ShifterOperand shifter_op;
+  if (ShifterOperand::CanHold(value, &shifter_op)) {
+    mov(rd, shifter_op, cond);
+  } else if (ShifterOperand::CanHold(~value, &shifter_op)) {
+    mvn(rd, shifter_op, cond);
+  } else {
+    movw(rd, Low16Bits(value), cond);
+    uint16_t value_high = High16Bits(value);
+    if (value_high != 0) {
+      movt(rd, value_high, cond);
+    }
+  }
+}
+
+
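+// Offset ranges reflect the ARM addressing modes: mode 3 (signed byte,
+// halfword and doubleword forms) carries an 8-bit immediate, so |offset| must
+// fit in 8 bits; mode 2 (word and unsigned byte) carries 12 bits; the VFP
+// encoding carries an 8-bit immediate scaled by 4, giving a usable range of
+// roughly +/-1020 in multiples of 4.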
+bool Address::CanHoldLoadOffset(LoadOperandType type, int offset) {
+  switch (type) {
+    case kLoadSignedByte:
+    case kLoadSignedHalfword:
+    case kLoadUnsignedHalfword:
+    case kLoadWordPair:
+      return IsAbsoluteUint(8, offset);  // Addressing mode 3.
+    case kLoadUnsignedByte:
+    case kLoadWord:
+      return IsAbsoluteUint(12, offset);  // Addressing mode 2.
+    case kLoadSWord:
+    case kLoadDWord:
+      return IsAbsoluteUint(10, offset);  // VFP addressing mode.
+    default:
+      LOG(FATAL) << "UNREACHABLE";
+      return false;
+  }
+}
+
+
+bool Address::CanHoldStoreOffset(StoreOperandType type, int offset) {
+  switch (type) {
+    case kStoreHalfword:
+    case kStoreWordPair:
+      return IsAbsoluteUint(8, offset);  // Addressing mode 3.
+    case kStoreByte:
+    case kStoreWord:
+      return IsAbsoluteUint(12, offset);  // Addressing mode 2.
+    case kStoreSWord:
+    case kStoreDWord:
+      return IsAbsoluteUint(10, offset);  // VFP addressing mode.
+    default:
+      LOG(FATAL) << "UNREACHABLE";
+      return false;
+  }
+}
+
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldLoadOffset.
+void ArmAssembler::LoadFromOffset(LoadOperandType type,
+                                  Register reg,
+                                  Register base,
+                                  int32_t offset,
+                                  Condition cond) {
+  if (!Address::CanHoldLoadOffset(type, offset)) {
+    CHECK(base != IP);
+    LoadImmediate(IP, offset, cond);
+    add(IP, IP, ShifterOperand(base), cond);
+    base = IP;
+    offset = 0;
+  }
+  CHECK(Address::CanHoldLoadOffset(type, offset));
+  switch (type) {
+    case kLoadSignedByte:
+      ldrsb(reg, Address(base, offset), cond);
+      break;
+    case kLoadUnsignedByte:
+      ldrb(reg, Address(base, offset), cond);
+      break;
+    case kLoadSignedHalfword:
+      ldrsh(reg, Address(base, offset), cond);
+      break;
+    case kLoadUnsignedHalfword:
+      ldrh(reg, Address(base, offset), cond);
+      break;
+    case kLoadWord:
+      ldr(reg, Address(base, offset), cond);
+      break;
+    case kLoadWordPair:
+      ldrd(reg, Address(base, offset), cond);
+      break;
+    default:
+      LOG(FATAL) << "UNREACHABLE";
+  }
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset.
+void ArmAssembler::LoadSFromOffset(SRegister reg,
+                                   Register base,
+                                   int32_t offset,
+                                   Condition cond) {
+  if (!Address::CanHoldLoadOffset(kLoadSWord, offset)) {
+    CHECK_NE(base, IP);
+    LoadImmediate(IP, offset, cond);
+    add(IP, IP, ShifterOperand(base), cond);
+    base = IP;
+    offset = 0;
+  }
+  CHECK(Address::CanHoldLoadOffset(kLoadSWord, offset));
+  vldrs(reg, Address(base, offset), cond);
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldLoadOffset, as expected by JIT::GuardedLoadFromOffset.
+void ArmAssembler::LoadDFromOffset(DRegister reg,
+                                   Register base,
+                                   int32_t offset,
+                                   Condition cond) {
+  if (!Address::CanHoldLoadOffset(kLoadDWord, offset)) {
+    CHECK_NE(base, IP);
+    LoadImmediate(IP, offset, cond);
+    add(IP, IP, ShifterOperand(base), cond);
+    base = IP;
+    offset = 0;
+  }
+  CHECK(Address::CanHoldLoadOffset(kLoadDWord, offset));
+  vldrd(reg, Address(base, offset), cond);
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldStoreOffset.
+void ArmAssembler::StoreToOffset(StoreOperandType type,
+                                 Register reg,
+                                 Register base,
+                                 int32_t offset,
+                                 Condition cond) {
+  if (!Address::CanHoldStoreOffset(type, offset)) {
+    CHECK(reg != IP);
+    CHECK(base != IP);
+    LoadImmediate(IP, offset, cond);
+    add(IP, IP, ShifterOperand(base), cond);
+    base = IP;
+    offset = 0;
+  }
+  CHECK(Address::CanHoldStoreOffset(type, offset));
+  switch (type) {
+    case kStoreByte:
+      strb(reg, Address(base, offset), cond);
+      break;
+    case kStoreHalfword:
+      strh(reg, Address(base, offset), cond);
+      break;
+    case kStoreWord:
+      str(reg, Address(base, offset), cond);
+      break;
+    case kStoreWordPair:
+      strd(reg, Address(base, offset), cond);
+      break;
+    default:
+      LOG(FATAL) << "UNREACHABLE";
+  }
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreToOffset.
+void ArmAssembler::StoreSToOffset(SRegister reg,
+                                  Register base,
+                                  int32_t offset,
+                                  Condition cond) {
+  if (!Address::CanHoldStoreOffset(kStoreSWord, offset)) {
+    CHECK_NE(base, IP);
+    LoadImmediate(IP, offset, cond);
+    add(IP, IP, ShifterOperand(base), cond);
+    base = IP;
+    offset = 0;
+  }
+  CHECK(Address::CanHoldStoreOffset(kStoreSWord, offset));
+  vstrs(reg, Address(base, offset), cond);
+}
+
+// Implementation note: this method must emit at most one instruction when
+// Address::CanHoldStoreOffset, as expected by JIT::GuardedStoreSToOffset.
+void ArmAssembler::StoreDToOffset(DRegister reg,
+                                  Register base,
+                                  int32_t offset,
+                                  Condition cond) {
+  if (!Address::CanHoldStoreOffset(kStoreDWord, offset)) {
+    CHECK_NE(base, IP);
+    LoadImmediate(IP, offset, cond);
+    add(IP, IP, ShifterOperand(base), cond);
+    base = IP;
+    offset = 0;
+  }
+  CHECK(Address::CanHoldStoreOffset(kStoreDWord, offset));
+  vstrd(reg, Address(base, offset), cond);
+}
+
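+// Push and Pop use a pre-indexed str and a post-indexed ldr on SP, the
+// single-register equivalents of push {rd} and pop {rd}.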
+void ArmAssembler::Push(Register rd, Condition cond) {
+  str(rd, Address(SP, -kRegisterSize, Address::PreIndex), cond);
+}
+
+void ArmAssembler::Pop(Register rd, Condition cond) {
+  ldr(rd, Address(SP, kRegisterSize, Address::PostIndex), cond);
+}
+
+void ArmAssembler::PushList(RegList regs, Condition cond) {
+  stm(DB_W, SP, regs, cond);
+}
+
+void ArmAssembler::PopList(RegList regs, Condition cond) {
+  ldm(IA_W, SP, regs, cond);
+}
+
+void ArmAssembler::Mov(Register rd, Register rm, Condition cond) {
+  if (rd != rm) {
+    mov(rd, ShifterOperand(rm), cond);
+  }
+}
+
+void ArmAssembler::Lsl(Register rd, Register rm, uint32_t shift_imm,
+                       Condition cond) {
+  CHECK_NE(shift_imm, 0u);  // Do not use Lsl if no shift is wanted.
+  mov(rd, ShifterOperand(rm, LSL, shift_imm), cond);
+}
+
+void ArmAssembler::Lsr(Register rd, Register rm, uint32_t shift_imm,
+                       Condition cond) {
+  CHECK_NE(shift_imm, 0u);  // Do not use Lsr if no shift is wanted.
+  if (shift_imm == 32) shift_imm = 0;  // Comply with UAL syntax.
+  mov(rd, ShifterOperand(rm, LSR, shift_imm), cond);
+}
+
+void ArmAssembler::Asr(Register rd, Register rm, uint32_t shift_imm,
+                       Condition cond) {
+  CHECK_NE(shift_imm, 0u);  // Do not use Asr if no shift is wanted.
+  if (shift_imm == 32) shift_imm = 0;  // Comply with UAL syntax.
+  mov(rd, ShifterOperand(rm, ASR, shift_imm), cond);
+}
+
+void ArmAssembler::Ror(Register rd, Register rm, uint32_t shift_imm,
+                       Condition cond) {
+  CHECK_NE(shift_imm, 0u);  // Use the Rrx instruction for a rotate of zero.
+  mov(rd, ShifterOperand(rm, ROR, shift_imm), cond);
+}
+
+void ArmAssembler::Rrx(Register rd, Register rm, Condition cond) {
+  mov(rd, ShifterOperand(rm, ROR, 0), cond);
+}
+
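+// Frame layout on entry: the callee-save registers and LR are pushed first,
+// SP is then dropped by the remaining frame size, and the Method* (passed in
+// R0) is stored at [SP, #0]. RemoveFrame mirrors this, popping PC in place of
+// LR so the pop doubles as the return.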
+void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
+                              const std::vector<ManagedRegister>& callee_save_regs,
+                              const std::vector<ManagedRegister>& entry_spills) {
+  CHECK_ALIGNED(frame_size, kStackAlignment);
+  DCHECK_EQ(entry_spills.size(), 0u);
+  CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());
+
+  // Push callee saves and link register
+  RegList push_list = 1 << LR;
+  size_t pushed_values = 1;
+  for (size_t i = 0; i < callee_save_regs.size(); i++) {
+    Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
+    push_list |= 1 << reg;
+    pushed_values++;
+  }
+  PushList(push_list);
+
+  // Increase frame to required size
+  CHECK_GT(frame_size, pushed_values * kPointerSize);  // Must have space left to store the Method*.
+  size_t adjust = frame_size - (pushed_values * kPointerSize);
+  IncreaseFrameSize(adjust);
+
+  // Write out Method*
+  StoreToOffset(kStoreWord, R0, SP, 0);
+}
+
+void ArmAssembler::RemoveFrame(size_t frame_size,
+                              const std::vector<ManagedRegister>& callee_save_regs) {
+  CHECK_ALIGNED(frame_size, kStackAlignment);
+  // Compute callee saves to pop and PC
+  RegList pop_list = 1 << PC;
+  size_t pop_values = 1;
+  for (size_t i = 0; i < callee_save_regs.size(); i++) {
+    Register reg = callee_save_regs.at(i).AsArm().AsCoreRegister();
+    pop_list |= 1 << reg;
+    pop_values++;
+  }
+
+  // Decrease frame to start of callee saves
+  CHECK_GT(frame_size, pop_values * kPointerSize);
+  size_t adjust = frame_size - (pop_values * kPointerSize);
+  DecreaseFrameSize(adjust);
+
+  // Pop callee saves and PC
+  PopList(pop_list);
+}
+
+void ArmAssembler::IncreaseFrameSize(size_t adjust) {
+  AddConstant(SP, -adjust);
+}
+
+void ArmAssembler::DecreaseFrameSize(size_t adjust) {
+  AddConstant(SP, adjust);
+}
+
+void ArmAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
+  ArmManagedRegister src = msrc.AsArm();
+  if (src.IsNoRegister()) {
+    CHECK_EQ(0u, size);
+  } else if (src.IsCoreRegister()) {
+    CHECK_EQ(4u, size);
+    StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+  } else if (src.IsRegisterPair()) {
+    CHECK_EQ(8u, size);
+    StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
+    StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
+                  SP, dest.Int32Value() + 4);
+  } else if (src.IsSRegister()) {
+    StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value());
+  } else {
+    CHECK(src.IsDRegister());
+    StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
+  }
+}
+
+void ArmAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+  ArmManagedRegister src = msrc.AsArm();
+  CHECK(src.IsCoreRegister());
+  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+  ArmManagedRegister src = msrc.AsArm();
+  CHECK(src.IsCoreRegister());
+  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
+                              FrameOffset in_off, ManagedRegister mscratch) {
+  ArmManagedRegister src = msrc.AsArm();
+  ArmManagedRegister scratch = mscratch.AsArm();
+  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
+}
+
+void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src,
+                        ManagedRegister mscratch) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
+                           MemberOffset offs) {
+  ArmManagedRegister dest = mdest.AsArm();
+  CHECK(dest.IsCoreRegister() && base.AsArm().IsCoreRegister());
+  LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
+                 base.AsArm().AsCoreRegister(), offs.Int32Value());
+}
+
+void ArmAssembler::LoadRef(ManagedRegister mdest, FrameOffset  src) {
+  ArmManagedRegister dest = mdest.AsArm();
+  CHECK(dest.IsCoreRegister());
+  LoadFromOffset(kLoadWord, dest.AsCoreRegister(), SP, src.Int32Value());
+}
+
+void ArmAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
+                           Offset offs) {
+  ArmManagedRegister dest = mdest.AsArm();
+  CHECK(dest.IsCoreRegister() && base.AsArm().IsCoreRegister());
+  LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
+                 base.AsArm().AsCoreRegister(), offs.Int32Value());
+}
+
+void ArmAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+                                      ManagedRegister mscratch) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(scratch.IsCoreRegister());
+  LoadImmediate(scratch.AsCoreRegister(), imm);
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+}
+
+void ArmAssembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+                                       ManagedRegister mscratch) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(scratch.IsCoreRegister());
+  LoadImmediate(scratch.AsCoreRegister(), imm);
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, dest.Int32Value());
+}
+
+void ArmAssembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
+  ArmManagedRegister dest = mdest.AsArm();
+  if (dest.IsNoRegister()) {
+    CHECK_EQ(0u, size);
+  } else if (dest.IsCoreRegister()) {
+    CHECK_EQ(4u, size);
+    LoadFromOffset(kLoadWord, dest.AsCoreRegister(), SP, src.Int32Value());
+  } else if (dest.IsRegisterPair()) {
+    CHECK_EQ(8u, size);
+    LoadFromOffset(kLoadWord, dest.AsRegisterPairLow(), SP, src.Int32Value());
+    LoadFromOffset(kLoadWord, dest.AsRegisterPairHigh(), SP, src.Int32Value() + 4);
+  } else if (dest.IsSRegister()) {
+    LoadSFromOffset(dest.AsSRegister(), SP, src.Int32Value());
+  } else {
+    CHECK(dest.IsDRegister());
+    LoadDFromOffset(dest.AsDRegister(), SP, src.Int32Value());
+  }
+}
+
+void ArmAssembler::Load(ManagedRegister mdest, ThreadOffset src, size_t size) {
+  ArmManagedRegister dest = mdest.AsArm();
+  if (dest.IsNoRegister()) {
+    CHECK_EQ(0u, size);
+  } else if (dest.IsCoreRegister()) {
+    CHECK_EQ(4u, size);
+    LoadFromOffset(kLoadWord, dest.AsCoreRegister(), TR, src.Int32Value());
+  } else if (dest.IsRegisterPair()) {
+    CHECK_EQ(8u, size);
+    LoadFromOffset(kLoadWord, dest.AsRegisterPairLow(), TR, src.Int32Value());
+    LoadFromOffset(kLoadWord, dest.AsRegisterPairHigh(), TR, src.Int32Value() + 4);
+  } else if (dest.IsSRegister()) {
+    LoadSFromOffset(dest.AsSRegister(), TR, src.Int32Value());
+  } else {
+    CHECK(dest.IsDRegister());
+    LoadDFromOffset(dest.AsDRegister(), TR, src.Int32Value());
+  }
+}
+
+void ArmAssembler::LoadRawPtrFromThread(ManagedRegister mdest,
+                                        ThreadOffset offs) {
+  ArmManagedRegister dest = mdest.AsArm();
+  CHECK(dest.IsCoreRegister());
+  LoadFromOffset(kLoadWord, dest.AsCoreRegister(),
+                 TR, offs.Int32Value());
+}
+
+void ArmAssembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+                                        ThreadOffset thr_offs,
+                                        ManagedRegister mscratch) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(scratch.IsCoreRegister());
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 TR, thr_offs.Int32Value());
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+                SP, fr_offs.Int32Value());
+}
+
+void ArmAssembler::CopyRawPtrToThread(ThreadOffset thr_offs,
+                                      FrameOffset fr_offs,
+                                      ManagedRegister mscratch) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(scratch.IsCoreRegister());
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 SP, fr_offs.Int32Value());
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+                TR, thr_offs.Int32Value());
+}
+
+void ArmAssembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
+                                            FrameOffset fr_offs,
+                                            ManagedRegister mscratch) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(scratch.IsCoreRegister());
+  AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
+                TR, thr_offs.Int32Value());
+}
+
+void ArmAssembler::StoreStackPointerToThread(ThreadOffset thr_offs) {
+  StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
+}
+
+void ArmAssembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t /*size*/) {
+  ArmManagedRegister dest = mdest.AsArm();
+  ArmManagedRegister src = msrc.AsArm();
+  if (!dest.Equals(src)) {
+    if (dest.IsCoreRegister()) {
+      CHECK(src.IsCoreRegister());
+      mov(dest.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
+    } else if (dest.IsDRegister()) {
+      CHECK(src.IsDRegister());
+      vmovd(dest.AsDRegister(), src.AsDRegister());
+    } else if (dest.IsSRegister()) {
+      CHECK(src.IsSRegister());
+      vmovs(dest.AsSRegister(), src.AsSRegister());
+    } else {
+      CHECK(dest.IsRegisterPair());
+      CHECK(src.IsRegisterPair());
+      // Ensure that the first move doesn't clobber the input of the second
+      if (src.AsRegisterPairHigh() != dest.AsRegisterPairLow()) {
+        mov(dest.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
+        mov(dest.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
+      } else {
+        mov(dest.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
+        mov(dest.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
+      }
+    }
+  }
+}
+
+void ArmAssembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(scratch.IsCoreRegister());
+  CHECK(size == 4 || size == 8);
+  if (size == 4) {
+    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+  } else if (size == 8) {
+    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
+    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
+    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
+    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
+  }
+}
+
+void ArmAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+                        ManagedRegister mscratch, size_t size) {
+  Register scratch = mscratch.AsArm().AsCoreRegister();
+  CHECK_EQ(size, 4u);
+  LoadFromOffset(kLoadWord, scratch, src_base.AsArm().AsCoreRegister(), src_offset.Int32Value());
+  StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
+}
+
+void ArmAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+                        ManagedRegister mscratch, size_t size) {
+  Register scratch = mscratch.AsArm().AsCoreRegister();
+  CHECK_EQ(size, 4u);
+  LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
+  StoreToOffset(kStoreWord, scratch, dest_base.AsArm().AsCoreRegister(), dest_offset.Int32Value());
+}
+
+void ArmAssembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
+                        ManagedRegister /*mscratch*/, size_t /*size*/) {
+  UNIMPLEMENTED(FATAL);
+}
+
+void ArmAssembler::Copy(ManagedRegister dest, Offset dest_offset,
+                        ManagedRegister src, Offset src_offset,
+                        ManagedRegister mscratch, size_t size) {
+  CHECK_EQ(size, 4u);
+  Register scratch = mscratch.AsArm().AsCoreRegister();
+  LoadFromOffset(kLoadWord, scratch, src.AsArm().AsCoreRegister(), src_offset.Int32Value());
+  StoreToOffset(kStoreWord, scratch, dest.AsArm().AsCoreRegister(), dest_offset.Int32Value());
+}
+
+void ArmAssembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/, FrameOffset /*src*/, Offset /*src_offset*/,
+                        ManagedRegister /*scratch*/, size_t /*size*/) {
+  UNIMPLEMENTED(FATAL);
+}
+
+
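+// Emits a data memory barrier using whichever mechanism the build supports:
+// the DMB instruction, the CP15 c7/c10/5 barrier via mcr, or a call to the
+// kernel-provided __kuser_memory_barrier helper at 0xffff0fa0.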
+void ArmAssembler::MemoryBarrier(ManagedRegister mscratch) {
+  CHECK(mscratch.AsArm().AsCoreRegister() == R12);
+#if ANDROID_SMP != 0
+#if defined(__ARM_HAVE_DMB)
+  int32_t encoding = 0xf57ff05f;  // dmb
+  Emit(encoding);
+#elif  defined(__ARM_HAVE_LDREX_STREX)
+  LoadImmediate(R12, 0);
+  int32_t encoding = 0xee07cfba;  // mcr p15, 0, r12, c7, c10, 5
+  Emit(encoding);
+#else
+  LoadImmediate(R12, 0xffff0fa0);  // kuser_memory_barrier
+  blx(R12);
+#endif
+#endif
+}
+
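+// With null_allowed the generated sequence is roughly:
+//   cmp   in, #0
+//   moveq out, #0
+//   addne out, sp, #sirt_offset
+// (mov/add may expand into longer sequences for large offsets).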
+void ArmAssembler::CreateSirtEntry(ManagedRegister mout_reg,
+                                   FrameOffset sirt_offset,
+                                   ManagedRegister min_reg, bool null_allowed) {
+  ArmManagedRegister out_reg = mout_reg.AsArm();
+  ArmManagedRegister in_reg = min_reg.AsArm();
+  CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister());
+  CHECK(out_reg.IsCoreRegister());
+  if (null_allowed) {
+    // Null values get a SIRT entry value of 0.  Otherwise, the SIRT entry is
+    // the address in the SIRT holding the reference.
+    // e.g. out_reg = (in_reg == 0) ? 0 : (SP + sirt_offset)
+    if (in_reg.IsNoRegister()) {
+      LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
+                     SP, sirt_offset.Int32Value());
+      in_reg = out_reg;
+    }
+    cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
+    if (!out_reg.Equals(in_reg)) {
+      LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+    }
+    AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+  } else {
+    AddConstant(out_reg.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+  }
+}
+
+void ArmAssembler::CreateSirtEntry(FrameOffset out_off,
+                                   FrameOffset sirt_offset,
+                                   ManagedRegister mscratch,
+                                   bool null_allowed) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(scratch.IsCoreRegister());
+  if (null_allowed) {
+    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
+                   sirt_offset.Int32Value());
+    // Null values get a SIRT entry value of 0.  Otherwise, the sirt entry is
+    // the address in the SIRT holding the reference.
+    // e.g. scratch = (scratch == 0) ? 0 : (SP+sirt_offset)
+    cmp(scratch.AsCoreRegister(), ShifterOperand(0));
+    AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), NE);
+  } else {
+    AddConstant(scratch.AsCoreRegister(), SP, sirt_offset.Int32Value(), AL);
+  }
+  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
+}
+
+void ArmAssembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+                                         ManagedRegister min_reg) {
+  ArmManagedRegister out_reg = mout_reg.AsArm();
+  ArmManagedRegister in_reg = min_reg.AsArm();
+  CHECK(out_reg.IsCoreRegister());
+  CHECK(in_reg.IsCoreRegister());
+  // out_reg = (in_reg == 0) ? 0 : *in_reg
+  cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
+  if (!out_reg.Equals(in_reg)) {
+    LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+  }
+  LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
+                 in_reg.AsCoreRegister(), 0, NE);
+}
+
+void ArmAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+  // TODO: not validating references
+}
+
+void ArmAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+  // TODO: not validating references
+}
+
+void ArmAssembler::Call(ManagedRegister mbase, Offset offset,
+                        ManagedRegister mscratch) {
+  ArmManagedRegister base = mbase.AsArm();
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(base.IsCoreRegister());
+  CHECK(scratch.IsCoreRegister());
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 base.AsCoreRegister(), offset.Int32Value());
+  blx(scratch.AsCoreRegister());
+  // TODO: place reference map on call
+}
+
+void ArmAssembler::Call(FrameOffset base, Offset offset,
+                        ManagedRegister mscratch) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  CHECK(scratch.IsCoreRegister());
+  // Call *(*(SP + base) + offset)
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 SP, base.Int32Value());
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 scratch.AsCoreRegister(), offset.Int32Value());
+  blx(scratch.AsCoreRegister());
+  // TODO: place reference map on call
+}
+
+void ArmAssembler::Call(ThreadOffset /*offset*/, ManagedRegister /*scratch*/) {
+  UNIMPLEMENTED(FATAL);
+}
+
+void ArmAssembler::GetCurrentThread(ManagedRegister tr) {
+  mov(tr.AsArm().AsCoreRegister(), ShifterOperand(TR));
+}
+
+void ArmAssembler::GetCurrentThread(FrameOffset offset,
+                                    ManagedRegister /*scratch*/) {
+  StoreToOffset(kStoreWord, TR, SP, offset.Int32Value(), AL);
+}
+
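+// Emits the suspend check: load Thread::suspend_count_ through TR, compare it
+// with zero and branch to a slow path when it is non-zero. The slow path saves
+// the return value, calls pCheckSuspendFromCode with the current thread in R0,
+// restores the return value and resumes at the continuation label.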
+void ArmAssembler::SuspendPoll(ManagedRegister mscratch,
+                               ManagedRegister return_reg,
+                               FrameOffset return_save_location,
+                               size_t return_size) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  ArmSuspendCountSlowPath* slow =
+      new ArmSuspendCountSlowPath(return_reg.AsArm(), return_save_location,
+                                  return_size);
+  buffer_.EnqueueSlowPath(slow);
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 TR, Thread::SuspendCountOffset().Int32Value());
+  cmp(scratch.AsCoreRegister(), ShifterOperand(0));
+  b(slow->Entry(), NE);
+  Bind(slow->Continuation());
+}
+
+void ArmSuspendCountSlowPath::Emit(Assembler* sasm) {
+  ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
+#define __ sp_asm->
+  __ Bind(&entry_);
+  // Save return value
+  __ Store(return_save_location_, return_register_, return_size_);
+  // Pass thread as argument
+  __ mov(R0, ShifterOperand(TR));
+  __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pCheckSuspendFromCode));
+  // Note: assume that link register will be spilled/filled on method entry/exit
+  __ blx(R12);
+  // Reload return value
+  __ Load(return_register_, return_save_location_, return_size_);
+  __ b(&continuation_);
+#undef __
+}
+
+void ArmAssembler::ExceptionPoll(ManagedRegister mscratch) {
+  ArmManagedRegister scratch = mscratch.AsArm();
+  ArmExceptionSlowPath* slow = new ArmExceptionSlowPath(scratch);
+  buffer_.EnqueueSlowPath(slow);
+  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
+                 TR, Thread::ExceptionOffset().Int32Value());
+  cmp(scratch.AsCoreRegister(), ShifterOperand(0));
+  b(slow->Entry(), NE);
+}
+
+void ArmExceptionSlowPath::Emit(Assembler* sasm) {
+  ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
+#define __ sp_asm->
+  __ Bind(&entry_);
+
+  // Pass exception object as argument
+  // Don't care about preserving R0 as this call won't return
+  __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
+  // Set up call to Thread::Current()->pDeliverException
+  __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pDeliverException));
+  __ blx(R12);
+  // Call never returns
+  __ bkpt(0);
+#undef __
+}
+
+}  // namespace arm
+}  // namespace art
diff --git a/src/oat/utils/arm/assembler_arm.h b/src/oat/utils/arm/assembler_arm.h
new file mode 100644
index 0000000..d26fc1f
--- /dev/null
+++ b/src/oat/utils/arm/assembler_arm.h
@@ -0,0 +1,673 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_UTILS_ARM_ASSEMBLER_ARM_H_
+#define ART_SRC_OAT_UTILS_ARM_ASSEMBLER_ARM_H_
+
+#include "constants.h"
+#include "logging.h"
+#include "oat/utils/arm/managed_register_arm.h"
+#include "oat/utils/assembler.h"
+#include "offsets.h"
+#include "utils.h"
+#include <vector>
+
+namespace art {
+namespace arm {
+
+// Encodes Addressing Mode 1 - Data-processing operands defined in Section 5.1.
+class ShifterOperand {
+ public:
+  // Data-processing operands - Uninitialized
+  ShifterOperand() {
+    type_ = -1;
+  }
+
+  // Data-processing operands - Immediate
+  explicit ShifterOperand(uint32_t immediate) {
+    CHECK(immediate < (1 << kImmed8Bits));
+    type_ = 1;
+    encoding_ = immediate;
+  }
+
+  // Data-processing operands - Rotated immediate
+  ShifterOperand(uint32_t rotate, uint32_t immed8) {
+    CHECK((rotate < (1 << kRotateBits)) && (immed8 < (1 << kImmed8Bits)));
+    type_ = 1;
+    encoding_ = (rotate << kRotateShift) | (immed8 << kImmed8Shift);
+  }
+
+  // Data-processing operands - Register
+  explicit ShifterOperand(Register rm) {
+    type_ = 0;
+    encoding_ = static_cast<uint32_t>(rm);
+  }
+
+  // Data-processing operands - Logical shift/rotate by immediate
+  ShifterOperand(Register rm, Shift shift, uint32_t shift_imm) {
+    CHECK(shift_imm < (1 << kShiftImmBits));
+    type_ = 0;
+    encoding_ = shift_imm << kShiftImmShift |
+                static_cast<uint32_t>(shift) << kShiftShift |
+                static_cast<uint32_t>(rm);
+  }
+
+  // Data-processing operands - Logical shift/rotate by register
+  ShifterOperand(Register rm, Shift shift, Register rs) {
+    type_ = 0;
+    encoding_ = static_cast<uint32_t>(rs) << kShiftRegisterShift |
+                static_cast<uint32_t>(shift) << kShiftShift | (1 << 4) |
+                static_cast<uint32_t>(rm);
+  }
+
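+  // Returns true if |immediate| can be encoded as an ARM data-processing
+  // immediate, i.e. an 8-bit value rotated right by an even amount, and fills
+  // in *shifter_op with that encoding. For example, 0xFF000000 is 0xFF
+  // rotated right by 8 and is representable; 0x101 is not.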
+  static bool CanHold(uint32_t immediate, ShifterOperand* shifter_op) {
+    // Avoid the more expensive test for frequent small immediate values.
+    if (immediate < (1 << kImmed8Bits)) {
+      shifter_op->type_ = 1;
+      shifter_op->encoding_ = (0 << kRotateShift) | (immediate << kImmed8Shift);
+      return true;
+    }
+    // Note that immediate must be unsigned for the test to work correctly.
+    // rot == 0 is covered by the test above; starting at 1 also avoids an
+    // undefined shift by 32 in the expression below.
+    for (int rot = 1; rot < 16; rot++) {
+      uint32_t imm8 = (immediate << 2*rot) | (immediate >> (32 - 2*rot));
+      if (imm8 < (1 << kImmed8Bits)) {
+        shifter_op->type_ = 1;
+        shifter_op->encoding_ = (rot << kRotateShift) | (imm8 << kImmed8Shift);
+        return true;
+      }
+    }
+    return false;
+  }
+
+ private:
+  bool is_valid() const { return (type_ == 0) || (type_ == 1); }
+
+  uint32_t type() const {
+    CHECK(is_valid());
+    return type_;
+  }
+
+  uint32_t encoding() const {
+    CHECK(is_valid());
+    return encoding_;
+  }
+
+  uint32_t type_;  // Encodes the type field (bits 27-25) in the instruction.
+  uint32_t encoding_;
+
+  friend class ArmAssembler;
+#ifdef SOURCE_ASSEMBLER_SUPPORT
+  friend class BinaryAssembler;
+#endif
+};
+
+
+enum LoadOperandType {
+  kLoadSignedByte,
+  kLoadUnsignedByte,
+  kLoadSignedHalfword,
+  kLoadUnsignedHalfword,
+  kLoadWord,
+  kLoadWordPair,
+  kLoadSWord,
+  kLoadDWord
+};
+
+
+enum StoreOperandType {
+  kStoreByte,
+  kStoreHalfword,
+  kStoreWord,
+  kStoreWordPair,
+  kStoreSWord,
+  kStoreDWord
+};
+
+
+// Load/store multiple addressing mode.
+enum BlockAddressMode {
+  // bit encoding P U W
+  DA           = (0|0|0) << 21,  // decrement after
+  IA           = (0|4|0) << 21,  // increment after
+  DB           = (8|0|0) << 21,  // decrement before
+  IB           = (8|4|0) << 21,  // increment before
+  DA_W         = (0|0|1) << 21,  // decrement after with writeback to base
+  IA_W         = (0|4|1) << 21,  // increment after with writeback to base
+  DB_W         = (8|0|1) << 21,  // decrement before with writeback to base
+  IB_W         = (8|4|1) << 21   // increment before with writeback to base
+};
+
+
+class Address {
+ public:
+  // Memory operand addressing mode
+  enum Mode {
+    // bit encoding P U W
+    Offset       = (8|4|0) << 21,  // offset (w/o writeback to base)
+    PreIndex     = (8|4|1) << 21,  // pre-indexed addressing with writeback
+    PostIndex    = (0|4|0) << 21,  // post-indexed addressing with writeback
+    NegOffset    = (8|0|0) << 21,  // negative offset (w/o writeback to base)
+    NegPreIndex  = (8|0|1) << 21,  // negative pre-indexed with writeback
+    NegPostIndex = (0|0|0) << 21   // negative post-indexed with writeback
+  };
+
+  explicit Address(Register rn, int32_t offset = 0, Mode am = Offset) {
+    CHECK(IsAbsoluteUint(12, offset));
+    if (offset < 0) {
+      encoding_ = (am ^ (1 << kUShift)) | -offset;  // Flip U to adjust sign.
+    } else {
+      encoding_ = am | offset;
+    }
+    encoding_ |= static_cast<uint32_t>(rn) << kRnShift;
+  }
+
+  static bool CanHoldLoadOffset(LoadOperandType type, int offset);
+  static bool CanHoldStoreOffset(StoreOperandType type, int offset);
+
+ private:
+  uint32_t encoding() const { return encoding_; }
+
+  // Encoding for addressing mode 3.
+  uint32_t encoding3() const {
+    const uint32_t offset_mask = (1 << 12) - 1;
+    uint32_t offset = encoding_ & offset_mask;
+    CHECK_LT(offset, 256u);
+    return (encoding_ & ~offset_mask) | ((offset & 0xf0) << 4) | (offset & 0xf);
+  }
+
+  // Encoding for vfp load/store addressing.
+  uint32_t vencoding() const {
+    const uint32_t offset_mask = (1 << 12) - 1;
+    uint32_t offset = encoding_ & offset_mask;
+    CHECK(IsAbsoluteUint(10, offset));  // In the range -1020 to +1020.
+    CHECK_ALIGNED(offset, 4);  // Multiple of 4, since the encoding stores offset >> 2.
+    int mode = encoding_ & ((8|4|1) << 21);
+    CHECK((mode == Offset) || (mode == NegOffset));
+    uint32_t vencoding = (encoding_ & (0xf << kRnShift)) | (offset >> 2);
+    if (mode == Offset) {
+      vencoding |= 1 << 23;
+    }
+    return vencoding;
+  }
+
+  uint32_t encoding_;
+
+  friend class ArmAssembler;
+};
+
+
+class ArmAssembler : public Assembler {
+ public:
+  ArmAssembler() {}
+  virtual ~ArmAssembler() {}
+
+  // Data-processing instructions.
+  void and_(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void eor(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void sub(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+  void subs(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void rsb(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+  void rsbs(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void add(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void adds(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void adc(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void sbc(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void rsc(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void tst(Register rn, ShifterOperand so, Condition cond = AL);
+
+  void teq(Register rn, ShifterOperand so, Condition cond = AL);
+
+  void cmp(Register rn, ShifterOperand so, Condition cond = AL);
+
+  void cmn(Register rn, ShifterOperand so, Condition cond = AL);
+
+  void orr(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+  void orrs(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void mov(Register rd, ShifterOperand so, Condition cond = AL);
+  void movs(Register rd, ShifterOperand so, Condition cond = AL);
+
+  void bic(Register rd, Register rn, ShifterOperand so, Condition cond = AL);
+
+  void mvn(Register rd, ShifterOperand so, Condition cond = AL);
+  void mvns(Register rd, ShifterOperand so, Condition cond = AL);
+
+  // Miscellaneous data-processing instructions.
+  void clz(Register rd, Register rm, Condition cond = AL);
+  void movw(Register rd, uint16_t imm16, Condition cond = AL);
+  void movt(Register rd, uint16_t imm16, Condition cond = AL);
+
+  // Multiply instructions.
+  void mul(Register rd, Register rn, Register rm, Condition cond = AL);
+  void mla(Register rd, Register rn, Register rm, Register ra,
+           Condition cond = AL);
+  void mls(Register rd, Register rn, Register rm, Register ra,
+           Condition cond = AL);
+  void umull(Register rd_lo, Register rd_hi, Register rn, Register rm,
+             Condition cond = AL);
+
+  // Load/store instructions.
+  void ldr(Register rd, Address ad, Condition cond = AL);
+  void str(Register rd, Address ad, Condition cond = AL);
+
+  void ldrb(Register rd, Address ad, Condition cond = AL);
+  void strb(Register rd, Address ad, Condition cond = AL);
+
+  void ldrh(Register rd, Address ad, Condition cond = AL);
+  void strh(Register rd, Address ad, Condition cond = AL);
+
+  void ldrsb(Register rd, Address ad, Condition cond = AL);
+  void ldrsh(Register rd, Address ad, Condition cond = AL);
+
+  void ldrd(Register rd, Address ad, Condition cond = AL);
+  void strd(Register rd, Address ad, Condition cond = AL);
+
+  void ldm(BlockAddressMode am, Register base,
+           RegList regs, Condition cond = AL);
+  void stm(BlockAddressMode am, Register base,
+           RegList regs, Condition cond = AL);
+
+  void ldrex(Register rd, Register rn, Condition cond = AL);
+  void strex(Register rd, Register rt, Register rn, Condition cond = AL);
+
+  // Miscellaneous instructions.
+  void clrex();
+  void nop(Condition cond = AL);
+
+  // Note that gdb sets breakpoints using the undefined instruction 0xe7f001f0.
+  void bkpt(uint16_t imm16);
+  void svc(uint32_t imm24);
+
+  // Floating point instructions (VFPv3-D16 and VFPv3-D32 profiles).
+  void vmovsr(SRegister sn, Register rt, Condition cond = AL);
+  void vmovrs(Register rt, SRegister sn, Condition cond = AL);
+  void vmovsrr(SRegister sm, Register rt, Register rt2, Condition cond = AL);
+  void vmovrrs(Register rt, Register rt2, SRegister sm, Condition cond = AL);
+  void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond = AL);
+  void vmovrrd(Register rt, Register rt2, DRegister dm, Condition cond = AL);
+  void vmovs(SRegister sd, SRegister sm, Condition cond = AL);
+  void vmovd(DRegister dd, DRegister dm, Condition cond = AL);
+
+  // Returns false if the immediate cannot be encoded.
+  bool vmovs(SRegister sd, float s_imm, Condition cond = AL);
+  bool vmovd(DRegister dd, double d_imm, Condition cond = AL);
+
+  void vldrs(SRegister sd, Address ad, Condition cond = AL);
+  void vstrs(SRegister sd, Address ad, Condition cond = AL);
+  void vldrd(DRegister dd, Address ad, Condition cond = AL);
+  void vstrd(DRegister dd, Address ad, Condition cond = AL);
+
+  void vadds(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+  void vaddd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+  void vsubs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+  void vsubd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+  void vmuls(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+  void vmuld(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+  void vmlas(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+  void vmlad(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+  void vmlss(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+  void vmlsd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+  void vdivs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL);
+  void vdivd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL);
+
+  void vabss(SRegister sd, SRegister sm, Condition cond = AL);
+  void vabsd(DRegister dd, DRegister dm, Condition cond = AL);
+  void vnegs(SRegister sd, SRegister sm, Condition cond = AL);
+  void vnegd(DRegister dd, DRegister dm, Condition cond = AL);
+  void vsqrts(SRegister sd, SRegister sm, Condition cond = AL);
+  void vsqrtd(DRegister dd, DRegister dm, Condition cond = AL);
+
+  void vcvtsd(SRegister sd, DRegister dm, Condition cond = AL);
+  void vcvtds(DRegister dd, SRegister sm, Condition cond = AL);
+  void vcvtis(SRegister sd, SRegister sm, Condition cond = AL);
+  void vcvtid(SRegister sd, DRegister dm, Condition cond = AL);
+  void vcvtsi(SRegister sd, SRegister sm, Condition cond = AL);
+  void vcvtdi(DRegister dd, SRegister sm, Condition cond = AL);
+  void vcvtus(SRegister sd, SRegister sm, Condition cond = AL);
+  void vcvtud(SRegister sd, DRegister dm, Condition cond = AL);
+  void vcvtsu(SRegister sd, SRegister sm, Condition cond = AL);
+  void vcvtdu(DRegister dd, SRegister sm, Condition cond = AL);
+
+  void vcmps(SRegister sd, SRegister sm, Condition cond = AL);
+  void vcmpd(DRegister dd, DRegister dm, Condition cond = AL);
+  void vcmpsz(SRegister sd, Condition cond = AL);
+  void vcmpdz(DRegister dd, Condition cond = AL);
+  void vmstat(Condition cond = AL);  // VMRS APSR_nzcv, FPSCR
+
+  // Branch instructions.
+  void b(Label* label, Condition cond = AL);
+  void bl(Label* label, Condition cond = AL);
+  void blx(Register rm, Condition cond = AL);
+  void bx(Register rm, Condition cond = AL);
+
+  // Macros.
+  // Add signed constant value to rd. May clobber IP.
+  void AddConstant(Register rd, int32_t value, Condition cond = AL);
+  void AddConstant(Register rd, Register rn, int32_t value,
+                   Condition cond = AL);
+  void AddConstantSetFlags(Register rd, Register rn, int32_t value,
+                           Condition cond = AL);
+  void AddConstantWithCarry(Register rd, Register rn, int32_t value,
+                            Condition cond = AL);
+
+  // Load and Store. May clobber IP.
+  void LoadImmediate(Register rd, int32_t value, Condition cond = AL);
+  void LoadSImmediate(SRegister sd, float value, Condition cond = AL);
+  void LoadDImmediate(DRegister dd, double value,
+                      Register scratch, Condition cond = AL);
+  void MarkExceptionHandler(Label* label);
+  void LoadFromOffset(LoadOperandType type,
+                      Register reg,
+                      Register base,
+                      int32_t offset,
+                      Condition cond = AL);
+  void StoreToOffset(StoreOperandType type,
+                     Register reg,
+                     Register base,
+                     int32_t offset,
+                     Condition cond = AL);
+  void LoadSFromOffset(SRegister reg,
+                       Register base,
+                       int32_t offset,
+                       Condition cond = AL);
+  void StoreSToOffset(SRegister reg,
+                      Register base,
+                      int32_t offset,
+                      Condition cond = AL);
+  void LoadDFromOffset(DRegister reg,
+                       Register base,
+                       int32_t offset,
+                       Condition cond = AL);
+  void StoreDToOffset(DRegister reg,
+                      Register base,
+                      int32_t offset,
+                      Condition cond = AL);
+
+  void Push(Register rd, Condition cond = AL);
+  void Pop(Register rd, Condition cond = AL);
+
+  void PushList(RegList regs, Condition cond = AL);
+  void PopList(RegList regs, Condition cond = AL);
+
+  void Mov(Register rd, Register rm, Condition cond = AL);
+
+  // Convenience shift instructions. Use mov instruction with shifter operand
+  // for variants setting the status flags or using a register shift count.
+  void Lsl(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
+  void Lsr(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
+  void Asr(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
+  void Ror(Register rd, Register rm, uint32_t shift_imm, Condition cond = AL);
+  void Rrx(Register rd, Register rm, Condition cond = AL);
+
+  // Encode an unsigned 32-bit constant in a sequence of tst instructions,
+  // only affecting the flags.
+  void EncodeUint32InTstInstructions(uint32_t data);
+  // ... and decode it from a pc pointing to the start of the encoding
+  // instructions.
+  static uint32_t DecodeUint32FromTstInstructions(uword pc);
+  static bool IsInstructionForExceptionHandling(uword pc);
+
+  // Emit data (e.g. encoded instruction or immediate) to the
+  // instruction stream.
+  void Emit(int32_t value);
+  void Bind(Label* label);
+
+  //
+  // Overridden common assembler high-level functionality
+  //
+
+  // Emit code that will create an activation on the stack
+  virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
+                          const std::vector<ManagedRegister>& callee_save_regs,
+                          const std::vector<ManagedRegister>& entry_spills);
+
+  // Emit code that will remove an activation from the stack
+  virtual void RemoveFrame(size_t frame_size,
+                           const std::vector<ManagedRegister>& callee_save_regs);
+
+  virtual void IncreaseFrameSize(size_t adjust);
+  virtual void DecreaseFrameSize(size_t adjust);
+
+  // Store routines
+  virtual void Store(FrameOffset offs, ManagedRegister src, size_t size);
+  virtual void StoreRef(FrameOffset dest, ManagedRegister src);
+  virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src);
+
+  virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+                                     ManagedRegister scratch);
+
+  virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+                                      ManagedRegister scratch);
+
+  virtual void StoreStackOffsetToThread(ThreadOffset thr_offs,
+                                        FrameOffset fr_offs,
+                                        ManagedRegister scratch);
+
+  virtual void StoreStackPointerToThread(ThreadOffset thr_offs);
+
+  virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
+                             FrameOffset in_off, ManagedRegister scratch);
+
+  // Load routines
+  virtual void Load(ManagedRegister dest, FrameOffset src, size_t size);
+
+  virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size);
+
+  virtual void LoadRef(ManagedRegister dest, FrameOffset  src);
+
+  virtual void LoadRef(ManagedRegister dest, ManagedRegister base,
+                       MemberOffset offs);
+
+  virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base,
+                          Offset offs);
+
+  virtual void LoadRawPtrFromThread(ManagedRegister dest,
+                                    ThreadOffset offs);
+
+  // Copying routines
+  virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size);
+
+  virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
+                                    ManagedRegister scratch);
+
+  virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+                                  ManagedRegister scratch);
+
+  virtual void CopyRef(FrameOffset dest, FrameOffset src,
+                       ManagedRegister scratch);
+
+  virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size);
+
+  virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void Copy(ManagedRegister dest, Offset dest_offset,
+                    ManagedRegister src, Offset src_offset,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void MemoryBarrier(ManagedRegister scratch);
+
+  // Exploit fast access in managed code to Thread::Current()
+  virtual void GetCurrentThread(ManagedRegister tr);
+  virtual void GetCurrentThread(FrameOffset dest_offset,
+                                ManagedRegister scratch);
+
+  // Set up out_reg to hold an Object** into the SIRT, or to be NULL if the
+  // value is null and null_allowed. in_reg holds a possibly stale reference
+  // that can be used to avoid loading the SIRT entry to see if the value is
+  // NULL.
+  virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+                               ManagedRegister in_reg, bool null_allowed);
+
+  // Set up out_off to hold an Object** into the SIRT, or to be NULL if the
+  // value is null and null_allowed.
+  virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+                               ManagedRegister scratch, bool null_allowed);
+
+  // src holds a SIRT entry (Object**); load this into dst.
+  virtual void LoadReferenceFromSirt(ManagedRegister dst,
+                                     ManagedRegister src);
+
+  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src cannot be null.
+  virtual void VerifyObject(ManagedRegister src, bool could_be_null);
+  virtual void VerifyObject(FrameOffset src, bool could_be_null);
+
+  // Call to address held at [base+offset]
+  virtual void Call(ManagedRegister base, Offset offset,
+                    ManagedRegister scratch);
+  virtual void Call(FrameOffset base, Offset offset,
+                    ManagedRegister scratch);
+  virtual void Call(ThreadOffset offset, ManagedRegister scratch);
+
+  // Generate code to check if Thread::Current()->suspend_count_ is non-zero
+  // and branch to a SuspendSlowPath if it is. The SuspendSlowPath will continue
+  // at the next instruction.
+  virtual void SuspendPoll(ManagedRegister scratch, ManagedRegister return_reg,
+                           FrameOffset return_save_location,
+                           size_t return_size);
+
+  // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to a ExceptionSlowPath if it is.
+  virtual void ExceptionPoll(ManagedRegister scratch);
+
+ private:
+  void EmitType01(Condition cond,
+                  int type,
+                  Opcode opcode,
+                  int set_cc,
+                  Register rn,
+                  Register rd,
+                  ShifterOperand so);
+
+  void EmitType5(Condition cond, int offset, bool link);
+
+  void EmitMemOp(Condition cond,
+                 bool load,
+                 bool byte,
+                 Register rd,
+                 Address ad);
+
+  void EmitMemOpAddressMode3(Condition cond,
+                             int32_t mode,
+                             Register rd,
+                             Address ad);
+
+  void EmitMultiMemOp(Condition cond,
+                      BlockAddressMode am,
+                      bool load,
+                      Register base,
+                      RegList regs);
+
+  void EmitShiftImmediate(Condition cond,
+                          Shift opcode,
+                          Register rd,
+                          Register rm,
+                          ShifterOperand so);
+
+  void EmitShiftRegister(Condition cond,
+                         Shift opcode,
+                         Register rd,
+                         Register rm,
+                         ShifterOperand so);
+
+  void EmitMulOp(Condition cond,
+                 int32_t opcode,
+                 Register rd,
+                 Register rn,
+                 Register rm,
+                 Register rs);
+
+  void EmitVFPsss(Condition cond,
+                  int32_t opcode,
+                  SRegister sd,
+                  SRegister sn,
+                  SRegister sm);
+
+  void EmitVFPddd(Condition cond,
+                  int32_t opcode,
+                  DRegister dd,
+                  DRegister dn,
+                  DRegister dm);
+
+  void EmitVFPsd(Condition cond,
+                 int32_t opcode,
+                 SRegister sd,
+                 DRegister dm);
+
+  void EmitVFPds(Condition cond,
+                 int32_t opcode,
+                 DRegister dd,
+                 SRegister sm);
+
+  void EmitBranch(Condition cond, Label* label, bool link);
+  static int32_t EncodeBranchOffset(int offset, int32_t inst);
+  static int DecodeBranchOffset(int32_t inst);
+  int32_t EncodeTstOffset(int offset, int32_t inst);
+  int DecodeTstOffset(int32_t inst);
+
+  // Compares two registers by their encodings; usable as a sort comparator.
+  static int RegisterCompare(const Register* reg1, const Register* reg2) {
+    return *reg1 - *reg2;
+  }
+};
+
+// Slow path entered when Thread::Current()->exception_ is non-null.
+class ArmExceptionSlowPath : public SlowPath {
+ public:
+  explicit ArmExceptionSlowPath(ArmManagedRegister scratch) : scratch_(scratch) {}
+  virtual void Emit(Assembler *sp_asm);
+ private:
+  const ArmManagedRegister scratch_;
+};
+
+// Slow path entered when Thread::Current()->suspend_count_ is non-zero.
+class ArmSuspendCountSlowPath : public SlowPath {
+ public:
+  ArmSuspendCountSlowPath(ArmManagedRegister return_reg,
+                          FrameOffset return_save_location,
+                          size_t return_size) :
+     return_register_(return_reg), return_save_location_(return_save_location),
+     return_size_(return_size) {}
+  virtual void Emit(Assembler *sp_asm);
+
+ private:
+  // Remember how to save the return value
+  const ArmManagedRegister return_register_;
+  const FrameOffset return_save_location_;
+  const size_t return_size_;
+};
+
+}  // namespace arm
+}  // namespace art
+
+#endif  // ART_SRC_OAT_UTILS_ARM_ASSEMBLER_ARM_H_
diff --git a/src/oat/utils/arm/managed_register_arm.cc b/src/oat/utils/arm/managed_register_arm.cc
new file mode 100644
index 0000000..57c2305
--- /dev/null
+++ b/src/oat/utils/arm/managed_register_arm.cc
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "managed_register_arm.h"
+
+#include "globals.h"
+
+namespace art {
+namespace arm {
+
+// We need all registers for caching of locals.
+// Registers R9 .. R15 are reserved.
+static const int kNumberOfAvailableCoreRegisters = (R8 - R0) + 1;
+static const int kNumberOfAvailableSRegisters = kNumberOfSRegisters;
+static const int kNumberOfAvailableDRegisters = kNumberOfDRegisters;
+static const int kNumberOfAvailableOverlappingDRegisters =
+    kNumberOfOverlappingDRegisters;
+static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs;
+
+
+// Returns true if this managed-register overlaps the other managed-register.
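+// For example, the register pair R0_R1 overlaps the core register R1, and an
+// overlapping D-register overlaps both of the S-registers it is built from.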
+bool ArmManagedRegister::Overlaps(const ArmManagedRegister& other) const {
+  if (IsNoRegister() || other.IsNoRegister()) return false;
+  if (Equals(other)) return true;
+  if (IsRegisterPair()) {
+    Register low = AsRegisterPairLow();
+    Register high = AsRegisterPairHigh();
+    return ArmManagedRegister::FromCoreRegister(low).Overlaps(other) ||
+        ArmManagedRegister::FromCoreRegister(high).Overlaps(other);
+  }
+  if (IsOverlappingDRegister()) {
+    if (other.IsDRegister()) return Equals(other);
+    if (other.IsSRegister()) {
+      SRegister low = AsOverlappingDRegisterLow();
+      SRegister high = AsOverlappingDRegisterHigh();
+      SRegister other_sreg = other.AsSRegister();
+      return (low == other_sreg) || (high == other_sreg);
+    }
+    return false;
+  }
+  if (other.IsRegisterPair() || other.IsOverlappingDRegister()) {
+    return other.Overlaps(*this);
+  }
+  return false;
+}
+
+
+int ArmManagedRegister::AllocIdLow() const {
+  CHECK(IsOverlappingDRegister() || IsRegisterPair());
+  const int r = RegId() - (kNumberOfCoreRegIds + kNumberOfSRegIds);
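+  // Worked example (a sketch): for D3, r == 3 and low is the alloc id of S6;
+  // for R4_R5, r == kNumberOfDRegIds + 2 and low is the alloc id of R4.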
+  int low;
+  if (r < kNumberOfOverlappingDRegIds) {
+    CHECK(IsOverlappingDRegister());
+    low = (r * 2) + kNumberOfCoreRegIds;  // Return an SRegister.
+  } else {
+    CHECK(IsRegisterPair());
+    low = (r - kNumberOfDRegIds) * 2;  // Return a Register.
+    if (low > 6) {
+      // The even-numbered pairs only go up to R6_R7, so this must be the
+      // Dalvik special case R1_R2, whose low register is R1.
+      low = 1;
+    }
+  }
+  return low;
+}
+
+
+int ArmManagedRegister::AllocIdHigh() const {
+  return AllocIdLow() + 1;
+}
+
+
+void ArmManagedRegister::Print(std::ostream& os) const {
+  if (!IsValidManagedRegister()) {
+    os << "No Register";
+  } else if (IsCoreRegister()) {
+    os << "Core: " << static_cast<int>(AsCoreRegister());
+  } else if (IsRegisterPair()) {
+    os << "Pair: " << static_cast<int>(AsRegisterPairLow()) << ", "
+       << static_cast<int>(AsRegisterPairHigh());
+  } else if (IsSRegister()) {
+    os << "SRegister: " << static_cast<int>(AsSRegister());
+  } else if (IsDRegister()) {
+    os << "DRegister: " << static_cast<int>(AsDRegister());
+  } else {
+    os << "??: " << RegId();
+  }
+}
+
+std::ostream& operator<<(std::ostream& os, const ArmManagedRegister& reg) {
+  reg.Print(os);
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& r) {
+  os << ArmManagedRegister::FromRegisterPair(r);
+  return os;
+}
+
+}  // namespace arm
+}  // namespace art
diff --git a/src/oat/utils/arm/managed_register_arm.h b/src/oat/utils/arm/managed_register_arm.h
new file mode 100644
index 0000000..8808d2b
--- /dev/null
+++ b/src/oat/utils/arm/managed_register_arm.h
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_UTILS_ARM_MANAGED_REGISTER_ARM_H_
+#define ART_SRC_OAT_UTILS_ARM_MANAGED_REGISTER_ARM_H_
+
+#include "constants.h"
+#include "logging.h"
+#include "oat/utils/managed_register.h"
+
+namespace art {
+namespace arm {
+
+// Values for register pairs.
+enum RegisterPair {
+  R0_R1 = 0,
+  R2_R3 = 1,
+  R4_R5 = 2,
+  R6_R7 = 3,
+  R1_R2 = 4,  // Dalvik style passing
+  kNumberOfRegisterPairs = 5,
+  kNoRegisterPair = -1,
+};
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg);
+
+const int kNumberOfCoreRegIds = kNumberOfCoreRegisters;
+const int kNumberOfCoreAllocIds = kNumberOfCoreRegisters;
+
+const int kNumberOfSRegIds = kNumberOfSRegisters;
+const int kNumberOfSAllocIds = kNumberOfSRegisters;
+
+const int kNumberOfDRegIds = kNumberOfDRegisters;
+const int kNumberOfOverlappingDRegIds = kNumberOfOverlappingDRegisters;
+const int kNumberOfDAllocIds = kNumberOfDRegIds - kNumberOfOverlappingDRegIds;
+
+const int kNumberOfPairRegIds = kNumberOfRegisterPairs;
+
+const int kNumberOfRegIds = kNumberOfCoreRegIds + kNumberOfSRegIds +
+    kNumberOfDRegIds + kNumberOfPairRegIds;
+const int kNumberOfAllocIds =
+    kNumberOfCoreAllocIds + kNumberOfSAllocIds + kNumberOfDAllocIds;
+
+// Register ids map:
+//   [0..R[  core registers (enum Register)
+//   [R..S[  single precision VFP registers (enum SRegister)
+//   [S..D[  double precision VFP registers (enum DRegister)
+//   [D..P[  core register pairs (enum RegisterPair)
+// where
+//   R = kNumberOfCoreRegIds
+//   S = R + kNumberOfSRegIds
+//   D = S + kNumberOfDRegIds
+//   P = D + kNumberOfRegisterPairs
+
+// Allocation ids map:
+//   [0..R[  core registers (enum Register)
+//   [R..S[  single precision VFP registers (enum SRegister)
+//   [S..N[  non-overlapping double precision VFP registers (16-31 in enum
+//           DRegister, VFPv3-D32 only)
+// where
+//   R = kNumberOfCoreAllocIds
+//   S = R + kNumberOfSAllocIds
+//   N = S + kNumberOfDAllocIds
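+
+// Illustrative example (a sketch, assuming the usual ARM counts from
+// constants.h of 16 core registers and 32 S registers):
+//   FromCoreRegister(R3)     -> id 3
+//   FromSRegister(S5)        -> id 16 + 5
+//   FromDRegister(D2)        -> id 16 + 32 + 2
+//   FromRegisterPair(R2_R3)  -> id 16 + 32 + kNumberOfDRegIds + 1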
+
+
+// An instance of class 'ManagedRegister' represents a single ARM register or a
+// pair of core ARM registers (enum RegisterPair). A single register is either a
+// core register (enum Register), a VFP single precision register
+// (enum SRegister), or a VFP double precision register (enum DRegister).
+// 'ManagedRegister::NoRegister()' returns an invalid ManagedRegister.
+// There is a one-to-one mapping between ManagedRegister and register id.
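+//
+// A minimal usage sketch (all of these accessors are declared below):
+//   ArmManagedRegister pair = ArmManagedRegister::FromRegisterPair(R0_R1);
+//   if (pair.IsRegisterPair()) {
+//     Register lo = pair.AsRegisterPairLow();   // R0
+//     Register hi = pair.AsRegisterPairHigh();  // R1
+//   }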
+class ArmManagedRegister : public ManagedRegister {
+ public:
+  Register AsCoreRegister() const {
+    CHECK(IsCoreRegister());
+    return static_cast<Register>(id_);
+  }
+
+  SRegister AsSRegister() const {
+    CHECK(IsSRegister());
+    return static_cast<SRegister>(id_ - kNumberOfCoreRegIds);
+  }
+
+  DRegister AsDRegister() const {
+    CHECK(IsDRegister());
+    return static_cast<DRegister>(id_ - kNumberOfCoreRegIds - kNumberOfSRegIds);
+  }
+
+  SRegister AsOverlappingDRegisterLow() const {
+    CHECK(IsOverlappingDRegister());
+    DRegister d_reg = AsDRegister();
+    return static_cast<SRegister>(d_reg * 2);
+  }
+
+  SRegister AsOverlappingDRegisterHigh() const {
+    CHECK(IsOverlappingDRegister());
+    DRegister d_reg = AsDRegister();
+    return static_cast<SRegister>(d_reg * 2 + 1);
+  }
+
+  RegisterPair AsRegisterPair() const {
+    CHECK(IsRegisterPair());
+    Register reg_low = AsRegisterPairLow();
+    if (reg_low == R1) {
+      return R1_R2;
+    } else {
+      return static_cast<RegisterPair>(reg_low / 2);
+    }
+  }
+
+  Register AsRegisterPairLow() const {
+    CHECK(IsRegisterPair());
+    // The register id mapping is arranged so that AllocIdLow() can be used here.
+    return FromRegId(AllocIdLow()).AsCoreRegister();
+  }
+
+  Register AsRegisterPairHigh() const {
+    CHECK(IsRegisterPair());
+    // The register id mapping is arranged so that AllocIdHigh() can be used here.
+    return FromRegId(AllocIdHigh()).AsCoreRegister();
+  }
+
+  bool IsCoreRegister() const {
+    CHECK(IsValidManagedRegister());
+    return (0 <= id_) && (id_ < kNumberOfCoreRegIds);
+  }
+
+  bool IsSRegister() const {
+    CHECK(IsValidManagedRegister());
+    const int test = id_ - kNumberOfCoreRegIds;
+    return (0 <= test) && (test < kNumberOfSRegIds);
+  }
+
+  bool IsDRegister() const {
+    CHECK(IsValidManagedRegister());
+    const int test = id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds);
+    return (0 <= test) && (test < kNumberOfDRegIds);
+  }
+
+  // Returns true if this DRegister overlaps SRegisters.
+  bool IsOverlappingDRegister() const {
+    CHECK(IsValidManagedRegister());
+    const int test = id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds);
+    return (0 <= test) && (test < kNumberOfOverlappingDRegIds);
+  }
+
+  bool IsRegisterPair() const {
+    CHECK(IsValidManagedRegister());
+    const int test =
+        id_ - (kNumberOfCoreRegIds + kNumberOfSRegIds + kNumberOfDRegIds);
+    return (0 <= test) && (test < kNumberOfPairRegIds);
+  }
+
+  bool IsSameType(ArmManagedRegister test) const {
+    CHECK(IsValidManagedRegister() && test.IsValidManagedRegister());
+    return
+      (IsCoreRegister() && test.IsCoreRegister()) ||
+      (IsSRegister() && test.IsSRegister()) ||
+      (IsDRegister() && test.IsDRegister()) ||
+      (IsRegisterPair() && test.IsRegisterPair());
+  }
+
+
+  // Returns true if the two managed-registers ('this' and 'other') overlap.
+  // Either managed-register may be the NoRegister. If both are the NoRegister
+  // then false is returned.
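+  // For example, FromDRegister(D0) overlaps FromSRegister(S0) and
+  // FromSRegister(S1), and FromRegisterPair(R0_R1) overlaps both
+  // FromCoreRegister(R0) and FromCoreRegister(R1); two distinct core
+  // registers never overlap.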
+  bool Overlaps(const ArmManagedRegister& other) const;
+
+  void Print(std::ostream& os) const;
+
+  static ArmManagedRegister FromCoreRegister(Register r) {
+    CHECK_NE(r, kNoRegister);
+    return FromRegId(r);
+  }
+
+  static ArmManagedRegister FromSRegister(SRegister r) {
+    CHECK_NE(r, kNoSRegister);
+    return FromRegId(r + kNumberOfCoreRegIds);
+  }
+
+  static ArmManagedRegister FromDRegister(DRegister r) {
+    CHECK_NE(r, kNoDRegister);
+    return FromRegId(r + (kNumberOfCoreRegIds + kNumberOfSRegIds));
+  }
+
+  static ArmManagedRegister FromRegisterPair(RegisterPair r) {
+    CHECK_NE(r, kNoRegisterPair);
+    return FromRegId(r + (kNumberOfCoreRegIds +
+                          kNumberOfSRegIds + kNumberOfDRegIds));
+  }
+
+  // Return a RegisterPair consisting of Register r_low and r_low + 1.
+  static ArmManagedRegister FromCoreRegisterPair(Register r_low) {
+    if (r_low != R1) {  // not the dalvik special case
+      CHECK_NE(r_low, kNoRegister);
+      CHECK_EQ(0, (r_low % 2));
+      const int r = r_low / 2;
+      CHECK_LT(r, kNumberOfPairRegIds);
+      return FromRegisterPair(static_cast<RegisterPair>(r));
+    } else {
+      return FromRegisterPair(R1_R2);
+    }
+  }
+
+  // Return a DRegister overlapping SRegister r_low and r_low + 1.
+  static ArmManagedRegister FromSRegisterPair(SRegister r_low) {
+    CHECK_NE(r_low, kNoSRegister);
+    CHECK_EQ(0, (r_low % 2));
+    const int r = r_low / 2;
+    CHECK_LT(r, kNumberOfOverlappingDRegIds);
+    return FromDRegister(static_cast<DRegister>(r));
+  }
+
+ private:
+  bool IsValidManagedRegister() const {
+    return (0 <= id_) && (id_ < kNumberOfRegIds);
+  }
+
+  int RegId() const {
+    CHECK(!IsNoRegister());
+    return id_;
+  }
+
+  int AllocId() const {
+    CHECK(IsValidManagedRegister() &&
+           !IsOverlappingDRegister() && !IsRegisterPair());
+    int r = id_;
+    if ((kNumberOfDAllocIds > 0) && IsDRegister()) {  // VFPv3-D32 only.
+      r -= kNumberOfOverlappingDRegIds;
+    }
+    CHECK_LT(r, kNumberOfAllocIds);
+    return r;
+  }
+
+  int AllocIdLow() const;
+  int AllocIdHigh() const;
+
+  friend class ManagedRegister;
+
+  explicit ArmManagedRegister(int reg_id) : ManagedRegister(reg_id) {}
+
+  static ArmManagedRegister FromRegId(int reg_id) {
+    ArmManagedRegister reg(reg_id);
+    CHECK(reg.IsValidManagedRegister());
+    return reg;
+  }
+};
+
+std::ostream& operator<<(std::ostream& os, const ArmManagedRegister& reg);
+
+}  // namespace arm
+
+inline arm::ArmManagedRegister ManagedRegister::AsArm() const {
+  arm::ArmManagedRegister reg(id_);
+  CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister());
+  return reg;
+}
+
+}  // namespace art
+
+#endif  // ART_SRC_OAT_UTILS_ARM_MANAGED_REGISTER_ARM_H_
diff --git a/src/oat/utils/arm/managed_register_arm_test.cc b/src/oat/utils/arm/managed_register_arm_test.cc
new file mode 100644
index 0000000..f5d4cc0
--- /dev/null
+++ b/src/oat/utils/arm/managed_register_arm_test.cc
@@ -0,0 +1,767 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "globals.h"
+#include "managed_register_arm.h"
+#include "gtest/gtest.h"
+
+namespace art {
+namespace arm {
+
+TEST(ArmManagedRegister, NoRegister) {
+  ArmManagedRegister reg = ManagedRegister::NoRegister().AsArm();
+  EXPECT_TRUE(reg.IsNoRegister());
+  EXPECT_TRUE(!reg.Overlaps(reg));
+}
+
+TEST(ArmManagedRegister, CoreRegister) {
+  ArmManagedRegister reg = ArmManagedRegister::FromCoreRegister(R0);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(R0, reg.AsCoreRegister());
+
+  reg = ArmManagedRegister::FromCoreRegister(R1);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(R1, reg.AsCoreRegister());
+
+  reg = ArmManagedRegister::FromCoreRegister(R8);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(R8, reg.AsCoreRegister());
+
+  reg = ArmManagedRegister::FromCoreRegister(R15);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(R15, reg.AsCoreRegister());
+}
+
+
+TEST(ArmManagedRegister, SRegister) {
+  ArmManagedRegister reg = ArmManagedRegister::FromSRegister(S0);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(S0, reg.AsSRegister());
+
+  reg = ArmManagedRegister::FromSRegister(S1);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(S1, reg.AsSRegister());
+
+  reg = ArmManagedRegister::FromSRegister(S3);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(S3, reg.AsSRegister());
+
+  reg = ArmManagedRegister::FromSRegister(S15);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(S15, reg.AsSRegister());
+
+  reg = ArmManagedRegister::FromSRegister(S30);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(S30, reg.AsSRegister());
+
+  reg = ArmManagedRegister::FromSRegister(S31);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(S31, reg.AsSRegister());
+}
+
+
+TEST(ArmManagedRegister, DRegister) {
+  ArmManagedRegister reg = ArmManagedRegister::FromDRegister(D0);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D0, reg.AsDRegister());
+  EXPECT_EQ(S0, reg.AsOverlappingDRegisterLow());
+  EXPECT_EQ(S1, reg.AsOverlappingDRegisterHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S0)));
+
+  reg = ArmManagedRegister::FromDRegister(D1);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D1, reg.AsDRegister());
+  EXPECT_EQ(S2, reg.AsOverlappingDRegisterLow());
+  EXPECT_EQ(S3, reg.AsOverlappingDRegisterHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S2)));
+
+  reg = ArmManagedRegister::FromDRegister(D6);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D6, reg.AsDRegister());
+  EXPECT_EQ(S12, reg.AsOverlappingDRegisterLow());
+  EXPECT_EQ(S13, reg.AsOverlappingDRegisterHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S12)));
+
+  reg = ArmManagedRegister::FromDRegister(D14);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D14, reg.AsDRegister());
+  EXPECT_EQ(S28, reg.AsOverlappingDRegisterLow());
+  EXPECT_EQ(S29, reg.AsOverlappingDRegisterHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S28)));
+
+  reg = ArmManagedRegister::FromDRegister(D15);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D15, reg.AsDRegister());
+  EXPECT_EQ(S30, reg.AsOverlappingDRegisterLow());
+  EXPECT_EQ(S31, reg.AsOverlappingDRegisterHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromSRegisterPair(S30)));
+
+#ifdef VFPv3_D32
+  reg = ArmManagedRegister::FromDRegister(D16);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D16, reg.AsDRegister());
+
+  reg = ArmManagedRegister::FromDRegister(D18);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D18, reg.AsDRegister());
+
+  reg = ArmManagedRegister::FromDRegister(D30);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D30, reg.AsDRegister());
+
+  reg = ArmManagedRegister::FromDRegister(D31);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(D31, reg.AsDRegister());
+#endif  // VFPv3_D32
+}
+
+
+TEST(ArmManagedRegister, Pair) {
+  ArmManagedRegister reg = ArmManagedRegister::FromRegisterPair(R0_R1);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(R0_R1, reg.AsRegisterPair());
+  EXPECT_EQ(R0, reg.AsRegisterPairLow());
+  EXPECT_EQ(R1, reg.AsRegisterPairHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R0)));
+
+  reg = ArmManagedRegister::FromRegisterPair(R1_R2);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(R1_R2, reg.AsRegisterPair());
+  EXPECT_EQ(R1, reg.AsRegisterPairLow());
+  EXPECT_EQ(R2, reg.AsRegisterPairHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R1)));
+
+  reg = ArmManagedRegister::FromRegisterPair(R2_R3);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(R2_R3, reg.AsRegisterPair());
+  EXPECT_EQ(R2, reg.AsRegisterPairLow());
+  EXPECT_EQ(R3, reg.AsRegisterPairHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R2)));
+
+  reg = ArmManagedRegister::FromRegisterPair(R4_R5);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(R4_R5, reg.AsRegisterPair());
+  EXPECT_EQ(R4, reg.AsRegisterPairLow());
+  EXPECT_EQ(R5, reg.AsRegisterPairHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R4)));
+
+  reg = ArmManagedRegister::FromRegisterPair(R6_R7);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCoreRegister());
+  EXPECT_TRUE(!reg.IsSRegister());
+  EXPECT_TRUE(!reg.IsDRegister());
+  EXPECT_TRUE(!reg.IsOverlappingDRegister());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(R6_R7, reg.AsRegisterPair());
+  EXPECT_EQ(R6, reg.AsRegisterPairLow());
+  EXPECT_EQ(R7, reg.AsRegisterPairHigh());
+  EXPECT_TRUE(reg.Equals(ArmManagedRegister::FromCoreRegisterPair(R6)));
+}
+
+
+TEST(ArmManagedRegister, Equals) {
+  ManagedRegister no_reg = ManagedRegister::NoRegister();
+  EXPECT_TRUE(no_reg.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!no_reg.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_R0 = ArmManagedRegister::FromCoreRegister(R0);
+  EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(reg_R0.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_R0.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_R1 = ArmManagedRegister::FromCoreRegister(R1);
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(reg_R1.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_R1.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_R8 = ArmManagedRegister::FromCoreRegister(R8);
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(reg_R8.Equals(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_R8.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_S0 = ArmManagedRegister::FromSRegister(S0);
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(reg_S0.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_S0.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_S1 = ArmManagedRegister::FromSRegister(S1);
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(reg_S1.Equals(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_S1.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_S31 = ArmManagedRegister::FromSRegister(S31);
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(reg_S31.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_S31.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_D0 = ArmManagedRegister::FromDRegister(D0);
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(reg_D0.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_D0.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_D15 = ArmManagedRegister::FromDRegister(D15);
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(reg_D15.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(!reg_D15.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+#ifdef VFPv3_D32
+  ArmManagedRegister reg_D16 = ArmManagedRegister::FromDRegister(D16);
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(reg_D16.Equals(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg_D16.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_D30 = ArmManagedRegister::FromDRegister(D30);
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(reg_D30.Equals(ArmManagedRegister::FromDRegister(D30)));
+  EXPECT_TRUE(!reg_D30.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+
+  ArmManagedRegister reg_D31 = ArmManagedRegister::FromDRegister(D31);
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromDRegister(D30)));
+  EXPECT_TRUE(reg_D31.Equals(ArmManagedRegister::FromDRegister(D31)));
+  EXPECT_TRUE(!reg_D31.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+#endif  // VFPv3_D32
+
+  ArmManagedRegister reg_R0R1 = ArmManagedRegister::FromRegisterPair(R0_R1);
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(reg_R0R1.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg_R0R1.Equals(ArmManagedRegister::FromRegisterPair(R2_R3)));
+
+  ArmManagedRegister reg_R4R5 = ArmManagedRegister::FromRegisterPair(R4_R5);
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(reg_R4R5.Equals(ArmManagedRegister::FromRegisterPair(R4_R5)));
+  EXPECT_TRUE(!reg_R4R5.Equals(ArmManagedRegister::FromRegisterPair(R6_R7)));
+
+  ArmManagedRegister reg_R6R7 = ArmManagedRegister::FromRegisterPair(R6_R7);
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::NoRegister()));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg_R6R7.Equals(ArmManagedRegister::FromRegisterPair(R4_R5)));
+  EXPECT_TRUE(reg_R6R7.Equals(ArmManagedRegister::FromRegisterPair(R6_R7)));
+}
+
+
+TEST(ArmManagedRegister, Overlaps) {
+  ArmManagedRegister reg = ArmManagedRegister::FromCoreRegister(R0);
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromCoreRegister(R1);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromCoreRegister(R7);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromSRegister(S0);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromSRegister(S1);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromSRegister(S15);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromSRegister(S31);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromDRegister(D0);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromDRegister(D7);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromDRegister(D15);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+#ifdef VFPv3_D32
+  reg = ArmManagedRegister::FromDRegister(D16);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromDRegister(D31);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+#endif  // VFPv3_D32
+
+  reg = ArmManagedRegister::FromRegisterPair(R0_R1);
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+
+  reg = ArmManagedRegister::FromRegisterPair(R4_R5);
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromCoreRegister(R8)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S2)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S15)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S30)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromSRegister(S31)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D0)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D1)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D7)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D15)));
+#ifdef VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D16)));
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromDRegister(D31)));
+#endif  // VFPv3_D32
+  EXPECT_TRUE(!reg.Overlaps(ArmManagedRegister::FromRegisterPair(R0_R1)));
+  EXPECT_TRUE(reg.Overlaps(ArmManagedRegister::FromRegisterPair(R4_R5)));
+}
+
+}  // namespace arm
+}  // namespace art
diff --git a/src/oat/utils/assembler.cc b/src/oat/utils/assembler.cc
new file mode 100644
index 0000000..249a771
--- /dev/null
+++ b/src/oat/utils/assembler.cc
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler.h"
+
+#include <algorithm>
+#include <vector>
+
+#include "arm/assembler_arm.h"
+#include "x86/assembler_x86.h"
+#include "globals.h"
+#include "memory_region.h"
+
+namespace art {
+
+static byte* NewContents(size_t capacity) {
+  return new byte[capacity];
+}
+
+
+AssemblerBuffer::AssemblerBuffer() {
+  static const size_t kInitialBufferCapacity = 4 * KB;
+  contents_ = NewContents(kInitialBufferCapacity);
+  cursor_ = contents_;
+  limit_ = ComputeLimit(contents_, kInitialBufferCapacity);
+  fixup_ = NULL;
+  slow_path_ = NULL;
+#ifndef NDEBUG
+  has_ensured_capacity_ = false;
+  fixups_processed_ = false;
+#endif
+
+  // Verify internal state.
+  CHECK_EQ(Capacity(), kInitialBufferCapacity);
+  CHECK_EQ(Size(), 0U);
+}
+
+
+AssemblerBuffer::~AssemblerBuffer() {
+  delete[] contents_;
+}
+
+
+void AssemblerBuffer::ProcessFixups(const MemoryRegion& region) {
+  AssemblerFixup* fixup = fixup_;
+  while (fixup != NULL) {
+    fixup->Process(region, fixup->position());
+    fixup = fixup->previous();
+  }
+}
+
+
+void AssemblerBuffer::FinalizeInstructions(const MemoryRegion& instructions) {
+  // Copy the instructions from the buffer.
+  MemoryRegion from(reinterpret_cast<void*>(contents()), Size());
+  instructions.CopyFrom(0, from);
+  // Process fixups in the instructions.
+  ProcessFixups(instructions);
+#ifndef NDEBUG
+  fixups_processed_ = true;
+#endif
+}
+
+
+void AssemblerBuffer::ExtendCapacity() {
+  size_t old_size = Size();
+  size_t old_capacity = Capacity();
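+  // Growth policy: double the capacity, but never grow by more than 1 MB in a
+  // single step, so very large buffers expand in 1 MB increments.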
+  size_t new_capacity = std::min(old_capacity * 2, old_capacity + 1 * MB);
+
+  // Allocate the new data area and copy contents of the old one to it.
+  byte* new_contents = NewContents(new_capacity);
+  memmove(reinterpret_cast<void*>(new_contents),
+          reinterpret_cast<void*>(contents_),
+          old_size);
+
+  // Compute the relocation delta and switch to the new contents area.
+  ptrdiff_t delta = new_contents - contents_;
+  contents_ = new_contents;
+
+  // Update the cursor and recompute the limit.
+  cursor_ += delta;
+  limit_ = ComputeLimit(new_contents, new_capacity);
+
+  // Verify internal state.
+  CHECK_EQ(Capacity(), new_capacity);
+  CHECK_EQ(Size(), old_size);
+}
+
+
+Assembler* Assembler::Create(InstructionSet instruction_set) {
+  if (instruction_set == kX86) {
+    return new x86::X86Assembler();
+  } else {
+    CHECK(instruction_set == kArm || instruction_set == kThumb2);
+    return new arm::ArmAssembler();
+  }
+}
+
+}  // namespace art
diff --git a/src/oat/utils/assembler.h b/src/oat/utils/assembler.h
new file mode 100644
index 0000000..6fd150d
--- /dev/null
+++ b/src/oat/utils/assembler.h
@@ -0,0 +1,453 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_UTILS_ASSEMBLER_H_
+#define ART_SRC_OAT_UTILS_ASSEMBLER_H_
+
+#include <vector>
+
+#include "constants.h"
+#include "logging.h"
+#include "macros.h"
+#include "managed_register.h"
+#include "memory_region.h"
+#include "offsets.h"
+
+namespace art {
+
+class Assembler;
+class AssemblerBuffer;
+class AssemblerFixup;
+
+namespace arm {
+  class ArmAssembler;
+}
+namespace x86 {
+  class X86Assembler;
+}
+
+class Label {
+ public:
+  Label() : position_(0) {}
+
+  ~Label() {
+    // Assert if label is being destroyed with unresolved branches pending.
+    CHECK(!IsLinked());
+  }
+
+  // Returns the position for bound and linked labels. Cannot be used
+  // for unused labels.
+  int Position() const {
+    CHECK(!IsUnused());
+    return IsBound() ? -position_ - kPointerSize : position_ - kPointerSize;
+  }
+
+  int LinkPosition() const {
+    CHECK(IsLinked());
+    return position_ - kWordSize;
+  }
+
+  bool IsBound() const { return position_ < 0; }
+  bool IsUnused() const { return position_ == 0; }
+  bool IsLinked() const { return position_ > 0; }
+
+ private:
+  int position_;
+
+  void Reinitialize() {
+    position_ = 0;
+  }
+
+  void BindTo(int position) {
+    CHECK(!IsBound());
+    position_ = -position - kPointerSize;
+    CHECK(IsBound());
+  }
+
+  void LinkTo(int position) {
+    CHECK(!IsBound());
+    position_ = position + kPointerSize;
+    CHECK(IsLinked());
+  }
+
+  friend class arm::ArmAssembler;
+  friend class x86::X86Assembler;
+
+  DISALLOW_COPY_AND_ASSIGN(Label);
+};
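+
+// A rough sketch of a label's lifecycle (the emit call is illustrative and
+// depends on the concrete assembler):
+//
+//   Label target;             // unused:  position_ == 0
+//   assembler.jmp(&target);   // linked:  position_ > 0, patch site recorded
+//   assembler.Bind(&target);  // bound:   position_ < 0, pending branches fixed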
+
+
+// Assembler fixups are positions in generated code that require processing
+// after the code has been copied to executable memory. This includes building
+// relocation information.
+class AssemblerFixup {
+ public:
+  virtual void Process(const MemoryRegion& region, int position) = 0;
+  virtual ~AssemblerFixup() {}
+
+ private:
+  AssemblerFixup* previous_;
+  int position_;
+
+  AssemblerFixup* previous() const { return previous_; }
+  void set_previous(AssemblerFixup* previous) { previous_ = previous; }
+
+  int position() const { return position_; }
+  void set_position(int position) { position_ = position; }
+
+  friend class AssemblerBuffer;
+};
+
+// Parent of all queued slow paths, emitted during finalization
+class SlowPath {
+ public:
+  SlowPath() : next_(NULL) {}
+  virtual ~SlowPath() {}
+
+  Label* Continuation() { return &continuation_; }
+  Label* Entry() { return &entry_; }
+  // Generate code for slow path
+  virtual void Emit(Assembler* sp_asm) = 0;
+
+ protected:
+  // Entry branched to by fast path
+  Label entry_;
+  // Optional continuation that is branched to at the end of the slow path
+  Label continuation_;
+  // Next in linked list of slow paths
+  SlowPath* next_;
+
+  friend class AssemblerBuffer;
+  DISALLOW_COPY_AND_ASSIGN(SlowPath);
+};
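+
+// Illustrative use: a fast path enqueues a concrete SlowPath and branches to
+// its entry label on the exceptional case; EmitSlowPaths() later invokes each
+// Emit(), which is expected to bind Entry(), emit the out-of-line code and,
+// when applicable, branch back to Continuation().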
+
+class AssemblerBuffer {
+ public:
+  AssemblerBuffer();
+  ~AssemblerBuffer();
+
+  // Basic support for emitting, loading, and storing.
+  template<typename T> void Emit(T value) {
+    CHECK(HasEnsuredCapacity());
+    *reinterpret_cast<T*>(cursor_) = value;
+    cursor_ += sizeof(T);
+  }
+
+  template<typename T> T Load(size_t position) {
+    CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
+    return *reinterpret_cast<T*>(contents_ + position);
+  }
+
+  template<typename T> void Store(size_t position, T value) {
+    CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
+    *reinterpret_cast<T*>(contents_ + position) = value;
+  }
+
+  // Emit a fixup at the current location.
+  void EmitFixup(AssemblerFixup* fixup) {
+    fixup->set_previous(fixup_);
+    fixup->set_position(Size());
+    fixup_ = fixup;
+  }
+
+  void EnqueueSlowPath(SlowPath* slowpath) {
+    if (slow_path_ == NULL) {
+      slow_path_ = slowpath;
+    } else {
+      SlowPath* cur = slow_path_;
+      for ( ; cur->next_ != NULL ; cur = cur->next_) {}
+      cur->next_ = slowpath;
+    }
+  }
+
+  void EmitSlowPaths(Assembler* sp_asm) {
+    SlowPath* cur = slow_path_;
+    SlowPath* next = NULL;
+    slow_path_ = NULL;
+    for ( ; cur != NULL ; cur = next) {
+      cur->Emit(sp_asm);
+      next = cur->next_;
+      delete cur;
+    }
+  }
+
+  // Get the size of the emitted code.
+  size_t Size() const {
+    CHECK_GE(cursor_, contents_);
+    return cursor_ - contents_;
+  }
+
+  byte* contents() const { return contents_; }
+
+  // Copy the assembled instructions into the specified memory block
+  // and apply all fixups.
+  void FinalizeInstructions(const MemoryRegion& region);
+
+  // To emit an instruction to the assembler buffer, the EnsureCapacity helper
+  // must be used to guarantee that the underlying data area is big enough to
+  // hold the emitted instruction. Usage:
+  //
+  //     AssemblerBuffer buffer;
+  //     AssemblerBuffer::EnsureCapacity ensured(&buffer);
+  //     ... emit bytes for single instruction ...
+
+#ifndef NDEBUG
+
+  class EnsureCapacity {
+   public:
+    explicit EnsureCapacity(AssemblerBuffer* buffer) {
+      if (buffer->cursor() >= buffer->limit()) {
+        buffer->ExtendCapacity();
+      }
+      // In debug mode, we save the assembler buffer along with the gap
+      // size before we start emitting to the buffer. This allows us to
+      // check that any single generated instruction doesn't overflow the
+      // limit implied by the minimum gap size.
+      buffer_ = buffer;
+      gap_ = ComputeGap();
+      // Make sure that extending the capacity leaves a big enough gap
+      // for any kind of instruction.
+      CHECK_GE(gap_, kMinimumGap);
+      // Mark the buffer as having ensured the capacity.
+      CHECK(!buffer->HasEnsuredCapacity());  // Cannot nest.
+      buffer->has_ensured_capacity_ = true;
+    }
+
+    ~EnsureCapacity() {
+      // Unmark the buffer, so we cannot emit after this.
+      buffer_->has_ensured_capacity_ = false;
+      // Make sure the generated instruction doesn't take up more
+      // space than the minimum gap.
+      int delta = gap_ - ComputeGap();
+      CHECK_LE(delta, kMinimumGap);
+    }
+
+   private:
+    AssemblerBuffer* buffer_;
+    int gap_;
+
+    int ComputeGap() { return buffer_->Capacity() - buffer_->Size(); }
+  };
+
+  bool has_ensured_capacity_;
+  bool HasEnsuredCapacity() const { return has_ensured_capacity_; }
+
+#else
+
+  class EnsureCapacity {
+   public:
+    explicit EnsureCapacity(AssemblerBuffer* buffer) {
+      if (buffer->cursor() >= buffer->limit()) buffer->ExtendCapacity();
+    }
+  };
+
+  // When building the C++ tests, assertion code is enabled. To allow
+  // asserting that the user of the assembler buffer has ensured the
+  // capacity needed for emitting, we add a dummy method in non-debug mode.
+  bool HasEnsuredCapacity() const { return true; }
+
+#endif
+
+  // Returns the position in the instruction stream.
+  int GetPosition() { return cursor_ - contents_; }
+
+ private:
+  // The limit is set to kMinimumGap bytes before the end of the data area.
+  // This leaves enough space for the longest possible instruction and allows
+  // for a single, fast space check per instruction.
+  static const int kMinimumGap = 32;
+
+  byte* contents_;
+  byte* cursor_;
+  byte* limit_;
+  AssemblerFixup* fixup_;
+  bool fixups_processed_;
+
+  // Head of linked list of slow paths
+  SlowPath* slow_path_;
+
+  byte* cursor() const { return cursor_; }
+  byte* limit() const { return limit_; }
+  size_t Capacity() const {
+    CHECK_GE(limit_, contents_);
+    return (limit_ - contents_) + kMinimumGap;
+  }
+
+  // Process the chain of fixups recorded in this buffer against the final
+  // code region.
+  void ProcessFixups(const MemoryRegion& region);
+
+  // Compute the limit based on the data area and the capacity. See
+  // description of kMinimumGap for the reasoning behind the value.
+  static byte* ComputeLimit(byte* data, size_t capacity) {
+    return data + capacity - kMinimumGap;
+  }
+
+  void ExtendCapacity();
+
+  friend class AssemblerFixup;
+};
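+
+// Capacity bookkeeping, with made-up numbers: if the data area is 4096 bytes,
+// then limit_ = contents_ + 4096 - kMinimumGap, so a single EnsureCapacity
+// check per instruction is enough as long as no instruction emits more than
+// kMinimumGap (32) bytes.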
+
+class Assembler {
+ public:
+  static Assembler* Create(InstructionSet instruction_set);
+
+  // Emit slow paths queued during assembly
+  void EmitSlowPaths() { buffer_.EmitSlowPaths(this); }
+
+  // Size of generated code
+  size_t CodeSize() const { return buffer_.Size(); }
+
+  // Copy instructions out of assembly buffer into the given region of memory
+  void FinalizeInstructions(const MemoryRegion& region) {
+    buffer_.FinalizeInstructions(region);
+  }
+
+  // Emit code that will create an activation on the stack
+  virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
+                          const std::vector<ManagedRegister>& callee_save_regs,
+                          const std::vector<ManagedRegister>& entry_spills) = 0;
+
+  // Emit code that will remove an activation from the stack
+  virtual void RemoveFrame(size_t frame_size,
+                           const std::vector<ManagedRegister>& callee_save_regs) = 0;
+
+  virtual void IncreaseFrameSize(size_t adjust) = 0;
+  virtual void DecreaseFrameSize(size_t adjust) = 0;
+
+  // Store routines
+  virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0;
+  virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0;
+  virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0;
+
+  virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+                                     ManagedRegister scratch) = 0;
+
+  virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+                                      ManagedRegister scratch) = 0;
+
+  virtual void StoreStackOffsetToThread(ThreadOffset thr_offs,
+                                        FrameOffset fr_offs,
+                                        ManagedRegister scratch) = 0;
+
+  virtual void StoreStackPointerToThread(ThreadOffset thr_offs) = 0;
+
+  virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
+                             FrameOffset in_off, ManagedRegister scratch) = 0;
+
+  // Load routines
+  virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0;
+
+  virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size) = 0;
+
+  virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
+
+  virtual void LoadRef(ManagedRegister dest, ManagedRegister base,
+                       MemberOffset offs) = 0;
+
+  virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base,
+                          Offset offs) = 0;
+
+  virtual void LoadRawPtrFromThread(ManagedRegister dest,
+                                    ThreadOffset offs) = 0;
+
+  // Copying routines
+  virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0;
+
+  virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
+                                    ManagedRegister scratch) = 0;
+
+  virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+                                  ManagedRegister scratch) = 0;
+
+  virtual void CopyRef(FrameOffset dest, FrameOffset src,
+                       ManagedRegister scratch) = 0;
+
+  virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0;
+
+  virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+                    ManagedRegister scratch, size_t size) = 0;
+
+  virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+                    ManagedRegister scratch, size_t size) = 0;
+
+  virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
+                    ManagedRegister scratch, size_t size) = 0;
+
+  virtual void Copy(ManagedRegister dest, Offset dest_offset,
+                    ManagedRegister src, Offset src_offset,
+                    ManagedRegister scratch, size_t size) = 0;
+
+  virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+                    ManagedRegister scratch, size_t size) = 0;
+
+  virtual void MemoryBarrier(ManagedRegister scratch) = 0;
+
+  // Exploit fast access in managed code to Thread::Current()
+  virtual void GetCurrentThread(ManagedRegister tr) = 0;
+  virtual void GetCurrentThread(FrameOffset dest_offset,
+                                ManagedRegister scratch) = 0;
+
+  // Set up out_reg to hold an Object** into the SIRT, or to be NULL if the
+  // value is null and null_allowed. in_reg holds a possibly stale reference
+  // that can be used to avoid loading the SIRT entry to see if the value is
+  // NULL.
+  virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+                               ManagedRegister in_reg, bool null_allowed) = 0;
+
+  // Set up out_off to hold an Object** into the SIRT, or to be NULL if the
+  // value is null and null_allowed.
+  virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+                               ManagedRegister scratch, bool null_allowed) = 0;
+
+  // src holds a SIRT entry (Object**); load it into dst.
+  virtual void LoadReferenceFromSirt(ManagedRegister dst,
+                                     ManagedRegister src) = 0;
+
+  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src cannot be null.
+  virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0;
+  virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;
+
+  // Call to address held at [base+offset]
+  virtual void Call(ManagedRegister base, Offset offset,
+                    ManagedRegister scratch) = 0;
+  virtual void Call(FrameOffset base, Offset offset,
+                    ManagedRegister scratch) = 0;
+  virtual void Call(ThreadOffset offset, ManagedRegister scratch) = 0;
+
+  // Generate code to check if Thread::Current()->suspend_count_ is non-zero
+  // and branch to a SuspendSlowPath if it is. The SuspendSlowPath will continue
+  // at the next instruction.
+  virtual void SuspendPoll(ManagedRegister scratch, ManagedRegister return_reg,
+                           FrameOffset return_save_location,
+                           size_t return_size) = 0;
+
+  // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to an ExceptionSlowPath if it is.
+  virtual void ExceptionPoll(ManagedRegister scratch) = 0;
+
+  virtual ~Assembler() {}
+
+ protected:
+  Assembler() : buffer_() {}
+
+  AssemblerBuffer buffer_;
+};
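+
+// Expected driver flow, sketched (names, ownership handling and the
+// MemoryRegion setup below are illustrative):
+//
+//   Assembler* assembler = Assembler::Create(kX86);
+//   ... emit code through the concrete assembler ...
+//   assembler->EmitSlowPaths();
+//   MemoryRegion code(buffer, assembler->CodeSize());  // writable target
+//   assembler->FinalizeInstructions(code);  // copy code and apply fixups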
+
+}  // namespace art
+
+#endif  // ART_SRC_OAT_UTILS_ASSEMBLER_H_
diff --git a/src/oat/utils/managed_register.h b/src/oat/utils/managed_register.h
new file mode 100644
index 0000000..c0fbd21
--- /dev/null
+++ b/src/oat/utils/managed_register.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_UTILS_MANAGED_REGISTER_H_
+#define ART_SRC_OAT_UTILS_MANAGED_REGISTER_H_
+
+namespace art {
+
+namespace x86 {
+class X86ManagedRegister;
+}
+namespace arm {
+class ArmManagedRegister;
+}
+
+class ManagedRegister {
+ public:
+  // ManagedRegister is a value class. There exists no method to change the
+  // internal state. We therefore allow a copy constructor and an assignment
+  // operator.
+  ManagedRegister(const ManagedRegister& other) : id_(other.id_) { }
+
+  ManagedRegister& operator=(const ManagedRegister& other) {
+    id_ = other.id_;
+    return *this;
+  }
+
+  x86::X86ManagedRegister AsX86() const;
+  arm::ArmManagedRegister AsArm() const;
+
+  // It is valid to invoke Equals on and with a NoRegister.
+  bool Equals(const ManagedRegister& other) const {
+    return id_ == other.id_;
+  }
+
+  bool IsNoRegister() const {
+    return id_ == kNoRegister;
+  }
+
+  static ManagedRegister NoRegister() {
+    return ManagedRegister();
+  }
+
+ protected:
+  static const int kNoRegister = -1;
+
+  ManagedRegister() : id_(kNoRegister) { }
+  explicit ManagedRegister(int reg_id) : id_(reg_id) { }
+
+  int id_;
+};
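+
+// Being a value class, a ManagedRegister is passed and compared by value; for
+// example, reg.Equals(ManagedRegister::NoRegister()) is equivalent to
+// reg.IsNoRegister().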
+
+}  // namespace art
+
+#endif  // ART_SRC_OAT_UTILS_MANAGED_REGISTER_H_
diff --git a/src/oat/utils/x86/assembler_x86.cc b/src/oat/utils/x86/assembler_x86.cc
new file mode 100644
index 0000000..0862551
--- /dev/null
+++ b/src/oat/utils/x86/assembler_x86.cc
@@ -0,0 +1,1849 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_x86.h"
+
+#include "casts.h"
+#include "memory_region.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "thread.h"
+
+namespace art {
+namespace x86 {
+
+class DirectCallRelocation : public AssemblerFixup {
+ public:
+  void Process(const MemoryRegion& region, int position) {
+    // Direct calls are relative to the following instruction on x86.
+    int32_t pointer = region.Load<int32_t>(position);
+    int32_t start = reinterpret_cast<int32_t>(region.start());
+    int32_t delta = start + position + sizeof(int32_t);
+    region.Store<int32_t>(position, pointer - delta);
+  }
+};
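+
+// Worked example with made-up numbers: if the region starts at 0x1000, the
+// call's displacement sits at position 0x10 and the buffer recorded the
+// absolute target 0x2000, then delta = 0x1000 + 0x10 + 4 = 0x1014 (the
+// address of the following instruction) and the patched displacement is
+// 0x2000 - 0x1014 = 0xFEC.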
+
+static const char* kRegisterNames[] = {
+  "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
+};
+std::ostream& operator<<(std::ostream& os, const Register& rhs) {
+  if (rhs >= EAX && rhs <= EDI) {
+    os << kRegisterNames[rhs];
+  } else {
+    os << "Register[" << static_cast<int>(rhs) << "]";
+  }
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const XmmRegister& reg) {
+  return os << "XMM" << static_cast<int>(reg);
+}
+
+std::ostream& operator<<(std::ostream& os, const X87Register& reg) {
+  return os << "ST" << static_cast<int>(reg);
+}
+
+void X86Assembler::call(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xFF);
+  EmitRegisterOperand(2, reg);
+}
+
+
+void X86Assembler::call(const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xFF);
+  EmitOperand(2, address);
+}
+
+
+void X86Assembler::call(Label* label) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xE8);
+  static const int kSize = 5;
+  EmitLabel(label, kSize);
+}
+
+
+void X86Assembler::pushl(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x50 + reg);
+}
+
+
+void X86Assembler::pushl(const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xFF);
+  EmitOperand(6, address);
+}
+
+
+void X86Assembler::pushl(const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  if (imm.is_int8()) {
+    EmitUint8(0x6A);
+    EmitUint8(imm.value() & 0xFF);
+  } else {
+    EmitUint8(0x68);
+    EmitImmediate(imm);
+  }
+}
+
+
+void X86Assembler::popl(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x58 + reg);
+}
+
+
+void X86Assembler::popl(const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x8F);
+  EmitOperand(0, address);
+}
+
+
+void X86Assembler::movl(Register dst, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xB8 + dst);
+  EmitImmediate(imm);
+}
+
+
+void X86Assembler::movl(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x89);
+  EmitRegisterOperand(src, dst);
+}
+
+
+void X86Assembler::movl(Register dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x8B);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movl(const Address& dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x89);
+  EmitOperand(src, dst);
+}
+
+
+void X86Assembler::movl(const Address& dst, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xC7);
+  EmitOperand(0, dst);
+  EmitImmediate(imm);
+}
+
+void X86Assembler::movl(const Address& dst, Label* lbl) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xC7);
+  EmitOperand(0, dst);
+  EmitLabel(lbl, dst.length_ + 5);
+}
+
+void X86Assembler::movzxb(Register dst, ByteRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xB6);
+  EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::movzxb(Register dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xB6);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movsxb(Register dst, ByteRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xBE);
+  EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::movsxb(Register dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xBE);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movb(Register /*dst*/, const Address& /*src*/) {
+  LOG(FATAL) << "Use movzxb or movsxb instead.";
+}
+
+
+void X86Assembler::movb(const Address& dst, ByteRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x88);
+  EmitOperand(src, dst);
+}
+
+
+void X86Assembler::movb(const Address& dst, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xC6);
+  EmitOperand(EAX, dst);
+  CHECK(imm.is_int8());
+  EmitUint8(imm.value() & 0xFF);
+}
+
+
+void X86Assembler::movzxw(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xB7);
+  EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::movzxw(Register dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xB7);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movsxw(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xBF);
+  EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::movsxw(Register dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xBF);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movw(Register /*dst*/, const Address& /*src*/) {
+  LOG(FATAL) << "Use movzxw or movsxw instead.";
+}
+
+
+void X86Assembler::movw(const Address& dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitOperandSizeOverride();
+  EmitUint8(0x89);
+  EmitOperand(src, dst);
+}
+
+
+void X86Assembler::leal(Register dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x8D);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::cmovl(Condition condition, Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0x40 + condition);
+  EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::setb(Condition condition, Register dst) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0x90 + condition);
+  EmitOperand(0, Operand(dst));
+}
+
+
+void X86Assembler::movss(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x10);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movss(const Address& dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x11);
+  EmitOperand(src, dst);
+}
+
+
+void X86Assembler::movss(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x11);
+  EmitXmmRegisterOperand(src, dst);
+}
+
+
+void X86Assembler::movd(XmmRegister dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0x6E);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::movd(Register dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0x7E);
+  EmitOperand(src, Operand(dst));
+}
+
+
+void X86Assembler::addss(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x58);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::addss(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x58);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::subss(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x5C);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::subss(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x5C);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::mulss(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x59);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::mulss(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x59);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::divss(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x5E);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::divss(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x5E);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::flds(const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xD9);
+  EmitOperand(0, src);
+}
+
+
+void X86Assembler::fstps(const Address& dst) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xD9);
+  EmitOperand(3, dst);
+}
+
+
+void X86Assembler::movsd(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x10);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::movsd(const Address& dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x11);
+  EmitOperand(src, dst);
+}
+
+
+void X86Assembler::movsd(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x11);
+  EmitXmmRegisterOperand(src, dst);
+}
+
+
+void X86Assembler::addsd(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x58);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::addsd(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x58);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::subsd(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x5C);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::subsd(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x5C);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::mulsd(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x59);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::mulsd(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x59);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::divsd(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x5E);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::divsd(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x5E);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::cvtsi2ss(XmmRegister dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x2A);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::cvtsi2sd(XmmRegister dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x2A);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::cvtss2si(Register dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x2D);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cvtss2sd(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x5A);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cvtsd2si(Register dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x2D);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cvttss2si(Register dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x2C);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cvttsd2si(Register dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x2C);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cvtsd2ss(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x5A);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cvtdq2pd(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0xE6);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::comiss(XmmRegister a, XmmRegister b) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0x2F);
+  EmitXmmRegisterOperand(a, b);
+}
+
+
+void X86Assembler::comisd(XmmRegister a, XmmRegister b) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0x2F);
+  EmitXmmRegisterOperand(a, b);
+}
+
+
+void X86Assembler::sqrtsd(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF2);
+  EmitUint8(0x0F);
+  EmitUint8(0x51);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::sqrtss(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF3);
+  EmitUint8(0x0F);
+  EmitUint8(0x51);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::xorpd(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0x57);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::xorpd(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0x57);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::xorps(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0x57);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::xorps(XmmRegister dst, XmmRegister src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0x57);
+  EmitXmmRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::andpd(XmmRegister dst, const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x66);
+  EmitUint8(0x0F);
+  EmitUint8(0x54);
+  EmitOperand(dst, src);
+}
+
+
+void X86Assembler::fldl(const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xDD);
+  EmitOperand(0, src);
+}
+
+
+void X86Assembler::fstpl(const Address& dst) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xDD);
+  EmitOperand(3, dst);
+}
+
+
+void X86Assembler::fnstcw(const Address& dst) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xD9);
+  EmitOperand(7, dst);
+}
+
+
+void X86Assembler::fldcw(const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xD9);
+  EmitOperand(5, src);
+}
+
+
+void X86Assembler::fistpl(const Address& dst) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xDF);
+  EmitOperand(7, dst);
+}
+
+
+void X86Assembler::fistps(const Address& dst) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xDB);
+  EmitOperand(3, dst);
+}
+
+
+void X86Assembler::fildl(const Address& src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xDF);
+  EmitOperand(5, src);
+}
+
+
+void X86Assembler::fincstp() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xD9);
+  EmitUint8(0xF7);
+}
+
+
+void X86Assembler::ffree(const Immediate& index) {
+  CHECK_LT(index.value(), 7);
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xDD);
+  EmitUint8(0xC0 + index.value());
+}
+
+
+void X86Assembler::fsin() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xD9);
+  EmitUint8(0xFE);
+}
+
+
+void X86Assembler::fcos() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xD9);
+  EmitUint8(0xFF);
+}
+
+
+void X86Assembler::fptan() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xD9);
+  EmitUint8(0xF2);
+}
+
+
+void X86Assembler::xchgl(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x87);
+  EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::cmpl(Register reg, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitComplex(7, Operand(reg), imm);
+}
+
+
+void X86Assembler::cmpl(Register reg0, Register reg1) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x3B);
+  EmitOperand(reg0, Operand(reg1));
+}
+
+
+void X86Assembler::cmpl(Register reg, const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x3B);
+  EmitOperand(reg, address);
+}
+
+
+void X86Assembler::addl(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x03);
+  EmitRegisterOperand(dst, src);
+}
+
+
+void X86Assembler::addl(Register reg, const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x03);
+  EmitOperand(reg, address);
+}
+
+
+void X86Assembler::cmpl(const Address& address, Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x39);
+  EmitOperand(reg, address);
+}
+
+
+void X86Assembler::cmpl(const Address& address, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitComplex(7, address, imm);
+}
+
+
+void X86Assembler::testl(Register reg1, Register reg2) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x85);
+  EmitRegisterOperand(reg1, reg2);
+}
+
+
+void X86Assembler::testl(Register reg, const Immediate& immediate) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  // For registers that have a byte variant (EAX, EBX, ECX, and EDX)
+  // we only test the byte register to keep the encoding short.
+  if (immediate.is_uint8() && reg < 4) {
+    // Use zero-extended 8-bit immediate.
+    if (reg == EAX) {
+      EmitUint8(0xA8);
+    } else {
+      EmitUint8(0xF6);
+      EmitUint8(0xC0 + reg);
+    }
+    EmitUint8(immediate.value() & 0xFF);
+  } else if (reg == EAX) {
+    // Use short form if the destination is EAX.
+    EmitUint8(0xA9);
+    EmitImmediate(immediate);
+  } else {
+    EmitUint8(0xF7);
+    EmitOperand(0, Operand(reg));
+    EmitImmediate(immediate);
+  }
+}
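+
+// For illustration: testl(EAX, Immediate(1)) is emitted as A8 01 and
+// testl(EBX, Immediate(1)) as F6 C3 01, while immediates that do not fit in
+// an unsigned byte (or registers without a byte form) fall back to the
+// longer A9 id / F7 /0 id encodings above.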
+
+
+void X86Assembler::andl(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x23);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::andl(Register dst, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitComplex(4, Operand(dst), imm);
+}
+
+
+void X86Assembler::orl(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0B);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::orl(Register dst, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitComplex(1, Operand(dst), imm);
+}
+
+
+void X86Assembler::xorl(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x33);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::addl(Register reg, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitComplex(0, Operand(reg), imm);
+}
+
+
+void X86Assembler::addl(const Address& address, Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x01);
+  EmitOperand(reg, address);
+}
+
+
+void X86Assembler::addl(const Address& address, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitComplex(0, address, imm);
+}
+
+
+void X86Assembler::adcl(Register reg, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitComplex(2, Operand(reg), imm);
+}
+
+
+void X86Assembler::adcl(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x13);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::adcl(Register dst, const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x13);
+  EmitOperand(dst, address);
+}
+
+
+void X86Assembler::subl(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x2B);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::subl(Register reg, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitComplex(5, Operand(reg), imm);
+}
+
+
+void X86Assembler::subl(Register reg, const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x2B);
+  EmitOperand(reg, address);
+}
+
+
+void X86Assembler::cdq() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x99);
+}
+
+
+void X86Assembler::idivl(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF7);
+  EmitUint8(0xF8 | reg);
+}
+
+
+void X86Assembler::imull(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xAF);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::imull(Register reg, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x69);
+  EmitOperand(reg, Operand(reg));
+  EmitImmediate(imm);
+}
+
+
+void X86Assembler::imull(Register reg, const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xAF);
+  EmitOperand(reg, address);
+}
+
+
+void X86Assembler::imull(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF7);
+  EmitOperand(5, Operand(reg));
+}
+
+
+void X86Assembler::imull(const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF7);
+  EmitOperand(5, address);
+}
+
+
+void X86Assembler::mull(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF7);
+  EmitOperand(4, Operand(reg));
+}
+
+
+void X86Assembler::mull(const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF7);
+  EmitOperand(4, address);
+}
+
+
+void X86Assembler::sbbl(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x1B);
+  EmitOperand(dst, Operand(src));
+}
+
+
+void X86Assembler::sbbl(Register reg, const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitComplex(3, Operand(reg), imm);
+}
+
+
+void X86Assembler::sbbl(Register dst, const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x1B);
+  EmitOperand(dst, address);
+}
+
+
+void X86Assembler::incl(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x40 + reg);
+}
+
+
+void X86Assembler::incl(const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xFF);
+  EmitOperand(0, address);
+}
+
+
+void X86Assembler::decl(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x48 + reg);
+}
+
+
+void X86Assembler::decl(const Address& address) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xFF);
+  EmitOperand(1, address);
+}
+
+
+void X86Assembler::shll(Register reg, const Immediate& imm) {
+  EmitGenericShift(4, reg, imm);
+}
+
+
+void X86Assembler::shll(Register operand, Register shifter) {
+  EmitGenericShift(4, operand, shifter);
+}
+
+
+void X86Assembler::shrl(Register reg, const Immediate& imm) {
+  EmitGenericShift(5, reg, imm);
+}
+
+
+void X86Assembler::shrl(Register operand, Register shifter) {
+  EmitGenericShift(5, operand, shifter);
+}
+
+
+void X86Assembler::sarl(Register reg, const Immediate& imm) {
+  EmitGenericShift(7, reg, imm);
+}
+
+
+void X86Assembler::sarl(Register operand, Register shifter) {
+  EmitGenericShift(7, operand, shifter);
+}
+
+
+void X86Assembler::shld(Register dst, Register src) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xA5);
+  EmitRegisterOperand(src, dst);
+}
+
+
+void X86Assembler::negl(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF7);
+  EmitOperand(3, Operand(reg));
+}
+
+
+void X86Assembler::notl(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF7);
+  EmitUint8(0xD0 | reg);
+}
+
+
+void X86Assembler::enter(const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xC8);
+  CHECK(imm.is_uint16());
+  EmitUint8(imm.value() & 0xFF);
+  EmitUint8((imm.value() >> 8) & 0xFF);
+  EmitUint8(0x00);
+}
+
+
+void X86Assembler::leave() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xC9);
+}
+
+
+void X86Assembler::ret() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xC3);
+}
+
+
+void X86Assembler::ret(const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xC2);
+  CHECK(imm.is_uint16());
+  EmitUint8(imm.value() & 0xFF);
+  EmitUint8((imm.value() >> 8) & 0xFF);
+}
+
+
+void X86Assembler::nop() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x90);
+}
+
+
+void X86Assembler::int3() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xCC);
+}
+
+
+void X86Assembler::hlt() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF4);
+}
+
+
+void X86Assembler::j(Condition condition, Label* label) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  if (label->IsBound()) {
+    static const int kShortSize = 2;
+    static const int kLongSize = 6;
+    int offset = label->Position() - buffer_.Size();
+    CHECK_LE(offset, 0);
+    if (IsInt(8, offset - kShortSize)) {
+      EmitUint8(0x70 + condition);
+      EmitUint8((offset - kShortSize) & 0xFF);
+    } else {
+      EmitUint8(0x0F);
+      EmitUint8(0x80 + condition);
+      EmitInt32(offset - kLongSize);
+    }
+  } else {
+    EmitUint8(0x0F);
+    EmitUint8(0x80 + condition);
+    EmitLabelLink(label);
+  }
+}
+
+
+void X86Assembler::jmp(Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xFF);
+  EmitRegisterOperand(4, reg);
+}
+
+
+void X86Assembler::jmp(Label* label) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  if (label->IsBound()) {
+    static const int kShortSize = 2;
+    static const int kLongSize = 5;
+    int offset = label->Position() - buffer_.Size();
+    CHECK_LE(offset, 0);
+    if (IsInt(8, offset - kShortSize)) {
+      EmitUint8(0xEB);
+      EmitUint8((offset - kShortSize) & 0xFF);
+    } else {
+      EmitUint8(0xE9);
+      EmitInt32(offset - kLongSize);
+    }
+  } else {
+    EmitUint8(0xE9);
+    EmitLabelLink(label);
+  }
+}
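+
+// Branch size selection, illustrated: jumping to a bound label 10 bytes back
+// gives offset = -10; offset - 2 fits in a signed byte, so the two-byte
+// EB rel8 form is used. Targets too far back for rel8, and all unbound
+// labels, use the five-byte E9 rel32 form.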
+
+
+X86Assembler* X86Assembler::lock() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0xF0);
+  return this;
+}
+
+
+void X86Assembler::cmpxchgl(const Address& address, Register reg) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xB1);
+  EmitOperand(reg, address);
+}
+
+void X86Assembler::mfence() {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x0F);
+  EmitUint8(0xAE);
+  EmitUint8(0xF0);
+}
+
+X86Assembler* X86Assembler::fs() {
+  // TODO: fs is a prefix and not an instruction
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  EmitUint8(0x64);
+  return this;
+}
+
+void X86Assembler::AddImmediate(Register reg, const Immediate& imm) {
+  int value = imm.value();
+  if (value > 0) {
+    if (value == 1) {
+      incl(reg);
+    } else if (value != 0) {
+      addl(reg, imm);
+    }
+  } else if (value < 0) {
+    value = -value;
+    if (value == 1) {
+      decl(reg);
+    } else if (value != 0) {
+      subl(reg, Immediate(value));
+    }
+  }
+}
+
+
+void X86Assembler::LoadDoubleConstant(XmmRegister dst, double value) {
+  // TODO: Need to have a code constants table.
+  int64_t constant = bit_cast<int64_t, double>(value);
+  pushl(Immediate(High32Bits(constant)));
+  pushl(Immediate(Low32Bits(constant)));
+  movsd(dst, Address(ESP, 0));
+  addl(ESP, Immediate(2 * kWordSize));
+}
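+
+// For example, LoadDoubleConstant(XMM0, 1.0) pushes 0x3FF00000 and then
+// 0x00000000 (the two halves of the IEEE-754 bit pattern), loads the 8 bytes
+// at [ESP] with movsd, and releases the temporary slot with addl(ESP, 8).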
+
+
+void X86Assembler::FloatNegate(XmmRegister f) {
+  static const struct {
+    uint32_t a;
+    uint32_t b;
+    uint32_t c;
+    uint32_t d;
+  } float_negate_constant __attribute__((aligned(16))) =
+      { 0x80000000, 0x00000000, 0x80000000, 0x00000000 };
+  xorps(f, Address::Absolute(reinterpret_cast<uword>(&float_negate_constant)));
+}
+
+
+void X86Assembler::DoubleNegate(XmmRegister d) {
+  static const struct {
+    uint64_t a;
+    uint64_t b;
+  } double_negate_constant __attribute__((aligned(16))) =
+      {0x8000000000000000LL, 0x8000000000000000LL};
+  xorpd(d, Address::Absolute(reinterpret_cast<uword>(&double_negate_constant)));
+}
+
+
+void X86Assembler::DoubleAbs(XmmRegister reg) {
+  static const struct {
+    uint64_t a;
+    uint64_t b;
+  } double_abs_constant __attribute__((aligned(16))) =
+      {0x7FFFFFFFFFFFFFFFLL, 0x7FFFFFFFFFFFFFFFLL};
+  andpd(reg, Address::Absolute(reinterpret_cast<uword>(&double_abs_constant)));
+}
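+
+// The three helpers above all use the standard IEEE-754 bit tricks: XORing
+// with a mask that has only the sign bit(s) set negates the value, while
+// ANDing with the complementary mask clears the sign bit, yielding the
+// absolute value.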
+
+
+void X86Assembler::Align(int alignment, int offset) {
+  CHECK(IsPowerOfTwo(alignment));
+  // Emit nop instruction until the real position is aligned.
+  while (((offset + buffer_.GetPosition()) & (alignment-1)) != 0) {
+    nop();
+  }
+}
+
+
+void X86Assembler::Bind(Label* label) {
+  int bound = buffer_.Size();
+  CHECK(!label->IsBound());  // Labels can only be bound once.
+  while (label->IsLinked()) {
+    int position = label->LinkPosition();
+    int next = buffer_.Load<int32_t>(position);
+    buffer_.Store<int32_t>(position, bound - (position + 4));
+    label->position_ = next;
+  }
+  label->BindTo(bound);
+}
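+
+// Bind (above) walks the chain of forward references threaded through the
+// emitted code itself: each linked site stores the position of the previous
+// one, and is rewritten with the final pc-relative displacement,
+// bound - (position + 4).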
+
+
+void X86Assembler::Stop(const char* message) {
+  // Emit the message address as immediate operand in the test rax instruction,
+  // followed by the int3 instruction.
+  // Execution can be resumed with the 'cont' command in gdb.
+  testl(EAX, Immediate(reinterpret_cast<int32_t>(message)));
+  int3();
+}
+
+
+void X86Assembler::EmitOperand(int reg_or_opcode, const Operand& operand) {
+  CHECK_GE(reg_or_opcode, 0);
+  CHECK_LT(reg_or_opcode, 8);
+  const int length = operand.length_;
+  CHECK_GT(length, 0);
+  // Emit the ModRM byte updated with the given reg value.
+  CHECK_EQ(operand.encoding_[0] & 0x38, 0);
+  EmitUint8(operand.encoding_[0] + (reg_or_opcode << 3));
+  // Emit the rest of the encoded operand.
+  for (int i = 1; i < length; i++) {
+    EmitUint8(operand.encoding_[i]);
+  }
+}
+
+
+void X86Assembler::EmitImmediate(const Immediate& imm) {
+  EmitInt32(imm.value());
+}
+
+
+void X86Assembler::EmitComplex(int reg_or_opcode,
+                               const Operand& operand,
+                               const Immediate& immediate) {
+  CHECK_GE(reg_or_opcode, 0);
+  CHECK_LT(reg_or_opcode, 8);
+  if (immediate.is_int8()) {
+    // Use sign-extended 8-bit immediate.
+    EmitUint8(0x83);
+    EmitOperand(reg_or_opcode, operand);
+    EmitUint8(immediate.value() & 0xFF);
+  } else if (operand.IsRegister(EAX)) {
+    // Use short form if the destination is eax.
+    EmitUint8(0x05 + (reg_or_opcode << 3));
+    EmitImmediate(immediate);
+  } else {
+    EmitUint8(0x81);
+    EmitOperand(reg_or_opcode, operand);
+    EmitImmediate(immediate);
+  }
+}
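+
+// EmitComplex above picks the encoding; for instance, addl(ECX, Immediate(1))
+// takes the sign-extended form (83 C1 01), addl(EAX, Immediate(0x1000)) takes
+// the short EAX form (05 00 10 00 00), and other register/immediate
+// combinations use the generic 81 /digit id encoding.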
+
+
+void X86Assembler::EmitLabel(Label* label, int instruction_size) {
+  if (label->IsBound()) {
+    int offset = label->Position() - buffer_.Size();
+    CHECK_LE(offset, 0);
+    EmitInt32(offset - instruction_size);
+  } else {
+    EmitLabelLink(label);
+  }
+}
+
+
+void X86Assembler::EmitLabelLink(Label* label) {
+  CHECK(!label->IsBound());
+  int position = buffer_.Size();
+  EmitInt32(label->position_);
+  label->LinkTo(position);
+}
+
+
+void X86Assembler::EmitGenericShift(int reg_or_opcode,
+                                    Register reg,
+                                    const Immediate& imm) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  CHECK(imm.is_int8());
+  if (imm.value() == 1) {
+    EmitUint8(0xD1);
+    EmitOperand(reg_or_opcode, Operand(reg));
+  } else {
+    EmitUint8(0xC1);
+    EmitOperand(reg_or_opcode, Operand(reg));
+    EmitUint8(imm.value() & 0xFF);
+  }
+}
+
+
+void X86Assembler::EmitGenericShift(int reg_or_opcode,
+                                    Register operand,
+                                    Register shifter) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  CHECK_EQ(shifter, ECX);
+  EmitUint8(0xD3);
+  EmitOperand(reg_or_opcode, Operand(operand));
+}
+
+void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
+                              const std::vector<ManagedRegister>& spill_regs,
+                              const std::vector<ManagedRegister>& entry_spills) {
+  CHECK_ALIGNED(frame_size, kStackAlignment);
+  CHECK_EQ(0u, spill_regs.size());  // no spilled regs on x86
+  // return address then method on stack
+  addl(ESP, Immediate(-frame_size + kPointerSize /*method*/ +
+                      kPointerSize /*return address*/));
+  pushl(method_reg.AsX86().AsCpuRegister());
+  for (size_t i = 0; i < entry_spills.size(); ++i) {
+    movl(Address(ESP, frame_size + kPointerSize + (i * kPointerSize)),
+         entry_spills.at(i).AsX86().AsCpuRegister());
+  }
+}
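+
+// BuildFrame, roughly: ESP is lowered so that the pushed method*, the
+// caller's return address and the remaining outgoing area add up to
+// frame_size, and any register arguments listed in entry_spills are written
+// out to stack slots above the return address (a sketch of the intent; no
+// callee saves are spilled on x86, as checked above).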
+
+void X86Assembler::RemoveFrame(size_t frame_size,
+                            const std::vector<ManagedRegister>& spill_regs) {
+  CHECK_ALIGNED(frame_size, kStackAlignment);
+  CHECK_EQ(0u, spill_regs.size());  // no spilled regs on x86
+  addl(ESP, Immediate(frame_size - kPointerSize));
+  ret();
+}
+
+void X86Assembler::IncreaseFrameSize(size_t adjust) {
+  CHECK_ALIGNED(adjust, kStackAlignment);
+  addl(ESP, Immediate(-adjust));
+}
+
+void X86Assembler::DecreaseFrameSize(size_t adjust) {
+  CHECK_ALIGNED(adjust, kStackAlignment);
+  addl(ESP, Immediate(adjust));
+}
+
+void X86Assembler::Store(FrameOffset offs, ManagedRegister msrc, size_t size) {
+  X86ManagedRegister src = msrc.AsX86();
+  if (src.IsNoRegister()) {
+    CHECK_EQ(0u, size);
+  } else if (src.IsCpuRegister()) {
+    CHECK_EQ(4u, size);
+    movl(Address(ESP, offs), src.AsCpuRegister());
+  } else if (src.IsRegisterPair()) {
+    CHECK_EQ(8u, size);
+    movl(Address(ESP, offs), src.AsRegisterPairLow());
+    movl(Address(ESP, FrameOffset(offs.Int32Value()+4)),
+         src.AsRegisterPairHigh());
+  } else if (src.IsX87Register()) {
+    if (size == 4) {
+      fstps(Address(ESP, offs));
+    } else {
+      fstpl(Address(ESP, offs));
+    }
+  } else {
+    CHECK(src.IsXmmRegister());
+    if (size == 4) {
+      movss(Address(ESP, offs), src.AsXmmRegister());
+    } else {
+      movsd(Address(ESP, offs), src.AsXmmRegister());
+    }
+  }
+}
+
+void X86Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
+  X86ManagedRegister src = msrc.AsX86();
+  CHECK(src.IsCpuRegister());
+  movl(Address(ESP, dest), src.AsCpuRegister());
+}
+
+void X86Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
+  X86ManagedRegister src = msrc.AsX86();
+  CHECK(src.IsCpuRegister());
+  movl(Address(ESP, dest), src.AsCpuRegister());
+}
+
+void X86Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+                                         ManagedRegister) {
+  movl(Address(ESP, dest), Immediate(imm));
+}
+
+void X86Assembler::StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+                                          ManagedRegister) {
+  fs()->movl(Address::Absolute(dest), Immediate(imm));
+}
+
+void X86Assembler::StoreStackOffsetToThread(ThreadOffset thr_offs,
+                                            FrameOffset fr_offs,
+                                            ManagedRegister mscratch) {
+  X86ManagedRegister scratch = mscratch.AsX86();
+  CHECK(scratch.IsCpuRegister());
+  leal(scratch.AsCpuRegister(), Address(ESP, fr_offs));
+  fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
+}
+
+void X86Assembler::StoreStackPointerToThread(ThreadOffset thr_offs) {
+  fs()->movl(Address::Absolute(thr_offs), ESP);
+}
+
+void X86Assembler::StoreLabelToThread(ThreadOffset thr_offs, Label* lbl) {
+  fs()->movl(Address::Absolute(thr_offs), lbl);
+}
+
+void X86Assembler::StoreSpanning(FrameOffset /*dst*/, ManagedRegister /*src*/,
+                                 FrameOffset /*in_off*/, ManagedRegister /*scratch*/) {
+  UNIMPLEMENTED(FATAL);  // this case only currently exists for ARM
+}
+
+void X86Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
+  X86ManagedRegister dest = mdest.AsX86();
+  if (dest.IsNoRegister()) {
+    CHECK_EQ(0u, size);
+  } else if (dest.IsCpuRegister()) {
+    CHECK_EQ(4u, size);
+    movl(dest.AsCpuRegister(), Address(ESP, src));
+  } else if (dest.IsRegisterPair()) {
+    CHECK_EQ(8u, size);
+    movl(dest.AsRegisterPairLow(), Address(ESP, src));
+    movl(dest.AsRegisterPairHigh(), Address(ESP, FrameOffset(src.Int32Value()+4)));
+  } else if (dest.IsX87Register()) {
+    if (size == 4) {
+      flds(Address(ESP, src));
+    } else {
+      fldl(Address(ESP, src));
+    }
+  } else {
+    CHECK(dest.IsXmmRegister());
+    if (size == 4) {
+      movss(dest.AsXmmRegister(), Address(ESP, src));
+    } else {
+      movsd(dest.AsXmmRegister(), Address(ESP, src));
+    }
+  }
+}
+
+void X86Assembler::Load(ManagedRegister mdest, ThreadOffset src, size_t size) {
+  X86ManagedRegister dest = mdest.AsX86();
+  if (dest.IsNoRegister()) {
+    CHECK_EQ(0u, size);
+  } else if (dest.IsCpuRegister()) {
+    CHECK_EQ(4u, size);
+    fs()->movl(dest.AsCpuRegister(), Address::Absolute(src));
+  } else if (dest.IsRegisterPair()) {
+    CHECK_EQ(8u, size);
+    fs()->movl(dest.AsRegisterPairLow(), Address::Absolute(src));
+    fs()->movl(dest.AsRegisterPairHigh(), Address::Absolute(ThreadOffset(src.Int32Value()+4)));
+  } else if (dest.IsX87Register()) {
+    if (size == 4) {
+      fs()->flds(Address::Absolute(src));
+    } else {
+      fs()->fldl(Address::Absolute(src));
+    }
+  } else {
+    CHECK(dest.IsXmmRegister());
+    if (size == 4) {
+      fs()->movss(dest.AsXmmRegister(), Address::Absolute(src));
+    } else {
+      fs()->movsd(dest.AsXmmRegister(), Address::Absolute(src));
+    }
+  }
+}
+
+void X86Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
+  X86ManagedRegister dest = mdest.AsX86();
+  CHECK(dest.IsCpuRegister());
+  movl(dest.AsCpuRegister(), Address(ESP, src));
+}
+
+void X86Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
+                           MemberOffset offs) {
+  X86ManagedRegister dest = mdest.AsX86();
+  CHECK(dest.IsCpuRegister() && base.AsX86().IsCpuRegister());
+  movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
+}
+
+void X86Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
+                              Offset offs) {
+  X86ManagedRegister dest = mdest.AsX86();
+  CHECK(dest.IsCpuRegister() && base.AsX86().IsCpuRegister());
+  movl(dest.AsCpuRegister(), Address(base.AsX86().AsCpuRegister(), offs));
+}
+
+void X86Assembler::LoadRawPtrFromThread(ManagedRegister mdest,
+                                        ThreadOffset offs) {
+  X86ManagedRegister dest = mdest.AsX86();
+  CHECK(dest.IsCpuRegister());
+  fs()->movl(dest.AsCpuRegister(), Address::Absolute(offs));
+}
+
+void X86Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
+  X86ManagedRegister dest = mdest.AsX86();
+  X86ManagedRegister src = msrc.AsX86();
+  if (!dest.Equals(src)) {
+    if (dest.IsCpuRegister() && src.IsCpuRegister()) {
+      movl(dest.AsCpuRegister(), src.AsCpuRegister());
+    } else if (src.IsX87Register() && dest.IsXmmRegister()) {
+      // Pass via stack and pop X87 register
+      subl(ESP, Immediate(16));
+      if (size == 4) {
+        CHECK_EQ(src.AsX87Register(), ST0);
+        fstps(Address(ESP, 0));
+        movss(dest.AsXmmRegister(), Address(ESP, 0));
+      } else {
+        CHECK_EQ(src.AsX87Register(), ST0);
+        fstpl(Address(ESP, 0));
+        movsd(dest.AsXmmRegister(), Address(ESP, 0));
+      }
+      addl(ESP, Immediate(16));
+    } else {
+      // TODO: x87, SSE
+      UNIMPLEMENTED(FATAL) << ": Move " << dest << ", " << src;
+    }
+  }
+}
+
+void X86Assembler::CopyRef(FrameOffset dest, FrameOffset src,
+                           ManagedRegister mscratch) {
+  X86ManagedRegister scratch = mscratch.AsX86();
+  CHECK(scratch.IsCpuRegister());
+  movl(scratch.AsCpuRegister(), Address(ESP, src));
+  movl(Address(ESP, dest), scratch.AsCpuRegister());
+}
+
+void X86Assembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+                                        ThreadOffset thr_offs,
+                                        ManagedRegister mscratch) {
+  X86ManagedRegister scratch = mscratch.AsX86();
+  CHECK(scratch.IsCpuRegister());
+  fs()->movl(scratch.AsCpuRegister(), Address::Absolute(thr_offs));
+  Store(fr_offs, scratch, 4);
+}
+
+void X86Assembler::CopyRawPtrToThread(ThreadOffset thr_offs,
+                                      FrameOffset fr_offs,
+                                      ManagedRegister mscratch) {
+  X86ManagedRegister scratch = mscratch.AsX86();
+  CHECK(scratch.IsCpuRegister());
+  Load(scratch, fr_offs, 4);
+  fs()->movl(Address::Absolute(thr_offs), scratch.AsCpuRegister());
+}
+
+void X86Assembler::Copy(FrameOffset dest, FrameOffset src,
+                        ManagedRegister mscratch,
+                        size_t size) {
+  X86ManagedRegister scratch = mscratch.AsX86();
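+  // A 64-bit copy through a single 32-bit scratch register is performed as
+  // two 4-byte load/store pairs.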
+  if (scratch.IsCpuRegister() && size == 8) {
+    Load(scratch, src, 4);
+    Store(dest, scratch, 4);
+    Load(scratch, FrameOffset(src.Int32Value() + 4), 4);
+    Store(FrameOffset(dest.Int32Value() + 4), scratch, 4);
+  } else {
+    Load(scratch, src, size);
+    Store(dest, scratch, size);
+  }
+}
+
+void X86Assembler::Copy(FrameOffset /*dst*/, ManagedRegister /*src_base*/, Offset /*src_offset*/,
+                        ManagedRegister /*scratch*/, size_t /*size*/) {
+  UNIMPLEMENTED(FATAL);
+}
+
+void X86Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+                        ManagedRegister scratch, size_t size) {
+  CHECK(scratch.IsNoRegister());
+  CHECK_EQ(size, 4u);
+  pushl(Address(ESP, src));
+  popl(Address(dest_base.AsX86().AsCpuRegister(), dest_offset));
+}
+
+void X86Assembler::Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
+                        ManagedRegister mscratch, size_t size) {
+  Register scratch = mscratch.AsX86().AsCpuRegister();
+  CHECK_EQ(size, 4u);
+  movl(scratch, Address(ESP, src_base));
+  movl(scratch, Address(scratch, src_offset));
+  movl(Address(ESP, dest), scratch);
+}
+
+void X86Assembler::Copy(ManagedRegister dest, Offset dest_offset,
+                        ManagedRegister src, Offset src_offset,
+                        ManagedRegister scratch, size_t size) {
+  CHECK_EQ(size, 4u);
+  CHECK(scratch.IsNoRegister());
+  pushl(Address(src.AsX86().AsCpuRegister(), src_offset));
+  popl(Address(dest.AsX86().AsCpuRegister(), dest_offset));
+}
+
+void X86Assembler::Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+                        ManagedRegister mscratch, size_t size) {
+  Register scratch = mscratch.AsX86().AsCpuRegister();
+  CHECK_EQ(size, 4u);
+  CHECK_EQ(dest.Int32Value(), src.Int32Value());
+  movl(scratch, Address(ESP, src));
+  pushl(Address(scratch, src_offset));
+  popl(Address(scratch, dest_offset));
+}
+
+void X86Assembler::MemoryBarrier(ManagedRegister) {
+#if ANDROID_SMP != 0
+  mfence();
+#endif
+}
+
+void X86Assembler::CreateSirtEntry(ManagedRegister mout_reg,
+                                   FrameOffset sirt_offset,
+                                   ManagedRegister min_reg, bool null_allowed) {
+  X86ManagedRegister out_reg = mout_reg.AsX86();
+  X86ManagedRegister in_reg = min_reg.AsX86();
+  CHECK(in_reg.IsCpuRegister());
+  CHECK(out_reg.IsCpuRegister());
+  VerifyObject(in_reg, null_allowed);
+  if (null_allowed) {
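+    // When a null value is allowed, produce NULL rather than a pointer to the
+    // SIRT slot if the incoming reference is null.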
+    Label null_arg;
+    if (!out_reg.Equals(in_reg)) {
+      xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+    }
+    testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+    j(kZero, &null_arg);
+    leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset));
+    Bind(&null_arg);
+  } else {
+    leal(out_reg.AsCpuRegister(), Address(ESP, sirt_offset));
+  }
+}
+
+void X86Assembler::CreateSirtEntry(FrameOffset out_off,
+                                   FrameOffset sirt_offset,
+                                   ManagedRegister mscratch,
+                                   bool null_allowed) {
+  X86ManagedRegister scratch = mscratch.AsX86();
+  CHECK(scratch.IsCpuRegister());
+  if (null_allowed) {
+    Label null_arg;
+    movl(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+    testl(scratch.AsCpuRegister(), scratch.AsCpuRegister());
+    j(kZero, &null_arg);
+    leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+    Bind(&null_arg);
+  } else {
+    leal(scratch.AsCpuRegister(), Address(ESP, sirt_offset));
+  }
+  Store(out_off, scratch, 4);
+}
+
+// Given a SIRT entry, load the associated reference.
+void X86Assembler::LoadReferenceFromSirt(ManagedRegister mout_reg,
+                                         ManagedRegister min_reg) {
+  X86ManagedRegister out_reg = mout_reg.AsX86();
+  X86ManagedRegister in_reg = min_reg.AsX86();
+  CHECK(out_reg.IsCpuRegister());
+  CHECK(in_reg.IsCpuRegister());
+  Label null_arg;
+  if (!out_reg.Equals(in_reg)) {
+    xorl(out_reg.AsCpuRegister(), out_reg.AsCpuRegister());
+  }
+  testl(in_reg.AsCpuRegister(), in_reg.AsCpuRegister());
+  j(kZero, &null_arg);
+  movl(out_reg.AsCpuRegister(), Address(in_reg.AsCpuRegister(), 0));
+  Bind(&null_arg);
+}
+
+void X86Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
+  // TODO: not validating references
+}
+
+void X86Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
+  // TODO: not validating references
+}
+
+void X86Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister) {
+  X86ManagedRegister base = mbase.AsX86();
+  CHECK(base.IsCpuRegister());
+  call(Address(base.AsCpuRegister(), offset.Int32Value()));
+  // TODO: place reference map on call
+}
+
+void X86Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
+  Register scratch = mscratch.AsX86().AsCpuRegister();
+  movl(scratch, Address(ESP, base));
+  call(Address(scratch, offset));
+}
+
+void X86Assembler::Call(ThreadOffset offset, ManagedRegister /*mscratch*/) {
+  fs()->call(Address::Absolute(offset));
+}
+
+void X86Assembler::GetCurrentThread(ManagedRegister tr) {
+  fs()->movl(tr.AsX86().AsCpuRegister(),
+             Address::Absolute(Thread::SelfOffset()));
+}
+
+void X86Assembler::GetCurrentThread(FrameOffset offset,
+                                    ManagedRegister mscratch) {
+  X86ManagedRegister scratch = mscratch.AsX86();
+  fs()->movl(scratch.AsCpuRegister(), Address::Absolute(Thread::SelfOffset()));
+  movl(Address(ESP, offset), scratch.AsCpuRegister());
+}
+
+void X86Assembler::SuspendPoll(ManagedRegister /*scratch*/,
+                               ManagedRegister return_reg,
+                               FrameOffset return_save_location,
+                               size_t return_size) {
+  X86SuspendCountSlowPath* slow =
+      new X86SuspendCountSlowPath(return_reg.AsX86(), return_save_location,
+                                  return_size);
+  buffer_.EnqueueSlowPath(slow);
+  fs()->cmpl(Address::Absolute(Thread::SuspendCountOffset()), Immediate(0));
+  j(kNotEqual, slow->Entry());
+  Bind(slow->Continuation());
+}
+
+void X86SuspendCountSlowPath::Emit(Assembler *sasm) {
+  X86Assembler* sp_asm = down_cast<X86Assembler*>(sasm);
+#define __ sp_asm->
+  __ Bind(&entry_);
+  // Save return value
+  __ Store(return_save_location_, return_register_, return_size_);
+  // Pass Thread::Current as argument
+  __ fs()->pushl(Address::Absolute(Thread::SelfOffset()));
+  __ fs()->call(Address::Absolute(ENTRYPOINT_OFFSET(pCheckSuspendFromCode)));
+  // Release argument
+  __ addl(ESP, Immediate(kPointerSize));
+  // Reload return value
+  __ Load(return_register_, return_save_location_, return_size_);
+  __ jmp(&continuation_);
+#undef __
+}
+
+void X86Assembler::ExceptionPoll(ManagedRegister /*scratch*/) {
+  X86ExceptionSlowPath* slow = new X86ExceptionSlowPath();
+  buffer_.EnqueueSlowPath(slow);
+  fs()->cmpl(Address::Absolute(Thread::ExceptionOffset()), Immediate(0));
+  j(kNotEqual, slow->Entry());
+}
+
+void X86ExceptionSlowPath::Emit(Assembler *sasm) {
+  X86Assembler* sp_asm = down_cast<X86Assembler*>(sasm);
+#define __ sp_asm->
+  __ Bind(&entry_);
+  // Note: the return value is dead
+  // Pass exception as argument in EAX
+  __ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset()));
+  __ fs()->call(Address::Absolute(ENTRYPOINT_OFFSET(pDeliverException)));
+  // this call should never return
+  __ int3();
+#undef __
+}
+
+}  // namespace x86
+}  // namespace art
diff --git a/src/oat/utils/x86/assembler_x86.h b/src/oat/utils/x86/assembler_x86.h
new file mode 100644
index 0000000..f7d26ff
--- /dev/null
+++ b/src/oat/utils/x86/assembler_x86.h
@@ -0,0 +1,669 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_UTILS_X86_ASSEMBLER_X86_H_
+#define ART_SRC_OAT_UTILS_X86_ASSEMBLER_X86_H_
+
+#include <vector>
+#include "constants.h"
+#include "globals.h"
+#include "managed_register_x86.h"
+#include "macros.h"
+#include "oat/utils/assembler.h"
+#include "offsets.h"
+#include "utils.h"
+
+namespace art {
+namespace x86 {
+
+class Immediate {
+ public:
+  explicit Immediate(int32_t value) : value_(value) {}
+
+  int32_t value() const { return value_; }
+
+  bool is_int8() const { return IsInt(8, value_); }
+  bool is_uint8() const { return IsUint(8, value_); }
+  bool is_uint16() const { return IsUint(16, value_); }
+
+ private:
+  const int32_t value_;
+
+  // TODO: Remove the #if when Mac OS build server no longer uses GCC 4.2.*.
+#if GCC_VERSION >= 40300
+  DISALLOW_COPY_AND_ASSIGN(Immediate);
+#endif
+};
+
+
+class Operand {
+ public:
+  uint8_t mod() const {
+    return (encoding_at(0) >> 6) & 3;
+  }
+
+  Register rm() const {
+    return static_cast<Register>(encoding_at(0) & 7);
+  }
+
+  ScaleFactor scale() const {
+    return static_cast<ScaleFactor>((encoding_at(1) >> 6) & 3);
+  }
+
+  Register index() const {
+    return static_cast<Register>((encoding_at(1) >> 3) & 7);
+  }
+
+  Register base() const {
+    return static_cast<Register>(encoding_at(1) & 7);
+  }
+
+  int8_t disp8() const {
+    CHECK_GE(length_, 2);
+    return static_cast<int8_t>(encoding_[length_ - 1]);
+  }
+
+  int32_t disp32() const {
+    CHECK_GE(length_, 5);
+    int32_t value;
+    memcpy(&value, &encoding_[length_ - 4], sizeof(value));
+    return value;
+  }
+
+  bool IsRegister(Register reg) const {
+    return ((encoding_[0] & 0xF8) == 0xC0)  // Addressing mode is register only.
+        && ((encoding_[0] & 0x07) == reg);  // Register codes match.
+  }
+
+ protected:
+  // Operand can be subclassed (e.g., Address).
+  Operand() : length_(0) { }
+
+  void SetModRM(int mod, Register rm) {
+    CHECK_EQ(mod & ~3, 0);
+    encoding_[0] = (mod << 6) | rm;
+    length_ = 1;
+  }
+
+  void SetSIB(ScaleFactor scale, Register index, Register base) {
+    CHECK_EQ(length_, 1);
+    CHECK_EQ(scale & ~3, 0);
+    encoding_[1] = (scale << 6) | (index << 3) | base;
+    length_ = 2;
+  }
+
+  void SetDisp8(int8_t disp) {
+    CHECK(length_ == 1 || length_ == 2);
+    encoding_[length_++] = static_cast<uint8_t>(disp);
+  }
+
+  void SetDisp32(int32_t disp) {
+    CHECK(length_ == 1 || length_ == 2);
+    int disp_size = sizeof(disp);
+    memmove(&encoding_[length_], &disp, disp_size);
+    length_ += disp_size;
+  }
+
+ private:
+  byte length_;
+  byte encoding_[6];
+  byte padding_;
+
+  explicit Operand(Register reg) { SetModRM(3, reg); }
+
+  // Get the operand encoding byte at the given index.
+  uint8_t encoding_at(int index) const {
+    CHECK_GE(index, 0);
+    CHECK_LT(index, length_);
+    return encoding_[index];
+  }
+
+  friend class X86Assembler;
+
+  // TODO: Remove the #if when Mac OS build server no longer uses GCC 4.2.*.
+#if GCC_VERSION >= 40300
+  DISALLOW_COPY_AND_ASSIGN(Operand);
+#endif
+};
+
+
+class Address : public Operand {
+ public:
+  Address(Register base, int32_t disp) {
+    Init(base, disp);
+  }
+
+  Address(Register base, Offset disp) {
+    Init(base, disp.Int32Value());
+  }
+
+  Address(Register base, FrameOffset disp) {
+    CHECK_EQ(base, ESP);
+    Init(ESP, disp.Int32Value());
+  }
+
+  Address(Register base, MemberOffset disp) {
+    Init(base, disp.Int32Value());
+  }
+
+  void Init(Register base, int32_t disp) {
+    if (disp == 0 && base != EBP) {
+      SetModRM(0, base);
+      if (base == ESP) SetSIB(TIMES_1, ESP, base);
+    } else if (disp >= -128 && disp <= 127) {
+      SetModRM(1, base);
+      if (base == ESP) SetSIB(TIMES_1, ESP, base);
+      SetDisp8(disp);
+    } else {
+      SetModRM(2, base);
+      if (base == ESP) SetSIB(TIMES_1, ESP, base);
+      SetDisp32(disp);
+    }
+  }
+
+
+  Address(Register index, ScaleFactor scale, int32_t disp) {
+    CHECK_NE(index, ESP);  // Illegal addressing mode.
+    SetModRM(0, ESP);
+    SetSIB(scale, index, EBP);
+    SetDisp32(disp);
+  }
+
+  Address(Register base, Register index, ScaleFactor scale, int32_t disp) {
+    CHECK_NE(index, ESP);  // Illegal addressing mode.
+    if (disp == 0 && base != EBP) {
+      SetModRM(0, ESP);
+      SetSIB(scale, index, base);
+    } else if (disp >= -128 && disp <= 127) {
+      SetModRM(1, ESP);
+      SetSIB(scale, index, base);
+      SetDisp8(disp);
+    } else {
+      SetModRM(2, ESP);
+      SetSIB(scale, index, base);
+      SetDisp32(disp);
+    }
+  }
+
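+  // An absolute address is encoded as mod 0 with EBP in the r/m field, which
+  // on x86 denotes a bare 32-bit displacement with no base register.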
+  static Address Absolute(uword addr) {
+    Address result;
+    result.SetModRM(0, EBP);
+    result.SetDisp32(addr);
+    return result;
+  }
+
+  static Address Absolute(ThreadOffset addr) {
+    return Absolute(addr.Int32Value());
+  }
+
+ private:
+  Address() {}
+
+  // TODO: Remove the #if when Mac OS build server no longer uses GCC 4.2.*.
+#if GCC_VERSION >= 40300
+  DISALLOW_COPY_AND_ASSIGN(Address);
+#endif
+};
+
+
+class X86Assembler : public Assembler {
+ public:
+  X86Assembler() {}
+  virtual ~X86Assembler() {}
+
+  /*
+   * Emit Machine Instructions.
+   */
+  void call(Register reg);
+  void call(const Address& address);
+  void call(Label* label);
+
+  void pushl(Register reg);
+  void pushl(const Address& address);
+  void pushl(const Immediate& imm);
+
+  void popl(Register reg);
+  void popl(const Address& address);
+
+  void movl(Register dst, const Immediate& src);
+  void movl(Register dst, Register src);
+
+  void movl(Register dst, const Address& src);
+  void movl(const Address& dst, Register src);
+  void movl(const Address& dst, const Immediate& imm);
+  void movl(const Address& dst, Label* lbl);
+
+  void movzxb(Register dst, ByteRegister src);
+  void movzxb(Register dst, const Address& src);
+  void movsxb(Register dst, ByteRegister src);
+  void movsxb(Register dst, const Address& src);
+  void movb(Register dst, const Address& src);
+  void movb(const Address& dst, ByteRegister src);
+  void movb(const Address& dst, const Immediate& imm);
+
+  void movzxw(Register dst, Register src);
+  void movzxw(Register dst, const Address& src);
+  void movsxw(Register dst, Register src);
+  void movsxw(Register dst, const Address& src);
+  void movw(Register dst, const Address& src);
+  void movw(const Address& dst, Register src);
+
+  void leal(Register dst, const Address& src);
+
+  void cmovl(Condition condition, Register dst, Register src);
+
+  void setb(Condition condition, Register dst);
+
+  void movss(XmmRegister dst, const Address& src);
+  void movss(const Address& dst, XmmRegister src);
+  void movss(XmmRegister dst, XmmRegister src);
+
+  void movd(XmmRegister dst, Register src);
+  void movd(Register dst, XmmRegister src);
+
+  void addss(XmmRegister dst, XmmRegister src);
+  void addss(XmmRegister dst, const Address& src);
+  void subss(XmmRegister dst, XmmRegister src);
+  void subss(XmmRegister dst, const Address& src);
+  void mulss(XmmRegister dst, XmmRegister src);
+  void mulss(XmmRegister dst, const Address& src);
+  void divss(XmmRegister dst, XmmRegister src);
+  void divss(XmmRegister dst, const Address& src);
+
+  void movsd(XmmRegister dst, const Address& src);
+  void movsd(const Address& dst, XmmRegister src);
+  void movsd(XmmRegister dst, XmmRegister src);
+
+  void addsd(XmmRegister dst, XmmRegister src);
+  void addsd(XmmRegister dst, const Address& src);
+  void subsd(XmmRegister dst, XmmRegister src);
+  void subsd(XmmRegister dst, const Address& src);
+  void mulsd(XmmRegister dst, XmmRegister src);
+  void mulsd(XmmRegister dst, const Address& src);
+  void divsd(XmmRegister dst, XmmRegister src);
+  void divsd(XmmRegister dst, const Address& src);
+
+  void cvtsi2ss(XmmRegister dst, Register src);
+  void cvtsi2sd(XmmRegister dst, Register src);
+
+  void cvtss2si(Register dst, XmmRegister src);
+  void cvtss2sd(XmmRegister dst, XmmRegister src);
+
+  void cvtsd2si(Register dst, XmmRegister src);
+  void cvtsd2ss(XmmRegister dst, XmmRegister src);
+
+  void cvttss2si(Register dst, XmmRegister src);
+  void cvttsd2si(Register dst, XmmRegister src);
+
+  void cvtdq2pd(XmmRegister dst, XmmRegister src);
+
+  void comiss(XmmRegister a, XmmRegister b);
+  void comisd(XmmRegister a, XmmRegister b);
+
+  void sqrtsd(XmmRegister dst, XmmRegister src);
+  void sqrtss(XmmRegister dst, XmmRegister src);
+
+  void xorpd(XmmRegister dst, const Address& src);
+  void xorpd(XmmRegister dst, XmmRegister src);
+  void xorps(XmmRegister dst, const Address& src);
+  void xorps(XmmRegister dst, XmmRegister src);
+
+  void andpd(XmmRegister dst, const Address& src);
+
+  void flds(const Address& src);
+  void fstps(const Address& dst);
+
+  void fldl(const Address& src);
+  void fstpl(const Address& dst);
+
+  void fnstcw(const Address& dst);
+  void fldcw(const Address& src);
+
+  void fistpl(const Address& dst);
+  void fistps(const Address& dst);
+  void fildl(const Address& src);
+
+  void fincstp();
+  void ffree(const Immediate& index);
+
+  void fsin();
+  void fcos();
+  void fptan();
+
+  void xchgl(Register dst, Register src);
+
+  void cmpl(Register reg, const Immediate& imm);
+  void cmpl(Register reg0, Register reg1);
+  void cmpl(Register reg, const Address& address);
+
+  void cmpl(const Address& address, Register reg);
+  void cmpl(const Address& address, const Immediate& imm);
+
+  void testl(Register reg1, Register reg2);
+  void testl(Register reg, const Immediate& imm);
+
+  void andl(Register dst, const Immediate& imm);
+  void andl(Register dst, Register src);
+
+  void orl(Register dst, const Immediate& imm);
+  void orl(Register dst, Register src);
+
+  void xorl(Register dst, Register src);
+
+  void addl(Register dst, Register src);
+  void addl(Register reg, const Immediate& imm);
+  void addl(Register reg, const Address& address);
+
+  void addl(const Address& address, Register reg);
+  void addl(const Address& address, const Immediate& imm);
+
+  void adcl(Register dst, Register src);
+  void adcl(Register reg, const Immediate& imm);
+  void adcl(Register dst, const Address& address);
+
+  void subl(Register dst, Register src);
+  void subl(Register reg, const Immediate& imm);
+  void subl(Register reg, const Address& address);
+
+  void cdq();
+
+  void idivl(Register reg);
+
+  void imull(Register dst, Register src);
+  void imull(Register reg, const Immediate& imm);
+  void imull(Register reg, const Address& address);
+
+  void imull(Register reg);
+  void imull(const Address& address);
+
+  void mull(Register reg);
+  void mull(const Address& address);
+
+  void sbbl(Register dst, Register src);
+  void sbbl(Register reg, const Immediate& imm);
+  void sbbl(Register reg, const Address& address);
+
+  void incl(Register reg);
+  void incl(const Address& address);
+
+  void decl(Register reg);
+  void decl(const Address& address);
+
+  void shll(Register reg, const Immediate& imm);
+  void shll(Register operand, Register shifter);
+  void shrl(Register reg, const Immediate& imm);
+  void shrl(Register operand, Register shifter);
+  void sarl(Register reg, const Immediate& imm);
+  void sarl(Register operand, Register shifter);
+  void shld(Register dst, Register src);
+
+  void negl(Register reg);
+  void notl(Register reg);
+
+  void enter(const Immediate& imm);
+  void leave();
+
+  void ret();
+  void ret(const Immediate& imm);
+
+  void nop();
+  void int3();
+  void hlt();
+
+  void j(Condition condition, Label* label);
+
+  void jmp(Register reg);
+  void jmp(Label* label);
+
+  X86Assembler* lock();
+  void cmpxchgl(const Address& address, Register reg);
+
+  void mfence();
+
+  X86Assembler* fs();
+
+  //
+  // Macros for High-level operations.
+  //
+
+  void AddImmediate(Register reg, const Immediate& imm);
+
+  void LoadDoubleConstant(XmmRegister dst, double value);
+
+  void DoubleNegate(XmmRegister d);
+  void FloatNegate(XmmRegister f);
+
+  void DoubleAbs(XmmRegister reg);
+
+  void LockCmpxchgl(const Address& address, Register reg) {
+    lock()->cmpxchgl(address, reg);
+  }
+
+  //
+  // Misc. functionality
+  //
+  int PreferredLoopAlignment() { return 16; }
+  void Align(int alignment, int offset);
+  void Bind(Label* label);
+
+  // Debugging and bringup support.
+  void Stop(const char* message);
+
+  //
+  // Overridden common assembler high-level functionality
+  //
+
+  // Emit code that will create an activation on the stack
+  virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
+                          const std::vector<ManagedRegister>& callee_save_regs,
+                          const std::vector<ManagedRegister>& entry_spills);
+
+  // Emit code that will remove an activation from the stack
+  virtual void RemoveFrame(size_t frame_size,
+                           const std::vector<ManagedRegister>& callee_save_regs);
+
+  virtual void IncreaseFrameSize(size_t adjust);
+  virtual void DecreaseFrameSize(size_t adjust);
+
+  // Store routines
+  virtual void Store(FrameOffset offs, ManagedRegister src, size_t size);
+  virtual void StoreRef(FrameOffset dest, ManagedRegister src);
+  virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src);
+
+  virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
+                                     ManagedRegister scratch);
+
+  virtual void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
+                                      ManagedRegister scratch);
+
+  virtual void StoreStackOffsetToThread(ThreadOffset thr_offs,
+                                        FrameOffset fr_offs,
+                                        ManagedRegister scratch);
+
+  virtual void StoreStackPointerToThread(ThreadOffset thr_offs);
+
+  void StoreLabelToThread(ThreadOffset thr_offs, Label* lbl);
+
+  virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
+                             FrameOffset in_off, ManagedRegister scratch);
+
+  // Load routines
+  virtual void Load(ManagedRegister dest, FrameOffset src, size_t size);
+
+  virtual void Load(ManagedRegister dest, ThreadOffset src, size_t size);
+
+  virtual void LoadRef(ManagedRegister dest, FrameOffset src);
+
+  virtual void LoadRef(ManagedRegister dest, ManagedRegister base,
+                       MemberOffset offs);
+
+  virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base,
+                          Offset offs);
+
+  virtual void LoadRawPtrFromThread(ManagedRegister dest,
+                                    ThreadOffset offs);
+
+  // Copying routines
+  virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size);
+
+  virtual void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
+                                    ManagedRegister scratch);
+
+  virtual void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
+                                  ManagedRegister scratch);
+
+  virtual void CopyRef(FrameOffset dest, FrameOffset src,
+                       ManagedRegister scratch);
+
+  virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size);
+
+  virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void Copy(ManagedRegister dest, Offset dest_offset,
+                    ManagedRegister src, Offset src_offset,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
+                    ManagedRegister scratch, size_t size);
+
+  virtual void MemoryBarrier(ManagedRegister);
+
+  // Exploit fast access in managed code to Thread::Current()
+  virtual void GetCurrentThread(ManagedRegister tr);
+  virtual void GetCurrentThread(FrameOffset dest_offset,
+                                ManagedRegister scratch);
+
+  // Set up out_reg to hold an Object** into the SIRT, or to be NULL if the
+  // value is null and null_allowed. in_reg holds a possibly stale reference
+  // that can be used to avoid loading the SIRT entry to see if the value is
+  // NULL.
+  virtual void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
+                               ManagedRegister in_reg, bool null_allowed);
+
+  // Set up out_off to hold an Object** into the SIRT, or to be NULL if the
+  // value is null and null_allowed.
+  virtual void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
+                               ManagedRegister scratch, bool null_allowed);
+
+  // src holds a SIRT entry (Object**); load it into dst.
+  virtual void LoadReferenceFromSirt(ManagedRegister dst,
+                                     ManagedRegister src);
+
+  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
+  // know that src cannot be null.
+  virtual void VerifyObject(ManagedRegister src, bool could_be_null);
+  virtual void VerifyObject(FrameOffset src, bool could_be_null);
+
+  // Call to address held at [base+offset]
+  virtual void Call(ManagedRegister base, Offset offset,
+                    ManagedRegister scratch);
+  virtual void Call(FrameOffset base, Offset offset,
+                    ManagedRegister scratch);
+  virtual void Call(ThreadOffset offset, ManagedRegister scratch);
+
+  // Generate code to check if Thread::Current()->suspend_count_ is non-zero
+  // and branch to a SuspendSlowPath if it is. The SuspendSlowPath will continue
+  // at the next instruction.
+  virtual void SuspendPoll(ManagedRegister scratch, ManagedRegister return_reg,
+                           FrameOffset return_save_location,
+                           size_t return_size);
+
+  // Generate code to check if Thread::Current()->exception_ is non-null
+  // and branch to a ExceptionSlowPath if it is.
+  virtual void ExceptionPoll(ManagedRegister scratch);
+
+ private:
+  inline void EmitUint8(uint8_t value);
+  inline void EmitInt32(int32_t value);
+  inline void EmitRegisterOperand(int rm, int reg);
+  inline void EmitXmmRegisterOperand(int rm, XmmRegister reg);
+  inline void EmitFixup(AssemblerFixup* fixup);
+  inline void EmitOperandSizeOverride();
+
+  void EmitOperand(int rm, const Operand& operand);
+  void EmitImmediate(const Immediate& imm);
+  void EmitComplex(int rm, const Operand& operand, const Immediate& immediate);
+  void EmitLabel(Label* label, int instruction_size);
+  void EmitLabelLink(Label* label);
+  void EmitNearLabelLink(Label* label);
+
+  void EmitGenericShift(int rm, Register reg, const Immediate& imm);
+  void EmitGenericShift(int rm, Register operand, Register shifter);
+
+  DISALLOW_COPY_AND_ASSIGN(X86Assembler);
+};
+
+inline void X86Assembler::EmitUint8(uint8_t value) {
+  buffer_.Emit<uint8_t>(value);
+}
+
+inline void X86Assembler::EmitInt32(int32_t value) {
+  buffer_.Emit<int32_t>(value);
+}
+
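+// Emits a ModRM byte with mod == 3 (register-direct addressing), 'rm' in the
+// reg/opcode field and 'reg' in the r/m field.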
+inline void X86Assembler::EmitRegisterOperand(int rm, int reg) {
+  CHECK_GE(rm, 0);
+  CHECK_LT(rm, 8);
+  buffer_.Emit<uint8_t>(0xC0 + (rm << 3) + reg);
+}
+
+inline void X86Assembler::EmitXmmRegisterOperand(int rm, XmmRegister reg) {
+  EmitRegisterOperand(rm, static_cast<Register>(reg));
+}
+
+inline void X86Assembler::EmitFixup(AssemblerFixup* fixup) {
+  buffer_.EmitFixup(fixup);
+}
+
+inline void X86Assembler::EmitOperandSizeOverride() {
+  EmitUint8(0x66);
+}
+
+// Slowpath entered when Thread::Current()->exception_ is non-null
+class X86ExceptionSlowPath : public SlowPath {
+ public:
+  X86ExceptionSlowPath() {}
+  virtual void Emit(Assembler *sp_asm);
+};
+
+// Slowpath entered when Thread::Current()->suspend_count_ is non-zero
+class X86SuspendCountSlowPath : public SlowPath {
+ public:
+  X86SuspendCountSlowPath(X86ManagedRegister return_reg,
+                          FrameOffset return_save_location,
+                          size_t return_size) :
+     return_register_(return_reg), return_save_location_(return_save_location),
+     return_size_(return_size) {}
+  virtual void Emit(Assembler *sp_asm);
+
+ private:
+  // Remember how to save the return value
+  const X86ManagedRegister return_register_;
+  const FrameOffset return_save_location_;
+  const size_t return_size_;
+};
+
+}  // namespace x86
+}  // namespace art
+
+#endif  // ART_SRC_OAT_UTILS_X86_ASSEMBLER_X86_H_
diff --git a/src/oat/utils/x86/assembler_x86_test.cc b/src/oat/utils/x86/assembler_x86_test.cc
new file mode 100644
index 0000000..5d8a3b1
--- /dev/null
+++ b/src/oat/utils/x86/assembler_x86_test.cc
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "assembler_x86.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(AssemblerX86, CreateBuffer) {
+  AssemblerBuffer buffer;
+  AssemblerBuffer::EnsureCapacity ensured(&buffer);
+  buffer.Emit<uint8_t>(0x42);
+  ASSERT_EQ(static_cast<size_t>(1), buffer.Size());
+  buffer.Emit<int32_t>(42);
+  ASSERT_EQ(static_cast<size_t>(5), buffer.Size());
+}
+
+}  // namespace art
diff --git a/src/oat/utils/x86/managed_register_x86.cc b/src/oat/utils/x86/managed_register_x86.cc
new file mode 100644
index 0000000..4697d06
--- /dev/null
+++ b/src/oat/utils/x86/managed_register_x86.cc
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "managed_register_x86.h"
+
+#include "globals.h"
+
+namespace art {
+namespace x86 {
+
+// These cpu registers are never available for allocation.
+static const Register kReservedCpuRegistersArray[] = { ESP };
+
+
+// We reduce the number of available registers for allocation in debug-code
+// mode in order to increase register pressure.
+
+// We need all registers for caching.
+static const int kNumberOfAvailableCpuRegisters = kNumberOfCpuRegisters;
+static const int kNumberOfAvailableXmmRegisters = kNumberOfXmmRegisters;
+static const int kNumberOfAvailableRegisterPairs = kNumberOfRegisterPairs;
+
+
+// Define register pairs.
+// This list must be kept in sync with the RegisterPair enum.
+#define REGISTER_PAIR_LIST(P) \
+  P(EAX, EDX)                 \
+  P(EAX, ECX)                 \
+  P(EAX, EBX)                 \
+  P(EAX, EDI)                 \
+  P(EDX, ECX)                 \
+  P(EDX, EBX)                 \
+  P(EDX, EDI)                 \
+  P(ECX, EBX)                 \
+  P(ECX, EDI)                 \
+  P(EBX, EDI)
+
+
+struct RegisterPairDescriptor {
+  RegisterPair reg;  // Used to verify that the enum is in sync.
+  Register low;
+  Register high;
+};
+
+
+static const RegisterPairDescriptor kRegisterPairs[] = {
+#define REGISTER_PAIR_ENUMERATION(low, high) { low##_##high, low, high },
+  REGISTER_PAIR_LIST(REGISTER_PAIR_ENUMERATION)
+#undef REGISTER_PAIR_ENUMERATION
+};
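+// For example, REGISTER_PAIR_ENUMERATION(EAX, EDX) expands to
+// { EAX_EDX, EAX, EDX }, so kRegisterPairs[EAX_EDX] records EAX as the low
+// and EDX as the high register of the pair.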
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg) {
+  os << X86ManagedRegister::FromRegisterPair(reg);
+  return os;
+}
+
+bool X86ManagedRegister::Overlaps(const X86ManagedRegister& other) const {
+  if (IsNoRegister() || other.IsNoRegister()) return false;
+  CHECK(IsValidManagedRegister());
+  CHECK(other.IsValidManagedRegister());
+  if (Equals(other)) return true;
+  if (IsRegisterPair()) {
+    Register low = AsRegisterPairLow();
+    Register high = AsRegisterPairHigh();
+    return X86ManagedRegister::FromCpuRegister(low).Overlaps(other) ||
+        X86ManagedRegister::FromCpuRegister(high).Overlaps(other);
+  }
+  if (other.IsRegisterPair()) {
+    return other.Overlaps(*this);
+  }
+  return false;
+}
+
+
+int X86ManagedRegister::AllocIdLow() const {
+  CHECK(IsRegisterPair());
+  const int r = RegId() - (kNumberOfCpuRegIds + kNumberOfXmmRegIds +
+                           kNumberOfX87RegIds);
+  CHECK_EQ(r, kRegisterPairs[r].reg);
+  return kRegisterPairs[r].low;
+}
+
+
+int X86ManagedRegister::AllocIdHigh() const {
+  CHECK(IsRegisterPair());
+  const int r = RegId() - (kNumberOfCpuRegIds + kNumberOfXmmRegIds +
+                           kNumberOfX87RegIds);
+  CHECK_EQ(r, kRegisterPairs[r].reg);
+  return kRegisterPairs[r].high;
+}
+
+
+void X86ManagedRegister::Print(std::ostream& os) const {
+  if (!IsValidManagedRegister()) {
+    os << "No Register";
+  } else if (IsXmmRegister()) {
+    os << "XMM: " << static_cast<int>(AsXmmRegister());
+  } else if (IsX87Register()) {
+    os << "X87: " << static_cast<int>(AsX87Register());
+  } else if (IsCpuRegister()) {
+    os << "CPU: " << static_cast<int>(AsCpuRegister());
+  } else if (IsRegisterPair()) {
+    os << "Pair: " << AsRegisterPairLow() << ", " << AsRegisterPairHigh();
+  } else {
+    os << "??: " << RegId();
+  }
+}
+
+std::ostream& operator<<(std::ostream& os, const X86ManagedRegister& reg) {
+  reg.Print(os);
+  return os;
+}
+
+}  // namespace x86
+}  // namespace art
diff --git a/src/oat/utils/x86/managed_register_x86.h b/src/oat/utils/x86/managed_register_x86.h
new file mode 100644
index 0000000..33573f9
--- /dev/null
+++ b/src/oat/utils/x86/managed_register_x86.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_UTILS_X86_MANAGED_REGISTER_X86_H_
+#define ART_SRC_OAT_UTILS_X86_MANAGED_REGISTER_X86_H_
+
+#include "constants_x86.h"
+#include "oat/utils/managed_register.h"
+
+namespace art {
+namespace x86 {
+
+// Values for register pairs.
+// The registers in kReservedCpuRegistersArray in managed_register_x86.cc are
+// not used in pairs.
+// The table kRegisterPairs in managed_register_x86.cc must be kept in sync
+// with this enum.
+enum RegisterPair {
+  EAX_EDX = 0,
+  EAX_ECX = 1,
+  EAX_EBX = 2,
+  EAX_EDI = 3,
+  EDX_ECX = 4,
+  EDX_EBX = 5,
+  EDX_EDI = 6,
+  ECX_EBX = 7,
+  ECX_EDI = 8,
+  EBX_EDI = 9,
+  kNumberOfRegisterPairs = 10,
+  kNoRegisterPair = -1,
+};
+
+std::ostream& operator<<(std::ostream& os, const RegisterPair& reg);
+
+const int kNumberOfCpuRegIds = kNumberOfCpuRegisters;
+const int kNumberOfCpuAllocIds = kNumberOfCpuRegisters;
+
+const int kNumberOfXmmRegIds = kNumberOfXmmRegisters;
+const int kNumberOfXmmAllocIds = kNumberOfXmmRegisters;
+
+const int kNumberOfX87RegIds = kNumberOfX87Registers;
+const int kNumberOfX87AllocIds = kNumberOfX87Registers;
+
+const int kNumberOfPairRegIds = kNumberOfRegisterPairs;
+
+const int kNumberOfRegIds = kNumberOfCpuRegIds + kNumberOfXmmRegIds +
+    kNumberOfX87RegIds + kNumberOfPairRegIds;
+const int kNumberOfAllocIds = kNumberOfCpuAllocIds + kNumberOfXmmAllocIds +
+    kNumberOfX87RegIds;
+
+// Register ids map:
+//   [0..R[  cpu registers (enum Register)
+//   [R..X[  xmm registers (enum XmmRegister)
+//   [X..S[  x87 registers (enum X87Register)
+//   [S..P[  register pairs (enum RegisterPair)
+// where
+//   R = kNumberOfCpuRegIds
+//   X = R + kNumberOfXmmRegIds
+//   S = X + kNumberOfX87RegIds
+//   P = S + kNumberOfRegisterPairs
+
+// Allocation ids map:
+//   [0..R[  cpu registers (enum Register)
+//   [R..X[  xmm registers (enum XmmRegister)
+//   [X..S[  x87 registers (enum X87Register)
+// where
+//   R = kNumberOfCpuRegIds
+//   X = R + kNumberOfXmmRegIds
+//   S = X + kNumberOfX87RegIds
+
+
+// An instance of class 'ManagedRegister' represents a single cpu register (enum
+// Register), an xmm register (enum XmmRegister), an x87 register (enum
+// X87Register), or a pair of cpu registers (enum RegisterPair).
+// 'ManagedRegister::NoRegister()' provides an invalid register.
+// There is a one-to-one mapping between ManagedRegister and register id.
+class X86ManagedRegister : public ManagedRegister {
+ public:
+  Register AsCpuRegister() const {
+    CHECK(IsCpuRegister());
+    return static_cast<Register>(id_);
+  }
+
+  XmmRegister AsXmmRegister() const {
+    CHECK(IsXmmRegister());
+    return static_cast<XmmRegister>(id_ - kNumberOfCpuRegIds);
+  }
+
+  X87Register AsX87Register() const {
+    CHECK(IsX87Register());
+    return static_cast<X87Register>(id_ -
+                                    (kNumberOfCpuRegIds + kNumberOfXmmRegIds));
+  }
+
+  Register AsRegisterPairLow() const {
+    CHECK(IsRegisterPair());
+    // Appropriate mapping of register ids allows the use of AllocIdLow().
+    return FromRegId(AllocIdLow()).AsCpuRegister();
+  }
+
+  Register AsRegisterPairHigh() const {
+    CHECK(IsRegisterPair());
+    // Appropriate mapping of register ids allows the use of AllocIdHigh().
+    return FromRegId(AllocIdHigh()).AsCpuRegister();
+  }
+
+  bool IsCpuRegister() const {
+    CHECK(IsValidManagedRegister());
+    return (0 <= id_) && (id_ < kNumberOfCpuRegIds);
+  }
+
+  bool IsXmmRegister() const {
+    CHECK(IsValidManagedRegister());
+    const int test = id_ - kNumberOfCpuRegIds;
+    return (0 <= test) && (test < kNumberOfXmmRegIds);
+  }
+
+  bool IsX87Register() const {
+    CHECK(IsValidManagedRegister());
+    const int test = id_ - (kNumberOfCpuRegIds + kNumberOfXmmRegIds);
+    return (0 <= test) && (test < kNumberOfX87RegIds);
+  }
+
+  bool IsRegisterPair() const {
+    CHECK(IsValidManagedRegister());
+    const int test = id_ -
+        (kNumberOfCpuRegIds + kNumberOfXmmRegIds + kNumberOfX87RegIds);
+    return (0 <= test) && (test < kNumberOfPairRegIds);
+  }
+
+  void Print(std::ostream& os) const;
+
+  // Returns true if the two managed-registers ('this' and 'other') overlap.
+  // Either managed-register may be the NoRegister. If either is the NoRegister
+  // then false is returned.
+  bool Overlaps(const X86ManagedRegister& other) const;
+
+  static X86ManagedRegister FromCpuRegister(Register r) {
+    CHECK_NE(r, kNoRegister);
+    return FromRegId(r);
+  }
+
+  static X86ManagedRegister FromXmmRegister(XmmRegister r) {
+    CHECK_NE(r, kNoXmmRegister);
+    return FromRegId(r + kNumberOfCpuRegIds);
+  }
+
+  static X86ManagedRegister FromX87Register(X87Register r) {
+    CHECK_NE(r, kNoX87Register);
+    return FromRegId(r + kNumberOfCpuRegIds + kNumberOfXmmRegIds);
+  }
+
+  static X86ManagedRegister FromRegisterPair(RegisterPair r) {
+    CHECK_NE(r, kNoRegisterPair);
+    return FromRegId(r + (kNumberOfCpuRegIds + kNumberOfXmmRegIds +
+                          kNumberOfX87RegIds));
+  }
+
+ private:
+  bool IsValidManagedRegister() const {
+    return (0 <= id_) && (id_ < kNumberOfRegIds);
+  }
+
+  int RegId() const {
+    CHECK(!IsNoRegister());
+    return id_;
+  }
+
+  int AllocId() const {
+    CHECK(IsValidManagedRegister() && !IsRegisterPair());
+    CHECK_LT(id_, kNumberOfAllocIds);
+    return id_;
+  }
+
+  int AllocIdLow() const;
+  int AllocIdHigh() const;
+
+  friend class ManagedRegister;
+
+  explicit X86ManagedRegister(int reg_id) : ManagedRegister(reg_id) {}
+
+  static X86ManagedRegister FromRegId(int reg_id) {
+    X86ManagedRegister reg(reg_id);
+    CHECK(reg.IsValidManagedRegister());
+    return reg;
+  }
+};
+
+std::ostream& operator<<(std::ostream& os, const X86ManagedRegister& reg);
+
+}  // namespace x86
+
+inline x86::X86ManagedRegister ManagedRegister::AsX86() const {
+  x86::X86ManagedRegister reg(id_);
+  CHECK(reg.IsNoRegister() || reg.IsValidManagedRegister());
+  return reg;
+}
+
+}  // namespace art
+
+#endif  // ART_SRC_OAT_UTILS_X86_MANAGED_REGISTER_X86_H_
diff --git a/src/oat/utils/x86/managed_register_x86_test.cc b/src/oat/utils/x86/managed_register_x86_test.cc
new file mode 100644
index 0000000..4fbafda
--- /dev/null
+++ b/src/oat/utils/x86/managed_register_x86_test.cc
@@ -0,0 +1,359 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "globals.h"
+#include "managed_register_x86.h"
+#include "gtest/gtest.h"
+
+namespace art {
+namespace x86 {
+
+TEST(X86ManagedRegister, NoRegister) {
+  X86ManagedRegister reg = ManagedRegister::NoRegister().AsX86();
+  EXPECT_TRUE(reg.IsNoRegister());
+  EXPECT_TRUE(!reg.Overlaps(reg));
+}
+
+TEST(X86ManagedRegister, CpuRegister) {
+  X86ManagedRegister reg = X86ManagedRegister::FromCpuRegister(EAX);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(EAX, reg.AsCpuRegister());
+
+  reg = X86ManagedRegister::FromCpuRegister(EBX);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(EBX, reg.AsCpuRegister());
+
+  reg = X86ManagedRegister::FromCpuRegister(ECX);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(ECX, reg.AsCpuRegister());
+
+  reg = X86ManagedRegister::FromCpuRegister(EDI);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(EDI, reg.AsCpuRegister());
+}
+
+TEST(X86ManagedRegister, XmmRegister) {
+  X86ManagedRegister reg = X86ManagedRegister::FromXmmRegister(XMM0);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(XMM0, reg.AsXmmRegister());
+
+  reg = X86ManagedRegister::FromXmmRegister(XMM1);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(XMM1, reg.AsXmmRegister());
+
+  reg = X86ManagedRegister::FromXmmRegister(XMM7);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(XMM7, reg.AsXmmRegister());
+}
+
+TEST(X86ManagedRegister, X87Register) {
+  X86ManagedRegister reg = X86ManagedRegister::FromX87Register(ST0);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(ST0, reg.AsX87Register());
+
+  reg = X86ManagedRegister::FromX87Register(ST1);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(ST1, reg.AsX87Register());
+
+  reg = X86ManagedRegister::FromX87Register(ST7);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(reg.IsX87Register());
+  EXPECT_TRUE(!reg.IsRegisterPair());
+  EXPECT_EQ(ST7, reg.AsX87Register());
+}
+
+TEST(X86ManagedRegister, RegisterPair) {
+  X86ManagedRegister reg = X86ManagedRegister::FromRegisterPair(EAX_EDX);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(EAX, reg.AsRegisterPairLow());
+  EXPECT_EQ(EDX, reg.AsRegisterPairHigh());
+
+  reg = X86ManagedRegister::FromRegisterPair(EAX_ECX);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(EAX, reg.AsRegisterPairLow());
+  EXPECT_EQ(ECX, reg.AsRegisterPairHigh());
+
+  reg = X86ManagedRegister::FromRegisterPair(EAX_EBX);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(EAX, reg.AsRegisterPairLow());
+  EXPECT_EQ(EBX, reg.AsRegisterPairHigh());
+
+  reg = X86ManagedRegister::FromRegisterPair(EAX_EDI);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(EAX, reg.AsRegisterPairLow());
+  EXPECT_EQ(EDI, reg.AsRegisterPairHigh());
+
+  reg = X86ManagedRegister::FromRegisterPair(EDX_ECX);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(EDX, reg.AsRegisterPairLow());
+  EXPECT_EQ(ECX, reg.AsRegisterPairHigh());
+
+  reg = X86ManagedRegister::FromRegisterPair(EDX_EBX);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(EDX, reg.AsRegisterPairLow());
+  EXPECT_EQ(EBX, reg.AsRegisterPairHigh());
+
+  reg = X86ManagedRegister::FromRegisterPair(EDX_EDI);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(EDX, reg.AsRegisterPairLow());
+  EXPECT_EQ(EDI, reg.AsRegisterPairHigh());
+
+  reg = X86ManagedRegister::FromRegisterPair(ECX_EBX);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(ECX, reg.AsRegisterPairLow());
+  EXPECT_EQ(EBX, reg.AsRegisterPairHigh());
+
+  reg = X86ManagedRegister::FromRegisterPair(ECX_EDI);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(ECX, reg.AsRegisterPairLow());
+  EXPECT_EQ(EDI, reg.AsRegisterPairHigh());
+
+  reg = X86ManagedRegister::FromRegisterPair(EBX_EDI);
+  EXPECT_TRUE(!reg.IsNoRegister());
+  EXPECT_TRUE(!reg.IsCpuRegister());
+  EXPECT_TRUE(!reg.IsXmmRegister());
+  EXPECT_TRUE(!reg.IsX87Register());
+  EXPECT_TRUE(reg.IsRegisterPair());
+  EXPECT_EQ(EBX, reg.AsRegisterPairLow());
+  EXPECT_EQ(EDI, reg.AsRegisterPairHigh());
+}
+
+TEST(X86ManagedRegister, Equals) {
+  X86ManagedRegister reg_eax = X86ManagedRegister::FromCpuRegister(EAX);
+  EXPECT_TRUE(reg_eax.Equals(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(!reg_eax.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+  X86ManagedRegister reg_xmm0 = X86ManagedRegister::FromXmmRegister(XMM0);
+  EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(reg_xmm0.Equals(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(!reg_xmm0.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+  X86ManagedRegister reg_st0 = X86ManagedRegister::FromX87Register(ST0);
+  EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(reg_st0.Equals(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(!reg_st0.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+  X86ManagedRegister reg_pair = X86ManagedRegister::FromRegisterPair(EAX_EDX);
+  EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(reg_pair.Equals(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(!reg_pair.Equals(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+}
+
+TEST(X86ManagedRegister, Overlaps) {
+  X86ManagedRegister reg = X86ManagedRegister::FromCpuRegister(EAX);
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+  reg = X86ManagedRegister::FromCpuRegister(EDX);
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+  reg = X86ManagedRegister::FromCpuRegister(EDI);
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+  reg = X86ManagedRegister::FromCpuRegister(EBX);
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+  reg = X86ManagedRegister::FromXmmRegister(XMM0);
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+  reg = X86ManagedRegister::FromX87Register(ST0);
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
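+  // Two register pairs overlap when they share at least one core register,
+  // e.g. EAX_EDX and EDX_ECX both contain EDX.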
+  reg = X86ManagedRegister::FromRegisterPair(EAX_EDX);
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EDX_ECX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+
+  reg = X86ManagedRegister::FromRegisterPair(EBX_EDI);
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EDX_EBX)));
+
+  reg = X86ManagedRegister::FromRegisterPair(EDX_ECX);
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EAX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EBX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromCpuRegister(EDI)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromXmmRegister(XMM7)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST0)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromX87Register(ST7)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+  EXPECT_TRUE(!reg.Overlaps(X86ManagedRegister::FromRegisterPair(EBX_EDI)));
+  EXPECT_TRUE(reg.Overlaps(X86ManagedRegister::FromRegisterPair(EDX_EBX)));
+}
+
+}  // namespace x86
+}  // namespace art