Move the JNI compilers under compiler.

Change-Id: Id1d40f35be191758906b96b56b74a83ab0dfb88e
diff --git a/src/compiler/jni/quick/arm/calling_convention_arm.cc b/src/compiler/jni/quick/arm/calling_convention_arm.cc
new file mode 100644
index 0000000..e9b09c5
--- /dev/null
+++ b/src/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -0,0 +1,212 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/logging.h"
+#include "calling_convention_arm.h"
+#include "oat/utils/arm/managed_register_arm.h"
+
+namespace art {
+namespace arm {
+
+// Calling convention
+
+ManagedRegister ArmManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+  return ArmManagedRegister::FromCoreRegister(IP);  // R12
+}
+
+ManagedRegister ArmJniCallingConvention::InterproceduralScratchRegister() {
+  return ArmManagedRegister::FromCoreRegister(IP);  // R12
+}
+
+static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
+  if (shorty[0] == 'F') {
+    return ArmManagedRegister::FromCoreRegister(R0);
+  } else if (shorty[0] == 'D') {
+    return ArmManagedRegister::FromRegisterPair(R0_R1);
+  } else if (shorty[0] == 'J') {
+    return ArmManagedRegister::FromRegisterPair(R0_R1);
+  } else if (shorty[0] == 'V') {
+    return ArmManagedRegister::NoRegister();
+  } else {
+    return ArmManagedRegister::FromCoreRegister(R0);
+  }
+}
+
+ManagedRegister ArmManagedRuntimeCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister ArmJniCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister ArmJniCallingConvention::IntReturnRegister() {
+  return ArmManagedRegister::FromCoreRegister(R0);
+}
+
+// Managed runtime calling convention
+
+ManagedRegister ArmManagedRuntimeCallingConvention::MethodRegister() {
+  return ArmManagedRegister::FromCoreRegister(R0);
+}
+
+bool ArmManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+  return false;  // Everything moved to stack on entry.
+}
+
+bool ArmManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+  return true;
+}
+
+ManagedRegister ArmManagedRuntimeCallingConvention::CurrentParamRegister() {
+  LOG(FATAL) << "Should not reach here";
+  return ManagedRegister::NoRegister();
+}
+
+FrameOffset ArmManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+  CHECK(IsCurrentParamOnStack());
+  FrameOffset result =
+      FrameOffset(displacement_.Int32Value() +   // displacement
+                  kPointerSize +                 // Method*
+                  (itr_slots_ * kPointerSize));  // offset into in args
+  return result;
+}
+
+const std::vector<ManagedRegister>& ArmManagedRuntimeCallingConvention::EntrySpills() {
+  // We spill the argument registers on ARM to free them up for scratch use; we then assume
+  // all arguments are on the stack.  Computed lazily on first call, then cached.
+  if (entry_spills_.size() == 0) {
+    size_t num_spills = NumArgs() + NumLongOrDoubleArgs();
+    if (num_spills > 0) {
+      entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R1));
+      if (num_spills > 1) {
+        entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R2));
+        if (num_spills > 2) {
+          entry_spills_.push_back(ArmManagedRegister::FromCoreRegister(R3));
+        }
+      }
+    }
+  }
+  return entry_spills_;
+}
+// JNI calling convention
+
+ArmJniCallingConvention::ArmJniCallingConvention(bool is_static, bool is_synchronized,
+                                                 const char* shorty)
+    : JniCallingConvention(is_static, is_synchronized, shorty) {
+  // Compute padding to ensure longs and doubles are not split in AAPCS. Ignore the 'this' jobject
+  // or jclass for static methods and the JNIEnv. We start at the aligned register r2.
+  size_t padding = 0;
+  for (size_t cur_arg = IsStatic() ? 0 : 1, cur_reg = 2; cur_arg < NumArgs(); cur_arg++) {
+    if (IsParamALongOrDouble(cur_arg)) {
+      if ((cur_reg & 1) != 0) {
+        padding += 4;
+        cur_reg++;  // additional bump to ensure alignment
+      }
+      cur_reg++;  // additional bump to skip extra long word
+    }
+    cur_reg++;  // bump the iterator for every argument
+  }
+  padding_ = padding;
+
+  callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R5));
+  callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R6));
+  callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R7));
+  callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R8));
+  callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R10));
+  callee_save_regs_.push_back(ArmManagedRegister::FromCoreRegister(R11));
+}
+
+uint32_t ArmJniCallingConvention::CoreSpillMask() const {
+  // Spill mask agrees with callee saves set up in the constructor, plus LR for the return address
+  uint32_t result = 0;
+  result =  1 << R5 | 1 << R6 | 1 << R7 | 1 << R8 | 1 << R10 | 1 << R11 | 1 << LR;
+  return result;
+}
+
+ManagedRegister ArmJniCallingConvention::ReturnScratchRegister() const {
+  return ArmManagedRegister::FromCoreRegister(R2);
+}
+
+size_t ArmJniCallingConvention::FrameSize() {
+  // Method*, LR and callee save area size, local reference segment state
+  size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kPointerSize;
+  // References plus 2 words for SIRT header
+  size_t sirt_size = (ReferenceCount() + 2) * kPointerSize;
+  // Plus return value spill area size
+  return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+}
+
+size_t ArmJniCallingConvention::OutArgSize() {
+  return RoundUp(NumberOfOutgoingStackArgs() * kPointerSize + padding_,
+                 kStackAlignment);
+}
+
+// JniCallingConvention ABI follows AAPCS where longs and doubles must occur
+// in even register numbers and stack slots
+void ArmJniCallingConvention::Next() {
+  JniCallingConvention::Next();
+  size_t arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+  if ((itr_args_ >= 2) &&
+      (arg_pos < NumArgs()) &&
+      IsParamALongOrDouble(arg_pos)) {
+    // itr_slots_ needs to be an even number, according to AAPCS.
+    if ((itr_slots_ & 0x1u) != 0) {
+      itr_slots_++;
+    }
+  }
+}
+
+bool ArmJniCallingConvention::IsCurrentParamInRegister() {
+  return itr_slots_ < 4;
+}
+
+bool ArmJniCallingConvention::IsCurrentParamOnStack() {
+  return !IsCurrentParamInRegister();
+}
+
+static const Register kJniArgumentRegisters[] = {
+  R0, R1, R2, R3
+};
+ManagedRegister ArmJniCallingConvention::CurrentParamRegister() {
+  CHECK_LT(itr_slots_, 4u);
+  int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+  if ((itr_args_ >= 2) && IsParamALongOrDouble(arg_pos)) {
+    CHECK_EQ(itr_slots_, 2u);
+    return ArmManagedRegister::FromRegisterPair(R2_R3);
+  } else {
+    return
+      ArmManagedRegister::FromCoreRegister(kJniArgumentRegisters[itr_slots_]);
+  }
+}
+
+FrameOffset ArmJniCallingConvention::CurrentParamStackOffset() {
+  CHECK_GE(itr_slots_, 4u);
+  size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_slots_ - 4) * kPointerSize);
+  CHECK_LT(offset, OutArgSize());
+  return FrameOffset(offset);
+}
+
+size_t ArmJniCallingConvention::NumberOfOutgoingStackArgs() {
+  size_t static_args = IsStatic() ? 1 : 0;  // count jclass
+  // regular argument parameters and this
+  size_t param_args = NumArgs() + NumLongOrDoubleArgs();
+  // add 1 for JNIEnv*, subtract the 4 argument slots passed in registers (r0-r3)
+  return static_args + param_args + 1 - 4;
+}
+
+}  // namespace arm
+}  // namespace art
diff --git a/src/compiler/jni/quick/arm/calling_convention_arm.h b/src/compiler/jni/quick/arm/calling_convention_arm.h
new file mode 100644
index 0000000..7a3e738
--- /dev/null
+++ b/src/compiler/jni/quick/arm/calling_convention_arm.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_JNI_ARM_CALLING_CONVENTION_ARM_H_
+#define ART_SRC_OAT_JNI_ARM_CALLING_CONVENTION_ARM_H_
+
+#include "compiler/jni/quick/calling_convention.h"
+
+namespace art {
+namespace arm {
+
+class ArmManagedRuntimeCallingConvention : public ManagedRuntimeCallingConvention {
+ public:
+  ArmManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty) {}
+  virtual ~ArmManagedRuntimeCallingConvention() {}
+  // Calling convention
+  virtual ManagedRegister ReturnRegister();
+  virtual ManagedRegister InterproceduralScratchRegister();
+  // Managed runtime calling convention
+  virtual ManagedRegister MethodRegister();
+  virtual bool IsCurrentParamInRegister();
+  virtual bool IsCurrentParamOnStack();
+  virtual ManagedRegister CurrentParamRegister();
+  virtual FrameOffset CurrentParamStackOffset();
+  virtual const std::vector<ManagedRegister>& EntrySpills();
+
+ private:
+  std::vector<ManagedRegister> entry_spills_;
+
+  DISALLOW_COPY_AND_ASSIGN(ArmManagedRuntimeCallingConvention);
+};
+
+class ArmJniCallingConvention : public JniCallingConvention {
+ public:
+  explicit ArmJniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+  virtual ~ArmJniCallingConvention() {}
+  // Calling convention
+  virtual ManagedRegister ReturnRegister();
+  virtual ManagedRegister IntReturnRegister();
+  virtual ManagedRegister InterproceduralScratchRegister();
+  // JNI calling convention
+  virtual void Next();  // Override default behavior for AAPCS
+  virtual size_t FrameSize();
+  virtual size_t OutArgSize();
+  virtual const std::vector<ManagedRegister>& CalleeSaveRegisters() const {
+    return callee_save_regs_;
+  }
+  virtual ManagedRegister ReturnScratchRegister() const;
+  virtual uint32_t CoreSpillMask() const;
+  virtual uint32_t FpSpillMask() const {
+    return 0;  // Floats aren't spilled in JNI down call
+  }
+  virtual bool IsCurrentParamInRegister();
+  virtual bool IsCurrentParamOnStack();
+  virtual ManagedRegister CurrentParamRegister();
+  virtual FrameOffset CurrentParamStackOffset();
+
+ protected:
+  virtual size_t NumberOfOutgoingStackArgs();
+
+ private:
+  // TODO: these values aren't unique and can be shared amongst instances
+  std::vector<ManagedRegister> callee_save_regs_;
+
+  // Padding to ensure longs and doubles are not split in AAPCS
+  size_t padding_;
+
+  DISALLOW_COPY_AND_ASSIGN(ArmJniCallingConvention);
+};
+
+}  // namespace arm
+}  // namespace art
+
+#endif  // ART_SRC_OAT_JNI_ARM_CALLING_CONVENTION_ARM_H_
diff --git a/src/compiler/jni/quick/calling_convention.cc b/src/compiler/jni/quick/calling_convention.cc
new file mode 100644
index 0000000..512483e
--- /dev/null
+++ b/src/compiler/jni/quick/calling_convention.cc
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention.h"
+
+#include "base/logging.h"
+#include "compiler/jni/quick/arm/calling_convention_arm.h"
+#include "compiler/jni/quick/mips/calling_convention_mips.h"
+#include "compiler/jni/quick/x86/calling_convention_x86.h"
+#include "utils.h"
+
+namespace art {
+
+// Offset of Method within the frame
+FrameOffset CallingConvention::MethodStackOffset() {
+  return displacement_;
+}
+
+// Managed runtime calling convention
+
+ManagedRuntimeCallingConvention* ManagedRuntimeCallingConvention::Create(
+    bool is_static, bool is_synchronized, const char* shorty, InstructionSet instruction_set) {
+  switch (instruction_set) {
+    case kArm:
+    case kThumb2:
+      return new arm::ArmManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+    case kMips:
+      return new mips::MipsManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+    case kX86:
+      return new x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+    default:
+      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
+      return NULL;
+  }
+}
+
+bool ManagedRuntimeCallingConvention::HasNext() {
+  return itr_args_ < NumArgs();
+}
+
+void ManagedRuntimeCallingConvention::Next() {
+  CHECK(HasNext());
+  if (IsCurrentArgExplicit() &&  // don't query parameter type of implicit args
+      IsParamALongOrDouble(itr_args_)) {
+    itr_longs_and_doubles_++;
+    itr_slots_++;
+  }
+  if (IsCurrentParamAReference()) {
+    itr_refs_++;
+  }
+  itr_args_++;
+  itr_slots_++;
+}
+
+bool ManagedRuntimeCallingConvention::IsCurrentArgExplicit() {
+  // Static methods have no implicit arguments, others implicitly pass this
+  return IsStatic() || (itr_args_ != 0);
+}
+
+bool ManagedRuntimeCallingConvention::IsCurrentArgPossiblyNull() {
+  return IsCurrentArgExplicit();  // any user parameter may be null
+}
+
+size_t ManagedRuntimeCallingConvention::CurrentParamSize() {
+  return ParamSize(itr_args_);
+}
+
+bool ManagedRuntimeCallingConvention::IsCurrentParamAReference() {
+  return IsParamAReference(itr_args_);
+}
+
+// JNI calling convention
+
+JniCallingConvention* JniCallingConvention::Create(bool is_static, bool is_synchronized,
+                                                   const char* shorty,
+                                                   InstructionSet instruction_set) {
+  switch (instruction_set) {
+    case kArm:
+    case kThumb2:
+      return new arm::ArmJniCallingConvention(is_static, is_synchronized, shorty);
+    case kMips:
+      return new mips::MipsJniCallingConvention(is_static, is_synchronized, shorty);
+    case kX86:
+      return new x86::X86JniCallingConvention(is_static, is_synchronized, shorty);
+    default:
+      LOG(FATAL) << "Unknown InstructionSet: " << instruction_set;
+      return NULL;
+  }
+}
+
+size_t JniCallingConvention::ReferenceCount() const {
+  return NumReferenceArgs() + (IsStatic() ? 1 : 0);
+}
+
+FrameOffset JniCallingConvention::SavedLocalReferenceCookieOffset() const {
+  size_t start_of_sirt = SirtLinkOffset().Int32Value() +  kPointerSize;
+  size_t references_size = kPointerSize * ReferenceCount();  // size excluding header
+  return FrameOffset(start_of_sirt + references_size);
+}
+
+FrameOffset JniCallingConvention::ReturnValueSaveLocation() const {
+  // Segment state is 4 bytes long
+  return FrameOffset(SavedLocalReferenceCookieOffset().Int32Value() + 4);
+}
+
+bool JniCallingConvention::HasNext() {
+  if (itr_args_ <= kObjectOrClass) {
+    return true;
+  } else {
+    unsigned int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+    return arg_pos < NumArgs();
+  }
+}
+
+void JniCallingConvention::Next() {
+  CHECK(HasNext());
+  if (itr_args_ > kObjectOrClass) {
+    int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+    if (IsParamALongOrDouble(arg_pos)) {
+      itr_longs_and_doubles_++;
+      itr_slots_++;
+    }
+  }
+  if (IsCurrentParamAReference()) {
+    itr_refs_++;
+  }
+  itr_args_++;
+  itr_slots_++;
+}
+
+bool JniCallingConvention::IsCurrentParamAReference() {
+  switch (itr_args_) {
+    case kJniEnv:
+      return false;  // JNIEnv*
+    case kObjectOrClass:
+      return true;   // jobject or jclass
+    default: {
+      int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+      return IsParamAReference(arg_pos);
+    }
+  }
+}
+
+// Return position of SIRT entry holding reference at the current iterator
+// position
+FrameOffset JniCallingConvention::CurrentParamSirtEntryOffset() {
+  CHECK(IsCurrentParamAReference());
+  CHECK_GT(SirtLinkOffset(), SirtNumRefsOffset());
+  // Address of 1st SIRT entry
+  int result = SirtLinkOffset().Int32Value() + kPointerSize;
+  result += itr_refs_ * kPointerSize;
+  CHECK_GT(result, SirtLinkOffset().Int32Value());
+  return FrameOffset(result);
+}
+
+size_t JniCallingConvention::CurrentParamSize() {
+  if (itr_args_ <= kObjectOrClass) {
+    return kPointerSize;  // JNIEnv or jobject/jclass
+  } else {
+    int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+    return ParamSize(arg_pos);
+  }
+}
+
+size_t JniCallingConvention::NumberOfExtraArgumentsForJni() {
+  // The first argument is the JNIEnv*.
+  // Static methods have an extra argument which is the jclass.
+  return IsStatic() ? 2 : 1;
+}
+
+}  // namespace art
diff --git a/src/compiler/jni/quick/calling_convention.h b/src/compiler/jni/quick/calling_convention.h
new file mode 100644
index 0000000..121d1f8
--- /dev/null
+++ b/src/compiler/jni/quick/calling_convention.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_JNI_CALLING_CONVENTION_H_
+#define ART_SRC_OAT_JNI_CALLING_CONVENTION_H_
+
+#include <vector>
+#include "oat/utils/managed_register.h"
+#include "stack_indirect_reference_table.h"
+#include "thread.h"
+
+namespace art {
+
+// Top-level abstraction for different calling conventions
+class CallingConvention {
+ public:
+  bool IsReturnAReference() const { return shorty_[0] == 'L'; }
+
+  Primitive::Type GetReturnType() const {
+    return Primitive::GetType(shorty_[0]);
+  }
+
+  size_t SizeOfReturnValue() const {
+    size_t result = Primitive::ComponentSize(Primitive::GetType(shorty_[0]));
+    if (result >= 1 && result < 4) {
+      result = 4;
+    }
+    return result;
+  }
+
+  // Register that holds result of this method invocation.
+  virtual ManagedRegister ReturnRegister() = 0;
+  // Register reserved for scratch usage during procedure calls.
+  virtual ManagedRegister InterproceduralScratchRegister() = 0;
+
+  // Offset of Method within the frame
+  FrameOffset MethodStackOffset();
+
+  // Iterator interface
+
+  // Place iterator at start of arguments. The displacement is applied to
+  // frame offset methods to account for frames which may be on the stack
+  // below the one being iterated over.
+  void ResetIterator(FrameOffset displacement) {
+    displacement_ = displacement;
+    itr_slots_ = 0;
+    itr_args_ = 0;
+    itr_refs_ = 0;
+    itr_longs_and_doubles_ = 0;
+  }
+
+  virtual ~CallingConvention() {}
+
+ protected:
+  CallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : displacement_(0), is_static_(is_static), is_synchronized_(is_synchronized),
+        shorty_(shorty) {
+    num_args_ = (is_static ? 0 : 1) + strlen(shorty) - 1;
+    num_ref_args_ = is_static ? 0 : 1;  // The implicit this pointer.
+    num_long_or_double_args_ = 0;
+    for (size_t i = 1; i < strlen(shorty); i++) {
+      char ch = shorty_[i];
+      if (ch == 'L') {
+        num_ref_args_++;
+      } else if ((ch == 'D') || (ch == 'J')) {
+        num_long_or_double_args_++;
+      }
+    }
+  }
+
+  bool IsStatic() const {
+    return is_static_;
+  }
+  bool IsSynchronized() const {
+    return is_synchronized_;
+  }
+  bool IsParamALongOrDouble(unsigned int param) const {
+    DCHECK_LT(param, NumArgs());
+    if (IsStatic()) {
+      param++;  // 0th argument must skip return value at start of the shorty
+    } else if (param == 0) {
+      return false;  // this argument
+    }
+    char ch = shorty_[param];
+    return (ch == 'J' || ch == 'D');
+  }
+  bool IsParamAReference(unsigned int param) const {
+    DCHECK_LT(param, NumArgs());
+    if (IsStatic()) {
+      param++;  // 0th argument must skip return value at start of the shorty
+    } else if (param == 0) {
+      return true;  // this argument
+    }
+    return shorty_[param] == 'L';
+  }
+  size_t NumArgs() const {
+    return num_args_;
+  }
+  size_t NumLongOrDoubleArgs() const {
+    return num_long_or_double_args_;
+  }
+  size_t NumReferenceArgs() const {
+    return num_ref_args_;
+  }
+  size_t ParamSize(unsigned int param) const {
+    DCHECK_LT(param, NumArgs());
+    if (IsStatic()) {
+      param++;  // 0th argument must skip return value at start of the shorty
+    } else if (param == 0) {
+      return kPointerSize;  // this argument
+    }
+    size_t result = Primitive::ComponentSize(Primitive::GetType(shorty_[param]));
+    if (result >= 1 && result < 4) {
+      result = 4;
+    }
+    return result;
+  }
+  const char* GetShorty() const {
+    return shorty_.c_str();
+  }
+  // The slot number for current calling_convention argument.
+  // Note that each slot is 32-bit. When the current argument is bigger
+  // than 32 bits, return the first slot number for this argument.
+  unsigned int itr_slots_;
+  // The number of references iterated past
+  unsigned int itr_refs_;
+  // The argument number along argument list for current argument
+  unsigned int itr_args_;
+  // Number of longs and doubles seen along argument list
+  unsigned int itr_longs_and_doubles_;
+  // Space for frames below this on the stack
+  FrameOffset displacement_;
+
+ private:
+  const bool is_static_;
+  const bool is_synchronized_;
+  std::string shorty_;
+  size_t num_args_;
+  size_t num_ref_args_;
+  size_t num_long_or_double_args_;
+};
+
+// Abstraction for managed code's calling conventions
+// | { Incoming stack args } |
+// | { Prior Method* }       | <-- Prior SP
+// | { Return address }      |
+// | { Callee saves }        |
+// | { Spills ... }          |
+// | { Outgoing stack args } |
+// | { Method* }             | <-- SP
+class ManagedRuntimeCallingConvention : public CallingConvention {
+ public:
+  static ManagedRuntimeCallingConvention* Create(bool is_static, bool is_synchronized,
+                                                 const char* shorty,
+                                                 InstructionSet instruction_set);
+
+  // Register that holds the incoming method argument
+  virtual ManagedRegister MethodRegister() = 0;
+
+  // Iterator interface
+  bool HasNext();
+  void Next();
+  bool IsCurrentParamAReference();
+  bool IsCurrentArgExplicit();  // i.e. a non-implicit argument such as 'this'
+  bool IsCurrentArgPossiblyNull();
+  size_t CurrentParamSize();
+  virtual bool IsCurrentParamInRegister() = 0;
+  virtual bool IsCurrentParamOnStack() = 0;
+  virtual ManagedRegister CurrentParamRegister() = 0;
+  virtual FrameOffset CurrentParamStackOffset() = 0;
+
+  virtual ~ManagedRuntimeCallingConvention() {}
+
+  // Registers to spill to caller's out registers on entry.
+  virtual const std::vector<ManagedRegister>& EntrySpills() = 0;
+
+ protected:
+  ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : CallingConvention(is_static, is_synchronized, shorty) {}
+};
+
+// Abstraction for JNI calling conventions
+// | { Incoming stack args }         | <-- Prior SP
+// | { Return address }              |
+// | { Callee saves }                |     ([1])
+// | { Return value spill }          |     (live on return slow paths)
+// | { Local Ref. Table State }      |
+// | { Stack Indirect Ref. Table     |
+// |   num. refs./link }             |     (here to prior SP is frame size)
+// | { Method* }                     | <-- Anchor SP written to thread
+// | { Outgoing stack args }         | <-- SP at point of call
+// | Native frame                    |
+//
+// [1] We must save all callee saves here to enable any exception throws to restore
+// callee saves for frames above this one.
+class JniCallingConvention : public CallingConvention {
+ public:
+  static JniCallingConvention* Create(bool is_static, bool is_synchronized, const char* shorty,
+                                      InstructionSet instruction_set);
+
+  // Size of frame excluding space for outgoing args (its assumed Method* is
+  // always at the bottom of a frame, but this doesn't work for outgoing
+  // native args). Includes alignment.
+  virtual size_t FrameSize() = 0;
+  // Size of outgoing arguments, including alignment
+  virtual size_t OutArgSize() = 0;
+  // Number of references in stack indirect reference table
+  size_t ReferenceCount() const;
+  // Location where the segment state of the local indirect reference table is saved
+  FrameOffset SavedLocalReferenceCookieOffset() const;
+  // Location where the return value of a call can be squirreled if another
+  // call is made following the native call
+  FrameOffset ReturnValueSaveLocation() const;
+  // Register that holds result if it is integer.
+  virtual ManagedRegister IntReturnRegister() = 0;
+
+  // Callee save registers to spill prior to native code (which may clobber)
+  virtual const std::vector<ManagedRegister>& CalleeSaveRegisters() const = 0;
+
+  // Spill mask values
+  virtual uint32_t CoreSpillMask() const = 0;
+  virtual uint32_t FpSpillMask() const = 0;
+
+  // An extra scratch register live after the call
+  virtual ManagedRegister ReturnScratchRegister() const = 0;
+
+  // Iterator interface
+  bool HasNext();
+  virtual void Next();
+  bool IsCurrentParamAReference();
+  size_t CurrentParamSize();
+  virtual bool IsCurrentParamInRegister() = 0;
+  virtual bool IsCurrentParamOnStack() = 0;
+  virtual ManagedRegister CurrentParamRegister() = 0;
+  virtual FrameOffset CurrentParamStackOffset() = 0;
+
+  // Iterator interface extension for JNI
+  FrameOffset CurrentParamSirtEntryOffset();
+
+  // Position of SIRT and interior fields
+  FrameOffset SirtOffset() const {
+    return FrameOffset(displacement_.Int32Value() +
+                       kPointerSize);  // above Method*
+  }
+  FrameOffset SirtNumRefsOffset() const {
+    return FrameOffset(SirtOffset().Int32Value() +
+                       StackIndirectReferenceTable::NumberOfReferencesOffset());
+  }
+  FrameOffset SirtLinkOffset() const {
+    return FrameOffset(SirtOffset().Int32Value() +
+                       StackIndirectReferenceTable::LinkOffset());
+  }
+
+  virtual ~JniCallingConvention() {}
+
+ protected:
+  // Named iterator positions
+  enum IteratorPos {
+    kJniEnv = 0,
+    kObjectOrClass = 1
+  };
+
+  explicit JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : CallingConvention(is_static, is_synchronized, shorty) {}
+
+  // Number of stack slots for outgoing arguments, above which the SIRT is
+  // located
+  virtual size_t NumberOfOutgoingStackArgs() = 0;
+
+ protected:
+  size_t NumberOfExtraArgumentsForJni();
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_OAT_JNI_CALLING_CONVENTION_H_
diff --git a/src/compiler/jni/quick/jni_compiler.cc b/src/compiler/jni/quick/jni_compiler.cc
new file mode 100644
index 0000000..c4919fb
--- /dev/null
+++ b/src/compiler/jni/quick/jni_compiler.cc
@@ -0,0 +1,488 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "calling_convention.h"
+#include "class_linker.h"
+#include "compiled_method.h"
+#include "compiler/driver/compiler_driver.h"
+#include "disassembler.h"
+#include "jni_internal.h"
+#include "oat/runtime/oat_support_entrypoints.h"
+#include "oat/utils/assembler.h"
+#include "oat/utils/managed_register.h"
+#include "oat/utils/arm/managed_register_arm.h"
+#include "oat/utils/mips/managed_register_mips.h"
+#include "oat/utils/x86/managed_register_x86.h"
+#include "thread.h"
+#include "UniquePtr.h"
+
+#define __ jni_asm->
+
+namespace art {
+
+static void CopyParameter(Assembler* jni_asm,
+                          ManagedRuntimeCallingConvention* mr_conv,
+                          JniCallingConvention* jni_conv,
+                          size_t frame_size, size_t out_arg_size);
+static void SetNativeParameter(Assembler* jni_asm,
+                               JniCallingConvention* jni_conv,
+                               ManagedRegister in_reg);
+
+// Generate the JNI bridge for the given method, general contract:
+// - Arguments are in the managed runtime format, either on stack or in
+//   registers, a reference to the method object is supplied as part of this
+//   convention.
+//
+// Handles the whole bridge in one pass: build a frame containing a stack
+// indirect reference table (SIRT), spill incoming reference arguments into
+// it, transition the thread around the native call via JniMethodStart/End,
+// shuffle arguments into the native ABI, and marshal the return value back.
+// Returns a newly allocated CompiledMethod owning the generated code.
+CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver& compiler,
+                                            uint32_t access_flags, uint32_t method_idx,
+                                            const DexFile& dex_file) {
+  const bool is_native = (access_flags & kAccNative) != 0;
+  CHECK(is_native);
+  const bool is_static = (access_flags & kAccStatic) != 0;
+  const bool is_synchronized = (access_flags & kAccSynchronized) != 0;
+  const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
+  InstructionSet instruction_set = compiler.GetInstructionSet();
+  // Thumb2 shares the ARM calling conventions and assembler selection.
+  if (instruction_set == kThumb2) {
+    instruction_set = kArm;
+  }
+  // Calling conventions used to iterate over parameters to method
+  UniquePtr<JniCallingConvention> main_jni_conv(
+      JniCallingConvention::Create(is_static, is_synchronized, shorty, instruction_set));
+  bool reference_return = main_jni_conv->IsReturnAReference();
+
+  UniquePtr<ManagedRuntimeCallingConvention> mr_conv(
+      ManagedRuntimeCallingConvention::Create(is_static, is_synchronized, shorty, instruction_set));
+
+  // Calling conventions to call into JNI method "end" possibly passing a returned reference, the
+  //     method and the current thread.
+  size_t jni_end_arg_count = 0;
+  if (reference_return) { jni_end_arg_count++; }
+  if (is_synchronized) { jni_end_arg_count++; }
+  // Synthetic shorty for the end call: only the argument count matters, so
+  // 'I' stands in for each pointer-sized argument.
+  const char* jni_end_shorty = jni_end_arg_count == 0 ? "I"
+                                                        : (jni_end_arg_count == 1 ? "II" : "III");
+  UniquePtr<JniCallingConvention> end_jni_conv(
+      JniCallingConvention::Create(is_static, is_synchronized, jni_end_shorty, instruction_set));
+
+
+  // Assembler that holds generated instructions
+  UniquePtr<Assembler> jni_asm(Assembler::Create(instruction_set));
+  bool should_disassemble = false;  // Flip locally to dump generated code below.
+
+  // Offsets into data structures
+  // TODO: if cross compiling these offsets are for the host not the target
+  const Offset functions(OFFSETOF_MEMBER(JNIEnvExt, functions));
+  const Offset monitor_enter(OFFSETOF_MEMBER(JNINativeInterface, MonitorEnter));
+  const Offset monitor_exit(OFFSETOF_MEMBER(JNINativeInterface, MonitorExit));
+
+  // 1. Build the frame saving all callee saves
+  const size_t frame_size(main_jni_conv->FrameSize());
+  const std::vector<ManagedRegister>& callee_save_regs = main_jni_conv->CalleeSaveRegisters();
+  __ BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills());
+
+  // 2. Set up the StackIndirectReferenceTable
+  mr_conv->ResetIterator(FrameOffset(frame_size));
+  main_jni_conv->ResetIterator(FrameOffset(0));
+  __ StoreImmediateToFrame(main_jni_conv->SirtNumRefsOffset(),
+                           main_jni_conv->ReferenceCount(),
+                           mr_conv->InterproceduralScratchRegister());
+  // Link the new SIRT into the thread's SIRT chain, then publish it as top.
+  __ CopyRawPtrFromThread(main_jni_conv->SirtLinkOffset(),
+                          Thread::TopSirtOffset(),
+                          mr_conv->InterproceduralScratchRegister());
+  __ StoreStackOffsetToThread(Thread::TopSirtOffset(),
+                              main_jni_conv->SirtOffset(),
+                              mr_conv->InterproceduralScratchRegister());
+
+  // 3. Place incoming reference arguments into SIRT
+  main_jni_conv->Next();  // Skip JNIEnv*
+  // 3.5. Create Class argument for static methods out of passed method
+  if (is_static) {
+    FrameOffset sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
+    // Check sirt offset is within frame
+    CHECK_LT(sirt_offset.Uint32Value(), frame_size);
+    __ LoadRef(main_jni_conv->InterproceduralScratchRegister(),
+               mr_conv->MethodRegister(), mirror::AbstractMethod::DeclaringClassOffset());
+    __ VerifyObject(main_jni_conv->InterproceduralScratchRegister(), false);
+    __ StoreRef(sirt_offset, main_jni_conv->InterproceduralScratchRegister());
+    main_jni_conv->Next();  // in SIRT so move to next argument
+  }
+  while (mr_conv->HasNext()) {
+    CHECK(main_jni_conv->HasNext());
+    bool ref_param = main_jni_conv->IsCurrentParamAReference();
+    CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
+    // References need placing in SIRT and the entry value passing
+    if (ref_param) {
+      // Compute SIRT entry, note null is placed in the SIRT but its boxed value
+      // must be NULL
+      FrameOffset sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
+      // Check SIRT offset is within frame and doesn't run into the saved segment state
+      CHECK_LT(sirt_offset.Uint32Value(), frame_size);
+      CHECK_NE(sirt_offset.Uint32Value(),
+               main_jni_conv->SavedLocalReferenceCookieOffset().Uint32Value());
+      bool input_in_reg = mr_conv->IsCurrentParamInRegister();
+      bool input_on_stack = mr_conv->IsCurrentParamOnStack();
+      CHECK(input_in_reg || input_on_stack);
+
+      if (input_in_reg) {
+        ManagedRegister in_reg  =  mr_conv->CurrentParamRegister();
+        __ VerifyObject(in_reg, mr_conv->IsCurrentArgPossiblyNull());
+        __ StoreRef(sirt_offset, in_reg);
+      } else if (input_on_stack) {
+        FrameOffset in_off  = mr_conv->CurrentParamStackOffset();
+        __ VerifyObject(in_off, mr_conv->IsCurrentArgPossiblyNull());
+        __ CopyRef(sirt_offset, in_off,
+                   mr_conv->InterproceduralScratchRegister());
+      }
+    }
+    mr_conv->Next();
+    main_jni_conv->Next();
+  }
+
+  // 4. Write out the end of the quick frames.
+  __ StoreStackPointerToThread(Thread::TopOfManagedStackOffset());
+  __ StoreImmediateToThread(Thread::TopOfManagedStackPcOffset(), 0,
+                            mr_conv->InterproceduralScratchRegister());
+
+  // 5. Move frame down to allow space for out going args.
+  const size_t main_out_arg_size = main_jni_conv->OutArgSize();
+  const size_t end_out_arg_size = end_jni_conv->OutArgSize();
+  const size_t max_out_arg_size = std::max(main_out_arg_size, end_out_arg_size);
+  __ IncreaseFrameSize(max_out_arg_size);
+
+
+  // 6. Call into appropriate JniMethodStart passing Thread* so that transition out of Runnable
+  //    can occur. The result is the saved JNI local state that is restored by the exit call. We
+  //    abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer
+  //    arguments.
+  uintptr_t jni_start = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodStartSynchronized)
+                                        : ENTRYPOINT_OFFSET(pJniMethodStart);
+  main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+  // SIRT slot of the object locked for synchronized methods ('this' or the class).
+  FrameOffset locked_object_sirt_offset(0);
+  if (is_synchronized) {
+    // Pass object for locking.
+    main_jni_conv->Next();  // Skip JNIEnv.
+    locked_object_sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
+    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+    if (main_jni_conv->IsCurrentParamOnStack()) {
+      FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
+      __ CreateSirtEntry(out_off, locked_object_sirt_offset,
+                         mr_conv->InterproceduralScratchRegister(),
+                         false);
+    } else {
+      ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
+      __ CreateSirtEntry(out_reg, locked_object_sirt_offset,
+                         ManagedRegister::NoRegister(), false);
+    }
+    main_jni_conv->Next();
+  }
+  if (main_jni_conv->IsCurrentParamInRegister()) {
+    __ GetCurrentThread(main_jni_conv->CurrentParamRegister());
+    __ Call(main_jni_conv->CurrentParamRegister(), Offset(jni_start),
+            main_jni_conv->InterproceduralScratchRegister());
+  } else {
+    __ GetCurrentThread(main_jni_conv->CurrentParamStackOffset(),
+                        main_jni_conv->InterproceduralScratchRegister());
+    __ Call(ThreadOffset(jni_start), main_jni_conv->InterproceduralScratchRegister());
+  }
+  if (is_synchronized) {  // Check for exceptions from monitor enter.
+    __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), main_out_arg_size);
+  }
+  // Spill the cookie returned by JniMethodStart; passed back to the end call.
+  FrameOffset saved_cookie_offset = main_jni_conv->SavedLocalReferenceCookieOffset();
+  __ Store(saved_cookie_offset, main_jni_conv->IntReturnRegister(), 4);
+
+  // 7. Iterate over arguments placing values from managed calling convention in
+  //    to the convention required for a native call (shuffling). For references
+  //    place an index/pointer to the reference after checking whether it is
+  //    NULL (which must be encoded as NULL).
+  //    Note: we do this prior to materializing the JNIEnv* and static's jclass to
+  //    give as many free registers for the shuffle as possible
+  mr_conv->ResetIterator(FrameOffset(frame_size+main_out_arg_size));
+  uint32_t args_count = 0;
+  while (mr_conv->HasNext()) {
+    args_count++;
+    mr_conv->Next();
+  }
+
+  // Do a backward pass over arguments, so that the generated code will be "mov
+  // R2, R3; mov R1, R2" instead of "mov R1, R2; mov R2, R3."
+  // TODO: A reverse iterator to improve readability.
+  for (uint32_t i = 0; i < args_count; ++i) {
+    mr_conv->ResetIterator(FrameOffset(frame_size + main_out_arg_size));
+    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+    main_jni_conv->Next();  // Skip JNIEnv*.
+    if (is_static) {
+      main_jni_conv->Next();  // Skip Class for now.
+    }
+    // Skip to the argument we're interested in.
+    for (uint32_t j = 0; j < args_count - i - 1; ++j) {
+      mr_conv->Next();
+      main_jni_conv->Next();
+    }
+    CopyParameter(jni_asm.get(), mr_conv.get(), main_jni_conv.get(), frame_size, main_out_arg_size);
+  }
+  if (is_static) {
+    // Create argument for Class
+    mr_conv->ResetIterator(FrameOffset(frame_size+main_out_arg_size));
+    main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+    main_jni_conv->Next();  // Skip JNIEnv*
+    FrameOffset sirt_offset = main_jni_conv->CurrentParamSirtEntryOffset();
+    if (main_jni_conv->IsCurrentParamOnStack()) {
+      FrameOffset out_off = main_jni_conv->CurrentParamStackOffset();
+      __ CreateSirtEntry(out_off, sirt_offset,
+                         mr_conv->InterproceduralScratchRegister(),
+                         false);
+    } else {
+      ManagedRegister out_reg = main_jni_conv->CurrentParamRegister();
+      __ CreateSirtEntry(out_reg, sirt_offset,
+                         ManagedRegister::NoRegister(), false);
+    }
+  }
+
+  // 8. Create 1st argument, the JNI environment ptr.
+  main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
+  // Register that will hold local indirect reference table
+  if (main_jni_conv->IsCurrentParamInRegister()) {
+    ManagedRegister jni_env = main_jni_conv->CurrentParamRegister();
+    DCHECK(!jni_env.Equals(main_jni_conv->InterproceduralScratchRegister()));
+    __ LoadRawPtrFromThread(jni_env, Thread::JniEnvOffset());
+  } else {
+    FrameOffset jni_env = main_jni_conv->CurrentParamStackOffset();
+    __ CopyRawPtrFromThread(jni_env, Thread::JniEnvOffset(),
+                            main_jni_conv->InterproceduralScratchRegister());
+  }
+
+  // 9. Plant call to native code associated with method.
+  __ Call(main_jni_conv->MethodStackOffset(), mirror::AbstractMethod::NativeMethodOffset(),
+          mr_conv->InterproceduralScratchRegister());
+
+  // 10. Fix differences in result widths.
+  if (instruction_set == kX86) {
+    if (main_jni_conv->GetReturnType() == Primitive::kPrimByte ||
+        main_jni_conv->GetReturnType() == Primitive::kPrimShort) {
+      __ SignExtend(main_jni_conv->ReturnRegister(),
+                    Primitive::ComponentSize(main_jni_conv->GetReturnType()));
+    } else if (main_jni_conv->GetReturnType() == Primitive::kPrimBoolean ||
+               main_jni_conv->GetReturnType() == Primitive::kPrimChar) {
+      __ ZeroExtend(main_jni_conv->ReturnRegister(),
+                    Primitive::ComponentSize(main_jni_conv->GetReturnType()));
+    }
+  }
+
+  // 11. Save return value
+  FrameOffset return_save_location = main_jni_conv->ReturnValueSaveLocation();
+  if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) {
+    if (instruction_set == kMips && main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
+        return_save_location.Uint32Value() % 8 != 0) {
+      // Ensure doubles are 8-byte aligned for MIPS
+      return_save_location = FrameOffset(return_save_location.Uint32Value() + kPointerSize);
+    }
+    CHECK_LT(return_save_location.Uint32Value(), frame_size+main_out_arg_size);
+    __ Store(return_save_location, main_jni_conv->ReturnRegister(), main_jni_conv->SizeOfReturnValue());
+  }
+
+  // 12. Call into JNI method end possibly passing a returned reference, the method and the current
+  //     thread.
+  end_jni_conv->ResetIterator(FrameOffset(end_out_arg_size));
+  uintptr_t jni_end;
+  if (reference_return) {
+    // Pass result.
+    jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized)
+                              : ENTRYPOINT_OFFSET(pJniMethodEndWithReference);
+    SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister());
+    end_jni_conv->Next();
+  } else {
+    jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndSynchronized)
+                              : ENTRYPOINT_OFFSET(pJniMethodEnd);
+  }
+  // Pass saved local reference state.
+  if (end_jni_conv->IsCurrentParamOnStack()) {
+    FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
+    __ Copy(out_off, saved_cookie_offset, end_jni_conv->InterproceduralScratchRegister(), 4);
+  } else {
+    ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
+    __ Load(out_reg, saved_cookie_offset, 4);
+  }
+  end_jni_conv->Next();
+  if (is_synchronized) {
+    // Pass object for unlocking.
+    if (end_jni_conv->IsCurrentParamOnStack()) {
+      FrameOffset out_off = end_jni_conv->CurrentParamStackOffset();
+      __ CreateSirtEntry(out_off, locked_object_sirt_offset,
+                         end_jni_conv->InterproceduralScratchRegister(),
+                         false);
+    } else {
+      ManagedRegister out_reg = end_jni_conv->CurrentParamRegister();
+      __ CreateSirtEntry(out_reg, locked_object_sirt_offset,
+                         ManagedRegister::NoRegister(), false);
+    }
+    end_jni_conv->Next();
+  }
+  if (end_jni_conv->IsCurrentParamInRegister()) {
+    __ GetCurrentThread(end_jni_conv->CurrentParamRegister());
+    __ Call(end_jni_conv->CurrentParamRegister(), Offset(jni_end),
+            end_jni_conv->InterproceduralScratchRegister());
+  } else {
+    __ GetCurrentThread(end_jni_conv->CurrentParamStackOffset(),
+                        end_jni_conv->InterproceduralScratchRegister());
+    __ Call(ThreadOffset(jni_end), end_jni_conv->InterproceduralScratchRegister());
+  }
+
+  // 13. Reload return value
+  if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) {
+    __ Load(mr_conv->ReturnRegister(), return_save_location, mr_conv->SizeOfReturnValue());
+  }
+
+  // 14. Move frame up now we're done with the out arg space.
+  __ DecreaseFrameSize(max_out_arg_size);
+
+  // 15. Process pending exceptions from JNI call or monitor exit.
+  __ ExceptionPoll(main_jni_conv->InterproceduralScratchRegister(), 0);
+
+  // 16. Remove activation - no need to restore callee save registers because we didn't clobber
+  //     them.
+  __ RemoveFrame(frame_size, std::vector<ManagedRegister>());
+
+  // 17. Finalize code generation
+  __ EmitSlowPaths();
+  size_t cs = __ CodeSize();
+  std::vector<uint8_t> managed_code(cs);
+  MemoryRegion code(&managed_code[0], managed_code.size());
+  __ FinalizeInstructions(code);
+  if (should_disassemble) {
+    UniquePtr<Disassembler> disassembler(Disassembler::Create(instruction_set));
+    disassembler->Dump(LOG(INFO), &managed_code[0], &managed_code[managed_code.size()]);
+  }
+  return new CompiledMethod(instruction_set,
+                            managed_code,
+                            frame_size,
+                            main_jni_conv->CoreSpillMask(),
+                            main_jni_conv->FpSpillMask());
+}
+
+// Copy a single parameter from the managed to the JNI calling convention,
+// covering all four source/destination register-vs-stack combinations.
+// Reference parameters are not copied directly: a SIRT entry is written and
+// its address (or NULL, when null is allowed) is what native code receives.
+static void CopyParameter(Assembler* jni_asm,
+                          ManagedRuntimeCallingConvention* mr_conv,
+                          JniCallingConvention* jni_conv,
+                          size_t frame_size, size_t out_arg_size) {
+  bool input_in_reg = mr_conv->IsCurrentParamInRegister();
+  bool output_in_reg = jni_conv->IsCurrentParamInRegister();
+  FrameOffset sirt_offset(0);
+  bool null_allowed = false;
+  bool ref_param = jni_conv->IsCurrentParamAReference();
+  CHECK(!ref_param || mr_conv->IsCurrentParamAReference());
+  // input may be in register, on stack or both - but not none!
+  CHECK(input_in_reg || mr_conv->IsCurrentParamOnStack());
+  if (output_in_reg) {  // output shouldn't straddle registers and stack
+    CHECK(!jni_conv->IsCurrentParamOnStack());
+  } else {
+    CHECK(jni_conv->IsCurrentParamOnStack());
+  }
+  // References need placing in SIRT and the entry address passing
+  if (ref_param) {
+    null_allowed = mr_conv->IsCurrentArgPossiblyNull();
+    // Compute SIRT offset. Note null is placed in the SIRT but the jobject
+    // passed to the native code must be null (not a pointer into the SIRT
+    // as with regular references).
+    sirt_offset = jni_conv->CurrentParamSirtEntryOffset();
+    // Check SIRT offset is within frame.
+    CHECK_LT(sirt_offset.Uint32Value(), (frame_size + out_arg_size));
+  }
+  if (input_in_reg && output_in_reg) {
+    // Case 1: register -> register.
+    ManagedRegister in_reg = mr_conv->CurrentParamRegister();
+    ManagedRegister out_reg = jni_conv->CurrentParamRegister();
+    if (ref_param) {
+      __ CreateSirtEntry(out_reg, sirt_offset, in_reg, null_allowed);
+    } else {
+      if (!mr_conv->IsCurrentParamOnStack()) {
+        // regular non-straddling move
+        __ Move(out_reg, in_reg, mr_conv->CurrentParamSize());
+      } else {
+        UNIMPLEMENTED(FATAL);  // we currently don't expect to see this case
+      }
+    }
+  } else if (!input_in_reg && !output_in_reg) {
+    // Case 2: stack -> stack.
+    FrameOffset out_off = jni_conv->CurrentParamStackOffset();
+    if (ref_param) {
+      __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(),
+                         null_allowed);
+    } else {
+      FrameOffset in_off = mr_conv->CurrentParamStackOffset();
+      size_t param_size = mr_conv->CurrentParamSize();
+      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
+      __ Copy(out_off, in_off, mr_conv->InterproceduralScratchRegister(), param_size);
+    }
+  } else if (!input_in_reg && output_in_reg) {
+    // Case 3: stack -> register.
+    FrameOffset in_off = mr_conv->CurrentParamStackOffset();
+    ManagedRegister out_reg = jni_conv->CurrentParamRegister();
+    // Check that incoming stack arguments are above the current stack frame.
+    CHECK_GT(in_off.Uint32Value(), frame_size);
+    if (ref_param) {
+      __ CreateSirtEntry(out_reg, sirt_offset, ManagedRegister::NoRegister(), null_allowed);
+    } else {
+      size_t param_size = mr_conv->CurrentParamSize();
+      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
+      __ Load(out_reg, in_off, param_size);
+    }
+  } else {
+    // Case 4: register -> stack.
+    CHECK(input_in_reg && !output_in_reg);
+    ManagedRegister in_reg = mr_conv->CurrentParamRegister();
+    FrameOffset out_off = jni_conv->CurrentParamStackOffset();
+    // Check outgoing argument is within frame
+    CHECK_LT(out_off.Uint32Value(), frame_size);
+    if (ref_param) {
+      // TODO: recycle value in in_reg rather than reload from SIRT
+      __ CreateSirtEntry(out_off, sirt_offset, mr_conv->InterproceduralScratchRegister(),
+                         null_allowed);
+    } else {
+      size_t param_size = mr_conv->CurrentParamSize();
+      CHECK_EQ(param_size, jni_conv->CurrentParamSize());
+      if (!mr_conv->IsCurrentParamOnStack()) {
+        // regular non-straddling store
+        __ Store(out_off, in_reg, param_size);
+      } else {
+        // store where input straddles registers and stack
+        CHECK_EQ(param_size, 8u);
+        FrameOffset in_off = mr_conv->CurrentParamStackOffset();
+        __ StoreSpanning(out_off, in_reg, in_off, mr_conv->InterproceduralScratchRegister());
+      }
+    }
+  }
+}
+
+// Place a value (e.g. the jobject result handed to the JNI "end" routine)
+// where the JNI convention expects the current parameter: store it to the
+// out-args stack slot, or move it into the parameter register if it is not
+// already there.
+static void SetNativeParameter(Assembler* jni_asm,
+                               JniCallingConvention* jni_conv,
+                               ManagedRegister in_reg) {
+  if (jni_conv->IsCurrentParamOnStack()) {
+    FrameOffset dest = jni_conv->CurrentParamStackOffset();
+    __ StoreRawPtr(dest, in_reg);
+  } else {
+    if (!jni_conv->CurrentParamRegister().Equals(in_reg)) {
+      __ Move(jni_conv->CurrentParamRegister(), in_reg, jni_conv->CurrentParamSize());
+    }
+  }
+}
+
+}  // namespace art
+
+// C-linkage entry point wrapping ArtJniCompileMethodInternal so it can be
+// resolved without C++ name mangling.
+extern "C" art::CompiledMethod* ArtQuickJniCompileMethod(art::CompilerDriver& compiler,
+                                                         uint32_t access_flags, uint32_t method_idx,
+                                                         const art::DexFile& dex_file) {
+  return ArtJniCompileMethodInternal(compiler, access_flags, method_idx, dex_file);
+}
diff --git a/src/compiler/jni/quick/mips/calling_convention_mips.cc b/src/compiler/jni/quick/mips/calling_convention_mips.cc
new file mode 100644
index 0000000..053ab44
--- /dev/null
+++ b/src/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention_mips.h"
+
+#include "base/logging.h"
+#include "oat/utils/mips/managed_register_mips.h"
+
+namespace art {
+namespace mips {
+
+// Calling convention
+// T9 serves as the inter-procedural scratch register for both conventions.
+// NOTE(review): in the MIPS o32 ABI $t9 conventionally holds the callee
+// address at a call and is caller-trashed, making it safe to clobber here -
+// confirm against the MIPS assembler's call sequences.
+ManagedRegister MipsManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+  return MipsManagedRegister::FromCoreRegister(T9);
+}
+
+ManagedRegister MipsJniCallingConvention::InterproceduralScratchRegister() {
+  return MipsManagedRegister::FromCoreRegister(T9);
+}
+
+// Map a shorty's return character to the MIPS return location: float -> F0,
+// double -> D0, long -> the V0/V1 pair, void -> no register, everything
+// else (int-sized primitives and references) -> V0.
+static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
+  if (shorty[0] == 'F') {
+    return MipsManagedRegister::FromFRegister(F0);
+  } else if (shorty[0] == 'D') {
+    return MipsManagedRegister::FromDRegister(D0);
+  } else if (shorty[0] == 'J') {
+    return MipsManagedRegister::FromRegisterPair(V0_V1);
+  } else if (shorty[0] == 'V') {
+    return MipsManagedRegister::NoRegister();
+  } else {
+    return MipsManagedRegister::FromCoreRegister(V0);
+  }
+}
+
+ManagedRegister MipsManagedRuntimeCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister MipsJniCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty());
+}
+
+// Integer return register (V0); used by the JNI compiler to capture the
+// local reference cookie returned by JniMethodStart.
+ManagedRegister MipsJniCallingConvention::IntReturnRegister() {
+  return MipsManagedRegister::FromCoreRegister(V0);
+}
+
+// Managed runtime calling convention
+
+// The managed ABI passes the Method* in A0.
+ManagedRegister MipsManagedRuntimeCallingConvention::MethodRegister() {
+  return MipsManagedRegister::FromCoreRegister(A0);
+}
+
+bool MipsManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+  return false;  // Everything moved to stack on entry.
+}
+
+bool MipsManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+  return true;
+}
+
+// Unreachable: IsCurrentParamInRegister() above is always false.
+ManagedRegister MipsManagedRuntimeCallingConvention::CurrentParamRegister() {
+  LOG(FATAL) << "Should not reach here";
+  return ManagedRegister::NoRegister();
+}
+
+FrameOffset MipsManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+  CHECK(IsCurrentParamOnStack());
+  FrameOffset result =
+      FrameOffset(displacement_.Int32Value() +   // displacement
+                  kPointerSize +                 // Method*
+                  (itr_slots_ * kPointerSize));  // offset into in args
+  return result;
+}
+
+// Registers the prologue should spill so every incoming argument ends up on
+// the stack, matching the all-on-stack answers above. A0 holds the Method*
+// and is excluded. Computed lazily and cached in entry_spills_.
+// NOTE(review): at most A1-A3 are spilled regardless of num_spills;
+// arguments beyond the register slots presumably arrive on the stack
+// already - confirm against the o32 frame layout.
+const std::vector<ManagedRegister>& MipsManagedRuntimeCallingConvention::EntrySpills() {
+  // We spill the argument registers on MIPS to free them up for scratch use, we then assume
+  // all arguments are on the stack.
+  if (entry_spills_.size() == 0) {
+    size_t num_spills = NumArgs() + NumLongOrDoubleArgs();
+    if (num_spills > 0) {
+      entry_spills_.push_back(MipsManagedRegister::FromCoreRegister(A1));
+      if (num_spills > 1) {
+        entry_spills_.push_back(MipsManagedRegister::FromCoreRegister(A2));
+        if (num_spills > 2) {
+          entry_spills_.push_back(MipsManagedRegister::FromCoreRegister(A3));
+        }
+      }
+    }
+  }
+  return entry_spills_;
+}
+// JNI calling convention
+
+MipsJniCallingConvention::MipsJniCallingConvention(bool is_static, bool is_synchronized,
+                                                 const char* shorty)
+    : JniCallingConvention(is_static, is_synchronized, shorty) {
+  // Compute padding to ensure longs and doubles are not split: 64-bit values
+  // must start at an even register number / 8-byte-aligned slot (MIPS o32
+  // rule, analogous to ARM's AAPCS). Ignore the 'this' jobject or jclass for
+  // static methods and the JNIEnv. We start at the aligned register A2.
+  size_t padding = 0;
+  for (size_t cur_arg = IsStatic() ? 0 : 1, cur_reg = 2; cur_arg < NumArgs(); cur_arg++) {
+    if (IsParamALongOrDouble(cur_arg)) {
+      if ((cur_reg & 1) != 0) {
+        padding += 4;
+        cur_reg++;  // additional bump to ensure alignment
+      }
+      cur_reg++;  // additional bump to skip extra long word
+    }
+    cur_reg++;  // bump the iterator for every argument
+  }
+  padding_ = padding;
+
+  // NOTE(review): T0-T8 are o32 caller-save temporaries; they are treated as
+  // callee-saves by this convention - confirm against the runtime's MIPS
+  // frame setup (the mask below must agree, see CoreSpillMask).
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T0));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T1));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T2));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T3));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T4));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T5));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T6));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T7));
+  callee_save_regs_.push_back(MipsManagedRegister::FromCoreRegister(T8));
+}
+
+uint32_t MipsJniCallingConvention::CoreSpillMask() const {
+  // Compute spill mask to agree with callee saves initialized in the constructor
+  uint32_t result = 0;
+  // RA is included because the return address is saved in the frame too.
+  result = 1 << T0 | 1 << T1 | 1 << T2 | 1 << T3 | 1 << T4 | 1 << T5 | 1 << T6 |
+           1 << T7 | 1 << T8 | 1 << RA;
+  return result;
+}
+
+// Extra scratch register that stays usable while the return value is live.
+ManagedRegister MipsJniCallingConvention::ReturnScratchRegister() const {
+  return MipsManagedRegister::FromCoreRegister(AT);
+}
+
+size_t MipsJniCallingConvention::FrameSize() {
+  // Method*, return address (RA) and callee save area size, local reference segment state
+  size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kPointerSize;
+  // References plus 2 words for SIRT header
+  size_t sirt_size = (ReferenceCount() + 2) * kPointerSize;
+  // Plus return value spill area size
+  return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+}
+
+// Outgoing stack-argument area: one word per outgoing arg plus the 64-bit
+// alignment padding computed in the constructor, rounded to stack alignment.
+size_t MipsJniCallingConvention::OutArgSize() {
+  return RoundUp(NumberOfOutgoingStackArgs() * kPointerSize + padding_,
+                 kStackAlignment);
+}
+
+// Longs and doubles must occur at even register numbers and stack slots
+// (MIPS o32 alignment rule; the original comment said AAPCS, which is the
+// ARM ABI), so skip a slot when the upcoming argument is 64 bits wide.
+void MipsJniCallingConvention::Next() {
+  JniCallingConvention::Next();
+  size_t arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+  if ((itr_args_ >= 2) &&
+      (arg_pos < NumArgs()) &&
+      IsParamALongOrDouble(arg_pos)) {
+    // itr_slots_ needs to be an even number for 8-byte alignment.
+    if ((itr_slots_ & 0x1u) != 0) {
+      itr_slots_++;
+    }
+  }
+}
+
+// The first four argument slots are passed in registers (A0-A3).
+bool MipsJniCallingConvention::IsCurrentParamInRegister() {
+  return itr_slots_ < 4;
+}
+
+bool MipsJniCallingConvention::IsCurrentParamOnStack() {
+  return !IsCurrentParamInRegister();
+}
+
+// o32 integer argument registers, indexed by slot number.
+static const Register kJniArgumentRegisters[] = {
+  A0, A1, A2, A3
+};
+ManagedRegister MipsJniCallingConvention::CurrentParamRegister() {
+  CHECK_LT(itr_slots_, 4u);
+  int arg_pos = itr_args_ - NumberOfExtraArgumentsForJni();
+  // A long/double beyond the leading implicit args occupies the aligned
+  // A2/A3 pair; everything else gets the single register for its slot.
+  if ((itr_args_ >= 2) && IsParamALongOrDouble(arg_pos)) {
+    CHECK_EQ(itr_slots_, 2u);
+    return MipsManagedRegister::FromRegisterPair(A2_A3);
+  } else {
+    return
+      MipsManagedRegister::FromCoreRegister(kJniArgumentRegisters[itr_slots_]);
+  }
+}
+
+// Stack slot for argument slots past the four register slots.
+FrameOffset MipsJniCallingConvention::CurrentParamStackOffset() {
+  CHECK_GE(itr_slots_, 4u);
+  size_t offset = displacement_.Int32Value() - OutArgSize() + (itr_slots_ * kPointerSize);
+  CHECK_LT(offset, OutArgSize());
+  return FrameOffset(offset);
+}
+
+// Total outgoing argument slots: the optional jclass for static methods,
+// every declared argument (with longs/doubles counted twice) plus 'this'
+// for instance methods, and the leading JNIEnv*.
+size_t MipsJniCallingConvention::NumberOfOutgoingStackArgs() {
+  size_t static_args = IsStatic() ? 1 : 0;  // count jclass
+  // regular argument parameters and this
+  size_t param_args = NumArgs() + NumLongOrDoubleArgs();
+  // count JNIEnv*
+  return static_args + param_args + 1;
+}
+}  // namespace mips
+}  // namespace art
diff --git a/src/compiler/jni/quick/mips/calling_convention_mips.h b/src/compiler/jni/quick/mips/calling_convention_mips.h
new file mode 100644
index 0000000..b9b2b72
--- /dev/null
+++ b/src/compiler/jni/quick/mips/calling_convention_mips.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_JNI_MIPS_CALLING_CONVENTION_MIPS_H_
+#define ART_SRC_OAT_JNI_MIPS_CALLING_CONVENTION_MIPS_H_
+
+#include "compiler/jni/quick/calling_convention.h"
+
+namespace art {
+namespace mips {
+// Calling convention for calls between managed (quick-compiled) methods on
+// MIPS: where arguments, the Method* and the return value live.
+class MipsManagedRuntimeCallingConvention : public ManagedRuntimeCallingConvention {
+ public:
+  MipsManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+      : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty) {}
+  virtual ~MipsManagedRuntimeCallingConvention() {}
+  // Calling convention
+  virtual ManagedRegister ReturnRegister();
+  virtual ManagedRegister InterproceduralScratchRegister();
+  // Managed runtime calling convention
+  virtual ManagedRegister MethodRegister();
+  virtual bool IsCurrentParamInRegister();
+  virtual bool IsCurrentParamOnStack();
+  virtual ManagedRegister CurrentParamRegister();
+  virtual FrameOffset CurrentParamStackOffset();
+  virtual const std::vector<ManagedRegister>& EntrySpills();
+
+ private:
+  // Registers spilled on method entry; filled in by EntrySpills().
+  std::vector<ManagedRegister> entry_spills_;
+
+  DISALLOW_COPY_AND_ASSIGN(MipsManagedRuntimeCallingConvention);
+};
+
+// Calling convention for the transition from managed code to a native JNI
+// function on MIPS, following the o32 ABI (wide arguments aligned to even
+// register numbers / 8-byte stack slots — see the Next() override).
+class MipsJniCallingConvention : public JniCallingConvention {
+ public:
+  explicit MipsJniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+  virtual ~MipsJniCallingConvention() {}
+  // Calling convention
+  virtual ManagedRegister ReturnRegister();
+  virtual ManagedRegister IntReturnRegister();
+  virtual ManagedRegister InterproceduralScratchRegister();
+  // JNI calling convention
+  virtual void Next();  // Override default behavior for o32 wide-arg alignment
+  virtual size_t FrameSize();
+  virtual size_t OutArgSize();
+  virtual const std::vector<ManagedRegister>& CalleeSaveRegisters() const {
+    return callee_save_regs_;
+  }
+  virtual ManagedRegister ReturnScratchRegister() const;
+  virtual uint32_t CoreSpillMask() const;
+  virtual uint32_t FpSpillMask() const {
+    return 0;  // Floats aren't spilled in JNI down call
+  }
+  virtual bool IsCurrentParamInRegister();
+  virtual bool IsCurrentParamOnStack();
+  virtual ManagedRegister CurrentParamRegister();
+  virtual FrameOffset CurrentParamStackOffset();
+
+ protected:
+  virtual size_t NumberOfOutgoingStackArgs();
+
+ private:
+  // TODO: these values aren't unique and can be shared amongst instances
+  std::vector<ManagedRegister> callee_save_regs_;
+
+  // Padding to ensure longs and doubles are not split (o32 alignment rule).
+  size_t padding_;
+
+  DISALLOW_COPY_AND_ASSIGN(MipsJniCallingConvention);
+};
+}  // namespace mips
+}  // namespace art
+
+#endif  // ART_SRC_OAT_JNI_MIPS_CALLING_CONVENTION_MIPS_H_
diff --git a/src/compiler/jni/quick/x86/calling_convention_x86.cc b/src/compiler/jni/quick/x86/calling_convention_x86.cc
new file mode 100644
index 0000000..b671bd1
--- /dev/null
+++ b/src/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention_x86.h"
+
+#include "base/logging.h"
+#include "oat/utils/x86/managed_register_x86.h"
+#include "utils.h"
+
+namespace art {
+namespace x86 {
+
+// Calling convention
+
+// ECX serves as the scratch register for inter-procedural transitions in
+// both conventions (EAX carries the Method* — see MethodRegister()).
+ManagedRegister X86ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+  return X86ManagedRegister::FromCpuRegister(ECX);
+}
+
+ManagedRegister X86JniCallingConvention::InterproceduralScratchRegister() {
+  return X86ManagedRegister::FromCpuRegister(ECX);
+}
+
+ManagedRegister X86JniCallingConvention::ReturnScratchRegister() const {
+  return ManagedRegister::NoRegister();  // No free regs, so assembler uses push/pop
+}
+
+// Selects the register holding a method's return value from the first
+// character of its shorty.  Floating-point results live in XMM0 for managed
+// code but on the x87 stack (ST0) for native (JNI) code; longs come back in
+// the EAX:EDX pair; void has no return register; everything else (ints,
+// references, etc.) uses EAX.
+static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
+  switch (shorty[0]) {
+    case 'F':
+    case 'D':
+      return jni ? X86ManagedRegister::FromX87Register(ST0)
+                 : X86ManagedRegister::FromXmmRegister(XMM0);
+    case 'J':
+      return X86ManagedRegister::FromRegisterPair(EAX_EDX);
+    case 'V':
+      return ManagedRegister::NoRegister();
+    default:
+      return X86ManagedRegister::FromCpuRegister(EAX);
+  }
+}
+
+ManagedRegister X86ManagedRuntimeCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty(), false);  // managed ABI: FP in XMM0
+}
+
+ManagedRegister X86JniCallingConvention::ReturnRegister() {
+  return ReturnRegisterForShorty(GetShorty(), true);  // native ABI: FP in ST0
+}
+
+// Register used for integer-width JNI return values.
+ManagedRegister X86JniCallingConvention::IntReturnRegister() {
+  return X86ManagedRegister::FromCpuRegister(EAX);
+}
+
+// Managed runtime calling convention
+
+// Method* is held in EAX on entry to managed code.
+ManagedRegister X86ManagedRuntimeCallingConvention::MethodRegister() {
+  return X86ManagedRegister::FromCpuRegister(EAX);
+}
+
+bool X86ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+  return false;  // Everything is passed by stack
+}
+
+bool X86ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+  return true;  // Everything is passed by stack
+}
+
+// Unreachable: x86 managed arguments are never register resident (above).
+ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamRegister() {
+  LOG(FATAL) << "Should not reach here";
+  return ManagedRegister::NoRegister();
+}
+
+// Offset of the current argument: skip the displacement and the Method*
+// slot, then index by the argument slot number.
+FrameOffset X86ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+  return FrameOffset(displacement_.Int32Value() +   // displacement
+                     kPointerSize +                 // Method*
+                     (itr_slots_ * kPointerSize));  // offset into in args
+}
+
+// We spill the argument registers on X86 to free them up for scratch use;
+// afterwards all arguments are assumed to live on the stack.  The spill
+// list is computed on first use and cached in entry_spills_.
+const std::vector<ManagedRegister>& X86ManagedRuntimeCallingConvention::EntrySpills() {
+  if (entry_spills_.size() == 0) {
+    const size_t num_spills = NumArgs() + NumLongOrDoubleArgs();
+    // At most three registers are spilled, one per incoming argument slot.
+    const Register arg_regs[] = { ECX, EDX, EBX };
+    for (size_t i = 0; (i < num_spills) && (i < 3u); ++i) {
+      entry_spills_.push_back(X86ManagedRegister::FromCpuRegister(arg_regs[i]));
+    }
+  }
+  return entry_spills_;
+}
+
+// JNI calling convention
+
+// EBP, ESI and EDI are the registers preserved across the native call;
+// they are saved in the JNI stub's prologue.
+X86JniCallingConvention::X86JniCallingConvention(bool is_static, bool is_synchronized,
+                                                 const char* shorty)
+    : JniCallingConvention(is_static, is_synchronized, shorty) {
+  callee_save_regs_.push_back(X86ManagedRegister::FromCpuRegister(EBP));
+  callee_save_regs_.push_back(X86ManagedRegister::FromCpuRegister(ESI));
+  callee_save_regs_.push_back(X86ManagedRegister::FromCpuRegister(EDI));
+}
+
+// Bit mask of spilled core registers.  NOTE(review): the extra bit at
+// position kNumberOfCpuRegisters presumably marks the return-address slot
+// pushed by the call — confirm against the frame-layout consumer.
+uint32_t X86JniCallingConvention::CoreSpillMask() const {
+  return 1 << EBP | 1 << ESI | 1 << EDI | 1 << kNumberOfCpuRegisters;
+}
+
+// Total JNI stub frame size, rounded up to the stack alignment: the fixed
+// frame data, the SIRT (stack indirect reference table) holding the
+// method's reference arguments, and space to spill the return value.
+size_t X86JniCallingConvention::FrameSize() {
+  // Method*, return address and callee save area size, local reference segment state
+  size_t frame_data_size = (3 + CalleeSaveRegisters().size()) * kPointerSize;
+  // References plus 2 words for SIRT header
+  size_t sirt_size = (ReferenceCount() + 2) * kPointerSize;
+  // Plus return value spill area size
+  return RoundUp(frame_data_size + sirt_size + SizeOfReturnValue(), kStackAlignment);
+}
+
+// Size of the outgoing-arguments area, aligned to the stack alignment.
+size_t X86JniCallingConvention::OutArgSize() {
+  return RoundUp(NumberOfOutgoingStackArgs() * kPointerSize, kStackAlignment);
+}
+
+// x86 (cdecl-style) passes every JNI argument on the stack.
+bool X86JniCallingConvention::IsCurrentParamInRegister() {
+  return false;  // Everything is passed by stack.
+}
+
+bool X86JniCallingConvention::IsCurrentParamOnStack() {
+  return true;  // Everything is passed by stack.
+}
+
+// Unreachable: x86 JNI arguments are never register resident (above).
+ManagedRegister X86JniCallingConvention::CurrentParamRegister() {
+  LOG(FATAL) << "Should not reach here";
+  return ManagedRegister::NoRegister();
+}
+
+// Offset of the current argument within the out-args area at the bottom of
+// the frame, adjusted by the current displacement.
+FrameOffset X86JniCallingConvention::CurrentParamStackOffset() {
+  return FrameOffset(displacement_.Int32Value() - OutArgSize() + (itr_slots_ * kPointerSize));
+}
+
+// Number of outgoing stack argument slots for the native call: jclass for
+// static methods, every managed parameter (wide arguments counted twice),
+// plus JNIEnv* and the return PC slot pushed after Method*.
+size_t X86JniCallingConvention::NumberOfOutgoingStackArgs() {
+  const size_t static_args = IsStatic() ? 1 : 0;  // count jclass
+  // regular argument parameters and this
+  const size_t param_args = NumArgs() + NumLongOrDoubleArgs();
+  // count JNIEnv* and return pc (pushed after Method*)
+  return static_args + param_args + 2;
+}
+
+}  // namespace x86
+}  // namespace art
diff --git a/src/compiler/jni/quick/x86/calling_convention_x86.h b/src/compiler/jni/quick/x86/calling_convention_x86.h
new file mode 100644
index 0000000..e80e432
--- /dev/null
+++ b/src/compiler/jni/quick/x86/calling_convention_x86.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_OAT_JNI_X86_CALLING_CONVENTION_X86_H_
+#define ART_SRC_OAT_JNI_X86_CALLING_CONVENTION_X86_H_
+
+#include "compiler/jni/quick/calling_convention.h"
+
+namespace art {
+namespace x86 {
+
+// Calling convention for calls between managed (quick-compiled) methods on
+// x86: where arguments, the Method* and the return value live.
+class X86ManagedRuntimeCallingConvention : public ManagedRuntimeCallingConvention {
+ public:
+  explicit X86ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized,
+                                              const char* shorty)
+      : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty) {}
+  virtual ~X86ManagedRuntimeCallingConvention() {}
+  // Calling convention
+  virtual ManagedRegister ReturnRegister();
+  virtual ManagedRegister InterproceduralScratchRegister();
+  // Managed runtime calling convention
+  virtual ManagedRegister MethodRegister();
+  virtual bool IsCurrentParamInRegister();
+  virtual bool IsCurrentParamOnStack();
+  virtual ManagedRegister CurrentParamRegister();
+  virtual FrameOffset CurrentParamStackOffset();
+  virtual const std::vector<ManagedRegister>& EntrySpills();
+ private:
+  // Registers spilled on method entry; filled in by EntrySpills().
+  std::vector<ManagedRegister> entry_spills_;
+  DISALLOW_COPY_AND_ASSIGN(X86ManagedRuntimeCallingConvention);
+};
+
+// Calling convention for the transition from managed code to a native JNI
+// function on x86, where all arguments are passed on the stack.
+class X86JniCallingConvention : public JniCallingConvention {
+ public:
+  explicit X86JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+  virtual ~X86JniCallingConvention() {}
+  // Calling convention
+  virtual ManagedRegister ReturnRegister();
+  virtual ManagedRegister IntReturnRegister();
+  virtual ManagedRegister InterproceduralScratchRegister();
+  // JNI calling convention
+  virtual size_t FrameSize();
+  virtual size_t OutArgSize();
+  virtual const std::vector<ManagedRegister>& CalleeSaveRegisters() const {
+    return callee_save_regs_;
+  }
+  virtual ManagedRegister ReturnScratchRegister() const;
+  virtual uint32_t CoreSpillMask() const;
+  virtual uint32_t FpSpillMask() const {
+    return 0;  // No floating-point registers are spilled in the JNI stub.
+  }
+  virtual bool IsCurrentParamInRegister();
+  virtual bool IsCurrentParamOnStack();
+  virtual ManagedRegister CurrentParamRegister();
+  virtual FrameOffset CurrentParamStackOffset();
+
+ protected:
+  virtual size_t NumberOfOutgoingStackArgs();
+
+ private:
+  // TODO: these values aren't unique and can be shared amongst instances
+  std::vector<ManagedRegister> callee_save_regs_;
+
+  DISALLOW_COPY_AND_ASSIGN(X86JniCallingConvention);
+};
+
+}  // namespace x86
+}  // namespace art
+
+#endif  // ART_SRC_OAT_JNI_X86_CALLING_CONVENTION_X86_H_