Create separate Android.mk for main build targets

The runtime, compiler, dex2oat, and oatdump are now in separate trees
to prevent dependency creep.  They can now be built individually
without rebuilding the rest of ART. dalvikvm and jdwpspy
were already this way. Builds in the art directory should behave as
before, building everything including tests.
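
A rough sketch of the per-target layout (illustrative only; the module
and source names below are assumed, not copied from the actual build
files):

  LOCAL_PATH := $(call my-dir)
  include $(CLEAR_VARS)
  LOCAL_MODULE := dex2oat
  LOCAL_SRC_FILES := dex2oat.cc
  LOCAL_SHARED_LIBRARIES := libart
  include $(BUILD_EXECUTABLE)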

Change-Id: Ic6b1151e5ed0f823c3dd301afd2b13eb2d8feb81
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
new file mode 100644
index 0000000..4aeda41
--- /dev/null
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -0,0 +1,1388 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_x86.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "x86_lir.h"
+
+namespace art {
+
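+// Upper bound on assembly passes; branch entries flagged NEEDS_FIXUP below
+// may be resized between passes.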
+#define MAX_ASSEMBLER_RETRIES 50
+
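+// Each skeleton initializer lists, in order: { prefix1, prefix2, opcode,
+// extra_opcode1, extra_opcode2, modrm_opcode, ax_opcode, immediate_bytes },
+// matching the fields read by ComputeSize() and the Emit* routines below.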
+const X86EncodingMap X86Mir2Lir::EncodingMap[kX86Last] = {
+  { kX8632BitData, kData,    IS_UNARY_OP,            { 0, 0, 0x00, 0, 0, 0, 0, 4 }, "data",  "0x!0d" },
+  { kX86Bkpt,      kNullary, NO_OPERAND | IS_BRANCH, { 0, 0, 0xCC, 0, 0, 0, 0, 0 }, "int 3", "" },
+  { kX86Nop,       kNop,     IS_UNARY_OP,            { 0, 0, 0x90, 0, 0, 0, 0, 0 }, "nop",   "" },
+
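+// Operand-form suffixes: R = register, M = memory ([base+disp]),
+// A = array ([base+index<<scale+disp]), T = thread-local (fs:[disp]),
+// I = immediate.  For example, "8MR" is an 8-bit store of a register to memory.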
+#define ENCODING_MAP(opname, mem_use, reg_def, uses_ccodes, \
+                     rm8_r8, rm32_r32, \
+                     r8_rm8, r32_rm32, \
+                     ax8_i8, ax32_i32, \
+                     rm8_i8, rm8_i8_modrm, \
+                     rm32_i32, rm32_i32_modrm, \
+                     rm32_i8, rm32_i8_modrm) \
+{ kX86 ## opname ## 8MR, kMemReg,    mem_use | IS_TERTIARY_OP |           REG_USE02  | SETS_CCODES | uses_ccodes, { 0,             0, rm8_r8, 0, 0, 0,            0,      0 }, #opname "8MR", "[!0r+!1d],!2r" }, \
+{ kX86 ## opname ## 8AR, kArrayReg,  mem_use | IS_QUIN_OP     |           REG_USE014 | SETS_CCODES | uses_ccodes, { 0,             0, rm8_r8, 0, 0, 0,            0,      0 }, #opname "8AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
+{ kX86 ## opname ## 8TR, kThreadReg, mem_use | IS_BINARY_OP   |           REG_USE1   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm8_r8, 0, 0, 0,            0,      0 }, #opname "8TR", "fs:[!0d],!1r" }, \
+{ kX86 ## opname ## 8RR, kRegReg,              IS_BINARY_OP   | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, r8_rm8, 0, 0, 0,            0,      0 }, #opname "8RR", "!0r,!1r" }, \
+{ kX86 ## opname ## 8RM, kRegMem,    IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, r8_rm8, 0, 0, 0,            0,      0 }, #opname "8RM", "!0r,[!1r+!2d]" }, \
+{ kX86 ## opname ## 8RA, kRegArray,  IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0,             0, r8_rm8, 0, 0, 0,            0,      0 }, #opname "8RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \
+{ kX86 ## opname ## 8RT, kRegThread, IS_LOAD | IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, r8_rm8, 0, 0, 0,            0,      0 }, #opname "8RT", "!0r,fs:[!1d]" }, \
+{ kX86 ## opname ## 8RI, kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm8_i8, 0, 0, rm8_i8_modrm, ax8_i8, 1 }, #opname "8RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 8MI, kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm8_i8, 0, 0, rm8_i8_modrm, 0,      1 }, #opname "8MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 8AI, kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, rm8_i8, 0, 0, rm8_i8_modrm, 0,      1 }, #opname "8AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 8TI, kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm8_i8, 0, 0, rm8_i8_modrm, 0,      1 }, #opname "8TI", "fs:[!0d],!1d" }, \
+  \
+{ kX86 ## opname ## 16MR,  kMemReg,    mem_use | IS_TERTIARY_OP |           REG_USE02  | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_r32, 0, 0, 0,              0,        0 }, #opname "16MR", "[!0r+!1d],!2r" }, \
+{ kX86 ## opname ## 16AR,  kArrayReg,  mem_use | IS_QUIN_OP     |           REG_USE014 | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_r32, 0, 0, 0,              0,        0 }, #opname "16AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
+{ kX86 ## opname ## 16TR,  kThreadReg, mem_use | IS_BINARY_OP   |           REG_USE1   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_r32, 0, 0, 0,              0,        0 }, #opname "16TR", "fs:[!0d],!1r" }, \
+{ kX86 ## opname ## 16RR,  kRegReg,              IS_BINARY_OP   | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0x66,          0,    r32_rm32, 0, 0, 0,              0,        0 }, #opname "16RR", "!0r,!1r" }, \
+{ kX86 ## opname ## 16RM,  kRegMem,    IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0x66,          0,    r32_rm32, 0, 0, 0,              0,        0 }, #opname "16RM", "!0r,[!1r+!2d]" }, \
+{ kX86 ## opname ## 16RA,  kRegArray,  IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0x66,          0,    r32_rm32, 0, 0, 0,              0,        0 }, #opname "16RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \
+{ kX86 ## opname ## 16RT,  kRegThread, IS_LOAD | IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, r32_rm32, 0, 0, 0,              0,        0 }, #opname "16RT", "!0r,fs:[!1d]" }, \
+{ kX86 ## opname ## 16RI,  kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_i32, 0, 0, rm32_i32_modrm, ax32_i32, 2 }, #opname "16RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 16MI,  kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_i32, 0, 0, rm32_i32_modrm, 0,        2 }, #opname "16MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 16AI,  kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_i32, 0, 0, rm32_i32_modrm, 0,        2 }, #opname "16AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 16TI,  kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_i32, 0, 0, rm32_i32_modrm, 0,        2 }, #opname "16TI", "fs:[!0d],!1d" }, \
+{ kX86 ## opname ## 16RI8, kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "16RI8", "!0r,!1d" }, \
+{ kX86 ## opname ## 16MI8, kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "16MI8", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 16AI8, kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { 0x66,          0,    rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "16AI8", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 16TI8, kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "16TI8", "fs:[!0d],!1d" }, \
+  \
+{ kX86 ## opname ## 32MR,  kMemReg,    mem_use | IS_TERTIARY_OP |           REG_USE02  | SETS_CCODES | uses_ccodes, { 0,             0, rm32_r32, 0, 0, 0,              0,        0 }, #opname "32MR", "[!0r+!1d],!2r" }, \
+{ kX86 ## opname ## 32AR,  kArrayReg,  mem_use | IS_QUIN_OP     |           REG_USE014 | SETS_CCODES | uses_ccodes, { 0,             0, rm32_r32, 0, 0, 0,              0,        0 }, #opname "32AR", "[!0r+!1r<<!2d+!3d],!4r" }, \
+{ kX86 ## opname ## 32TR,  kThreadReg, mem_use | IS_BINARY_OP   |           REG_USE1   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_r32, 0, 0, 0,              0,        0 }, #opname "32TR", "fs:[!0d],!1r" }, \
+{ kX86 ## opname ## 32RR,  kRegReg,              IS_BINARY_OP   | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, r32_rm32, 0, 0, 0,              0,        0 }, #opname "32RR", "!0r,!1r" }, \
+{ kX86 ## opname ## 32RM,  kRegMem,    IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, r32_rm32, 0, 0, 0,              0,        0 }, #opname "32RM", "!0r,[!1r+!2d]" }, \
+{ kX86 ## opname ## 32RA,  kRegArray,  IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0,             0, r32_rm32, 0, 0, 0,              0,        0 }, #opname "32RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \
+{ kX86 ## opname ## 32RT,  kRegThread, IS_LOAD | IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, r32_rm32, 0, 0, 0,              0,        0 }, #opname "32RT", "!0r,fs:[!1d]" }, \
+{ kX86 ## opname ## 32RI,  kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i32, 0, 0, rm32_i32_modrm, ax32_i32, 4 }, #opname "32RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 32MI,  kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i32, 0, 0, rm32_i32_modrm, 0,        4 }, #opname "32MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 32AI,  kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i32, 0, 0, rm32_i32_modrm, 0,        4 }, #opname "32AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 32TI,  kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0,        4 }, #opname "32TI", "fs:[!0d],!1d" }, \
+{ kX86 ## opname ## 32RI8, kRegImm,              IS_BINARY_OP   | reg_def | REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "32RI8", "!0r,!1d" }, \
+{ kX86 ## opname ## 32MI8, kMemImm,    mem_use | IS_TERTIARY_OP |           REG_USE0   | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "32MI8", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 32AI8, kArrayImm,  mem_use | IS_QUIN_OP     |           REG_USE01  | SETS_CCODES | uses_ccodes, { 0,             0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "32AI8", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 32TI8, kThreadImm, mem_use | IS_BINARY_OP   |                        SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_i8,  0, 0, rm32_i8_modrm,  0,        1 }, #opname "32TI8", "fs:[!0d],!1d" }
+
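+// The classic ALU group: each instantiation supplies the r/m-reg, reg-r/m and
+// eAX-immediate base opcodes, plus the /digit used by the 0x80/0x81/0x83
+// immediate forms.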
+ENCODING_MAP(Add, IS_LOAD | IS_STORE, REG_DEF0, 0,
+  0x00 /* RegMem8/Reg8 */,     0x01 /* RegMem32/Reg32 */,
+  0x02 /* Reg8/RegMem8 */,     0x03 /* Reg32/RegMem32 */,
+  0x04 /* Rax8/imm8 opcode */, 0x05 /* Rax32/imm32 */,
+  0x80, 0x0 /* RegMem8/imm8 */,
+  0x81, 0x0 /* RegMem32/imm32 */, 0x83, 0x0 /* RegMem32/imm8 */),
+ENCODING_MAP(Or, IS_LOAD | IS_STORE, REG_DEF0, 0,
+  0x08 /* RegMem8/Reg8 */,     0x09 /* RegMem32/Reg32 */,
+  0x0A /* Reg8/RegMem8 */,     0x0B /* Reg32/RegMem32 */,
+  0x0C /* Rax8/imm8 opcode */, 0x0D /* Rax32/imm32 */,
+  0x80, 0x1 /* RegMem8/imm8 */,
+  0x81, 0x1 /* RegMem32/imm32 */, 0x83, 0x1 /* RegMem32/imm8 */),
+ENCODING_MAP(Adc, IS_LOAD | IS_STORE, REG_DEF0, USES_CCODES,
+  0x10 /* RegMem8/Reg8 */,     0x11 /* RegMem32/Reg32 */,
+  0x12 /* Reg8/RegMem8 */,     0x13 /* Reg32/RegMem32 */,
+  0x14 /* Rax8/imm8 opcode */, 0x15 /* Rax32/imm32 */,
+  0x80, 0x2 /* RegMem8/imm8 */,
+  0x81, 0x2 /* RegMem32/imm32 */, 0x83, 0x2 /* RegMem32/imm8 */),
+ENCODING_MAP(Sbb, IS_LOAD | IS_STORE, REG_DEF0, USES_CCODES,
+  0x18 /* RegMem8/Reg8 */,     0x19 /* RegMem32/Reg32 */,
+  0x1A /* Reg8/RegMem8 */,     0x1B /* Reg32/RegMem32 */,
+  0x1C /* Rax8/imm8 opcode */, 0x1D /* Rax32/imm32 */,
+  0x80, 0x3 /* RegMem8/imm8 */,
+  0x81, 0x3 /* RegMem32/imm32 */, 0x83, 0x3 /* RegMem32/imm8 */),
+ENCODING_MAP(And, IS_LOAD | IS_STORE, REG_DEF0, 0,
+  0x20 /* RegMem8/Reg8 */,     0x21 /* RegMem32/Reg32 */,
+  0x22 /* Reg8/RegMem8 */,     0x23 /* Reg32/RegMem32 */,
+  0x24 /* Rax8/imm8 opcode */, 0x25 /* Rax32/imm32 */,
+  0x80, 0x4 /* RegMem8/imm8 */,
+  0x81, 0x4 /* RegMem32/imm32 */, 0x83, 0x4 /* RegMem32/imm8 */),
+ENCODING_MAP(Sub, IS_LOAD | IS_STORE, REG_DEF0, 0,
+  0x28 /* RegMem8/Reg8 */,     0x29 /* RegMem32/Reg32 */,
+  0x2A /* Reg8/RegMem8 */,     0x2B /* Reg32/RegMem32 */,
+  0x2C /* Rax8/imm8 opcode */, 0x2D /* Rax32/imm32 */,
+  0x80, 0x5 /* RegMem8/imm8 */,
+  0x81, 0x5 /* RegMem32/imm32 */, 0x83, 0x5 /* RegMem32/imm8 */),
+ENCODING_MAP(Xor, IS_LOAD | IS_STORE, REG_DEF0, 0,
+  0x30 /* RegMem8/Reg8 */,     0x31 /* RegMem32/Reg32 */,
+  0x32 /* Reg8/RegMem8 */,     0x33 /* Reg32/RegMem32 */,
+  0x34 /* Rax8/imm8 opcode */, 0x35 /* Rax32/imm32 */,
+  0x80, 0x6 /* RegMem8/imm8 */,
+  0x81, 0x6 /* RegMem32/imm32 */, 0x83, 0x6 /* RegMem32/imm8 */),
+ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
+  0x38 /* RegMem8/Reg8 */,     0x39 /* RegMem32/Reg32 */,
+  0x3A /* Reg8/RegMem8 */,     0x3B /* Reg32/RegMem32 */,
+  0x3C /* Rax8/imm8 opcode */, 0x3D /* Rax32/imm32 */,
+  0x80, 0x7 /* RegMem8/imm8 */,
+  0x81, 0x7 /* RegMem32/imm32 */, 0x83, 0x7 /* RegMem32/imm8 */),
+#undef ENCODING_MAP
+
+  { kX86Imul16RRI,   kRegRegImm,             IS_TERTIARY_OP | REG_DEF0_USE1  | SETS_CCODES, { 0x66, 0, 0x69, 0, 0, 0, 0, 2 }, "Imul16RRI", "!0r,!1r,!2d" },
+  { kX86Imul16RMI,   kRegMemImm,   IS_LOAD | IS_QUAD_OP     | REG_DEF0_USE1  | SETS_CCODES, { 0x66, 0, 0x69, 0, 0, 0, 0, 2 }, "Imul16RMI", "!0r,[!1r+!2d],!3d" },
+  { kX86Imul16RAI,   kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0x66, 0, 0x69, 0, 0, 0, 0, 2 }, "Imul16RAI", "!0r,[!1r+!2r<<!3d+!4d],!5d" },
+
+  { kX86Imul32RRI,   kRegRegImm,             IS_TERTIARY_OP | REG_DEF0_USE1  | SETS_CCODES, { 0, 0, 0x69, 0, 0, 0, 0, 4 }, "Imul32RRI", "!0r,!1r,!2d" },
+  { kX86Imul32RMI,   kRegMemImm,   IS_LOAD | IS_QUAD_OP     | REG_DEF0_USE1  | SETS_CCODES, { 0, 0, 0x69, 0, 0, 0, 0, 4 }, "Imul32RMI", "!0r,[!1r+!2d],!3d" },
+  { kX86Imul32RAI,   kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0, 0, 0x69, 0, 0, 0, 0, 4 }, "Imul32RAI", "!0r,[!1r+!2r<<!3d+!4d],!5d" },
+  { kX86Imul32RRI8,  kRegRegImm,             IS_TERTIARY_OP | REG_DEF0_USE1  | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul32RRI8", "!0r,!1r,!2d" },
+  { kX86Imul32RMI8,  kRegMemImm,   IS_LOAD | IS_QUAD_OP     | REG_DEF0_USE1  | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul32RMI8", "!0r,[!1r+!2d],!3d" },
+  { kX86Imul32RAI8,  kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul32RAI8", "!0r,[!1r+!2r<<!3d+!4d],!5d" },
+
+  { kX86Mov8MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,      { 0,             0, 0x88, 0, 0, 0, 0, 0 }, "Mov8MR", "[!0r+!1d],!2r" },
+  { kX86Mov8AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,     { 0,             0, 0x88, 0, 0, 0, 0, 0 }, "Mov8AR", "[!0r+!1r<<!2d+!3d],!4r" },
+  { kX86Mov8TR, kThreadReg, IS_STORE | IS_BINARY_OP   | REG_USE1,       { THREAD_PREFIX, 0, 0x88, 0, 0, 0, 0, 0 }, "Mov8TR", "fs:[!0d],!1r" },
+  { kX86Mov8RR, kRegReg,               IS_BINARY_OP   | REG_DEF0_USE1,  { 0,             0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RR", "!0r,!1r" },
+  { kX86Mov8RM, kRegMem,    IS_LOAD  | IS_TERTIARY_OP | REG_DEF0_USE1,  { 0,             0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RM", "!0r,[!1r+!2d]" },
+  { kX86Mov8RA, kRegArray,  IS_LOAD  | IS_QUIN_OP     | REG_DEF0_USE12, { 0,             0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RA", "!0r,[!1r+!2r<<!3d+!4d]" },
+  { kX86Mov8RT, kRegThread, IS_LOAD  | IS_BINARY_OP   | REG_DEF0,       { THREAD_PREFIX, 0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RT", "!0r,fs:[!1d]" },
+  { kX86Mov8RI, kMovRegImm,            IS_BINARY_OP   | REG_DEF0,       { 0,             0, 0xB0, 0, 0, 0, 0, 1 }, "Mov8RI", "!0r,!1d" },
+  { kX86Mov8MI, kMemImm,    IS_STORE | IS_TERTIARY_OP | REG_USE0,       { 0,             0, 0xC6, 0, 0, 0, 0, 1 }, "Mov8MI", "[!0r+!1d],!2d" },
+  { kX86Mov8AI, kArrayImm,  IS_STORE | IS_QUIN_OP     | REG_USE01,      { 0,             0, 0xC6, 0, 0, 0, 0, 1 }, "Mov8AI", "[!0r+!1r<<!2d+!3d],!4d" },
+  { kX86Mov8TI, kThreadImm, IS_STORE | IS_BINARY_OP,                    { THREAD_PREFIX, 0, 0xC6, 0, 0, 0, 0, 1 }, "Mov8TI", "fs:[!0d],!1d" },
+
+  { kX86Mov16MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,      { 0x66,          0,    0x89, 0, 0, 0, 0, 0 }, "Mov16MR", "[!0r+!1d],!2r" },
+  { kX86Mov16AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,     { 0x66,          0,    0x89, 0, 0, 0, 0, 0 }, "Mov16AR", "[!0r+!1r<<!2d+!3d],!4r" },
+  { kX86Mov16TR, kThreadReg, IS_STORE | IS_BINARY_OP   | REG_USE1,       { THREAD_PREFIX, 0x66, 0x89, 0, 0, 0, 0, 0 }, "Mov16TR", "fs:[!0d],!1r" },
+  { kX86Mov16RR, kRegReg,               IS_BINARY_OP   | REG_DEF0_USE1,  { 0x66,          0,    0x8B, 0, 0, 0, 0, 0 }, "Mov16RR", "!0r,!1r" },
+  { kX86Mov16RM, kRegMem,    IS_LOAD  | IS_TERTIARY_OP | REG_DEF0_USE1,  { 0x66,          0,    0x8B, 0, 0, 0, 0, 0 }, "Mov16RM", "!0r,[!1r+!2d]" },
+  { kX86Mov16RA, kRegArray,  IS_LOAD  | IS_QUIN_OP     | REG_DEF0_USE12, { 0x66,          0,    0x8B, 0, 0, 0, 0, 0 }, "Mov16RA", "!0r,[!1r+!2r<<!3d+!4d]" },
+  { kX86Mov16RT, kRegThread, IS_LOAD  | IS_BINARY_OP   | REG_DEF0,       { THREAD_PREFIX, 0x66, 0x8B, 0, 0, 0, 0, 0 }, "Mov16RT", "!0r,fs:[!1d]" },
+  { kX86Mov16RI, kMovRegImm,            IS_BINARY_OP   | REG_DEF0,       { 0x66,          0,    0xB8, 0, 0, 0, 0, 2 }, "Mov16RI", "!0r,!1d" },
+  { kX86Mov16MI, kMemImm,    IS_STORE | IS_TERTIARY_OP | REG_USE0,       { 0x66,          0,    0xC7, 0, 0, 0, 0, 2 }, "Mov16MI", "[!0r+!1d],!2d" },
+  { kX86Mov16AI, kArrayImm,  IS_STORE | IS_QUIN_OP     | REG_USE01,      { 0x66,          0,    0xC7, 0, 0, 0, 0, 2 }, "Mov16AI", "[!0r+!1r<<!2d+!3d],!4d" },
+  { kX86Mov16TI, kThreadImm, IS_STORE | IS_BINARY_OP,                    { THREAD_PREFIX, 0x66, 0xC7, 0, 0, 0, 0, 2 }, "Mov16TI", "fs:[!0d],!1d" },
+
+  { kX86Mov32MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,      { 0,             0, 0x89, 0, 0, 0, 0, 0 }, "Mov32MR", "[!0r+!1d],!2r" },
+  { kX86Mov32AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,     { 0,             0, 0x89, 0, 0, 0, 0, 0 }, "Mov32AR", "[!0r+!1r<<!2d+!3d],!4r" },
+  { kX86Mov32TR, kThreadReg, IS_STORE | IS_BINARY_OP   | REG_USE1,       { THREAD_PREFIX, 0, 0x89, 0, 0, 0, 0, 0 }, "Mov32TR", "fs:[!0d],!1r" },
+  { kX86Mov32RR, kRegReg,               IS_BINARY_OP   | REG_DEF0_USE1,  { 0,             0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RR", "!0r,!1r" },
+  { kX86Mov32RM, kRegMem,    IS_LOAD  | IS_TERTIARY_OP | REG_DEF0_USE1,  { 0,             0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RM", "!0r,[!1r+!2d]" },
+  { kX86Mov32RA, kRegArray,  IS_LOAD  | IS_QUIN_OP     | REG_DEF0_USE12, { 0,             0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RA", "!0r,[!1r+!2r<<!3d+!4d]" },
+  { kX86Mov32RT, kRegThread, IS_LOAD  | IS_BINARY_OP   | REG_DEF0,       { THREAD_PREFIX, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RT", "!0r,fs:[!1d]" },
+  { kX86Mov32RI, kMovRegImm,            IS_BINARY_OP   | REG_DEF0,       { 0,             0, 0xB8, 0, 0, 0, 0, 4 }, "Mov32RI", "!0r,!1d" },
+  { kX86Mov32MI, kMemImm,    IS_STORE | IS_TERTIARY_OP | REG_USE0,       { 0,             0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32MI", "[!0r+!1d],!2d" },
+  { kX86Mov32AI, kArrayImm,  IS_STORE | IS_QUIN_OP     | REG_USE01,      { 0,             0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32AI", "[!0r+!1r<<!2d+!3d],!4d" },
+  { kX86Mov32TI, kThreadImm, IS_STORE | IS_BINARY_OP,                    { THREAD_PREFIX, 0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32TI", "fs:[!0d],!1d" },
+
+  { kX86Lea32RA, kRegArray, IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8D, 0, 0, 0, 0, 0 }, "Lea32RA", "!0r,[!1r+!2r<<!3d+!4d]" },
+
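+// Shifts and rotates share the Grp2 opcodes (0xC0/0xC1 for immediates,
+// 0xD0/0xD1 for shift-by-one, 0xD2/0xD3 for CL); the /digit passed as
+// modrm_opcode selects the operation.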
+#define SHIFT_ENCODING_MAP(opname, modrm_opcode) \
+{ kX86 ## opname ## 8RI, kShiftRegImm,                        IS_BINARY_OP   | REG_DEF0_USE0 |            SETS_CCODES, { 0,    0, 0xC0, 0, 0, modrm_opcode, 0xD0, 1 }, #opname "8RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 8MI, kShiftMemImm,   IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      |            SETS_CCODES, { 0,    0, 0xC0, 0, 0, modrm_opcode, 0xD0, 1 }, #opname "8MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 8AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     |            SETS_CCODES, { 0,    0, 0xC0, 0, 0, modrm_opcode, 0xD0, 1 }, #opname "8AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 8RC, kShiftRegCl,                         IS_BINARY_OP   | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0,    0, 0xD2, 0, 0, modrm_opcode, 0,    0 }, #opname "8RC", "!0r,cl" }, \
+{ kX86 ## opname ## 8MC, kShiftMemCl,    IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      | REG_USEC | SETS_CCODES, { 0,    0, 0xD2, 0, 0, modrm_opcode, 0,    0 }, #opname "8MC", "[!0r+!1d],cl" }, \
+{ kX86 ## opname ## 8AC, kShiftArrayCl,  IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     | REG_USEC | SETS_CCODES, { 0,    0, 0xD2, 0, 0, modrm_opcode, 0,    0 }, #opname "8AC", "[!0r+!1r<<!2d+!3d],cl" }, \
+  \
+{ kX86 ## opname ## 16RI, kShiftRegImm,                        IS_BINARY_OP   | REG_DEF0_USE0 |            SETS_CCODES, { 0x66, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "16RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 16MI, kShiftMemImm,   IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      |            SETS_CCODES, { 0x66, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "16MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 16AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     |            SETS_CCODES, { 0x66, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "16AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 16RC, kShiftRegCl,                         IS_BINARY_OP   | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0x66, 0, 0xD3, 0, 0, modrm_opcode, 0,    0 }, #opname "16RC", "!0r,cl" }, \
+{ kX86 ## opname ## 16MC, kShiftMemCl,    IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      | REG_USEC | SETS_CCODES, { 0x66, 0, 0xD3, 0, 0, modrm_opcode, 0,    0 }, #opname "16MC", "[!0r+!1d],cl" }, \
+{ kX86 ## opname ## 16AC, kShiftArrayCl,  IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     | REG_USEC | SETS_CCODES, { 0x66, 0, 0xD3, 0, 0, modrm_opcode, 0,    0 }, #opname "16AC", "[!0r+!1r<<!2d+!3d],cl" }, \
+  \
+{ kX86 ## opname ## 32RI, kShiftRegImm,                        IS_BINARY_OP   | REG_DEF0_USE0 |            SETS_CCODES, { 0,    0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "32RI", "!0r,!1d" }, \
+{ kX86 ## opname ## 32MI, kShiftMemImm,   IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      |            SETS_CCODES, { 0,    0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "32MI", "[!0r+!1d],!2d" }, \
+{ kX86 ## opname ## 32AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     |            SETS_CCODES, { 0,    0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "32AI", "[!0r+!1r<<!2d+!3d],!4d" }, \
+{ kX86 ## opname ## 32RC, kShiftRegCl,                         IS_BINARY_OP   | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0,    0, 0xD3, 0, 0, modrm_opcode, 0,    0 }, #opname "32RC", "!0r,cl" }, \
+{ kX86 ## opname ## 32MC, kShiftMemCl,    IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0      | REG_USEC | SETS_CCODES, { 0,    0, 0xD3, 0, 0, modrm_opcode, 0,    0 }, #opname "32MC", "[!0r+!1d],cl" }, \
+{ kX86 ## opname ## 32AC, kShiftArrayCl,  IS_LOAD | IS_STORE | IS_QUIN_OP     | REG_USE01     | REG_USEC | SETS_CCODES, { 0,    0, 0xD3, 0, 0, modrm_opcode, 0,    0 }, #opname "32AC", "[!0r+!1r<<!2d+!3d],cl" }
+
+  SHIFT_ENCODING_MAP(Rol, 0x0),
+  SHIFT_ENCODING_MAP(Ror, 0x1),
+  SHIFT_ENCODING_MAP(Rcl, 0x2),
+  SHIFT_ENCODING_MAP(Rcr, 0x3),
+  SHIFT_ENCODING_MAP(Sal, 0x4),
+  SHIFT_ENCODING_MAP(Shr, 0x5),
+  SHIFT_ENCODING_MAP(Sar, 0x7),
+#undef SHIFT_ENCODING_MAP
+
+  { kX86Cmc, kNullary, NO_OPERAND, { 0, 0, 0xF5, 0, 0, 0, 0, 0}, "Cmc", "" },
+
+  { kX86Test8RI,  kRegImm,             IS_BINARY_OP   | REG_USE0  | SETS_CCODES, { 0,    0, 0xF6, 0, 0, 0, 0, 1}, "Test8RI", "!0r,!1d" },
+  { kX86Test8MI,  kMemImm,   IS_LOAD | IS_TERTIARY_OP | REG_USE0  | SETS_CCODES, { 0,    0, 0xF6, 0, 0, 0, 0, 1}, "Test8MI", "[!0r+!1d],!2d" },
+  { kX86Test8AI,  kArrayImm, IS_LOAD | IS_QUIN_OP     | REG_USE01 | SETS_CCODES, { 0,    0, 0xF6, 0, 0, 0, 0, 1}, "Test8AI", "[!0r+!1r<<!2d+!3d],!4d" },
+  { kX86Test16RI, kRegImm,             IS_BINARY_OP   | REG_USE0  | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2}, "Test16RI", "!0r,!1d" },
+  { kX86Test16MI, kMemImm,   IS_LOAD | IS_TERTIARY_OP | REG_USE0  | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2}, "Test16MI", "[!0r+!1d],!2d" },
+  { kX86Test16AI, kArrayImm, IS_LOAD | IS_QUIN_OP     | REG_USE01 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2}, "Test16AI", "[!0r+!1r<<!2d+!3d],!4d" },
+  { kX86Test32RI, kRegImm,             IS_BINARY_OP   | REG_USE0  | SETS_CCODES, { 0,    0, 0xF7, 0, 0, 0, 0, 4}, "Test32RI", "!0r,!1d" },
+  { kX86Test32MI, kMemImm,   IS_LOAD | IS_TERTIARY_OP | REG_USE0  | SETS_CCODES, { 0,    0, 0xF7, 0, 0, 0, 0, 4}, "Test32MI", "[!0r+!1d],!2d" },
+  { kX86Test32AI, kArrayImm, IS_LOAD | IS_QUIN_OP     | REG_USE01 | SETS_CCODES, { 0,    0, 0xF7, 0, 0, 0, 0, 4}, "Test32AI", "[!0r+!1r<<!2d+!3d],!4d" },
+  { kX86Test32RR, kRegReg,             IS_BINARY_OP   | REG_USE01 | SETS_CCODES, { 0,    0, 0x85, 0, 0, 0, 0, 0}, "Test32RR", "!0r,!1r" },
+
+#define UNARY_ENCODING_MAP(opname, modrm, is_store, sets_ccodes, \
+                           reg, reg_kind, reg_flags, \
+                           mem, mem_kind, mem_flags, \
+                           arr, arr_kind, arr_flags, imm, \
+                           b_flags, hw_flags, w_flags, \
+                           b_format, hw_format, w_format) \
+{ kX86 ## opname ## 8 ## reg,  reg_kind,                      reg_flags | b_flags  | sets_ccodes, { 0,    0, 0xF6, 0, 0, modrm, 0, imm << 0}, #opname "8" #reg, #b_format "!0r" }, \
+{ kX86 ## opname ## 8 ## mem,  mem_kind, IS_LOAD | is_store | mem_flags | b_flags  | sets_ccodes, { 0,    0, 0xF6, 0, 0, modrm, 0, imm << 0}, #opname "8" #mem, #b_format "[!0r+!1d]" }, \
+{ kX86 ## opname ## 8 ## arr,  arr_kind, IS_LOAD | is_store | arr_flags | b_flags  | sets_ccodes, { 0,    0, 0xF6, 0, 0, modrm, 0, imm << 0}, #opname "8" #arr, #b_format "[!0r+!1r<<!2d+!3d]" }, \
+{ kX86 ## opname ## 16 ## reg, reg_kind,                      reg_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1}, #opname "16" #reg, #hw_format "!0r" }, \
+{ kX86 ## opname ## 16 ## mem, mem_kind, IS_LOAD | is_store | mem_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1}, #opname "16" #mem, #hw_format "[!0r+!1d]" }, \
+{ kX86 ## opname ## 16 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1}, #opname "16" #arr, #hw_format "[!0r+!1r<<!2d+!3d]" }, \
+{ kX86 ## opname ## 32 ## reg, reg_kind,                      reg_flags | w_flags  | sets_ccodes, { 0,    0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "32" #reg, #w_format "!0r" }, \
+{ kX86 ## opname ## 32 ## mem, mem_kind, IS_LOAD | is_store | mem_flags | w_flags  | sets_ccodes, { 0,    0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "32" #mem, #w_format "[!0r+!1d]" }, \
+{ kX86 ## opname ## 32 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | w_flags  | sets_ccodes, { 0,    0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "32" #arr, #w_format "[!0r+!1r<<!2d+!3d]" }
+
+  UNARY_ENCODING_MAP(Not, 0x2, IS_STORE, 0,           R, kReg, IS_UNARY_OP | REG_DEF0_USE0, M, kMem, IS_BINARY_OP | REG_USE0, A, kArray, IS_QUAD_OP | REG_USE01, 0, 0, 0, 0, "", "", ""),
+  UNARY_ENCODING_MAP(Neg, 0x3, IS_STORE, SETS_CCODES, R, kReg, IS_UNARY_OP | REG_DEF0_USE0, M, kMem, IS_BINARY_OP | REG_USE0, A, kArray, IS_QUAD_OP | REG_USE01, 0, 0, 0, 0, "", "", ""),
+
+  UNARY_ENCODING_MAP(Mul,     0x4, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEA,  REG_DEFAD_USEA,  "ax,al,", "dx:ax,ax,", "edx:eax,eax,"),
+  UNARY_ENCODING_MAP(Imul,    0x5, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEA,  REG_DEFAD_USEA,  "ax,al,", "dx:ax,ax,", "edx:eax,eax,"),
+  UNARY_ENCODING_MAP(Divmod,  0x6, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEAD, REG_DEFAD_USEAD, "ah:al,ax,", "dx:ax,dx:ax,", "edx:eax,edx:eax,"),
+  UNARY_ENCODING_MAP(Idivmod, 0x7, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEAD, REG_DEFAD_USEAD, "ah:al,ax,", "dx:ax,dx:ax,", "edx:eax,edx:eax,"),
+#undef UNARY_ENCODING_MAP
+
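+// Two-byte (0x0F-escaped) instructions; the prefix argument selects the SSE
+// variant (0xF2 = scalar double, 0xF3 = scalar single, 0x66 = operand size).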
+#define EXT_0F_ENCODING_MAP(opname, prefix, opcode, reg_def) \
+{ kX86 ## opname ## RR, kRegReg,             IS_BINARY_OP   | reg_def | REG_USE01,  { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RR", "!0r,!1r" }, \
+{ kX86 ## opname ## RM, kRegMem,   IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01,  { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RM", "!0r,[!1r+!2d]" }, \
+{ kX86 ## opname ## RA, kRegArray, IS_LOAD | IS_QUIN_OP     | reg_def | REG_USE012, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RA", "!0r,[!1r+!2r<<!3d+!4d]" }
+
+  EXT_0F_ENCODING_MAP(Movsd, 0xF2, 0x10, REG_DEF0),
+  { kX86MovsdMR, kMemReg,   IS_STORE | IS_TERTIARY_OP | REG_USE02,  { 0xF2, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovsdMR", "[!0r+!1d],!2r" },
+  { kX86MovsdAR, kArrayReg, IS_STORE | IS_QUIN_OP     | REG_USE014, { 0xF2, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovsdAR", "[!0r+!1r<<!2d+!3d],!4r" },
+
+  EXT_0F_ENCODING_MAP(Movss, 0xF3, 0x10, REG_DEF0),
+  { kX86MovssMR, kMemReg,   IS_STORE | IS_TERTIARY_OP | REG_USE02,  { 0xF3, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovssMR", "[!0r+!1d],!2r" },
+  { kX86MovssAR, kArrayReg, IS_STORE | IS_QUIN_OP     | REG_USE014, { 0xF3, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovssAR", "[!0r+!1r<<!2d+!3d],!4r" },
+
+  EXT_0F_ENCODING_MAP(Cvtsi2sd,  0xF2, 0x2A, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Cvtsi2ss,  0xF3, 0x2A, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Cvttsd2si, 0xF2, 0x2C, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Cvttss2si, 0xF3, 0x2C, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Cvtsd2si,  0xF2, 0x2D, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Cvtss2si,  0xF3, 0x2D, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Ucomisd,   0x66, 0x2E, SETS_CCODES),
+  EXT_0F_ENCODING_MAP(Ucomiss,   0x00, 0x2E, SETS_CCODES),
+  EXT_0F_ENCODING_MAP(Comisd,    0x66, 0x2F, SETS_CCODES),
+  EXT_0F_ENCODING_MAP(Comiss,    0x00, 0x2F, SETS_CCODES),
+  EXT_0F_ENCODING_MAP(Orps,      0x00, 0x56, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Xorps,     0x00, 0x57, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Addsd,     0xF2, 0x58, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Addss,     0xF3, 0x58, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Mulsd,     0xF2, 0x59, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Mulss,     0xF3, 0x59, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Cvtsd2ss,  0xF2, 0x5A, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Cvtss2sd,  0xF3, 0x5A, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Subsd,     0xF2, 0x5C, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Subss,     0xF3, 0x5C, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Divsd,     0xF2, 0x5E, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Divss,     0xF3, 0x5E, REG_DEF0),
+
+  { kX86PsrlqRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x73, 0, 2, 0, 1 }, "PsrlqRI", "!0r,!1d" },
+  { kX86PsllqRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x73, 0, 6, 0, 1 }, "PsllqRI", "!0r,!1d" },
+
+  EXT_0F_ENCODING_MAP(Movdxr,    0x66, 0x6E, REG_DEF0),
+  { kX86MovdrxRR, kRegRegStore, IS_BINARY_OP | REG_DEF0   | REG_USE01,  { 0x66, 0, 0x0F, 0x7E, 0, 0, 0, 0 }, "MovdrxRR", "!0r,!1r" },
+  { kX86MovdrxMR, kMemReg,      IS_STORE | IS_TERTIARY_OP | REG_USE02,  { 0x66, 0, 0x0F, 0x7E, 0, 0, 0, 0 }, "MovdrxMR", "[!0r+!1d],!2r" },
+  { kX86MovdrxAR, kArrayReg,    IS_STORE | IS_QUIN_OP     | REG_USE014, { 0x66, 0, 0x0F, 0x7E, 0, 0, 0, 0 }, "MovdrxAR", "[!0r+!1r<<!2d+!3d],!4r" },
+
+  { kX86Set8R, kRegCond,              IS_BINARY_OP   | REG_DEF0  | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0 }, "Set8R", "!1c !0r" },
+  { kX86Set8M, kMemCond,   IS_STORE | IS_TERTIARY_OP | REG_USE0  | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0 }, "Set8M", "!2c [!0r+!1d]" },
+  { kX86Set8A, kArrayCond, IS_STORE | IS_QUIN_OP     | REG_USE01 | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0 }, "Set8A", "!4c [!0r+!1r<<!2d+!3d]" },
+
+  // TODO: load/store?
+  // Encode the modrm opcode as an extra opcode byte to avoid computation during assembly.
+  { kX86Mfence, kReg,                 NO_OPERAND,     { 0, 0, 0x0F, 0xAE, 0, 6, 0, 0 }, "Mfence", "" },
+
+  EXT_0F_ENCODING_MAP(Imul16,  0x66, 0xAF, REG_DEF0 | SETS_CCODES),
+  EXT_0F_ENCODING_MAP(Imul32,  0x00, 0xAF, REG_DEF0 | SETS_CCODES),
+
+  { kX86CmpxchgRR, kRegRegStore, IS_BINARY_OP | REG_DEF0 | REG_USE01 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "!0r,!1r" },
+  { kX86CmpxchgMR, kMemReg,   IS_STORE | IS_TERTIARY_OP | REG_USE02 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "[!0r+!1d],!2r" },
+  { kX86CmpxchgAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "[!0r+!1r<<!2d+!3d],!4r" },
+  { kX86LockCmpxchgRR, kRegRegStore, IS_BINARY_OP | REG_DEF0 | REG_USE01 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "!0r,!1r" },
+  { kX86LockCmpxchgMR, kMemReg,   IS_STORE | IS_TERTIARY_OP | REG_USE02 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "[!0r+!1d],!2r" },
+  { kX86LockCmpxchgAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "[!0r+!1r<<!2d+!3d],!4r" },
+
+  EXT_0F_ENCODING_MAP(Movzx8,  0x00, 0xB6, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Movzx16, 0x00, 0xB7, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Movsx8,  0x00, 0xBE, REG_DEF0),
+  EXT_0F_ENCODING_MAP(Movsx16, 0x00, 0xBF, REG_DEF0),
+#undef EXT_0F_ENCODING_MAP
+
+  { kX86Jcc8,  kJcc,  IS_BINARY_OP | IS_BRANCH | NEEDS_FIXUP | USES_CCODES, { 0,             0, 0x70, 0,    0, 0, 0, 0 }, "Jcc8",  "!1c !0t" },
+  { kX86Jcc32, kJcc,  IS_BINARY_OP | IS_BRANCH | NEEDS_FIXUP | USES_CCODES, { 0,             0, 0x0F, 0x80, 0, 0, 0, 0 }, "Jcc32", "!1c !0t" },
+  { kX86Jmp8,  kJmp,  IS_UNARY_OP  | IS_BRANCH | NEEDS_FIXUP,               { 0,             0, 0xEB, 0,    0, 0, 0, 0 }, "Jmp8",  "!0t" },
+  { kX86Jmp32, kJmp,  IS_UNARY_OP  | IS_BRANCH | NEEDS_FIXUP,               { 0,             0, 0xE9, 0,    0, 0, 0, 0 }, "Jmp32", "!0t" },
+  { kX86JmpR,  kJmp,  IS_UNARY_OP  | IS_BRANCH | REG_USE0,                  { 0,             0, 0xFF, 0,    0, 4, 0, 0 }, "JmpR",  "!0r" },
+  { kX86CallR, kCall, IS_UNARY_OP  | IS_BRANCH | REG_USE0,                  { 0,             0, 0xE8, 0,    0, 0, 0, 0 }, "CallR", "!0r" },
+  { kX86CallM, kCall, IS_BINARY_OP | IS_BRANCH | IS_LOAD | REG_USE0,        { 0,             0, 0xFF, 0,    0, 2, 0, 0 }, "CallM", "[!0r+!1d]" },
+  { kX86CallA, kCall, IS_QUAD_OP   | IS_BRANCH | IS_LOAD | REG_USE01,       { 0,             0, 0xFF, 0,    0, 2, 0, 0 }, "CallA", "[!0r+!1r<<!2d+!3d]" },
+  { kX86CallT, kCall, IS_UNARY_OP  | IS_BRANCH | IS_LOAD,                   { THREAD_PREFIX, 0, 0xFF, 0,    0, 2, 0, 0 }, "CallT", "fs:[!0d]" },
+  { kX86Ret,   kNullary, NO_OPERAND | IS_BRANCH,                            { 0,             0, 0xC3, 0,    0, 0, 0, 0 }, "Ret", "" },
+
+  { kX86StartOfMethod, kMacro,  IS_UNARY_OP | SETS_CCODES,             { 0, 0, 0,    0, 0, 0, 0, 0 }, "StartOfMethod", "!0r" },
+  { kX86PcRelLoadRA,   kPcRel,  IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8B, 0, 0, 0, 0, 0 }, "PcRelLoadRA",   "!0r,[!1r+!2r<<!3d+!4p]" },
+  { kX86PcRelAdr,      kPcRel,  IS_LOAD | IS_BINARY_OP | REG_DEF0,     { 0, 0, 0xB8, 0, 0, 0, 0, 4 }, "PcRelAdr",      "!0r,!1d" },
+};
+
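+// Size of an encoded instruction: prefixes + opcode byte(s) + modrm +
+// optional SIB + 8- or 32-bit displacement + immediate bytes.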
+static size_t ComputeSize(const X86EncodingMap* entry, int base, int displacement, bool has_sib) {
+  size_t size = 0;
+  if (entry->skeleton.prefix1 > 0) {
+    ++size;
+    if (entry->skeleton.prefix2 > 0) {
+      ++size;
+    }
+  }
+  ++size;  // opcode
+  if (entry->skeleton.opcode == 0x0F) {
+    ++size;
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      ++size;
+    }
+  }
+  ++size;  // modrm
+  if (has_sib || base == rX86_SP) {
+    // SP requires a SIB byte.
+    ++size;
+  }
+  if (displacement != 0 || base == rBP) {
+    // BP requires an explicit displacement, even when it's 0.
+    if (entry->opcode != kX86Lea32RA) {
+      DCHECK_NE(entry->flags & (IS_LOAD | IS_STORE), 0ULL) << entry->name;
+    }
+    size += IS_SIMM8(displacement) ? 1 : 4;
+  }
+  size += entry->skeleton.immediate_bytes;
+  return size;
+}
+
+int X86Mir2Lir::GetInsnSize(LIR* lir) {
+  const X86EncodingMap* entry = &X86Mir2Lir::EncodingMap[lir->opcode];
+  switch (entry->kind) {
+    case kData:
+      return 4;  // 4 bytes of data
+    case kNop:
+      return lir->operands[0];  // length of nop is sole operand
+    case kNullary:
+      return 1;  // 1 byte of opcode
+    case kReg:  // lir operands - 0: reg
+      return ComputeSize(entry, 0, 0, false);
+    case kMem:  // lir operands - 0: base, 1: disp
+      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+    case kArray:  // lir operands - 0: base, 1: index, 2: scale, 3: disp
+      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+    case kMemReg:  // lir operands - 0: base, 1: disp, 2: reg
+      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+    case kArrayReg:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
+      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+    case kThreadReg:  // lir operands - 0: disp, 1: reg
+      return ComputeSize(entry, 0, lir->operands[0], false);
+    case kRegReg:
+      return ComputeSize(entry, 0, 0, false);
+    case kRegRegStore:
+      return ComputeSize(entry, 0, 0, false);
+    case kRegMem:  // lir operands - 0: reg, 1: base, 2: disp
+      return ComputeSize(entry, lir->operands[1], lir->operands[2], false);
+    case kRegArray:   // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
+      return ComputeSize(entry, lir->operands[1], lir->operands[4], true);
+    case kRegThread:  // lir operands - 0: reg, 1: disp
+      return ComputeSize(entry, 0, 0x12345678, false);  // displacement size is always 32-bit
+    case kRegImm: {  // lir operands - 0: reg, 1: immediate
+      size_t size = ComputeSize(entry, 0, 0, false);
+      if (entry->skeleton.ax_opcode == 0) {
+        return size;
+      } else {
+        // AX opcodes don't require the modrm byte.
+        int reg = lir->operands[0];
+        return size - (reg == rAX ? 1 : 0);
+      }
+    }
+    case kMemImm:  // lir operands - 0: base, 1: disp, 2: immediate
+      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+    case kArrayImm:  // lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
+      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+    case kThreadImm:  // lir operands - 0: disp, 1: imm
+      return ComputeSize(entry, 0, 0x12345678, false);  // displacement size is always 32-bit
+    case kRegRegImm:  // lir operands - 0: reg, 1: reg, 2: imm
+      return ComputeSize(entry, 0, 0, false);
+    case kRegMemImm:  // lir operands - 0: reg, 1: base, 2: disp, 3: imm
+      return ComputeSize(entry, lir->operands[1], lir->operands[2], false);
+    case kRegArrayImm:  // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp, 5: imm
+      return ComputeSize(entry, lir->operands[1], lir->operands[4], true);
+    case kMovRegImm:  // lir operands - 0: reg, 1: immediate
+      return 1 + entry->skeleton.immediate_bytes;
+    case kShiftRegImm:  // lir operands - 0: reg, 1: immediate
+      // Shift by immediate one has a shorter opcode.
+      return ComputeSize(entry, 0, 0, false) - (lir->operands[1] == 1 ? 1 : 0);
+    case kShiftMemImm:  // lir operands - 0: base, 1: disp, 2: immediate
+      // Shift by immediate one has a shorter opcode.
+      return ComputeSize(entry, lir->operands[0], lir->operands[1], false) -
+             (lir->operands[2] == 1 ? 1 : 0);
+    case kShiftArrayImm:  // lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
+      // Shift by immediate one has a shorter opcode.
+      return ComputeSize(entry, lir->operands[0], lir->operands[3], true) -
+             (lir->operands[4] == 1 ? 1 : 0);
+    case kShiftRegCl:
+      return ComputeSize(entry, 0, 0, false);
+    case kShiftMemCl:  // lir operands - 0: base, 1: disp, 2: cl
+      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+    case kShiftArrayCl:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
+      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+    case kRegCond:  // lir operands - 0: reg, 1: cond
+      return ComputeSize(entry, 0, 0, false);
+    case kMemCond:  // lir operands - 0: base, 1: disp, 2: cond
+      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+    case kArrayCond:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: cond
+      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+    case kJcc:
+      if (lir->opcode == kX86Jcc8) {
+        return 2;  // opcode + rel8
+      } else {
+        DCHECK(lir->opcode == kX86Jcc32);
+        return 6;  // 2 byte opcode + rel32
+      }
+    case kJmp:
+      if (lir->opcode == kX86Jmp8) {
+        return 2;  // opcode + rel8
+      } else if (lir->opcode == kX86Jmp32) {
+        return 5;  // opcode + rel32
+      } else {
+        DCHECK(lir->opcode == kX86JmpR);
+        return 2;  // opcode + modrm
+      }
+    case kCall:
+      switch (lir->opcode) {
+        case kX86CallR: return 2;  // opcode modrm
+        case kX86CallM:  // lir operands - 0: base, 1: disp
+          return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
+        case kX86CallA:  // lir operands - 0: base, 1: index, 2: scale, 3: disp
+          return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
+        case kX86CallT:  // lir operands - 0: disp
+          return ComputeSize(entry, 0, 0x12345678, false);  // displacement size is always 32-bit
+        default:
+          break;
+      }
+      break;
+    case kPcRel:
+      if (entry->opcode == kX86PcRelLoadRA) {
+        // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
+        return ComputeSize(entry, lir->operands[1], 0x12345678, true);
+      } else {
+        DCHECK(entry->opcode == kX86PcRelAdr);
+        return 5;  // opcode with reg + 4 byte immediate
+      }
+    case kMacro:
+      DCHECK_EQ(lir->opcode, static_cast<int>(kX86StartOfMethod));
+      return 5 /* call opcode + 4 byte displacement */ + 1 /* pop reg */ +
+          ComputeSize(&X86Mir2Lir::EncodingMap[kX86Sub32RI], 0, 0, false) -
+          (lir->operands[0] == rAX  ? 1 : 0);  // shorter ax encoding
+    default:
+      break;
+  }
+  UNIMPLEMENTED(FATAL) << "Unimplemented size encoding for: " << entry->name;
+  return 0;
+}
+
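+// The modrm "mod" field for a displacement: 0 = no displacement, 1 = disp8,
+// 2 = disp32.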
+static uint8_t ModrmForDisp(int base, int disp) {
+  // BP requires an explicit disp, so do not omit it in the 0 case
+  if (disp == 0 && base != rBP) {
+    return 0;
+  } else if (IS_SIMM8(disp)) {
+    return 1;
+  } else {
+    return 2;
+  }
+}
+
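+// Emits the displacement, if any, in little-endian byte order.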
+void X86Mir2Lir::EmitDisp(int base, int disp) {
+  // BP requires an explicit disp, so do not omit it in the 0 case
+  if (disp == 0 && base != rBP) {
+    return;
+  } else if (IS_SIMM8(disp)) {
+    code_buffer_.push_back(disp & 0xFF);
+  } else {
+    code_buffer_.push_back(disp & 0xFF);
+    code_buffer_.push_back((disp >> 8) & 0xFF);
+    code_buffer_.push_back((disp >> 16) & 0xFF);
+    code_buffer_.push_back((disp >> 24) & 0xFF);
+  }
+}
+
+void X86Mir2Lir::EmitOpReg(const X86EncodingMap* entry, uint8_t reg) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  if (X86_FPREG(reg)) {
+    reg = reg & X86_FP_REG_MASK;
+  }
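+  // Byte-register numbers 4-7 encode ah/ch/dh/bh, so reject them for 8-bit ops.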
+  if (reg >= 4) {
+    DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+        << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+  }
+  DCHECK_LT(reg, 8);
+  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+  code_buffer_.push_back(modrm);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitOpMem(const X86EncodingMap* entry, uint8_t base, int disp) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+  DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  DCHECK_LT(entry->skeleton.modrm_opcode, 8);
+  DCHECK_LT(base, 8);
+  uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (entry->skeleton.modrm_opcode << 3) | base;
+  code_buffer_.push_back(modrm);
+  EmitDisp(base, disp);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitMemReg(const X86EncodingMap* entry,
+                       uint8_t base, int disp, uint8_t reg) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  if (X86_FPREG(reg)) {
+    reg = reg & X86_FP_REG_MASK;
+  }
+  if (reg >= 4) {
+    DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+        << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+  }
+  DCHECK_LT(reg, 8);
+  DCHECK_LT(base, 8);
+  uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg << 3) | base;
+  code_buffer_.push_back(modrm);
+  if (base == rX86_SP) {
+    // Special SIB for SP base
+    code_buffer_.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP);
+  }
+  EmitDisp(base, disp);
+  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitRegMem(const X86EncodingMap* entry,
+                       uint8_t reg, uint8_t base, int disp) {
+  // Same modrm/SIB layout; the opcode already encodes the operand direction.
+  EmitMemReg(entry, base, disp, reg);
+}
+
+void X86Mir2Lir::EmitRegArray(const X86EncodingMap* entry, uint8_t reg, uint8_t base, uint8_t index,
+                  int scale, int disp) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  if (X86_FPREG(reg)) {
+    reg = reg & X86_FP_REG_MASK;
+  }
+  DCHECK_LT(reg, 8);
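+  // r/m == rX86_SP (0b100) selects SIB addressing; the SIB byte follows.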
+  uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg << 3) | rX86_SP;
+  code_buffer_.push_back(modrm);
+  DCHECK_LT(scale, 4);
+  DCHECK_LT(index, 8);
+  DCHECK_LT(base, 8);
+  uint8_t sib = (scale << 6) | (index << 3) | base;
+  code_buffer_.push_back(sib);
+  EmitDisp(base, disp);
+  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitArrayReg(const X86EncodingMap* entry, uint8_t base, uint8_t index, int scale, int disp,
+                  uint8_t reg) {
+  // Same modrm/SIB layout; the opcode already encodes the operand direction.
+  EmitRegArray(entry, reg, base, index, scale, disp);
+}
+
+void X86Mir2Lir::EmitRegThread(const X86EncodingMap* entry, uint8_t reg, int disp) {
+  DCHECK_NE(entry->skeleton.prefix1, 0);
+  code_buffer_.push_back(entry->skeleton.prefix1);
+  if (entry->skeleton.prefix2 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  if (X86_FPREG(reg)) {
+    reg = reg & X86_FP_REG_MASK;
+  }
+  if (reg >= 4) {
+    DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+        << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+  }
+  DCHECK_LT(reg, 8);
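+  // mod == 0 with r/m == rBP encodes a bare disp32; with the thread prefix
+  // this addresses fs:[disp].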
+  uint8_t modrm = (0 << 6) | (reg << 3) | rBP;
+  code_buffer_.push_back(modrm);
+  code_buffer_.push_back(disp & 0xFF);
+  code_buffer_.push_back((disp >> 8) & 0xFF);
+  code_buffer_.push_back((disp >> 16) & 0xFF);
+  code_buffer_.push_back((disp >> 24) & 0xFF);
+  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitRegReg(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  if (X86_FPREG(reg1)) {
+    reg1 = reg1 & X86_FP_REG_MASK;
+  }
+  if (X86_FPREG(reg2)) {
+    reg2 = reg2 & X86_FP_REG_MASK;
+  }
+  DCHECK_LT(reg1, 8);
+  DCHECK_LT(reg2, 8);
+  uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2;
+  code_buffer_.push_back(modrm);
+  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitRegRegImm(const X86EncodingMap* entry,
+                          uint8_t reg1, uint8_t reg2, int32_t imm) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  if (X86_FPREG(reg1)) {
+    reg1 = reg1 & X86_FP_REG_MASK;
+  }
+  if (X86_FPREG(reg2)) {
+    reg2 = reg2 & X86_FP_REG_MASK;
+  }
+  DCHECK_LT(reg1, 8);
+  DCHECK_LT(reg2, 8);
+  uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2;
+  code_buffer_.push_back(modrm);
+  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  switch (entry->skeleton.immediate_bytes) {
+    case 1:
+      DCHECK(IS_SIMM8(imm));
+      code_buffer_.push_back(imm & 0xFF);
+      break;
+    case 2:
+      DCHECK(IS_SIMM16(imm));
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
+      break;
+    case 4:
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
+      code_buffer_.push_back((imm >> 16) & 0xFF);
+      code_buffer_.push_back((imm >> 24) & 0xFF);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
+                 << ") for instruction: " << entry->name;
+      break;
+  }
+}
+
+void X86Mir2Lir::EmitRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
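+  // eAX has shorter, modrm-free encodings for many immediate forms.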
+  if (reg == rAX && entry->skeleton.ax_opcode != 0) {
+    code_buffer_.push_back(entry->skeleton.ax_opcode);
+  } else {
+    code_buffer_.push_back(entry->skeleton.opcode);
+    if (entry->skeleton.opcode == 0x0F) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode1);
+      if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+        code_buffer_.push_back(entry->skeleton.extra_opcode2);
+      } else {
+        DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+      }
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+    if (X86_FPREG(reg)) {
+      reg = reg & X86_FP_REG_MASK;
+    }
+    uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+    code_buffer_.push_back(modrm);
+  }
+  switch (entry->skeleton.immediate_bytes) {
+    case 1:
+      DCHECK(IS_SIMM8(imm));
+      code_buffer_.push_back(imm & 0xFF);
+      break;
+    case 2:
+      DCHECK(IS_SIMM16(imm));
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
+      break;
+    case 4:
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
+      code_buffer_.push_back((imm >> 16) & 0xFF);
+      code_buffer_.push_back((imm >> 24) & 0xFF);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
+          << ") for instruction: " << entry->name;
+      break;
+  }
+}
+
+void X86Mir2Lir::EmitThreadImm(const X86EncodingMap* entry, int disp, int imm) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
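+  // mod=00 with r/m=rBP (0b101) means disp32 absolute; the segment prefix above makes it thread-relative.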
+  uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rBP;
+  code_buffer_.push_back(modrm);
+  code_buffer_.push_back(disp & 0xFF);
+  code_buffer_.push_back((disp >> 8) & 0xFF);
+  code_buffer_.push_back((disp >> 16) & 0xFF);
+  code_buffer_.push_back((disp >> 24) & 0xFF);
+  switch (entry->skeleton.immediate_bytes) {
+    case 1:
+      DCHECK(IS_SIMM8(imm));
+      code_buffer_.push_back(imm & 0xFF);
+      break;
+    case 2:
+      DCHECK(IS_SIMM16(imm));
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
+      break;
+    case 4:
+      code_buffer_.push_back(imm & 0xFF);
+      code_buffer_.push_back((imm >> 8) & 0xFF);
+      code_buffer_.push_back((imm >> 16) & 0xFF);
+      code_buffer_.push_back((imm >> 24) & 0xFF);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes
+          << ") for instruction: " << entry->name;
+      break;
+  }
+  DCHECK_EQ(entry->skeleton.ax_opcode, 0);
+}
+
+void X86Mir2Lir::EmitMovRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) {
+  DCHECK_LT(reg, 8);
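+  // 0xB8+reg encodes "mov reg32, imm32"; the four immediate bytes follow.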
+  code_buffer_.push_back(0xB8 + reg);
+  code_buffer_.push_back(imm & 0xFF);
+  code_buffer_.push_back((imm >> 8) & 0xFF);
+  code_buffer_.push_back((imm >> 16) & 0xFF);
+  code_buffer_.push_back((imm >> 24) & 0xFF);
+}
+
+void X86Mir2Lir::EmitShiftRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  if (imm != 1) {
+    code_buffer_.push_back(entry->skeleton.opcode);
+  } else {
+    // Shorter encoding for a shift by one; the ax_opcode slot holds that form.
+    code_buffer_.push_back(entry->skeleton.ax_opcode);
+  }
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  if (reg >= 4) {
+    DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
+        << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
+  }
+  DCHECK_LT(reg, 8);
+  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+  code_buffer_.push_back(modrm);
+  if (imm != 1) {
+    DCHECK_EQ(entry->skeleton.immediate_bytes, 1);
+    DCHECK(IS_SIMM8(imm));
+    code_buffer_.push_back(imm & 0xFF);
+  }
+}
+
+void X86Mir2Lir::EmitShiftRegCl(const X86EncodingMap* entry, uint8_t reg, uint8_t cl) {
+  DCHECK_EQ(cl, static_cast<uint8_t>(rCX));
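+  // The shift count is implicitly CL; only the target register is encoded in ModRM.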
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+  DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  DCHECK_LT(reg, 8);
+  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+  code_buffer_.push_back(modrm);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitRegCond(const X86EncodingMap* entry, uint8_t reg, uint8_t condition) {
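+  // setcc r/m8 encodes as the two-byte opcode 0x0F, 0x90+condition.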
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0x0F, entry->skeleton.opcode);
+  code_buffer_.push_back(0x0F);
+  DCHECK_EQ(0x90, entry->skeleton.extra_opcode1);
+  code_buffer_.push_back(0x90 | condition);
+  DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  DCHECK_LT(reg, 8);
+  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+  code_buffer_.push_back(modrm);
+  DCHECK_EQ(entry->skeleton.immediate_bytes, 0);
+}
+
+void X86Mir2Lir::EmitJmp(const X86EncodingMap* entry, int rel) {
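+  // jmp rel8 is 0xEB and jmp rel32 is 0xE9; the register-indirect form uses the map's opcode.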
+  if (entry->opcode == kX86Jmp8) {
+    DCHECK(IS_SIMM8(rel));
+    code_buffer_.push_back(0xEB);
+    code_buffer_.push_back(rel & 0xFF);
+  } else if (entry->opcode == kX86Jmp32) {
+    code_buffer_.push_back(0xE9);
+    code_buffer_.push_back(rel & 0xFF);
+    code_buffer_.push_back((rel >> 8) & 0xFF);
+    code_buffer_.push_back((rel >> 16) & 0xFF);
+    code_buffer_.push_back((rel >> 24) & 0xFF);
+  } else {
+    DCHECK(entry->opcode == kX86JmpR);
+    code_buffer_.push_back(entry->skeleton.opcode);
+    uint8_t reg = static_cast<uint8_t>(rel);
+    DCHECK_LT(reg, 8);
+    uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
+    code_buffer_.push_back(modrm);
+  }
+}
+
+void X86Mir2Lir::EmitJcc(const X86EncodingMap* entry, int rel, uint8_t cc) {
+  DCHECK_LT(cc, 16);
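+  // Jcc rel8 encodes as 0x70+cc; Jcc rel32 as 0x0F, 0x80+cc.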
+  if (entry->opcode == kX86Jcc8) {
+    DCHECK(IS_SIMM8(rel));
+    code_buffer_.push_back(0x70 | cc);
+    code_buffer_.push_back(rel & 0xFF);
+  } else {
+    DCHECK(entry->opcode == kX86Jcc32);
+    code_buffer_.push_back(0x0F);
+    code_buffer_.push_back(0x80 | cc);
+    code_buffer_.push_back(rel & 0xFF);
+    code_buffer_.push_back((rel >> 8) & 0xFF);
+    code_buffer_.push_back((rel >> 16) & 0xFF);
+    code_buffer_.push_back((rel >> 24) & 0xFF);
+  }
+}
+
+void X86Mir2Lir::EmitCallMem(const X86EncodingMap* entry, uint8_t base, int disp) {
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (entry->skeleton.modrm_opcode << 3) | base;
+  code_buffer_.push_back(modrm);
+  if (base == rX86_SP) {
+    // r/m == ESP signals a SIB byte; encode scale=0, index=none (ESP), base=ESP.
+    code_buffer_.push_back((0 << 6) | (rX86_SP << 3) | rX86_SP);
+  }
+  EmitDisp(base, disp);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitCallThread(const X86EncodingMap* entry, int disp) {
+  DCHECK_NE(entry->skeleton.prefix1, 0);
+  code_buffer_.push_back(entry->skeleton.prefix1);
+  if (entry->skeleton.prefix2 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix2);
+  }
+  code_buffer_.push_back(entry->skeleton.opcode);
+  if (entry->skeleton.opcode == 0x0F) {
+    code_buffer_.push_back(entry->skeleton.extra_opcode1);
+    if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) {
+      code_buffer_.push_back(entry->skeleton.extra_opcode2);
+    } else {
+      DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+  }
+  uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rBP;
+  code_buffer_.push_back(modrm);
+  code_buffer_.push_back(disp & 0xFF);
+  code_buffer_.push_back((disp >> 8) & 0xFF);
+  code_buffer_.push_back((disp >> 16) & 0xFF);
+  code_buffer_.push_back((disp >> 24) & 0xFF);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
+void X86Mir2Lir::EmitPcRel(const X86EncodingMap* entry, uint8_t reg,
+                           int base_or_table, uint8_t index, int scale, int table_or_disp) {
+  int disp;
+  if (entry->opcode == kX86PcRelLoadRA) {
+    Mir2Lir::SwitchTable *tab_rec = reinterpret_cast<Mir2Lir::SwitchTable*>(table_or_disp);
+    disp = tab_rec->offset;
+  } else {
+    DCHECK(entry->opcode == kX86PcRelAdr);
+    Mir2Lir::FillArrayData *tab_rec = reinterpret_cast<Mir2Lir::FillArrayData*>(base_or_table);
+    disp = tab_rec->offset;
+  }
+  if (entry->skeleton.prefix1 != 0) {
+    code_buffer_.push_back(entry->skeleton.prefix1);
+    if (entry->skeleton.prefix2 != 0) {
+      code_buffer_.push_back(entry->skeleton.prefix2);
+    }
+  } else {
+    DCHECK_EQ(0, entry->skeleton.prefix2);
+  }
+  if (X86_FPREG(reg)) {
+    reg = reg & X86_FP_REG_MASK;
+  }
+  DCHECK_LT(reg, 8);
+  if (entry->opcode == kX86PcRelLoadRA) {
+    code_buffer_.push_back(entry->skeleton.opcode);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
+    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
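+    // mod=10 (disp32) with r/m=ESP forces a SIB byte: [base + index << scale + disp32].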
+    uint8_t modrm = (2 << 6) | (reg << 3) | rX86_SP;
+    code_buffer_.push_back(modrm);
+    DCHECK_LT(scale, 4);
+    DCHECK_LT(index, 8);
+    DCHECK_LT(base_or_table, 8);
+    uint8_t base = static_cast<uint8_t>(base_or_table);
+    uint8_t sib = (scale << 6) | (index << 3) | base;
+    code_buffer_.push_back(sib);
+    DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+  } else {
+    code_buffer_.push_back(entry->skeleton.opcode + reg);
+  }
+  code_buffer_.push_back(disp & 0xFF);
+  code_buffer_.push_back((disp >> 8) & 0xFF);
+  code_buffer_.push_back((disp >> 16) & 0xFF);
+  code_buffer_.push_back((disp >> 24) & 0xFF);
+  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+}
+
+void X86Mir2Lir::EmitMacro(const X86EncodingMap* entry, uint8_t reg, int offset) {
+  DCHECK(entry->opcode == kX86StartOfMethod) << entry->name;
+  code_buffer_.push_back(0xE8);  // call +0
+  code_buffer_.push_back(0);
+  code_buffer_.push_back(0);
+  code_buffer_.push_back(0);
+  code_buffer_.push_back(0);
+
+  DCHECK_LT(reg, 8);
+  code_buffer_.push_back(0x58 + reg);  // pop reg
+
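+  // reg now holds the address of the pop; subtracting (offset + 5) yields the method start.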
+  EmitRegImm(&X86Mir2Lir::EncodingMap[kX86Sub32RI], reg, offset + 5 /* size of call +0 */);
+}
+
+void X86Mir2Lir::EmitUnimplemented(const X86EncodingMap* entry, LIR* lir) {
+  UNIMPLEMENTED(WARNING) << "encoding kind for " << entry->name << " "
+                         << BuildInsnString(entry->fmt, lir, 0);
+  for (int i = 0; i < GetInsnSize(lir); ++i) {
+    code_buffer_.push_back(0xCC);  // breakpoint instruction (int 3)
+  }
+}
+
+/*
+ * Assemble the LIR into binary instruction format.  Note that we may
+ * discover that pc-relative displacements may not fit the selected
+ * instruction.  In those cases we will try to substitute a new code
+ * sequence or request that the trace be shortened and retried.
+ */
+AssemblerStatus X86Mir2Lir::AssembleInstructions(uintptr_t start_addr) {
+  LIR *lir;
+  AssemblerStatus res = kSuccess;  // Assume success
+
+  const bool kVerbosePcFixup = false;
+  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
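+    // Negative opcodes are pseudo-instructions that emit no machine code.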
+    if (lir->opcode < 0) {
+      continue;
+    }
+
+    if (lir->flags.is_nop) {
+      continue;
+    }
+
+    if (lir->flags.pcRelFixup) {
+      switch (lir->opcode) {
+        case kX86Jcc8: {
+          LIR *target_lir = lir->target;
+          DCHECK(target_lir != NULL);
+          int delta = 0;
+          uintptr_t pc;
+          if (IS_SIMM8(lir->operands[0])) {
+            pc = lir->offset + 2 /* opcode + rel8 */;
+          } else {
+            pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
+          }
+          uintptr_t target = target_lir->offset;
+          delta = target - pc;
+          if (IS_SIMM8(delta) != IS_SIMM8(lir->operands[0])) {
+            if (kVerbosePcFixup) {
+              LOG(INFO) << "Retry for JCC growth at " << lir->offset
+                  << " delta: " << delta << " old delta: " << lir->operands[0];
+            }
+            lir->opcode = kX86Jcc32;
+            SetupResourceMasks(lir);
+            res = kRetryAll;
+          }
+          if (kVerbosePcFixup) {
+            LOG(INFO) << "Source:";
+            DumpLIRInsn(lir, 0);
+            LOG(INFO) << "Target:";
+            DumpLIRInsn(target_lir, 0);
+            LOG(INFO) << "Delta " << delta;
+          }
+          lir->operands[0] = delta;
+          break;
+        }
+        case kX86Jcc32: {
+          LIR *target_lir = lir->target;
+          DCHECK(target_lir != NULL);
+          uintptr_t pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
+          uintptr_t target = target_lir->offset;
+          int delta = target - pc;
+          if (kVerbosePcFixup) {
+            LOG(INFO) << "Source:";
+            DumpLIRInsn(lir, 0);
+            LOG(INFO) << "Target:";
+            DumpLIRInsn(target_lir, 0);
+            LOG(INFO) << "Delta " << delta;
+          }
+          lir->operands[0] = delta;
+          break;
+        }
+        case kX86Jmp8: {
+          LIR *target_lir = lir->target;
+          DCHECK(target_lir != NULL);
+          int delta = 0;
+          uintptr_t pc;
+          if (IS_SIMM8(lir->operands[0])) {
+            pc = lir->offset + 2 /* opcode + rel8 */;
+          } else {
+            pc = lir->offset + 5 /* opcode + rel32 */;
+          }
+          uintptr_t target = target_lir->offset;
+          delta = target - pc;
+          if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && delta == 0) {
+            // Useless branch
+            lir->flags.is_nop = true;
+            if (kVerbosePcFixup) {
+              LOG(INFO) << "Retry for useless branch at " << lir->offset;
+            }
+            res = kRetryAll;
+          } else if (IS_SIMM8(delta) != IS_SIMM8(lir->operands[0])) {
+            if (kVerbosePcFixup) {
+              LOG(INFO) << "Retry for JMP growth at " << lir->offset;
+            }
+            lir->opcode = kX86Jmp32;
+            SetupResourceMasks(lir);
+            res = kRetryAll;
+          }
+          lir->operands[0] = delta;
+          break;
+        }
+        case kX86Jmp32: {
+          LIR *target_lir = lir->target;
+          DCHECK(target_lir != NULL);
+          uintptr_t pc = lir->offset + 5 /* opcode + rel32 */;
+          uintptr_t target = target_lir->offset;
+          int delta = target - pc;
+          lir->operands[0] = delta;
+          break;
+        }
+        default:
+          break;
+      }
+    }
+
+    /*
+     * If one of the pc-relative instructions expanded we'll have
+     * to make another pass.  Don't bother to fully assemble the
+     * instruction.
+     */
+    if (res != kSuccess) {
+      continue;
+    }
+    CHECK_EQ(static_cast<size_t>(lir->offset), code_buffer_.size());
+    const X86EncodingMap *entry = &X86Mir2Lir::EncodingMap[lir->opcode];
+    size_t starting_cbuf_size = code_buffer_.size();
+    switch (entry->kind) {
+      case kData:  // 4 bytes of data
+        code_buffer_.push_back(lir->operands[0]);
+        break;
+      case kNullary:  // 1 byte of opcode
+        DCHECK_EQ(0, entry->skeleton.prefix1);
+        DCHECK_EQ(0, entry->skeleton.prefix2);
+        code_buffer_.push_back(entry->skeleton.opcode);
+        if (entry->skeleton.extra_opcode1 != 0) {
+          code_buffer_.push_back(entry->skeleton.extra_opcode1);
+          if (entry->skeleton.extra_opcode2 != 0) {
+            code_buffer_.push_back(entry->skeleton.extra_opcode2);
+          }
+        } else {
+          DCHECK_EQ(0, entry->skeleton.extra_opcode2);
+        }
+        DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+        DCHECK_EQ(0, entry->skeleton.ax_opcode);
+        DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+        break;
+      case kReg:  // lir operands - 0: reg
+        EmitOpReg(entry, lir->operands[0]);
+        break;
+      case kMem:  // lir operands - 0: base, 1: disp
+        EmitOpMem(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kMemReg:  // lir operands - 0: base, 1: disp, 2: reg
+        EmitMemReg(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
+        break;
+      case kArrayReg:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
+        EmitArrayReg(entry, lir->operands[0], lir->operands[1], lir->operands[2],
+                     lir->operands[3], lir->operands[4]);
+        break;
+      case kRegMem:  // lir operands - 0: reg, 1: base, 2: disp
+        EmitRegMem(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
+        break;
+      case kRegArray:  // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
+        EmitRegArray(entry, lir->operands[0], lir->operands[1], lir->operands[2],
+                     lir->operands[3], lir->operands[4]);
+        break;
+      case kRegThread:  // lir operands - 0: reg, 1: disp
+        EmitRegThread(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kRegReg:  // lir operands - 0: reg1, 1: reg2
+        EmitRegReg(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kRegRegStore:  // lir operands - 0: reg2, 1: reg1
+        EmitRegReg(entry, lir->operands[1], lir->operands[0]);
+        break;
+      case kRegRegImm:
+        EmitRegRegImm(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
+        break;
+      case kRegImm:  // lir operands - 0: reg, 1: immediate
+        EmitRegImm(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kThreadImm:  // lir operands - 0: disp, 1: immediate
+        EmitThreadImm(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kMovRegImm:  // lir operands - 0: reg, 1: immediate
+        EmitMovRegImm(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kShiftRegImm:  // lir operands - 0: reg, 1: immediate
+        EmitShiftRegImm(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kShiftRegCl:  // lir operands - 0: reg, 1: cl
+        EmitShiftRegCl(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kRegCond:  // lir operands - 0: reg, 1: condition
+        EmitRegCond(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kJmp:  // lir operands - 0: rel
+        EmitJmp(entry, lir->operands[0]);
+        break;
+      case kJcc:  // lir operands - 0: rel, 1: CC, target assigned
+        EmitJcc(entry, lir->operands[0], lir->operands[1]);
+        break;
+      case kCall:
+        switch (entry->opcode) {
+          case kX86CallM:  // lir operands - 0: base, 1: disp
+            EmitCallMem(entry, lir->operands[0], lir->operands[1]);
+            break;
+          case kX86CallT:  // lir operands - 0: disp
+            EmitCallThread(entry, lir->operands[0]);
+            break;
+          default:
+            EmitUnimplemented(entry, lir);
+            break;
+        }
+        break;
+      case kPcRel:  // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
+        EmitPcRel(entry, lir->operands[0], lir->operands[1], lir->operands[2],
+                  lir->operands[3], lir->operands[4]);
+        break;
+      case kMacro:
+        EmitMacro(entry, lir->operands[0], lir->offset);
+        break;
+      default:
+        EmitUnimplemented(entry, lir);
+        break;
+    }
+    CHECK_EQ(static_cast<size_t>(GetInsnSize(lir)),
+             code_buffer_.size() - starting_cbuf_size)
+        << "Instruction size mismatch for entry: " << X86Mir2Lir::EncodingMap[lir->opcode].name;
+  }
+  return res;
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
new file mode 100644
index 0000000..d60be72
--- /dev/null
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -0,0 +1,283 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the X86 ISA */
+
+#include "codegen_x86.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "x86_lir.h"
+
+namespace art {
+
+void X86Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
+                                SpecialCaseHandler special_case)
+{
+  // TODO
+}
+
+/*
+ * The sparse table in the literal pool is an array of <key,displacement>
+ * pairs.
+ */
+void X86Mir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
+                                 RegLocation rl_src)
+{
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  if (cu_->verbose) {
+    DumpSparseSwitchTable(table);
+  }
+  int entries = table[1];
+  const int* keys = reinterpret_cast<const int*>(&table[2]);
+  const int* targets = &keys[entries];
+  rl_src = LoadValue(rl_src, kCoreReg);
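+  // Emit a linear compare-and-branch chain, one test per case key.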
+  for (int i = 0; i < entries; i++) {
+    int key = keys[i];
+    BasicBlock* case_block =
+        mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
+    OpCmpImmBranch(kCondEq, rl_src.low_reg, key,
+                   &block_label_list_[case_block->id]);
+  }
+}
+
+/*
+ * Code pattern will look something like:
+ *
+ * mov  r_val, ..
+ * call 0
+ * pop  r_start_of_method
+ * sub  r_start_of_method, ..
+ * mov  r_key_reg, r_val
+ * sub  r_key_reg, low_key
+ * cmp  r_key_reg, size-1  ; bound check
+ * ja   done
+ * mov  r_disp, [r_start_of_method + r_key_reg * 4 + table_offset]
+ * add  r_start_of_method, r_disp
+ * jmp  r_start_of_method
+ * done:
+ */
+void X86Mir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
+                                 RegLocation rl_src)
+{
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  if (cu_->verbose) {
+    DumpPackedSwitchTable(table);
+  }
+  // Add the table to the list - we'll process it later
+  SwitchTable *tab_rec =
+      static_cast<SwitchTable *>(arena_->NewMem(sizeof(SwitchTable), true,
+                                                ArenaAllocator::kAllocData));
+  tab_rec->table = table;
+  tab_rec->vaddr = current_dalvik_offset_;
+  int size = table[1];
+  tab_rec->targets = static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true,
+                                                       ArenaAllocator::kAllocLIR));
+  switch_tables_.Insert(tab_rec);
+
+  // Get the switch value
+  rl_src = LoadValue(rl_src, kCoreReg);
+  int start_of_method_reg = AllocTemp();
+  // Materialize a pointer to the switch table
+  NewLIR1(kX86StartOfMethod, start_of_method_reg);
+  int low_key = s4FromSwitchData(&table[2]);
+  int key_reg;
+  // Remove the bias, if necessary
+  if (low_key == 0) {
+    key_reg = rl_src.low_reg;
+  } else {
+    key_reg = AllocTemp();
+    OpRegRegImm(kOpSub, key_reg, rl_src.low_reg, low_key);
+  }
+  // Bounds check - if < 0 or >= size, fall through to the code after the switch
+  OpRegImm(kOpCmp, key_reg, size - 1);
+  LIR* branch_over = OpCondBranch(kCondHi, NULL);
+
+  // Load the displacement from the switch table
+  int disp_reg = AllocTemp();
+  NewLIR5(kX86PcRelLoadRA, disp_reg, start_of_method_reg, key_reg, 2,
+          reinterpret_cast<uintptr_t>(tab_rec));
+  // Add displacement to start of method
+  OpRegReg(kOpAdd, start_of_method_reg, disp_reg);
+  // ...and go!
+  LIR* switch_branch = NewLIR1(kX86JmpR, start_of_method_reg);
+  tab_rec->anchor = switch_branch;
+
+  /* branch_over target here */
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  branch_over->target = target;
+}
+
+/*
+ * Array data table format:
+ *  ushort ident = 0x0300   magic value
+ *  ushort width            width of each element in the table
+ *  uint   size             number of elements in the table
+ *  ubyte  data[size*width] table of data values (may contain a single-byte
+ *                          padding at the end)
+ *
+ * Total size is 4+(width * size + 1)/2 16-bit code units.
+ */
+void X86Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src)
+{
+  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  // Add the table to the list - we'll process it later
+  FillArrayData *tab_rec =
+      static_cast<FillArrayData*>(arena_->NewMem(sizeof(FillArrayData), true,
+                                                 ArenaAllocator::kAllocData));
+  tab_rec->table = table;
+  tab_rec->vaddr = current_dalvik_offset_;
+  uint16_t width = tab_rec->table[1];
+  uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
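+  // The extra 8 bytes are the table header: ident (2 bytes), width (2), element count (4).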
+  tab_rec->size = (size * width) + 8;
+
+  fill_array_data_.Insert(tab_rec);
+
+  // Making a call - use explicit registers
+  FlushAllRegs();   /* Everything to home location */
+  LoadValueDirectFixed(rl_src, rX86_ARG0);
+  // Materialize a pointer to the fill data image
+  NewLIR1(kX86StartOfMethod, rX86_ARG2);
+  NewLIR2(kX86PcRelAdr, rX86_ARG1, reinterpret_cast<uintptr_t>(tab_rec));
+  NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2);
+  CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0,
+                          rX86_ARG1, true);
+}
+
+void X86Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src)
+{
+  FlushAllRegs();
+  LoadValueDirectFixed(rl_src, rCX);  // Get obj
+  LockCallTemps();  // Prepare for explicit register usage
+  GenNullCheck(rl_src.s_reg_low, rCX, opt_flags);
+  // If lock is unheld, try to grab it quickly with compare and exchange
+  // TODO: copy and clear hash state?
+  NewLIR2(kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
+  NewLIR2(kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
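+  // rDX now holds the thin-lock word for "owned by this thread, no recursion".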
+  NewLIR2(kX86Xor32RR, rAX, rAX);
+  NewLIR3(kX86LockCmpxchgMR, rCX, mirror::Object::MonitorOffset().Int32Value(), rDX);
+  LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
+  // If lock is held, go the expensive route - artLockObjectFromCode(self, obj);
+  CallRuntimeHelperReg(ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
+  branch->target = NewLIR0(kPseudoTargetLabel);
+}
+
+void X86Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src)
+{
+  FlushAllRegs();
+  LoadValueDirectFixed(rl_src, rAX);  // Get obj
+  LockCallTemps();  // Prepare for explicit register usage
+  GenNullCheck(rl_src.s_reg_low, rAX, opt_flags);
+  // If lock is held by the current thread, clear it to quickly release it
+  // TODO: clear hash state?
+  NewLIR2(kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
+  NewLIR2(kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
+  NewLIR3(kX86Mov32RM, rCX, rAX, mirror::Object::MonitorOffset().Int32Value());
+  OpRegReg(kOpSub, rCX, rDX);
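+  // If the word equals our thin-lock value, rCX is now 0; storing it releases the lock.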
+  LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondNe);
+  NewLIR3(kX86Mov32MR, rAX, mirror::Object::MonitorOffset().Int32Value(), rCX);
+  LIR* branch2 = NewLIR1(kX86Jmp8, 0);
+  branch->target = NewLIR0(kPseudoTargetLabel);
+  // Otherwise, go the expensive route - UnlockObjectFromCode(obj);
+  CallRuntimeHelperReg(ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true);
+  branch2->target = NewLIR0(kPseudoTargetLabel);
+}
+
+void X86Mir2Lir::GenMoveException(RegLocation rl_dest)
+{
+  int ex_offset = Thread::ExceptionOffset().Int32Value();
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  NewLIR2(kX86Mov32RT, rl_result.low_reg, ex_offset);
+  NewLIR2(kX86Mov32TI, ex_offset, 0);
+  StoreValue(rl_dest, rl_result);
+}
+
+/*
+ * Mark garbage collection card. Skip if the value we're storing is null.
+ */
+void X86Mir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg)
+{
+  int reg_card_base = AllocTemp();
+  int reg_card_no = AllocTemp();
+  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
+  NewLIR2(kX86Mov32RT, reg_card_base, Thread::CardTableOffset().Int32Value());
+  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
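+  // Dirty the card: store a byte of the card table base at base + (addr >> kCardShift).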
+  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0,
+                   kUnsignedByte);
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  branch_over->target = target;
+  FreeTemp(reg_card_base);
+  FreeTemp(reg_card_no);
+}
+
+void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method)
+{
+  /*
+   * On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live.  Let the register
+   * allocation mechanism know so it doesn't try to use any of them when
+   * expanding the frame or flushing.  This leaves the utility
+   * code with no spare temps.
+   */
+  LockTemp(rX86_ARG0);
+  LockTemp(rX86_ARG1);
+  LockTemp(rX86_ARG2);
+
+  /* Build frame, return address already on stack */
+  OpRegImm(kOpSub, rX86_SP, frame_size_ - 4);
+
+  /*
+   * We can safely skip the stack overflow check if we're
+   * a leaf *and* our frame size < fudge factor.
+   */
+  bool skip_overflow_check = (mir_graph_->MethodIsLeaf() &&
+                (static_cast<size_t>(frame_size_) <
+                Thread::kStackOverflowReservedBytes));
+  NewLIR0(kPseudoMethodEntry);
+  /* Spill core callee saves */
+  SpillCoreRegs();
+  /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
+  DCHECK_EQ(num_fp_spills_, 0);
+  if (!skip_overflow_check) {
+    // cmp rX86_SP, fs:[stack_end_]; jcc throw_launchpad
+    LIR* tgt = RawLIR(0, kPseudoThrowTarget, kThrowStackOverflow, 0, 0, 0, 0);
+    OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset().Int32Value());
+    OpCondBranch(kCondUlt, tgt);
+    // Remember branch target - will process later
+    throw_launchpads_.Insert(tgt);
+  }
+
+  FlushIns(ArgLocs, rl_method);
+
+  FreeTemp(rX86_ARG0);
+  FreeTemp(rX86_ARG1);
+  FreeTemp(rX86_ARG2);
+}
+
+void X86Mir2Lir::GenExitSequence() {
+  /*
+   * In the exit path, rX86_RET0/rX86_RET1 are live - make sure they aren't
+   * allocated by the register utilities as temps.
+   */
+  LockTemp(rX86_RET0);
+  LockTemp(rX86_RET1);
+
+  NewLIR0(kPseudoMethodExit);
+  UnSpillCoreRegs();
+  /* Remove frame except for return address */
+  OpRegImm(kOpAdd, rX86_SP, frame_size_ - 4);
+  NewLIR0(kX86Ret);
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
new file mode 100644
index 0000000..3e30141
--- /dev/null
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
+#define ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
+
+#include "dex/compiler_internals.h"
+#include "x86_lir.h"
+
+namespace art {
+
+class X86Mir2Lir : public Mir2Lir {
+  public:
+    X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
+
+    // Required for target - codegen helpers.
+    bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
+                                    RegLocation rl_dest, int lit);
+    int LoadHelper(int offset);
+    LIR* LoadBaseDisp(int rBase, int displacement, int r_dest, OpSize size, int s_reg);
+    LIR* LoadBaseDispWide(int rBase, int displacement, int r_dest_lo, int r_dest_hi,
+                                  int s_reg);
+    LIR* LoadBaseIndexed(int rBase, int r_index, int r_dest, int scale, OpSize size);
+    LIR* LoadBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                     int r_dest, int r_dest_hi, OpSize size, int s_reg);
+    LIR* LoadConstantNoClobber(int r_dest, int value);
+    LIR* LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value);
+    LIR* StoreBaseDisp(int rBase, int displacement, int r_src, OpSize size);
+    LIR* StoreBaseDispWide(int rBase, int displacement, int r_src_lo, int r_src_hi);
+    LIR* StoreBaseIndexed(int rBase, int r_index, int r_src, int scale, OpSize size);
+    LIR* StoreBaseIndexedDisp(int rBase, int r_index, int scale, int displacement,
+                                      int r_src, int r_src_hi, OpSize size, int s_reg);
+    void MarkGCCard(int val_reg, int tgt_addr_reg);
+
+    // Required for target - register utilities.
+    bool IsFpReg(int reg);
+    bool SameRegType(int reg1, int reg2);
+    int AllocTypedTemp(bool fp_hint, int reg_class);
+    int AllocTypedTempPair(bool fp_hint, int reg_class);
+    int S2d(int low_reg, int high_reg);
+    int TargetReg(SpecialTargetRegister reg);
+    RegisterInfo* GetRegInfo(int reg);
+    RegLocation GetReturnAlt();
+    RegLocation GetReturnWideAlt();
+    RegLocation LocCReturn();
+    RegLocation LocCReturnDouble();
+    RegLocation LocCReturnFloat();
+    RegLocation LocCReturnWide();
+    uint32_t FpRegMask();
+    uint64_t GetRegMaskCommon(int reg);
+    void AdjustSpillMask();
+    void ClobberCalleeSave();
+    void FlushReg(int reg);
+    void FlushRegWide(int reg1, int reg2);
+    void FreeCallTemps();
+    void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
+    void LockCallTemps();
+    void MarkPreservedSingle(int v_reg, int reg);
+    void CompilerInitializeRegAlloc();
+
+    // Required for target - miscellaneous.
+    AssemblerStatus AssembleInstructions(uintptr_t start_addr);
+    void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
+    void SetupTargetResourceMasks(LIR* lir);
+    const char* GetTargetInstFmt(int opcode);
+    const char* GetTargetInstName(int opcode);
+    std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+    uint64_t GetPCUseDefEncoding();
+    uint64_t GetTargetInstFlags(int opcode);
+    int GetInsnSize(LIR* lir);
+    bool IsUnconditionalBranch(LIR* lir);
+
+    // Required for target - Dalvik-level generators.
+    void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                   RegLocation rl_src1, RegLocation rl_src2);
+    void GenArrayObjPut(int opt_flags, RegLocation rl_array,
+                                RegLocation rl_index, RegLocation rl_src, int scale);
+    void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
+                             RegLocation rl_index, RegLocation rl_dest, int scale);
+    void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
+                             RegLocation rl_index, RegLocation rl_src, int scale);
+    void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                   RegLocation rl_src1, RegLocation rl_shift);
+    void GenMulLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenAddLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenAndLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenArithOpDouble(Instruction::Code opcode, RegLocation rl_dest,
+                                  RegLocation rl_src1, RegLocation rl_src2);
+    void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
+                                 RegLocation rl_src1, RegLocation rl_src2);
+    void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
+                          RegLocation rl_src2);
+    void GenConversion(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src);
+    bool GenInlinedCas32(CallInfo* info, bool need_write_barrier);
+    bool GenInlinedMinMaxInt(CallInfo* info, bool is_min);
+    bool GenInlinedSqrt(CallInfo* info);
+    void GenNegLong(RegLocation rl_dest, RegLocation rl_src);
+    void GenOrLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenSubLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenXorLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    LIR* GenRegMemCheck(ConditionCode c_code, int reg1, int base, int offset,
+                                ThrowKind kind);
+    RegLocation GenDivRem(RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div);
+    RegLocation GenDivRemLit(RegLocation rl_dest, int reg_lo, int lit, bool is_div);
+    void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
+    void GenDivZeroCheck(int reg_lo, int reg_hi);
+    void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
+    void GenExitSequence();
+    void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
+    void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
+    void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
+    void GenSelect(BasicBlock* bb, MIR* mir);
+    void GenMemBarrier(MemBarrierKind barrier_kind);
+    void GenMonitorEnter(int opt_flags, RegLocation rl_src);
+    void GenMonitorExit(int opt_flags, RegLocation rl_src);
+    void GenMoveException(RegLocation rl_dest);
+    void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result,
+                                               int lit, int first_bit, int second_bit);
+    void GenNegDouble(RegLocation rl_dest, RegLocation rl_src);
+    void GenNegFloat(RegLocation rl_dest, RegLocation rl_src);
+    void GenPackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+    void GenSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src);
+    void GenSpecialCase(BasicBlock* bb, MIR* mir, SpecialCaseHandler special_case);
+
+    // Single operation generators.
+    LIR* OpUnconditionalBranch(LIR* target);
+    LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target);
+    LIR* OpCmpImmBranch(ConditionCode cond, int reg, int check_value, LIR* target);
+    LIR* OpCondBranch(ConditionCode cc, LIR* target);
+    LIR* OpDecAndBranch(ConditionCode c_code, int reg, LIR* target);
+    LIR* OpFpRegCopy(int r_dest, int r_src);
+    LIR* OpIT(ConditionCode cond, const char* guide);
+    LIR* OpMem(OpKind op, int rBase, int disp);
+    LIR* OpPcRelLoad(int reg, LIR* target);
+    LIR* OpReg(OpKind op, int r_dest_src);
+    LIR* OpRegCopy(int r_dest, int r_src);
+    LIR* OpRegCopyNoInsert(int r_dest, int r_src);
+    LIR* OpRegImm(OpKind op, int r_dest_src1, int value);
+    LIR* OpRegMem(OpKind op, int r_dest, int rBase, int offset);
+    LIR* OpRegReg(OpKind op, int r_dest_src1, int r_src2);
+    LIR* OpRegRegImm(OpKind op, int r_dest, int r_src1, int value);
+    LIR* OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2);
+    LIR* OpTestSuspend(LIR* target);
+    LIR* OpThreadMem(OpKind op, int thread_offset);
+    LIR* OpVldm(int rBase, int count);
+    LIR* OpVstm(int rBase, int count);
+    void OpLea(int rBase, int reg1, int reg2, int scale, int offset);
+    void OpRegCopyWide(int dest_lo, int dest_hi, int src_lo, int src_hi);
+    void OpTlsCmp(int offset, int val);
+
+    void OpRegThreadMem(OpKind op, int r_dest, int thread_offset);
+    void SpillCoreRegs();
+    void UnSpillCoreRegs();
+    static const X86EncodingMap EncodingMap[kX86Last];
+    bool InexpensiveConstantInt(int32_t value);
+    bool InexpensiveConstantFloat(int32_t value);
+    bool InexpensiveConstantLong(int64_t value);
+    bool InexpensiveConstantDouble(int64_t value);
+
+  private:
+    void EmitDisp(int base, int disp);
+    void EmitOpReg(const X86EncodingMap* entry, uint8_t reg);
+    void EmitOpMem(const X86EncodingMap* entry, uint8_t base, int disp);
+    void EmitMemReg(const X86EncodingMap* entry, uint8_t base, int disp, uint8_t reg);
+    void EmitRegMem(const X86EncodingMap* entry, uint8_t reg, uint8_t base, int disp);
+    void EmitRegArray(const X86EncodingMap* entry, uint8_t reg, uint8_t base, uint8_t index,
+                      int scale, int disp);
+    void EmitArrayReg(const X86EncodingMap* entry, uint8_t base, uint8_t index, int scale, int disp,
+                      uint8_t reg);
+    void EmitRegThread(const X86EncodingMap* entry, uint8_t reg, int disp);
+    void EmitRegReg(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2);
+    void EmitRegRegImm(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2, int32_t imm);
+    void EmitRegImm(const X86EncodingMap* entry, uint8_t reg, int imm);
+    void EmitThreadImm(const X86EncodingMap* entry, int disp, int imm);
+    void EmitMovRegImm(const X86EncodingMap* entry, uint8_t reg, int imm);
+    void EmitShiftRegImm(const X86EncodingMap* entry, uint8_t reg, int imm);
+    void EmitShiftRegCl(const X86EncodingMap* entry, uint8_t reg, uint8_t cl);
+    void EmitRegCond(const X86EncodingMap* entry, uint8_t reg, uint8_t condition);
+    void EmitJmp(const X86EncodingMap* entry, int rel);
+    void EmitJcc(const X86EncodingMap* entry, int rel, uint8_t cc);
+    void EmitCallMem(const X86EncodingMap* entry, uint8_t base, int disp);
+    void EmitCallThread(const X86EncodingMap* entry, int disp);
+    void EmitPcRel(const X86EncodingMap* entry, uint8_t reg, int base_or_table, uint8_t index,
+                   int scale, int table_or_disp);
+    void EmitMacro(const X86EncodingMap* entry, uint8_t reg, int offset);
+    void EmitUnimplemented(const X86EncodingMap* entry, LIR* lir);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
new file mode 100644
index 0000000..906b4cc
--- /dev/null
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -0,0 +1,378 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_x86.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "x86_lir.h"
+
+namespace art {
+
+void X86Mir2Lir::GenArithOpFloat(Instruction::Code opcode,
+                                 RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+  X86OpCode op = kX86Nop;
+  RegLocation rl_result;
+
+  /*
+   * Don't attempt to optimize register usage since these opcodes call out to
+   * the handlers.
+   */
+  switch (opcode) {
+    case Instruction::ADD_FLOAT_2ADDR:
+    case Instruction::ADD_FLOAT:
+      op = kX86AddssRR;
+      break;
+    case Instruction::SUB_FLOAT_2ADDR:
+    case Instruction::SUB_FLOAT:
+      op = kX86SubssRR;
+      break;
+    case Instruction::DIV_FLOAT_2ADDR:
+    case Instruction::DIV_FLOAT:
+      op = kX86DivssRR;
+      break;
+    case Instruction::MUL_FLOAT_2ADDR:
+    case Instruction::MUL_FLOAT:
+      op = kX86MulssRR;
+      break;
+    case Instruction::REM_FLOAT_2ADDR:
+    case Instruction::REM_FLOAT:
+      FlushAllRegs();   // Send everything to home location
+      CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+      rl_result = GetReturn(true);
+      StoreValue(rl_dest, rl_result);
+      return;
+    case Instruction::NEG_FLOAT:
+      GenNegFloat(rl_dest, rl_src1);
+      return;
+    default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+  }
+  rl_src1 = LoadValue(rl_src1, kFPReg);
+  rl_src2 = LoadValue(rl_src2, kFPReg);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  int r_dest = rl_result.low_reg;
+  int r_src1 = rl_src1.low_reg;
+  int r_src2 = rl_src2.low_reg;
+  if (r_dest == r_src2) {
+    r_src2 = AllocTempFloat();
+    OpRegCopy(r_src2, r_dest);
+  }
+  OpRegCopy(r_dest, r_src1);
+  NewLIR2(op, r_dest, r_src2);
+  StoreValue(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::GenArithOpDouble(Instruction::Code opcode,
+                                  RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+  X86OpCode op = kX86Nop;
+  RegLocation rl_result;
+
+  switch (opcode) {
+    case Instruction::ADD_DOUBLE_2ADDR:
+    case Instruction::ADD_DOUBLE:
+      op = kX86AddsdRR;
+      break;
+    case Instruction::SUB_DOUBLE_2ADDR:
+    case Instruction::SUB_DOUBLE:
+      op = kX86SubsdRR;
+      break;
+    case Instruction::DIV_DOUBLE_2ADDR:
+    case Instruction::DIV_DOUBLE:
+      op = kX86DivsdRR;
+      break;
+    case Instruction::MUL_DOUBLE_2ADDR:
+    case Instruction::MUL_DOUBLE:
+      op = kX86MulsdRR;
+      break;
+    case Instruction::REM_DOUBLE_2ADDR:
+    case Instruction::REM_DOUBLE:
+      FlushAllRegs();   // Send everything to home location
+      CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+      rl_result = GetReturnWide(true);
+      StoreValueWide(rl_dest, rl_result);
+      return;
+    case Instruction::NEG_DOUBLE:
+      GenNegDouble(rl_dest, rl_src1);
+      return;
+    default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+  }
+  rl_src1 = LoadValueWide(rl_src1, kFPReg);
+  DCHECK(rl_src1.wide);
+  rl_src2 = LoadValueWide(rl_src2, kFPReg);
+  DCHECK(rl_src2.wide);
+  rl_result = EvalLoc(rl_dest, kFPReg, true);
+  DCHECK(rl_dest.wide);
+  DCHECK(rl_result.wide);
+  int r_dest = S2d(rl_result.low_reg, rl_result.high_reg);
+  int r_src1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
+  int r_src2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
+  if (r_dest == r_src2) {
+    r_src2 = AllocTempDouble() | X86_FP_DOUBLE;
+    OpRegCopy(r_src2, r_dest);
+  }
+  OpRegCopy(r_dest, r_src1);
+  NewLIR2(op, r_dest, r_src2);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::GenConversion(Instruction::Code opcode, RegLocation rl_dest,
+                               RegLocation rl_src) {
+  RegisterClass rc_src = kFPReg;
+  X86OpCode op = kX86Nop;
+  int src_reg;
+  RegLocation rl_result;
+  switch (opcode) {
+    case Instruction::INT_TO_FLOAT:
+      rc_src = kCoreReg;
+      op = kX86Cvtsi2ssRR;
+      break;
+    case Instruction::DOUBLE_TO_FLOAT:
+      rc_src = kFPReg;
+      op = kX86Cvtsd2ssRR;
+      break;
+    case Instruction::FLOAT_TO_DOUBLE:
+      rc_src = kFPReg;
+      op = kX86Cvtss2sdRR;
+      break;
+    case Instruction::INT_TO_DOUBLE:
+      rc_src = kCoreReg;
+      op = kX86Cvtsi2sdRR;
+      break;
+    case Instruction::FLOAT_TO_INT: {
+      rl_src = LoadValue(rl_src, kFPReg);
+      src_reg = rl_src.low_reg;
+      // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
+      ClobberSReg(rl_dest.s_reg_low);
+      rl_result = EvalLoc(rl_dest, kCoreReg, true);
+      int temp_reg = AllocTempFloat();
+
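+      // Compare against (float)0x7fffffff: "above" is positive overflow (keep MAX_INT),
+      // parity is NaN (produce 0); otherwise cvttss2si does the conversion.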
+      LoadConstant(rl_result.low_reg, 0x7fffffff);
+      NewLIR2(kX86Cvtsi2ssRR, temp_reg, rl_result.low_reg);
+      NewLIR2(kX86ComissRR, src_reg, temp_reg);
+      LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
+      LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
+      NewLIR2(kX86Cvttss2siRR, rl_result.low_reg, src_reg);
+      LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
+      branch_na_n->target = NewLIR0(kPseudoTargetLabel);
+      NewLIR2(kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
+      branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
+      branch_normal->target = NewLIR0(kPseudoTargetLabel);
+      StoreValue(rl_dest, rl_result);
+      return;
+    }
+    case Instruction::DOUBLE_TO_INT: {
+      rl_src = LoadValueWide(rl_src, kFPReg);
+      src_reg = rl_src.low_reg;
+      // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
+      ClobberSReg(rl_dest.s_reg_low);
+      rl_result = EvalLoc(rl_dest, kCoreReg, true);
+      int temp_reg = AllocTempDouble() | X86_FP_DOUBLE;
+
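+      // Same clamp-and-NaN handling as FLOAT_TO_INT, using the double-precision compare.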
+      LoadConstant(rl_result.low_reg, 0x7fffffff);
+      NewLIR2(kX86Cvtsi2sdRR, temp_reg, rl_result.low_reg);
+      NewLIR2(kX86ComisdRR, src_reg, temp_reg);
+      LIR* branch_pos_overflow = NewLIR2(kX86Jcc8, 0, kX86CondA);
+      LIR* branch_na_n = NewLIR2(kX86Jcc8, 0, kX86CondP);
+      NewLIR2(kX86Cvttsd2siRR, rl_result.low_reg, src_reg);
+      LIR* branch_normal = NewLIR1(kX86Jmp8, 0);
+      branch_na_n->target = NewLIR0(kPseudoTargetLabel);
+      NewLIR2(kX86Xor32RR, rl_result.low_reg, rl_result.low_reg);
+      branch_pos_overflow->target = NewLIR0(kPseudoTargetLabel);
+      branch_normal->target = NewLIR0(kPseudoTargetLabel);
+      StoreValue(rl_dest, rl_result);
+      return;
+    }
+    case Instruction::LONG_TO_DOUBLE:
+      GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+      return;
+    case Instruction::LONG_TO_FLOAT:
+      // TODO: inline by using memory as a 64-bit source. Be careful about promoted registers.
+      GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+      return;
+    case Instruction::FLOAT_TO_LONG:
+      GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+      return;
+    case Instruction::DOUBLE_TO_LONG:
+      GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+      return;
+    default:
+      LOG(FATAL) << "Unexpected opcode: " << opcode;
+  }
+  if (rl_src.wide) {
+    rl_src = LoadValueWide(rl_src, rc_src);
+    src_reg = S2d(rl_src.low_reg, rl_src.high_reg);
+  } else {
+    rl_src = LoadValue(rl_src, rc_src);
+    src_reg = rl_src.low_reg;
+  }
+  if (rl_dest.wide) {
+    rl_result = EvalLoc(rl_dest, kFPReg, true);
+    NewLIR2(op, S2d(rl_result.low_reg, rl_result.high_reg), src_reg);
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    rl_result = EvalLoc(rl_dest, kFPReg, true);
+    NewLIR2(op, rl_result.low_reg, src_reg);
+    StoreValue(rl_dest, rl_result);
+  }
+}
+
+void X86Mir2Lir::GenCmpFP(Instruction::Code code, RegLocation rl_dest,
+                          RegLocation rl_src1, RegLocation rl_src2) {
+  bool single = (code == Instruction::CMPL_FLOAT) || (code == Instruction::CMPG_FLOAT);
+  bool unordered_gt = (code == Instruction::CMPG_DOUBLE) || (code == Instruction::CMPG_FLOAT);
+  int src_reg1;
+  int src_reg2;
+  if (single) {
+    rl_src1 = LoadValue(rl_src1, kFPReg);
+    src_reg1 = rl_src1.low_reg;
+    rl_src2 = LoadValue(rl_src2, kFPReg);
+    src_reg2 = rl_src2.low_reg;
+  } else {
+    rl_src1 = LoadValueWide(rl_src1, kFPReg);
+    src_reg1 = S2d(rl_src1.low_reg, rl_src1.high_reg);
+    rl_src2 = LoadValueWide(rl_src2, kFPReg);
+    src_reg2 = S2d(rl_src2.low_reg, rl_src2.high_reg);
+  }
+  // In case result vreg is also src vreg, break association to avoid useless copy by EvalLoc()
+  ClobberSReg(rl_dest.s_reg_low);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  LoadConstantNoClobber(rl_result.low_reg, unordered_gt ? 1 : 0);
+  if (single) {
+    NewLIR2(kX86UcomissRR, src_reg1, src_reg2);
+  } else {
+    NewLIR2(kX86UcomisdRR, src_reg1, src_reg2);
+  }
+  LIR* branch = NULL;
+  if (unordered_gt) {
+    branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
+  }
+  // If the result reg can't be byte accessed, use a jump and move instead of a set.
+  if (rl_result.low_reg >= 4) {
+    LIR* branch2 = NULL;
+    if (unordered_gt) {
+      branch2 = NewLIR2(kX86Jcc8, 0, kX86CondA);
+      NewLIR2(kX86Mov32RI, rl_result.low_reg, 0x0);
+    } else {
+      branch2 = NewLIR2(kX86Jcc8, 0, kX86CondBe);
+      NewLIR2(kX86Mov32RI, rl_result.low_reg, 0x1);
+    }
+    branch2->target = NewLIR0(kPseudoTargetLabel);
+  } else {
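+    // set8 leaves 1 only for ">" (CF=0, ZF=0); the sbb below subtracts CF, yielding -1/0/1.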
+    NewLIR2(kX86Set8R, rl_result.low_reg, kX86CondA /* above - unsigned > */);
+  }
+  NewLIR2(kX86Sbb32RI, rl_result.low_reg, 0);
+  if (unordered_gt) {
+    branch->target = NewLIR0(kPseudoTargetLabel);
+  }
+  StoreValue(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
+                                     bool is_double) {
+  LIR* taken = &block_label_list_[bb->taken->id];
+  LIR* not_taken = &block_label_list_[bb->fall_through->id];
+  LIR* branch = NULL;
+  RegLocation rl_src1;
+  RegLocation rl_src2;
+  if (is_double) {
+    rl_src1 = mir_graph_->GetSrcWide(mir, 0);
+    rl_src2 = mir_graph_->GetSrcWide(mir, 2);
+    rl_src1 = LoadValueWide(rl_src1, kFPReg);
+    rl_src2 = LoadValueWide(rl_src2, kFPReg);
+    NewLIR2(kX86UcomisdRR, S2d(rl_src1.low_reg, rl_src1.high_reg),
+            S2d(rl_src2.low_reg, rl_src2.high_reg));
+  } else {
+    rl_src1 = mir_graph_->GetSrc(mir, 0);
+    rl_src2 = mir_graph_->GetSrc(mir, 1);
+    rl_src1 = LoadValue(rl_src1, kFPReg);
+    rl_src2 = LoadValue(rl_src2, kFPReg);
+    NewLIR2(kX86UcomissRR, rl_src1.low_reg, rl_src2.low_reg);
+  }
+  ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
+  switch (ccode) {
+    case kCondEq:
+      if (!gt_bias) {
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
+        branch->target = not_taken;
+      }
+      break;
+    case kCondNe:
+      if (!gt_bias) {
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
+        branch->target = taken;
+      }
+      break;
+    case kCondLt:
+      if (gt_bias) {
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
+        branch->target = not_taken;
+      }
+      ccode = kCondCs;
+      break;
+    case kCondLe:
+      if (gt_bias) {
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
+        branch->target = not_taken;
+      }
+      ccode = kCondLs;
+      break;
+    case kCondGt:
+      if (gt_bias) {
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
+        branch->target = taken;
+      }
+      ccode = kCondHi;
+      break;
+    case kCondGe:
+      if (gt_bias) {
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondPE);
+        branch->target = taken;
+      }
+      ccode = kCondCc;
+      break;
+    default:
+      LOG(FATAL) << "Unexpected ccode: " << ccode;
+  }
+  OpCondBranch(ccode, taken);
+}
+
+void X86Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src)
+{
+  RegLocation rl_result;
+  rl_src = LoadValue(rl_src, kCoreReg);
+  rl_result = EvalLoc(rl_dest, kCoreReg, true);
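+  // Adding 0x80000000 toggles only bit 31, the IEEE-754 sign bit.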
+  OpRegRegImm(kOpAdd, rl_result.low_reg, rl_src.low_reg, 0x80000000);
+  StoreValue(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src)
+{
+  RegLocation rl_result;
+  rl_src = LoadValueWide(rl_src, kCoreReg);
+  rl_result = EvalLoc(rl_dest, kCoreReg, true);
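+  // Toggle the sign bit in the high word; the low word is copied through unchanged.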
+  OpRegRegImm(kOpAdd, rl_result.high_reg, rl_src.high_reg, 0x80000000);
+  OpRegCopy(rl_result.low_reg, rl_src.low_reg);
+  StoreValueWide(rl_dest, rl_result);
+}
+
+bool X86Mir2Lir::GenInlinedSqrt(CallInfo* info) {
+  DCHECK_NE(cu_->instruction_set, kThumb2);
+  return false;
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
new file mode 100644
index 0000000..97d9d2d
--- /dev/null
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -0,0 +1,606 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the X86 ISA */
+
+#include "codegen_x86.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "mirror/array.h"
+#include "x86_lir.h"
+
+namespace art {
+
+/*
+ * Perform register memory operation.
+ */
+LIR* X86Mir2Lir::GenRegMemCheck(ConditionCode c_code,
+                                int reg1, int base, int offset, ThrowKind kind)
+{
+  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind,
+                    current_dalvik_offset_, reg1, base, offset);
+  OpRegMem(kOpCmp, reg1, base, offset);
+  LIR* branch = OpCondBranch(c_code, tgt);
+  // Remember branch target - will process later
+  throw_launchpads_.Insert(tgt);
+  return branch;
+}
+
+/*
+ * Compare two 64-bit values
+ *    x = y     return  0
+ *    x < y     return -1
+ *    x > y     return  1
+ */
+void X86Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
+{
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
+  // Compute (r1:r0) = (r1:r0) - (r3:r2)
+  OpRegReg(kOpSub, r0, r2);  // r0 = r0 - r2
+  OpRegReg(kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
+  NewLIR2(kX86Set8R, r2, kX86CondL);  // r2 = (r1:r0) < (r3:r2) ? 1 : 0
+  NewLIR2(kX86Movzx8RR, r2, r2);
+  OpReg(kOpNeg, r2);         // r2 = -r2
+  OpRegReg(kOpOr, r0, r1);   // r0 = high | low - sets ZF
+  NewLIR2(kX86Set8R, r0, kX86CondNz);  // r0 = (r1:r0) != (r3:r2) ? 1 : 0
+  NewLIR2(kX86Movzx8RR, r0, r0);
+  OpRegReg(kOpOr, r0, r2);   // r0 = r0 | r2
+  RegLocation rl_result = LocCReturn();
+  StoreValue(rl_dest, rl_result);
+}
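+
+// A sketch of the sequence emitted above, assuming the usual physical
+// mapping r0=eax, r1=ecx, r2=edx, r3=ebx (illustrative, not verbatim):
+//   sub    eax, edx      ; low(x)  - low(y)
+//   sbb    ecx, ebx      ; high(x) - high(y) - CF
+//   setl   dl            ; dl  = (x < y)
+//   movzx  edx, dl
+//   neg    edx           ; edx = (x < y) ? -1 : 0
+//   or     eax, ecx      ; ZF  = (x == y)
+//   setnz  al            ; al  = (x != y)
+//   movzx  eax, al
+//   or     eax, edx      ; eax = -1, 0 or 1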
+
+X86ConditionCode X86ConditionEncoding(ConditionCode cond) {
+  switch (cond) {
+    case kCondEq: return kX86CondEq;
+    case kCondNe: return kX86CondNe;
+    case kCondCs: return kX86CondC;
+    case kCondCc: return kX86CondNc;
+    case kCondMi: return kX86CondS;
+    case kCondPl: return kX86CondNs;
+    case kCondVs: return kX86CondO;
+    case kCondVc: return kX86CondNo;
+    case kCondHi: return kX86CondA;
+    case kCondLs: return kX86CondBe;
+    case kCondGe: return kX86CondGe;
+    case kCondLt: return kX86CondL;
+    case kCondGt: return kX86CondG;
+    case kCondLe: return kX86CondLe;
+    case kCondAl:
+    case kCondNv: LOG(FATAL) << "Should not reach here";
+  }
+  return kX86CondO;
+}
+
+LIR* X86Mir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2,
+                             LIR* target)
+{
+  NewLIR2(kX86Cmp32RR, src1, src2);
+  X86ConditionCode cc = X86ConditionEncoding(cond);
+  LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */ ,
+                        cc);
+  branch->target = target;
+  return branch;
+}
+
+LIR* X86Mir2Lir::OpCmpImmBranch(ConditionCode cond, int reg,
+                                int check_value, LIR* target)
+{
+  if ((check_value == 0) && (cond == kCondEq || cond == kCondNe)) {
+    // TODO: when check_value == 0 and reg is rCX, use the jcxz/nz opcode
+    NewLIR2(kX86Test32RR, reg, reg);
+  } else {
+    NewLIR2(IS_SIMM8(check_value) ? kX86Cmp32RI8 : kX86Cmp32RI, reg, check_value);
+  }
+  X86ConditionCode cc = X86ConditionEncoding(cond);
+  LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */ , cc);
+  branch->target = target;
+  return branch;
+}
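+
+// Note: for a comparison against zero the code above emits "test reg, reg"
+// rather than "cmp reg, 0"; the encoding is shorter and ZF is set
+// identically for the eq/ne cases that reach this path.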
+
+LIR* X86Mir2Lir::OpRegCopyNoInsert(int r_dest, int r_src)
+{
+  if (X86_FPREG(r_dest) || X86_FPREG(r_src))
+    return OpFpRegCopy(r_dest, r_src);
+  LIR* res = RawLIR(current_dalvik_offset_, kX86Mov32RR,
+                    r_dest, r_src);
+  if (r_dest == r_src) {
+    res->flags.is_nop = true;
+  }
+  return res;
+}
+
+LIR* X86Mir2Lir::OpRegCopy(int r_dest, int r_src)
+{
+  LIR *res = OpRegCopyNoInsert(r_dest, r_src);
+  AppendLIR(res);
+  return res;
+}
+
+void X86Mir2Lir::OpRegCopyWide(int dest_lo, int dest_hi,
+                               int src_lo, int src_hi)
+{
+  bool dest_fp = X86_FPREG(dest_lo) && X86_FPREG(dest_hi);
+  bool src_fp = X86_FPREG(src_lo) && X86_FPREG(src_hi);
+  assert(X86_FPREG(src_lo) == X86_FPREG(src_hi));
+  assert(X86_FPREG(dest_lo) == X86_FPREG(dest_hi));
+  if (dest_fp) {
+    if (src_fp) {
+      OpRegCopy(S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
+    } else {
+      // TODO: Prevent this from happening in the code. The result is often
+      // unused or could have been loaded more easily from memory.
+      NewLIR2(kX86MovdxrRR, dest_lo, src_lo);
+      NewLIR2(kX86MovdxrRR, dest_hi, src_hi);
+      NewLIR2(kX86PsllqRI, dest_hi, 32);
+      NewLIR2(kX86OrpsRR, dest_lo, dest_hi);
+    }
+  } else {
+    if (src_fp) {
+      NewLIR2(kX86MovdrxRR, dest_lo, src_lo);
+      NewLIR2(kX86PsrlqRI, src_lo, 32);
+      NewLIR2(kX86MovdrxRR, dest_hi, src_lo);
+    } else {
+      // Handle overlap
+      if (src_hi == dest_lo) {
+        OpRegCopy(dest_hi, src_hi);
+        OpRegCopy(dest_lo, src_lo);
+      } else {
+        OpRegCopy(dest_lo, src_lo);
+        OpRegCopy(dest_hi, src_hi);
+      }
+    }
+  }
+}
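+
+// The core-to-XMM branch above packs a 64-bit pair without a memory
+// round-trip; schematically (XMM numbers illustrative):
+//   movd   xmm0, <src_lo>
+//   movd   xmm1, <src_hi>
+//   psllq  xmm1, 32
+//   orps   xmm0, xmm1    ; xmm0 = src_hi:src_lo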
+
+void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir)
+{
+  UNIMPLEMENTED(FATAL) << "Need codegen for GenSelect";
+}
+
+void X86Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
+  LIR* taken = &block_label_list_[bb->taken->id];
+  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
+  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
+  ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
+  // Swap operands and condition code to prevent use of zero flag.
+  if (ccode == kCondLe || ccode == kCondGt) {
+    // Compute (r3:r2) = (r3:r2) - (r1:r0)
+    OpRegReg(kOpSub, r2, r0);  // r2 = r2 - r0
+    OpRegReg(kOpSbc, r3, r1);  // r3 = r3 - r1 - CF
+  } else {
+    // Compute (r1:r0) = (r1:r0) - (r3:r2)
+    OpRegReg(kOpSub, r0, r2);  // r0 = r0 - r2
+    OpRegReg(kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
+  }
+  switch (ccode) {
+    case kCondEq:
+    case kCondNe:
+      OpRegReg(kOpOr, r0, r1);  // r0 = r0 | r1
+      break;
+    case kCondLe:
+      ccode = kCondGe;
+      break;
+    case kCondGt:
+      ccode = kCondLt;
+      break;
+    case kCondLt:
+    case kCondGe:
+      break;
+    default:
+      LOG(FATAL) << "Unexpected ccode: " << ccode;
+  }
+  OpCondBranch(ccode, taken);
+}
+
+RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, int reg_lo,
+                                     int lit, bool is_div)
+{
+  LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
+  return rl_dest;
+}
+
+RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, int reg_lo,
+                                  int reg_hi, bool is_div)
+{
+  LOG(FATAL) << "Unexpected use of GenDivRem for x86";
+  return rl_dest;
+}
+
+bool X86Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min)
+{
+  DCHECK_EQ(cu_->instruction_set, kX86);
+  RegLocation rl_src1 = info->args[0];
+  RegLocation rl_src2 = info->args[1];
+  rl_src1 = LoadValue(rl_src1, kCoreReg);
+  rl_src2 = LoadValue(rl_src2, kCoreReg);
+  RegLocation rl_dest = InlineTarget(info);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
+  LIR* branch = NewLIR2(kX86Jcc8, 0, is_min ? kX86CondG : kX86CondL);
+  OpRegReg(kOpMov, rl_result.low_reg, rl_src1.low_reg);
+  LIR* branch2 = NewLIR1(kX86Jmp8, 0);
+  branch->target = NewLIR0(kPseudoTargetLabel);
+  OpRegReg(kOpMov, rl_result.low_reg, rl_src2.low_reg);
+  branch2->target = NewLIR0(kPseudoTargetLabel);
+  StoreValue(rl_dest, rl_result);
+  return true;
+}
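+
+// Control flow of the inline above (jg for min, jl for max; labels
+// illustrative):
+//       cmp   src1, src2
+//       jcc   L1           ; taken when src2 is the answer
+//       mov   result, src1
+//       jmp   L2
+//   L1: mov   result, src2
+//   L2: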
+
+void X86Mir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset)
+{
+  NewLIR5(kX86Lea32RA, rBase, reg1, reg2, scale, offset);
+}
+
+void X86Mir2Lir::OpTlsCmp(int offset, int val)
+{
+  NewLIR2(kX86Cmp16TI8, offset, val);
+}
+
+bool X86Mir2Lir::GenInlinedCas32(CallInfo* info, bool need_write_barrier) {
+  DCHECK_NE(cu_->instruction_set, kThumb2);
+  return false;
+}
+
+LIR* X86Mir2Lir::OpPcRelLoad(int reg, LIR* target) {
+  LOG(FATAL) << "Unexpected use of OpPcRelLoad for x86";
+  return NULL;
+}
+
+LIR* X86Mir2Lir::OpVldm(int rBase, int count)
+{
+  LOG(FATAL) << "Unexpected use of OpVldm for x86";
+  return NULL;
+}
+
+LIR* X86Mir2Lir::OpVstm(int rBase, int count)
+{
+  LOG(FATAL) << "Unexpected use of OpVstm for x86";
+  return NULL;
+}
+
+void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
+                                               RegLocation rl_result, int lit,
+                                               int first_bit, int second_bit)
+{
+  int t_reg = AllocTemp();
+  OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
+  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
+  FreeTemp(t_reg);
+  if (first_bit != 0) {
+    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
+  }
+}
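+
+// Worked example (not emitted code): for lit == 10 the set bits are 1 and
+// 3, so first_bit == 1, second_bit == 3, and the sequence computes
+// (src + (src << 2)) << 1 == src * 10.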
+
+void X86Mir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi)
+{
+  int t_reg = AllocTemp();
+  OpRegRegReg(kOpOr, t_reg, reg_lo, reg_hi);
+  GenImmedCheck(kCondEq, t_reg, 0, kThrowDivZero);
+  FreeTemp(t_reg);
+}
+
+// Test suspend flag, return target of taken suspend branch
+LIR* X86Mir2Lir::OpTestSuspend(LIR* target)
+{
+  OpTlsCmp(Thread::ThreadFlagsOffset().Int32Value(), 0);
+  return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
+}
+
+// Decrement register and branch on condition
+LIR* X86Mir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target)
+{
+  OpRegImm(kOpSub, reg, 1);
+  return OpCmpImmBranch(c_code, reg, 0, target);
+}
+
+bool X86Mir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode,
+                                    RegLocation rl_src, RegLocation rl_dest, int lit)
+{
+  LOG(FATAL) << "Unexpected use of SmallLiteralDivide in x86";
+  return false;
+}
+
+LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide)
+{
+  LOG(FATAL) << "Unexpected use of OpIT in x86";
+  return NULL;
+}
+
+void X86Mir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
+{
+  LOG(FATAL) << "Unexpected use of GenMulLong for x86";
+}
+
+void X86Mir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
+{
+  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
+  // enough.
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
+  // Compute (r1:r0) = (r1:r0) + (r3:r2)
+  OpRegReg(kOpAdd, r0, r2);  // r0 = r0 + r2
+  OpRegReg(kOpAdc, r1, r3);  // r1 = r1 + r3 + CF
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+                          INVALID_SREG, INVALID_SREG};
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
+{
+  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
+  // enough.
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
+  // Compute (r1:r0) = (r1:r0) - (r3:r2)
+  OpRegReg(kOpSub, r0, r2);  // r0 = r0 - r2
+  OpRegReg(kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+                          INVALID_SREG, INVALID_SREG};
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1,
+                            RegLocation rl_src2)
+{
+  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
+  // enough.
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
+  // Compute (r1:r0) = (r1:r0) & (r3:r2)
+  OpRegReg(kOpAnd, r0, r2);  // r0 = r0 & r2
+  OpRegReg(kOpAnd, r1, r3);  // r1 = r1 & r3
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+                          INVALID_SREG, INVALID_SREG};
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::GenOrLong(RegLocation rl_dest,
+                           RegLocation rl_src1, RegLocation rl_src2)
+{
+  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
+  // enough.
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
+  // Compute (r1:r0) = (r1:r0) | (r3:r2)
+  OpRegReg(kOpOr, r0, r2);  // r0 = r0 | r2
+  OpRegReg(kOpOr, r1, r3);  // r1 = r1 | r3
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+                          INVALID_SREG, INVALID_SREG};
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::GenXorLong(RegLocation rl_dest,
+                            RegLocation rl_src1, RegLocation rl_src2)
+{
+  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
+  // enough.
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src1, r0, r1);
+  LoadValueDirectWideFixed(rl_src2, r2, r3);
+  // Compute (r1:r0) = (r1:r0) ^ (r3:r2)
+  OpRegReg(kOpXor, r0, r2);  // r0 = r0 ^ r2
+  OpRegReg(kOpXor, r1, r3);  // r1 = r1 ^ r3
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+                          INVALID_SREG, INVALID_SREG};
+  StoreValueWide(rl_dest, rl_result);
+}
+
+void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src)
+{
+  FlushAllRegs();
+  LockCallTemps();  // Prepare for explicit register usage
+  LoadValueDirectWideFixed(rl_src, r0, r1);
+  // Compute (r1:r0) = -(r1:r0)
+  OpRegReg(kOpNeg, r0, r0);  // r0 = -r0
+  OpRegImm(kOpAdc, r1, 0);   // r1 = r1 + CF
+  OpRegReg(kOpNeg, r1, r1);  // r1 = -r1
+  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+                          INVALID_SREG, INVALID_SREG};
+  StoreValueWide(rl_dest, rl_result);
+}
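+
+// The negation above is the standard 64-bit idiom (assuming r0=eax,
+// r1=ecx):
+//   neg  eax             ; low = -low, CF = (low != 0)
+//   adc  ecx, 0          ; fold the borrow into the high half
+//   neg  ecx             ; high = -(high + borrow)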
+
+void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, int thread_offset) {
+  X86OpCode opcode = kX86Bkpt;
+  switch (op) {
+  case kOpCmp: opcode = kX86Cmp32RT;  break;
+  case kOpMov: opcode = kX86Mov32RT;  break;
+  default:
+    LOG(FATAL) << "Bad opcode: " << op;
+    break;
+  }
+  NewLIR2(opcode, r_dest, thread_offset);
+}
+
+/*
+ * Generate array load
+ */
+void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
+                          RegLocation rl_index, RegLocation rl_dest, int scale)
+{
+  RegisterClass reg_class = oat_reg_class_by_size(size);
+  int len_offset = mirror::Array::LengthOffset().Int32Value();
+  int data_offset;
+  RegLocation rl_result;
+  rl_array = LoadValue(rl_array, kCoreReg);
+  rl_index = LoadValue(rl_index, kCoreReg);
+
+  if (size == kLong || size == kDouble) {
+    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+  } else {
+    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+  }
+
+  /* null object? */
+  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+  if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
+    /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
+    GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg,
+                   len_offset, kThrowArrayBounds);
+  }
+  if ((size == kLong) || (size == kDouble)) {
+    int reg_addr = AllocTemp();
+    OpLea(reg_addr, rl_array.low_reg, rl_index.low_reg, scale, data_offset);
+    FreeTemp(rl_array.low_reg);
+    FreeTemp(rl_index.low_reg);
+    rl_result = EvalLoc(rl_dest, reg_class, true);
+    LoadBaseIndexedDisp(reg_addr, INVALID_REG, 0, 0, rl_result.low_reg,
+                        rl_result.high_reg, size, INVALID_SREG);
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    rl_result = EvalLoc(rl_dest, reg_class, true);
+
+    LoadBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale,
+                        data_offset, rl_result.low_reg, INVALID_REG, size,
+                        INVALID_SREG);
+
+    StoreValue(rl_dest, rl_result);
+  }
+}
+
+/*
+ * Generate array store
+ */
+void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
+                          RegLocation rl_index, RegLocation rl_src, int scale)
+{
+  RegisterClass reg_class = oat_reg_class_by_size(size);
+  int len_offset = mirror::Array::LengthOffset().Int32Value();
+  int data_offset;
+
+  if (size == kLong || size == kDouble) {
+    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
+  } else {
+    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
+  }
+
+  rl_array = LoadValue(rl_array, kCoreReg);
+  rl_index = LoadValue(rl_index, kCoreReg);
+
+  /* null object? */
+  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);
+
+  if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
+    /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
+    GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg, len_offset, kThrowArrayBounds);
+  }
+  if ((size == kLong) || (size == kDouble)) {
+    rl_src = LoadValueWide(rl_src, reg_class);
+  } else {
+    rl_src = LoadValue(rl_src, reg_class);
+  }
+  // If the src reg can't be byte accessed, move it to a temp first.
+  if ((size == kSignedByte || size == kUnsignedByte) && rl_src.low_reg >= 4) {
+    int temp = AllocTemp();
+    OpRegCopy(temp, rl_src.low_reg);
+    StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, temp,
+                         INVALID_REG, size, INVALID_SREG);
+  } else {
+    StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_src.low_reg,
+                         rl_src.high_reg, size, INVALID_SREG);
+  }
+}
+
+/*
+ * Generate array store of object references, with type check and card mark
+ */
+void X86Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array,
+                             RegLocation rl_index, RegLocation rl_src, int scale)
+{
+  int len_offset = mirror::Array::LengthOffset().Int32Value();
+  int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value();
+
+  FlushAllRegs();  // Use explicit registers
+  LockCallTemps();
+
+  int r_value = TargetReg(kArg0);  // Register holding value
+  int r_array_class = TargetReg(kArg1);  // Register holding array's Class
+  int r_array = TargetReg(kArg2);  // Register holding array
+  int r_index = TargetReg(kArg3);  // Register holding index into array
+
+  LoadValueDirectFixed(rl_array, r_array);  // Grab array
+  LoadValueDirectFixed(rl_src, r_value);  // Grab value
+  LoadValueDirectFixed(rl_index, r_index);  // Grab index
+
+  GenNullCheck(rl_array.s_reg_low, r_array, opt_flags);  // NPE?
+
+  // Store of null?
+  LIR* null_value_check = OpCmpImmBranch(kCondEq, r_value, 0, NULL);
+
+  // Get the array's class.
+  LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
+  CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+                          r_array_class, true);
+  // Redo LoadValues in case they didn't survive the call.
+  LoadValueDirectFixed(rl_array, r_array);  // Reload array
+  LoadValueDirectFixed(rl_index, r_index);  // Reload index
+  LoadValueDirectFixed(rl_src, r_value);  // Reload value
+  r_array_class = INVALID_REG;
+
+  // Branch here if value to be stored == null
+  LIR* target = NewLIR0(kPseudoTargetLabel);
+  null_value_check->target = target;
+
+  // make an extra temp available for card mark below
+  FreeTemp(TargetReg(kArg1));
+  if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
+    /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
+    GenRegMemCheck(kCondUge, r_index, r_array, len_offset, kThrowArrayBounds);
+  }
+  StoreBaseIndexedDisp(r_array, r_index, scale,
+                       data_offset, r_value, INVALID_REG, kWord, INVALID_SREG);
+  FreeTemp(r_index);
+  if (!mir_graph_->IsConstantNullRef(rl_src)) {
+    MarkGCCard(r_value, r_array);
+  }
+}
+
+void X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
+                                   RegLocation rl_src1, RegLocation rl_shift)
+{
+  // Default implementation is just to ignore the constant case.
+  GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
+}
+
+void X86Mir2Lir::GenArithImmOpLong(Instruction::Code opcode,
+                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
+{
+  // Default - bail to non-const handler.
+  GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
new file mode 100644
index 0000000..c421ef3
--- /dev/null
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -0,0 +1,571 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_x86.h"
+#include "dex/compiler_internals.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "x86_lir.h"
+
+#include <string>
+
+namespace art {
+
+// FIXME: restore "static" when the external usage is uncovered.
+/*static*/ int core_regs[] = {
+  rAX, rCX, rDX, rBX, rX86_SP, rBP, rSI, rDI,
+#ifdef TARGET_REX_SUPPORT
+  r8, r9, r10, r11, r12, r13, r14, r15
+#endif
+};
+/*static*/ int ReservedRegs[] = {rX86_SP};
+/*static*/ int core_temps[] = {rAX, rCX, rDX, rBX};
+/*static*/ int FpRegs[] = {
+  fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+#ifdef TARGET_REX_SUPPORT
+  fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
+#endif
+};
+/*static*/ int fp_temps[] = {
+  fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+#ifdef TARGET_REX_SUPPORT
+  fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
+#endif
+};
+
+RegLocation X86Mir2Lir::LocCReturn()
+{
+  RegLocation res = X86_LOC_C_RETURN;
+  return res;
+}
+
+RegLocation X86Mir2Lir::LocCReturnWide()
+{
+  RegLocation res = X86_LOC_C_RETURN_WIDE;
+  return res;
+}
+
+RegLocation X86Mir2Lir::LocCReturnFloat()
+{
+  RegLocation res = X86_LOC_C_RETURN_FLOAT;
+  return res;
+}
+
+RegLocation X86Mir2Lir::LocCReturnDouble()
+{
+  RegLocation res = X86_LOC_C_RETURN_DOUBLE;
+  return res;
+}
+
+// Return a target-dependent special register.
+int X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
+  int res = INVALID_REG;
+  switch (reg) {
+    case kSelf: res = rX86_SELF; break;
+    case kSuspend: res =  rX86_SUSPEND; break;
+    case kLr: res =  rX86_LR; break;
+    case kPc: res =  rX86_PC; break;
+    case kSp: res =  rX86_SP; break;
+    case kArg0: res = rX86_ARG0; break;
+    case kArg1: res = rX86_ARG1; break;
+    case kArg2: res = rX86_ARG2; break;
+    case kArg3: res = rX86_ARG3; break;
+    case kFArg0: res = rX86_FARG0; break;
+    case kFArg1: res = rX86_FARG1; break;
+    case kFArg2: res = rX86_FARG2; break;
+    case kFArg3: res = rX86_FARG3; break;
+    case kRet0: res = rX86_RET0; break;
+    case kRet1: res = rX86_RET1; break;
+    case kInvokeTgt: res = rX86_INVOKE_TGT; break;
+    case kCount: res = rX86_COUNT; break;
+  }
+  return res;
+}
+
+// Create a double from a pair of singles.
+int X86Mir2Lir::S2d(int low_reg, int high_reg)
+{
+  return X86_S2D(low_reg, high_reg);
+}
+
+// Return mask to strip off fp reg flags and bias.
+uint32_t X86Mir2Lir::FpRegMask()
+{
+  return X86_FP_REG_MASK;
+}
+
+// True if both regs single, both core or both double.
+bool X86Mir2Lir::SameRegType(int reg1, int reg2)
+{
+  return (X86_REGTYPE(reg1) == X86_REGTYPE(reg2));
+}
+
+/*
+ * Decode the register id.
+ */
+uint64_t X86Mir2Lir::GetRegMaskCommon(int reg)
+{
+  uint64_t seed;
+  int shift;
+  int reg_id;
+
+  reg_id = reg & 0xf;
+  /* Double registers in x86 are just a single FP register */
+  seed = 1;
+  /* FP register starts at bit position 16 */
+  shift = X86_FPREG(reg) ? kX86FPReg0 : 0;
+  /* Expand the double register id into single offset */
+  shift += reg_id;
+  return (seed << shift);
+}
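+
+// Example: core register id 3 (rBX) maps to bit 3, while an FP register
+// with the same low id maps to bit (kX86FPReg0 + 3), keeping core and FP
+// resource bits disjoint in the 64-bit mask.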
+
+uint64_t X86Mir2Lir::GetPCUseDefEncoding()
+{
+  /*
+   * FIXME: might make sense to use a virtual resource encoding bit for pc.  Might be
+   * able to clean up some of the x86 vs ARM/MIPS differences.
+   */
+  LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86";
+  return 0ULL;
+}
+
+void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir)
+{
+  DCHECK_EQ(cu_->instruction_set, kX86);
+
+  // X86-specific resource map setup here.
+  uint64_t flags = X86Mir2Lir::EncodingMap[lir->opcode].flags;
+
+  if (flags & REG_USE_SP) {
+    lir->use_mask |= ENCODE_X86_REG_SP;
+  }
+
+  if (flags & REG_DEF_SP) {
+    lir->def_mask |= ENCODE_X86_REG_SP;
+  }
+
+  if (flags & REG_DEFA) {
+    SetupRegMask(&lir->def_mask, rAX);
+  }
+
+  if (flags & REG_DEFD) {
+    SetupRegMask(&lir->def_mask, rDX);
+  }
+  if (flags & REG_USEA) {
+    SetupRegMask(&lir->use_mask, rAX);
+  }
+
+  if (flags & REG_USEC) {
+    SetupRegMask(&lir->use_mask, rCX);
+  }
+
+  if (flags & REG_USED) {
+    SetupRegMask(&lir->use_mask, rDX);
+  }
+}
+
+/* For dumping instructions */
+static const char* x86RegName[] = {
+  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
+  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+};
+
+static const char* x86CondName[] = {
+  "O",
+  "NO",
+  "B/NAE/C",
+  "NB/AE/NC",
+  "Z/EQ",
+  "NZ/NE",
+  "BE/NA",
+  "NBE/A",
+  "S",
+  "NS",
+  "P/PE",
+  "NP/PO",
+  "L/NGE",
+  "NL/GE",
+  "LE/NG",
+  "NLE/G"
+};
+
+/*
+ * Interpret a format string and build a human-readable instruction string.
+ * See the format key in assemble_x86.cc.
+ */
+std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
+  std::string buf;
+  size_t i = 0;
+  size_t fmt_len = strlen(fmt);
+  while (i < fmt_len) {
+    if (fmt[i] != '!') {
+      buf += fmt[i];
+      i++;
+    } else {
+      i++;
+      DCHECK_LT(i, fmt_len);
+      char operand_number_ch = fmt[i];
+      i++;
+      if (operand_number_ch == '!') {
+        buf += "!";
+      } else {
+        int operand_number = operand_number_ch - '0';
+        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
+        DCHECK_LT(i, fmt_len);
+        int operand = lir->operands[operand_number];
+        switch (fmt[i]) {
+          case 'c':
+            DCHECK_LT(static_cast<size_t>(operand),
+                      sizeof(x86CondName) / sizeof(x86CondName[0]));
+            buf += x86CondName[operand];
+            break;
+          case 'd':
+            buf += StringPrintf("%d", operand);
+            break;
+          case 'p': {
+            SwitchTable *tab_rec = reinterpret_cast<SwitchTable*>(operand);
+            buf += StringPrintf("0x%08x", tab_rec->offset);
+            break;
+          }
+          case 'r':
+            if (X86_FPREG(operand) || X86_DOUBLEREG(operand)) {
+              int fp_reg = operand & X86_FP_REG_MASK;
+              buf += StringPrintf("xmm%d", fp_reg);
+            } else {
+              DCHECK_LT(static_cast<size_t>(operand),
+                        sizeof(x86RegName) / sizeof(x86RegName[0]));
+              buf += x86RegName[operand];
+            }
+            break;
+          case 't':
+            buf += StringPrintf("0x%08x (L%p)",
+                                reinterpret_cast<uint32_t>(base_addr)
+                                + lir->offset + operand, lir->target);
+            break;
+          default:
+            buf += StringPrintf("DecodeError '%c'", fmt[i]);
+            break;
+        }
+        i++;
+      }
+    }
+  }
+  return buf;
+}
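+
+// Example (operands assumed): given fmt "!0r,[!1r+!2d]" and operands
+// {0, 5, 8}, the loop above produces "rax,[rbp+8]".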
+
+void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix)
+{
+  char buf[256];
+  buf[0] = 0;
+
+  if (mask == ENCODE_ALL) {
+    strcpy(buf, "all");
+  } else {
+    char num[8];
+    int i;
+
+    for (i = 0; i < kX86RegEnd; i++) {
+      if (mask & (1ULL << i)) {
+        sprintf(num, "%d ", i);
+        strcat(buf, num);
+      }
+    }
+
+    if (mask & ENCODE_CCODE) {
+      strcat(buf, "cc ");
+    }
+    /* Memory bits */
+    if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
+      sprintf(buf + strlen(buf), "dr%d%s", x86LIR->alias_info & 0xffff,
+              (x86LIR->alias_info & 0x80000000) ? "(+1)" : "");
+    }
+    if (mask & ENCODE_LITERAL) {
+      strcat(buf, "lit ");
+    }
+
+    if (mask & ENCODE_HEAP_REF) {
+      strcat(buf, "heap ");
+    }
+    if (mask & ENCODE_MUST_NOT_ALIAS) {
+      strcat(buf, "noalias ");
+    }
+  }
+  if (buf[0]) {
+    LOG(INFO) << prefix << ": " <<  buf;
+  }
+}
+
+void X86Mir2Lir::AdjustSpillMask() {
+  // x86 has no link register; the return address pushed by the call
+  // occupies a frame slot, so account for it in the spill mask.
+  core_spill_mask_ |= (1 << rRET);
+  num_core_spills_++;
+}
+
+/*
+ * Mark a callee-save fp register as promoted.  FP register
+ * promotion is not yet implemented for x86.
+ */
+void X86Mir2Lir::MarkPreservedSingle(int v_reg, int reg)
+{
+  UNIMPLEMENTED(WARNING) << "MarkPreservedSingle";
+#if 0
+  LOG(FATAL) << "No support yet for promoted FP regs";
+#endif
+}
+
+void X86Mir2Lir::FlushRegWide(int reg1, int reg2)
+{
+  RegisterInfo* info1 = GetRegInfo(reg1);
+  RegisterInfo* info2 = GetRegInfo(reg2);
+  DCHECK(info1 && info2 && info1->pair && info2->pair &&
+         (info1->partner == info2->reg) &&
+         (info2->partner == info1->reg));
+  if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
+    if (!(info1->is_temp && info2->is_temp)) {
+      /* Should not happen.  If it does, there's a problem in EvalLoc */
+      LOG(FATAL) << "Long half-temp, half-promoted";
+    }
+
+    info1->dirty = false;
+    info2->dirty = false;
+    if (mir_graph_->SRegToVReg(info2->s_reg) < mir_graph_->SRegToVReg(info1->s_reg))
+      info1 = info2;
+    int v_reg = mir_graph_->SRegToVReg(info1->s_reg);
+    StoreBaseDispWide(rX86_SP, VRegOffset(v_reg), info1->reg, info1->partner);
+  }
+}
+
+void X86Mir2Lir::FlushReg(int reg)
+{
+  RegisterInfo* info = GetRegInfo(reg);
+  if (info->live && info->dirty) {
+    info->dirty = false;
+    int v_reg = mir_graph_->SRegToVReg(info->s_reg);
+    StoreBaseDisp(rX86_SP, VRegOffset(v_reg), reg, kWord);
+  }
+}
+
+/* Give access to the target-dependent FP register encoding to common code */
+bool X86Mir2Lir::IsFpReg(int reg) {
+  return X86_FPREG(reg);
+}
+
+/* Clobber all regs that might be used by an external C call */
+void X86Mir2Lir::ClobberCalleeSave()
+{
+  Clobber(rAX);
+  Clobber(rCX);
+  Clobber(rDX);
+}
+
+RegLocation X86Mir2Lir::GetReturnWideAlt() {
+  RegLocation res = LocCReturnWide();
+  CHECK(res.low_reg == rAX);
+  CHECK(res.high_reg == rDX);
+  Clobber(rAX);
+  Clobber(rDX);
+  MarkInUse(rAX);
+  MarkInUse(rDX);
+  MarkPair(res.low_reg, res.high_reg);
+  return res;
+}
+
+RegLocation X86Mir2Lir::GetReturnAlt()
+{
+  RegLocation res = LocCReturn();
+  res.low_reg = rDX;
+  Clobber(rDX);
+  MarkInUse(rDX);
+  return res;
+}
+
+X86Mir2Lir::RegisterInfo* X86Mir2Lir::GetRegInfo(int reg)
+{
+  return X86_FPREG(reg) ? &reg_pool_->FPRegs[reg & X86_FP_REG_MASK]
+                    : &reg_pool_->core_regs[reg];
+}
+
+/* To be used when explicitly managing register use */
+void X86Mir2Lir::LockCallTemps()
+{
+  LockTemp(rX86_ARG0);
+  LockTemp(rX86_ARG1);
+  LockTemp(rX86_ARG2);
+  LockTemp(rX86_ARG3);
+}
+
+/* To be used when explicitly managing register use */
+void X86Mir2Lir::FreeCallTemps()
+{
+  FreeTemp(rX86_ARG0);
+  FreeTemp(rX86_ARG1);
+  FreeTemp(rX86_ARG2);
+  FreeTemp(rX86_ARG3);
+}
+
+void X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind)
+{
+#if ANDROID_SMP != 0
+  // TODO: optimize fences
+  NewLIR0(kX86Mfence);
+#endif
+}
+
+/*
+ * Alloc a pair of core registers, or a double.  Low reg in low byte,
+ * high reg in next byte.
+ */
+int X86Mir2Lir::AllocTypedTempPair(bool fp_hint,
+                          int reg_class)
+{
+  int high_reg;
+  int low_reg;
+  int res = 0;
+
+  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
+    low_reg = AllocTempDouble();
+    high_reg = low_reg + 1;
+    res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+    return res;
+  }
+
+  low_reg = AllocTemp();
+  high_reg = AllocTemp();
+  res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
+  return res;
+}
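+
+// The pair is packed as (high_reg << 8) | low_reg so that callers can
+// recover both register numbers from the single int return value.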
+
+int X86Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
+  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
+    return AllocTempFloat();
+  }
+  return AllocTemp();
+}
+
+void X86Mir2Lir::CompilerInitializeRegAlloc() {
+  int num_regs = sizeof(core_regs)/sizeof(*core_regs);
+  int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
+  int num_temps = sizeof(core_temps)/sizeof(*core_temps);
+  int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
+  int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
+  reg_pool_ = static_cast<RegisterPool*>(arena_->NewMem(sizeof(*reg_pool_), true,
+                                                        ArenaAllocator::kAllocRegAlloc));
+  reg_pool_->num_core_regs = num_regs;
+  reg_pool_->core_regs =
+      static_cast<RegisterInfo*>(arena_->NewMem(num_regs * sizeof(*reg_pool_->core_regs), true,
+                                                ArenaAllocator::kAllocRegAlloc));
+  reg_pool_->num_fp_regs = num_fp_regs;
+  reg_pool_->FPRegs =
+      static_cast<RegisterInfo *>(arena_->NewMem(num_fp_regs * sizeof(*reg_pool_->FPRegs), true,
+                                                 ArenaAllocator::kAllocRegAlloc));
+  CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
+  CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
+  // Keep special registers from being allocated
+  for (int i = 0; i < num_reserved; i++) {
+    MarkInUse(ReservedRegs[i]);
+  }
+  // Mark temp regs - all others not in use can be used for promotion
+  for (int i = 0; i < num_temps; i++) {
+    MarkTemp(core_temps[i]);
+  }
+  for (int i = 0; i < num_fp_temps; i++) {
+    MarkTemp(fp_temps[i]);
+  }
+}
+
+void X86Mir2Lir::FreeRegLocTemps(RegLocation rl_keep,
+                     RegLocation rl_free)
+{
+  if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
+      (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
+    // No overlap, free both
+    FreeTemp(rl_free.low_reg);
+    FreeTemp(rl_free.high_reg);
+  }
+}
+
+void X86Mir2Lir::SpillCoreRegs() {
+  if (num_core_spills_ == 0) {
+    return;
+  }
+  // Spill mask not including fake return address register
+  uint32_t mask = core_spill_mask_ & ~(1 << rRET);
+  int offset = frame_size_ - (4 * num_core_spills_);
+  for (int reg = 0; mask; mask >>= 1, reg++) {
+    if (mask & 0x1) {
+      StoreWordDisp(rX86_SP, offset, reg);
+      offset += 4;
+    }
+  }
+}
+
+void X86Mir2Lir::UnSpillCoreRegs() {
+  if (num_core_spills_ == 0) {
+    return;
+  }
+  // Spill mask not including fake return address register
+  uint32_t mask = core_spill_mask_ & ~(1 << rRET);
+  int offset = frame_size_ - (4 * num_core_spills_);
+  for (int reg = 0; mask; mask >>= 1, reg++) {
+    if (mask & 0x1) {
+      LoadWordDisp(rX86_SP, offset, reg);
+      offset += 4;
+    }
+  }
+}
+
+bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir)
+{
+  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
+}
+
+X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
+    : Mir2Lir(cu, mir_graph, arena) {
+  for (int i = 0; i < kX86Last; i++) {
+    if (X86Mir2Lir::EncodingMap[i].opcode != i) {
+      LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
+                 << " is wrong: expecting " << i << ", seeing "
+                 << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
+    }
+  }
+}
+
+Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                          ArenaAllocator* const arena) {
+  return new X86Mir2Lir(cu, mir_graph, arena);
+}
+
+// Not used in x86
+int X86Mir2Lir::LoadHelper(int offset)
+{
+  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
+  return INVALID_REG;
+}
+
+uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode)
+{
+  return X86Mir2Lir::EncodingMap[opcode].flags;
+}
+
+const char* X86Mir2Lir::GetTargetInstName(int opcode)
+{
+  return X86Mir2Lir::EncodingMap[opcode].name;
+}
+
+const char* X86Mir2Lir::GetTargetInstFmt(int opcode)
+{
+  return X86Mir2Lir::EncodingMap[opcode].fmt;
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
new file mode 100644
index 0000000..fb07ff1
--- /dev/null
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -0,0 +1,582 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "codegen_x86.h"
+#include "dex/quick/mir_to_lir-inl.h"
+#include "x86_lir.h"
+
+namespace art {
+
+/* This file contains codegen for the X86 ISA */
+
+LIR* X86Mir2Lir::OpFpRegCopy(int r_dest, int r_src)
+{
+  int opcode;
+  /* must be both DOUBLE or both not DOUBLE */
+  DCHECK_EQ(X86_DOUBLEREG(r_dest), X86_DOUBLEREG(r_src));
+  if (X86_DOUBLEREG(r_dest)) {
+    opcode = kX86MovsdRR;
+  } else {
+    if (X86_SINGLEREG(r_dest)) {
+      if (X86_SINGLEREG(r_src)) {
+        opcode = kX86MovssRR;
+      } else {  // Fpr <- Gpr
+        opcode = kX86MovdxrRR;
+      }
+    } else {  // Gpr <- Fpr
+      DCHECK(X86_SINGLEREG(r_src));
+      opcode = kX86MovdrxRR;
+    }
+  }
+  DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
+  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
+  if (r_dest == r_src) {
+    res->flags.is_nop = true;
+  }
+  return res;
+}
+
+bool X86Mir2Lir::InexpensiveConstantInt(int32_t value)
+{
+  return true;
+}
+
+bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value)
+{
+  return false;
+}
+
+bool X86Mir2Lir::InexpensiveConstantLong(int64_t value)
+{
+  return true;
+}
+
+bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value)
+{
+  return false; // TUNING
+}
+
+/*
+ * Load an immediate using a shortcut if possible; otherwise
+ * grab from the per-translation literal pool.  If target is
+ * a high register, build constant into a low register and copy.
+ *
+ * No additional register clobbering is performed. Use this version when:
+ * 1) r_dest is freshly returned from AllocTemp or
+ * 2) The codegen is under fixed register usage
+ */
+LIR* X86Mir2Lir::LoadConstantNoClobber(int r_dest, int value)
+{
+  int r_dest_save = r_dest;
+  if (X86_FPREG(r_dest)) {
+    if (value == 0) {
+      return NewLIR2(kX86XorpsRR, r_dest, r_dest);
+    }
+    DCHECK(X86_SINGLEREG(r_dest));
+    r_dest = AllocTemp();
+  }
+
+  LIR *res;
+  if (value == 0) {
+    res = NewLIR2(kX86Xor32RR, r_dest, r_dest);
+  } else {
+    // Note, there is no byte immediate form of a 32 bit immediate move.
+    res = NewLIR2(kX86Mov32RI, r_dest, value);
+  }
+
+  if (X86_FPREG(r_dest_save)) {
+    NewLIR2(kX86MovdxrRR, r_dest_save, r_dest);
+    FreeTemp(r_dest);
+  }
+
+  return res;
+}
+
+LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target)
+{
+  LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly */);
+  res->target = target;
+  return res;
+}
+
+LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target)
+{
+  LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */,
+                        X86ConditionEncoding(cc));
+  branch->target = target;
+  return branch;
+}
+
+LIR* X86Mir2Lir::OpReg(OpKind op, int r_dest_src)
+{
+  X86OpCode opcode = kX86Bkpt;
+  switch (op) {
+    case kOpNeg: opcode = kX86Neg32R; break;
+    case kOpNot: opcode = kX86Not32R; break;
+    case kOpBlx: opcode = kX86CallR; break;
+    default:
+      LOG(FATAL) << "Bad case in OpReg " << op;
+  }
+  return NewLIR1(opcode, r_dest_src);
+}
+
+LIR* X86Mir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value)
+{
+  X86OpCode opcode = kX86Bkpt;
+  bool byte_imm = IS_SIMM8(value);
+  DCHECK(!X86_FPREG(r_dest_src1));
+  switch (op) {
+    case kOpLsl: opcode = kX86Sal32RI; break;
+    case kOpLsr: opcode = kX86Shr32RI; break;
+    case kOpAsr: opcode = kX86Sar32RI; break;
+    case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
+    case kOpOr:  opcode = byte_imm ? kX86Or32RI8  : kX86Or32RI;  break;
+    case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
+    //case kOpSbb: opcode = kX86Sbb32RI; break;
+    case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
+    case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
+    case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
+    case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
+    case kOpMov: return LoadConstantNoClobber(r_dest_src1, value);
+    case kOpMul:
+      opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
+      return NewLIR3(opcode, r_dest_src1, r_dest_src1, value);
+    default:
+      LOG(FATAL) << "Bad case in OpRegImm " << op;
+  }
+  return NewLIR2(opcode, r_dest_src1, value);
+}
+
+LIR* X86Mir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2)
+{
+    X86OpCode opcode = kX86Nop;
+    bool src2_must_be_cx = false;
+    switch (op) {
+        // X86 unary opcodes
+      case kOpMvn:
+        OpRegCopy(r_dest_src1, r_src2);
+        return OpReg(kOpNot, r_dest_src1);
+      case kOpNeg:
+        OpRegCopy(r_dest_src1, r_src2);
+        return OpReg(kOpNeg, r_dest_src1);
+        // X86 binary opcodes
+      case kOpSub: opcode = kX86Sub32RR; break;
+      case kOpSbc: opcode = kX86Sbb32RR; break;
+      case kOpLsl: opcode = kX86Sal32RC; src2_must_be_cx = true; break;
+      case kOpLsr: opcode = kX86Shr32RC; src2_must_be_cx = true; break;
+      case kOpAsr: opcode = kX86Sar32RC; src2_must_be_cx = true; break;
+      case kOpMov: opcode = kX86Mov32RR; break;
+      case kOpCmp: opcode = kX86Cmp32RR; break;
+      case kOpAdd: opcode = kX86Add32RR; break;
+      case kOpAdc: opcode = kX86Adc32RR; break;
+      case kOpAnd: opcode = kX86And32RR; break;
+      case kOpOr:  opcode = kX86Or32RR; break;
+      case kOpXor: opcode = kX86Xor32RR; break;
+      case kOp2Byte:
+        // Use shifts instead of a byte operand if the source can't be byte accessed.
+        if (r_src2 >= 4) {
+          NewLIR2(kX86Mov32RR, r_dest_src1, r_src2);
+          NewLIR2(kX86Sal32RI, r_dest_src1, 24);
+          return NewLIR2(kX86Sar32RI, r_dest_src1, 24);
+        } else {
+          opcode = kX86Movsx8RR;
+        }
+        break;
+      case kOp2Short: opcode = kX86Movsx16RR; break;
+      case kOp2Char: opcode = kX86Movzx16RR; break;
+      case kOpMul: opcode = kX86Imul32RR; break;
+      default:
+        LOG(FATAL) << "Bad case in OpRegReg " << op;
+        break;
+    }
+    CHECK(!src2_must_be_cx || r_src2 == rCX);
+    return NewLIR2(opcode, r_dest_src1, r_src2);
+}
+
+LIR* X86Mir2Lir::OpRegMem(OpKind op, int r_dest, int rBase,
+              int offset)
+{
+  X86OpCode opcode = kX86Nop;
+  switch (op) {
+      // X86 binary opcodes
+    case kOpSub: opcode = kX86Sub32RM; break;
+    case kOpMov: opcode = kX86Mov32RM; break;
+    case kOpCmp: opcode = kX86Cmp32RM; break;
+    case kOpAdd: opcode = kX86Add32RM; break;
+    case kOpAnd: opcode = kX86And32RM; break;
+    case kOpOr:  opcode = kX86Or32RM; break;
+    case kOpXor: opcode = kX86Xor32RM; break;
+    case kOp2Byte: opcode = kX86Movsx8RM; break;
+    case kOp2Short: opcode = kX86Movsx16RM; break;
+    case kOp2Char: opcode = kX86Movzx16RM; break;
+    case kOpMul:
+    default:
+      LOG(FATAL) << "Bad case in OpRegMem " << op;
+      break;
+  }
+  return NewLIR3(opcode, r_dest, rBase, offset);
+}
+
+LIR* X86Mir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1,
+                 int r_src2)
+{
+  if (r_dest != r_src1 && r_dest != r_src2) {
+    if (op == kOpAdd) { // lea special case, except can't encode rbp as base
+      if (r_src1 == r_src2) {
+        OpRegCopy(r_dest, r_src1);
+        return OpRegImm(kOpLsl, r_dest, 1);
+      } else if (r_src1 != rBP) {
+        return NewLIR5(kX86Lea32RA, r_dest, r_src1 /* base */,
+                       r_src2 /* index */, 0 /* scale */, 0 /* disp */);
+      } else {
+        return NewLIR5(kX86Lea32RA, r_dest, r_src2 /* base */,
+                       r_src1 /* index */, 0 /* scale */, 0 /* disp */);
+      }
+    } else {
+      OpRegCopy(r_dest, r_src1);
+      return OpRegReg(op, r_dest, r_src2);
+    }
+  } else if (r_dest == r_src1) {
+    return OpRegReg(op, r_dest, r_src2);
+  } else {  // r_dest == r_src2
+    switch (op) {
+      case kOpSub:  // non-commutative
+        OpReg(kOpNeg, r_dest);
+        op = kOpAdd;
+        break;
+      case kOpSbc:
+      case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
+        int t_reg = AllocTemp();
+        OpRegCopy(t_reg, r_src1);
+        OpRegReg(op, t_reg, r_src2);
+        LIR* res = OpRegCopy(r_dest, t_reg);
+        FreeTemp(t_reg);
+        return res;
+      }
+      case kOpAdd:  // commutative
+      case kOpOr:
+      case kOpAdc:
+      case kOpAnd:
+      case kOpXor:
+        break;
+      default:
+        LOG(FATAL) << "Bad case in OpRegRegReg " << op;
+    }
+    return OpRegReg(op, r_dest, r_src1);
+  }
+}
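+
+// Example: OpRegRegReg(kOpAdd, rSI, rDI, rBX), with all three registers
+// distinct, collapses to a single "lea esi, [edi+ebx]" instead of a copy
+// followed by an add.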
+
+LIR* X86Mir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src,
+                 int value)
+{
+  if (op == kOpMul) {
+    X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
+    return NewLIR3(opcode, r_dest, r_src, value);
+  } else if (op == kOpAnd) {
+    if (value == 0xFF && r_src < 4) {
+      return NewLIR2(kX86Movzx8RR, r_dest, r_src);
+    } else if (value == 0xFFFF) {
+      return NewLIR2(kX86Movzx16RR, r_dest, r_src);
+    }
+  }
+  if (r_dest != r_src) {
+    if (false && op == kOpLsl && value >= 0 && value <= 3) { // lea shift special case
+      // TODO: fix bug in LEA encoding when disp == 0
+      return NewLIR5(kX86Lea32RA, r_dest,  r5sib_no_base /* base */,
+                     r_src /* index */, value /* scale */, 0 /* disp */);
+    } else if (op == kOpAdd) { // lea add special case
+      return NewLIR5(kX86Lea32RA, r_dest, r_src /* base */,
+                     r4sib_no_index /* index */, 0 /* scale */, value /* disp */);
+    }
+    OpRegCopy(r_dest, r_src);
+  }
+  return OpRegImm(op, r_dest, value);
+}
+
+LIR* X86Mir2Lir::OpThreadMem(OpKind op, int thread_offset)
+{
+  X86OpCode opcode = kX86Bkpt;
+  switch (op) {
+    case kOpBlx: opcode = kX86CallT;  break;
+    default:
+      LOG(FATAL) << "Bad opcode: " << op;
+      break;
+  }
+  return NewLIR1(opcode, thread_offset);
+}
+
+LIR* X86Mir2Lir::OpMem(OpKind op, int rBase, int disp)
+{
+  X86OpCode opcode = kX86Bkpt;
+  switch (op) {
+    case kOpBlx: opcode = kX86CallM;  break;
+    default:
+      LOG(FATAL) << "Bad opcode: " << op;
+      break;
+  }
+  return NewLIR2(opcode, rBase, disp);
+}
+
+LIR* X86Mir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value)
+{
+    int32_t val_lo = Low32Bits(value);
+    int32_t val_hi = High32Bits(value);
+    LIR *res;
+    if (X86_FPREG(r_dest_lo)) {
+      DCHECK(X86_FPREG(r_dest_hi));  // ignore r_dest_hi
+      if (value == 0) {
+        return NewLIR2(kX86XorpsRR, r_dest_lo, r_dest_lo);
+      } else {
+        if (val_lo == 0) {
+          res = NewLIR2(kX86XorpsRR, r_dest_lo, r_dest_lo);
+        } else {
+          res = LoadConstantNoClobber(r_dest_lo, val_lo);
+        }
+        if (val_hi != 0) {
+          LoadConstantNoClobber(r_dest_hi, val_hi);
+          NewLIR2(kX86PsllqRI, r_dest_hi, 32);
+          NewLIR2(kX86OrpsRR, r_dest_lo, r_dest_hi);
+        }
+      }
+    } else {
+      res = LoadConstantNoClobber(r_dest_lo, val_lo);
+      LoadConstantNoClobber(r_dest_hi, val_hi);
+    }
+    return res;
+}
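+
+// For an XMM destination with both halves nonzero, the path above builds
+// the constant in-register, roughly (core temp and XMM numbers
+// illustrative):
+//   mov    eax, val_lo
+//   movd   xmm0, eax
+//   mov    eax, val_hi
+//   movd   xmm1, eax
+//   psllq  xmm1, 32
+//   orps   xmm0, xmm1    ; xmm0 = val_hi:val_lo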
+
+LIR* X86Mir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale,
+                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
+                                     int s_reg) {
+  LIR *load = NULL;
+  LIR *load2 = NULL;
+  bool is_array = r_index != INVALID_REG;
+  bool pair = false;
+  bool is64bit = false;
+  X86OpCode opcode = kX86Nop;
+  switch (size) {
+    case kLong:
+    case kDouble:
+      is64bit = true;
+      if (X86_FPREG(r_dest)) {
+        opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
+        if (X86_SINGLEREG(r_dest)) {
+          DCHECK(X86_FPREG(r_dest_hi));
+          DCHECK_EQ(r_dest, (r_dest_hi - 1));
+          r_dest = S2d(r_dest, r_dest_hi);
+        }
+        r_dest_hi = r_dest + 1;
+      } else {
+        pair = true;
+        opcode = is_array ? kX86Mov32RA  : kX86Mov32RM;
+      }
+      // TODO: double load may be from an unaligned address
+      DCHECK_EQ((displacement & 0x3), 0);
+      break;
+    case kWord:
+    case kSingle:
+      opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
+      if (X86_FPREG(r_dest)) {
+        opcode = is_array ? kX86MovssRA : kX86MovssRM;
+        DCHECK(X86_SINGLEREG(r_dest));
+      }
+      DCHECK_EQ((displacement & 0x3), 0);
+      break;
+    case kUnsignedHalf:
+      opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
+      DCHECK_EQ((displacement & 0x1), 0);
+      break;
+    case kSignedHalf:
+      opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
+      DCHECK_EQ((displacement & 0x1), 0);
+      break;
+    case kUnsignedByte:
+      opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
+      break;
+    case kSignedByte:
+      opcode = is_array ? kX86Movsx8RA : kX86Movsx8RM;
+      break;
+    default:
+      LOG(FATAL) << "Bad case in LoadBaseIndexedDisp";
+  }
+
+  if (!is_array) {
+    if (!pair) {
+      load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
+    } else {
+      if (rBase == r_dest) {
+        load2 = NewLIR3(opcode, r_dest_hi, rBase,
+                        displacement + HIWORD_OFFSET);
+        load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
+      } else {
+        load = NewLIR3(opcode, r_dest, rBase, displacement + LOWORD_OFFSET);
+        load2 = NewLIR3(opcode, r_dest_hi, rBase,
+                        displacement + HIWORD_OFFSET);
+      }
+    }
+    if (rBase == rX86_SP) {
+      AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+                              true /* is_load */, is64bit);
+      if (pair) {
+        AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
+                                true /* is_load */, is64bit);
+      }
+    }
+  } else {
+    if (!pair) {
+      load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
+                     displacement + LOWORD_OFFSET);
+    } else {
+      if (rBase == r_dest) {
+        load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
+                        displacement + HIWORD_OFFSET);
+        load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
+                       displacement + LOWORD_OFFSET);
+      } else {
+        load = NewLIR5(opcode, r_dest, rBase, r_index, scale,
+                       displacement + LOWORD_OFFSET);
+        load2 = NewLIR5(opcode, r_dest_hi, rBase, r_index, scale,
+                        displacement + HIWORD_OFFSET);
+      }
+    }
+  }
+
+  return load;
+}
+
+/* Load value from base + scaled index. */
+LIR* X86Mir2Lir::LoadBaseIndexed(int rBase,
+                     int r_index, int r_dest, int scale, OpSize size) {
+  return LoadBaseIndexedDisp(rBase, r_index, scale, 0,
+                             r_dest, INVALID_REG, size, INVALID_SREG);
+}
+
+LIR* X86Mir2Lir::LoadBaseDisp(int rBase, int displacement,
+                  int r_dest, OpSize size, int s_reg) {
+  return LoadBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
+                             r_dest, INVALID_REG, size, s_reg);
+}
+
+LIR* X86Mir2Lir::LoadBaseDispWide(int rBase, int displacement,
+                      int r_dest_lo, int r_dest_hi, int s_reg) {
+  return LoadBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
+                             r_dest_lo, r_dest_hi, kLong, s_reg);
+}
+
+LIR* X86Mir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
+                                      int displacement, int r_src, int r_src_hi, OpSize size,
+                                      int s_reg) {
+  LIR *store = NULL;
+  LIR *store2 = NULL;
+  bool is_array = r_index != INVALID_REG;
+  bool pair = false;
+  bool is64bit = false;
+  X86OpCode opcode = kX86Nop;
+  switch (size) {
+    case kLong:
+    case kDouble:
+      is64bit = true;
+      if (X86_FPREG(r_src)) {
+        opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
+        if (X86_SINGLEREG(r_src)) {
+          DCHECK(X86_FPREG(r_src_hi));
+          DCHECK_EQ(r_src, (r_src_hi - 1));
+          r_src = S2d(r_src, r_src_hi);
+        }
+        r_src_hi = r_src + 1;
+      } else {
+        pair = true;
+        opcode = is_array ? kX86Mov32AR  : kX86Mov32MR;
+      }
+      // TODO: double store may be to an unaligned address
+      DCHECK_EQ((displacement & 0x3), 0);
+      break;
+    case kWord:
+    case kSingle:
+      opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
+      if (X86_FPREG(r_src)) {
+        opcode = is_array ? kX86MovssAR : kX86MovssMR;
+        DCHECK(X86_SINGLEREG(r_src));
+      }
+      DCHECK_EQ((displacement & 0x3), 0);
+      break;
+    case kUnsignedHalf:
+    case kSignedHalf:
+      opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
+      DCHECK_EQ((displacement & 0x1), 0);
+      break;
+    case kUnsignedByte:
+    case kSignedByte:
+      opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
+      break;
+    default:
+      LOG(FATAL) << "Bad case in StoreBaseIndexedDisp";
+  }
+
+  if (!is_array) {
+    if (!pair) {
+      store = NewLIR3(opcode, rBase, displacement + LOWORD_OFFSET, r_src);
+    } else {
+      store = NewLIR3(opcode, rBase, displacement + LOWORD_OFFSET, r_src);
+      store2 = NewLIR3(opcode, rBase, displacement + HIWORD_OFFSET, r_src_hi);
+    }
+    if (rBase == rX86_SP) {
+      AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+                              false /* is_load */, is64bit);
+      if (pair) {
+        AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
+                                false /* is_load */, is64bit);
+      }
+    }
+  } else {
+    if (!pair) {
+      store = NewLIR5(opcode, rBase, r_index, scale,
+                      displacement + LOWORD_OFFSET, r_src);
+    } else {
+      store = NewLIR5(opcode, rBase, r_index, scale,
+                      displacement + LOWORD_OFFSET, r_src);
+      store2 = NewLIR5(opcode, rBase, r_index, scale,
+                       displacement + HIWORD_OFFSET, r_src_hi);
+    }
+  }
+
+  return store;
+}
+
+/* Store value to base + scaled index. */
+LIR* X86Mir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
+                      int scale, OpSize size)
+{
+  return StoreBaseIndexedDisp(rBase, r_index, scale, 0,
+                              r_src, INVALID_REG, size, INVALID_SREG);
+}
+
+LIR* X86Mir2Lir::StoreBaseDisp(int rBase, int displacement,
+                               int r_src, OpSize size)
+{
+    return StoreBaseIndexedDisp(rBase, INVALID_REG, 0,
+                                displacement, r_src, INVALID_REG, size,
+                                INVALID_SREG);
+}
+
+LIR* X86Mir2Lir::StoreBaseDispWide(int rBase, int displacement,
+                                   int r_src_lo, int r_src_hi)
+{
+  return StoreBaseIndexedDisp(rBase, INVALID_REG, 0, displacement,
+                              r_src_lo, r_src_hi, kLong, INVALID_SREG);
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
new file mode 100644
index 0000000..600bd03
--- /dev/null
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -0,0 +1,442 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_DEX_QUICK_X86_X86LIR_H_
+#define ART_SRC_COMPILER_DEX_QUICK_X86_X86LIR_H_
+
+#include "dex/compiler_internals.h"
+
+namespace art {
+
+/*
+ * Runtime register conventions. We consider x86, x86-64 and x32 (32-bit mode x86-64), although
+ * we currently only target x86. The ABIs have different conventions, and we hope to use a single
+ * convention to simplify code generation. Changing something that is callee save and making it
+ * caller save places a burden on up-calls to save/restore the callee save register; however, there
+ * are few registers that are callee save in the ABI. Changing something that is caller save and
+ * making it callee save places a burden on down-calls to save/restore the callee save register.
+ * For these reasons we aim to match native conventions for caller and callee save. The first 4
+ * registers can be used for byte operations; for this reason they are preferred for temporary
+ * scratch registers.
+ *
+ * General Purpose Registers:
+ *  Native: x86         | x86-64 / x32      | ART
+ *  r0/eax: caller save | caller save       | caller, Method*, scratch, return value
+ *  r1/ecx: caller save | caller save, arg4 | caller, arg1, scratch
+ *  r2/edx: caller save | caller save, arg3 | caller, arg2, scratch, high half of long return
+ *  r3/ebx: callEE save | callEE save       | callER, arg3, scratch
+ *  r4/esp: stack pointer
+ *  r5/ebp: callee save | callee save       | callee, available for dalvik register promotion
+ *  r6/esi: callEE save | callER save, arg2 | callee, available for dalvik register promotion
+ *  r7/edi: callEE save | callER save, arg1 | callee, available for dalvik register promotion
+ *  ---  x86-64/x32 registers
+ *  Native: x86-64 / x32      | ART
+ *  r8:     caller save, arg5 | caller, scratch
+ *  r9:     caller save, arg6 | caller, scratch
+ *  r10:    caller save       | caller, scratch
+ *  r11:    caller save       | caller, scratch
+ *  r12:    callee save       | callee, available for dalvik register promotion
+ *  r13:    callee save       | callee, available for dalvik register promotion
+ *  r14:    callee save       | callee, available for dalvik register promotion
+ *  r15:    callee save       | callee, available for dalvik register promotion
+ *
+ * There is no rSELF; instead, on x86 the fs: segment register has Thread::Current as its base
+ * address, whereas on x86-64/x32 gs: holds it.
+ *
+ * For floating point we don't support CPUs without SSE2 (i.e., we require something newer than a
+ * Pentium III):
+ *  Native: x86       | x86-64 / x32      | ART
+ *  XMM0: caller save | caller save, arg1 | caller, float/double return value (except for native x86 code)
+ *  XMM1: caller save | caller save, arg2 | caller, scratch
+ *  XMM2: caller save | caller save, arg3 | caller, scratch
+ *  XMM3: caller save | caller save, arg4 | caller, scratch
+ *  XMM4: caller save | caller save, arg5 | caller, scratch
+ *  XMM5: caller save | caller save, arg6 | caller, scratch
+ *  XMM6: caller save | caller save, arg7 | caller, scratch
+ *  XMM7: caller save | caller save, arg8 | caller, scratch
+ *  ---  x86-64/x32 registers
+ *  XMM8 .. 15: caller save
+ *
+ * X87 is a necessary evil outside of ART code:
+ *  ST0:  x86 float/double native return value, caller save
+ *  ST1 .. ST7: caller save
+ *
+ *  Stack frame diagram (stack grows down, higher addresses at top):
+ *
+ * +------------------------+
+ * | IN[ins-1]              |  {Note: resides in caller's frame}
+ * |       .                |
+ * | IN[0]                  |
+ * | caller's Method*       |
+ * +========================+  {Note: start of callee's frame}
+ * | return address         |  {pushed by call}
+ * | spill region           |  {variable sized}
+ * +------------------------+
+ * | ...filler word...      |  {Note: used as 2nd word of V[locals-1] if long}
+ * +------------------------+
+ * | V[locals-1]            |
+ * | V[locals-2]            |
+ * |      .                 |
+ * |      .                 |
+ * | V[1]                   |
+ * | V[0]                   |
+ * +------------------------+
+ * |  0 to 3 words padding  |
+ * +------------------------+
+ * | OUT[outs-1]            |
+ * | OUT[outs-2]            |
+ * |       .                |
+ * | OUT[0]                 |
+ * | cur_method*            | <<== sp w/ 16-byte alignment
+ * +========================+
+ */
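+
+// Illustrative reading of the diagram above (assuming 4 byte words): with sp
+// pointing at cur_method*, OUT[0] sits at [sp + 4], OUT[1] at [sp + 8], and
+// so on; the 0 to 3 words of padding keep sp 16-byte aligned at calls.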
+
+// Offset to distinguish FP regs.
+#define X86_FP_REG_OFFSET 32
+// Offset to distinguish DP FP regs.
+#define X86_FP_DOUBLE (X86_FP_REG_OFFSET + 16)
+// Offset to distinguish the extra regs.
+#define X86_EXTRA_REG_OFFSET (X86_FP_DOUBLE + 16)
+// Reg types.
+#define X86_REGTYPE(x) (x & (X86_FP_REG_OFFSET | X86_FP_DOUBLE))
+#define X86_FPREG(x) ((x & X86_FP_REG_OFFSET) == X86_FP_REG_OFFSET)
+#define X86_EXTRAREG(x) ((x & X86_EXTRA_REG_OFFSET) == X86_EXTRA_REG_OFFSET)
+#define X86_DOUBLEREG(x) ((x & X86_FP_DOUBLE) == X86_FP_DOUBLE)
+#define X86_SINGLEREG(x) (X86_FPREG(x) && !X86_DOUBLEREG(x))
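+
+// Worked example (illustrative): X86_FP_DOUBLE is 48 and X86_EXTRA_REG_OFFSET
+// is 64, so for fr3 (= 3 + X86_FP_REG_OFFSET = 35):
+//   X86_FPREG(35)     -> (35 & 32) == 32 -> true
+//   X86_DOUBLEREG(35) -> (35 & 48) == 48 -> false
+//   X86_SINGLEREG(35) -> true (an FP reg not tagged as a double)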
+
+/*
+ * Note: the low register of a floating point pair is sufficient to
+ * create the name of a double, but both names are required to be passed
+ * to allow asserts to verify that the pair is consecutive, should
+ * significant rework be done in this area.  Also, it is a good reminder
+ * in the calling code that reg locations always describe doubles as a
+ * pair of singles.
+ */
+#define X86_S2D(x,y) ((x) | X86_FP_DOUBLE)
+/* Mask to strip off fp flags */
+#define X86_FP_REG_MASK 0xF
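+
+// Illustrative: X86_S2D(fr0, fr1) yields fr0 | X86_FP_DOUBLE = 32 | 48 = 48;
+// the second argument exists only so callers can assert the pair is
+// consecutive. X86_FP_REG_MASK recovers the hardware register number:
+// (X86_S2D(fr0, fr1) & X86_FP_REG_MASK) == 0, i.e. xmm0.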
+
+// RegisterLocation templates for return values (rAX, rAX/rDX or XMM0).
+//                               location,     wide, defined, const, fp, core, ref, high_word, home, low_reg, high_reg,     s_reg_low,    orig_sreg
+#define X86_LOC_C_RETURN             {kLocPhysReg, 0,    0,       0,     0,  0,    0,   0,        1,    rAX,    INVALID_REG, INVALID_SREG, INVALID_SREG}
+#define X86_LOC_C_RETURN_WIDE        {kLocPhysReg, 1,    0,       0,     0,  0,    0,   0,        1,    rAX,    rDX,         INVALID_SREG, INVALID_SREG}
+#define X86_LOC_C_RETURN_FLOAT       {kLocPhysReg, 0,    0,       0,     1,  0,    0,   0,        1,    fr0,    INVALID_REG, INVALID_SREG, INVALID_SREG}
+#define X86_LOC_C_RETURN_DOUBLE      {kLocPhysReg, 1,    0,       0,     1,  0,    0,   0,        1,    fr0,    fr1,         INVALID_SREG, INVALID_SREG}
+
+enum X86ResourceEncodingPos {
+  kX86GPReg0   = 0,
+  kX86RegSP    = 4,
+  kX86FPReg0   = 16,  // xmm0 .. xmm7/xmm15.
+  kX86FPRegEnd = 32,
+  kX86RegEnd   = kX86FPRegEnd,
+};
+
+#define ENCODE_X86_REG_LIST(N)      (static_cast<uint64_t>(N))
+#define ENCODE_X86_REG_SP           (1ULL << kX86RegSP)
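+
+// Illustrative: general purpose registers occupy bits [0..15] of a resource
+// mask, so a mask covering eAX and eDX could be built as
+//   ENCODE_X86_REG_LIST((1 << rAX) | (1 << rDX))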
+
+enum X86NativeRegisterPool {
+  r0     = 0,
+  rAX    = r0,
+  r1     = 1,
+  rCX    = r1,
+  r2     = 2,
+  rDX    = r2,
+  r3     = 3,
+  rBX    = r3,
+  r4sp   = 4,
+  rX86_SP    = r4sp,
+  r4sib_no_index = r4sp,
+  r5     = 5,
+  rBP    = r5,
+  r5sib_no_base = r5,
+  r6     = 6,
+  rSI    = r6,
+  r7     = 7,
+  rDI    = r7,
+#ifndef TARGET_REX_SUPPORT
+  rRET   = 8,  // fake return address register for core spill mask.
+#else
+  r8     = 8,
+  r9     = 9,
+  r10    = 10,
+  r11    = 11,
+  r12    = 12,
+  r13    = 13,
+  r14    = 14,
+  r15    = 15,
+  rRET   = 16,  // fake return address register for core spill mask.
+#endif
+  fr0  =  0 + X86_FP_REG_OFFSET,
+  fr1  =  1 + X86_FP_REG_OFFSET,
+  fr2  =  2 + X86_FP_REG_OFFSET,
+  fr3  =  3 + X86_FP_REG_OFFSET,
+  fr4  =  4 + X86_FP_REG_OFFSET,
+  fr5  =  5 + X86_FP_REG_OFFSET,
+  fr6  =  6 + X86_FP_REG_OFFSET,
+  fr7  =  7 + X86_FP_REG_OFFSET,
+  fr8  =  8 + X86_FP_REG_OFFSET,
+  fr9  =  9 + X86_FP_REG_OFFSET,
+  fr10 = 10 + X86_FP_REG_OFFSET,
+  fr11 = 11 + X86_FP_REG_OFFSET,
+  fr12 = 12 + X86_FP_REG_OFFSET,
+  fr13 = 13 + X86_FP_REG_OFFSET,
+  fr14 = 14 + X86_FP_REG_OFFSET,
+  fr15 = 15 + X86_FP_REG_OFFSET,
+};
+
+#define rX86_ARG0 rAX
+#define rX86_ARG1 rCX
+#define rX86_ARG2 rDX
+#define rX86_ARG3 rBX
+#define rX86_FARG0 rAX
+#define rX86_FARG1 rCX
+#define rX86_FARG2 rDX
+#define rX86_FARG3 rBX
+#define rX86_RET0 rAX
+#define rX86_RET1 rDX
+#define rX86_INVOKE_TGT rAX
+#define rX86_LR INVALID_REG
+#define rX86_SUSPEND INVALID_REG
+#define rX86_SELF INVALID_REG
+#define rX86_COUNT rCX
+#define rX86_PC INVALID_REG
+
+/*
+ * The following enum defines the list of X86 instructions supported by the
+ * assembler. Their corresponding EncodingMap positions are defined in
+ * assemble_x86.cc.
+ */
+enum X86OpCode {
+  kX86First = 0,
+  kX8632BitData = kX86First, // data [31..0].
+  kX86Bkpt,
+  kX86Nop,
+  // Define groups of binary operations
+  // MR - Memory Register  - opcode [base + disp], reg
+  //             - lir operands - 0: base, 1: disp, 2: reg
+  // AR - Array Register   - opcode [base + index * scale + disp], reg
+  //             - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
+  // TR - Thread Register  - opcode fs:[disp], reg - where fs: is equal to Thread::Current()
+  //             - lir operands - 0: disp, 1: reg
+  // RR - Register Register  - opcode reg1, reg2
+  //             - lir operands - 0: reg1, 1: reg2
+  // RM - Register Memory  - opcode reg, [base + disp]
+  //             - lir operands - 0: reg, 1: base, 2: disp
+  // RA - Register Array   - opcode reg, [base + index * scale + disp]
+  //             - lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
+  // RT - Register Thread  - opcode reg, fs:[disp] - where fs: is equal to Thread::Current()
+  //             - lir operands - 0: reg, 1: disp
+  // RI - Register Immediate - opcode reg, #immediate
+  //             - lir operands - 0: reg, 1: immediate
+  // MI - Memory Immediate   - opcode [base + disp], #immediate
+  //             - lir operands - 0: base, 1: disp, 2: immediate
+  // AI - Array Immediate  - opcode [base + index * scale + disp], #immediate
+  //             - lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
+  // TI - Thread Register  - opcode fs:[disp], imm - where fs: is equal to Thread::Current()
+  //             - lir operands - 0: disp, 1: imm
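+  //
+  // Illustrative examples of the naming scheme (not exhaustive):
+  //   kX86Add32MR with lir operands {rBX, 8, rAX}  -> add [ebx + 8], eax
+  //   kX86Cmp32RI with lir operands {rCX, 0x100}   -> cmp ecx, 0x100
+  //   kX86Sub32RT with lir operands {rAX, disp}    -> sub eax, fs:[disp]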
+#define BinaryOpCode(opcode) \
+  opcode ## 8MR, opcode ## 8AR, opcode ## 8TR, \
+  opcode ## 8RR, opcode ## 8RM, opcode ## 8RA, opcode ## 8RT, \
+  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, opcode ## 8TI, \
+  opcode ## 16MR, opcode ## 16AR, opcode ## 16TR, \
+  opcode ## 16RR, opcode ## 16RM, opcode ## 16RA, opcode ## 16RT, \
+  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, opcode ## 16TI, \
+  opcode ## 16RI8, opcode ## 16MI8, opcode ## 16AI8, opcode ## 16TI8, \
+  opcode ## 32MR, opcode ## 32AR, opcode ## 32TR,  \
+  opcode ## 32RR, opcode ## 32RM, opcode ## 32RA, opcode ## 32RT, \
+  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, opcode ## 32TI, \
+  opcode ## 32RI8, opcode ## 32MI8, opcode ## 32AI8, opcode ## 32TI8
+  BinaryOpCode(kX86Add),
+  BinaryOpCode(kX86Or),
+  BinaryOpCode(kX86Adc),
+  BinaryOpCode(kX86Sbb),
+  BinaryOpCode(kX86And),
+  BinaryOpCode(kX86Sub),
+  BinaryOpCode(kX86Xor),
+  BinaryOpCode(kX86Cmp),
+#undef BinaryOpCode
+  kX86Imul16RRI, kX86Imul16RMI, kX86Imul16RAI,
+  kX86Imul32RRI, kX86Imul32RMI, kX86Imul32RAI,
+  kX86Imul32RRI8, kX86Imul32RMI8, kX86Imul32RAI8,
+  kX86Mov8MR, kX86Mov8AR, kX86Mov8TR,
+  kX86Mov8RR, kX86Mov8RM, kX86Mov8RA, kX86Mov8RT,
+  kX86Mov8RI, kX86Mov8MI, kX86Mov8AI, kX86Mov8TI,
+  kX86Mov16MR, kX86Mov16AR, kX86Mov16TR,
+  kX86Mov16RR, kX86Mov16RM, kX86Mov16RA, kX86Mov16RT,
+  kX86Mov16RI, kX86Mov16MI, kX86Mov16AI, kX86Mov16TI,
+  kX86Mov32MR, kX86Mov32AR, kX86Mov32TR,
+  kX86Mov32RR, kX86Mov32RM, kX86Mov32RA, kX86Mov32RT,
+  kX86Mov32RI, kX86Mov32MI, kX86Mov32AI, kX86Mov32TI,
+  kX86Lea32RA,
+  // RC - Register CL - opcode reg, CL
+  //          - lir operands - 0: reg, 1: CL
+  // MC - Memory CL   - opcode [base + disp], CL
+  //          - lir operands - 0: base, 1: disp, 2: CL
+  // AC - Array CL  - opcode [base + index * scale + disp], CL
+  //          - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: CL
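+  //
+  // Illustrative: kX86Sar32RC with lir operands {rAX, rCX} -> sar eax, cl;
+  // kX86Shr32RI with lir operands {rDX, 4} -> shr edx, 4.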
+#define BinaryShiftOpCode(opcode) \
+  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, \
+  opcode ## 8RC, opcode ## 8MC, opcode ## 8AC, \
+  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, \
+  opcode ## 16RC, opcode ## 16MC, opcode ## 16AC, \
+  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, \
+  opcode ## 32RC, opcode ## 32MC, opcode ## 32AC
+  BinaryShiftOpCode(kX86Rol),
+  BinaryShiftOpCode(kX86Ror),
+  BinaryShiftOpCode(kX86Rcl),
+  BinaryShiftOpCode(kX86Rcr),
+  BinaryShiftOpCode(kX86Sal),
+  BinaryShiftOpCode(kX86Shr),
+  BinaryShiftOpCode(kX86Sar),
+#undef BinaryShiftOpCode
+  kX86Cmc,
+#define UnaryOpcode(opcode, reg, mem, array) \
+  opcode ## 8 ## reg, opcode ## 8 ## mem, opcode ## 8 ## array, \
+  opcode ## 16 ## reg, opcode ## 16 ## mem, opcode ## 16 ## array, \
+  opcode ## 32 ## reg, opcode ## 32 ## mem, opcode ## 32 ## array
+  UnaryOpcode(kX86Test, RI, MI, AI),
+  kX86Test32RR,
+  UnaryOpcode(kX86Not, R, M, A),
+  UnaryOpcode(kX86Neg, R, M, A),
+  UnaryOpcode(kX86Mul,  DaR, DaM, DaA),
+  UnaryOpcode(kX86Imul, DaR, DaM, DaA),
+  UnaryOpcode(kX86Divmod,  DaR, DaM, DaA),
+  UnaryOpcode(kX86Idivmod, DaR, DaM, DaA),
+#undef UnaryOpcode
+#define Binary0fOpCode(opcode) \
+  opcode ## RR, opcode ## RM, opcode ## RA
+  Binary0fOpCode(kX86Movsd),
+  kX86MovsdMR,
+  kX86MovsdAR,
+  Binary0fOpCode(kX86Movss),
+  kX86MovssMR,
+  kX86MovssAR,
+  Binary0fOpCode(kX86Cvtsi2sd), // int to double
+  Binary0fOpCode(kX86Cvtsi2ss), // int to float
+  Binary0fOpCode(kX86Cvttsd2si), // truncating double to int
+  Binary0fOpCode(kX86Cvttss2si), // truncating float to int
+  Binary0fOpCode(kX86Cvtsd2si), // rounding double to int
+  Binary0fOpCode(kX86Cvtss2si), // rounding float to int
+  Binary0fOpCode(kX86Ucomisd),  // unordered double compare
+  Binary0fOpCode(kX86Ucomiss),  // unordered float compare
+  Binary0fOpCode(kX86Comisd),   // double compare
+  Binary0fOpCode(kX86Comiss),   // float compare
+  Binary0fOpCode(kX86Orps),     // or of floating point registers
+  Binary0fOpCode(kX86Xorps),    // xor of floating point registers
+  Binary0fOpCode(kX86Addsd),    // double add
+  Binary0fOpCode(kX86Addss),    // float add
+  Binary0fOpCode(kX86Mulsd),    // double multiply
+  Binary0fOpCode(kX86Mulss),    // float multiply
+  Binary0fOpCode(kX86Cvtsd2ss), // double to float
+  Binary0fOpCode(kX86Cvtss2sd), // float to double
+  Binary0fOpCode(kX86Subsd),    // double subtract
+  Binary0fOpCode(kX86Subss),    // float subtract
+  Binary0fOpCode(kX86Divsd),    // double divide
+  Binary0fOpCode(kX86Divss),    // float divide
+  kX86PsrlqRI,                  // right shift of floating point registers
+  kX86PsllqRI,                  // left shift of floating point registers
+  Binary0fOpCode(kX86Movdxr),   // move into xmm from gpr
+  kX86MovdrxRR, kX86MovdrxMR, kX86MovdrxAR, // move into reg from xmm
+  kX86Set8R, kX86Set8M, kX86Set8A, // set byte depending on condition operand
+  kX86Mfence,                   // memory barrier
+  Binary0fOpCode(kX86Imul16),   // 16bit multiply
+  Binary0fOpCode(kX86Imul32),   // 32bit multiply
+  kX86CmpxchgRR, kX86CmpxchgMR, kX86CmpxchgAR, // compare and exchange
+  kX86LockCmpxchgRR, kX86LockCmpxchgMR, kX86LockCmpxchgAR, // locked compare and exchange
+  Binary0fOpCode(kX86Movzx8),   // zero-extend 8-bit value
+  Binary0fOpCode(kX86Movzx16),  // zero-extend 16-bit value
+  Binary0fOpCode(kX86Movsx8),   // sign-extend 8-bit value
+  Binary0fOpCode(kX86Movsx16),  // sign-extend 16-bit value
+#undef Binary0fOpCode
+  kX86Jcc8, kX86Jcc32,  // jCC rel8/32; lir operands - 0: rel, 1: CC, target assigned
+  kX86Jmp8, kX86Jmp32,  // jmp rel8/32; lir operands - 0: rel, target assigned
+  kX86JmpR,             // jmp reg; lir operands - 0: reg
+  kX86CallR,            // call reg; lir operands - 0: reg
+  kX86CallM,            // call [base + disp]; lir operands - 0: base, 1: disp
+  kX86CallA,            // call [base + index * scale + disp]
+                        // lir operands - 0: base, 1: index, 2: scale, 3: disp
+  kX86CallT,            // call fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp
+  kX86Ret,              // ret; no lir operands
+  kX86StartOfMethod,    // call 0; pop reg; sub reg, # - generate start of method into reg
+                        // lir operands - 0: reg
+  kX86PcRelLoadRA,      // mov reg, [base + index * scale + PC relative displacement]
+                        // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
+  kX86PcRelAdr,         // mov reg, PC relative displacement; lir operands - 0: reg, 1: table
+  kX86Last
+};
+
+/* Instruction assembly field_loc kind */
+enum X86EncodingKind {
+  kData,                                   // Special case for raw data.
+  kNop,                                    // Special case for variable length nop.
+  kNullary,                                // Opcode that takes no arguments.
+  kReg, kMem, kArray,                      // R, M and A instruction kinds.
+  kMemReg, kArrayReg, kThreadReg,          // MR, AR and TR instruction kinds.
+  kRegReg, kRegMem, kRegArray, kRegThread, // RR, RM, RA and RT instruction kinds.
+  kRegRegStore,                            // RR following the store modrm reg-reg encoding rather than the load.
+  kRegImm, kMemImm, kArrayImm, kThreadImm, // RI, MI, AI and TI instruction kinds.
+  kRegRegImm, kRegMemImm, kRegArrayImm,    // RRI, RMI and RAI instruction kinds.
+  kMovRegImm,                              // Shorter form move RI.
+  kShiftRegImm, kShiftMemImm, kShiftArrayImm,  // Shift opcode with immediate.
+  kShiftRegCl, kShiftMemCl, kShiftArrayCl,     // Shift opcode with register CL.
+  kRegRegReg, kRegRegMem, kRegRegArray,    // RRR, RRM, RRA instruction kinds.
+  kRegCond, kMemCond, kArrayCond,          // R, M, A instruction kinds followed by a condition.
+  kJmp, kJcc, kCall,                       // Branch instruction kinds.
+  kPcRel,                                  // Operation with displacement that is PC relative.
+  kMacro,                                  // An instruction composing multiple others.
+  kUnimplemented                           // Encoding used when an instruction isn't yet implemented.
+};
+
+/* Struct used to define the EncodingMap positions for each X86 opcode */
+struct X86EncodingMap {
+  X86OpCode opcode;      // e.g. kOpAddRI
+  X86EncodingKind kind;  // Used to discriminate in the union below
+  uint64_t flags;
+  struct {
+    uint8_t prefix1;       // non-zero => a prefix byte
+    uint8_t prefix2;       // non-zero => a second prefix byte
+    uint8_t opcode;        // 1 byte opcode
+    uint8_t extra_opcode1; // possible extra opcode byte
+    uint8_t extra_opcode2; // possible second extra opcode byte
+    // 3 bit opcode that gets encoded in the register bits of the modrm byte; use is determined by
+    // the encoding kind
+    uint8_t modrm_opcode;
+    uint8_t ax_opcode;       // non-zero => shorter encoding for AX as a destination
+    uint8_t immediate_bytes; // number of bytes of immediate
+  } skeleton;
+  const char* name;
+  const char* fmt;
+};
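+
+// Illustrative skeleton (hypothetical, not an actual table entry): a
+// "mov r32, imm32" could be encoded as { 0, 0, 0xB8, 0, 0, 0, 0, 4 } --
+// no prefixes, the 0xB8+reg opcode byte, no extra or modrm opcodes, and a
+// 4 byte immediate.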
+
+// FIXME: mem barrier type - what do we do for x86?
+#define kSY 0
+#define kST 0
+
+// Offsets of high and low halves of a 64bit value.
+#define LOWORD_OFFSET 0
+#define HIWORD_OFFSET 4
+
+// Segment override instruction prefix used for quick TLS access to Thread::Current().
+#define THREAD_PREFIX 0x64
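+// Illustrative: with this prefix, the byte sequence 64 8B 05 <disp32>
+// decodes as mov eax, fs:[disp32], i.e. a load relative to Thread::Current().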
+
+#define IS_SIMM8(v) ((-128 <= (v)) && ((v) <= 127))
+#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32767))
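+
+// Illustrative: these let the assembler pick the short immediate forms, e.g.
+// IS_SIMM8(0x7F) is true so the value fits a one-byte immediate (kX86Add32RI8),
+// while IS_SIMM8(0x80) is false and forces the four-byte form (kX86Add32RI).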
+
+extern X86EncodingMap EncodingMap[kX86Last];
+extern X86ConditionCode X86ConditionEncoding(ConditionCode cond);
+
+}  // namespace art
+
+#endif  // ART_SRC_COMPILER_DEX_QUICK_X86_X86LIR_H_