/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_
#define ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_

#include "dex/compiler_internals.h"

namespace art {

/*
 * Runtime register conventions. We consider x86, x86-64 and x32 (32-bit mode x86-64). The ABIs
 * have different conventions and we capture those here. Changing something that is callee save and
 * making it caller save places a burden on up-calls to save/restore the callee save register;
 * however, there are few registers that are callee save in the ABI. Changing something that is
 * caller save and making it callee save places a burden on down-calls to save/restore the callee
 * save register. For these reasons we aim to match native conventions for caller and callee save.
 * On x86 only the first 4 registers can be used for byte operations; for this reason they are
 * preferred for temporary scratch registers.
 *
 * General Purpose Register:
 *  Native: x86 | x86-64 / x32 | ART x86 | ART x86-64
 *  r0/eax: caller | caller | caller, Method*, scratch, return value | caller, scratch, return value
 *  r1/ecx: caller | caller, arg4 | caller, arg1, scratch | caller, arg3, scratch
 *  r2/edx: caller | caller, arg3 | caller, arg2, scratch, high half of long return | caller, arg2, scratch
 *  r3/ebx: callEE | callEE | callER, arg3, scratch | callee, promotable
 *  r4/esp: stack pointer
 *  r5/ebp: callee | callee | callee, promotable | callee, promotable
 *  r6/esi: callEE | callER, arg2 | callee, promotable | caller, arg1, scratch
 *  r7/edi: callEE | callER, arg1 | callee, promotable | caller, Method*, scratch
 *  --- x86-64/x32 registers
 *  Native: x86-64 / x32 | ART
 *  r8:  caller save, arg5 | caller, arg4, scratch
 *  r9:  caller save, arg6 | caller, arg5, scratch
 *  r10: caller save       | caller, scratch
 *  r11: caller save       | caller, scratch
 *  r12: callee save       | callee, available for register promotion (promotable)
 *  r13: callee save       | callee, available for register promotion (promotable)
 *  r14: callee save       | callee, available for register promotion (promotable)
 *  r15: callee save       | callee, available for register promotion (promotable)
 *
 * There is no rSELF; instead, on x86 fs: has a base address of Thread::Current, whereas on
 * x86-64/x32 gs: holds it.
 *
 * For floating point we don't support CPUs without SSE2 support (i.e. newer than PIII):
 *  Native: x86 | x86-64 / x32 | ART x86 | ART x86-64
 *  XMM0: caller | caller, arg1 | caller, float return value | caller, arg1, float return value
 *  XMM1: caller | caller, arg2 | caller, scratch | caller, arg2, scratch
 *  XMM2: caller | caller, arg3 | caller, scratch | caller, arg3, scratch
 *  XMM3: caller | caller, arg4 | caller, scratch | caller, arg4, scratch
 *  XMM4: caller | caller, arg5 | caller, scratch | caller, arg5, scratch
 *  XMM5: caller | caller, arg6 | caller, scratch | caller, arg6, scratch
 *  XMM6: caller | caller, arg7 | caller, scratch | caller, arg7, scratch
 *  XMM7: caller | caller, arg8 | caller, scratch | caller, arg8, scratch
 *  --- x86-64/x32 registers
 *  XMM8 .. 11: caller save available as scratch registers for ART.
 *  XMM12 .. 15: callee save available as promoted registers for ART.
 *  This change (XMM12..15) is for QCG only; for others they are caller save.
 *
 * X87 is a necessary evil outside of ART code for x86:
 *  ST0: x86 float/double native return value, caller save
 *  ST1 .. ST7: caller save
 *
 * Stack frame diagram (stack grows down, higher addresses at top):
 * For a more detailed view of each region see stack.h.
 *
 * +---------------------------+
 * | IN[ins-1]                 | {Note: resides in caller's frame}
 * | .                         |
 * | IN[0]                     |
 * | caller's Method*          |
 * +===========================+ {Note: start of callee's frame}
 * | return address            | {pushed by call}
 * | spill region              | {variable sized}
 * +---------------------------+
 * | ...filler 4-bytes...      | {Note: used as 2nd word of V[locals-1] if long}
 * +---------------------------+
 * | V[locals-1]               |
 * | V[locals-2]               |
 * | .                         |
 * | .                         |
 * | V[1]                      |
 * | V[0]                      |
 * +---------------------------+
 * | 0 to 12-bytes padding     |
 * +---------------------------+
 * | compiler temp region      |
 * +---------------------------+
 * | OUT[outs-1]               |
 * | OUT[outs-2]               |
 * | .                         |
 * | OUT[0]                    |
 * | StackReference<ArtMethod> | <<== sp w/ 16-byte alignment
 * +===========================+
 */
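// Reading the tables above, as an example of the 32-bit ART convention: at an ART-to-ART call the
// target's Method* arrives in EAX, arg1 in ECX, arg2 in EDX and arg3 in EBX; any further arguments
// travel through the caller's OUT[] slots shown in the frame diagram, which the callee then sees
// as its IN[] region.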

enum X86ResourceEncodingPos {
  kX86GPReg0 = 0,
  kX86RegSP = 4,
  kX86FPReg0 = 16,  // xmm0 .. xmm7/xmm15.
  kX86FPRegEnd = 32,
  kX86FPStack = 33,
  kX86RegEnd = kX86FPStack,
};

// FIXME: for 64-bit, perhaps add an X86_64NativeRegisterPool enum?
enum X86NativeRegisterPool {
  r0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
  r0q = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 0,
  rAX = r0,
  r1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
  r1q = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 1,
  rCX = r1,
  r2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
  r2q = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 2,
  rDX = r2,
  r3 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 3,
  r3q = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 3,
  rBX = r3,
  r4sp_32 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 4,
  rX86_SP_32 = r4sp_32,
  r4sp_64 = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 4,
  rX86_SP_64 = r4sp_64,
  r5 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 5,
  r5q = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 5,
  rBP = r5,
  r5sib_no_base = r5,
  r6 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 6,
  r6q = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 6,
  rSI = r6,
  r7 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 7,
  r7q = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 7,
  rDI = r7,
  r8 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 8,
  r8q = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 8,
  r9 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 9,
  r9q = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 9,
  r10 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
  r10q = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 10,
  r11 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
  r11q = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 11,
  r12 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
  r12q = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 12,
  r13 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
  r13q = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 13,
  r14 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
  r14q = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 14,
  r15 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
  r15q = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 15,
  // fake return address register for core spill mask.
  rRET = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,

  // xmm registers, single precision view.
  fr0 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 0,
  fr1 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 1,
  fr2 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 2,
  fr3 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 3,
  fr4 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 4,
  fr5 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 5,
  fr6 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 6,
  fr7 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 7,
  fr8 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 8,
  fr9 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 9,
  fr10 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 10,
  fr11 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 11,
  fr12 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 12,
  fr13 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 13,
  fr14 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 14,
  fr15 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 15,

  // xmm registers, double precision aliases.
  dr0 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
  dr1 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 1,
  dr2 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
  dr3 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 3,
  dr4 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
  dr5 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 5,
  dr6 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
  dr7 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 7,
  dr8 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
  dr9 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 9,
  dr10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
  dr11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
  dr12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
  dr13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
  dr14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
  dr15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,

  // xmm registers, quad precision aliases.
  xr0 = RegStorage::k128BitSolo | 0,
  xr1 = RegStorage::k128BitSolo | 1,
  xr2 = RegStorage::k128BitSolo | 2,
  xr3 = RegStorage::k128BitSolo | 3,
  xr4 = RegStorage::k128BitSolo | 4,
  xr5 = RegStorage::k128BitSolo | 5,
  xr6 = RegStorage::k128BitSolo | 6,
  xr7 = RegStorage::k128BitSolo | 7,
  xr8 = RegStorage::k128BitSolo | 8,
  xr9 = RegStorage::k128BitSolo | 9,
  xr10 = RegStorage::k128BitSolo | 10,
  xr11 = RegStorage::k128BitSolo | 11,
  xr12 = RegStorage::k128BitSolo | 12,
  xr13 = RegStorage::k128BitSolo | 13,
  xr14 = RegStorage::k128BitSolo | 14,
  xr15 = RegStorage::k128BitSolo | 15,

  // TODO: as needed, add 256, 512 and 1024-bit xmm views.
};
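// Note the aliasing in the pool above: one physical register shows up under several views that
// differ only in their RegStorage shape bits, e.g. r0 (32-bit view of EAX), r0q (64-bit view of
// RAX) and the synonym rAX, or fr0/dr0/xr0 for the single/double/128-bit views of XMM0. The rs_
// wrappers below are the same values with RegStorage::kValid OR'd in.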

constexpr RegStorage rs_r0(RegStorage::kValid | r0);
constexpr RegStorage rs_r0q(RegStorage::kValid | r0q);
constexpr RegStorage rs_rAX = rs_r0;
constexpr RegStorage rs_r1(RegStorage::kValid | r1);
constexpr RegStorage rs_r1q(RegStorage::kValid | r1q);
constexpr RegStorage rs_rCX = rs_r1;
constexpr RegStorage rs_r2(RegStorage::kValid | r2);
constexpr RegStorage rs_r2q(RegStorage::kValid | r2q);
constexpr RegStorage rs_rDX = rs_r2;
constexpr RegStorage rs_r3(RegStorage::kValid | r3);
constexpr RegStorage rs_r3q(RegStorage::kValid | r3q);
constexpr RegStorage rs_rBX = rs_r3;
constexpr RegStorage rs_rX86_SP_64(RegStorage::kValid | r4sp_64);
constexpr RegStorage rs_rX86_SP_32(RegStorage::kValid | r4sp_32);
extern RegStorage rs_rX86_SP;
constexpr RegStorage rs_r5(RegStorage::kValid | r5);
constexpr RegStorage rs_r5q(RegStorage::kValid | r5q);
constexpr RegStorage rs_rBP = rs_r5;
constexpr RegStorage rs_r6(RegStorage::kValid | r6);
constexpr RegStorage rs_r6q(RegStorage::kValid | r6q);
constexpr RegStorage rs_rSI = rs_r6;
constexpr RegStorage rs_r7(RegStorage::kValid | r7);
constexpr RegStorage rs_r7q(RegStorage::kValid | r7q);
constexpr RegStorage rs_rDI = rs_r7;
constexpr RegStorage rs_rRET(RegStorage::kValid | rRET);
constexpr RegStorage rs_r8(RegStorage::kValid | r8);
constexpr RegStorage rs_r8q(RegStorage::kValid | r8q);
constexpr RegStorage rs_r9(RegStorage::kValid | r9);
constexpr RegStorage rs_r9q(RegStorage::kValid | r9q);
constexpr RegStorage rs_r10(RegStorage::kValid | r10);
constexpr RegStorage rs_r10q(RegStorage::kValid | r10q);
constexpr RegStorage rs_r11(RegStorage::kValid | r11);
constexpr RegStorage rs_r11q(RegStorage::kValid | r11q);
constexpr RegStorage rs_r12(RegStorage::kValid | r12);
constexpr RegStorage rs_r12q(RegStorage::kValid | r12q);
constexpr RegStorage rs_r13(RegStorage::kValid | r13);
constexpr RegStorage rs_r13q(RegStorage::kValid | r13q);
constexpr RegStorage rs_r14(RegStorage::kValid | r14);
constexpr RegStorage rs_r14q(RegStorage::kValid | r14q);
constexpr RegStorage rs_r15(RegStorage::kValid | r15);
constexpr RegStorage rs_r15q(RegStorage::kValid | r15q);

constexpr RegStorage rs_fr0(RegStorage::kValid | fr0);
constexpr RegStorage rs_fr1(RegStorage::kValid | fr1);
constexpr RegStorage rs_fr2(RegStorage::kValid | fr2);
constexpr RegStorage rs_fr3(RegStorage::kValid | fr3);
constexpr RegStorage rs_fr4(RegStorage::kValid | fr4);
constexpr RegStorage rs_fr5(RegStorage::kValid | fr5);
constexpr RegStorage rs_fr6(RegStorage::kValid | fr6);
constexpr RegStorage rs_fr7(RegStorage::kValid | fr7);
constexpr RegStorage rs_fr8(RegStorage::kValid | fr8);
constexpr RegStorage rs_fr9(RegStorage::kValid | fr9);
constexpr RegStorage rs_fr10(RegStorage::kValid | fr10);
constexpr RegStorage rs_fr11(RegStorage::kValid | fr11);
constexpr RegStorage rs_fr12(RegStorage::kValid | fr12);
constexpr RegStorage rs_fr13(RegStorage::kValid | fr13);
constexpr RegStorage rs_fr14(RegStorage::kValid | fr14);
constexpr RegStorage rs_fr15(RegStorage::kValid | fr15);

constexpr RegStorage rs_dr0(RegStorage::kValid | dr0);
constexpr RegStorage rs_dr1(RegStorage::kValid | dr1);
constexpr RegStorage rs_dr2(RegStorage::kValid | dr2);
constexpr RegStorage rs_dr3(RegStorage::kValid | dr3);
constexpr RegStorage rs_dr4(RegStorage::kValid | dr4);
constexpr RegStorage rs_dr5(RegStorage::kValid | dr5);
constexpr RegStorage rs_dr6(RegStorage::kValid | dr6);
constexpr RegStorage rs_dr7(RegStorage::kValid | dr7);
constexpr RegStorage rs_dr8(RegStorage::kValid | dr8);
constexpr RegStorage rs_dr9(RegStorage::kValid | dr9);
constexpr RegStorage rs_dr10(RegStorage::kValid | dr10);
constexpr RegStorage rs_dr11(RegStorage::kValid | dr11);
constexpr RegStorage rs_dr12(RegStorage::kValid | dr12);
constexpr RegStorage rs_dr13(RegStorage::kValid | dr13);
constexpr RegStorage rs_dr14(RegStorage::kValid | dr14);
constexpr RegStorage rs_dr15(RegStorage::kValid | dr15);

constexpr RegStorage rs_xr0(RegStorage::kValid | xr0);
constexpr RegStorage rs_xr1(RegStorage::kValid | xr1);
constexpr RegStorage rs_xr2(RegStorage::kValid | xr2);
constexpr RegStorage rs_xr3(RegStorage::kValid | xr3);
constexpr RegStorage rs_xr4(RegStorage::kValid | xr4);
constexpr RegStorage rs_xr5(RegStorage::kValid | xr5);
constexpr RegStorage rs_xr6(RegStorage::kValid | xr6);
constexpr RegStorage rs_xr7(RegStorage::kValid | xr7);
constexpr RegStorage rs_xr8(RegStorage::kValid | xr8);
constexpr RegStorage rs_xr9(RegStorage::kValid | xr9);
constexpr RegStorage rs_xr10(RegStorage::kValid | xr10);
constexpr RegStorage rs_xr11(RegStorage::kValid | xr11);
constexpr RegStorage rs_xr12(RegStorage::kValid | xr12);
constexpr RegStorage rs_xr13(RegStorage::kValid | xr13);
constexpr RegStorage rs_xr14(RegStorage::kValid | xr14);
constexpr RegStorage rs_xr15(RegStorage::kValid | xr15);

extern X86NativeRegisterPool rX86_ARG0;
extern X86NativeRegisterPool rX86_ARG1;
extern X86NativeRegisterPool rX86_ARG2;
extern X86NativeRegisterPool rX86_ARG3;
extern X86NativeRegisterPool rX86_ARG4;
extern X86NativeRegisterPool rX86_ARG5;
extern X86NativeRegisterPool rX86_FARG0;
extern X86NativeRegisterPool rX86_FARG1;
extern X86NativeRegisterPool rX86_FARG2;
extern X86NativeRegisterPool rX86_FARG3;
extern X86NativeRegisterPool rX86_FARG4;
extern X86NativeRegisterPool rX86_FARG5;
extern X86NativeRegisterPool rX86_FARG6;
extern X86NativeRegisterPool rX86_FARG7;
extern X86NativeRegisterPool rX86_RET0;
extern X86NativeRegisterPool rX86_RET1;
extern X86NativeRegisterPool rX86_INVOKE_TGT;
extern X86NativeRegisterPool rX86_COUNT;

extern RegStorage rs_rX86_ARG0;
extern RegStorage rs_rX86_ARG1;
extern RegStorage rs_rX86_ARG2;
extern RegStorage rs_rX86_ARG3;
extern RegStorage rs_rX86_ARG4;
extern RegStorage rs_rX86_ARG5;
extern RegStorage rs_rX86_FARG0;
extern RegStorage rs_rX86_FARG1;
extern RegStorage rs_rX86_FARG2;
extern RegStorage rs_rX86_FARG3;
extern RegStorage rs_rX86_FARG4;
extern RegStorage rs_rX86_FARG5;
extern RegStorage rs_rX86_FARG6;
extern RegStorage rs_rX86_FARG7;
extern RegStorage rs_rX86_RET0;
extern RegStorage rs_rX86_RET1;
extern RegStorage rs_rX86_INVOKE_TGT;
extern RegStorage rs_rX86_COUNT;

// RegisterLocation templates return values (r_V0, or r_V0/r_V1).
const RegLocation x86_loc_c_return
    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
     RegStorage(RegStorage::k32BitSolo, rAX), INVALID_SREG, INVALID_SREG};
const RegLocation x86_loc_c_return_wide
    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
     RegStorage(RegStorage::k64BitPair, rAX, rDX), INVALID_SREG, INVALID_SREG};
const RegLocation x86_loc_c_return_ref
    {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
     RegStorage(RegStorage::k32BitSolo, rAX), INVALID_SREG, INVALID_SREG};
const RegLocation x86_64_loc_c_return_ref
    {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
     RegStorage(RegStorage::k64BitSolo, rAX), INVALID_SREG, INVALID_SREG};
const RegLocation x86_64_loc_c_return_wide
    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
     RegStorage(RegStorage::k64BitSolo, rAX), INVALID_SREG, INVALID_SREG};
const RegLocation x86_loc_c_return_float
    {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1,
     RegStorage(RegStorage::k32BitSolo, fr0), INVALID_SREG, INVALID_SREG};
const RegLocation x86_loc_c_return_double
    {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
     RegStorage(RegStorage::k64BitSolo, dr0), INVALID_SREG, INVALID_SREG};

/*
 * The following enum defines the list of X86 instructions supported by the
 * assembler. Their corresponding EncodingMap positions will be defined in
 * Assemble.cc.
 */
enum X86OpCode {
  kX86First = 0,
  kX8632BitData = kX86First,  // data [31..0].
  kX86Bkpt,
  kX86Nop,
  // Define groups of binary operations
  // MR - Memory Register  - opcode [base + disp], reg
  //      - lir operands - 0: base, 1: disp, 2: reg
  // AR - Array Register   - opcode [base + index * scale + disp], reg
  //      - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
  // TR - Thread Register  - opcode fs:[disp], reg - where fs: is equal to Thread::Current()
  //      - lir operands - 0: disp, 1: reg
  // RR - Register Register - opcode reg1, reg2
  //      - lir operands - 0: reg1, 1: reg2
  // RM - Register Memory  - opcode reg, [base + disp]
  //      - lir operands - 0: reg, 1: base, 2: disp
  // RA - Register Array   - opcode reg, [base + index * scale + disp]
  //      - lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
  // RT - Register Thread  - opcode reg, fs:[disp] - where fs: is equal to Thread::Current()
  //      - lir operands - 0: reg, 1: disp
  // RI - Register Immediate - opcode reg, #immediate
  //      - lir operands - 0: reg, 1: immediate
  // MI - Memory Immediate  - opcode [base + disp], #immediate
  //      - lir operands - 0: base, 1: disp, 2: immediate
  // AI - Array Immediate   - opcode [base + index * scale + disp], #immediate
  //      - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: immediate
  // TI - Thread Immediate  - opcode fs:[disp], imm - where fs: is equal to Thread::Current()
  //      - lir operands - 0: disp, 1: imm
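  // As an illustration of the naming scheme, each opcode below combines an operation, an operand
  // width and one of the keys above: kX86Add32MR is "add [base + disp], reg" on 32-bit operands
  // and, per the MR key, carries lir operands 0: base, 1: disp, 2: reg.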
#define BinaryOpCode(opcode) \
  opcode ## 8MR, opcode ## 8AR, opcode ## 8TR, \
  opcode ## 8RR, opcode ## 8RM, opcode ## 8RA, opcode ## 8RT, \
  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, opcode ## 8TI, \
  opcode ## 16MR, opcode ## 16AR, opcode ## 16TR, \
  opcode ## 16RR, opcode ## 16RM, opcode ## 16RA, opcode ## 16RT, \
  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, opcode ## 16TI, \
  opcode ## 16RI8, opcode ## 16MI8, opcode ## 16AI8, opcode ## 16TI8, \
  opcode ## 32MR, opcode ## 32AR, opcode ## 32TR, \
  opcode ## 32RR, opcode ## 32RM, opcode ## 32RA, opcode ## 32RT, \
  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, opcode ## 32TI, \
  opcode ## 32RI8, opcode ## 32MI8, opcode ## 32AI8, opcode ## 32TI8, \
  opcode ## 64MR, opcode ## 64AR, opcode ## 64TR, \
  opcode ## 64RR, opcode ## 64RM, opcode ## 64RA, opcode ## 64RT, \
  opcode ## 64RI, opcode ## 64MI, opcode ## 64AI, opcode ## 64TI, \
  opcode ## 64RI8, opcode ## 64MI8, opcode ## 64AI8, opcode ## 64TI8
  BinaryOpCode(kX86Add),
  BinaryOpCode(kX86Or),
  BinaryOpCode(kX86Adc),
  BinaryOpCode(kX86Sbb),
  BinaryOpCode(kX86And),
  BinaryOpCode(kX86Sub),
  BinaryOpCode(kX86Xor),
  BinaryOpCode(kX86Cmp),
#undef BinaryOpCode
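  // For example, BinaryOpCode(kX86Add) above expands to kX86Add8MR, kX86Add8AR, ...,
  // kX86Add64TI8: one enumerator per width (8/16/32/64) and addressing form, plus the
  // imm8-shortened RI8/MI8/AI8/TI8 variants for the 16/32/64-bit widths.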
  kX86Imul16RRI, kX86Imul16RMI, kX86Imul16RAI,
  kX86Imul32RRI, kX86Imul32RMI, kX86Imul32RAI,
  kX86Imul32RRI8, kX86Imul32RMI8, kX86Imul32RAI8,
  kX86Imul64RRI, kX86Imul64RMI, kX86Imul64RAI,
  kX86Imul64RRI8, kX86Imul64RMI8, kX86Imul64RAI8,
  kX86Mov8MR, kX86Mov8AR, kX86Mov8TR,
  kX86Mov8RR, kX86Mov8RM, kX86Mov8RA, kX86Mov8RT,
  kX86Mov8RI, kX86Mov8MI, kX86Mov8AI, kX86Mov8TI,
  kX86Mov16MR, kX86Mov16AR, kX86Mov16TR,
  kX86Mov16RR, kX86Mov16RM, kX86Mov16RA, kX86Mov16RT,
  kX86Mov16RI, kX86Mov16MI, kX86Mov16AI, kX86Mov16TI,
  kX86Mov32MR, kX86Mov32AR, kX86Movnti32MR, kX86Movnti32AR, kX86Mov32TR,
  kX86Mov32RR, kX86Mov32RM, kX86Mov32RA, kX86Mov32RT,
  kX86Mov32RI, kX86Mov32MI, kX86Mov32AI, kX86Mov32TI,
  kX86Lea32RM,
  kX86Lea32RA,
  kX86Mov64MR, kX86Mov64AR, kX86Movnti64MR, kX86Movnti64AR, kX86Mov64TR,
  kX86Mov64RR, kX86Mov64RM, kX86Mov64RA, kX86Mov64RT,
  kX86Mov64RI32, kX86Mov64RI64, kX86Mov64MI, kX86Mov64AI, kX86Mov64TI,
  kX86Lea64RM,
  kX86Lea64RA,
  // RRC - Register Register ConditionCode - cond_opcode reg1, reg2
  //       - lir operands - 0: reg1, 1: reg2, 2: CC
  kX86Cmov32RRC,
  kX86Cmov64RRC,
  // RMC - Register Memory ConditionCode - cond_opcode reg1, [base + disp]
  //       - lir operands - 0: reg1, 1: base, 2: disp 3: CC
  kX86Cmov32RMC,
  kX86Cmov64RMC,

  // RC - Register CL - opcode reg, CL
  //      - lir operands - 0: reg, 1: CL
  // MC - Memory CL   - opcode [base + disp], CL
  //      - lir operands - 0: base, 1: disp, 2: CL
  // AC - Array CL    - opcode [base + index * scale + disp], CL
  //      - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: CL
#define BinaryShiftOpCode(opcode) \
  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, \
  opcode ## 8RC, opcode ## 8MC, opcode ## 8AC, \
  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, \
  opcode ## 16RC, opcode ## 16MC, opcode ## 16AC, \
  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, \
  opcode ## 32RC, opcode ## 32MC, opcode ## 32AC, \
  opcode ## 64RI, opcode ## 64MI, opcode ## 64AI, \
  opcode ## 64RC, opcode ## 64MC, opcode ## 64AC
  BinaryShiftOpCode(kX86Rol),
  BinaryShiftOpCode(kX86Ror),
  BinaryShiftOpCode(kX86Rcl),
  BinaryShiftOpCode(kX86Rcr),
  BinaryShiftOpCode(kX86Sal),
  BinaryShiftOpCode(kX86Shr),
  BinaryShiftOpCode(kX86Sar),
#undef BinaryShiftOpcode
  kX86Cmc,
  kX86Shld32RRI,
  kX86Shld32RRC,
  kX86Shld32MRI,
  kX86Shrd32RRI,
  kX86Shrd32RRC,
  kX86Shrd32MRI,
  kX86Shld64RRI,
  kX86Shld64MRI,
  kX86Shrd64RRI,
  kX86Shrd64MRI,
#define UnaryOpcode(opcode, reg, mem, array) \
  opcode ## 8 ## reg, opcode ## 8 ## mem, opcode ## 8 ## array, \
  opcode ## 16 ## reg, opcode ## 16 ## mem, opcode ## 16 ## array, \
  opcode ## 32 ## reg, opcode ## 32 ## mem, opcode ## 32 ## array, \
  opcode ## 64 ## reg, opcode ## 64 ## mem, opcode ## 64 ## array
  UnaryOpcode(kX86Test, RI, MI, AI),
  kX86Test32RR,
  kX86Test64RR,
  kX86Test32RM,
  UnaryOpcode(kX86Not, R, M, A),
  UnaryOpcode(kX86Neg, R, M, A),
  UnaryOpcode(kX86Mul, DaR, DaM, DaA),
  UnaryOpcode(kX86Imul, DaR, DaM, DaA),
  UnaryOpcode(kX86Divmod, DaR, DaM, DaA),
  UnaryOpcode(kX86Idivmod, DaR, DaM, DaA),
  kx86Cdq32Da,
  kx86Cqo64Da,
  kX86Bswap32R,
  kX86Bswap64R,
  kX86Push32R, kX86Pop32R,
#undef UnaryOpcode
#define Binary0fOpCode(opcode) \
  opcode ## RR, opcode ## RM, opcode ## RA
  Binary0fOpCode(kX86Movsd),
  kX86MovsdMR,
  kX86MovsdAR,
  Binary0fOpCode(kX86Movss),
  kX86MovssMR,
  kX86MovssAR,
  Binary0fOpCode(kX86Cvtsi2sd),  // int to double
  Binary0fOpCode(kX86Cvtsi2ss),  // int to float
  Binary0fOpCode(kX86Cvtsqi2sd),  // long to double
  Binary0fOpCode(kX86Cvtsqi2ss),  // long to float
  Binary0fOpCode(kX86Cvttsd2si),  // truncating double to int
  Binary0fOpCode(kX86Cvttss2si),  // truncating float to int
  Binary0fOpCode(kX86Cvttsd2sqi),  // truncating double to long
  Binary0fOpCode(kX86Cvttss2sqi),  // truncating float to long
  Binary0fOpCode(kX86Cvtsd2si),  // rounding double to int
  Binary0fOpCode(kX86Cvtss2si),  // rounding float to int
  Binary0fOpCode(kX86Ucomisd),  // unordered double compare
  Binary0fOpCode(kX86Ucomiss),  // unordered float compare
  Binary0fOpCode(kX86Comisd),   // double compare
  Binary0fOpCode(kX86Comiss),   // float compare
  Binary0fOpCode(kX86Orpd),     // double logical OR
  Binary0fOpCode(kX86Orps),     // float logical OR
  Binary0fOpCode(kX86Andpd),    // double logical AND
  Binary0fOpCode(kX86Andps),    // float logical AND
  Binary0fOpCode(kX86Xorpd),    // double logical XOR
  Binary0fOpCode(kX86Xorps),    // float logical XOR
  Binary0fOpCode(kX86Addsd),    // double ADD
  Binary0fOpCode(kX86Addss),    // float ADD
  Binary0fOpCode(kX86Mulsd),    // double multiply
  Binary0fOpCode(kX86Mulss),    // float multiply
  Binary0fOpCode(kX86Cvtsd2ss),  // double to float
  Binary0fOpCode(kX86Cvtss2sd),  // float to double
  Binary0fOpCode(kX86Subsd),    // double subtract
  Binary0fOpCode(kX86Subss),    // float subtract
  Binary0fOpCode(kX86Divsd),    // double divide
  Binary0fOpCode(kX86Divss),    // float divide
  Binary0fOpCode(kX86Punpcklbw),  // Interleave low-order bytes
  Binary0fOpCode(kX86Punpcklwd),  // Interleave low-order single words (16-bits)
  Binary0fOpCode(kX86Punpckldq),  // Interleave low-order double words (32-bit)
  Binary0fOpCode(kX86Punpcklqdq),  // Interleave low-order quad word
  Binary0fOpCode(kX86Sqrtsd),   // square root
  Binary0fOpCode(kX86Pmulld),   // parallel integer multiply 32 bits x 4
  Binary0fOpCode(kX86Pmullw),   // parallel integer multiply 16 bits x 8
  Binary0fOpCode(kX86Pmuludq),  // parallel unsigned 32-bit integer multiply, storing each result as 64 bits
  Binary0fOpCode(kX86Mulps),    // parallel FP multiply 32 bits x 4
  Binary0fOpCode(kX86Mulpd),    // parallel FP multiply 64 bits x 2
  Binary0fOpCode(kX86Paddb),    // parallel integer addition 8 bits x 16
  Binary0fOpCode(kX86Paddw),    // parallel integer addition 16 bits x 8
  Binary0fOpCode(kX86Paddd),    // parallel integer addition 32 bits x 4
  Binary0fOpCode(kX86Paddq),    // parallel integer addition 64 bits x 2
  Binary0fOpCode(kX86Psadbw),   // computes sum of absolute differences for unsigned byte integers
  Binary0fOpCode(kX86Addps),    // parallel FP addition 32 bits x 4
  Binary0fOpCode(kX86Addpd),    // parallel FP addition 64 bits x 2
  Binary0fOpCode(kX86Psubb),    // parallel integer subtraction 8 bits x 16
  Binary0fOpCode(kX86Psubw),    // parallel integer subtraction 16 bits x 8
  Binary0fOpCode(kX86Psubd),    // parallel integer subtraction 32 bits x 4
  Binary0fOpCode(kX86Psubq),    // parallel integer subtraction 64 bits x 2
  Binary0fOpCode(kX86Subps),    // parallel FP subtraction 32 bits x 4
  Binary0fOpCode(kX86Subpd),    // parallel FP subtraction 64 bits x 2
  Binary0fOpCode(kX86Pand),     // parallel AND 128 bits x 1
  Binary0fOpCode(kX86Por),      // parallel OR 128 bits x 1
  Binary0fOpCode(kX86Pxor),     // parallel XOR 128 bits x 1
  Binary0fOpCode(kX86Phaddw),   // parallel horizontal addition 16 bits x 8
  Binary0fOpCode(kX86Phaddd),   // parallel horizontal addition 32 bits x 4
  Binary0fOpCode(kX86Haddpd),   // parallel FP horizontal addition 64 bits x 2
  Binary0fOpCode(kX86Haddps),   // parallel FP horizontal addition 32 bits x 4
  kX86PextrbRRI,                // Extract 8 bits from XMM into GPR
  kX86PextrwRRI,                // Extract 16 bits from XMM into GPR
  kX86PextrdRRI,                // Extract 32 bits from XMM into GPR
  kX86PextrbMRI,                // Extract 8 bits from XMM into memory
  kX86PextrwMRI,                // Extract 16 bits from XMM into memory
  kX86PextrdMRI,                // Extract 32 bits from XMM into memory
  kX86PshuflwRRI,               // Shuffle 16 bits in lower 64 bits of XMM.
  kX86PshufdRRI,                // Shuffle 32 bits in XMM.
  kX86ShufpsRRI,                // FP Shuffle 32 bits in XMM.
  kX86ShufpdRRI,                // FP Shuffle 64 bits in XMM.
  kX86PsrawRI,                  // signed right shift of floating point registers 16 bits x 8
  kX86PsradRI,                  // signed right shift of floating point registers 32 bits x 4
  kX86PsrlwRI,                  // logical right shift of floating point registers 16 bits x 8
  kX86PsrldRI,                  // logical right shift of floating point registers 32 bits x 4
  kX86PsrlqRI,                  // logical right shift of floating point registers 64 bits x 2
  kX86PsrldqRI,                 // logical shift of 128-bit vector register, immediate in bytes
  kX86PsllwRI,                  // left shift of floating point registers 16 bits x 8
  kX86PslldRI,                  // left shift of floating point registers 32 bits x 4
  kX86PsllqRI,                  // left shift of floating point registers 64 bits x 2
  kX86Fild32M,                  // push 32-bit integer on x87 stack
  kX86Fild64M,                  // push 64-bit integer on x87 stack
  kX86Fld32M,                   // push float on x87 stack
  kX86Fld64M,                   // push double on x87 stack
  kX86Fstp32M,                  // pop top x87 fp stack and do 32-bit store
  kX86Fstp64M,                  // pop top x87 fp stack and do 64-bit store
  kX86Fst32M,                   // do 32-bit store
  kX86Fst64M,                   // do 64-bit store
  kX86Fprem,                    // remainder from dividing two floating point values
  kX86Fucompp,                  // compare floating point values and pop x87 fp stack twice
  kX86Fstsw16R,                 // store FPU status word
  Binary0fOpCode(kX86Movdqa),   // move 128 bits aligned
  kX86MovdqaMR, kX86MovdqaAR,   // store 128 bit aligned from xmm1 to m128
  Binary0fOpCode(kX86Movups),   // load unaligned packed single FP values from xmm2/m128 to xmm1
  kX86MovupsMR, kX86MovupsAR,   // store unaligned packed single FP values from xmm1 to m128
  Binary0fOpCode(kX86Movaps),   // load aligned packed single FP values from xmm2/m128 to xmm1
  kX86MovapsMR, kX86MovapsAR,   // store aligned packed single FP values from xmm1 to m128
  kX86MovlpsRM, kX86MovlpsRA,   // load packed single FP values from m64 to low quadword of xmm
  kX86MovlpsMR, kX86MovlpsAR,   // store packed single FP values from low quadword of xmm to m64
  kX86MovhpsRM, kX86MovhpsRA,   // load packed single FP values from m64 to high quadword of xmm
  kX86MovhpsMR, kX86MovhpsAR,   // store packed single FP values from high quadword of xmm to m64
  Binary0fOpCode(kX86Movdxr),   // move into xmm from gpr
  Binary0fOpCode(kX86Movqxr),   // move into xmm from 64 bit gpr
  kX86MovqrxRR, kX86MovqrxMR, kX86MovqrxAR,  // move into 64 bit reg from xmm
  kX86MovdrxRR, kX86MovdrxMR, kX86MovdrxAR,  // move into reg from xmm
  kX86MovsxdRR, kX86MovsxdRM, kX86MovsxdRA,  // move 32 bit to 64 bit with sign extension
  kX86Set8R, kX86Set8M, kX86Set8A,  // set byte depending on condition operand
  kX86Lfence,                   // memory barrier to serialize all previous
                                // load-from-memory instructions
  kX86Mfence,                   // memory barrier to serialize all previous
                                // load-from-memory and store-to-memory instructions
  kX86Sfence,                   // memory barrier to serialize all previous
                                // store-to-memory instructions
  Binary0fOpCode(kX86Imul16),   // 16bit multiply
  Binary0fOpCode(kX86Imul32),   // 32bit multiply
  Binary0fOpCode(kX86Imul64),   // 64bit multiply
  kX86CmpxchgRR, kX86CmpxchgMR, kX86CmpxchgAR,  // compare and exchange
  kX86LockCmpxchgMR, kX86LockCmpxchgAR, kX86LockCmpxchg64AR,  // locked compare and exchange
  kX86LockCmpxchg64M, kX86LockCmpxchg64A,  // locked compare and exchange
  kX86XchgMR,                   // exchange memory with register (automatically locked)
  Binary0fOpCode(kX86Movzx8),   // zero-extend 8-bit value
  Binary0fOpCode(kX86Movzx16),  // zero-extend 16-bit value
  Binary0fOpCode(kX86Movsx8),   // sign-extend 8-bit value
  Binary0fOpCode(kX86Movsx16),  // sign-extend 16-bit value
  Binary0fOpCode(kX86Movzx8q),  // zero-extend 8-bit value to quad word
  Binary0fOpCode(kX86Movzx16q),  // zero-extend 16-bit value to quad word
  Binary0fOpCode(kX86Movsx8q),  // sign-extend 8-bit value to quad word
  Binary0fOpCode(kX86Movsx16q),  // sign-extend 16-bit value to quad word
#undef Binary0fOpCode
  kX86Jcc8, kX86Jcc32,  // jCC rel8/32; lir operands - 0: rel, 1: CC, target assigned
  kX86Jmp8, kX86Jmp32,  // jmp rel8/32; lir operands - 0: rel, target assigned
  kX86JmpR,             // jmp reg; lir operands - 0: reg
  kX86Jecxz8,           // jecxz rel8; jump relative if ECX is zero.
  kX86JmpT,             // jmp fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp

  kX86CallR,            // call reg; lir operands - 0: reg
  kX86CallM,            // call [base + disp]; lir operands - 0: base, 1: disp
  kX86CallA,            // call [base + index * scale + disp]
                        //       lir operands - 0: base, 1: index, 2: scale, 3: disp
  kX86CallT,            // call fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp
  kX86CallI,            // call <relative> - 0: disp; Used for core.oat linking only
  kX86Ret,              // ret; no lir operands
  kX86StartOfMethod,    // call 0; pop reg; sub reg, # - generate start of method into reg
                        //       lir operands - 0: reg
  kX86PcRelLoadRA,      // mov reg, [base + index * scale + PC relative displacement]
                        //       lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
  kX86PcRelAdr,         // mov reg, PC relative displacement; lir operands - 0: reg, 1: table
  kX86RepneScasw,       // repne scasw
  kX86Last
};

/* Instruction assembly field_loc kind */
enum X86EncodingKind {
  kData,  // Special case for raw data.
  kNop,   // Special case for variable length nop.
  kNullary,  // Opcode that takes no arguments.
  kRegOpcode,  // Shorter form of R instruction kind (opcode+rd)
  kReg, kMem, kArray,  // R, M and A instruction kinds.
  kMemReg, kArrayReg, kThreadReg,  // MR, AR and TR instruction kinds.
  kRegReg, kRegMem, kRegArray, kRegThread,  // RR, RM, RA and RT instruction kinds.
  kRegRegStore,  // RR following the store modrm reg-reg encoding rather than the load.
  kRegImm, kMemImm, kArrayImm, kThreadImm,  // RI, MI, AI and TI instruction kinds.
  kRegRegImm, kRegMemImm, kRegArrayImm,  // RRI, RMI and RAI instruction kinds.
  kMovRegImm,  // Shorter form move RI.
  kMovRegQuadImm,  // 64 bit move RI
  kRegRegImmStore,  // RRI following the store modrm reg-reg encoding rather than the load.
  kMemRegImm,  // MRI instruction kinds.
  kShiftRegImm, kShiftMemImm, kShiftArrayImm,  // Shift opcode with immediate.
  kShiftRegCl, kShiftMemCl, kShiftArrayCl,  // Shift opcode with register CL.
  kShiftRegRegCl,
  // kRegRegReg, kRegRegMem, kRegRegArray,  // RRR, RRM, RRA instruction kinds.
  kRegCond, kMemCond, kArrayCond,  // R, M, A instruction kinds followed by a condition.
  kRegRegCond,  // RR instruction kind followed by a condition.
  kRegMemCond,  // RM instruction kind followed by a condition.
  kJmp, kJcc, kCall,  // Branch instruction kinds.
  kPcRel,  // Operation with displacement that is PC relative
  kMacro,  // An instruction composing multiple others
  kUnimplemented  // Encoding used when an instruction isn't yet implemented.
};

/* Struct used to define the EncodingMap positions for each X86 opcode */
struct X86EncodingMap {
  X86OpCode opcode;  // e.g. kOpAddRI
  // The broad category the instruction conforms to, such as kRegReg. Identifies which LIR operands
  // hold meaning for the opcode.
  X86EncodingKind kind;
  uint64_t flags;
  struct {
    uint8_t prefix1;  // Non-zero => a prefix byte.
    uint8_t prefix2;  // Non-zero => a second prefix byte.
    uint8_t opcode;  // 1 byte opcode.
    uint8_t extra_opcode1;  // Possible extra opcode byte.
    uint8_t extra_opcode2;  // Possible second extra opcode byte.
    // 3-bit opcode that gets encoded in the register bits of the modrm byte, use determined by the
    // encoding kind.
    uint8_t modrm_opcode;
    uint8_t ax_opcode;  // Non-zero => shorter encoding for AX as a destination.
    uint8_t immediate_bytes;  // Number of bytes of immediate.
    // Does the instruction address a byte register? In 32-bit mode the registers ah, bh, ch and dh
    // are not used. In 64-bit mode the REX prefix is used to normalize and allow any byte register
    // to be addressed.
    bool r8_form;
  } skeleton;
  const char* name;
  const char* fmt;
};
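// For orientation, a sketch only (the authoritative table lives in Assemble.cc): an entry for
// kX86Add32MR would use kind kMemReg and primary opcode 0x01 (add r/m32, r32), with no prefixes,
// extra opcode bytes or immediate bytes, while flags describe the def/use behaviour of its LIR
// operands.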


// FIXME: mem barrier type - what do we do for x86?
#define kSY 0
#define kST 0

// Offsets of high and low halves of a 64bit value.
#define LOWORD_OFFSET 0
#define HIWORD_OFFSET 4

// Segment override instruction prefix used for quick TLS access to Thread::Current().
#define THREAD_PREFIX 0x64
#define THREAD_PREFIX_GS 0x65

// 64 Bit Operand Size
#define REX_W 0x48
// Extension of the ModR/M reg field
#define REX_R 0x44
// Extension of the SIB index field
#define REX_X 0x42
// Extension of the ModR/M r/m field, SIB base field, or Opcode reg field
#define REX_B 0x41
// An empty REX prefix used to normalize the byte operations so that they apply to R4 through R15
#define REX 0x40
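// For example, a 64-bit "add r10, r9" needs REX_W for the operand size, plus REX_R and REX_B to
// extend the two register fields so that r9 and r10 are reachable, i.e. a prefix byte of
// REX_W | REX_R | REX_B = 0x4D.
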
// Mask extracting the least 3 bits of r0..r15
#define kRegNumMask32 0x07
// Value indicating that base or reg is not used
#define NO_REG 0

#define IS_SIMM8(v) ((-128 <= (v)) && ((v) <= 127))
#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32767))
#define IS_SIMM32(v) ((INT64_C(-2147483648) <= (v)) && ((v) <= INT64_C(2147483647)))
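// Illustrative note: immediates that satisfy IS_SIMM8 are candidates for the shorter imm8
// encodings above (e.g. kX86Add32RI8 rather than kX86Add32RI). A compile-time sanity check of
// the 8-bit range:
static_assert(IS_SIMM8(127) && !IS_SIMM8(128), "IS_SIMM8 covers exactly [-128, 127]");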

extern X86EncodingMap EncodingMap[kX86Last];
extern X86ConditionCode X86ConditionEncoding(ConditionCode cond);

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_