/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_
#define ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_

#include "dex/compiler_internals.h"

namespace art {

/*
 * Runtime register conventions. We consider x86, x86-64 and x32 (32-bit mode x86-64). Each ABI
 * has different conventions and we capture those here. Changing something that is callee save and
 * making it caller save places a burden on up-calls to save/restore the callee save register;
 * however, there are few registers that are callee save in the ABI. Changing something that is
 * caller save and making it callee save places a burden on down-calls to save/restore the callee
 * save register. For these reasons we aim to match native conventions for caller and callee save.
 * On x86 only the first 4 registers can be used for byte operations, so they are
 * preferred for temporary scratch registers.
 *
 * General Purpose Register:
 *  Native: x86 | x86-64 / x32 | ART x86 | ART x86-64
 *  r0/eax: caller | caller | caller, Method*, scratch, return value | caller, scratch, return value
 *  r1/ecx: caller | caller, arg4 | caller, arg1, scratch | caller, arg3, scratch
 *  r2/edx: caller | caller, arg3 | caller, arg2, scratch, high half of long return | caller, arg2, scratch
 *  r3/ebx: callEE | callEE | callER, arg3, scratch | callee, promotable
 *  r4/esp: stack pointer
 *  r5/ebp: callee | callee | callee, promotable | callee, promotable
 *  r6/esi: callEE | callER, arg2 | callee, promotable | caller, arg1, scratch
 *  r7/edi: callEE | callER, arg1 | callee, promotable | caller, Method*, scratch
 *  --- x86-64/x32 registers
 *  Native: x86-64 / x32 | ART
 *  r8:  caller save, arg5 | caller, arg4, scratch
 *  r9:  caller save, arg6 | caller, arg5, scratch
 *  r10: caller save | caller, scratch
 *  r11: caller save | caller, scratch
 *  r12: callee save | callee, available for register promotion (promotable)
 *  r13: callee save | callee, available for register promotion (promotable)
 *  r14: callee save | callee, available for register promotion (promotable)
 *  r15: callee save | callee, available for register promotion (promotable)
 *
 * There is no rSELF; instead, on x86 fs: has a base address of Thread::Current, whereas on
 * x86-64/x32 gs: holds it.
 *
 * For floating point we don't support CPUs without SSE2 support (i.e. we require a CPU newer than the Pentium III):
 *  Native: x86 | x86-64 / x32 | ART x86 | ART x86-64
 *  XMM0: caller | caller, arg1 | caller, float return value | caller, arg1, float return value
 *  XMM1: caller | caller, arg2 | caller, scratch | caller, arg2, scratch
 *  XMM2: caller | caller, arg3 | caller, scratch | caller, arg3, scratch
 *  XMM3: caller | caller, arg4 | caller, scratch | caller, arg4, scratch
 *  XMM4: caller | caller, arg5 | caller, scratch | caller, arg5, scratch
 *  XMM5: caller | caller, arg6 | caller, scratch | caller, arg6, scratch
 *  XMM6: caller | caller, arg7 | caller, scratch | caller, arg7, scratch
 *  XMM7: caller | caller, arg8 | caller, scratch | caller, arg8, scratch
 *  --- x86-64/x32 registers
 *  XMM8 .. 15: caller save available as scratch registers for ART.
 *
 * X87 is a necessary evil outside of ART code for x86:
 *  ST0: x86 float/double native return value, caller save
 *  ST1 .. ST7: caller save
 *
 * Stack frame diagram (stack grows down, higher addresses at top):
 *
 * +------------------------+
 * | IN[ins-1]              |  {Note: resides in caller's frame}
 * |       .                |
 * | IN[0]                  |
 * | caller's Method*       |
 * +========================+  {Note: start of callee's frame}
 * | return address         |  {pushed by call}
 * | spill region           |  {variable sized}
 * +------------------------+
 * | ...filler word...      |  {Note: used as 2nd word of V[locals-1] if long}
 * +------------------------+
 * | V[locals-1]            |
 * | V[locals-2]            |
 * |       .                |
 * |       .                |
 * | V[1]                   |
 * | V[0]                   |
 * +------------------------+
 * | 0 to 3 words padding   |
 * +------------------------+
 * | OUT[outs-1]            |
 * | OUT[outs-2]            |
 * |       .                |
 * | OUT[0]                 |
 * | cur_method*            | <<== sp w/ 16-byte alignment
 * +========================+
 */

// Offset to distinguish FP regs.
#define X86_FP_REG_OFFSET 32
// Offset to distinguish DP FP regs.
#define X86_FP_DOUBLE (X86_FP_REG_OFFSET + 16)
// Offset to distinguish the extra regs.
#define X86_EXTRA_REG_OFFSET (X86_FP_DOUBLE + 16)
// Reg types.
#define X86_REGTYPE(x) (x & (X86_FP_REG_OFFSET | X86_FP_DOUBLE))
#define X86_FPREG(x) ((x & X86_FP_REG_OFFSET) == X86_FP_REG_OFFSET)
#define X86_EXTRAREG(x) ((x & X86_EXTRA_REG_OFFSET) == X86_EXTRA_REG_OFFSET)
#define X86_DOUBLEREG(x) ((x & X86_FP_DOUBLE) == X86_FP_DOUBLE)
#define X86_SINGLEREG(x) (X86_FPREG(x) && !X86_DOUBLEREG(x))
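// Illustrative example (not in the original header): fr1 below encodes as 1 + X86_FP_REG_OFFSET
// == 33, so X86_FPREG(fr1) and X86_SINGLEREG(fr1) hold while X86_DOUBLEREG(fr1) does not; for a
// core register such as rAX (0) all of these predicates are false.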

/*
 * Note: the low register of a floating point pair is sufficient to
 * create the name of a double, but we require both names to be passed to
 * allow asserts to verify that the pair is consecutive if significant
 * rework is done in this area. It is also a good reminder in the calling
 * code that reg locations always describe doubles as a pair of singles.
 */
#define X86_S2D(x, y) ((x) | X86_FP_DOUBLE)
/* Mask to strip off fp flags */
#define X86_FP_REG_MASK 0xF
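// Example (illustrative only): X86_S2D(fr0, fr1) == fr0 | X86_FP_DOUBLE, i.e. xmm0 viewed as a
// double; masking the result with X86_FP_REG_MASK recovers the physical register number 0.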

enum X86ResourceEncodingPos {
  kX86GPReg0 = 0,
  kX86RegSP = 4,
  kX86FPReg0 = 16,  // xmm0 .. xmm7/xmm15.
  kX86FPRegEnd = 32,
  kX86RegEnd = kX86FPRegEnd,
};

#define ENCODE_X86_REG_LIST(N) (static_cast<uint64_t>(N))
#define ENCODE_X86_REG_SP (1ULL << kX86RegSP)

enum X86NativeRegisterPool {
  r0 = 0,
  rAX = r0,
  r1 = 1,
  rCX = r1,
  r2 = 2,
  rDX = r2,
  r3 = 3,
  rBX = r3,
  r4sp = 4,
  rX86_SP = r4sp,
  r4sib_no_index = r4sp,
  r5 = 5,
  rBP = r5,
  r5sib_no_base = r5,
  r6 = 6,
  rSI = r6,
  r7 = 7,
  rDI = r7,
#ifndef TARGET_REX_SUPPORT
  rRET = 8,  // fake return address register for core spill mask.
#else
  r8 = 8,
  r9 = 9,
  r10 = 10,
  r11 = 11,
  r12 = 12,
  r13 = 13,
  r14 = 14,
  r15 = 15,
  rRET = 16,  // fake return address register for core spill mask.
#endif
  fr0  =  0 + X86_FP_REG_OFFSET,
  fr1  =  1 + X86_FP_REG_OFFSET,
  fr2  =  2 + X86_FP_REG_OFFSET,
  fr3  =  3 + X86_FP_REG_OFFSET,
  fr4  =  4 + X86_FP_REG_OFFSET,
  fr5  =  5 + X86_FP_REG_OFFSET,
  fr6  =  6 + X86_FP_REG_OFFSET,
  fr7  =  7 + X86_FP_REG_OFFSET,
  fr8  =  8 + X86_FP_REG_OFFSET,
  fr9  =  9 + X86_FP_REG_OFFSET,
  fr10 = 10 + X86_FP_REG_OFFSET,
  fr11 = 11 + X86_FP_REG_OFFSET,
  fr12 = 12 + X86_FP_REG_OFFSET,
  fr13 = 13 + X86_FP_REG_OFFSET,
  fr14 = 14 + X86_FP_REG_OFFSET,
  fr15 = 15 + X86_FP_REG_OFFSET,
};
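// Added note (not in the original header): the r4sib_no_index and r5sib_no_base aliases above
// reflect the standard x86 SIB/ModRM encoding rules - an index field of 100 (esp) means "no
// index", and a mod == 00 base field of 101 (ebp) means "no base, 32-bit displacement follows".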

#define rX86_ARG0 rAX
#define rX86_ARG1 rCX
#define rX86_ARG2 rDX
#define rX86_ARG3 rBX
#define rX86_FARG0 rAX
#define rX86_FARG1 rCX
#define rX86_FARG2 rDX
#define rX86_FARG3 rBX
#define rX86_RET0 rAX
#define rX86_RET1 rDX
#define rX86_INVOKE_TGT rAX
#define rX86_LR INVALID_REG
#define rX86_SUSPEND INVALID_REG
#define rX86_SELF INVALID_REG
#define rX86_COUNT rCX
#define rX86_PC INVALID_REG

// RegisterLocation templates for return values (r_V0, or r_V0/r_V1).
const RegLocation x86_loc_c_return
    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
     RegStorage(RegStorage::k32BitSolo, rAX), INVALID_SREG, INVALID_SREG};
const RegLocation x86_loc_c_return_wide
    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed,
     RegStorage(RegStorage::k64BitPair, rAX, rDX), INVALID_SREG, INVALID_SREG};
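// Note (added for clarity): the wide return above pairs rAX/rDX, matching the native convention
// that edx:eax holds a 64-bit return value (see the register table at the top of this file).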
// TODO: update to use k32BitVector (must encode in 7 bits, including fp flag).
const RegLocation x86_loc_c_return_float
    {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorLength4,
     RegStorage(RegStorage::k32BitSolo, fr0), INVALID_SREG, INVALID_SREG};
// TODO: update to use k64BitVector (must encode in 7 bits, including fp flag).
const RegLocation x86_loc_c_return_double
    {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorLength8,
     RegStorage(RegStorage::k64BitPair, fr0, fr0), INVALID_SREG, INVALID_SREG};

/*
 * The following enum defines the list of supported X86 instructions by the
 * assembler. Their corresponding EncodingMap positions will be defined in
 * Assemble.cc.
 */
enum X86OpCode {
  kX86First = 0,
  kX8632BitData = kX86First,  // data [31..0].
  kX86Bkpt,
  kX86Nop,
  // Define groups of binary operations
  // MR - Memory Register  - opcode [base + disp], reg
  //             - lir operands - 0: base, 1: disp, 2: reg
  // AR - Array Register   - opcode [base + index * scale + disp], reg
  //             - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
  // TR - Thread Register  - opcode fs:[disp], reg - where fs: is equal to Thread::Current()
  //             - lir operands - 0: disp, 1: reg
  // RR - Register Register - opcode reg1, reg2
  //             - lir operands - 0: reg1, 1: reg2
  // RM - Register Memory  - opcode reg, [base + disp]
  //             - lir operands - 0: reg, 1: base, 2: disp
  // RA - Register Array   - opcode reg, [base + index * scale + disp]
  //             - lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
  // RT - Register Thread  - opcode reg, fs:[disp] - where fs: is equal to Thread::Current()
  //             - lir operands - 0: reg, 1: disp
  // RI - Register Immediate - opcode reg, #immediate
  //             - lir operands - 0: reg, 1: immediate
  // MI - Memory Immediate   - opcode [base + disp], #immediate
  //             - lir operands - 0: base, 1: disp, 2: immediate
  // AI - Array Immediate  - opcode [base + index * scale + disp], #immediate
  //             - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: immediate
  // TI - Thread Immediate  - opcode fs:[disp], imm - where fs: is equal to Thread::Current()
  //             - lir operands - 0: disp, 1: imm
#define BinaryOpCode(opcode) \
  opcode ## 8MR, opcode ## 8AR, opcode ## 8TR, \
  opcode ## 8RR, opcode ## 8RM, opcode ## 8RA, opcode ## 8RT, \
  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, opcode ## 8TI, \
  opcode ## 16MR, opcode ## 16AR, opcode ## 16TR, \
  opcode ## 16RR, opcode ## 16RM, opcode ## 16RA, opcode ## 16RT, \
  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, opcode ## 16TI, \
  opcode ## 16RI8, opcode ## 16MI8, opcode ## 16AI8, opcode ## 16TI8, \
  opcode ## 32MR, opcode ## 32AR, opcode ## 32TR, \
  opcode ## 32RR, opcode ## 32RM, opcode ## 32RA, opcode ## 32RT, \
  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, opcode ## 32TI, \
  opcode ## 32RI8, opcode ## 32MI8, opcode ## 32AI8, opcode ## 32TI8
  BinaryOpCode(kX86Add),
  BinaryOpCode(kX86Or),
  BinaryOpCode(kX86Adc),
  BinaryOpCode(kX86Sbb),
  BinaryOpCode(kX86And),
  BinaryOpCode(kX86Sub),
  BinaryOpCode(kX86Xor),
  BinaryOpCode(kX86Cmp),
#undef BinaryOpCode
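  // Note (illustrative, not in the original header): each BinaryOpCode(...) use above expands to
  // the full set of width/addressing-mode variants; for example BinaryOpCode(kX86Add) yields
  // kX86Add8MR .. kX86Add32TI8, and an opcode such as kX86Add32MI takes lir operands
  // 0: base, 1: disp, 2: immediate per the MI form described above.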
  kX86Imul16RRI, kX86Imul16RMI, kX86Imul16RAI,
  kX86Imul32RRI, kX86Imul32RMI, kX86Imul32RAI,
  kX86Imul32RRI8, kX86Imul32RMI8, kX86Imul32RAI8,
  kX86Mov8MR, kX86Mov8AR, kX86Mov8TR,
  kX86Mov8RR, kX86Mov8RM, kX86Mov8RA, kX86Mov8RT,
  kX86Mov8RI, kX86Mov8MI, kX86Mov8AI, kX86Mov8TI,
  kX86Mov16MR, kX86Mov16AR, kX86Mov16TR,
  kX86Mov16RR, kX86Mov16RM, kX86Mov16RA, kX86Mov16RT,
  kX86Mov16RI, kX86Mov16MI, kX86Mov16AI, kX86Mov16TI,
  kX86Mov32MR, kX86Mov32AR, kX86Mov32TR,
  kX86Mov32RR, kX86Mov32RM, kX86Mov32RA, kX86Mov32RT,
  kX86Mov32RI, kX86Mov32MI, kX86Mov32AI, kX86Mov32TI,
  kX86Lea32RM,
  kX86Lea32RA,
  // RRC - Register Register ConditionCode - cond_opcode reg1, reg2
  //             - lir operands - 0: reg1, 1: reg2, 2: CC
  kX86Cmov32RRC,
  // RC - Register CL - opcode reg, CL
  //             - lir operands - 0: reg, 1: CL
  // MC - Memory CL   - opcode [base + disp], CL
  //             - lir operands - 0: base, 1: disp, 2: CL
  // AC - Array CL    - opcode [base + index * scale + disp], CL
  //             - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: CL
#define BinaryShiftOpCode(opcode) \
  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, \
  opcode ## 8RC, opcode ## 8MC, opcode ## 8AC, \
  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, \
  opcode ## 16RC, opcode ## 16MC, opcode ## 16AC, \
  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, \
  opcode ## 32RC, opcode ## 32MC, opcode ## 32AC
  BinaryShiftOpCode(kX86Rol),
  BinaryShiftOpCode(kX86Ror),
  BinaryShiftOpCode(kX86Rcl),
  BinaryShiftOpCode(kX86Rcr),
  BinaryShiftOpCode(kX86Sal),
  BinaryShiftOpCode(kX86Shr),
  BinaryShiftOpCode(kX86Sar),
#undef BinaryShiftOpCode
  kX86Cmc,
  kX86Shld32RRI,
  kX86Shrd32RRI,
#define UnaryOpcode(opcode, reg, mem, array) \
  opcode ## 8 ## reg, opcode ## 8 ## mem, opcode ## 8 ## array, \
  opcode ## 16 ## reg, opcode ## 16 ## mem, opcode ## 16 ## array, \
  opcode ## 32 ## reg, opcode ## 32 ## mem, opcode ## 32 ## array
  UnaryOpcode(kX86Test, RI, MI, AI),
  kX86Test32RR,
  UnaryOpcode(kX86Not, R, M, A),
  UnaryOpcode(kX86Neg, R, M, A),
  UnaryOpcode(kX86Mul, DaR, DaM, DaA),
  UnaryOpcode(kX86Imul, DaR, DaM, DaA),
  UnaryOpcode(kX86Divmod, DaR, DaM, DaA),
  UnaryOpcode(kX86Idivmod, DaR, DaM, DaA),
  kx86Cdq32Da,
  kX86Bswap32R,
  kX86Push32R, kX86Pop32R,
#undef UnaryOpcode
#define Binary0fOpCode(opcode) \
  opcode ## RR, opcode ## RM, opcode ## RA
  Binary0fOpCode(kX86Movsd),
  kX86MovsdMR,
  kX86MovsdAR,
  Binary0fOpCode(kX86Movss),
  kX86MovssMR,
  kX86MovssAR,
  Binary0fOpCode(kX86Cvtsi2sd),   // int to double
  Binary0fOpCode(kX86Cvtsi2ss),   // int to float
  Binary0fOpCode(kX86Cvttsd2si),  // truncating double to int
  Binary0fOpCode(kX86Cvttss2si),  // truncating float to int
  Binary0fOpCode(kX86Cvtsd2si),   // rounding double to int
  Binary0fOpCode(kX86Cvtss2si),   // rounding float to int
  Binary0fOpCode(kX86Ucomisd),    // unordered double compare
  Binary0fOpCode(kX86Ucomiss),    // unordered float compare
  Binary0fOpCode(kX86Comisd),     // double compare
  Binary0fOpCode(kX86Comiss),     // float compare
  Binary0fOpCode(kX86Orps),       // or of floating point registers
  Binary0fOpCode(kX86Xorps),      // xor of floating point registers
  Binary0fOpCode(kX86Addsd),      // double add
  Binary0fOpCode(kX86Addss),      // float add
  Binary0fOpCode(kX86Mulsd),      // double multiply
  Binary0fOpCode(kX86Mulss),      // float multiply
  Binary0fOpCode(kX86Cvtsd2ss),   // double to float
  Binary0fOpCode(kX86Cvtss2sd),   // float to double
  Binary0fOpCode(kX86Subsd),      // double subtract
  Binary0fOpCode(kX86Subss),      // float subtract
  Binary0fOpCode(kX86Divsd),      // double divide
  Binary0fOpCode(kX86Divss),      // float divide
  Binary0fOpCode(kX86Punpckldq),  // Interleave low-order double words
  kX86PsrlqRI,                    // right shift of floating point registers
  kX86PsllqRI,                    // left shift of floating point registers
  kX86SqrtsdRR,                   // sqrt of floating point register
  kX86Fild32M,                    // push 32-bit integer on x87 stack
  kX86Fild64M,                    // push 64-bit integer on x87 stack
  kX86Fstp32M,                    // pop top x87 fp stack and do 32-bit store
  kX86Fstp64M,                    // pop top x87 fp stack and do 64-bit store
  Binary0fOpCode(kX86Movups),     // load unaligned packed single FP values from xmm2/m128 to xmm1
  kX86MovupsMR, kX86MovupsAR,     // store unaligned packed single FP values from xmm1 to m128
  Binary0fOpCode(kX86Movaps),     // load aligned packed single FP values from xmm2/m128 to xmm1
  kX86MovapsMR, kX86MovapsAR,     // store aligned packed single FP values from xmm1 to m128
  kX86MovlpsRM, kX86MovlpsRA,     // load packed single FP values from m64 to low quadword of xmm
  kX86MovlpsMR, kX86MovlpsAR,     // store packed single FP values from low quadword of xmm to m64
  kX86MovhpsRM, kX86MovhpsRA,     // load packed single FP values from m64 to high quadword of xmm
  kX86MovhpsMR, kX86MovhpsAR,     // store packed single FP values from high quadword of xmm to m64
  Binary0fOpCode(kX86Movdxr),     // move into xmm from gpr
  kX86MovdrxRR, kX86MovdrxMR, kX86MovdrxAR,  // move into reg from xmm
  kX86Set8R, kX86Set8M, kX86Set8A,  // set byte depending on condition operand
  kX86Mfence,                     // memory barrier
  Binary0fOpCode(kX86Imul16),     // 16bit multiply
  Binary0fOpCode(kX86Imul32),     // 32bit multiply
  kX86CmpxchgRR, kX86CmpxchgMR, kX86CmpxchgAR,  // compare and exchange
  kX86LockCmpxchgMR, kX86LockCmpxchgAR,  // locked compare and exchange
  kX86LockCmpxchg8bM, kX86LockCmpxchg8bA,  // locked compare and exchange
  Binary0fOpCode(kX86Movzx8),     // zero-extend 8-bit value
  Binary0fOpCode(kX86Movzx16),    // zero-extend 16-bit value
  Binary0fOpCode(kX86Movsx8),     // sign-extend 8-bit value
  Binary0fOpCode(kX86Movsx16),    // sign-extend 16-bit value
#undef Binary0fOpCode
  kX86Jcc8, kX86Jcc32,  // jCC rel8/32; lir operands - 0: rel, 1: CC, target assigned
  kX86Jmp8, kX86Jmp32,  // jmp rel8/32; lir operands - 0: rel, target assigned
  kX86JmpR,             // jmp reg; lir operands - 0: reg
  kX86Jecxz8,           // jecxz rel8; jump relative if ECX is zero.
  kX86CallR,            // call reg; lir operands - 0: reg
  kX86CallM,            // call [base + disp]; lir operands - 0: base, 1: disp
  kX86CallA,            // call [base + index * scale + disp]
                        // lir operands - 0: base, 1: index, 2: scale, 3: disp
  kX86CallT,            // call fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp
  kX86CallI,            // call <relative> - 0: disp; Used for core.oat linking only
  kX86Ret,              // ret; no lir operands
  kX86StartOfMethod,    // call 0; pop reg; sub reg, # - generate start of method into reg
                        // lir operands - 0: reg
  kX86PcRelLoadRA,      // mov reg, [base + index * scale + PC relative displacement]
                        // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
  kX86PcRelAdr,         // mov reg, PC relative displacement; lir operands - 0: reg, 1: table
  kX86RepneScasw,       // repne scasw
  kX86Last
};

/* Instruction assembly field_loc kind */
enum X86EncodingKind {
  kData,            // Special case for raw data.
  kNop,             // Special case for variable length nop.
  kNullary,         // Opcode that takes no arguments.
  kPrefix2Nullary,  // Opcode that takes no arguments, but 2 prefixes.
  kRegOpcode,       // Shorter form of R instruction kind (opcode+rd)
  kReg, kMem, kArray,                       // R, M and A instruction kinds.
  kMemReg, kArrayReg, kThreadReg,           // MR, AR and TR instruction kinds.
  kRegReg, kRegMem, kRegArray, kRegThread,  // RR, RM, RA and RT instruction kinds.
  kRegRegStore,     // RR following the store modrm reg-reg encoding rather than the load.
  kRegImm, kMemImm, kArrayImm, kThreadImm,  // RI, MI, AI and TI instruction kinds.
  kRegRegImm, kRegMemImm, kRegArrayImm,     // RRI, RMI and RAI instruction kinds.
  kMovRegImm,       // Shorter form move RI.
  kRegRegImmRev,    // RRI with first reg in r/m
  kShiftRegImm, kShiftMemImm, kShiftArrayImm,  // Shift opcode with immediate.
  kShiftRegCl, kShiftMemCl, kShiftArrayCl,     // Shift opcode with register CL.
  kRegRegReg, kRegRegMem, kRegRegArray,     // RRR, RRM, RRA instruction kinds.
  kRegCond, kMemCond, kArrayCond,           // R, M, A instruction kinds followed by a condition.
  kRegRegCond,      // RR instruction kind followed by a condition.
  kJmp, kJcc, kCall,  // Branch instruction kinds.
  kPcRel,           // Operation with displacement that is PC relative
  kMacro,           // An instruction composing multiple others
  kUnimplemented    // Encoding used when an instruction isn't yet implemented.
};

/* Struct used to define the EncodingMap positions for each X86 opcode */
struct X86EncodingMap {
  X86OpCode opcode;      // e.g. kX86Add32RI
  X86EncodingKind kind;  // Used to discriminate in the union below
  uint64_t flags;
  struct {
    uint8_t prefix1;        // non-zero => a prefix byte
    uint8_t prefix2;        // non-zero => a second prefix byte
    uint8_t opcode;         // 1 byte opcode
    uint8_t extra_opcode1;  // possible extra opcode byte
    uint8_t extra_opcode2;  // possible second extra opcode byte
    // 3-bit opcode that gets encoded in the register bits of the modrm byte, use determined by the
    // encoding kind
    uint8_t modrm_opcode;
    uint8_t ax_opcode;        // non-zero => shorter encoding for AX as a destination
    uint8_t immediate_bytes;  // number of bytes of immediate
  } skeleton;
  const char* name;
  const char* fmt;
};
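// Hypothetical example entry, for illustration only (the real table is defined in Assemble.cc and
// its flags and format strings may differ):
//   { kX86Add32RI, kRegImm, 0 /* flags */,
//     { 0, 0, 0x81, 0, 0, 0x0, 0x05, 4 }, "Add32RI", "!0r,!1d" },
// i.e. no prefixes, opcode 0x81 with a /0 modrm opcode, the shorter 0x05 encoding when the
// destination is EAX, and a 4-byte immediate.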


// FIXME: mem barrier type - what do we do for x86?
#define kSY 0
#define kST 0

// Offsets of high and low halves of a 64bit value.
#define LOWORD_OFFSET 0
#define HIWORD_OFFSET 4

// Segment override instruction prefix used for quick TLS access to Thread::Current().
#define THREAD_PREFIX 0x64

#define IS_SIMM8(v) ((-128 <= (v)) && ((v) <= 127))
#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32767))
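// Example (illustrative): IS_SIMM8(127) is true and IS_SIMM8(128) is false; checks like these
// decide whether an immediate fits the shorter sign-extended 8-bit forms (e.g. the *RI8/MI8/AI8
// opcodes above).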

extern X86EncodingMap EncodingMap[kX86Last];
extern X86ConditionCode X86ConditionEncoding(ConditionCode cond);

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_