blob: 4064bd6550a56406b32b584da6b0a2040e38c4e2 [file] [log] [blame]
Brian Carlstrom7940e442013-07-12 13:46:57 -07001/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Brian Carlstromfc0e3212013-07-17 14:40:12 -070017#ifndef ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_
18#define ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_
Brian Carlstrom7940e442013-07-12 13:46:57 -070019
20#include "dex/compiler_internals.h"
21
22namespace art {
23
24/*
Ian Rogers0177e532014-02-11 16:30:46 -080025 * Runtime register conventions. We consider both x86, x86-64 and x32 (32bit mode x86-64). The ABI
26 * has different conventions and we capture those here. Changing something that is callee save and
27 * making it caller save places a burden on up-calls to save/restore the callee save register,
28 * however, there are few registers that are callee save in the ABI. Changing something that is
29 * caller save and making it callee save places a burden on down-calls to save/restore the callee
30 * save register. For these reasons we aim to match native conventions for caller and callee save.
31 * On x86 only the first 4 registers can be used for byte operations, for this reason they are
32 * preferred for temporary scratch registers.
Brian Carlstrom7940e442013-07-12 13:46:57 -070033 *
34 * General Purpose Register:
Ian Rogers0177e532014-02-11 16:30:46 -080035 * Native: x86 | x86-64 / x32 | ART x86 | ART x86-64
36 * r0/eax: caller | caller | caller, Method*, scratch, return value | caller, scratch, return value
37 * r1/ecx: caller | caller, arg4 | caller, arg1, scratch | caller, arg3, scratch
38 * r2/edx: caller | caller, arg3 | caller, arg2, scratch, high half of long return | caller, arg2, scratch
39 * r3/ebx: callEE | callEE | callER, arg3, scratch | callee, promotable
Brian Carlstrom7940e442013-07-12 13:46:57 -070040 * r4/esp: stack pointer
Ian Rogers0177e532014-02-11 16:30:46 -080041 * r5/ebp: callee | callee | callee, promotable | callee, promotable
42 * r6/esi: callEE | callER, arg2 | callee, promotable | caller, arg1, scratch
43 * r7/edi: callEE | callER, arg1 | callee, promotable | caller, Method*, scratch
Brian Carlstrom7940e442013-07-12 13:46:57 -070044 * --- x86-64/x32 registers
45 * Native: x86-64 / x32 | ART
Ian Rogers0177e532014-02-11 16:30:46 -080046 * r8: caller save, arg5 | caller, arg4, scratch
47 * r9: caller save, arg6 | caller, arg5, scratch
Brian Carlstrom7940e442013-07-12 13:46:57 -070048 * r10: caller save | caller, scratch
49 * r11: caller save | caller, scratch
Ian Rogers0177e532014-02-11 16:30:46 -080050 * r12: callee save | callee, available for register promotion (promotable)
51 * r13: callee save | callee, available for register promotion (promotable)
52 * r14: callee save | callee, available for register promotion (promotable)
53 * r15: callee save | callee, available for register promotion (promotable)
Brian Carlstrom7940e442013-07-12 13:46:57 -070054 *
55 * There is no rSELF, instead on x86 fs: has a base address of Thread::Current, whereas on
56 * x86-64/x32 gs: holds it.
57 *
 * For floating point we don't support CPUs without SSE2 support (i.e. we require CPUs newer than the Pentium III):
Ian Rogers0177e532014-02-11 16:30:46 -080059 * Native: x86 | x86-64 / x32 | ART x86 | ART x86-64
60 * XMM0: caller | caller, arg1 | caller, float return value | caller, arg1, float return value
61 * XMM1: caller | caller, arg2 | caller, scratch | caller, arg2, scratch
62 * XMM2: caller | caller, arg3 | caller, scratch | caller, arg3, scratch
63 * XMM3: caller | caller, arg4 | caller, scratch | caller, arg4, scratch
64 * XMM4: caller | caller, arg5 | caller, scratch | caller, arg5, scratch
65 * XMM5: caller | caller, arg6 | caller, scratch | caller, arg6, scratch
66 * XMM6: caller | caller, arg7 | caller, scratch | caller, arg7, scratch
67 * XMM7: caller | caller, arg8 | caller, scratch | caller, arg8, scratch
Brian Carlstrom7940e442013-07-12 13:46:57 -070068 * --- x86-64/x32 registers
Ian Rogers0177e532014-02-11 16:30:46 -080069 * XMM8 .. 15: caller save available as scratch registers for ART.
Brian Carlstrom7940e442013-07-12 13:46:57 -070070 *
Ian Rogers0177e532014-02-11 16:30:46 -080071 * X87 is a necessary evil outside of ART code for x86:
Brian Carlstrom7940e442013-07-12 13:46:57 -070072 * ST0: x86 float/double native return value, caller save
73 * ST1 .. ST7: caller save
74 *
75 * Stack frame diagram (stack grows down, higher addresses at top):
76 *
77 * +------------------------+
78 * | IN[ins-1] | {Note: resides in caller's frame}
79 * | . |
80 * | IN[0] |
81 * | caller's Method* |
82 * +========================+ {Note: start of callee's frame}
83 * | return address | {pushed by call}
84 * | spill region | {variable sized}
85 * +------------------------+
 * | ...filler word...      |  {Note: used as 2nd word of V[locals-1] if long}
87 * +------------------------+
88 * | V[locals-1] |
89 * | V[locals-2] |
90 * | . |
91 * | . |
92 * | V[1] |
93 * | V[0] |
94 * +------------------------+
95 * | 0 to 3 words padding |
96 * +------------------------+
97 * | OUT[outs-1] |
98 * | OUT[outs-2] |
99 * | . |
100 * | OUT[0] |
101 * | cur_method* | <<== sp w/ 16-byte alignment
102 * +========================+
103 */
104
// Offset to distinguish FP regs.
#define X86_FP_REG_OFFSET 32
// Offset to distinguish DP FP regs.
#define X86_FP_DOUBLE (X86_FP_REG_OFFSET + 16)
// Offset to distinguish the extra regs.
#define X86_EXTRA_REG_OFFSET (X86_FP_DOUBLE + 16)
// Reg type predicates. Arguments are fully parenthesized so that expression
// operands (e.g. X86_FPREG(a | b)) expand with the intended precedence.
#define X86_REGTYPE(x) ((x) & (X86_FP_REG_OFFSET | X86_FP_DOUBLE))
#define X86_FPREG(x) (((x) & X86_FP_REG_OFFSET) == X86_FP_REG_OFFSET)
#define X86_EXTRAREG(x) (((x) & X86_EXTRA_REG_OFFSET) == X86_EXTRA_REG_OFFSET)
#define X86_DOUBLEREG(x) (((x) & X86_FP_DOUBLE) == X86_FP_DOUBLE)
// A single-precision reg carries the FP bit but not the double bit.
#define X86_SINGLEREG(x) (X86_FPREG(x) && !X86_DOUBLEREG(x))
117
/*
 * Note: the low register of a floating point pair is sufficient to
 * create the name of a double, but require both names to be passed to
 * allow for asserts to verify that the pair is consecutive if significant
 * rework is done in this area. Also, it is a good reminder in the calling
 * code that reg locations always describe doubles as a pair of singles.
 */
// Build a double-precision register name from a pair of singles; 'y' is
// intentionally unused (see the note above on keeping both names visible).
#define X86_S2D(x, y) ((x) | X86_FP_DOUBLE)
/* Mask to strip off fp flags */
#define X86_FP_REG_MASK 0xF
128
// RegisterLocation templates return values (rAX, rAX/rDX or XMM0).
//
// Initializer field order:
// location, wide, defined, const, fp, core, ref, high_word, home, vec_len, low_reg, high_reg, s_reg_low
// NOTE(review): each template supplies 14 initializers while the legend above lists 13
// names - the trailing INVALID_SREG is presumably orig_sreg; confirm against the
// RegisterLocation declaration.
#define X86_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, rAX, INVALID_REG, INVALID_SREG, INVALID_SREG}
#define X86_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, rAX, rDX, INVALID_SREG, INVALID_SREG}
#define X86_LOC_C_RETURN_FLOAT {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorLength4, fr0, INVALID_REG, INVALID_SREG, INVALID_SREG}
#define X86_LOC_C_RETURN_DOUBLE {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorLength8, fr0, fr0, INVALID_SREG, INVALID_SREG}
Brian Carlstrom7940e442013-07-12 13:46:57 -0700135
// Bit positions used when building 64-bit resource masks for LIR instructions:
// core registers occupy bits [0..15], xmm registers bits [16..31].
enum X86ResourceEncodingPos {
  kX86GPReg0 = 0,     // First general-purpose register bit.
  kX86RegSP = 4,      // Stack pointer bit (see ENCODE_X86_REG_SP).
  kX86FPReg0 = 16,    // xmm0 .. xmm7/xmm15.
  kX86FPRegEnd = 32,  // One past the last FP register bit.
  kX86RegEnd = kX86FPRegEnd,
};
143
// Build 64-bit resource masks from X86ResourceEncodingPos bit positions.
#define ENCODE_X86_REG_LIST(N) (static_cast<uint64_t>(N))  // N is already a bit set of core regs.
#define ENCODE_X86_REG_SP (1ULL << kX86RegSP)
146
/*
 * Physical x86 register numbers used by the code generator. Core registers use
 * their hardware encoding (0-7, plus 8-15 when REX prefixes are supported);
 * xmm registers are biased by X86_FP_REG_OFFSET so the two register files get
 * distinct numbers.
 */
enum X86NativeRegisterPool {
  r0 = 0,
  rAX = r0,
  r1 = 1,
  rCX = r1,
  r2 = 2,
  rDX = r2,
  r3 = 3,
  rBX = r3,
  r4sp = 4,
  rX86_SP = r4sp,
  r4sib_no_index = r4sp,  // In a SIB byte, index field 4 means "no index".
  r5 = 5,
  rBP = r5,
  r5sib_no_base = r5,     // With mod 00, base field 5 means disp32 with no base register.
  r6 = 6,
  rSI = r6,
  r7 = 7,
  rDI = r7,
#ifndef TARGET_REX_SUPPORT
  rRET = 8,  // fake return address register for core spill mask.
#else
  r8 = 8,
  r9 = 9,
  r10 = 10,
  r11 = 11,
  r12 = 12,
  r13 = 13,
  r14 = 14,
  r15 = 15,
  rRET = 16,  // fake return address register for core spill mask.
#endif
  // xmm registers, offset into a separate numbering range.
  fr0  =  0 + X86_FP_REG_OFFSET,
  fr1  =  1 + X86_FP_REG_OFFSET,
  fr2  =  2 + X86_FP_REG_OFFSET,
  fr3  =  3 + X86_FP_REG_OFFSET,
  fr4  =  4 + X86_FP_REG_OFFSET,
  fr5  =  5 + X86_FP_REG_OFFSET,
  fr6  =  6 + X86_FP_REG_OFFSET,
  fr7  =  7 + X86_FP_REG_OFFSET,
  fr8  =  8 + X86_FP_REG_OFFSET,
  fr9  =  9 + X86_FP_REG_OFFSET,
  fr10 = 10 + X86_FP_REG_OFFSET,
  fr11 = 11 + X86_FP_REG_OFFSET,
  fr12 = 12 + X86_FP_REG_OFFSET,
  fr13 = 13 + X86_FP_REG_OFFSET,
  fr14 = 14 + X86_FP_REG_OFFSET,
  fr15 = 15 + X86_FP_REG_OFFSET,
};
196
// Aliases mapping ART's target-independent register roles onto x86 registers.
// Note: the "FARG" (float argument) aliases are the same core registers as the
// integer arguments on 32-bit x86.
#define rX86_ARG0 rAX
#define rX86_ARG1 rCX
#define rX86_ARG2 rDX
#define rX86_ARG3 rBX
#define rX86_FARG0 rAX
#define rX86_FARG1 rCX
#define rX86_FARG2 rDX
#define rX86_FARG3 rBX
#define rX86_RET0 rAX
#define rX86_RET1 rDX
#define rX86_INVOKE_TGT rAX
// x86 has no link register, and this port dedicates no suspend-check or
// self (thread) register; the PC is not directly addressable either.
#define rX86_LR INVALID_REG
#define rX86_SUSPEND INVALID_REG
#define rX86_SELF INVALID_REG
#define rX86_COUNT rCX  // rCX/ECX is the hardware count register (rep/shift).
#define rX86_PC INVALID_REG
213
/*
 * The following enum defines the list of supported X86 instructions by the
 * assembler. Their corresponding EncodingMap positions will be defined in
 * Assemble.cc.
 *
 * NOTE: the enumerator order is significant - it must stay in sync with the
 * EncodingMap table, so keep ordering intact and only append before kX86Last.
 */
enum X86OpCode {
  kX86First = 0,
  kX8632BitData = kX86First,  // data [31..0].
  kX86Bkpt,
  kX86Nop,
  // Define groups of binary operations
  // MR - Memory Register - opcode [base + disp], reg
  //      - lir operands - 0: base, 1: disp, 2: reg
  // AR - Array Register - opcode [base + index * scale + disp], reg
  //      - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
  // TR - Thread Register - opcode fs:[disp], reg - where fs: is equal to Thread::Current()
  //      - lir operands - 0: disp, 1: reg
  // RR - Register Register - opcode reg1, reg2
  //      - lir operands - 0: reg1, 1: reg2
  // RM - Register Memory - opcode reg, [base + disp]
  //      - lir operands - 0: reg, 1: base, 2: disp
  // RA - Register Array - opcode reg, [base + index * scale + disp]
  //      - lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
  // RT - Register Thread - opcode reg, fs:[disp] - where fs: is equal to Thread::Current()
  //      - lir operands - 0: reg, 1: disp
  // RI - Register Immediate - opcode reg, #immediate
  //      - lir operands - 0: reg, 1: immediate
  // MI - Memory Immediate - opcode [base + disp], #immediate
  //      - lir operands - 0: base, 1: disp, 2: immediate
  // AI - Array Immediate - opcode [base + index * scale + disp], #immediate
  //      - lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
  // TI - Thread Immediate - opcode fs:[disp], imm - where fs: is equal to Thread::Current()
  //      - lir operands - 0: disp, 1: imm
#define BinaryOpCode(opcode) \
  opcode ## 8MR, opcode ## 8AR, opcode ## 8TR, \
  opcode ## 8RR, opcode ## 8RM, opcode ## 8RA, opcode ## 8RT, \
  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, opcode ## 8TI, \
  opcode ## 16MR, opcode ## 16AR, opcode ## 16TR, \
  opcode ## 16RR, opcode ## 16RM, opcode ## 16RA, opcode ## 16RT, \
  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, opcode ## 16TI, \
  opcode ## 16RI8, opcode ## 16MI8, opcode ## 16AI8, opcode ## 16TI8, \
  opcode ## 32MR, opcode ## 32AR, opcode ## 32TR, \
  opcode ## 32RR, opcode ## 32RM, opcode ## 32RA, opcode ## 32RT, \
  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, opcode ## 32TI, \
  opcode ## 32RI8, opcode ## 32MI8, opcode ## 32AI8, opcode ## 32TI8
  BinaryOpCode(kX86Add),
  BinaryOpCode(kX86Or),
  BinaryOpCode(kX86Adc),
  BinaryOpCode(kX86Sbb),
  BinaryOpCode(kX86And),
  BinaryOpCode(kX86Sub),
  BinaryOpCode(kX86Xor),
  BinaryOpCode(kX86Cmp),
#undef BinaryOpCode
  kX86Imul16RRI, kX86Imul16RMI, kX86Imul16RAI,
  kX86Imul32RRI, kX86Imul32RMI, kX86Imul32RAI,
  kX86Imul32RRI8, kX86Imul32RMI8, kX86Imul32RAI8,
  kX86Mov8MR, kX86Mov8AR, kX86Mov8TR,
  kX86Mov8RR, kX86Mov8RM, kX86Mov8RA, kX86Mov8RT,
  kX86Mov8RI, kX86Mov8MI, kX86Mov8AI, kX86Mov8TI,
  kX86Mov16MR, kX86Mov16AR, kX86Mov16TR,
  kX86Mov16RR, kX86Mov16RM, kX86Mov16RA, kX86Mov16RT,
  kX86Mov16RI, kX86Mov16MI, kX86Mov16AI, kX86Mov16TI,
  kX86Mov32MR, kX86Mov32AR, kX86Mov32TR,
  kX86Mov32RR, kX86Mov32RM, kX86Mov32RA, kX86Mov32RT,
  kX86Mov32RI, kX86Mov32MI, kX86Mov32AI, kX86Mov32TI,
  kX86Lea32RM,
  kX86Lea32RA,
  // RRC - Register Register ConditionCode - cond_opcode reg1, reg2
  //       - lir operands - 0: reg1, 1: reg2, 2: CC
  kX86Cmov32RRC,
  // RC - Register CL - opcode reg, CL
  //      - lir operands - 0: reg, 1: CL
  // MC - Memory CL - opcode [base + disp], CL
  //      - lir operands - 0: base, 1: disp, 2: CL
  // AC - Array CL - opcode [base + index * scale + disp], CL
  //      - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: CL
#define BinaryShiftOpCode(opcode) \
  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, \
  opcode ## 8RC, opcode ## 8MC, opcode ## 8AC, \
  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, \
  opcode ## 16RC, opcode ## 16MC, opcode ## 16AC, \
  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, \
  opcode ## 32RC, opcode ## 32MC, opcode ## 32AC
  BinaryShiftOpCode(kX86Rol),
  BinaryShiftOpCode(kX86Ror),
  BinaryShiftOpCode(kX86Rcl),
  BinaryShiftOpCode(kX86Rcr),
  BinaryShiftOpCode(kX86Sal),
  BinaryShiftOpCode(kX86Shr),
  BinaryShiftOpCode(kX86Sar),
#undef BinaryShiftOpCode  // Fixed: was "#undef BinaryShiftOpcode" (case mismatch), which leaked the macro.
  kX86Cmc,
  kX86Shld32RRI,
  kX86Shrd32RRI,
#define UnaryOpcode(opcode, reg, mem, array) \
  opcode ## 8 ## reg, opcode ## 8 ## mem, opcode ## 8 ## array, \
  opcode ## 16 ## reg, opcode ## 16 ## mem, opcode ## 16 ## array, \
  opcode ## 32 ## reg, opcode ## 32 ## mem, opcode ## 32 ## array
  UnaryOpcode(kX86Test, RI, MI, AI),
  kX86Test32RR,
  UnaryOpcode(kX86Not, R, M, A),
  UnaryOpcode(kX86Neg, R, M, A),
  UnaryOpcode(kX86Mul, DaR, DaM, DaA),
  UnaryOpcode(kX86Imul, DaR, DaM, DaA),
  UnaryOpcode(kX86Divmod, DaR, DaM, DaA),
  UnaryOpcode(kX86Idivmod, DaR, DaM, DaA),
  kx86Cdq32Da,  // NOTE: lower-case 'x' is historical; callers reference this exact spelling.
  kX86Bswap32R,
  kX86Push32R, kX86Pop32R,
#undef UnaryOpcode
#define Binary0fOpCode(opcode) \
  opcode ## RR, opcode ## RM, opcode ## RA
  Binary0fOpCode(kX86Movsd),
  kX86MovsdMR,
  kX86MovsdAR,
  Binary0fOpCode(kX86Movss),
  kX86MovssMR,
  kX86MovssAR,
  Binary0fOpCode(kX86Cvtsi2sd),   // int to double
  Binary0fOpCode(kX86Cvtsi2ss),   // int to float
  Binary0fOpCode(kX86Cvttsd2si),  // truncating double to int
  Binary0fOpCode(kX86Cvttss2si),  // truncating float to int
  Binary0fOpCode(kX86Cvtsd2si),   // rounding double to int
  Binary0fOpCode(kX86Cvtss2si),   // rounding float to int
  Binary0fOpCode(kX86Ucomisd),    // unordered double compare
  Binary0fOpCode(kX86Ucomiss),    // unordered float compare
  Binary0fOpCode(kX86Comisd),     // double compare
  Binary0fOpCode(kX86Comiss),     // float compare
  Binary0fOpCode(kX86Orps),       // or of floating point registers
  Binary0fOpCode(kX86Xorps),      // xor of floating point registers
  Binary0fOpCode(kX86Addsd),      // double add
  Binary0fOpCode(kX86Addss),      // float add
  Binary0fOpCode(kX86Mulsd),      // double multiply
  Binary0fOpCode(kX86Mulss),      // float multiply
  Binary0fOpCode(kX86Cvtsd2ss),   // double to float
  Binary0fOpCode(kX86Cvtss2sd),   // float to double
  Binary0fOpCode(kX86Subsd),      // double subtract
  Binary0fOpCode(kX86Subss),      // float subtract
  Binary0fOpCode(kX86Divsd),      // double divide
  Binary0fOpCode(kX86Divss),      // float divide
  Binary0fOpCode(kX86Punpckldq),  // Interleave low-order double words
  kX86PsrlqRI,                    // right shift of floating point registers
  kX86PsllqRI,                    // left shift of floating point registers
  kX86SqrtsdRR,                   // sqrt of floating point register
  kX86Fild32M,                    // push 32-bit integer on x87 stack
  kX86Fild64M,                    // push 64-bit integer on x87 stack
  kX86Fstp32M,                    // pop top x87 fp stack and do 32-bit store
  kX86Fstp64M,                    // pop top x87 fp stack and do 64-bit store
  Binary0fOpCode(kX86Movups),     // load unaligned packed single FP values from xmm2/m128 to xmm1
  kX86MovupsMR, kX86MovupsAR,     // store unaligned packed single FP values from xmm1 to m128
  Binary0fOpCode(kX86Movaps),     // load aligned packed single FP values from xmm2/m128 to xmm1
  kX86MovapsMR, kX86MovapsAR,     // store aligned packed single FP values from xmm1 to m128
  kX86MovlpsRM, kX86MovlpsRA,     // load packed single FP values from m64 to low quadword of xmm
  kX86MovlpsMR, kX86MovlpsAR,     // store packed single FP values from low quadword of xmm to m64
  kX86MovhpsRM, kX86MovhpsRA,     // load packed single FP values from m64 to high quadword of xmm
  kX86MovhpsMR, kX86MovhpsAR,     // store packed single FP values from high quadword of xmm to m64
  Binary0fOpCode(kX86Movdxr),     // move into xmm from gpr
  kX86MovdrxRR, kX86MovdrxMR, kX86MovdrxAR,  // move into reg from xmm
  kX86Set8R, kX86Set8M, kX86Set8A,           // set byte depending on condition operand
  kX86Mfence,                     // memory barrier
  Binary0fOpCode(kX86Imul16),     // 16bit multiply
  Binary0fOpCode(kX86Imul32),     // 32bit multiply
  kX86CmpxchgRR, kX86CmpxchgMR, kX86CmpxchgAR,  // compare and exchange
  kX86LockCmpxchgMR, kX86LockCmpxchgAR,         // locked compare and exchange
  kX86LockCmpxchg8bM, kX86LockCmpxchg8bA,       // locked compare and exchange
  Binary0fOpCode(kX86Movzx8),     // zero-extend 8-bit value
  Binary0fOpCode(kX86Movzx16),    // zero-extend 16-bit value
  Binary0fOpCode(kX86Movsx8),     // sign-extend 8-bit value
  Binary0fOpCode(kX86Movsx16),    // sign-extend 16-bit value
#undef Binary0fOpCode
  kX86Jcc8, kX86Jcc32,  // jCC rel8/32; lir operands - 0: rel, 1: CC, target assigned
  kX86Jmp8, kX86Jmp32,  // jmp rel8/32; lir operands - 0: rel, target assigned
  kX86JmpR,             // jmp reg; lir operands - 0: reg
  kX86Jecxz8,           // jecxz rel8; jump relative if ECX is zero.
  kX86CallR,            // call reg; lir operands - 0: reg
  kX86CallM,            // call [base + disp]; lir operands - 0: base, 1: disp
  kX86CallA,            // call [base + index * scale + disp]
                        // lir operands - 0: base, 1: index, 2: scale, 3: disp
  kX86CallT,            // call fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp
  kX86CallI,            // call <relative> - 0: disp; Used for core.oat linking only
  kX86Ret,              // ret; no lir operands
  kX86StartOfMethod,    // call 0; pop reg; sub reg, # - generate start of method into reg
                        // lir operands - 0: reg
  kX86PcRelLoadRA,      // mov reg, [base + index * scale + PC relative displacement]
                        // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
  kX86PcRelAdr,         // mov reg, PC relative displacement; lir operands - 0: reg, 1: table
  kX86RepneScasw,       // repne scasw
  kX86Last
};
404
/* Instruction assembly field_loc kind */
enum X86EncodingKind {
  kData,                                       // Special case for raw data.
  kNop,                                        // Special case for variable length nop.
  kNullary,                                    // Opcode that takes no arguments.
  kPrefix2Nullary,                             // Opcode that takes no arguments, but 2 prefixes.
  kRegOpcode,                                  // Shorter form of R instruction kind (opcode+rd)
  kReg, kMem, kArray,                          // R, M and A instruction kinds.
  kMemReg, kArrayReg, kThreadReg,              // MR, AR and TR instruction kinds.
  kRegReg, kRegMem, kRegArray, kRegThread,     // RR, RM, RA and RT instruction kinds.
  kRegRegStore,                                // RR following the store modrm reg-reg encoding rather than the load.
  kRegImm, kMemImm, kArrayImm, kThreadImm,     // RI, MI, AI and TI instruction kinds.
  kRegRegImm, kRegMemImm, kRegArrayImm,        // RRI, RMI and RAI instruction kinds.
  kMovRegImm,                                  // Shorter form move RI.
  kRegRegImmRev,                               // RRI with first reg in r/m
  kShiftRegImm, kShiftMemImm, kShiftArrayImm,  // Shift opcode with immediate.
  kShiftRegCl, kShiftMemCl, kShiftArrayCl,     // Shift opcode with register CL.
  kRegRegReg, kRegRegMem, kRegRegArray,        // RRR, RRM, RRA instruction kinds.
  kRegCond, kMemCond, kArrayCond,              // R, M, A instruction kinds followed by a condition.
  kRegRegCond,                                 // RR instruction kind followed by a condition.
  kJmp, kJcc, kCall,                           // Branch instruction kinds.
  kPcRel,                                      // Operation with displacement that is PC relative
  kMacro,                                      // An instruction composing multiple others
  kUnimplemented                               // Encoding used when an instruction isn't yet implemented.
};
430
431/* Struct used to define the EncodingMap positions for each X86 opcode */
432struct X86EncodingMap {
433 X86OpCode opcode; // e.g. kOpAddRI
434 X86EncodingKind kind; // Used to discriminate in the union below
435 uint64_t flags;
436 struct {
437 uint8_t prefix1; // non-zero => a prefix byte
438 uint8_t prefix2; // non-zero => a second prefix byte
439 uint8_t opcode; // 1 byte opcode
Brian Carlstrom7934ac22013-07-26 10:54:15 -0700440 uint8_t extra_opcode1; // possible extra opcode byte
441 uint8_t extra_opcode2; // possible second extra opcode byte
Brian Carlstrom7940e442013-07-12 13:46:57 -0700442 // 3bit opcode that gets encoded in the register bits of the modrm byte, use determined by the
443 // encoding kind
444 uint8_t modrm_opcode;
445 uint8_t ax_opcode; // non-zero => shorter encoding for AX as a destination
Brian Carlstrom7934ac22013-07-26 10:54:15 -0700446 uint8_t immediate_bytes; // number of bytes of immediate
Brian Carlstrom7940e442013-07-12 13:46:57 -0700447 } skeleton;
448 const char *name;
449 const char* fmt;
450};
451
452
// FIXME: mem barrier type - what do we do for x86?
// Memory barrier kinds; both map to 0 for now (see FIXME above).
#define kSY 0
#define kST 0

// Offsets of high and low halves of a 64bit value.
#define LOWORD_OFFSET 0
#define HIWORD_OFFSET 4

// Segment override instruction prefix used for quick TLS access to Thread::Current().
// 0x64 is the FS segment-override prefix byte on x86.
#define THREAD_PREFIX 0x64
463
// True iff v fits in a sign-extended 8-bit immediate, i.e. v is in [-128, 127].
#define IS_SIMM8(v) (((v) >= -128) && ((v) <= 127))
// True iff v fits in a sign-extended 16-bit immediate, i.e. v is in [-32768, 32767].
#define IS_SIMM16(v) (((v) >= -32768) && ((v) <= 32767))
466
// Encoding details for every opcode in X86OpCode; the table itself is defined in Assemble.cc.
extern X86EncodingMap EncodingMap[kX86Last];
// Maps a target-independent ConditionCode onto the x86 condition-code encoding.
extern X86ConditionCode X86ConditionEncoding(ConditionCode cond);
469
470} // namespace art
471
Brian Carlstromfc0e3212013-07-17 14:40:12 -0700472#endif // ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_