/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_
#define ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_

#include "dex/compiler_internals.h"

namespace art {

/*
 * Runtime register conventions. We consider x86, x86-64 and x32 (32-bit mode x86-64), although
 * we currently only target x86. The ABIs have different conventions and we hope to have a single
 * convention to simplify code generation. Changing something that is callee save and making it
 * caller save places a burden on up-calls to save/restore the callee save register; however, there
 * are few registers that are callee save in the ABI. Changing something that is caller save and
 * making it callee save places a burden on down-calls to save/restore the callee save register.
 * For these reasons we aim to match native conventions for caller and callee save. The first 4
 * registers can be used for byte operations; for this reason they are preferred for temporary
 * scratch registers.
 *
 * General Purpose Registers:
 *  Native: x86         | x86-64 / x32      | ART
 *  r0/eax: caller save | caller save       | caller, Method*, scratch, return value
 *  r1/ecx: caller save | caller save, arg4 | caller, arg1, scratch
 *  r2/edx: caller save | caller save, arg3 | caller, arg2, scratch, high half of long return
 *  r3/ebx: callEE save | callEE save       | callER, arg3, scratch
 *  r4/esp: stack pointer
 *  r5/ebp: callee save | callee save       | callee, available for dalvik register promotion
 *  r6/esi: callEE save | callER save, arg2 | callee, available for dalvik register promotion
 *  r7/edi: callEE save | callER save, arg1 | callee, available for dalvik register promotion
 *  ---  x86-64/x32 registers
 *  Native: x86-64 / x32      | ART
 *  r8:     caller save, arg5 | caller, scratch
 *  r9:     caller save, arg6 | caller, scratch
 *  r10:    caller save       | caller, scratch
 *  r11:    caller save       | caller, scratch
 *  r12:    callee save       | callee, available for dalvik register promotion
 *  r13:    callee save       | callee, available for dalvik register promotion
 *  r14:    callee save       | callee, available for dalvik register promotion
 *  r15:    callee save       | callee, available for dalvik register promotion
 *
 * There is no rSELF; instead, on x86 fs: has a base address of Thread::Current, whereas on
 * x86-64/x32 gs: holds it.
 *
 * For floating point we don't support CPUs without SSE2 support (i.e. we require something newer
 * than the Pentium III):
 *  Native: x86       | x86-64 / x32      | ART
 *  XMM0: caller save | caller save, arg1 | caller, float/double return value (except for native x86 code)
 *  XMM1: caller save | caller save, arg2 | caller, scratch
 *  XMM2: caller save | caller save, arg3 | caller, scratch
 *  XMM3: caller save | caller save, arg4 | caller, scratch
 *  XMM4: caller save | caller save, arg5 | caller, scratch
 *  XMM5: caller save | caller save, arg6 | caller, scratch
 *  XMM6: caller save | caller save, arg7 | caller, scratch
 *  XMM7: caller save | caller save, arg8 | caller, scratch
 *  ---  x86-64/x32 registers
 *  XMM8 .. 15: caller save
 *
 * X87 is a necessary evil outside of ART code:
 *  ST0:  x86 float/double native return value, caller save
 *  ST1 .. ST7: caller save
 *
 * Stack frame diagram (stack grows down, higher addresses at top):
 *
 * +------------------------+
 * | IN[ins-1]              |  {Note: resides in caller's frame}
 * |       .                |
 * | IN[0]                  |
 * | caller's Method*       |
 * +========================+  {Note: start of callee's frame}
 * | return address         |  {pushed by call}
 * | spill region           |  {variable sized}
 * +------------------------+
 * | ...filler word...      |  {Note: used as 2nd word of V[locals-1] if long}
 * +------------------------+
 * | V[locals-1]            |
 * | V[locals-2]            |
 * |      .                 |
 * |      .                 |
 * | V[1]                   |
 * | V[0]                   |
 * +------------------------+
 * | 0 to 3 words padding   |
 * +------------------------+
 * | OUT[outs-1]            |
 * | OUT[outs-2]            |
 * |       .                |
 * | OUT[0]                 |
 * | cur_method*            | <<== sp w/ 16-byte alignment
 * +========================+
 */

// Offset to distinguish FP regs.
#define X86_FP_REG_OFFSET 32
// Offset to distinguish DP FP regs.
#define X86_FP_DOUBLE (X86_FP_REG_OFFSET + 16)
// Offset to distinguish the extra regs.
#define X86_EXTRA_REG_OFFSET (X86_FP_DOUBLE + 16)
// Reg types.
#define X86_REGTYPE(x) (x & (X86_FP_REG_OFFSET | X86_FP_DOUBLE))
#define X86_FPREG(x) ((x & X86_FP_REG_OFFSET) == X86_FP_REG_OFFSET)
#define X86_EXTRAREG(x) ((x & X86_EXTRA_REG_OFFSET) == X86_EXTRA_REG_OFFSET)
#define X86_DOUBLEREG(x) ((x & X86_FP_DOUBLE) == X86_FP_DOUBLE)
#define X86_SINGLEREG(x) (X86_FPREG(x) && !X86_DOUBLEREG(x))
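
// A quick worked example of the classification macros above (illustrative only): fr3 below is
// encoded as 3 + X86_FP_REG_OFFSET = 35, so X86_FPREG(fr3) and X86_SINGLEREG(fr3) hold, while
// X86_DOUBLEREG(fr3) and X86_EXTRAREG(fr3) do not, because neither the X86_FP_DOUBLE nor the
// X86_EXTRA_REG_OFFSET bit pattern is fully present in 35.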

/*
 * Note: the low register of a floating point pair is sufficient to
 * create the name of a double, but we require both names to be passed to
 * allow asserts to verify that the pair is consecutive if significant
 * rework is done in this area. Also, it is a good reminder in the calling
 * code that reg locations always describe doubles as a pair of singles.
 */
#define X86_S2D(x, y) ((x) | X86_FP_DOUBLE)
/* Mask to strip off fp flags */
#define X86_FP_REG_MASK 0xF
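
// For example (illustrative): X86_S2D(fr2, fr3) yields fr2 | X86_FP_DOUBLE = 2 + 32 + 16 = 50,
// which X86_DOUBLEREG() recognizes as a double, and masking with X86_FP_REG_MASK recovers the
// physical XMM number (2) shared by the pair fr2/fr3.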

// RegisterLocation templates for return values (rAX, rAX/rDX or XMM0).
// location, wide, defined, const, fp, core, ref, high_word, home, vec_len, low_reg, high_reg, s_reg_low
#define X86_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, rAX, INVALID_REG, INVALID_SREG, INVALID_SREG}
#define X86_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, kVectorNotUsed, rAX, rDX, INVALID_SREG, INVALID_SREG}
#define X86_LOC_C_RETURN_FLOAT {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, kVectorLength4, fr0, INVALID_REG, INVALID_SREG, INVALID_SREG}
#define X86_LOC_C_RETURN_DOUBLE {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, kVectorLength8, fr0, fr0, INVALID_SREG, INVALID_SREG}

enum X86ResourceEncodingPos {
  kX86GPReg0   = 0,
  kX86RegSP    = 4,
  kX86FPReg0   = 16,  // xmm0 .. xmm7/xmm15.
  kX86FPRegEnd = 32,
  kX86RegEnd   = kX86FPRegEnd,
};

#define ENCODE_X86_REG_LIST(N)  (static_cast<uint64_t>(N))
#define ENCODE_X86_REG_SP       (1ULL << kX86RegSP)
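
// These positions index bits in the 64-bit use/def resource masks attached to each LIR. A
// hypothetical mask for an instruction that reads EAX and adjusts the stack pointer might be
// built as:
//   uint64_t use_mask = ENCODE_X86_REG_LIST(1 << kX86GPReg0) | ENCODE_X86_REG_SP;
// (illustrative only; the real masks are assigned where each opcode's flags are defined).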

enum X86NativeRegisterPool {
  r0 = 0,
  rAX = r0,
  r1 = 1,
  rCX = r1,
  r2 = 2,
  rDX = r2,
  r3 = 3,
  rBX = r3,
  r4sp = 4,
  rX86_SP = r4sp,
  r4sib_no_index = r4sp,
  r5 = 5,
  rBP = r5,
  r5sib_no_base = r5,
  r6 = 6,
  rSI = r6,
  r7 = 7,
  rDI = r7,
#ifndef TARGET_REX_SUPPORT
  rRET = 8,  // fake return address register for core spill mask.
#else
  r8 = 8,
  r9 = 9,
  r10 = 10,
  r11 = 11,
  r12 = 12,
  r13 = 13,
  r14 = 14,
  r15 = 15,
  rRET = 16,  // fake return address register for core spill mask.
#endif
  fr0  =  0 + X86_FP_REG_OFFSET,
  fr1  =  1 + X86_FP_REG_OFFSET,
  fr2  =  2 + X86_FP_REG_OFFSET,
  fr3  =  3 + X86_FP_REG_OFFSET,
  fr4  =  4 + X86_FP_REG_OFFSET,
  fr5  =  5 + X86_FP_REG_OFFSET,
  fr6  =  6 + X86_FP_REG_OFFSET,
  fr7  =  7 + X86_FP_REG_OFFSET,
  fr8  =  8 + X86_FP_REG_OFFSET,
  fr9  =  9 + X86_FP_REG_OFFSET,
  fr10 = 10 + X86_FP_REG_OFFSET,
  fr11 = 11 + X86_FP_REG_OFFSET,
  fr12 = 12 + X86_FP_REG_OFFSET,
  fr13 = 13 + X86_FP_REG_OFFSET,
  fr14 = 14 + X86_FP_REG_OFFSET,
  fr15 = 15 + X86_FP_REG_OFFSET,
};

#define rX86_ARG0       rAX
#define rX86_ARG1       rCX
#define rX86_ARG2       rDX
#define rX86_ARG3       rBX
#define rX86_FARG0      rAX
#define rX86_FARG1      rCX
#define rX86_FARG2      rDX
#define rX86_FARG3      rBX
#define rX86_RET0       rAX
#define rX86_RET1       rDX
#define rX86_INVOKE_TGT rAX
#define rX86_LR         INVALID_REG
#define rX86_SUSPEND    INVALID_REG
#define rX86_SELF       INVALID_REG
#define rX86_COUNT      rCX
#define rX86_PC         INVALID_REG

/*
 * The following enum defines the list of X86 instructions supported by the
 * assembler. Their corresponding EncodingMap positions will be defined in
 * Assemble.cc.
 */
enum X86OpCode {
  kX86First = 0,
  kX8632BitData = kX86First,  // data [31..0].
  kX86Bkpt,
  kX86Nop,
  // Define groups of binary operations
  // MR - Memory Register    - opcode [base + disp], reg
  //             - lir operands - 0: base, 1: disp, 2: reg
  // AR - Array Register     - opcode [base + index * scale + disp], reg
  //             - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
  // TR - Thread Register    - opcode fs:[disp], reg - where fs: is equal to Thread::Current()
  //             - lir operands - 0: disp, 1: reg
  // RR - Register Register  - opcode reg1, reg2
  //             - lir operands - 0: reg1, 1: reg2
  // RM - Register Memory    - opcode reg, [base + disp]
  //             - lir operands - 0: reg, 1: base, 2: disp
  // RA - Register Array     - opcode reg, [base + index * scale + disp]
  //             - lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
  // RT - Register Thread    - opcode reg, fs:[disp] - where fs: is equal to Thread::Current()
  //             - lir operands - 0: reg, 1: disp
  // RI - Register Immediate - opcode reg, #immediate
  //             - lir operands - 0: reg, 1: immediate
  // MI - Memory Immediate   - opcode [base + disp], #immediate
  //             - lir operands - 0: base, 1: disp, 2: immediate
  // AI - Array Immediate    - opcode [base + index * scale + disp], #immediate
  //             - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: immediate
  // TI - Thread Immediate   - opcode fs:[disp], imm - where fs: is equal to Thread::Current()
  //             - lir operands - 0: disp, 1: imm
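  // As a concrete (illustrative) reading of the table above: a kX86Add32MR LIR with operands
  // {rBP, 8, rAX} denotes "add [ebp + 8], eax", while a kX86Add32RI LIR with operands {rCX, 16}
  // denotes "add ecx, 16"; the suffix names the addressing form and the number gives the
  // operand width in bits.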
#define BinaryOpCode(opcode) \
  opcode ## 8MR, opcode ## 8AR, opcode ## 8TR, \
  opcode ## 8RR, opcode ## 8RM, opcode ## 8RA, opcode ## 8RT, \
  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, opcode ## 8TI, \
  opcode ## 16MR, opcode ## 16AR, opcode ## 16TR, \
  opcode ## 16RR, opcode ## 16RM, opcode ## 16RA, opcode ## 16RT, \
  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, opcode ## 16TI, \
  opcode ## 16RI8, opcode ## 16MI8, opcode ## 16AI8, opcode ## 16TI8, \
  opcode ## 32MR, opcode ## 32AR, opcode ## 32TR, \
  opcode ## 32RR, opcode ## 32RM, opcode ## 32RA, opcode ## 32RT, \
  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, opcode ## 32TI, \
  opcode ## 32RI8, opcode ## 32MI8, opcode ## 32AI8, opcode ## 32TI8
  BinaryOpCode(kX86Add),
  BinaryOpCode(kX86Or),
  BinaryOpCode(kX86Adc),
  BinaryOpCode(kX86Sbb),
  BinaryOpCode(kX86And),
  BinaryOpCode(kX86Sub),
  BinaryOpCode(kX86Xor),
  BinaryOpCode(kX86Cmp),
#undef BinaryOpCode
  kX86Imul16RRI, kX86Imul16RMI, kX86Imul16RAI,
  kX86Imul32RRI, kX86Imul32RMI, kX86Imul32RAI,
  kX86Imul32RRI8, kX86Imul32RMI8, kX86Imul32RAI8,
  kX86Mov8MR, kX86Mov8AR, kX86Mov8TR,
  kX86Mov8RR, kX86Mov8RM, kX86Mov8RA, kX86Mov8RT,
  kX86Mov8RI, kX86Mov8MI, kX86Mov8AI, kX86Mov8TI,
  kX86Mov16MR, kX86Mov16AR, kX86Mov16TR,
  kX86Mov16RR, kX86Mov16RM, kX86Mov16RA, kX86Mov16RT,
  kX86Mov16RI, kX86Mov16MI, kX86Mov16AI, kX86Mov16TI,
  kX86Mov32MR, kX86Mov32AR, kX86Mov32TR,
  kX86Mov32RR, kX86Mov32RM, kX86Mov32RA, kX86Mov32RT,
  kX86Mov32RI, kX86Mov32MI, kX86Mov32AI, kX86Mov32TI,
  kX86Lea32RA,
  // RRC - Register Register ConditionCode - cond_opcode reg1, reg2
  //             - lir operands - 0: reg1, 1: reg2, 2: CC
  kX86Cmov32RRC,
  // RC - Register CL - opcode reg, CL
  //             - lir operands - 0: reg, 1: CL
  // MC - Memory CL   - opcode [base + disp], CL
  //             - lir operands - 0: base, 1: disp, 2: CL
  // AC - Array CL    - opcode [base + index * scale + disp], CL
  //             - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: CL
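  // For example (illustrative): a kX86Sal32RC LIR whose reg operand is rDX denotes
  // "sal edx, cl", i.e. a left shift whose count is taken from the CL register.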
#define BinaryShiftOpCode(opcode) \
  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, \
  opcode ## 8RC, opcode ## 8MC, opcode ## 8AC, \
  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, \
  opcode ## 16RC, opcode ## 16MC, opcode ## 16AC, \
  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, \
  opcode ## 32RC, opcode ## 32MC, opcode ## 32AC
  BinaryShiftOpCode(kX86Rol),
  BinaryShiftOpCode(kX86Ror),
  BinaryShiftOpCode(kX86Rcl),
  BinaryShiftOpCode(kX86Rcr),
  BinaryShiftOpCode(kX86Sal),
  BinaryShiftOpCode(kX86Shr),
  BinaryShiftOpCode(kX86Sar),
#undef BinaryShiftOpCode
  kX86Cmc,
  kX86Shld32RRI,
  kX86Shrd32RRI,
#define UnaryOpcode(opcode, reg, mem, array) \
  opcode ## 8 ## reg, opcode ## 8 ## mem, opcode ## 8 ## array, \
  opcode ## 16 ## reg, opcode ## 16 ## mem, opcode ## 16 ## array, \
  opcode ## 32 ## reg, opcode ## 32 ## mem, opcode ## 32 ## array
  UnaryOpcode(kX86Test, RI, MI, AI),
  kX86Test32RR,
  UnaryOpcode(kX86Not, R, M, A),
  UnaryOpcode(kX86Neg, R, M, A),
  UnaryOpcode(kX86Mul, DaR, DaM, DaA),
  UnaryOpcode(kX86Imul, DaR, DaM, DaA),
  UnaryOpcode(kX86Divmod, DaR, DaM, DaA),
  UnaryOpcode(kX86Idivmod, DaR, DaM, DaA),
  kx86Cdq32Da,
  kX86Bswap32R,
  kX86Push32R, kX86Pop32R,
#undef UnaryOpcode
#define Binary0fOpCode(opcode) \
  opcode ## RR, opcode ## RM, opcode ## RA
  Binary0fOpCode(kX86Movsd),
  kX86MovsdMR,
  kX86MovsdAR,
  Binary0fOpCode(kX86Movss),
  kX86MovssMR,
  kX86MovssAR,
  Binary0fOpCode(kX86Cvtsi2sd),   // int to double
  Binary0fOpCode(kX86Cvtsi2ss),   // int to float
  Binary0fOpCode(kX86Cvttsd2si),  // truncating double to int
  Binary0fOpCode(kX86Cvttss2si),  // truncating float to int
  Binary0fOpCode(kX86Cvtsd2si),   // rounding double to int
  Binary0fOpCode(kX86Cvtss2si),   // rounding float to int
  Binary0fOpCode(kX86Ucomisd),    // unordered double compare
  Binary0fOpCode(kX86Ucomiss),    // unordered float compare
  Binary0fOpCode(kX86Comisd),     // double compare
  Binary0fOpCode(kX86Comiss),     // float compare
  Binary0fOpCode(kX86Orps),       // or of floating point registers
  Binary0fOpCode(kX86Xorps),      // xor of floating point registers
  Binary0fOpCode(kX86Addsd),      // double add
  Binary0fOpCode(kX86Addss),      // float add
  Binary0fOpCode(kX86Mulsd),      // double multiply
  Binary0fOpCode(kX86Mulss),      // float multiply
  Binary0fOpCode(kX86Cvtsd2ss),   // double to float
  Binary0fOpCode(kX86Cvtss2sd),   // float to double
  Binary0fOpCode(kX86Subsd),      // double subtract
  Binary0fOpCode(kX86Subss),      // float subtract
  Binary0fOpCode(kX86Divsd),      // double divide
  Binary0fOpCode(kX86Divss),      // float divide
  kX86PsrlqRI,                    // right shift of floating point registers
  kX86PsllqRI,                    // left shift of floating point registers
  kX86SqrtsdRR,                   // sqrt of floating point register
  kX86FstpdM,                     // Store and pop top x87 fp stack
  Binary0fOpCode(kX86Movdxr),     // move into xmm from gpr
  kX86MovdrxRR, kX86MovdrxMR, kX86MovdrxAR,  // move into reg from xmm
  kX86Set8R, kX86Set8M, kX86Set8A,  // set byte depending on condition operand
  kX86Mfence,                     // memory barrier
  Binary0fOpCode(kX86Imul16),     // 16bit multiply
  Binary0fOpCode(kX86Imul32),     // 32bit multiply
  kX86CmpxchgRR, kX86CmpxchgMR, kX86CmpxchgAR,  // compare and exchange
  kX86LockCmpxchgMR, kX86LockCmpxchgAR,         // locked compare and exchange
  kX86LockCmpxchg8bM, kX86LockCmpxchg8bA,       // locked compare and exchange
  Binary0fOpCode(kX86Movzx8),     // zero-extend 8-bit value
  Binary0fOpCode(kX86Movzx16),    // zero-extend 16-bit value
  Binary0fOpCode(kX86Movsx8),     // sign-extend 8-bit value
  Binary0fOpCode(kX86Movsx16),    // sign-extend 16-bit value
#undef Binary0fOpCode
  kX86Jcc8, kX86Jcc32,  // jCC rel8/32; lir operands - 0: rel, 1: CC, target assigned
  kX86Jmp8, kX86Jmp32,  // jmp rel8/32; lir operands - 0: rel, target assigned
  kX86JmpR,             // jmp reg; lir operands - 0: reg
  kX86CallR,            // call reg; lir operands - 0: reg
  kX86CallM,            // call [base + disp]; lir operands - 0: base, 1: disp
  kX86CallA,            // call [base + index * scale + disp]
                        //       lir operands - 0: base, 1: index, 2: scale, 3: disp
  kX86CallT,            // call fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp
  kX86Ret,              // ret; no lir operands
  kX86StartOfMethod,    // call 0; pop reg; sub reg, # - generate start of method into reg
                        //       lir operands - 0: reg
  kX86PcRelLoadRA,      // mov reg, [base + index * scale + PC relative displacement]
                        //       lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
  kX86PcRelAdr,         // mov reg, PC relative displacement; lir operands - 0: reg, 1: table
  kX86Last
};

/* Instruction assembly field_loc kind */
enum X86EncodingKind {
  kData,          // Special case for raw data.
  kNop,           // Special case for variable length nop.
  kNullary,       // Opcode that takes no arguments.
  kRegOpcode,     // Shorter form of R instruction kind (opcode+rd)
  kReg, kMem, kArray,              // R, M and A instruction kinds.
  kMemReg, kArrayReg, kThreadReg,  // MR, AR and TR instruction kinds.
  kRegReg, kRegMem, kRegArray, kRegThread,  // RR, RM, RA and RT instruction kinds.
  kRegRegStore,   // RR following the store modrm reg-reg encoding rather than the load.
  kRegImm, kMemImm, kArrayImm, kThreadImm,  // RI, MI, AI and TI instruction kinds.
  kRegRegImm, kRegMemImm, kRegArrayImm,     // RRI, RMI and RAI instruction kinds.
  kMovRegImm,     // Shorter form move RI.
  kRegRegImmRev,  // RRI with first reg in r/m
  kShiftRegImm, kShiftMemImm, kShiftArrayImm,  // Shift opcode with immediate.
  kShiftRegCl, kShiftMemCl, kShiftArrayCl,     // Shift opcode with register CL.
  kRegRegReg, kRegRegMem, kRegRegArray,        // RRR, RRM, RRA instruction kinds.
  kRegCond, kMemCond, kArrayCond,              // R, M, A instruction kinds followed by a condition.
  kRegRegCond,    // RR instruction kind followed by a condition.
  kJmp, kJcc, kCall,  // Branch instruction kinds.
  kPcRel,         // Operation with displacement that is PC relative
  kMacro,         // An instruction composing multiple others
  kUnimplemented  // Encoding used when an instruction isn't yet implemented.
};

/* Struct used to define the EncodingMap positions for each X86 opcode */
struct X86EncodingMap {
  X86OpCode opcode;      // e.g. kOpAddRI
  X86EncodingKind kind;  // Used to discriminate in the union below
  uint64_t flags;
  struct {
    uint8_t prefix1;        // non-zero => a prefix byte
    uint8_t prefix2;        // non-zero => a second prefix byte
    uint8_t opcode;         // 1 byte opcode
    uint8_t extra_opcode1;  // possible extra opcode byte
    uint8_t extra_opcode2;  // possible second extra opcode byte
    // 3bit opcode that gets encoded in the register bits of the modrm byte, use determined by the
    // encoding kind
    uint8_t modrm_opcode;
    uint8_t ax_opcode;        // non-zero => shorter encoding for AX as a destination
    uint8_t immediate_bytes;  // number of bytes of immediate
  } skeleton;
  const char* name;
  const char* fmt;
};
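
// A hypothetical sketch of an EncodingMap entry, to show how the skeleton fields fit together
// (the real table is defined in Assemble.cc and may differ in flags and format strings):
//   { kX86Add32RI, kRegImm, /* flags elided */ 0,
//     { 0, 0, 0x81, 0, 0, 0 /* modrm /0 */, 0x05 /* short eAX form */, 4 /* imm bytes */ },
//     "Add32RI", "!0r,!1d" }
// i.e. "add r/m32, imm32" uses opcode 0x81 with modrm opcode /0, has the shorter 0x05 encoding
// when the destination is eAX, and carries a 4-byte immediate.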


// FIXME: mem barrier type - what do we do for x86?
#define kSY 0
#define kST 0

// Offsets of high and low halves of a 64bit value.
#define LOWORD_OFFSET 0
#define HIWORD_OFFSET 4

// Segment override instruction prefix used for quick TLS access to Thread::Current().
#define THREAD_PREFIX 0x64

#define IS_SIMM8(v) ((-128 <= (v)) && ((v) <= 127))
#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32767))
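// For example (illustrative): IS_SIMM8(16) holds, so an immediate of 16 can use the
// sign-extended 8-bit immediate encodings (e.g. the *32RI8 forms above), while IS_SIMM8(200)
// does not, forcing the full 32-bit immediate form.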

extern X86EncodingMap EncodingMap[kX86Last];
extern X86ConditionCode X86ConditionEncoding(ConditionCode cond);

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_