/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_COMPILER_CODEGEN_X86_X86LIR_H_
#define ART_COMPILER_COMPILER_CODEGEN_X86_X86LIR_H_

#include "../../compiler_internals.h"

namespace art {

/*
 * Runtime register conventions. We consider x86, x86-64 and x32 (32-bit mode x86-64), although
 * we currently only target x86. The ABIs have different conventions and we hope to use a single
 * convention to simplify code generation. Changing something that is callee save and making it
 * caller save places a burden on up-calls to save/restore the callee save register; however, there
 * are few registers that are callee save in the ABI. Changing something that is caller save and
 * making it callee save places a burden on down-calls to save/restore the callee save register.
 * For these reasons we aim to match the native conventions for caller and callee save. The first 4
 * registers can be used for byte operations and are therefore preferred as temporary scratch
 * registers.
 *
 * General Purpose Registers:
 *   Native: x86         | x86-64 / x32      | ART
 *   r0/eax: caller save | caller save       | caller, Method*, scratch, return value
 *   r1/ecx: caller save | caller save, arg4 | caller, arg1, scratch
 *   r2/edx: caller save | caller save, arg3 | caller, arg2, scratch, high half of long return
 *   r3/ebx: callEE save | callEE save       | callER, arg3, scratch
 *   r4/esp: stack pointer
 *   r5/ebp: callee save | callee save       | callee, available for dalvik register promotion
 *   r6/esi: callEE save | callER save, arg2 | callee, available for dalvik register promotion
 *   r7/edi: callEE save | callER save, arg1 | callee, available for dalvik register promotion
 *   --- x86-64/x32 registers
 *   Native: x86-64 / x32   | ART
 *   r8:  caller save, arg5 | caller, scratch
 *   r9:  caller save, arg6 | caller, scratch
 *   r10: caller save       | caller, scratch
 *   r11: caller save       | caller, scratch
 *   r12: callee save       | callee, available for dalvik register promotion
 *   r13: callee save       | callee, available for dalvik register promotion
 *   r14: callee save       | callee, available for dalvik register promotion
 *   r15: callee save       | callee, available for dalvik register promotion
 *
 * There is no rSELF; instead, on x86 the fs: segment register has a base address of
 * Thread::Current(), whereas on x86-64/x32 gs: holds it.
 *
 * For floating point we don't support CPUs without SSE2 (i.e. we require a CPU newer than the PIII):
 *   Native: x86         | x86-64 / x32      | ART
 *   XMM0: caller save   | caller save, arg1 | caller, float/double return value (except for native x86 code)
 *   XMM1: caller save   | caller save, arg2 | caller, scratch
 *   XMM2: caller save   | caller save, arg3 | caller, scratch
 *   XMM3: caller save   | caller save, arg4 | caller, scratch
 *   XMM4: caller save   | caller save, arg5 | caller, scratch
 *   XMM5: caller save   | caller save, arg6 | caller, scratch
 *   XMM6: caller save   | caller save, arg7 | caller, scratch
 *   XMM7: caller save   | caller save, arg8 | caller, scratch
 *   --- x86-64/x32 registers
 *   XMM8 .. 15: caller save
 *
 * X87 is a necessary evil outside of ART code:
 *   ST0:  x86 float/double native return value, caller save
 *   ST1 .. ST7: caller save
 *
 * Stack frame diagram (stack grows down, higher addresses at top):
 *
 * +------------------------+
 * | IN[ins-1]              |  {Note: resides in caller's frame}
 * | .                      |
 * | IN[0]                  |
 * | caller's Method*       |
 * +========================+  {Note: start of callee's frame}
 * | return address         |  {pushed by call}
 * | spill region           |  {variable sized}
 * +------------------------+
 * | ...filler word...      |  {Note: used as 2nd word of V[locals-1] if long}
 * +------------------------+
 * | V[locals-1]            |
 * | V[locals-2]            |
 * | .                      |
 * | .                      |
 * | V[1]                   |
 * | V[0]                   |
 * +------------------------+
 * | 0 to 3 words padding   |
 * +------------------------+
 * | OUT[outs-1]            |
 * | OUT[outs-2]            |
 * | .                      |
 * | OUT[0]                 |
 * | cur_method*            |  <<== sp w/ 16-byte alignment
 * +========================+
 */

// Offset to distinguish FP regs.
#define X86_FP_REG_OFFSET 32
// Offset to distinguish double-precision FP regs.
#define X86_FP_DOUBLE (X86_FP_REG_OFFSET + 16)
// Offset to distinguish the extra regs.
#define X86_EXTRA_REG_OFFSET (X86_FP_DOUBLE + 16)
// Reg types.
#define X86_REGTYPE(x) (x & (X86_FP_REG_OFFSET | X86_FP_DOUBLE))
#define X86_FPREG(x) ((x & X86_FP_REG_OFFSET) == X86_FP_REG_OFFSET)
#define X86_EXTRAREG(x) ((x & X86_EXTRA_REG_OFFSET) == X86_EXTRA_REG_OFFSET)
#define X86_DOUBLEREG(x) ((x & X86_FP_DOUBLE) == X86_FP_DOUBLE)
#define X86_SINGLEREG(x) (X86_FPREG(x) && !X86_DOUBLEREG(x))
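// Worked example, using only the values defined above: a core register such as rDX (2, see
// X86NativeRegisterPool below) has X86_REGTYPE(2) == 0 and X86_FPREG(2) false, while a
// single-precision register such as fr2 (2 + X86_FP_REG_OFFSET == 34) has X86_FPREG(34) true,
// X86_DOUBLEREG(34) false and therefore X86_SINGLEREG(34) true.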

/*
 * Note: the low register of a floating point pair is sufficient to
 * create the name of a double, but we require both names to be passed to
 * allow asserts to verify that the pair is consecutive if significant
 * rework is done in this area. Also, it is a good reminder in the calling
 * code that reg locations always describe doubles as a pair of singles.
 */
#define X86_S2D(x,y) ((x) | X86_FP_DOUBLE)
/* Mask to strip off fp flags */
#define X86_FP_REG_MASK 0xF
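// For example, X86_S2D(fr0, fr1) evaluates to fr0 | X86_FP_DOUBLE (the second argument is only
// there for assert-friendly call sites), and (reg & X86_FP_REG_MASK) recovers the raw xmm
// number, e.g. 0 for both fr0 and the double built from the fr0/fr1 pair.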

// RegisterLocation templates for return values (rAX, rAX/rDX or XMM0).
// location, wide, defined, const, fp, core, ref, high_word, home, low_reg, high_reg, s_reg_low, orig_sreg
#define X86_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rAX, INVALID_REG, INVALID_SREG, INVALID_SREG}
#define X86_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, rAX, rDX, INVALID_SREG, INVALID_SREG}
#define X86_LOC_C_RETURN_FLOAT {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1, fr0, INVALID_REG, INVALID_SREG, INVALID_SREG}
#define X86_LOC_C_RETURN_DOUBLE {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1, fr0, fr1, INVALID_SREG, INVALID_SREG}

enum X86ResourceEncodingPos {
  kX86GPReg0   = 0,
  kX86RegSP    = 4,
  kX86FPReg0   = 16,  // xmm0 .. xmm7/xmm15.
  kX86FPRegEnd = 32,
  kX86RegEnd   = kX86FPRegEnd,
};

#define ENCODE_X86_REG_LIST(N) (static_cast<uint64_t>(N))
#define ENCODE_X86_REG_SP (1ULL << kX86RegSP)
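// For example, an instruction that uses the stack pointer contributes the single bit
// ENCODE_X86_REG_SP (bit position kX86RegSP == 4) to its resource (use/def) mask, while
// ENCODE_X86_REG_LIST(0x3) marks r0 and r1 (bits 0 and 1, counting from kX86GPReg0).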

enum X86NativeRegisterPool {
  r0 = 0,
  rAX = r0,
  r1 = 1,
  rCX = r1,
  r2 = 2,
  rDX = r2,
  r3 = 3,
  rBX = r3,
  r4sp = 4,
  rX86_SP = r4sp,
  r4sib_no_index = r4sp,
  r5 = 5,
  rBP = r5,
  r5sib_no_base = r5,
  r6 = 6,
  rSI = r6,
  r7 = 7,
  rDI = r7,
#ifndef TARGET_REX_SUPPORT
  rRET = 8,  // fake return address register for core spill mask.
#else
  r8 = 8,
  r9 = 9,
  r10 = 10,
  r11 = 11,
  r12 = 12,
  r13 = 13,
  r14 = 14,
  r15 = 15,
  rRET = 16,  // fake return address register for core spill mask.
#endif
  fr0  =  0 + X86_FP_REG_OFFSET,
  fr1  =  1 + X86_FP_REG_OFFSET,
  fr2  =  2 + X86_FP_REG_OFFSET,
  fr3  =  3 + X86_FP_REG_OFFSET,
  fr4  =  4 + X86_FP_REG_OFFSET,
  fr5  =  5 + X86_FP_REG_OFFSET,
  fr6  =  6 + X86_FP_REG_OFFSET,
  fr7  =  7 + X86_FP_REG_OFFSET,
  fr8  =  8 + X86_FP_REG_OFFSET,
  fr9  =  9 + X86_FP_REG_OFFSET,
  fr10 = 10 + X86_FP_REG_OFFSET,
  fr11 = 11 + X86_FP_REG_OFFSET,
  fr12 = 12 + X86_FP_REG_OFFSET,
  fr13 = 13 + X86_FP_REG_OFFSET,
  fr14 = 14 + X86_FP_REG_OFFSET,
  fr15 = 15 + X86_FP_REG_OFFSET,
};

#define rX86_ARG0 rAX
#define rX86_ARG1 rCX
#define rX86_ARG2 rDX
#define rX86_ARG3 rBX
#define rX86_FARG0 rAX
#define rX86_FARG1 rCX
#define rX86_FARG2 rDX
#define rX86_FARG3 rBX
#define rX86_RET0 rAX
#define rX86_RET1 rDX
#define rX86_INVOKE_TGT rAX
#define rX86_LR INVALID_REG
#define rX86_SUSPEND INVALID_REG
#define rX86_SELF INVALID_REG
#define rX86_COUNT rCX
#define rX86_PC INVALID_REG

/*
 * The following enum defines the list of X86 instructions supported by the
 * assembler. Their corresponding EncodingMap positions will be defined in
 * Assemble.cc.
 */
enum X86OpCode {
  kX86First = 0,
  kX8632BitData = kX86First,  // data [31..0].
  kX86Bkpt,
  kX86Nop,
  // Define groups of binary operations
  // MR - Memory Register  - opcode [base + disp], reg
  //             - lir operands - 0: base, 1: disp, 2: reg
  // AR - Array Register   - opcode [base + index * scale + disp], reg
  //             - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
  // TR - Thread Register  - opcode fs:[disp], reg - where fs: is equal to Thread::Current()
  //             - lir operands - 0: disp, 1: reg
  // RR - Register Register - opcode reg1, reg2
  //             - lir operands - 0: reg1, 1: reg2
  // RM - Register Memory  - opcode reg, [base + disp]
  //             - lir operands - 0: reg, 1: base, 2: disp
  // RA - Register Array   - opcode reg, [base + index * scale + disp]
  //             - lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
  // RT - Register Thread  - opcode reg, fs:[disp] - where fs: is equal to Thread::Current()
  //             - lir operands - 0: reg, 1: disp
  // RI - Register Immediate - opcode reg, #immediate
  //             - lir operands - 0: reg, 1: immediate
  // MI - Memory Immediate  - opcode [base + disp], #immediate
  //             - lir operands - 0: base, 1: disp, 2: immediate
  // AI - Array Immediate   - opcode [base + index * scale + disp], #immediate
  //             - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: immediate
  // TI - Thread Immediate  - opcode fs:[disp], #immediate - where fs: is equal to Thread::Current()
  //             - lir operands - 0: disp, 1: immediate
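  // As a concrete reading of the scheme above, kX86Add32MR (generated by BinaryOpCode(kX86Add)
  // below) is a 32-bit "add [base + disp], reg" whose lir operands are 0: base, 1: disp, 2: reg.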
#define BinaryOpCode(opcode) \
  opcode ## 8MR, opcode ## 8AR, opcode ## 8TR, \
  opcode ## 8RR, opcode ## 8RM, opcode ## 8RA, opcode ## 8RT, \
  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, opcode ## 8TI, \
  opcode ## 16MR, opcode ## 16AR, opcode ## 16TR, \
  opcode ## 16RR, opcode ## 16RM, opcode ## 16RA, opcode ## 16RT, \
  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, opcode ## 16TI, \
  opcode ## 16RI8, opcode ## 16MI8, opcode ## 16AI8, opcode ## 16TI8, \
  opcode ## 32MR, opcode ## 32AR, opcode ## 32TR, \
  opcode ## 32RR, opcode ## 32RM, opcode ## 32RA, opcode ## 32RT, \
  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, opcode ## 32TI, \
  opcode ## 32RI8, opcode ## 32MI8, opcode ## 32AI8, opcode ## 32TI8
  BinaryOpCode(kX86Add),
  BinaryOpCode(kX86Or),
  BinaryOpCode(kX86Adc),
  BinaryOpCode(kX86Sbb),
  BinaryOpCode(kX86And),
  BinaryOpCode(kX86Sub),
  BinaryOpCode(kX86Xor),
  BinaryOpCode(kX86Cmp),
#undef BinaryOpCode
  kX86Imul16RRI, kX86Imul16RMI, kX86Imul16RAI,
  kX86Imul32RRI, kX86Imul32RMI, kX86Imul32RAI,
  kX86Imul32RRI8, kX86Imul32RMI8, kX86Imul32RAI8,
  kX86Mov8MR, kX86Mov8AR, kX86Mov8TR,
  kX86Mov8RR, kX86Mov8RM, kX86Mov8RA, kX86Mov8RT,
  kX86Mov8RI, kX86Mov8MI, kX86Mov8AI, kX86Mov8TI,
  kX86Mov16MR, kX86Mov16AR, kX86Mov16TR,
  kX86Mov16RR, kX86Mov16RM, kX86Mov16RA, kX86Mov16RT,
  kX86Mov16RI, kX86Mov16MI, kX86Mov16AI, kX86Mov16TI,
  kX86Mov32MR, kX86Mov32AR, kX86Mov32TR,
  kX86Mov32RR, kX86Mov32RM, kX86Mov32RA, kX86Mov32RT,
  kX86Mov32RI, kX86Mov32MI, kX86Mov32AI, kX86Mov32TI,
  kX86Lea32RA,
  // RC - Register CL - opcode reg, CL
  //          - lir operands - 0: reg, 1: CL
  // MC - Memory CL   - opcode [base + disp], CL
  //          - lir operands - 0: base, 1: disp, 2: CL
  // AC - Array CL    - opcode [base + index * scale + disp], CL
  //          - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: CL
#define BinaryShiftOpCode(opcode) \
  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, \
  opcode ## 8RC, opcode ## 8MC, opcode ## 8AC, \
  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, \
  opcode ## 16RC, opcode ## 16MC, opcode ## 16AC, \
  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, \
  opcode ## 32RC, opcode ## 32MC, opcode ## 32AC
  BinaryShiftOpCode(kX86Rol),
  BinaryShiftOpCode(kX86Ror),
  BinaryShiftOpCode(kX86Rcl),
  BinaryShiftOpCode(kX86Rcr),
  BinaryShiftOpCode(kX86Sal),
  BinaryShiftOpCode(kX86Shr),
  BinaryShiftOpCode(kX86Sar),
#undef BinaryShiftOpCode
  kX86Cmc,
#define UnaryOpcode(opcode, reg, mem, array) \
  opcode ## 8 ## reg, opcode ## 8 ## mem, opcode ## 8 ## array, \
  opcode ## 16 ## reg, opcode ## 16 ## mem, opcode ## 16 ## array, \
  opcode ## 32 ## reg, opcode ## 32 ## mem, opcode ## 32 ## array
  UnaryOpcode(kX86Test, RI, MI, AI),
  kX86Test32RR,
  UnaryOpcode(kX86Not, R, M, A),
  UnaryOpcode(kX86Neg, R, M, A),
  UnaryOpcode(kX86Mul, DaR, DaM, DaA),
  UnaryOpcode(kX86Imul, DaR, DaM, DaA),
  UnaryOpcode(kX86Divmod, DaR, DaM, DaA),
  UnaryOpcode(kX86Idivmod, DaR, DaM, DaA),
#undef UnaryOpcode
#define Binary0fOpCode(opcode) \
  opcode ## RR, opcode ## RM, opcode ## RA
  Binary0fOpCode(kX86Movsd),
  kX86MovsdMR,
  kX86MovsdAR,
  Binary0fOpCode(kX86Movss),
  kX86MovssMR,
  kX86MovssAR,
  Binary0fOpCode(kX86Cvtsi2sd),  // int to double
  Binary0fOpCode(kX86Cvtsi2ss),  // int to float
  Binary0fOpCode(kX86Cvttsd2si), // truncating double to int
  Binary0fOpCode(kX86Cvttss2si), // truncating float to int
  Binary0fOpCode(kX86Cvtsd2si),  // rounding double to int
  Binary0fOpCode(kX86Cvtss2si),  // rounding float to int
  Binary0fOpCode(kX86Ucomisd),   // unordered double compare
  Binary0fOpCode(kX86Ucomiss),   // unordered float compare
  Binary0fOpCode(kX86Comisd),    // double compare
  Binary0fOpCode(kX86Comiss),    // float compare
  Binary0fOpCode(kX86Orps),      // or of floating point registers
  Binary0fOpCode(kX86Xorps),     // xor of floating point registers
  Binary0fOpCode(kX86Addsd),     // double add
  Binary0fOpCode(kX86Addss),     // float add
  Binary0fOpCode(kX86Mulsd),     // double multiply
  Binary0fOpCode(kX86Mulss),     // float multiply
  Binary0fOpCode(kX86Cvtsd2ss),  // double to float
  Binary0fOpCode(kX86Cvtss2sd),  // float to double
  Binary0fOpCode(kX86Subsd),     // double subtract
  Binary0fOpCode(kX86Subss),     // float subtract
  Binary0fOpCode(kX86Divsd),     // double divide
  Binary0fOpCode(kX86Divss),     // float divide
  kX86PsrlqRI,                   // right shift of floating point registers
  kX86PsllqRI,                   // left shift of floating point registers
  Binary0fOpCode(kX86Movdxr),    // move into xmm from gpr
  kX86MovdrxRR, kX86MovdrxMR, kX86MovdrxAR,  // move into reg from xmm
  kX86Set8R, kX86Set8M, kX86Set8A,  // set byte depending on condition operand
  kX86Mfence,                    // memory barrier
  Binary0fOpCode(kX86Imul16),    // 16bit multiply
  Binary0fOpCode(kX86Imul32),    // 32bit multiply
  kX86CmpxchgRR, kX86CmpxchgMR, kX86CmpxchgAR,  // compare and exchange
  kX86LockCmpxchgRR, kX86LockCmpxchgMR, kX86LockCmpxchgAR,  // locked compare and exchange
  Binary0fOpCode(kX86Movzx8),    // zero-extend 8-bit value
  Binary0fOpCode(kX86Movzx16),   // zero-extend 16-bit value
  Binary0fOpCode(kX86Movsx8),    // sign-extend 8-bit value
  Binary0fOpCode(kX86Movsx16),   // sign-extend 16-bit value
#undef Binary0fOpCode
  kX86Jcc8, kX86Jcc32,  // jCC rel8/32; lir operands - 0: rel, 1: CC, target assigned
  kX86Jmp8, kX86Jmp32,  // jmp rel8/32; lir operands - 0: rel, target assigned
  kX86JmpR,             // jmp reg; lir operands - 0: reg
  kX86CallR,            // call reg; lir operands - 0: reg
  kX86CallM,            // call [base + disp]; lir operands - 0: base, 1: disp
  kX86CallA,            // call [base + index * scale + disp]
                        // lir operands - 0: base, 1: index, 2: scale, 3: disp
  kX86CallT,            // call fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp
  kX86Ret,              // ret; no lir operands
  kX86StartOfMethod,    // call 0; pop reg; sub reg, # - generate start of method into reg
                        // lir operands - 0: reg
  kX86PcRelLoadRA,      // mov reg, [base + index * scale + PC relative displacement]
                        // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
  kX86PcRelAdr,         // mov reg, PC relative displacement; lir operands - 0: reg, 1: table
  kX86Last
};

/* Instruction assembly field_loc kind */
enum X86EncodingKind {
  kData,                                    // Special case for raw data.
  kNop,                                     // Special case for variable length nop.
  kNullary,                                 // Opcode that takes no arguments.
  kReg, kMem, kArray,                       // R, M and A instruction kinds.
  kMemReg, kArrayReg, kThreadReg,           // MR, AR and TR instruction kinds.
  kRegReg, kRegMem, kRegArray, kRegThread,  // RR, RM, RA and RT instruction kinds.
  kRegRegStore,                             // RR following the store modrm reg-reg encoding rather than the load.
  kRegImm, kMemImm, kArrayImm, kThreadImm,  // RI, MI, AI and TI instruction kinds.
  kRegRegImm, kRegMemImm, kRegArrayImm,     // RRI, RMI and RAI instruction kinds.
  kMovRegImm,                               // Shorter form move RI.
  kShiftRegImm, kShiftMemImm, kShiftArrayImm,  // Shift opcode with immediate.
  kShiftRegCl, kShiftMemCl, kShiftArrayCl,     // Shift opcode with register CL.
  kRegRegReg, kRegRegMem, kRegRegArray,     // RRR, RRM, RRA instruction kinds.
  kRegCond, kMemCond, kArrayCond,           // R, M, A instruction kinds followed by a condition.
  kJmp, kJcc, kCall,                        // Branch instruction kinds.
  kPcRel,                                   // Operation with displacement that is PC relative.
  kMacro,                                   // An instruction composing multiple others.
  kUnimplemented                            // Encoding used when an instruction isn't yet implemented.
};

/* Struct used to define the EncodingMap positions for each X86 opcode */
struct X86EncodingMap {
  X86OpCode opcode;      // e.g. kX86Add32RI
  X86EncodingKind kind;  // Used to discriminate how the skeleton below is interpreted.
  uint64_t flags;
  struct {
    uint8_t prefix1;          // non-zero => a prefix byte
    uint8_t prefix2;          // non-zero => a second prefix byte
    uint8_t opcode;           // 1 byte opcode
    uint8_t extra_opcode1;    // possible extra opcode byte
    uint8_t extra_opcode2;    // possible second extra opcode byte
    // 3-bit opcode that gets encoded in the register bits of the modrm byte, use determined by the
    // encoding kind.
    uint8_t modrm_opcode;
    uint8_t ax_opcode;        // non-zero => shorter encoding for AX as a destination
    uint8_t immediate_bytes;  // number of bytes of immediate
  } skeleton;
  const char* name;
  const char* fmt;
};
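// The EncodingMap table itself is defined in Assemble.cc. Sketched here purely for illustration
// (field values are assumed, not copied from that table), an entry for kX86Add32MR might read:
//   {kX86Add32MR, kMemReg, flags,
//    {0, 0, 0x01, 0, 0, 0, 0, 0},  // no prefixes, opcode 0x01 (add r/m32, r32), no immediate
//    "Add32MR", fmt},
// with flags describing the instruction's resources and fmt its disassembly format string.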

// FIXME: mem barrier type - what do we do for x86?
#define kSY 0
#define kST 0

// Offsets of the low and high halves of a 64-bit value.
#define LOWORD_OFFSET 0
#define HIWORD_OFFSET 4

// Segment override instruction prefix used for quick TLS access to Thread::Current()
// (0x64 is the fs: segment override prefix).
#define THREAD_PREFIX 0x64

#define IS_SIMM8(v) ((-128 <= (v)) && ((v) <= 127))
#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32767))

extern X86EncodingMap EncodingMap[kX86Last];
extern X86ConditionCode X86ConditionEncoding(ConditionCode cond);

}  // namespace art

#endif  // ART_COMPILER_COMPILER_CODEGEN_X86_X86LIR_H_