/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string>
#include <inttypes.h>

#include "codegen_x86.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "mirror/array.h"
#include "mirror/string.h"
#include "x86_lir.h"

namespace art {

static const RegStorage core_regs_arr_32[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_32, rs_rBP, rs_rSI, rs_rDI,
};
static const RegStorage core_regs_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rBX, rs_rX86_SP_64, rs_rBP, rs_rSI, rs_rDI,
#ifdef TARGET_REX_SUPPORT
    rs_r8, rs_r9, rs_r10, rs_r11, rs_r12, rs_r13, rs_r14, rs_r15
#endif
};
static const RegStorage sp_regs_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static const RegStorage sp_regs_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
#ifdef TARGET_REX_SUPPORT
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
#endif
};
static const RegStorage dp_regs_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static const RegStorage dp_regs_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
#ifdef TARGET_REX_SUPPORT
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
#endif
};
static const RegStorage reserved_regs_arr_32[] = {rs_rX86_SP_32};
static const RegStorage reserved_regs_arr_64[] = {rs_rX86_SP_64};
static const RegStorage core_temps_arr_32[] = {rs_rAX, rs_rCX, rs_rDX, rs_rBX};
static const RegStorage core_temps_arr_64[] = {
    rs_rAX, rs_rCX, rs_rDX, rs_rSI, rs_rDI,
#ifdef TARGET_REX_SUPPORT
    rs_r8, rs_r9, rs_r10, rs_r11
#endif
};
static const RegStorage sp_temps_arr_32[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
};
static const RegStorage sp_temps_arr_64[] = {
    rs_fr0, rs_fr1, rs_fr2, rs_fr3, rs_fr4, rs_fr5, rs_fr6, rs_fr7,
#ifdef TARGET_REX_SUPPORT
    rs_fr8, rs_fr9, rs_fr10, rs_fr11, rs_fr12, rs_fr13, rs_fr14, rs_fr15
#endif
};
static const RegStorage dp_temps_arr_32[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
};
static const RegStorage dp_temps_arr_64[] = {
    rs_dr0, rs_dr1, rs_dr2, rs_dr3, rs_dr4, rs_dr5, rs_dr6, rs_dr7,
#ifdef TARGET_REX_SUPPORT
    rs_dr8, rs_dr9, rs_dr10, rs_dr11, rs_dr12, rs_dr13, rs_dr14, rs_dr15
#endif
};

static const std::vector<RegStorage> core_regs_32(core_regs_arr_32,
    core_regs_arr_32 + sizeof(core_regs_arr_32) / sizeof(core_regs_arr_32[0]));
static const std::vector<RegStorage> core_regs_64(core_regs_arr_64,
    core_regs_arr_64 + sizeof(core_regs_arr_64) / sizeof(core_regs_arr_64[0]));
static const std::vector<RegStorage> sp_regs_32(sp_regs_arr_32,
    sp_regs_arr_32 + sizeof(sp_regs_arr_32) / sizeof(sp_regs_arr_32[0]));
static const std::vector<RegStorage> sp_regs_64(sp_regs_arr_64,
    sp_regs_arr_64 + sizeof(sp_regs_arr_64) / sizeof(sp_regs_arr_64[0]));
static const std::vector<RegStorage> dp_regs_32(dp_regs_arr_32,
    dp_regs_arr_32 + sizeof(dp_regs_arr_32) / sizeof(dp_regs_arr_32[0]));
static const std::vector<RegStorage> dp_regs_64(dp_regs_arr_64,
    dp_regs_arr_64 + sizeof(dp_regs_arr_64) / sizeof(dp_regs_arr_64[0]));
static const std::vector<RegStorage> reserved_regs_32(reserved_regs_arr_32,
    reserved_regs_arr_32 + sizeof(reserved_regs_arr_32) / sizeof(reserved_regs_arr_32[0]));
static const std::vector<RegStorage> reserved_regs_64(reserved_regs_arr_64,
    reserved_regs_arr_64 + sizeof(reserved_regs_arr_64) / sizeof(reserved_regs_arr_64[0]));
static const std::vector<RegStorage> core_temps_32(core_temps_arr_32,
    core_temps_arr_32 + sizeof(core_temps_arr_32) / sizeof(core_temps_arr_32[0]));
static const std::vector<RegStorage> core_temps_64(core_temps_arr_64,
    core_temps_arr_64 + sizeof(core_temps_arr_64) / sizeof(core_temps_arr_64[0]));
static const std::vector<RegStorage> sp_temps_32(sp_temps_arr_32,
    sp_temps_arr_32 + sizeof(sp_temps_arr_32) / sizeof(sp_temps_arr_32[0]));
static const std::vector<RegStorage> sp_temps_64(sp_temps_arr_64,
    sp_temps_arr_64 + sizeof(sp_temps_arr_64) / sizeof(sp_temps_arr_64[0]));
static const std::vector<RegStorage> dp_temps_32(dp_temps_arr_32,
    dp_temps_arr_32 + sizeof(dp_temps_arr_32) / sizeof(dp_temps_arr_32[0]));
static const std::vector<RegStorage> dp_temps_64(dp_temps_arr_64,
    dp_temps_arr_64 + sizeof(dp_temps_arr_64) / sizeof(dp_temps_arr_64[0]));

RegStorage rs_rX86_SP;

X86NativeRegisterPool rX86_ARG0;
X86NativeRegisterPool rX86_ARG1;
X86NativeRegisterPool rX86_ARG2;
X86NativeRegisterPool rX86_ARG3;
X86NativeRegisterPool rX86_FARG0;
X86NativeRegisterPool rX86_FARG1;
X86NativeRegisterPool rX86_FARG2;
X86NativeRegisterPool rX86_FARG3;
X86NativeRegisterPool rX86_RET0;
X86NativeRegisterPool rX86_RET1;
X86NativeRegisterPool rX86_INVOKE_TGT;
X86NativeRegisterPool rX86_COUNT;

RegStorage rs_rX86_ARG0;
RegStorage rs_rX86_ARG1;
RegStorage rs_rX86_ARG2;
RegStorage rs_rX86_ARG3;
RegStorage rs_rX86_FARG0;
RegStorage rs_rX86_FARG1;
RegStorage rs_rX86_FARG2;
RegStorage rs_rX86_FARG3;
RegStorage rs_rX86_RET0;
RegStorage rs_rX86_RET1;
RegStorage rs_rX86_INVOKE_TGT;
RegStorage rs_rX86_COUNT;

RegLocation X86Mir2Lir::LocCReturn() {
  return x86_loc_c_return;
}

RegLocation X86Mir2Lir::LocCReturnWide() {
  return x86_loc_c_return_wide;
}

RegLocation X86Mir2Lir::LocCReturnFloat() {
  return x86_loc_c_return_float;
}

RegLocation X86Mir2Lir::LocCReturnDouble() {
  return x86_loc_c_return_double;
}

// Return a target-dependent special register.
RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
  RegStorage res_reg = RegStorage::InvalidReg();
  switch (reg) {
    case kSelf: res_reg = RegStorage::InvalidReg(); break;
    case kSuspend: res_reg = RegStorage::InvalidReg(); break;
    case kLr: res_reg = RegStorage::InvalidReg(); break;
    case kPc: res_reg = RegStorage::InvalidReg(); break;
    case kSp: res_reg = rs_rX86_SP; break;
    case kArg0: res_reg = rs_rX86_ARG0; break;
    case kArg1: res_reg = rs_rX86_ARG1; break;
    case kArg2: res_reg = rs_rX86_ARG2; break;
    case kArg3: res_reg = rs_rX86_ARG3; break;
    case kFArg0: res_reg = rs_rX86_FARG0; break;
    case kFArg1: res_reg = rs_rX86_FARG1; break;
    case kFArg2: res_reg = rs_rX86_FARG2; break;
    case kFArg3: res_reg = rs_rX86_FARG3; break;
    case kRet0: res_reg = rs_rX86_RET0; break;
    case kRet1: res_reg = rs_rX86_RET1; break;
    case kInvokeTgt: res_reg = rs_rX86_INVOKE_TGT; break;
    case kHiddenArg: res_reg = rs_rAX; break;
    case kHiddenFpArg: res_reg = rs_fr0; break;
    case kCount: res_reg = rs_rX86_COUNT; break;
  }
  return res_reg;
}

RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
  // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
  // TODO: This is not 64-bit compliant and depends on new internal ABI.
  switch (arg_num) {
    case 0:
      return rs_rX86_ARG1;
    case 1:
      return rs_rX86_ARG2;
    case 2:
      return rs_rX86_ARG3;
    default:
      return RegStorage::InvalidReg();
  }
}

/*
 * Decode the register id.
 */
uint64_t X86Mir2Lir::GetRegMaskCommon(RegStorage reg) {
  uint64_t seed;
  int shift;
  int reg_id;

  reg_id = reg.GetRegNum();
  /* Double registers in x86 are just a single FP register */
  seed = 1;
  /* FP register starts at bit position 16 */
  shift = reg.IsFloat() ? kX86FPReg0 : 0;
  /* Expand the double register id into single offset */
  shift += reg_id;
  return (seed << shift);
}

uint64_t X86Mir2Lir::GetPCUseDefEncoding() {
  /*
   * FIXME: might make sense to use a virtual resource encoding bit for pc.  Might be
   * able to clean up some of the x86/Arm_Mips differences
   */
  LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86";
  return 0ULL;
}

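// Set up the x86-specific use/def resource masks implied by |flags|: fixed uses/defs of
// rAX, rBX, rCX, rDX, the stack pointer, the x87 FP stack, and rDI for repne scasw.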
void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags) {
  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
  DCHECK(!lir->flags.use_def_invalid);

  // X86-specific resource map setup here.
  if (flags & REG_USE_SP) {
    lir->u.m.use_mask |= ENCODE_X86_REG_SP;
  }

  if (flags & REG_DEF_SP) {
    lir->u.m.def_mask |= ENCODE_X86_REG_SP;
  }

  if (flags & REG_DEFA) {
    SetupRegMask(&lir->u.m.def_mask, rs_rAX.GetReg());
  }

  if (flags & REG_DEFD) {
    SetupRegMask(&lir->u.m.def_mask, rs_rDX.GetReg());
  }
  if (flags & REG_USEA) {
    SetupRegMask(&lir->u.m.use_mask, rs_rAX.GetReg());
  }

  if (flags & REG_USEC) {
    SetupRegMask(&lir->u.m.use_mask, rs_rCX.GetReg());
  }

  if (flags & REG_USED) {
    SetupRegMask(&lir->u.m.use_mask, rs_rDX.GetReg());
  }

  if (flags & REG_USEB) {
    SetupRegMask(&lir->u.m.use_mask, rs_rBX.GetReg());
  }

  // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI.
  if (lir->opcode == kX86RepneScasw) {
    SetupRegMask(&lir->u.m.use_mask, rs_rAX.GetReg());
    SetupRegMask(&lir->u.m.use_mask, rs_rCX.GetReg());
    SetupRegMask(&lir->u.m.use_mask, rs_rDI.GetReg());
    SetupRegMask(&lir->u.m.def_mask, rs_rDI.GetReg());
  }

  if (flags & USE_FP_STACK) {
    lir->u.m.use_mask |= ENCODE_X86_FP_STACK;
    lir->u.m.def_mask |= ENCODE_X86_FP_STACK;
  }
}

/* For dumping instructions */
static const char* x86RegName[] = {
  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};

static const char* x86CondName[] = {
  "O",
  "NO",
  "B/NAE/C",
  "NB/AE/NC",
  "Z/EQ",
  "NZ/NE",
  "BE/NA",
  "NBE/A",
  "S",
  "NS",
  "P/PE",
  "NP/PO",
  "L/NGE",
  "NL/GE",
  "LE/NG",
  "NLE/G"
};

/*
 * Interpret a format string and build a string no longer than size.
 * See format key in Assemble.cc.
 */
std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
  std::string buf;
  size_t i = 0;
  size_t fmt_len = strlen(fmt);
  while (i < fmt_len) {
    if (fmt[i] != '!') {
      buf += fmt[i];
      i++;
    } else {
      i++;
      DCHECK_LT(i, fmt_len);
      char operand_number_ch = fmt[i];
      i++;
      if (operand_number_ch == '!') {
        buf += "!";
      } else {
        int operand_number = operand_number_ch - '0';
        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            DCHECK_LT(static_cast<size_t>(operand), sizeof(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'p': {
            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (RegStorage::IsFloat(operand)) {
              int fp_reg = RegStorage::RegNum(operand);
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              int reg_num = RegStorage::RegNum(operand);
              DCHECK_LT(static_cast<size_t>(reg_num), sizeof(x86RegName));
              buf += x86RegName[reg_num];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}

void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask == ENCODE_ALL) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask & (1ULL << i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask & ENCODE_CCODE) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : "");
    }
    if (mask & ENCODE_LITERAL) {
      strcat(buf, "lit ");
    }

    if (mask & ENCODE_HEAP_REF) {
      strcat(buf, "heap ");
    }
    if (mask & ENCODE_MUST_NOT_ALIAS) {
      strcat(buf, "noalias ");
    }
  }
  if (buf[0]) {
    LOG(INFO) << prefix << ": " << buf;
  }
}

void X86Mir2Lir::AdjustSpillMask() {
  // Adjustment for LR spilling, x86 has no LR so nothing to do here.
  core_spill_mask_ |= (1 << rs_rRET.GetRegNum());
  num_core_spills_++;
}

/*
 * Mark a callee-save fp register as promoted.  Note that
 * vpush/vpop uses contiguous register lists so we must
 * include any holes in the mask.  Associate holes with
 * Dalvik register INVALID_VREG (0xFFFFU).
 */
void X86Mir2Lir::MarkPreservedSingle(int v_reg, RegStorage reg) {
  UNIMPLEMENTED(FATAL) << "MarkPreservedSingle";
}

void X86Mir2Lir::MarkPreservedDouble(int v_reg, RegStorage reg) {
  UNIMPLEMENTED(FATAL) << "MarkPreservedDouble";
}

/* Clobber all regs that might be used by an external C call */
void X86Mir2Lir::ClobberCallerSave() {
  Clobber(rs_rAX);
  Clobber(rs_rCX);
  Clobber(rs_rDX);
  Clobber(rs_rBX);
}

RegLocation X86Mir2Lir::GetReturnWideAlt() {
  RegLocation res = LocCReturnWide();
  DCHECK(res.reg.GetLowReg() == rs_rAX.GetReg());
  DCHECK(res.reg.GetHighReg() == rs_rDX.GetReg());
  Clobber(rs_rAX);
  Clobber(rs_rDX);
  MarkInUse(rs_rAX);
  MarkInUse(rs_rDX);
  MarkWide(res.reg);
  return res;
}

RegLocation X86Mir2Lir::GetReturnAlt() {
  RegLocation res = LocCReturn();
  res.reg.SetReg(rs_rDX.GetReg());
  Clobber(rs_rDX);
  MarkInUse(rs_rDX);
  return res;
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps() {
  LockTemp(rs_rX86_ARG0);
  LockTemp(rs_rX86_ARG1);
  LockTemp(rs_rX86_ARG2);
  LockTemp(rs_rX86_ARG3);
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::FreeCallTemps() {
  FreeTemp(rs_rX86_ARG0);
  FreeTemp(rs_rX86_ARG1);
  FreeTemp(rs_rX86_ARG2);
  FreeTemp(rs_rX86_ARG3);
}

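// Returns true if |opcode| already acts as a full memory barrier on x86: the locked
// cmpxchg variants, xchg with a memory operand, and mfence.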
bool X86Mir2Lir::ProvidesFullMemoryBarrier(X86OpCode opcode) {
  switch (opcode) {
    case kX86LockCmpxchgMR:
    case kX86LockCmpxchgAR:
    case kX86LockCmpxchg8bM:
    case kX86LockCmpxchg8bA:
    case kX86XchgMR:
    case kX86Mfence:
      // Atomic memory instructions provide full barrier.
      return true;
    default:
      break;
  }

  // Conservative if cannot prove it provides full barrier.
  return false;
}

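// Generate a memory barrier of the requested kind. Returns true if any new instruction
// was emitted; on x86 only kStoreLoad may need an explicit mfence, the other kinds only
// require a scheduling barrier.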
bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will update it.
  LIR* mem_barrier = last_lir_insn_;

  bool ret = false;
  /*
   * According to the JSR-133 Cookbook, on x86 only StoreLoad barriers need a memory fence. All other
   * barriers (LoadLoad, LoadStore, StoreStore) are nops due to the x86 memory model. For those cases,
   * all we need to ensure is that there is a scheduling barrier in place.
   */
  if (barrier_kind == kStoreLoad) {
    // If no LIR exists already that can be used as a barrier, then generate an mfence.
    if (mem_barrier == nullptr) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }

    // If the last instruction does not provide a full barrier, then insert an mfence.
    if (ProvidesFullMemoryBarrier(static_cast<X86OpCode>(mem_barrier->opcode)) == false) {
      mem_barrier = NewLIR0(kX86Mfence);
      ret = true;
    }
  }

  // Now ensure that a scheduling barrier is in place.
  if (mem_barrier == nullptr) {
    GenBarrier();
  } else {
    // Mark as a scheduling barrier.
    DCHECK(!mem_barrier->flags.use_def_invalid);
    mem_barrier->u.m.def_mask = ENCODE_ALL;
  }
  return ret;
#else
  return false;
#endif
}

// Alloc a pair of core registers, or a double.
RegStorage X86Mir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
    return AllocTempDouble();
  }
  RegStorage low_reg = AllocTemp();
  RegStorage high_reg = AllocTemp();
  return RegStorage::MakeRegPair(low_reg, high_reg);
}

RegStorage X86Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
    return AllocTempSingle();
  }
  return AllocTemp();
}

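// Build the register pools for the selected target (x86 or x86-64) and apply the
// target-specific adjustments, such as aliasing each single-precision xmm register to
// its double-precision counterpart.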
void X86Mir2Lir::CompilerInitializeRegAlloc() {
  if (Gen64Bit()) {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_64, sp_regs_64, dp_regs_64,
                                          reserved_regs_64, core_temps_64, sp_temps_64, dp_temps_64);
  } else {
    reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs_32, sp_regs_32, dp_regs_32,
                                          reserved_regs_32, core_temps_32, sp_temps_32, dp_temps_32);
  }

  // Target-specific adjustments.

  // Alias single precision xmm to double xmms.
  // TODO: as needed, add larger vector sizes - alias all to the largest.
  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    int sp_reg_num = info->GetReg().GetRegNum();
    RegStorage dp_reg = RegStorage::Solo64(RegStorage::kFloatingPoint | sp_reg_num);
    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    // 64-bit xmm vector register's master storage should refer to itself.
    DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
    // Redirect 32-bit vector's master storage to 64-bit vector.
    info->SetMaster(dp_reg_info);
  }

  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
  // TODO: adjust for x86/hard float calling convention.
  reg_pool_->next_core_reg_ = 2;
  reg_pool_->next_sp_reg_ = 2;
  reg_pool_->next_dp_reg_ = 1;
}

void X86Mir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) {
  DCHECK(rl_keep.wide);
  DCHECK(rl_free.wide);
  int free_low = rl_free.reg.GetLowReg();
  int free_high = rl_free.reg.GetHighReg();
  int keep_low = rl_keep.reg.GetLowReg();
  int keep_high = rl_keep.reg.GetHighReg();
  if ((free_low != keep_low) && (free_low != keep_high) &&
      (free_high != keep_low) && (free_high != keep_high)) {
    // No overlap, free both.
    FreeTemp(rl_free.reg);
  }
}

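// Store the promoted core registers recorded in core_spill_mask_ to their frame slots;
// UnSpillCoreRegs below reloads them.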
void X86Mir2Lir::SpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

void X86Mir2Lir::UnSpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register.
  uint32_t mask = core_spill_mask_ & ~(1 << rs_rRET.GetRegNum());
  int offset = frame_size_ - (GetInstructionSetPointerSize(cu_->instruction_set) * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadWordDisp(rs_rX86_SP, offset, RegStorage::Solo32(reg));
      offset += GetInstructionSetPointerSize(cu_->instruction_set);
    }
  }
}

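// An unconditional branch on x86 is either a short (8-bit) or a 32-bit jmp.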
bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}

bool X86Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
  return true;
}

RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
  if (UNLIKELY(is_volatile)) {
    // On x86, atomic 64-bit load/store requires an fp register.
    // Smaller aligned load/store is atomic for both core and fp registers.
    if (size == k64 || size == kDouble) {
      return kFPReg;
    }
  }
  return RegClassBySize(size);
}

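// The constructor checks the encoding map ordering (debug builds only) and binds the
// argument, return, and special-purpose register aliases for the 32-bit or 64-bit target.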
X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena, bool gen64bit)
    : Mir2Lir(cu, mir_graph, arena),
      base_of_code_(nullptr), store_method_addr_(false), store_method_addr_used_(false),
      method_address_insns_(arena, 100, kGrowableArrayMisc),
      class_type_address_insns_(arena, 100, kGrowableArrayMisc),
      call_method_insns_(arena, 100, kGrowableArrayMisc),
      stack_decrement_(nullptr), stack_increment_(nullptr), gen64bit_(gen64bit) {
  if (kIsDebugBuild) {
    for (int i = 0; i < kX86Last; i++) {
      if (X86Mir2Lir::EncodingMap[i].opcode != i) {
        LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
                   << " is wrong: expecting " << i << ", seeing "
                   << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
      }
    }
  }
  if (Gen64Bit()) {
    rs_rX86_SP = rs_rX86_SP_64;

    rs_rX86_ARG0 = rs_rDI;
    rs_rX86_ARG1 = rs_rSI;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rCX;
    rX86_ARG0 = rDI;
    rX86_ARG1 = rSI;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rCX;
    // TODO: ARG4(r8), ARG5(r9), floating point args.
  } else {
    rs_rX86_SP = rs_rX86_SP_32;

    rs_rX86_ARG0 = rs_rAX;
    rs_rX86_ARG1 = rs_rCX;
    rs_rX86_ARG2 = rs_rDX;
    rs_rX86_ARG3 = rs_rBX;
    rX86_ARG0 = rAX;
    rX86_ARG1 = rCX;
    rX86_ARG2 = rDX;
    rX86_ARG3 = rBX;
  }
  rs_rX86_FARG0 = rs_rAX;
  rs_rX86_FARG1 = rs_rCX;
  rs_rX86_FARG2 = rs_rDX;
  rs_rX86_FARG3 = rs_rBX;
  rs_rX86_RET0 = rs_rAX;
  rs_rX86_RET1 = rs_rDX;
  rs_rX86_INVOKE_TGT = rs_rAX;
  rs_rX86_COUNT = rs_rCX;
  rX86_FARG0 = rAX;
  rX86_FARG1 = rCX;
  rX86_FARG2 = rDX;
  rX86_FARG3 = rBX;
  rX86_RET0 = rAX;
  rX86_RET1 = rDX;
  rX86_INVOKE_TGT = rAX;
  rX86_COUNT = rCX;
}

Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena, false);
}

Mir2Lir* X86_64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                             ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena, true);
}

// Not used in x86.
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

// Not used in x86.
RegStorage X86Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return RegStorage::InvalidReg();
}

LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
  LOG(FATAL) << "Unexpected use of CheckSuspendUsingLoad in x86";
  return nullptr;
}

uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].flags;
}

const char* X86Mir2Lir::GetTargetInstName(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].name;
}

const char* X86Mir2Lir::GetTargetInstFmt(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].fmt;
}

void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  // Can we do this directly to memory?
  rl_dest = UpdateLocWide(rl_dest);
  if ((rl_dest.location == kLocDalvikFrame) ||
      (rl_dest.location == kLocCompilerTemp)) {
    int32_t val_lo = Low32Bits(value);
    int32_t val_hi = High32Bits(value);
    int r_base = TargetReg(kSp).GetReg();
    int displacement = SRegOffset(rl_dest.s_reg_low);

    LIR* store = NewLIR3(kX86Mov32MI, r_base, displacement + LOWORD_OFFSET, val_lo);
    AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    store = NewLIR3(kX86Mov32MI, r_base, displacement + HIWORD_OFFSET, val_hi);
    AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    return;
  }

  // Just use the standard code to do the generation.
  Mir2Lir::GenConstWide(rl_dest, value);
}

// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc
void X86Mir2Lir::DumpRegLocation(RegLocation loc) {
  LOG(INFO) << "location: " << loc.location << ','
            << (loc.wide ? " w" : " ")
            << (loc.defined ? " D" : " ")
            << (loc.is_const ? " c" : " ")
            << (loc.fp ? " F" : " ")
            << (loc.core ? " C" : " ")
            << (loc.ref ? " r" : " ")
            << (loc.high_word ? " h" : " ")
            << (loc.home ? " H" : " ")
            << ", low: " << static_cast<int>(loc.reg.GetLowReg())
            << ", high: " << static_cast<int>(loc.reg.GetHighReg())
            << ", s_reg: " << loc.s_reg_low
            << ", orig: " << loc.orig_sreg;
}

void X86Mir2Lir::Materialize() {
  // A good place to put the analysis before starting.
  AnalyzeMIR();

  // Now continue with regular code generation.
  Mir2Lir::Materialize();
}

void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                   SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction, that will be filled
   * in at 'link time'.  For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the move instruction with the unique pointer and save index, dex_file, and type.
  LIR* move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(),
                     static_cast<int>(target_method_id_ptr), target_method_idx,
                     WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(move);
  method_address_insns_.Insert(move);
}

void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction, that will be filled
   * in at 'link time'.  For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx);
  uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);

  // Generate the move instruction with the unique pointer and save index and type.
  LIR* move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg).GetReg(),
                     static_cast<int>(ptr), type_idx);
  AppendLIR(move);
  class_type_address_insns_.Insert(move);
}

LIR* X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
  /*
   * For x86, just generate a 32 bit call relative instruction, that will be filled
   * in at 'link time'.  For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;
  const DexFile::MethodId& target_method_id = target_dex_file->GetMethodId(target_method_idx);
  uintptr_t target_method_id_ptr = reinterpret_cast<uintptr_t>(&target_method_id);

  // Generate the call instruction with the unique pointer and save index, dex_file, and type.
  LIR* call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(target_method_id_ptr),
                     target_method_idx, WrapPointer(const_cast<DexFile*>(target_dex_file)), type);
  AppendLIR(call);
  call_method_insns_.Insert(call);
  return call;
}

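// x86 keeps no literal pools as such; instead, resolve the recorded method-address moves,
// class-type moves, and PC-relative calls into compiler-driver patches, then run the
// common implementation.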
void X86Mir2Lir::InstallLiteralPools() {
  // These are handled differently for x86.
  DCHECK(code_literal_list_ == nullptr);
  DCHECK(method_literal_list_ == nullptr);
  DCHECK(class_literal_list_ == nullptr);

  // Handle the fixups for methods.
  for (uint32_t i = 0; i < method_address_insns_.Size(); i++) {
    LIR* p = method_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx,
                                         cu_->method_idx, cu_->invoke_type,
                                         target_method_idx, target_dex_file,
                                         static_cast<InvokeType>(p->operands[4]),
                                         patch_offset);
  }

  // Handle the fixups for class types.
  for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
    LIR* p = class_type_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target_method_idx = p->operands[2];

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx,
                                        cu_->method_idx, target_method_idx, patch_offset);
  }

  // And now the PC-relative calls to methods.
  for (uint32_t i = 0; i < call_method_insns_.Size(); i++) {
    LIR* p = call_method_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86CallI);
    uint32_t target_method_idx = p->operands[1];
    const DexFile* target_dex_file =
        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[2]));

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx,
                                               cu_->method_idx, cu_->invoke_type,
                                               target_method_idx, target_dex_file,
                                               static_cast<InvokeType>(p->operands[3]),
                                               patch_offset, -4 /* offset */);
  }

  // And do the normal processing.
  Mir2Lir::InstallLiteralPools();
}

/*
 * Fast string.index_of(I) & (II).  Inline check for simple case of char <= 0xFFFF,
 * otherwise bails to standard library code.
 */
bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers.

  // EAX: 16 bit character being searched.
  // ECX: count: number of words to be searched.
  // EDI: String being searched.
  // EDX: temporary during execution.
  // EBX: temporary during execution.

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start;  // Note: only present in III flavor or IndexOf.

  uint32_t char_value =
      rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;

  if (char_value > 0xFFFF) {
    // We have to punt to the real String.indexOf.
    return false;
  }

  // Okay, we are committed to inlining this.
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);

  // Is the string non-NULL?
  LoadValueDirectFixed(rl_obj, rs_rDX);
  GenNullCheck(rs_rDX, info->opt_flags);
  info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.

  // Does the character fit in 16 bits?
  LIR* slowpath_branch = nullptr;
  if (rl_char.is_const) {
    // We need the value in EAX.
    LoadConstantNoClobber(rs_rAX, char_value);
  } else {
    // Character is not a constant; compare at runtime.
    LoadValueDirectFixed(rl_char, rs_rAX);
    slowpath_branch = OpCmpImmBranch(kCondGt, rs_rAX, 0xFFFF, nullptr);
  }

  // From here down, we know that we are looking for a char that fits in 16 bits.
  // Location of reference to data array within the String object.
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count within the String object.
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within data array.
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data within array_.
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

  // Character is in EAX.
  // Object pointer is in EDX.

  // We need to preserve EDI, but have no spare registers, so push it on the stack.
  // We have to remember that all stack addresses after this are offset by sizeof(EDI).
  NewLIR1(kX86Push32R, rs_rDI.GetReg());

  // Compute the number of words to search in to rCX.
  Load32Disp(rs_rDX, count_offset, rs_rCX);
  LIR* length_compare = nullptr;
  int start_value = 0;
  bool is_index_on_stack = false;
  if (zero_based) {
    // We have to handle an empty string.  Use special instruction JECXZ.
    length_compare = NewLIR0(kX86Jecxz8);
  } else {
    rl_start = info->args[2];
    // We have to offset by the start index.
    if (rl_start.is_const) {
      start_value = mir_graph_->ConstantValue(rl_start.orig_sreg);
      start_value = std::max(start_value, 0);

      // Is the start > count?
      length_compare = OpCmpImmBranch(kCondLe, rs_rCX, start_value, nullptr);

      if (start_value != 0) {
        OpRegImm(kOpSub, rs_rCX, start_value);
      }
    } else {
      // Runtime start index.
      rl_start = UpdateLocTyped(rl_start, kCoreReg);
      if (rl_start.location == kLocPhysReg) {
        // Handle "start index < 0" case.
        OpRegReg(kOpXor, rs_rBX, rs_rBX);
        OpRegReg(kOpCmp, rl_start.reg, rs_rBX);
        OpCondRegReg(kOpCmov, kCondLt, rl_start.reg, rs_rBX);

        // The length of the string should be greater than the start index.
        length_compare = OpCmpBranch(kCondLe, rs_rCX, rl_start.reg, nullptr);
        OpRegReg(kOpSub, rs_rCX, rl_start.reg);
        if (rl_start.reg == rs_rDI) {
          // The special case.  We will use EDI further, so let's put the start index on the stack.
          NewLIR1(kX86Push32R, rs_rDI.GetReg());
          is_index_on_stack = true;
        }
      } else {
        // Load the start index from stack, remembering that we pushed EDI.
        int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
        Load32Disp(rs_rX86_SP, displacement, rs_rBX);
        OpRegReg(kOpXor, rs_rDI, rs_rDI);
        OpRegReg(kOpCmp, rs_rBX, rs_rDI);
        OpCondRegReg(kOpCmov, kCondLt, rs_rBX, rs_rDI);

        length_compare = OpCmpBranch(kCondLe, rs_rCX, rs_rBX, nullptr);
        OpRegReg(kOpSub, rs_rCX, rs_rBX);
        // Put the start index on the stack.
        NewLIR1(kX86Push32R, rs_rBX.GetReg());
        is_index_on_stack = true;
      }
    }
  }
  DCHECK(length_compare != nullptr);

  // ECX now contains the count in words to be searched.

  // Load the address of the string into EBX.
  // The string starts at VALUE(String) + 2 * OFFSET(String) + DATA_OFFSET.
  Load32Disp(rs_rDX, value_offset, rs_rDI);
  Load32Disp(rs_rDX, offset_offset, rs_rBX);
  OpLea(rs_rBX, rs_rDI, rs_rBX, 1, data_offset);

  // Now compute into EDI where the search will start.
  if (zero_based || rl_start.is_const) {
    if (start_value == 0) {
      OpRegCopy(rs_rDI, rs_rBX);
    } else {
      NewLIR3(kX86Lea32RM, rs_rDI.GetReg(), rs_rBX.GetReg(), 2 * start_value);
    }
  } else {
    if (is_index_on_stack == true) {
      // Load the start index from the stack.
      NewLIR1(kX86Pop32R, rs_rDX.GetReg());
      OpLea(rs_rDI, rs_rBX, rs_rDX, 1, 0);
    } else {
      OpLea(rs_rDI, rs_rBX, rl_start.reg, 1, 0);
    }
  }

  // EDI now contains the start of the string to be searched.
  // We are all prepared to do the search for the character.
  NewLIR0(kX86RepneScasw);

  // Did we find a match?
  LIR* failed_branch = OpCondBranch(kCondNe, nullptr);

  // Yes, we matched.  Compute the index of the result.
  // index = ((curr_ptr - orig_ptr) / 2) - 1.
  OpRegReg(kOpSub, rs_rDI, rs_rBX);
  OpRegImm(kOpAsr, rs_rDI, 1);
  NewLIR3(kX86Lea32RM, rl_return.reg.GetReg(), rs_rDI.GetReg(), -1);
  LIR* all_done = NewLIR1(kX86Jmp8, 0);

  // Failed to match; return -1.
  LIR* not_found = NewLIR0(kPseudoTargetLabel);
  length_compare->target = not_found;
  failed_branch->target = not_found;
  LoadConstantNoClobber(rl_return.reg, -1);

  // And join up at the end.
  all_done->target = NewLIR0(kPseudoTargetLabel);
  // Restore EDI from the stack.
  NewLIR1(kX86Pop32R, rs_rDI.GetReg());

  // Out of line code returns here.
  if (slowpath_branch != nullptr) {
    LIR* return_point = NewLIR0(kPseudoTargetLabel);
    AddIntrinsicSlowPath(info, slowpath_branch, return_point);
  }

  StoreValue(rl_dest, rl_return);
  return true;
}

/*
 * @brief Enter a 32 bit quantity into the FDE buffer
 * @param buf FDE buffer.
 * @param data Data value.
 */
static void PushWord(std::vector<uint8_t>& buf, int data) {
  buf.push_back(data & 0xff);
  buf.push_back((data >> 8) & 0xff);
  buf.push_back((data >> 16) & 0xff);
  buf.push_back((data >> 24) & 0xff);
}

/*
 * @brief Enter an 'advance LOC' into the FDE buffer
 * @param buf FDE buffer.
 * @param increment Amount by which to increase the current location.
 */
static void AdvanceLoc(std::vector<uint8_t>& buf, uint32_t increment) {
  if (increment < 64) {
    // Encoding in opcode.
    buf.push_back(0x1 << 6 | increment);
  } else if (increment < 256) {
    // Single byte delta.
    buf.push_back(0x02);
    buf.push_back(increment);
  } else if (increment < 256 * 256) {
    // Two byte delta.
    buf.push_back(0x03);
    buf.push_back(increment & 0xff);
    buf.push_back((increment >> 8) & 0xff);
  } else {
    // Four byte delta.
    buf.push_back(0x04);
    PushWord(buf, increment);
  }
}


std::vector<uint8_t>* X86CFIInitialization() {
  return X86Mir2Lir::ReturnCommonCallFrameInformation();
}

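// Build the common CIE shared by all methods: version, augmentation, code/data alignment,
// return-address column, and the initial rule CFA = ESP + 4 with the return address stored
// at CFA - 4.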
std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation() {
  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;

  // Length of the CIE (except for this field).
  PushWord(*cfi_info, 16);

  // CIE id.
  PushWord(*cfi_info, 0xFFFFFFFFU);

  // Version: 3.
  cfi_info->push_back(0x03);

  // Augmentation: empty string.
  cfi_info->push_back(0x0);

  // Code alignment: 1.
  cfi_info->push_back(0x01);

  // Data alignment: -4.
  cfi_info->push_back(0x7C);

  // Return address register (R8).
  cfi_info->push_back(0x08);

  // Initial return PC is 4(ESP): DW_CFA_def_cfa R4 4.
  cfi_info->push_back(0x0C);
  cfi_info->push_back(0x04);
  cfi_info->push_back(0x04);

  // Return address location: 0(SP): DW_CFA_offset R8 1 (* -4);.
  cfi_info->push_back(0x2 << 6 | 0x08);
  cfi_info->push_back(0x01);

  // And 2 Noops to align to 4 byte boundary.
  cfi_info->push_back(0x0);
  cfi_info->push_back(0x0);

  DCHECK_EQ(cfi_info->size() & 3, 0U);
  return cfi_info;
}

static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
  uint8_t buffer[12];
  uint8_t* ptr = EncodeUnsignedLeb128(buffer, value);
  for (uint8_t* p = buffer; p < ptr; p++) {
    buf.push_back(*p);
  }
}

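// Build the per-method FDE: header fields to be patched by the linker, the method's
// address range, and CFA-offset updates around the prologue stack decrement and the
// epilogue stack increment.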
std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;

  // Generate the FDE for the method.
  DCHECK_NE(data_offset_, 0U);

  // Length (will be filled in later in this routine).
  PushWord(*cfi_info, 0);

  // CIE_pointer (can be filled in by linker); might be left at 0 if there is only
  // one CIE for the whole debug_frame section.
  PushWord(*cfi_info, 0);

  // 'initial_location' (filled in by linker).
  PushWord(*cfi_info, 0);

  // 'address_range' (number of bytes in the method).
  PushWord(*cfi_info, data_offset_);

  // The instructions in the FDE.
  if (stack_decrement_ != nullptr) {
    // Advance LOC to just past the stack decrement.
    uint32_t pc = NEXT_LIR(stack_decrement_)->offset;
    AdvanceLoc(*cfi_info, pc);

    // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size.
    cfi_info->push_back(0x0e);
    EncodeUnsignedLeb128(*cfi_info, frame_size_);

    // We continue with that stack until the epilogue.
    if (stack_increment_ != nullptr) {
      uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
      AdvanceLoc(*cfi_info, new_pc - pc);

      // We probably have code snippets after the epilogue, so save the
      // current state: DW_CFA_remember_state.
      cfi_info->push_back(0x0a);

      // We have now popped the stack: DW_CFA_def_cfa_offset 4.  There is only the return
      // PC on the stack now.
      cfi_info->push_back(0x0e);
      EncodeUnsignedLeb128(*cfi_info, 4);

      // Everything after that is the same as before the epilogue.
      // Stack bump was followed by RET instruction.
      LIR* post_ret_insn = NEXT_LIR(NEXT_LIR(stack_increment_));
      if (post_ret_insn != nullptr) {
        pc = new_pc;
        new_pc = post_ret_insn->offset;
        AdvanceLoc(*cfi_info, new_pc - pc);
        // Restore the state: DW_CFA_restore_state.
        cfi_info->push_back(0x0b);
      }
    }
  }

  // Padding to a multiple of 4.
  while ((cfi_info->size() & 3) != 0) {
    // DW_CFA_nop is encoded as 0.
    cfi_info->push_back(0);
  }

  // Set the length of the FDE inside the generated bytes.
  uint32_t length = cfi_info->size() - 4;
  (*cfi_info)[0] = length;
  (*cfi_info)[1] = length >> 8;
  (*cfi_info)[2] = length >> 16;
  (*cfi_info)[3] = length >> 24;
  return cfi_info;
}

}  // namespace art