/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string>
#include <inttypes.h>

#include "codegen_x86.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "x86_lir.h"

namespace art {

// FIXME: restore "static" when usage uncovered
/*static*/ int core_regs[] = {
  rAX, rCX, rDX, rBX, rX86_SP, rBP, rSI, rDI
#ifdef TARGET_REX_SUPPORT
  r8, r9, r10, r11, r12, r13, r14, r15
#endif
};
/*static*/ int ReservedRegs[] = {rX86_SP};
/*static*/ int core_temps[] = {rAX, rCX, rDX, rBX};
/*static*/ int FpRegs[] = {
  fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
#ifdef TARGET_REX_SUPPORT
  fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
#endif
};
/*static*/ int fp_temps[] = {
  fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
#ifdef TARGET_REX_SUPPORT
  fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
#endif
};

RegLocation X86Mir2Lir::LocCReturn() {
  RegLocation res = X86_LOC_C_RETURN;
  return res;
}

RegLocation X86Mir2Lir::LocCReturnWide() {
  RegLocation res = X86_LOC_C_RETURN_WIDE;
  return res;
}

RegLocation X86Mir2Lir::LocCReturnFloat() {
  RegLocation res = X86_LOC_C_RETURN_FLOAT;
  return res;
}

RegLocation X86Mir2Lir::LocCReturnDouble() {
  RegLocation res = X86_LOC_C_RETURN_DOUBLE;
  return res;
}

// Return a target-dependent special register.
int X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
  int res = INVALID_REG;
  switch (reg) {
    case kSelf: res = rX86_SELF; break;
    case kSuspend: res = rX86_SUSPEND; break;
    case kLr: res = rX86_LR; break;
    case kPc: res = rX86_PC; break;
    case kSp: res = rX86_SP; break;
    case kArg0: res = rX86_ARG0; break;
    case kArg1: res = rX86_ARG1; break;
    case kArg2: res = rX86_ARG2; break;
    case kArg3: res = rX86_ARG3; break;
    case kFArg0: res = rX86_FARG0; break;
    case kFArg1: res = rX86_FARG1; break;
    case kFArg2: res = rX86_FARG2; break;
    case kFArg3: res = rX86_FARG3; break;
    case kRet0: res = rX86_RET0; break;
    case kRet1: res = rX86_RET1; break;
    case kInvokeTgt: res = rX86_INVOKE_TGT; break;
    case kHiddenArg: res = rAX; break;
    case kHiddenFpArg: res = fr0; break;
    case kCount: res = rX86_COUNT; break;
  }
  return res;
}

int X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
  // For the 32-bit internal ABI, the first 3 arguments are passed in registers.
  // TODO: This is not 64-bit compliant and depends on new internal ABI.
  switch (arg_num) {
    case 0:
      return rX86_ARG1;
    case 1:
      return rX86_ARG2;
    case 2:
      return rX86_ARG3;
    default:
      return INVALID_REG;
  }
}

// Create a double from a pair of singles.
int X86Mir2Lir::S2d(int low_reg, int high_reg) {
  return X86_S2D(low_reg, high_reg);
}

// Return mask to strip off fp reg flags and bias.
uint32_t X86Mir2Lir::FpRegMask() {
  return X86_FP_REG_MASK;
}

// True if both regs single, both core or both double.
bool X86Mir2Lir::SameRegType(int reg1, int reg2) {
  return (X86_REGTYPE(reg1) == X86_REGTYPE(reg2));
}

/*
 * Decode the register id.
 */
uint64_t X86Mir2Lir::GetRegMaskCommon(int reg) {
  uint64_t seed;
  int shift;
  int reg_id;

  reg_id = reg & 0xf;
  /* Double registers in x86 are just a single FP register */
  seed = 1;
  /* FP register starts at bit position 16 */
  shift = X86_FPREG(reg) ? kX86FPReg0 : 0;
  /* Expand the double register id into single offset */
  shift += reg_id;
  return (seed << shift);
}

uint64_t X86Mir2Lir::GetPCUseDefEncoding() {
  /*
   * FIXME: might make sense to use a virtual resource encoding bit for pc. Might be
   * able to clean up some of the x86/Arm_Mips differences
   */
  LOG(FATAL) << "Unexpected call to GetPCUseDefEncoding for x86";
  return 0ULL;
}

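// Fill in the x86-specific use/def resource masks of an LIR based on its attribute flags.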
void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags) {
  DCHECK_EQ(cu_->instruction_set, kX86);
  DCHECK(!lir->flags.use_def_invalid);

  // X86-specific resource map setup here.
  if (flags & REG_USE_SP) {
    lir->u.m.use_mask |= ENCODE_X86_REG_SP;
  }

  if (flags & REG_DEF_SP) {
    lir->u.m.def_mask |= ENCODE_X86_REG_SP;
  }

  if (flags & REG_DEFA) {
    SetupRegMask(&lir->u.m.def_mask, rAX);
  }

  if (flags & REG_DEFD) {
    SetupRegMask(&lir->u.m.def_mask, rDX);
  }
  if (flags & REG_USEA) {
    SetupRegMask(&lir->u.m.use_mask, rAX);
  }

  if (flags & REG_USEC) {
    SetupRegMask(&lir->u.m.use_mask, rCX);
  }

  if (flags & REG_USED) {
    SetupRegMask(&lir->u.m.use_mask, rDX);
  }

  if (flags & REG_USEB) {
    SetupRegMask(&lir->u.m.use_mask, rBX);
  }

  // Fixup hard to describe instruction: Uses rAX, rCX, rDI; sets rDI.
  if (lir->opcode == kX86RepneScasw) {
    SetupRegMask(&lir->u.m.use_mask, rAX);
    SetupRegMask(&lir->u.m.use_mask, rCX);
    SetupRegMask(&lir->u.m.use_mask, rDI);
    SetupRegMask(&lir->u.m.def_mask, rDI);
  }
}

/* For dumping instructions */
static const char* x86RegName[] = {
  "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};

static const char* x86CondName[] = {
  "O",
  "NO",
  "B/NAE/C",
  "NB/AE/NC",
  "Z/EQ",
  "NZ/NE",
  "BE/NA",
  "NBE/A",
  "S",
  "NS",
  "P/PE",
  "NP/PO",
  "L/NGE",
  "NL/GE",
  "LE/NG",
  "NLE/G"
};

/*
 * Interpret a format string and build a string no longer than size
 * See format key in Assemble.cc.
 */
std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
  std::string buf;
  size_t i = 0;
  size_t fmt_len = strlen(fmt);
  while (i < fmt_len) {
    if (fmt[i] != '!') {
      buf += fmt[i];
      i++;
    } else {
      i++;
      DCHECK_LT(i, fmt_len);
      char operand_number_ch = fmt[i];
      i++;
      if (operand_number_ch == '!') {
        buf += "!";
      } else {
        int operand_number = operand_number_ch - '0';
        DCHECK_LT(operand_number, 6);  // Expect up to 6 LIR operands.
        DCHECK_LT(i, fmt_len);
        int operand = lir->operands[operand_number];
        switch (fmt[i]) {
          case 'c':
            DCHECK_LT(static_cast<size_t>(operand), sizeof(x86CondName));
            buf += x86CondName[operand];
            break;
          case 'd':
            buf += StringPrintf("%d", operand);
            break;
          case 'p': {
            EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
            buf += StringPrintf("0x%08x", tab_rec->offset);
            break;
          }
          case 'r':
            if (X86_FPREG(operand) || X86_DOUBLEREG(operand)) {
              int fp_reg = operand & X86_FP_REG_MASK;
              buf += StringPrintf("xmm%d", fp_reg);
            } else {
              DCHECK_LT(static_cast<size_t>(operand), sizeof(x86RegName));
              buf += x86RegName[operand];
            }
            break;
          case 't':
            buf += StringPrintf("0x%08" PRIxPTR " (L%p)",
                                reinterpret_cast<uintptr_t>(base_addr) + lir->offset + operand,
                                lir->target);
            break;
          default:
            buf += StringPrintf("DecodeError '%c'", fmt[i]);
            break;
        }
        i++;
      }
    }
  }
  return buf;
}

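// Pretty-print a use/def resource mask (register, condition-code and memory bits) for debug output.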
void X86Mir2Lir::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask == ENCODE_ALL) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kX86RegEnd; i++) {
      if (mask & (1ULL << i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask & ENCODE_CCODE) {
      strcat(buf, "cc ");
    }
    /* Memory bits */
    if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(x86LIR->flags.alias_info),
               (DECODE_ALIAS_INFO_WIDE(x86LIR->flags.alias_info)) ? "(+1)" : "");
    }
    if (mask & ENCODE_LITERAL) {
      strcat(buf, "lit ");
    }

    if (mask & ENCODE_HEAP_REF) {
      strcat(buf, "heap ");
    }
    if (mask & ENCODE_MUST_NOT_ALIAS) {
      strcat(buf, "noalias ");
    }
  }
  if (buf[0]) {
    LOG(INFO) << prefix << ": " << buf;
  }
}

void X86Mir2Lir::AdjustSpillMask() {
  // Adjustment for LR spilling, x86 has no LR so nothing to do here
  core_spill_mask_ |= (1 << rRET);
  num_core_spills_++;
}

/*
 * Mark a callee-save fp register as promoted. Note that
 * vpush/vpop uses contiguous register lists so we must
 * include any holes in the mask. Associate holes with
 * Dalvik register INVALID_VREG (0xFFFFU).
 */
void X86Mir2Lir::MarkPreservedSingle(int v_reg, int reg) {
  UNIMPLEMENTED(WARNING) << "MarkPreservedSingle";
#if 0
  LOG(FATAL) << "No support yet for promoted FP regs";
#endif
}

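// Flush a live, dirty register pair back to its home Dalvik frame location and mark it clean.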
void X86Mir2Lir::FlushRegWide(int reg1, int reg2) {
  RegisterInfo* info1 = GetRegInfo(reg1);
  RegisterInfo* info2 = GetRegInfo(reg2);
  DCHECK(info1 && info2 && info1->pair && info2->pair &&
         (info1->partner == info2->reg) &&
         (info2->partner == info1->reg));
  if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
    if (!(info1->is_temp && info2->is_temp)) {
      /* Should not happen. If it does, there's a problem in eval_loc */
      LOG(FATAL) << "Long half-temp, half-promoted";
    }

    info1->dirty = false;
    info2->dirty = false;
    if (mir_graph_->SRegToVReg(info2->s_reg) < mir_graph_->SRegToVReg(info1->s_reg))
      info1 = info2;
    int v_reg = mir_graph_->SRegToVReg(info1->s_reg);
    StoreBaseDispWide(rX86_SP, VRegOffset(v_reg), info1->reg, info1->partner);
  }
}

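// Flush a single live, dirty register back to its home Dalvik frame location and mark it clean.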
void X86Mir2Lir::FlushReg(int reg) {
  RegisterInfo* info = GetRegInfo(reg);
  if (info->live && info->dirty) {
    info->dirty = false;
    int v_reg = mir_graph_->SRegToVReg(info->s_reg);
    StoreBaseDisp(rX86_SP, VRegOffset(v_reg), reg, kWord);
  }
}

/* Give access to the target-dependent FP register encoding to common code */
bool X86Mir2Lir::IsFpReg(int reg) {
  return X86_FPREG(reg);
}

/* Clobber all regs that might be used by an external C call */
void X86Mir2Lir::ClobberCallerSave() {
  Clobber(rAX);
  Clobber(rCX);
  Clobber(rDX);
  Clobber(rBX);
}

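// Claim the wide return pair (edx:eax): clobber both halves, mark them in use and paired.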
RegLocation X86Mir2Lir::GetReturnWideAlt() {
  RegLocation res = LocCReturnWide();
  CHECK(res.low_reg == rAX);
  CHECK(res.high_reg == rDX);
  Clobber(rAX);
  Clobber(rDX);
  MarkInUse(rAX);
  MarkInUse(rDX);
  MarkPair(res.low_reg, res.high_reg);
  return res;
}

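// Claim an alternate single-word return location in edx: clobber it and mark it in use.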
RegLocation X86Mir2Lir::GetReturnAlt() {
  RegLocation res = LocCReturn();
  res.low_reg = rDX;
  Clobber(rDX);
  MarkInUse(rDX);
  return res;
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::LockCallTemps() {
  LockTemp(rX86_ARG0);
  LockTemp(rX86_ARG1);
  LockTemp(rX86_ARG2);
  LockTemp(rX86_ARG3);
}

/* To be used when explicitly managing register use */
void X86Mir2Lir::FreeCallTemps() {
  FreeTemp(rX86_ARG0);
  FreeTemp(rX86_ARG1);
  FreeTemp(rX86_ARG2);
  FreeTemp(rX86_ARG3);
}

void X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // TODO: optimize fences
  NewLIR0(kX86Mfence);
#endif
}
/*
 * Alloc a pair of core registers, or a double. Low reg in low byte,
 * high reg in next byte.
 */
int X86Mir2Lir::AllocTypedTempPair(bool fp_hint,
                                   int reg_class) {
  int high_reg;
  int low_reg;
  int res = 0;

  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
    low_reg = AllocTempDouble();
    high_reg = low_reg;  // only one allocated!
    res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
    return res;
  }

  low_reg = AllocTemp();
  high_reg = AllocTemp();
  res = (low_reg & 0xff) | ((high_reg & 0xff) << 8);
  return res;
}

int X86Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
    return AllocTempFloat();
  }
  return AllocTemp();
}

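// Build the register pool from the arrays above, reserving special registers and marking temps.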
void X86Mir2Lir::CompilerInitializeRegAlloc() {
  int num_regs = sizeof(core_regs)/sizeof(*core_regs);
  int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
  int num_temps = sizeof(core_temps)/sizeof(*core_temps);
  int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
  int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
  reg_pool_ = static_cast<RegisterPool*>(arena_->Alloc(sizeof(*reg_pool_),
                                                       ArenaAllocator::kAllocRegAlloc));
  reg_pool_->num_core_regs = num_regs;
  reg_pool_->core_regs =
      static_cast<RegisterInfo*>(arena_->Alloc(num_regs * sizeof(*reg_pool_->core_regs),
                                               ArenaAllocator::kAllocRegAlloc));
  reg_pool_->num_fp_regs = num_fp_regs;
  reg_pool_->FPRegs =
      static_cast<RegisterInfo *>(arena_->Alloc(num_fp_regs * sizeof(*reg_pool_->FPRegs),
                                                ArenaAllocator::kAllocRegAlloc));
  CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
  CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
  // Keep special registers from being allocated
  for (int i = 0; i < num_reserved; i++) {
    MarkInUse(ReservedRegs[i]);
  }
  // Mark temp regs - all others not in use can be used for promotion
  for (int i = 0; i < num_temps; i++) {
    MarkTemp(core_temps[i]);
  }
  for (int i = 0; i < num_fp_temps; i++) {
    MarkTemp(fp_temps[i]);
  }
}

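// Free the registers held by rl_free, but only if they do not overlap with rl_keep.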
void X86Mir2Lir::FreeRegLocTemps(RegLocation rl_keep,
                                 RegLocation rl_free) {
  if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
      (rl_free.high_reg != rl_keep.low_reg) && (rl_free.high_reg != rl_keep.high_reg)) {
    // No overlap, free both
    FreeTemp(rl_free.low_reg);
    FreeTemp(rl_free.high_reg);
  }
}

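// Store the core registers in core_spill_mask_ (excluding the fake return address) into the frame's spill area.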
void X86Mir2Lir::SpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register
  uint32_t mask = core_spill_mask_ & ~(1 << rRET);
  int offset = frame_size_ - (4 * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      StoreWordDisp(rX86_SP, offset, reg);
      offset += 4;
    }
  }
}

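// Reload the core registers in core_spill_mask_ (excluding the fake return address) from the frame's spill area.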
void X86Mir2Lir::UnSpillCoreRegs() {
  if (num_core_spills_ == 0) {
    return;
  }
  // Spill mask not including fake return address register
  uint32_t mask = core_spill_mask_ & ~(1 << rRET);
  int offset = frame_size_ - (4 * num_core_spills_);
  for (int reg = 0; mask; mask >>= 1, reg++) {
    if (mask & 0x1) {
      LoadWordDisp(rX86_SP, offset, reg);
      offset += 4;
    }
  }
}

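// An unconditional branch on x86 is either a short (8-bit) or near (32-bit) jump.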
bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
  return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}

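// The constructor also verifies that the EncodingMap table is ordered by opcode.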
X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Mir2Lir(cu, mir_graph, arena),
      method_address_insns_(arena, 100, kGrowableArrayMisc),
      class_type_address_insns_(arena, 100, kGrowableArrayMisc),
      call_method_insns_(arena, 100, kGrowableArrayMisc) {
  store_method_addr_used_ = false;
  for (int i = 0; i < kX86Last; i++) {
    if (X86Mir2Lir::EncodingMap[i].opcode != i) {
      LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
                 << " is wrong: expecting " << i << ", seeing "
                 << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
    }
  }
}

Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                          ArenaAllocator* const arena) {
  return new X86Mir2Lir(cu, mir_graph, arena);
}

// Not used in x86
int X86Mir2Lir::LoadHelper(ThreadOffset offset) {
  LOG(FATAL) << "Unexpected use of LoadHelper in x86";
  return INVALID_REG;
}

uint64_t X86Mir2Lir::GetTargetInstFlags(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].flags;
}

const char* X86Mir2Lir::GetTargetInstName(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].name;
}

const char* X86Mir2Lir::GetTargetInstFmt(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return X86Mir2Lir::EncodingMap[opcode].fmt;
}

/*
 * Return an updated location record with current in-register status.
 * If the value lives in live temps, reflect that fact. No code
 * is generated. If the live value is part of an older pair,
 * clobber both low and high.
 */
// TODO: Reunify with common code after 'pair mess' has been fixed
RegLocation X86Mir2Lir::UpdateLocWide(RegLocation loc) {
  DCHECK(loc.wide);
  DCHECK(CheckCorePoolSanity());
  if (loc.location != kLocPhysReg) {
    DCHECK((loc.location == kLocDalvikFrame) ||
           (loc.location == kLocCompilerTemp));
    // Are the dalvik regs already live in physical registers?
    RegisterInfo* info_lo = AllocLive(loc.s_reg_low, kAnyReg);

    // Handle FP registers specially on x86.
    if (info_lo && IsFpReg(info_lo->reg)) {
      bool match = true;

      // We can't match a FP register with a pair of Core registers.
      match = match && (info_lo->pair == 0);

      if (match) {
        // We can reuse; update the register usage info.
        loc.low_reg = info_lo->reg;
        loc.high_reg = info_lo->reg;  // Play nice with existing code.
        loc.location = kLocPhysReg;
        loc.vec_len = kVectorLength8;
        DCHECK(IsFpReg(loc.low_reg));
        return loc;
      }
      // We can't easily reuse; clobber and free any overlaps.
      if (info_lo) {
        Clobber(info_lo->reg);
        FreeTemp(info_lo->reg);
        if (info_lo->pair)
          Clobber(info_lo->partner);
      }
    } else {
      RegisterInfo* info_hi = AllocLive(GetSRegHi(loc.s_reg_low), kAnyReg);
      bool match = true;
      match = match && (info_lo != NULL);
      match = match && (info_hi != NULL);
      // Are they both core or both FP?
      match = match && (IsFpReg(info_lo->reg) == IsFpReg(info_hi->reg));
      // If a pair of floating point singles, are they properly aligned?
      if (match && IsFpReg(info_lo->reg)) {
        match &= ((info_lo->reg & 0x1) == 0);
        match &= ((info_hi->reg - info_lo->reg) == 1);
      }
      // If previously used as a pair, is it the same pair?
      if (match && (info_lo->pair || info_hi->pair)) {
        match = (info_lo->pair == info_hi->pair);
        match &= ((info_lo->reg == info_hi->partner) &&
                  (info_hi->reg == info_lo->partner));
      }
      if (match) {
        // Can reuse - update the register usage info
        loc.low_reg = info_lo->reg;
        loc.high_reg = info_hi->reg;
        loc.location = kLocPhysReg;
        MarkPair(loc.low_reg, loc.high_reg);
        DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
        return loc;
      }
      // Can't easily reuse - clobber and free any overlaps
      if (info_lo) {
        Clobber(info_lo->reg);
        FreeTemp(info_lo->reg);
        if (info_lo->pair)
          Clobber(info_lo->partner);
      }
      if (info_hi) {
        Clobber(info_hi->reg);
        FreeTemp(info_hi->reg);
        if (info_hi->pair)
          Clobber(info_hi->partner);
      }
    }
  }
  return loc;
}

// TODO: Reunify with common code after 'pair mess' has been fixed
RegLocation X86Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
  DCHECK(loc.wide);
  int32_t new_regs;
  int32_t low_reg;
  int32_t high_reg;

  loc = UpdateLocWide(loc);

  /* If it is already in a register, we can assume proper form. Is it the right reg class? */
  if (loc.location == kLocPhysReg) {
    DCHECK_EQ(IsFpReg(loc.low_reg), loc.IsVectorScalar());
    if (!RegClassMatches(reg_class, loc.low_reg)) {
      /* It is the wrong register class. Reallocate and copy. */
      if (!IsFpReg(loc.low_reg)) {
        // We want this in a FP reg, and it is in core registers.
        DCHECK(reg_class != kCoreReg);
        // Allocate this into any FP reg, and mark it with the right size.
        low_reg = AllocTypedTemp(true, reg_class);
        OpVectorRegCopyWide(low_reg, loc.low_reg, loc.high_reg);
        CopyRegInfo(low_reg, loc.low_reg);
        Clobber(loc.low_reg);
        Clobber(loc.high_reg);
        loc.low_reg = low_reg;
        loc.high_reg = low_reg;  // Play nice with existing code.
        loc.vec_len = kVectorLength8;
      } else {
        // The value is in a FP register, and we want it in a pair of core registers.
        DCHECK_EQ(reg_class, kCoreReg);
        DCHECK_EQ(loc.low_reg, loc.high_reg);
        new_regs = AllocTypedTempPair(false, kCoreReg);  // Force to core registers.
        low_reg = new_regs & 0xff;
        high_reg = (new_regs >> 8) & 0xff;
        DCHECK_NE(low_reg, high_reg);
        OpRegCopyWide(low_reg, high_reg, loc.low_reg, loc.high_reg);
        CopyRegInfo(low_reg, loc.low_reg);
        CopyRegInfo(high_reg, loc.high_reg);
        Clobber(loc.low_reg);
        Clobber(loc.high_reg);
        loc.low_reg = low_reg;
        loc.high_reg = high_reg;
        MarkPair(loc.low_reg, loc.high_reg);
        DCHECK(!IsFpReg(loc.low_reg) || ((loc.low_reg & 0x1) == 0));
      }
    }
    return loc;
  }

  DCHECK_NE(loc.s_reg_low, INVALID_SREG);
  DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);

  new_regs = AllocTypedTempPair(loc.fp, reg_class);
  loc.low_reg = new_regs & 0xff;
  loc.high_reg = (new_regs >> 8) & 0xff;

  if (loc.low_reg == loc.high_reg) {
    DCHECK(IsFpReg(loc.low_reg));
    loc.vec_len = kVectorLength8;
  } else {
    MarkPair(loc.low_reg, loc.high_reg);
  }
  if (update) {
    loc.location = kLocPhysReg;
    MarkLive(loc.low_reg, loc.s_reg_low);
    if (loc.low_reg != loc.high_reg) {
      MarkLive(loc.high_reg, GetSRegHi(loc.s_reg_low));
    }
  }
  return loc;
}

// TODO: Reunify with common code after 'pair mess' has been fixed
RegLocation X86Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
  int new_reg;

  if (loc.wide)
    return EvalLocWide(loc, reg_class, update);

  loc = UpdateLoc(loc);

  if (loc.location == kLocPhysReg) {
    if (!RegClassMatches(reg_class, loc.low_reg)) {
      /* Wrong register class. Realloc, copy and transfer ownership. */
      new_reg = AllocTypedTemp(loc.fp, reg_class);
      OpRegCopy(new_reg, loc.low_reg);
      CopyRegInfo(new_reg, loc.low_reg);
      Clobber(loc.low_reg);
      loc.low_reg = new_reg;
      if (IsFpReg(loc.low_reg) && reg_class != kCoreReg)
        loc.vec_len = kVectorLength4;
    }
    return loc;
  }

  DCHECK_NE(loc.s_reg_low, INVALID_SREG);

  new_reg = AllocTypedTemp(loc.fp, reg_class);
  loc.low_reg = new_reg;
  if (IsFpReg(loc.low_reg) && reg_class != kCoreReg)
    loc.vec_len = kVectorLength4;

  if (update) {
    loc.location = kLocPhysReg;
    MarkLive(loc.low_reg, loc.s_reg_low);
  }
  return loc;
}

int X86Mir2Lir::AllocTempDouble() {
  // We really don't need a pair of registers.
  return AllocTempFloat();
}

// TODO: Reunify with common code after 'pair mess' has been fixed
void X86Mir2Lir::ResetDefLocWide(RegLocation rl) {
  DCHECK(rl.wide);
  RegisterInfo* p_low = IsTemp(rl.low_reg);
  if (IsFpReg(rl.low_reg)) {
    // We are using only the low register.
    if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
      NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
    }
    ResetDef(rl.low_reg);
  } else {
    RegisterInfo* p_high = IsTemp(rl.high_reg);
    if (p_low && !(cu_->disable_opt & (1 << kSuppressLoads))) {
      DCHECK(p_low->pair);
      NullifyRange(p_low->def_start, p_low->def_end, p_low->s_reg, rl.s_reg_low);
    }
    if (p_high && !(cu_->disable_opt & (1 << kSuppressLoads))) {
      DCHECK(p_high->pair);
    }
    ResetDef(rl.low_reg);
    ResetDef(rl.high_reg);
  }
}

void X86Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
  // Can we do this directly to memory?
  rl_dest = UpdateLocWide(rl_dest);
  if ((rl_dest.location == kLocDalvikFrame) ||
      (rl_dest.location == kLocCompilerTemp)) {
    int32_t val_lo = Low32Bits(value);
    int32_t val_hi = High32Bits(value);
    int rBase = TargetReg(kSp);
    int displacement = SRegOffset(rl_dest.s_reg_low);

    LIR * store = NewLIR3(kX86Mov32MI, rBase, displacement + LOWORD_OFFSET, val_lo);
    AnnotateDalvikRegAccess(store, (displacement + LOWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    store = NewLIR3(kX86Mov32MI, rBase, displacement + HIWORD_OFFSET, val_hi);
    AnnotateDalvikRegAccess(store, (displacement + HIWORD_OFFSET) >> 2,
                            false /* is_load */, true /* is64bit */);
    return;
  }

  // Just use the standard code to do the generation.
  Mir2Lir::GenConstWide(rl_dest, value);
}

// TODO: Merge with existing RegLocation dumper in vreg_analysis.cc
void X86Mir2Lir::DumpRegLocation(RegLocation loc) {
  LOG(INFO) << "location: " << loc.location << ','
            << (loc.wide ? " w" : " ")
            << (loc.defined ? " D" : " ")
            << (loc.is_const ? " c" : " ")
            << (loc.fp ? " F" : " ")
            << (loc.core ? " C" : " ")
            << (loc.ref ? " r" : " ")
            << (loc.high_word ? " h" : " ")
            << (loc.home ? " H" : " ")
            << " vec_len: " << loc.vec_len
            << ", low: " << static_cast<int>(loc.low_reg)
            << ", high: " << static_cast<int>(loc.high_reg)
            << ", s_reg: " << loc.s_reg_low
            << ", orig: " << loc.orig_sreg;
}

void X86Mir2Lir::Materialize() {
  // A good place to put the analysis before starting.
  AnalyzeMIR();

  // Now continue with regular code generation.
  Mir2Lir::Materialize();
}

void X86Mir2Lir::LoadMethodAddress(int dex_method_index, InvokeType type,
                                   SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction, that will be filled
   * in at 'link time'. For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  const DexFile::MethodId& id = cu_->dex_file->GetMethodId(dex_method_index);
  uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);

  // Generate the move instruction with the unique pointer and save index and type.
  LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg),
                     static_cast<int>(ptr), dex_method_index, type);
  AppendLIR(move);
  method_address_insns_.Insert(move);
}

void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
  /*
   * For x86, just generate a 32 bit move immediate instruction, that will be filled
   * in at 'link time'. For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx);
  uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);

  // Generate the move instruction with the unique pointer and save index and type.
  LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI, TargetReg(symbolic_reg),
                     static_cast<int>(ptr), type_idx);
  AppendLIR(move);
  class_type_address_insns_.Insert(move);
}

LIR *X86Mir2Lir::CallWithLinkerFixup(int dex_method_index, InvokeType type) {
  /*
   * For x86, just generate a 32 bit call relative instruction, that will be filled
   * in at 'link time'. For now, put a unique value based on target to ensure that
   * code deduplication works.
   */
  const DexFile::MethodId& id = cu_->dex_file->GetMethodId(dex_method_index);
  uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);

  // Generate the call instruction with the unique pointer and save index and type.
  LIR *call = RawLIR(current_dalvik_offset_, kX86CallI, static_cast<int>(ptr), dex_method_index,
                     type);
  AppendLIR(call);
  call_method_insns_.Insert(call);
  return call;
}

void X86Mir2Lir::InstallLiteralPools() {
  // These are handled differently for x86.
  DCHECK(code_literal_list_ == nullptr);
  DCHECK(method_literal_list_ == nullptr);
  DCHECK(class_literal_list_ == nullptr);

  // Handle the fixups for methods.
  for (uint32_t i = 0; i < method_address_insns_.Size(); i++) {
    LIR* p = method_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target = p->operands[2];

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddMethodPatch(cu_->dex_file, cu_->class_def_idx,
                                         cu_->method_idx, cu_->invoke_type,
                                         target, static_cast<InvokeType>(p->operands[3]),
                                         patch_offset);
  }

  // Handle the fixups for class types.
  for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
    LIR* p = class_type_address_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86Mov32RI);
    uint32_t target = p->operands[2];

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx,
                                        cu_->method_idx, target, patch_offset);
  }

  // And now the PC-relative calls to methods.
  for (uint32_t i = 0; i < call_method_insns_.Size(); i++) {
    LIR* p = call_method_insns_.Get(i);
    DCHECK_EQ(p->opcode, kX86CallI);
    uint32_t target = p->operands[1];

    // The offset to patch is the last 4 bytes of the instruction.
    int patch_offset = p->offset + p->flags.size - 4;
    cu_->compiler_driver->AddRelativeCodePatch(cu_->dex_file, cu_->class_def_idx,
                                               cu_->method_idx, cu_->invoke_type, target,
                                               static_cast<InvokeType>(p->operands[2]),
                                               patch_offset, -4 /* offset */);
  }

  // And do the normal processing.
  Mir2Lir::InstallLiteralPools();
}

// Offsets within java.lang.String.
#define STRING_VALUE_OFFSET 8
#define STRING_COUNT_OFFSET 12
#define STRING_OFFSET_OFFSET 20
#define STRING_DATA_OFFSET 12

/*
 * Fast string.index_of(I) & (II). Inline check for simple case of char <= 0xffff,
 * otherwise bails to standard library code.
 */
bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers

  // EAX: 16 bit character being searched.
  // ECX: count: number of words to be searched.
  // EDI: String being searched.
  // EDX: temporary during execution.
  // EBX: temporary during execution.

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start = info->args[2];

  uint32_t char_value =
      rl_char.is_const ? mir_graph_->ConstantValue(rl_char.orig_sreg) : 0;

  if (char_value > 0xFFFF) {
    // We have to punt to the real String.indexOf.
    return false;
  }

  // Okay, we are committed to inlining this.
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);

  // Is the string non-NULL?
  LoadValueDirectFixed(rl_obj, rDX);
  GenNullCheck(rl_obj.s_reg_low, rDX, info->opt_flags);

  // Record that we have inlined & null checked the object.
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);

  // Does the character fit in 16 bits?
  LIR* launch_pad = nullptr;
  if (rl_char.is_const) {
    // We need the value in EAX.
    LoadConstantNoClobber(rAX, char_value);
  } else {
    // Character is not a constant; compare at runtime.
    LoadValueDirectFixed(rl_char, rAX);
    launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
    intrinsic_launchpads_.Insert(launch_pad);
    OpCmpImmBranch(kCondGt, rAX, 0xFFFF, launch_pad);
  }

  // From here down, we know that we are looking for a char that fits in 16 bits.

  // Character is in EAX.
  // Object pointer is in EDX.

  // We need to preserve EDI, but have no spare registers, so push it on the stack.
  // We have to remember that all stack addresses after this are offset by sizeof(EDI).
  NewLIR1(kX86Push32R, rDI);

  // Compute the number of words to search in to rCX.
  LoadWordDisp(rDX, STRING_COUNT_OFFSET, rCX);
  LIR *length_compare = nullptr;
  int start_value = 0;
  if (zero_based) {
    // We have to handle an empty string. Use special instruction JECXZ.
    length_compare = NewLIR0(kX86Jecxz8);
  } else {
    // We have to offset by the start index.
    if (rl_start.is_const) {
      start_value = mir_graph_->ConstantValue(rl_start.orig_sreg);
      start_value = std::max(start_value, 0);

      // Is the start > count?
      length_compare = OpCmpImmBranch(kCondLe, rCX, start_value, nullptr);

      if (start_value != 0) {
        OpRegImm(kOpSub, rCX, start_value);
      }
    } else {
      // Runtime start index.
      rl_start = UpdateLoc(rl_start);
      if (rl_start.location == kLocPhysReg) {
        length_compare = OpCmpBranch(kCondLe, rCX, rl_start.low_reg, nullptr);
        OpRegReg(kOpSub, rCX, rl_start.low_reg);
      } else {
        // Compare to memory to avoid a register load. Handle pushed EDI.
        int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
        OpRegMem(kOpCmp, rDX, rX86_SP, displacement);
        length_compare = NewLIR2(kX86Jcc8, 0, kX86CondLe);
        OpRegMem(kOpSub, rCX, rX86_SP, displacement);
      }
    }
  }
  DCHECK(length_compare != nullptr);

  // ECX now contains the count in words to be searched.

  // Load the address of the string into EBX.
  // The string starts at VALUE(String) + 2 * OFFSET(String) + STRING_DATA_OFFSET.
  LoadWordDisp(rDX, STRING_VALUE_OFFSET, rDI);
  LoadWordDisp(rDX, STRING_OFFSET_OFFSET, rBX);
  OpLea(rBX, rDI, rBX, 1, STRING_DATA_OFFSET);

  // Now compute into EDI where the search will start.
  if (zero_based || rl_start.is_const) {
    if (start_value == 0) {
      OpRegCopy(rDI, rBX);
    } else {
      NewLIR3(kX86Lea32RM, rDI, rBX, 2 * start_value);
    }
  } else {
    if (rl_start.location == kLocPhysReg) {
      if (rl_start.low_reg == rDI) {
        // We have a slight problem here. We are already using RDI!
        // Grab the value from the stack.
        LoadWordDisp(rX86_SP, 0, rDX);
        OpLea(rDI, rBX, rDX, 1, 0);
      } else {
        OpLea(rDI, rBX, rl_start.low_reg, 1, 0);
      }
    } else {
      OpRegCopy(rDI, rBX);
      // Load the start index from stack, remembering that we pushed EDI.
      int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
      LoadWordDisp(rX86_SP, displacement, rDX);
      OpLea(rDI, rBX, rDX, 1, 0);
    }
  }

  // EDI now contains the start of the string to be searched.
  // We are all prepared to do the search for the character.
  NewLIR0(kX86RepneScasw);

  // Did we find a match?
  LIR* failed_branch = OpCondBranch(kCondNe, nullptr);

  // Yes, we matched. Compute the index of the result.
  // index = ((curr_ptr - orig_ptr) / 2) - 1.
  OpRegReg(kOpSub, rDI, rBX);
  OpRegImm(kOpAsr, rDI, 1);
  NewLIR3(kX86Lea32RM, rl_return.low_reg, rDI, -1);
  LIR *all_done = NewLIR1(kX86Jmp8, 0);

  // Failed to match; return -1.
  LIR *not_found = NewLIR0(kPseudoTargetLabel);
  length_compare->target = not_found;
  failed_branch->target = not_found;
  LoadConstantNoClobber(rl_return.low_reg, -1);

  // And join up at the end.
  all_done->target = NewLIR0(kPseudoTargetLabel);
  // Restore EDI from the stack.
  NewLIR1(kX86Pop32R, rDI);

  // Out of line code returns here.
  if (launch_pad != nullptr) {
    LIR *return_point = NewLIR0(kPseudoTargetLabel);
    launch_pad->operands[2] = WrapPointer(return_point);
  }

  StoreValue(rl_dest, rl_return);
  return true;
}

}  // namespace art