/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the X86 ISA */

#include "codegen_x86.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "mirror/array.h"
#include "x86_lir.h"

namespace art {

/*
 * Compare a register against a memory operand and branch to the throw
 * launchpad if the condition holds.
 */
LIR* X86Mir2Lir::GenRegMemCheck(ConditionCode c_code,
                                int reg1, int base, int offset, ThrowKind kind)
{
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind,
                    current_dalvik_offset_, reg1, base, offset);
  OpRegMem(kOpCmp, reg1, base, offset);
  LIR* branch = OpCondBranch(c_code, tgt);
  // Remember branch target - will process later.
  throw_launchpads_.Insert(tgt);
  return branch;
}

/*
 * Compare two 64-bit values
 *    x = y     return  0
 *    x < y     return -1
 *    x > y     return  1
 */
void X86Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2)
{
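  // Branch-free strategy: compute (r1:r0) = x - y, then derive the result
  // from the flags:
  //   r2 = (x < y) ? 1 : 0, negated to 0 or -1
  //   r0 = (x != y) ? 1 : 0
  //   r0 | r2 is -1 if x < y, 1 if x > y, 0 if x == y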
  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage
  LoadValueDirectWideFixed(rl_src1, r0, r1);
  LoadValueDirectWideFixed(rl_src2, r2, r3);
  // Compute (r1:r0) = (r1:r0) - (r3:r2)
  OpRegReg(kOpSub, r0, r2);  // r0 = r0 - r2
  OpRegReg(kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
  NewLIR2(kX86Set8R, r2, kX86CondL);  // r2 = (r1:r0) < (r3:r2) ? 1 : 0
  NewLIR2(kX86Movzx8RR, r2, r2);
  OpReg(kOpNeg, r2);  // r2 = -r2
  OpRegReg(kOpOr, r0, r1);  // r0 = high | low - sets ZF
  NewLIR2(kX86Set8R, r0, kX86CondNz);  // r0 = (r1:r0) != (r3:r2) ? 1 : 0
  NewLIR2(kX86Movzx8RR, r0, r0);
  OpRegReg(kOpOr, r0, r2);  // r0 = r0 | r2
  RegLocation rl_result = LocCReturn();
  StoreValue(rl_dest, rl_result);
}

X86ConditionCode X86ConditionEncoding(ConditionCode cond) {
  switch (cond) {
    case kCondEq: return kX86CondEq;
    case kCondNe: return kX86CondNe;
    case kCondCs: return kX86CondC;
    case kCondCc: return kX86CondNc;
    case kCondMi: return kX86CondS;
    case kCondPl: return kX86CondNs;
    case kCondVs: return kX86CondO;
    case kCondVc: return kX86CondNo;
    case kCondHi: return kX86CondA;
    case kCondLs: return kX86CondBe;
    case kCondGe: return kX86CondGe;
    case kCondLt: return kX86CondL;
    case kCondGt: return kX86CondG;
    case kCondLe: return kX86CondLe;
    case kCondAl:
    case kCondNv: LOG(FATAL) << "Should not reach here";
  }
  return kX86CondO;
}

LIR* X86Mir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2,
                             LIR* target)
{
  NewLIR2(kX86Cmp32RR, src1, src2);
  X86ConditionCode cc = X86ConditionEncoding(cond);
  LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */, cc);
  branch->target = target;
  return branch;
}

LIR* X86Mir2Lir::OpCmpImmBranch(ConditionCode cond, int reg,
                                int check_value, LIR* target)
{
  if ((check_value == 0) && (cond == kCondEq || cond == kCondNe)) {
    // TODO: when check_value == 0 and reg is rCX, use the jcxz/nz opcode.
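    // test reg,reg sets ZF and SF the same way as cmp reg,0, with a shorter
    // encoding and no immediate operand.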
    NewLIR2(kX86Test32RR, reg, reg);
  } else {
    NewLIR2(IS_SIMM8(check_value) ? kX86Cmp32RI8 : kX86Cmp32RI, reg, check_value);
  }
  X86ConditionCode cc = X86ConditionEncoding(cond);
  LIR* branch = NewLIR2(kX86Jcc8, 0 /* lir operand for Jcc offset */, cc);
  branch->target = target;
  return branch;
}

LIR* X86Mir2Lir::OpRegCopyNoInsert(int r_dest, int r_src)
{
  if (X86_FPREG(r_dest) || X86_FPREG(r_src))
    return OpFpRegCopy(r_dest, r_src);
  LIR* res = RawLIR(current_dalvik_offset_, kX86Mov32RR,
                    r_dest, r_src);
  if (r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

LIR* X86Mir2Lir::OpRegCopy(int r_dest, int r_src)
{
  LIR *res = OpRegCopyNoInsert(r_dest, r_src);
  AppendLIR(res);
  return res;
}

void X86Mir2Lir::OpRegCopyWide(int dest_lo, int dest_hi,
                               int src_lo, int src_hi)
{
  bool dest_fp = X86_FPREG(dest_lo) && X86_FPREG(dest_hi);
  bool src_fp = X86_FPREG(src_lo) && X86_FPREG(src_hi);
  DCHECK_EQ(X86_FPREG(src_lo), X86_FPREG(src_hi));
  DCHECK_EQ(X86_FPREG(dest_lo), X86_FPREG(dest_hi));
  if (dest_fp) {
    if (src_fp) {
      OpRegCopy(S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
    } else {
      // TODO: Prevent this from happening in the code. The result is often
      // unused or could have been loaded more easily from memory.
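      // Pack the two GPRs into a single XMM register:
      //   dest = src_lo | (src_hi << 32)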
      NewLIR2(kX86MovdxrRR, dest_lo, src_lo);
      NewLIR2(kX86MovdxrRR, dest_hi, src_hi);
      NewLIR2(kX86PsllqRI, dest_hi, 32);
      NewLIR2(kX86OrpsRR, dest_lo, dest_hi);
    }
  } else {
    if (src_fp) {
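      // Unpack the XMM register into two GPRs. Note that the psrlq below
      // shifts the source XMM register in place, destroying its original
      // value.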
      NewLIR2(kX86MovdrxRR, dest_lo, src_lo);
      NewLIR2(kX86PsrlqRI, src_lo, 32);
      NewLIR2(kX86MovdrxRR, dest_hi, src_lo);
    } else {
      // Handle overlap
      if (src_hi == dest_lo) {
        OpRegCopy(dest_hi, src_hi);
        OpRegCopy(dest_lo, src_lo);
      } else {
        OpRegCopy(dest_lo, src_lo);
        OpRegCopy(dest_hi, src_hi);
      }
    }
  }
}

void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir)
{
  UNIMPLEMENTED(FATAL) << "Need codegen for GenSelect";
}
175
176void X86Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
177 LIR* taken = &block_label_list_[bb->taken->id];
178 RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
179 RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
180 FlushAllRegs();
181 LockCallTemps(); // Prepare for explicit register usage
182 LoadValueDirectWideFixed(rl_src1, r0, r1);
183 LoadValueDirectWideFixed(rl_src2, r2, r3);
184 ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
185 // Swap operands and condition code to prevent use of zero flag.
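  // After a sub/sbc pair, ZF reflects only the high word, so kCondLe and
  // kCondGt (which need ZF) cannot be tested directly. Reversing the
  // subtraction turns them into kCondGe and kCondLt, which depend only on
  // SF and OF of the full 64-bit subtraction.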
  if (ccode == kCondLe || ccode == kCondGt) {
    // Compute (r3:r2) = (r3:r2) - (r1:r0)
    OpRegReg(kOpSub, r2, r0);  // r2 = r2 - r0
    OpRegReg(kOpSbc, r3, r1);  // r3 = r3 - r1 - CF
  } else {
    // Compute (r1:r0) = (r1:r0) - (r3:r2)
    OpRegReg(kOpSub, r0, r2);  // r0 = r0 - r2
    OpRegReg(kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
  }
  switch (ccode) {
    case kCondEq:
    case kCondNe:
      OpRegReg(kOpOr, r0, r1);  // r0 = r0 | r1
      break;
    case kCondLe:
      ccode = kCondGe;
      break;
    case kCondGt:
      ccode = kCondLt;
      break;
    case kCondLt:
    case kCondGe:
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpCondBranch(ccode, taken);
}

RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, int reg_lo,
                                     int lit, bool is_div)
{
  LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
  return rl_dest;
}

RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, int reg_lo,
                                  int reg_hi, bool is_div)
{
  LOG(FATAL) << "Unexpected use of GenDivRem for x86";
  return rl_dest;
}

bool X86Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min)
{
  DCHECK_EQ(cu_->instruction_set, kX86);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = info->args[1];
  rl_src1 = LoadValue(rl_src1, kCoreReg);
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
  DCHECK_EQ(cu_->instruction_set, kX86);
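  // For min, branch when src1 > src2 (the result is then src2); for max,
  // branch when src1 < src2. Fall through to pick src1 otherwise.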
  LIR* branch = NewLIR2(kX86Jcc8, 0, is_min ? kX86CondG : kX86CondL);
  OpRegReg(kOpMov, rl_result.low_reg, rl_src1.low_reg);
  LIR* branch2 = NewLIR1(kX86Jmp8, 0);
  branch->target = NewLIR0(kPseudoTargetLabel);
  OpRegReg(kOpMov, rl_result.low_reg, rl_src2.low_reg);
  branch2->target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  return true;
}

void X86Mir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset)
{
  NewLIR5(kX86Lea32RA, rBase, reg1, reg2, scale, offset);
}

void X86Mir2Lir::OpTlsCmp(int offset, int val)
{
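  // 16-bit compare of the value at the given thread-local displacement
  // against an 8-bit immediate.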
  NewLIR2(kX86Cmp16TI8, offset, val);
}

bool X86Mir2Lir::GenInlinedCas32(CallInfo* info, bool need_write_barrier) {
  DCHECK_NE(cu_->instruction_set, kThumb2);
  return false;
}

LIR* X86Mir2Lir::OpPcRelLoad(int reg, LIR* target) {
  LOG(FATAL) << "Unexpected use of OpPcRelLoad for x86";
  return NULL;
}

LIR* X86Mir2Lir::OpVldm(int rBase, int count)
{
  LOG(FATAL) << "Unexpected use of OpVldm for x86";
  return NULL;
}

LIR* X86Mir2Lir::OpVstm(int rBase, int count)
{
  LOG(FATAL) << "Unexpected use of OpVstm for x86";
  return NULL;
}

void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                               RegLocation rl_result, int lit,
                                               int first_bit, int second_bit)
{
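  // lit has exactly two bits set: lit == (1 << second_bit) + (1 << first_bit).
  // Compute src + (src << (second_bit - first_bit)), then shift the sum left
  // by first_bit, replacing the multiply with shifts and an add.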
  int t_reg = AllocTemp();
  OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, t_reg);
  FreeTemp(t_reg);
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
  }
}

void X86Mir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi)
{
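  // OR the two halves together: the result is zero iff the full 64-bit
  // divisor is zero.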
  int t_reg = AllocTemp();
  OpRegRegReg(kOpOr, t_reg, reg_lo, reg_hi);
  GenImmedCheck(kCondEq, t_reg, 0, kThrowDivZero);
  FreeTemp(t_reg);
}

// Test suspend flag, return target of taken suspend branch
LIR* X86Mir2Lir::OpTestSuspend(LIR* target)
{
  OpTlsCmp(Thread::ThreadFlagsOffset().Int32Value(), 0);
  return OpCondBranch((target == NULL) ? kCondNe : kCondEq, target);
}

// Decrement register and branch on condition
LIR* X86Mir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target)
{
  OpRegImm(kOpSub, reg, 1);
  return OpCmpImmBranch(c_code, reg, 0, target);
}

bool X86Mir2Lir::SmallLiteralDivide(Instruction::Code dalvik_opcode,
                                    RegLocation rl_src, RegLocation rl_dest, int lit)
{
  LOG(FATAL) << "Unexpected use of SmallLiteralDivide for x86";
  return false;
}

LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide)
{
  LOG(FATAL) << "Unexpected use of OpIT for x86";
  return NULL;
}

void X86Mir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2)
{
  LOG(FATAL) << "Unexpected use of GenMulLong for x86";
}

void X86Mir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2)
{
  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
  // enough.
  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage
  LoadValueDirectWideFixed(rl_src1, r0, r1);
  LoadValueDirectWideFixed(rl_src2, r2, r3);
  // Compute (r1:r0) = (r1:r0) + (r3:r2)
  OpRegReg(kOpAdd, r0, r2);  // r0 = r0 + r2
  OpRegReg(kOpAdc, r1, r3);  // r1 = r1 + r3 + CF
  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
                           INVALID_SREG, INVALID_SREG};
  StoreValueWide(rl_dest, rl_result);
}

void X86Mir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2)
{
  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
  // enough.
  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage
  LoadValueDirectWideFixed(rl_src1, r0, r1);
  LoadValueDirectWideFixed(rl_src2, r2, r3);
  // Compute (r1:r0) = (r1:r0) - (r3:r2)
  OpRegReg(kOpSub, r0, r2);  // r0 = r0 - r2
  OpRegReg(kOpSbc, r1, r3);  // r1 = r1 - r3 - CF
  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
                           INVALID_SREG, INVALID_SREG};
  StoreValueWide(rl_dest, rl_result);
}

void X86Mir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2)
{
  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
  // enough.
  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage
  LoadValueDirectWideFixed(rl_src1, r0, r1);
  LoadValueDirectWideFixed(rl_src2, r2, r3);
  // Compute (r1:r0) = (r1:r0) & (r3:r2)
  OpRegReg(kOpAnd, r0, r2);  // r0 = r0 & r2
  OpRegReg(kOpAnd, r1, r3);  // r1 = r1 & r3
  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
                           INVALID_SREG, INVALID_SREG};
  StoreValueWide(rl_dest, rl_result);
}

void X86Mir2Lir::GenOrLong(RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2)
{
  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
  // enough.
  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage
  LoadValueDirectWideFixed(rl_src1, r0, r1);
  LoadValueDirectWideFixed(rl_src2, r2, r3);
  // Compute (r1:r0) = (r1:r0) | (r3:r2)
  OpRegReg(kOpOr, r0, r2);  // r0 = r0 | r2
  OpRegReg(kOpOr, r1, r3);  // r1 = r1 | r3
  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
                           INVALID_SREG, INVALID_SREG};
  StoreValueWide(rl_dest, rl_result);
}

void X86Mir2Lir::GenXorLong(RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2)
{
  // TODO: fixed register usage here as we only have 4 temps and temporary allocation isn't smart
  // enough.
  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage
  LoadValueDirectWideFixed(rl_src1, r0, r1);
  LoadValueDirectWideFixed(rl_src2, r2, r3);
  // Compute (r1:r0) = (r1:r0) ^ (r3:r2)
  OpRegReg(kOpXor, r0, r2);  // r0 = r0 ^ r2
  OpRegReg(kOpXor, r1, r3);  // r1 = r1 ^ r3
  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
                           INVALID_SREG, INVALID_SREG};
  StoreValueWide(rl_dest, rl_result);
}

void X86Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src)
{
  FlushAllRegs();
  LockCallTemps();  // Prepare for explicit register usage
  LoadValueDirectWideFixed(rl_src, r0, r1);
  // Compute (r1:r0) = -(r1:r0)
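  // neg sets CF when r0 was non-zero; folding that borrow into r1 before
  // negating it produces the correct high word of the two's complement.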
  OpRegReg(kOpNeg, r0, r0);  // r0 = -r0
  OpRegImm(kOpAdc, r1, 0);   // r1 = r1 + CF
  OpRegReg(kOpNeg, r1, r1);  // r1 = -r1
  RegLocation rl_result = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
                           INVALID_SREG, INVALID_SREG};
  StoreValueWide(rl_dest, rl_result);
}

void X86Mir2Lir::OpRegThreadMem(OpKind op, int r_dest, int thread_offset) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpCmp: opcode = kX86Cmp32RT;  break;
    case kOpMov: opcode = kX86Mov32RT;  break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  NewLIR2(opcode, r_dest, thread_offset);
}

/*
 * Generate array load
 */
void X86Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale)
{
  RegisterClass reg_class = oat_reg_class_by_size(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  rl_array = LoadValue(rl_array, kCoreReg);
  rl_index = LoadValue(rl_index, kCoreReg);

  if (size == kLong || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  /* null object? */
  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);

  if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
    /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
    GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg,
                   len_offset, kThrowArrayBounds);
  }
  if ((size == kLong) || (size == kDouble)) {
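    // Fold base + scaled index into a single address register so both halves
    // of the 64-bit value can be loaded via a plain displacement, and the
    // array/index registers can be freed early.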
    int reg_addr = AllocTemp();
    OpLea(reg_addr, rl_array.low_reg, rl_index.low_reg, scale, data_offset);
    FreeTemp(rl_array.low_reg);
    FreeTemp(rl_index.low_reg);
    rl_result = EvalLoc(rl_dest, reg_class, true);
    LoadBaseIndexedDisp(reg_addr, INVALID_REG, 0, 0, rl_result.low_reg,
                        rl_result.high_reg, size, INVALID_SREG);
    StoreValueWide(rl_dest, rl_result);
  } else {
    rl_result = EvalLoc(rl_dest, reg_class, true);

    LoadBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale,
                        data_offset, rl_result.low_reg, INVALID_REG, size,
                        INVALID_SREG);

    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Generate array store
 */
void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale)
{
  RegisterClass reg_class = oat_reg_class_by_size(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;

  if (size == kLong || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  rl_array = LoadValue(rl_array, kCoreReg);
  rl_index = LoadValue(rl_index, kCoreReg);

  /* null object? */
  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);

  if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
    /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
    GenRegMemCheck(kCondUge, rl_index.low_reg, rl_array.low_reg, len_offset, kThrowArrayBounds);
  }
  if ((size == kLong) || (size == kDouble)) {
    rl_src = LoadValueWide(rl_src, reg_class);
  } else {
    rl_src = LoadValue(rl_src, reg_class);
  }
  // If the src reg can't be byte accessed, move it to a temp first.
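  // Only r0-r3 have byte-addressable forms (AL, CL, DL, BL) on x86-32.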
  if ((size == kSignedByte || size == kUnsignedByte) && rl_src.low_reg >= 4) {
    int temp = AllocTemp();
    OpRegCopy(temp, rl_src.low_reg);
    StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, temp,
                         INVALID_REG, size, INVALID_SREG);
  } else {
    StoreBaseIndexedDisp(rl_array.low_reg, rl_index.low_reg, scale, data_offset, rl_src.low_reg,
                         rl_src.high_reg, size, INVALID_SREG);
  }
}

/*
 * Generate array store for object references, with the required
 * null, bounds, and element type checks.
 */
void X86Mir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array,
                                RegLocation rl_index, RegLocation rl_src, int scale)
{
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value();

  FlushAllRegs();  // Use explicit registers
  LockCallTemps();

  int r_value = TargetReg(kArg0);  // Register holding value
  int r_array_class = TargetReg(kArg1);  // Register holding array's Class
  int r_array = TargetReg(kArg2);  // Register holding array
  int r_index = TargetReg(kArg3);  // Register holding index into array

  LoadValueDirectFixed(rl_array, r_array);  // Grab array
  LoadValueDirectFixed(rl_src, r_value);  // Grab value
  LoadValueDirectFixed(rl_index, r_index);  // Grab index

  GenNullCheck(rl_array.s_reg_low, r_array, opt_flags);  // NPE?

  // Store of null?
  LIR* null_value_check = OpCmpImmBranch(kCondEq, r_value, 0, NULL);

  // Get the array's class.
  LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
  CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
                          r_array_class, true);
  // Redo LoadValues in case they didn't survive the call.
  LoadValueDirectFixed(rl_array, r_array);  // Reload array
  LoadValueDirectFixed(rl_index, r_index);  // Reload index
  LoadValueDirectFixed(rl_src, r_value);  // Reload value
  r_array_class = INVALID_REG;

  // Branch here if value to be stored == null
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_value_check->target = target;

  // Make an extra temp available for the card mark below.
  FreeTemp(TargetReg(kArg1));
  if (!(opt_flags & MIR_IGNORE_RANGE_CHECK)) {
    /* if (rl_index >= [rl_array + len_offset]) goto kThrowArrayBounds */
    GenRegMemCheck(kCondUge, r_index, r_array, len_offset, kThrowArrayBounds);
  }
  StoreBaseIndexedDisp(r_array, r_index, scale,
                       data_offset, r_value, INVALID_REG, kWord, INVALID_SREG);
  FreeTemp(r_index);
  if (!mir_graph_->IsConstantNullRef(rl_src)) {
    MarkGCCard(r_value, r_array);
  }
}

void X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                   RegLocation rl_src1, RegLocation rl_shift)
{
  // Default implementation is just to ignore the constant case.
  GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
}

void X86Mir2Lir::GenArithImmOpLong(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2)
{
  // Default - bail to non-const handler.
  GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
}

}  // namespace art