/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Thumb2 ISA. */

#include "arm_lir.h"
#include "codegen_arm.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"

namespace art {

LIR* ArmMir2Lir::OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target) {
  OpRegReg(kOpCmp, src1, src2);
  return OpCondBranch(cond, target);
}

/*
 * Generate a Thumb2 IT instruction, which can nullify up to
 * four subsequent instructions based on a condition and its
 * inverse. The condition applies to the first instruction, which
 * is executed if the condition is met. The string "guide" consists
 * of 0 to 3 chars, and applies to the 2nd through 4th instruction.
 * A "T" means the instruction is executed if the condition is
 * met, and an "E" means the instruction is executed if the condition
 * is not met.
 */
LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide) {
  int mask;
  int mask3 = 0;
  int mask2 = 0;
  int mask1 = 0;
  ArmConditionCode code = ArmConditionEncoding(ccode);
  int cond_bit = code & 1;
  int alt_bit = cond_bit ^ 1;

  // Note: case fallthroughs intentional
  switch (strlen(guide)) {
    case 3:
      mask1 = (guide[2] == 'T') ? cond_bit : alt_bit;
    case 2:
      mask2 = (guide[1] == 'T') ? cond_bit : alt_bit;
    case 1:
      mask3 = (guide[0] == 'T') ? cond_bit : alt_bit;
      break;
    case 0:
      break;
    default:
      LOG(FATAL) << "OAT: bad case in OpIT";
  }
  mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
         (1 << (3 - strlen(guide)));
  return NewLIR2(kThumb2It, code, mask);
}
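
// Worked example of the mask construction above: OpIT(kCondHi, "E") (used in
// GenCmpLong below) maps to kArmCondHi (0x8), so cond_bit is 0 and alt_bit is 1.
// With a one-character guide the mask becomes (alt_bit << 3) | (1 << 2) == 0b1100,
// i.e. an ITE HI block: the first instruction in the shadow executes when HI
// holds and the second when it does not.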

/*
 * 64-bit 3way compare function.
 *     mov   rX, #-1
 *     cmp   op1hi, op2hi
 *     blt   done
 *     bgt   flip
 *     sub   rX, op1lo, op2lo (treat as unsigned)
 *     beq   done
 *     ite   hi
 *     mov(hi)   rX, #-1
 *     mov(!hi)  rX, #1
 * flip:
 *     neg   rX
 * done:
 */
void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LIR* target1;
  LIR* target2;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  int t_reg = AllocTemp();
  LoadConstant(t_reg, -1);
  OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
  LIR* branch1 = OpCondBranch(kCondLt, NULL);
  LIR* branch2 = OpCondBranch(kCondGt, NULL);
  OpRegRegReg(kOpSub, t_reg, rl_src1.low_reg, rl_src2.low_reg);
  LIR* branch3 = OpCondBranch(kCondEq, NULL);

  OpIT(kCondHi, "E");
  NewLIR2(kThumb2MovI8M, t_reg, ModifiedImmediate(-1));
  LoadConstant(t_reg, 1);
  GenBarrier();

  target2 = NewLIR0(kPseudoTargetLabel);
  OpRegReg(kOpNeg, t_reg, t_reg);

  target1 = NewLIR0(kPseudoTargetLabel);

  RegLocation rl_temp = LocCReturn();  // Just using as template, will change
  rl_temp.low_reg = t_reg;
  StoreValue(rl_dest, rl_temp);
  FreeTemp(t_reg);

  branch1->target = target1;
  branch2->target = target2;
  branch3->target = branch1->target;
}
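
// Trace of the sequence above: if op1hi > op2hi, branch2 reaches "flip" with
// t_reg still -1, and the negate produces the final +1. If the high words are
// equal and op1lo >u op2lo, the ITE HI block leaves -1 (otherwise +1) and the
// fall-through negate flips it, giving +1 when op1 is greater and -1 otherwise.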

void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
                                          int64_t val, ConditionCode ccode) {
  int32_t val_lo = Low32Bits(val);
  int32_t val_hi = High32Bits(val);
  DCHECK_GE(ModifiedImmediate(val_lo), 0);
  DCHECK_GE(ModifiedImmediate(val_hi), 0);
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  int32_t low_reg = rl_src1.low_reg;
  int32_t high_reg = rl_src1.high_reg;

  switch (ccode) {
    case kCondEq:
    case kCondNe:
      LIR* target;
      ConditionCode condition;
      if (ccode == kCondEq) {
        target = not_taken;
        condition = kCondEq;
      } else {
        target = taken;
        condition = kCondNe;
      }
      if (val == 0) {
        int t_reg = AllocTemp();
        NewLIR4(kThumb2OrrRRRs, t_reg, low_reg, high_reg, 0);
        FreeTemp(t_reg);
        OpCondBranch(condition, taken);
        return;
      }
      OpCmpImmBranch(kCondNe, high_reg, val_hi, target);
      break;
    case kCondLt:
      OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
      ccode = kCondCc;
      break;
    case kCondLe:
      OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
      ccode = kCondLs;
      break;
    case kCondGt:
      OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
      ccode = kCondHi;
      break;
    case kCondGe:
      OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
      ccode = kCondCs;
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpCmpImmBranch(ccode, low_reg, val_lo, taken);
}
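
// Note on the condition rewriting above: the high words decide a signed 64-bit
// comparison, so they are compared with signed conditions. When the high words
// are equal, the result depends only on an unsigned comparison of the low words,
// which is why ccode is rewritten to the unsigned forms (kCondCc, kCondLs,
// kCondHi, kCondCs) before the final low-word compare-and-branch.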

void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  RegLocation rl_result;
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  rl_src = LoadValue(rl_src, kCoreReg);
  if (mir->ssa_rep->num_uses == 1) {
    // CONST case
    int true_val = mir->dalvikInsn.vB;
    int false_val = mir->dalvikInsn.vC;
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    if ((true_val == 1) && (false_val == 0)) {
      OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, 1);
      OpIT(kCondCc, "");
      LoadConstant(rl_result.low_reg, 0);
      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
    } else if (InexpensiveConstantInt(true_val) && InexpensiveConstantInt(false_val)) {
      OpRegImm(kOpCmp, rl_src.low_reg, 0);
      OpIT(kCondEq, "E");
      LoadConstant(rl_result.low_reg, true_val);
      LoadConstant(rl_result.low_reg, false_val);
      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
    } else {
      // Unlikely case - could be tuned.
      int t_reg1 = AllocTemp();
      int t_reg2 = AllocTemp();
      LoadConstant(t_reg1, true_val);
      LoadConstant(t_reg2, false_val);
      OpRegImm(kOpCmp, rl_src.low_reg, 0);
      OpIT(kCondEq, "E");
      OpRegCopy(rl_result.low_reg, t_reg1);
      OpRegCopy(rl_result.low_reg, t_reg2);
      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
    }
  } else {
    // MOVE case
    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
    rl_true = LoadValue(rl_true, kCoreReg);
    rl_false = LoadValue(rl_false, kCoreReg);
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegImm(kOpCmp, rl_src.low_reg, 0);
    if (rl_result.low_reg == rl_true.low_reg) {  // Is the "true" case already in place?
      OpIT(kCondNe, "");
      OpRegCopy(rl_result.low_reg, rl_false.low_reg);
    } else if (rl_result.low_reg == rl_false.low_reg) {  // False case in place?
      OpIT(kCondEq, "");
      OpRegCopy(rl_result.low_reg, rl_true.low_reg);
    } else {  // Normal - select between the two.
      OpIT(kCondEq, "E");
      OpRegCopy(rl_result.low_reg, rl_true.low_reg);
      OpRegCopy(rl_result.low_reg, rl_false.low_reg);
    }
    GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
  }
  StoreValue(rl_dest, rl_result);
}

void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  // Normalize such that if either operand is constant, src2 will be constant.
  ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
  if (rl_src1.is_const) {
    RegLocation rl_temp = rl_src1;
    rl_src1 = rl_src2;
    rl_src2 = rl_temp;
    ccode = FlipComparisonOrder(ccode);
  }
  if (rl_src2.is_const) {
    RegLocation rl_temp = UpdateLocWide(rl_src2);
    // Do special compare/branch against simple const operand if not already in registers.
    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    if ((rl_temp.location != kLocPhysReg) &&
        ((ModifiedImmediate(Low32Bits(val)) >= 0) && (ModifiedImmediate(High32Bits(val)) >= 0))) {
      GenFusedLongCmpImmBranch(bb, rl_src1, val, ccode);
      return;
    }
  }
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
  switch (ccode) {
    case kCondEq:
      OpCondBranch(kCondNe, not_taken);
      break;
    case kCondNe:
      OpCondBranch(kCondNe, taken);
      break;
    case kCondLt:
      OpCondBranch(kCondLt, taken);
      OpCondBranch(kCondGt, not_taken);
      ccode = kCondCc;
      break;
    case kCondLe:
      OpCondBranch(kCondLt, taken);
      OpCondBranch(kCondGt, not_taken);
      ccode = kCondLs;
      break;
    case kCondGt:
      OpCondBranch(kCondGt, taken);
      OpCondBranch(kCondLt, not_taken);
      ccode = kCondHi;
      break;
    case kCondGe:
      OpCondBranch(kCondGt, taken);
      OpCondBranch(kCondLt, not_taken);
      ccode = kCondCs;
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
  OpCondBranch(ccode, taken);
}

/*
 * Generate a register comparison to an immediate and branch. Caller
 * is responsible for setting branch target field.
 */
LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, int check_value,
                                LIR* target) {
  LIR* branch;
  int mod_imm;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
  /*
   * A common use of OpCmpImmBranch is for null checks, and using the Thumb 16-bit
   * compare-and-branch if zero is ideal if it will reach. However, because null checks
   * branch forward to a launch pad, they will frequently not reach - and thus have to
   * be converted to a long form during assembly (which will trigger another assembly
   * pass). Here we estimate the branch distance for checks, and if large directly
   * generate the long form in an attempt to avoid an extra assembly pass.
   * TODO: consider interspersing launchpads in code following unconditional branches.
   */
  bool skip = ((target != NULL) && (target->opcode == kPseudoThrowTarget));
  skip &= ((cu_->code_item->insns_size_in_code_units_ - current_dalvik_offset_) > 64);
  if (!skip && (ARM_LOWREG(reg)) && (check_value == 0) &&
      ((arm_cond == kArmCondEq) || (arm_cond == kArmCondNe))) {
    branch = NewLIR2((arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
                     reg, 0);
  } else {
    mod_imm = ModifiedImmediate(check_value);
    if (ARM_LOWREG(reg) && ((check_value & 0xff) == check_value)) {
      NewLIR2(kThumbCmpRI8, reg, check_value);
    } else if (mod_imm >= 0) {
      NewLIR2(kThumb2CmpRI8M, reg, mod_imm);
    } else {
      int t_reg = AllocTemp();
      LoadConstant(t_reg, check_value);
      OpRegReg(kOpCmp, reg, t_reg);
    }
    branch = NewLIR2(kThumbBCond, 0, arm_cond);
  }
  branch->target = target;
  return branch;
}
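
// For reference (per the Thumb-2 "modified immediate" encoding): ModifiedImmediate()
// returns a non-negative imm12 encoding when the constant is representable - roughly,
// an 8-bit value, that byte replicated into both halfwords or all four bytes, or an
// 8-bit value rotated within the 32-bit word - and a negative value otherwise, which
// is why callers test for mod_imm >= 0 before emitting the immediate form.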

LIR* ArmMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) {
  LIR* res;
  int opcode;
  if (ARM_FPREG(r_dest) || ARM_FPREG(r_src))
    return OpFpRegCopy(r_dest, r_src);
  if (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src))
    opcode = kThumbMovRR;
  else if (!ARM_LOWREG(r_dest) && !ARM_LOWREG(r_src))
    opcode = kThumbMovRR_H2H;
  else if (ARM_LOWREG(r_dest))
    opcode = kThumbMovRR_H2L;
  else
    opcode = kThumbMovRR_L2H;
  res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

LIR* ArmMir2Lir::OpRegCopy(int r_dest, int r_src) {
  LIR* res = OpRegCopyNoInsert(r_dest, r_src);
  AppendLIR(res);
  return res;
}

void ArmMir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
                               int src_hi) {
  bool dest_fp = ARM_FPREG(dest_lo) && ARM_FPREG(dest_hi);
  bool src_fp = ARM_FPREG(src_lo) && ARM_FPREG(src_hi);
  DCHECK_EQ(ARM_FPREG(src_lo), ARM_FPREG(src_hi));
  DCHECK_EQ(ARM_FPREG(dest_lo), ARM_FPREG(dest_hi));
  if (dest_fp) {
    if (src_fp) {
      OpRegCopy(S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
    } else {
      NewLIR3(kThumb2Fmdrr, S2d(dest_lo, dest_hi), src_lo, src_hi);
    }
  } else {
    if (src_fp) {
      NewLIR3(kThumb2Fmrrd, dest_lo, dest_hi, S2d(src_lo, src_hi));
    } else {
      // Handle overlap
      if (src_hi == dest_lo) {
        OpRegCopy(dest_hi, src_hi);
        OpRegCopy(dest_lo, src_lo);
      } else {
        OpRegCopy(dest_lo, src_lo);
        OpRegCopy(dest_hi, src_hi);
      }
    }
  }
}

// Table of magic divisors
struct MagicTable {
  uint32_t magic;
  uint32_t shift;
  DividePattern pattern;
};

static const MagicTable magic_table[] = {
  {0, 0, DivideNone},        // 0
  {0, 0, DivideNone},        // 1
  {0, 0, DivideNone},        // 2
  {0x55555556, 0, Divide3},  // 3
  {0, 0, DivideNone},        // 4
  {0x66666667, 1, Divide5},  // 5
  {0x2AAAAAAB, 0, Divide3},  // 6
  {0x92492493, 2, Divide7},  // 7
  {0, 0, DivideNone},        // 8
  {0x38E38E39, 1, Divide5},  // 9
  {0x66666667, 2, Divide5},  // 10
  {0x2E8BA2E9, 1, Divide5},  // 11
  {0x2AAAAAAB, 1, Divide5},  // 12
  {0x4EC4EC4F, 2, Divide5},  // 13
  {0x92492493, 3, Divide7},  // 14
  {0x88888889, 3, Divide7},  // 15
};

// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns
  if (!is_div) {
    return false;
  }

  int r_magic = AllocTemp();
  LoadConstant(r_magic, magic_table[lit].magic);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int r_hi = AllocTemp();
  int r_lo = AllocTemp();
  NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.low_reg);
  switch (pattern) {
    case Divide3:
      OpRegRegRegShift(kOpSub, rl_result.low_reg, r_hi,
                       rl_src.low_reg, EncodeShift(kArmAsr, 31));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, r_lo, rl_src.low_reg, 31);
      OpRegRegRegShift(kOpRsub, rl_result.low_reg, r_lo, r_hi,
                       EncodeShift(kArmAsr, magic_table[lit].shift));
      break;
    case Divide7:
      OpRegReg(kOpAdd, r_hi, rl_src.low_reg);
      OpRegRegImm(kOpAsr, r_lo, rl_src.low_reg, 31);
      OpRegRegRegShift(kOpRsub, rl_result.low_reg, r_lo, r_hi,
                       EncodeShift(kArmAsr, magic_table[lit].shift));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
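
// Worked example of the Divide3 pattern: for x = 10, smull gives
// hi = (0x55555556 * 10) >> 32 = 3, and the correction term (x >> 31) is 0,
// so the result is 3 = 10/3. For negative x the arithmetic right shift yields
// -1, and subtracting it supplies the +1 needed to truncate toward zero
// (e.g. x = -7 gives hi = -3, result -3 - (-1) = -2).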

LIR* ArmMir2Lir::GenRegMemCheck(ConditionCode c_code,
                                int reg1, int base, int offset, ThrowKind kind) {
  LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
  return NULL;
}

RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, int reg1, int lit,
                                     bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Put the literal in a temp.
  int lit_temp = AllocTemp();
  LoadConstant(lit_temp, lit);
  // Use the generic case for div/rem with arg2 in a register.
  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
  FreeTemp(lit_temp);

  return rl_result;
}

RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2,
                                  bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    // Simple case, use sdiv instruction.
    OpRegRegReg(kOpDiv, rl_result.low_reg, reg1, reg2);
  } else {
    // Remainder case, use the following code:
    // temp = reg1 / reg2      - integer division
    // temp = temp * reg2
    // dest = reg1 - temp

    int temp = AllocTemp();
    OpRegRegReg(kOpDiv, temp, reg1, reg2);
    OpRegReg(kOpMul, temp, reg2);
    OpRegRegReg(kOpSub, rl_result.low_reg, reg1, temp);
    FreeTemp(temp);
  }

  return rl_result;
}

bool ArmMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
  DCHECK_EQ(cu_->instruction_set, kThumb2);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = info->args[1];
  rl_src1 = LoadValue(rl_src1, kCoreReg);
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
  OpIT((is_min) ? kCondGt : kCondLt, "E");
  OpRegReg(kOpMov, rl_result.low_reg, rl_src2.low_reg);
  OpRegReg(kOpMov, rl_result.low_reg, rl_src1.low_reg);
  GenBarrier();
  StoreValue(rl_dest, rl_result);
  return true;
}
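
// For min, the sequence above is roughly:
//   cmp   src1, src2
//   ite   gt
//   movgt result, src2   // src1 > src2: take src2
//   movle result, src1   // otherwise: take src1
// max simply flips the IT condition to lt.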

bool ArmMir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  rl_src_address.wide = 0;  // ignore high half in info->args[1]
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (size == kLong) {
    // Fake unaligned LDRD by two unaligned LDR instructions on ARMv7 with SCTLR.A set to 0.
    if (rl_address.low_reg != rl_result.low_reg) {
      LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, kWord, INVALID_SREG);
      LoadBaseDisp(rl_address.low_reg, 4, rl_result.high_reg, kWord, INVALID_SREG);
    } else {
      LoadBaseDisp(rl_address.low_reg, 4, rl_result.high_reg, kWord, INVALID_SREG);
      LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, kWord, INVALID_SREG);
    }
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
    // Unaligned load with LDR and LDRSH is allowed on ARMv7 with SCTLR.A set to 0.
    LoadBaseDisp(rl_address.low_reg, 0, rl_result.low_reg, size, INVALID_SREG);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool ArmMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  rl_src_address.wide = 0;  // ignore high half in info->args[1]
  RegLocation rl_src_value = info->args[2];  // [size] value
  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
  if (size == kLong) {
    // Fake unaligned STRD by two unaligned STR instructions on ARMv7 with SCTLR.A set to 0.
    RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
    StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, kWord);
    StoreBaseDisp(rl_address.low_reg, 4, rl_value.high_reg, kWord);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == kWord);
    // Unaligned store with STR and STRH is allowed on ARMv7 with SCTLR.A set to 0.
    RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
    StoreBaseDisp(rl_address.low_reg, 0, rl_value.low_reg, size);
  }
  return true;
}

void ArmMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) {
  LOG(FATAL) << "Unexpected use of OpLea for Arm";
}

void ArmMir2Lir::OpTlsCmp(ThreadOffset offset, int val) {
  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
}

bool ArmMir2Lir::GenInlinedCas32(CallInfo* info, bool need_write_barrier) {
  DCHECK_EQ(cu_->instruction_set, kThumb2);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_src_expected = info->args[4];  // int or Object
  RegLocation rl_src_new_value = info->args[5];  // int or Object
  RegLocation rl_dest = InlineTarget(info);  // boolean place for result

  // Release store semantics, get the barrier out of the way. TODO: revisit
  GenMemBarrier(kStoreLoad);

  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_new_value = LoadValue(rl_src_new_value, kCoreReg);

  if (need_write_barrier && !mir_graph_->IsConstantNullRef(rl_new_value)) {
    // Mark card for object assuming new value is stored.
    MarkGCCard(rl_new_value.low_reg, rl_object.low_reg);
  }

  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);

  int r_ptr = AllocTemp();
  OpRegRegReg(kOpAdd, r_ptr, rl_object.low_reg, rl_offset.low_reg);

  // Free now unneeded rl_object and rl_offset to give more temps.
  ClobberSReg(rl_object.s_reg_low);
  FreeTemp(rl_object.low_reg);
  ClobberSReg(rl_offset.s_reg_low);
  FreeTemp(rl_offset.low_reg);

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  LoadConstant(rl_result.low_reg, 0);  // r_result := 0

  // while ([r_ptr] == rExpected && r_result == 0) {
  //   [r_ptr] <- r_new_value && r_result := success ? 0 : 1
  //   r_result ^= 1
  // }
  int r_old_value = AllocTemp();
  LIR* target = NewLIR0(kPseudoTargetLabel);
  NewLIR3(kThumb2Ldrex, r_old_value, r_ptr, 0);

  RegLocation rl_expected = LoadValue(rl_src_expected, kCoreReg);
  OpRegReg(kOpCmp, r_old_value, rl_expected.low_reg);
  FreeTemp(r_old_value);  // Now unneeded.
  OpIT(kCondEq, "TT");
  NewLIR4(kThumb2Strex /* eq */, rl_result.low_reg, rl_new_value.low_reg, r_ptr, 0);
  FreeTemp(r_ptr);  // Now unneeded.
  OpRegImm(kOpXor /* eq */, rl_result.low_reg, 1);
  OpRegImm(kOpCmp /* eq */, rl_result.low_reg, 0);
  OpCondBranch(kCondEq, target);

  StoreValue(rl_dest, rl_result);

  return true;
}
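
// Note on the loop above: strex writes 0 to its status register on success and 1
// when the exclusive store fails, so the conditional eor leaves rl_result == 1 on
// a successful swap. If the loaded value does not match the expected value, the
// IT shadow is skipped entirely and rl_result keeps its initial 0, reporting failure.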

LIR* ArmMir2Lir::OpPcRelLoad(int reg, LIR* target) {
  return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg, 0, 0, 0, 0, target);
}

LIR* ArmMir2Lir::OpVldm(int rBase, int count) {
  return NewLIR3(kThumb2Vldms, rBase, fr0, count);
}

LIR* ArmMir2Lir::OpVstm(int rBase, int count) {
  return NewLIR3(kThumb2Vstms, rBase, fr0, count);
}

void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                               RegLocation rl_result, int lit,
                                               int first_bit, int second_bit) {
  OpRegRegRegShift(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg,
                   EncodeShift(kArmLsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
  }
}

void ArmMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) {
  int t_reg = AllocTemp();
  NewLIR4(kThumb2OrrRRRs, t_reg, reg_lo, reg_hi, 0);
  FreeTemp(t_reg);
  GenCheck(kCondEq, kThrowDivZero);
}

// Test suspend flag, return target of taken suspend branch
LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
  NewLIR2(kThumbSubRI8, rARM_SUSPEND, 1);
  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
}

// Decrement register and branch on condition
LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) {
  // Combine sub & test using sub setflags encoding here
  NewLIR3(kThumb2SubsRRI12, reg, reg, 1);
  return OpCondBranch(c_code, target);
}

void ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  int dmb_flavor;
  // TODO: revisit Arm barrier kinds
  switch (barrier_kind) {
    case kLoadStore: dmb_flavor = kSY; break;
    case kLoadLoad: dmb_flavor = kSY; break;
    case kStoreStore: dmb_flavor = kST; break;
    case kStoreLoad: dmb_flavor = kSY; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }
  LIR* dmb = NewLIR1(kThumb2Dmb, dmb_flavor);
  dmb->u.m.def_mask = ENCODE_ALL;
#endif
}
void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int z_reg = AllocTemp();
  LoadConstantNoClobber(z_reg, 0);
  // Check for destructive overlap
  if (rl_result.low_reg == rl_src.high_reg) {
    // Save the source high word before the subtract below overwrites it.
    int t_reg = AllocTemp();
    OpRegCopy(t_reg, rl_src.high_reg);
    OpRegRegReg(kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
    OpRegRegReg(kOpSbc, rl_result.high_reg, z_reg, t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
    OpRegRegReg(kOpSbc, rl_result.high_reg, z_reg, rl_src.high_reg);
  }
  FreeTemp(z_reg);
  StoreValueWide(rl_dest, rl_result);
}


/*
 * Check to see if a result pair has a misaligned overlap with an operand pair. This
 * is not usual for dx to generate, but it is legal (for now). In a future rev of
 * dex, we'll want to make this case illegal.
 */
bool ArmMir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest) {
  DCHECK(rl_src.wide);
  DCHECK(rl_dest.wide);
  return (abs(mir_graph_->SRegToVReg(rl_src.s_reg_low) - mir_graph_->SRegToVReg(rl_dest.s_reg_low)) == 1);
}

void ArmMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  /*
   * To pull off inline multiply, we have a worst-case requirement of 8 temporary
   * registers. Normally for Arm, we get 5. We can get to 6 by including
   * lr in the temp set. The only problematic case is all operands and result are
   * distinct, and none have been promoted. In that case, we can succeed by aggressively
   * freeing operand temp registers after they are no longer needed. All other cases
   * can proceed normally. We'll just punt on the case of the result having a misaligned
   * overlap with either operand and send that case to a runtime handler.
   */
  RegLocation rl_result;
  if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) {
    ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
    FlushAllRegs();
    CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
    rl_result = GetReturnWide(false);
    StoreValueWide(rl_dest, rl_result);
    return;
  }
  // Temporarily add LR to the temp pool, and assign it to tmp1
  MarkTemp(rARM_LR);
  FreeTemp(rARM_LR);
  int tmp1 = rARM_LR;
  LockTemp(rARM_LR);

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);

  bool special_case = true;
  // If operands are the same, or any pair has been promoted we're not the special case.
  if ((rl_src1.s_reg_low == rl_src2.s_reg_low) ||
      (!IsTemp(rl_src1.low_reg) && !IsTemp(rl_src1.high_reg)) ||
      (!IsTemp(rl_src2.low_reg) && !IsTemp(rl_src2.high_reg))) {
    special_case = false;
  }
  // Tuning: if rl_dest has been promoted and is *not* either operand, could use directly.
  int res_lo = AllocTemp();
  int res_hi;
  if (rl_src1.low_reg == rl_src2.low_reg) {
    res_hi = AllocTemp();
    NewLIR3(kThumb2MulRRR, tmp1, rl_src1.low_reg, rl_src1.high_reg);
    NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src1.low_reg, rl_src1.low_reg);
    OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
  } else {
    // In the special case, all temps are now allocated
    NewLIR3(kThumb2MulRRR, tmp1, rl_src2.low_reg, rl_src1.high_reg);
    if (special_case) {
      DCHECK_NE(rl_src1.low_reg, rl_src2.low_reg);
      DCHECK_NE(rl_src1.high_reg, rl_src2.high_reg);
      FreeTemp(rl_src1.high_reg);
    }
    res_hi = AllocTemp();

    NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src2.low_reg, rl_src1.low_reg);
    NewLIR4(kThumb2Mla, tmp1, rl_src1.low_reg, rl_src2.high_reg, tmp1);
    NewLIR4(kThumb2AddRRR, res_hi, tmp1, res_hi, 0);
    if (special_case) {
      FreeTemp(rl_src1.low_reg);
      Clobber(rl_src1.low_reg);
      Clobber(rl_src1.high_reg);
    }
  }
  FreeTemp(tmp1);
  rl_result = GetReturnWide(false);  // Just using as a template.
  rl_result.low_reg = res_lo;
  rl_result.high_reg = res_hi;
  StoreValueWide(rl_dest, rl_result);
  // Now, restore lr to its non-temp status.
  Clobber(rARM_LR);
  UnmarkTemp(rARM_LR);
}
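
// The inline path above computes the low 64 bits of the product as
//   (a_hi*2^32 + a_lo) * (b_hi*2^32 + b_lo) mod 2^64
//     = umull(a_lo, b_lo) + ((a_lo*b_hi + a_hi*b_lo) << 32),
// i.e. umull supplies the full lo x lo product, the two cross products
// (mul + mla into tmp1) are folded into the high result word, and the
// a_hi*b_hi term only affects bits above 63, so it is dropped.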

void ArmMir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenAddLong for Arm";
}

void ArmMir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenSubLong for Arm";
}

void ArmMir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenAndLong for Arm";
}

void ArmMir2Lir::GenOrLong(RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenOrLong for Arm";
}

void ArmMir2Lir::GenXorLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenXorLong for Arm";
}

/*
 * Generate array load
 */
void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale) {
  RegisterClass reg_class = oat_reg_class_by_size(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  bool constant_index = rl_index.is_const;
  rl_array = LoadValue(rl_array, kCoreReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  if (rl_dest.wide) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  /* null object? */
  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  int reg_len = INVALID_REG;
  if (needs_range_check) {
    reg_len = AllocTemp();
    /* Get len */
    LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
  }
  if (rl_dest.wide || rl_dest.fp || constant_index) {
    int reg_ptr;
    if (constant_index) {
      reg_ptr = rl_array.low_reg;  // NOTE: must not alter reg_ptr in constant case.
    } else {
      // No special indexed operation, lea + load w/ displacement
      reg_ptr = AllocTemp();
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
                       EncodeShift(kArmLsl, scale));
      FreeTemp(rl_index.low_reg);
    }
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      if (constant_index) {
        GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
      } else {
        GenRegRegCheck(kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
      }
      FreeTemp(reg_len);
    }
    if (rl_dest.wide) {
      LoadBaseDispWide(reg_ptr, data_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
      if (!constant_index) {
        FreeTemp(reg_ptr);
      }
      StoreValueWide(rl_dest, rl_result);
    } else {
      LoadBaseDisp(reg_ptr, data_offset, rl_result.low_reg, size, INVALID_SREG);
      if (!constant_index) {
        FreeTemp(reg_ptr);
      }
      StoreValue(rl_dest, rl_result);
    }
  } else {
    // Offset base, then use indexed load
    int reg_ptr = AllocTemp();
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
    FreeTemp(rl_array.low_reg);
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      // TODO: change kCondCS to a more meaningful name, is the sense of
      // carry-set/clear flipped?
      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
      FreeTemp(reg_len);
    }
    LoadBaseIndexed(reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
    FreeTemp(reg_ptr);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Generate array store
 *
 */
void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
  RegisterClass reg_class = oat_reg_class_by_size(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  bool constant_index = rl_index.is_const;

  int data_offset;
  if (size == kLong || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  rl_array = LoadValue(rl_array, kCoreReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  int reg_ptr;
  bool allocated_reg_ptr_temp = false;
  if (constant_index) {
    reg_ptr = rl_array.low_reg;
  } else if (IsTemp(rl_array.low_reg) && !card_mark) {
    Clobber(rl_array.low_reg);
    reg_ptr = rl_array.low_reg;
  } else {
    allocated_reg_ptr_temp = true;
    reg_ptr = AllocTemp();
  }

  /* null object? */
  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  int reg_len = INVALID_REG;
  if (needs_range_check) {
    reg_len = AllocTemp();
    // NOTE: max live temps(4) here.
    /* Get len */
    LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
  }
  /* at this point, reg_ptr points to array, 2 live temps */
  if (rl_src.wide || rl_src.fp || constant_index) {
    if (rl_src.wide) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (!constant_index) {
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
                       EncodeShift(kArmLsl, scale));
    }
    if (needs_range_check) {
      if (constant_index) {
        GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
      } else {
        GenRegRegCheck(kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
      }
      FreeTemp(reg_len);
    }

    if (rl_src.wide) {
      StoreBaseDispWide(reg_ptr, data_offset, rl_src.low_reg, rl_src.high_reg);
    } else {
      StoreBaseDisp(reg_ptr, data_offset, rl_src.low_reg, size);
    }
  } else {
    /* reg_ptr -> array data */
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
    rl_src = LoadValue(rl_src, reg_class);
    if (needs_range_check) {
      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
      FreeTemp(reg_len);
    }
    StoreBaseIndexed(reg_ptr, rl_index.low_reg, rl_src.low_reg,
                     scale, size);
  }
  if (allocated_reg_ptr_temp) {
    FreeTemp(reg_ptr);
  }
  if (card_mark) {
    MarkGCCard(rl_src.low_reg, rl_array.low_reg);
  }
}

void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) {
  rl_src = LoadValueWide(rl_src, kCoreReg);
  // Per spec, we only care about low 6 bits of shift amount.
  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
  if (shift_amount == 0) {
    StoreValueWide(rl_dest, rl_src);
    return;
  }
  if (BadOverlap(rl_src, rl_dest)) {
    GenShiftOpLong(opcode, rl_dest, rl_src, rl_shift);
    return;
  }
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      if (shift_amount == 1) {
        OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg);
        OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, rl_src.high_reg);
      } else if (shift_amount == 32) {
        OpRegCopy(rl_result.high_reg, rl_src.low_reg);
        LoadConstant(rl_result.low_reg, 0);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpLsl, rl_result.high_reg, rl_src.low_reg, shift_amount - 32);
        LoadConstant(rl_result.low_reg, 0);
      } else {
        OpRegRegImm(kOpLsl, rl_result.high_reg, rl_src.high_reg, shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.high_reg, rl_result.high_reg, rl_src.low_reg,
                         EncodeShift(kArmLsr, 32 - shift_amount));
        OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, shift_amount);
      }
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      if (shift_amount == 32) {
        OpRegCopy(rl_result.low_reg, rl_src.high_reg);
        OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpAsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
        OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
      } else {
        int t_reg = AllocTemp();
        OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
                         EncodeShift(kArmLsl, 32 - shift_amount));
        FreeTemp(t_reg);
        OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
      }
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      if (shift_amount == 32) {
        OpRegCopy(rl_result.low_reg, rl_src.high_reg);
        LoadConstant(rl_result.high_reg, 0);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpLsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
        LoadConstant(rl_result.high_reg, 0);
      } else {
        int t_reg = AllocTemp();
        OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
                         EncodeShift(kArmLsl, 32 - shift_amount));
        FreeTemp(t_reg);
        OpRegRegImm(kOpLsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
      }
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  StoreValueWide(rl_dest, rl_result);
}
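
// For the shift_amount < 32 cases above, each result word is composed from both
// source words, e.g. for SHL by n:
//   result_hi = (src_hi << n) | (src_lo >> (32 - n))
//   result_lo = src_lo << n
// SHR and USHR mirror this with the carried bits flowing from the high word into
// the low word, using an arithmetic vs. logical shift of the high word.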

void ArmMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
  if ((opcode == Instruction::SUB_LONG_2ADDR) || (opcode == Instruction::SUB_LONG)) {
    if (!rl_src2.is_const) {
      // Don't bother with special handling for subtract from immediate.
      GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
      return;
    }
  } else {
    // Normalize
    if (!rl_src2.is_const) {
      DCHECK(rl_src1.is_const);
      RegLocation rl_temp = rl_src1;
      rl_src1 = rl_src2;
      rl_src2 = rl_temp;
    }
  }
  if (BadOverlap(rl_src1, rl_dest)) {
    GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
    return;
  }
  DCHECK(rl_src2.is_const);
  int64_t val = mir_graph_->ConstantValueWide(rl_src2);
  uint32_t val_lo = Low32Bits(val);
  uint32_t val_hi = High32Bits(val);
  int32_t mod_imm_lo = ModifiedImmediate(val_lo);
  int32_t mod_imm_hi = ModifiedImmediate(val_hi);

  // Only a subset of add/sub immediate instructions set carry - so bail if we don't fit
  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if ((mod_imm_lo < 0) || (mod_imm_hi < 0)) {
        GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      break;
    default:
      break;
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // NOTE: once we've done the EvalLoc on dest, we can no longer bail.
  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      NewLIR3(kThumb2AddRRI8M, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
      NewLIR3(kThumb2AdcRRI8M, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if ((val_lo != 0) || (rl_result.low_reg != rl_src1.low_reg)) {
        OpRegRegImm(kOpOr, rl_result.low_reg, rl_src1.low_reg, val_lo);
      }
      if ((val_hi != 0) || (rl_result.high_reg != rl_src1.high_reg)) {
        OpRegRegImm(kOpOr, rl_result.high_reg, rl_src1.high_reg, val_hi);
      }
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      OpRegRegImm(kOpXor, rl_result.low_reg, rl_src1.low_reg, val_lo);
      OpRegRegImm(kOpXor, rl_result.high_reg, rl_src1.high_reg, val_hi);
      break;
    case Instruction::AND_LONG:
    case Instruction::AND_LONG_2ADDR:
      if ((val_lo != 0xffffffff) || (rl_result.low_reg != rl_src1.low_reg)) {
        OpRegRegImm(kOpAnd, rl_result.low_reg, rl_src1.low_reg, val_lo);
      }
      if ((val_hi != 0xffffffff) || (rl_result.high_reg != rl_src1.high_reg)) {
        OpRegRegImm(kOpAnd, rl_result.high_reg, rl_src1.high_reg, val_hi);
      }
      break;
    case Instruction::SUB_LONG_2ADDR:
    case Instruction::SUB_LONG:
      NewLIR3(kThumb2SubRRI8M, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
      NewLIR3(kThumb2SbcRRI8M, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
      break;
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  StoreValueWide(rl_dest, rl_result);
}

}  // namespace art