/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Thumb2 ISA. */

#include "arm_lir.h"
#include "codegen_arm.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"

namespace art {

LIR* ArmMir2Lir::OpCmpBranch(ConditionCode cond, int src1,
                             int src2, LIR* target) {
  OpRegReg(kOpCmp, src1, src2);
  return OpCondBranch(cond, target);
}

/*
 * Generate a Thumb2 IT instruction, which can nullify up to
 * four subsequent instructions based on a condition and its
 * inverse.  The condition applies to the first instruction, which
 * is executed if the condition is met.  The string "guide" consists
 * of 0 to 3 chars, and applies to the 2nd through 4th instruction.
 * A "T" means the instruction is executed if the condition is
 * met, and an "E" means the instruction is executed if the condition
 * is not met.
 */
LIR* ArmMir2Lir::OpIT(ConditionCode ccode, const char* guide) {
  int mask;
  int mask3 = 0;
  int mask2 = 0;
  int mask1 = 0;
  ArmConditionCode code = ArmConditionEncoding(ccode);
  int cond_bit = code & 1;
  int alt_bit = cond_bit ^ 1;

  // Note: case fallthroughs intentional
  switch (strlen(guide)) {
    case 3:
      mask1 = (guide[2] == 'T') ? cond_bit : alt_bit;
    case 2:
      mask2 = (guide[1] == 'T') ? cond_bit : alt_bit;
    case 1:
      mask3 = (guide[0] == 'T') ? cond_bit : alt_bit;
      break;
    case 0:
      break;
    default:
      LOG(FATAL) << "OAT: bad case in OpIT";
  }
  mask = (mask3 << 3) | (mask2 << 2) | (mask1 << 1) |
         (1 << (3 - strlen(guide)));
  return NewLIR2(kThumb2It, code, mask);
}

/*
 * 64-bit 3way compare function.
 *     mov   rX, #-1
 *     cmp   op1hi, op2hi
 *     blt   done
 *     bgt   flip
 *     sub   rX, op1lo, op2lo (treat as unsigned)
 *     beq   done
 *     ite   hi
 *     mov(hi)   rX, #-1
 *     mov(!hi)  rX, #1
 * flip:
 *     neg   rX
 * done:
 */
void ArmMir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LIR* target1;
  LIR* target2;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  int t_reg = AllocTemp();
  LoadConstant(t_reg, -1);
  OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
  LIR* branch1 = OpCondBranch(kCondLt, NULL);
  LIR* branch2 = OpCondBranch(kCondGt, NULL);
  OpRegRegReg(kOpSub, t_reg, rl_src1.low_reg, rl_src2.low_reg);
  LIR* branch3 = OpCondBranch(kCondEq, NULL);

  OpIT(kCondHi, "E");
  NewLIR2(kThumb2MovImmShift, t_reg, ModifiedImmediate(-1));
  LoadConstant(t_reg, 1);
  GenBarrier();

  target2 = NewLIR0(kPseudoTargetLabel);
  OpRegReg(kOpNeg, t_reg, t_reg);

  target1 = NewLIR0(kPseudoTargetLabel);

  RegLocation rl_temp = LocCReturn();  // Just using as template, will change
  rl_temp.low_reg = t_reg;
  StoreValue(rl_dest, rl_temp);
  FreeTemp(t_reg);

  branch1->target = target1;
  branch2->target = target2;
  branch3->target = branch1->target;
}

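/*
 * Fused compare-and-branch of a 64-bit register pair against a constant whose
 * halves both fit Thumb2 modified-immediate encodings.  The high words are
 * compared first; for the ordered conditions the low words are then compared
 * using unsigned condition codes.
 */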
void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
                                          int64_t val, ConditionCode ccode) {
  int32_t val_lo = Low32Bits(val);
  int32_t val_hi = High32Bits(val);
  DCHECK_GE(ModifiedImmediate(val_lo), 0);
  DCHECK_GE(ModifiedImmediate(val_hi), 0);
  LIR* taken = &block_label_list_[bb->taken->id];
  LIR* not_taken = &block_label_list_[bb->fall_through->id];
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  int32_t low_reg = rl_src1.low_reg;
  int32_t high_reg = rl_src1.high_reg;

  switch (ccode) {
    case kCondEq:
    case kCondNe:
      LIR* target;
      ConditionCode condition;
      if (ccode == kCondEq) {
        target = not_taken;
        condition = kCondEq;
      } else {
        target = taken;
        condition = kCondNe;
      }
      if (val == 0) {
        int t_reg = AllocTemp();
        NewLIR4(kThumb2OrrRRRs, t_reg, low_reg, high_reg, 0);
        FreeTemp(t_reg);
        OpCondBranch(condition, taken);
        return;
      }
      OpCmpImmBranch(kCondNe, high_reg, val_hi, target);
      break;
    case kCondLt:
      OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
      ccode = kCondCc;
      break;
    case kCondLe:
      OpCmpImmBranch(kCondLt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondGt, high_reg, val_hi, not_taken);
      ccode = kCondLs;
      break;
    case kCondGt:
      OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
      ccode = kCondHi;
      break;
    case kCondGe:
      OpCmpImmBranch(kCondGt, high_reg, val_hi, taken);
      OpCmpImmBranch(kCondLt, high_reg, val_hi, not_taken);
      ccode = kCondCs;
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpCmpImmBranch(ccode, low_reg, val_lo, taken);
}

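/*
 * Lower a select (conditional move) MIR.  The CONST form materializes one of
 * two immediates depending on the comparison result; the MOVE form copies one
 * of two registers.  Each variant uses an IT block and is closed with
 * GenBarrier() so later passes cannot schedule into the IT shadow.
 */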
void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  RegLocation rl_result;
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  // Temporary debugging code
  int dest_sreg = mir->ssa_rep->defs[0];
  if ((dest_sreg < 0) || (dest_sreg >= mir_graph_->GetNumSSARegs())) {
    LOG(INFO) << "Bad target sreg: " << dest_sreg << ", in "
              << PrettyMethod(cu_->method_idx, *cu_->dex_file);
    LOG(INFO) << "at dex offset 0x" << std::hex << mir->offset;
    LOG(INFO) << "vreg = " << mir_graph_->SRegToVReg(dest_sreg);
    LOG(INFO) << "num uses = " << mir->ssa_rep->num_uses;
    if (mir->ssa_rep->num_uses == 1) {
      LOG(INFO) << "CONST case, vals = " << mir->dalvikInsn.vB << ", " << mir->dalvikInsn.vC;
    } else {
      LOG(INFO) << "MOVE case, operands = " << mir->ssa_rep->uses[1] << ", "
                << mir->ssa_rep->uses[2];
    }
    CHECK(false) << "Invalid target sreg on Select.";
  }
  // End temporary debugging code
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  rl_src = LoadValue(rl_src, kCoreReg);
  if (mir->ssa_rep->num_uses == 1) {
    // CONST case
    int true_val = mir->dalvikInsn.vB;
    int false_val = mir->dalvikInsn.vC;
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    if ((true_val == 1) && (false_val == 0)) {
      OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, 1);
      OpIT(kCondCc, "");
      LoadConstant(rl_result.low_reg, 0);
      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
    } else if (InexpensiveConstantInt(true_val) && InexpensiveConstantInt(false_val)) {
      OpRegImm(kOpCmp, rl_src.low_reg, 0);
      OpIT(kCondEq, "E");
      LoadConstant(rl_result.low_reg, true_val);
      LoadConstant(rl_result.low_reg, false_val);
      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
    } else {
      // Unlikely case - could be tuned.
      int t_reg1 = AllocTemp();
      int t_reg2 = AllocTemp();
      LoadConstant(t_reg1, true_val);
      LoadConstant(t_reg2, false_val);
      OpRegImm(kOpCmp, rl_src.low_reg, 0);
      OpIT(kCondEq, "E");
      OpRegCopy(rl_result.low_reg, t_reg1);
      OpRegCopy(rl_result.low_reg, t_reg2);
      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
    }
  } else {
    // MOVE case
    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
    rl_true = LoadValue(rl_true, kCoreReg);
    rl_false = LoadValue(rl_false, kCoreReg);
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegImm(kOpCmp, rl_src.low_reg, 0);
    if (rl_result.low_reg == rl_true.low_reg) {  // Is the "true" case already in place?
      OpIT(kCondNe, "");
      OpRegCopy(rl_result.low_reg, rl_false.low_reg);
    } else if (rl_result.low_reg == rl_false.low_reg) {  // False case in place?
      OpIT(kCondEq, "");
      OpRegCopy(rl_result.low_reg, rl_true.low_reg);
    } else {  // Normal - select between the two.
      OpIT(kCondEq, "E");
      OpRegCopy(rl_result.low_reg, rl_true.low_reg);
      OpRegCopy(rl_result.low_reg, rl_false.low_reg);
    }
    GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact
  }
  StoreValue(rl_dest, rl_result);
}

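/*
 * Fused long compare-and-branch on register operands.  A constant operand is
 * normalized to rl_src2 so the immediate form above can be used when the
 * constant is not already in registers and both halves fit modified-immediate
 * encodings.
 */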
void ArmMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  // Normalize such that if either operand is constant, src2 will be constant.
  ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
  if (rl_src1.is_const) {
    RegLocation rl_temp = rl_src1;
    rl_src1 = rl_src2;
    rl_src2 = rl_temp;
    ccode = FlipComparisonOrder(ccode);
  }
  if (rl_src2.is_const) {
    RegLocation rl_temp = UpdateLocWide(rl_src2);
    // Do special compare/branch against simple const operand if not already in registers.
    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    if ((rl_temp.location != kLocPhysReg) &&
        ((ModifiedImmediate(Low32Bits(val)) >= 0) && (ModifiedImmediate(High32Bits(val)) >= 0))) {
      GenFusedLongCmpImmBranch(bb, rl_src1, val, ccode);
      return;
    }
  }
  LIR* taken = &block_label_list_[bb->taken->id];
  LIR* not_taken = &block_label_list_[bb->fall_through->id];
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  OpRegReg(kOpCmp, rl_src1.high_reg, rl_src2.high_reg);
  switch (ccode) {
    case kCondEq:
      OpCondBranch(kCondNe, not_taken);
      break;
    case kCondNe:
      OpCondBranch(kCondNe, taken);
      break;
    case kCondLt:
      OpCondBranch(kCondLt, taken);
      OpCondBranch(kCondGt, not_taken);
      ccode = kCondCc;
      break;
    case kCondLe:
      OpCondBranch(kCondLt, taken);
      OpCondBranch(kCondGt, not_taken);
      ccode = kCondLs;
      break;
    case kCondGt:
      OpCondBranch(kCondGt, taken);
      OpCondBranch(kCondLt, not_taken);
      ccode = kCondHi;
      break;
    case kCondGe:
      OpCondBranch(kCondGt, taken);
      OpCondBranch(kCondLt, not_taken);
      ccode = kCondCs;
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
  OpCondBranch(ccode, taken);
}

/*
 * Generate a register comparison to an immediate and branch.  Caller
 * is responsible for setting branch target field.
 */
LIR* ArmMir2Lir::OpCmpImmBranch(ConditionCode cond, int reg, int check_value,
                                LIR* target) {
  LIR* branch;
  int mod_imm;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
  if ((ARM_LOWREG(reg)) && (check_value == 0) &&
      ((arm_cond == kArmCondEq) || (arm_cond == kArmCondNe))) {
    branch = NewLIR2((arm_cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
                     reg, 0);
  } else {
    mod_imm = ModifiedImmediate(check_value);
    if (ARM_LOWREG(reg) && ((check_value & 0xff) == check_value)) {
      NewLIR2(kThumbCmpRI8, reg, check_value);
    } else if (mod_imm >= 0) {
      NewLIR2(kThumb2CmpRI12, reg, mod_imm);
    } else {
      int t_reg = AllocTemp();
      LoadConstant(t_reg, check_value);
      OpRegReg(kOpCmp, reg, t_reg);
    }
    branch = NewLIR2(kThumbBCond, 0, arm_cond);
  }
  branch->target = target;
  return branch;
}

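// Build a register-to-register copy LIR without appending it to the
// instruction stream, selecting the Thumb mov encoding from the low/high/FP
// register classes of the operands.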
LIR* ArmMir2Lir::OpRegCopyNoInsert(int r_dest, int r_src) {
  LIR* res;
  int opcode;
  if (ARM_FPREG(r_dest) || ARM_FPREG(r_src))
    return OpFpRegCopy(r_dest, r_src);
  if (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src))
    opcode = kThumbMovRR;
  else if (!ARM_LOWREG(r_dest) && !ARM_LOWREG(r_src))
    opcode = kThumbMovRR_H2H;
  else if (ARM_LOWREG(r_dest))
    opcode = kThumbMovRR_H2L;
  else
    opcode = kThumbMovRR_L2H;
  res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

LIR* ArmMir2Lir::OpRegCopy(int r_dest, int r_src) {
  LIR* res = OpRegCopyNoInsert(r_dest, r_src);
  AppendLIR(res);
  return res;
}

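// Copy a 64-bit value between register pairs, using fmdrr/fmrrd transfers for
// core<->VFP moves and ordering the two word copies to cope with overlapping
// core register pairs.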
void ArmMir2Lir::OpRegCopyWide(int dest_lo, int dest_hi, int src_lo,
                               int src_hi) {
  bool dest_fp = ARM_FPREG(dest_lo) && ARM_FPREG(dest_hi);
  bool src_fp = ARM_FPREG(src_lo) && ARM_FPREG(src_hi);
  DCHECK_EQ(ARM_FPREG(src_lo), ARM_FPREG(src_hi));
  DCHECK_EQ(ARM_FPREG(dest_lo), ARM_FPREG(dest_hi));
  if (dest_fp) {
    if (src_fp) {
      OpRegCopy(S2d(dest_lo, dest_hi), S2d(src_lo, src_hi));
    } else {
      NewLIR3(kThumb2Fmdrr, S2d(dest_lo, dest_hi), src_lo, src_hi);
    }
  } else {
    if (src_fp) {
      NewLIR3(kThumb2Fmrrd, dest_lo, dest_hi, S2d(src_lo, src_hi));
    } else {
      // Handle overlap
      if (src_hi == dest_lo) {
        OpRegCopy(dest_hi, src_hi);
        OpRegCopy(dest_lo, src_lo);
      } else {
        OpRegCopy(dest_lo, src_lo);
        OpRegCopy(dest_hi, src_hi);
      }
    }
  }
}

// Table of magic divisors
struct MagicTable {
  uint32_t magic;
  uint32_t shift;
  DividePattern pattern;
};

static const MagicTable magic_table[] = {
  {0, 0, DivideNone},        // 0
  {0, 0, DivideNone},        // 1
  {0, 0, DivideNone},        // 2
  {0x55555556, 0, Divide3},  // 3
  {0, 0, DivideNone},        // 4
  {0x66666667, 1, Divide5},  // 5
  {0x2AAAAAAB, 0, Divide3},  // 6
  {0x92492493, 2, Divide7},  // 7
  {0, 0, DivideNone},        // 8
  {0x38E38E39, 1, Divide5},  // 9
  {0x66666667, 2, Divide5},  // 10
  {0x2E8BA2E9, 1, Divide5},  // 11
  {0x2AAAAAAB, 1, Divide5},  // 12
  {0x4EC4EC4F, 2, Divide5},  // 13
  {0x92492493, 3, Divide7},  // 14
  {0x88888889, 3, Divide7},  // 15
};

// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns
  if (!is_div) {
    return false;
  }

  int r_magic = AllocTemp();
  LoadConstant(r_magic, magic_table[lit].magic);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int r_hi = AllocTemp();
  int r_lo = AllocTemp();
  NewLIR4(kThumb2Smull, r_lo, r_hi, r_magic, rl_src.low_reg);
  switch (pattern) {
    case Divide3:
      OpRegRegRegShift(kOpSub, rl_result.low_reg, r_hi,
                       rl_src.low_reg, EncodeShift(kArmAsr, 31));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, r_lo, rl_src.low_reg, 31);
      OpRegRegRegShift(kOpRsub, rl_result.low_reg, r_lo, r_hi,
                       EncodeShift(kArmAsr, magic_table[lit].shift));
      break;
    case Divide7:
      OpRegReg(kOpAdd, r_hi, rl_src.low_reg);
      OpRegRegImm(kOpAsr, r_lo, rl_src.low_reg, 31);
      OpRegRegRegShift(kOpRsub, rl_result.low_reg, r_lo, r_hi,
                       EncodeShift(kArmAsr, magic_table[lit].shift));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

LIR* ArmMir2Lir::GenRegMemCheck(ConditionCode c_code,
                                int reg1, int base, int offset, ThrowKind kind) {
  LOG(FATAL) << "Unexpected use of GenRegMemCheck for Arm";
  return NULL;
}

RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, int reg1, int lit,
                                     bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
  return rl_dest;
}

RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, int reg1, int reg2,
                                  bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
  return rl_dest;
}

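// Inline min/max of two ints: compare the operands, then use a
// two-instruction IT block to move the selected value into the result.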
bool ArmMir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
  DCHECK_EQ(cu_->instruction_set, kThumb2);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = info->args[1];
  rl_src1 = LoadValue(rl_src1, kCoreReg);
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegReg(kOpCmp, rl_src1.low_reg, rl_src2.low_reg);
  OpIT((is_min) ? kCondGt : kCondLt, "E");
  OpRegReg(kOpMov, rl_result.low_reg, rl_src2.low_reg);
  OpRegReg(kOpMov, rl_result.low_reg, rl_src1.low_reg);
  GenBarrier();
  StoreValue(rl_dest, rl_result);
  return true;
}

void ArmMir2Lir::OpLea(int rBase, int reg1, int reg2, int scale, int offset) {
  LOG(FATAL) << "Unexpected use of OpLea for Arm";
}

void ArmMir2Lir::OpTlsCmp(ThreadOffset offset, int val) {
  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm";
}

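// Inline a 32-bit compare-and-swap using an ldrex/strex retry loop.  The
// result register starts at 0; the loop retries only when the loaded value
// matched the expected value but the store-exclusive failed, and the result
// ends up 1 on success and 0 on failure.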
bool ArmMir2Lir::GenInlinedCas32(CallInfo* info, bool need_write_barrier) {
  DCHECK_EQ(cu_->instruction_set, kThumb2);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_src_expected = info->args[4];  // int or Object
  RegLocation rl_src_new_value = info->args[5];  // int or Object
  RegLocation rl_dest = InlineTarget(info);  // boolean place for result

  // Release store semantics, get the barrier out of the way.  TODO: revisit
  GenMemBarrier(kStoreLoad);

  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_new_value = LoadValue(rl_src_new_value, kCoreReg);

  if (need_write_barrier && !mir_graph_->IsConstantNullRef(rl_new_value)) {
    // Mark card for object assuming new value is stored.
    MarkGCCard(rl_new_value.low_reg, rl_object.low_reg);
  }

  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);

  int r_ptr = AllocTemp();
  OpRegRegReg(kOpAdd, r_ptr, rl_object.low_reg, rl_offset.low_reg);

  // Free now unneeded rl_object and rl_offset to give more temps.
  ClobberSReg(rl_object.s_reg_low);
  FreeTemp(rl_object.low_reg);
  ClobberSReg(rl_offset.s_reg_low);
  FreeTemp(rl_offset.low_reg);

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  LoadConstant(rl_result.low_reg, 0);  // r_result := 0

  // while ([r_ptr] == rExpected && r_result == 0) {
  //   [r_ptr] <- r_new_value && r_result := success ? 0 : 1
  //   r_result ^= 1
  // }
  int r_old_value = AllocTemp();
  LIR* target = NewLIR0(kPseudoTargetLabel);
  NewLIR3(kThumb2Ldrex, r_old_value, r_ptr, 0);

  RegLocation rl_expected = LoadValue(rl_src_expected, kCoreReg);
  OpRegReg(kOpCmp, r_old_value, rl_expected.low_reg);
  FreeTemp(r_old_value);  // Now unneeded.
  OpIT(kCondEq, "TT");
  NewLIR4(kThumb2Strex /* eq */, rl_result.low_reg, rl_new_value.low_reg, r_ptr, 0);
  FreeTemp(r_ptr);  // Now unneeded.
  OpRegImm(kOpXor /* eq */, rl_result.low_reg, 1);
  OpRegImm(kOpCmp /* eq */, rl_result.low_reg, 0);
  OpCondBranch(kCondEq, target);

  StoreValue(rl_dest, rl_result);

  return true;
}

LIR* ArmMir2Lir::OpPcRelLoad(int reg, LIR* target) {
  return RawLIR(current_dalvik_offset_, kThumb2LdrPcRel12, reg, 0, 0, 0, 0, target);
}

LIR* ArmMir2Lir::OpVldm(int rBase, int count) {
  return NewLIR3(kThumb2Vldms, rBase, fr0, count);
}

LIR* ArmMir2Lir::OpVstm(int rBase, int count) {
  return NewLIR3(kThumb2Vstms, rBase, fr0, count);
}

void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                               RegLocation rl_result, int lit,
                                               int first_bit, int second_bit) {
  OpRegRegRegShift(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg,
                   EncodeShift(kArmLsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_result.low_reg, first_bit);
  }
}

void ArmMir2Lir::GenDivZeroCheck(int reg_lo, int reg_hi) {
  int t_reg = AllocTemp();
  NewLIR4(kThumb2OrrRRRs, t_reg, reg_lo, reg_hi, 0);
  FreeTemp(t_reg);
  GenCheck(kCondEq, kThrowDivZero);
}

// Test suspend flag, return target of taken suspend branch
LIR* ArmMir2Lir::OpTestSuspend(LIR* target) {
  NewLIR2(kThumbSubRI8, rARM_SUSPEND, 1);
  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
}

// Decrement register and branch on condition
LIR* ArmMir2Lir::OpDecAndBranch(ConditionCode c_code, int reg, LIR* target) {
  // Combine sub & test using sub setflags encoding here
  NewLIR3(kThumb2SubsRRI12, reg, reg, 1);
  return OpCondBranch(c_code, target);
}

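// Emit a dmb with the flavor chosen for barrier_kind; a no-op on non-SMP builds.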
void ArmMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  int dmb_flavor;
  // TODO: revisit Arm barrier kinds
  switch (barrier_kind) {
    case kLoadStore: dmb_flavor = kSY; break;
    case kLoadLoad: dmb_flavor = kSY; break;
    case kStoreStore: dmb_flavor = kST; break;
    case kStoreLoad: dmb_flavor = kSY; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }
  LIR* dmb = NewLIR1(kThumb2Dmb, dmb_flavor);
  dmb->def_mask = ENCODE_ALL;
#endif
}

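// 64-bit negate: compute 0 - src with a sub/sbc pair, with special handling
// when the result's low register overlaps the source's high register.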
void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int z_reg = AllocTemp();
  LoadConstantNoClobber(z_reg, 0);
  // Check for destructive overlap
  if (rl_result.low_reg == rl_src.high_reg) {
    int t_reg = AllocTemp();
    OpRegRegReg(kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
    OpRegRegReg(kOpSbc, rl_result.high_reg, z_reg, t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(kOpSub, rl_result.low_reg, z_reg, rl_src.low_reg);
    OpRegRegReg(kOpSbc, rl_result.high_reg, z_reg, rl_src.high_reg);
  }
  FreeTemp(z_reg);
  StoreValueWide(rl_dest, rl_result);
}

/*
 * Check to see if a result pair has a misaligned overlap with an operand pair.  This
 * is not usual for dx to generate, but it is legal (for now).  In a future rev of
 * dex, we'll want to make this case illegal.
 */
bool ArmMir2Lir::BadOverlap(RegLocation rl_src, RegLocation rl_dest) {
  DCHECK(rl_src.wide);
  DCHECK(rl_dest.wide);
  return (abs(mir_graph_->SRegToVReg(rl_src.s_reg_low) - mir_graph_->SRegToVReg(rl_dest.s_reg_low)) == 1);
}

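// 64-bit multiply: the low 32x32 product comes from umull, and the two cross
// products (lo*hi terms) are folded into the high result word.  LR is
// temporarily pressed into service as an extra temp register.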
void ArmMir2Lir::GenMulLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  /*
   * To pull off inline multiply, we have a worst-case requirement of 8 temporary
   * registers.  Normally for Arm, we get 5.  We can get to 6 by including
   * lr in the temp set.  The only problematic case is all operands and result are
   * distinct, and none have been promoted.  In that case, we can succeed by aggressively
   * freeing operand temp registers after they are no longer needed.  All other cases
   * can proceed normally.  We'll just punt on the case of the result having a misaligned
   * overlap with either operand and send that case to a runtime handler.
   */
  RegLocation rl_result;
  if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) {
    ThreadOffset func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
    FlushAllRegs();
    CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
    rl_result = GetReturnWide(false);
    StoreValueWide(rl_dest, rl_result);
    return;
  }
  // Temporarily add LR to the temp pool, and assign it to tmp1
  MarkTemp(rARM_LR);
  FreeTemp(rARM_LR);
  int tmp1 = rARM_LR;
  LockTemp(rARM_LR);

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);

  bool special_case = true;
  // If operands are the same, or any pair has been promoted we're not the special case.
  if ((rl_src1.s_reg_low == rl_src2.s_reg_low) ||
      (!IsTemp(rl_src1.low_reg) && !IsTemp(rl_src1.high_reg)) ||
      (!IsTemp(rl_src2.low_reg) && !IsTemp(rl_src2.high_reg))) {
    special_case = false;
  }
  // Tuning: if rl_dest has been promoted and is *not* either operand, could use directly.
  int res_lo = AllocTemp();
  int res_hi;
  if (rl_src1.low_reg == rl_src2.low_reg) {
    res_hi = AllocTemp();
    NewLIR3(kThumb2MulRRR, tmp1, rl_src1.low_reg, rl_src1.high_reg);
    NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src1.low_reg, rl_src1.low_reg);
    OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
  } else {
    // In the special case, all temps are now allocated
    NewLIR3(kThumb2MulRRR, tmp1, rl_src2.low_reg, rl_src1.high_reg);
    if (special_case) {
      DCHECK_NE(rl_src1.low_reg, rl_src2.low_reg);
      DCHECK_NE(rl_src1.high_reg, rl_src2.high_reg);
      FreeTemp(rl_src1.high_reg);
    }
    res_hi = AllocTemp();

    NewLIR4(kThumb2Umull, res_lo, res_hi, rl_src2.low_reg, rl_src1.low_reg);
    NewLIR4(kThumb2Mla, tmp1, rl_src1.low_reg, rl_src2.high_reg, tmp1);
    NewLIR4(kThumb2AddRRR, res_hi, tmp1, res_hi, 0);
    if (special_case) {
      FreeTemp(rl_src1.low_reg);
      Clobber(rl_src1.low_reg);
      Clobber(rl_src1.high_reg);
    }
  }
  FreeTemp(tmp1);
  rl_result = GetReturnWide(false);  // Just using as a template.
  rl_result.low_reg = res_lo;
  rl_result.high_reg = res_hi;
  StoreValueWide(rl_dest, rl_result);
  // Now, restore lr to its non-temp status.
  Clobber(rARM_LR);
  UnmarkTemp(rARM_LR);
}

void ArmMir2Lir::GenAddLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenAddLong for Arm";
}

void ArmMir2Lir::GenSubLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenSubLong for Arm";
}

void ArmMir2Lir::GenAndLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenAndLong for Arm";
}

void ArmMir2Lir::GenOrLong(RegLocation rl_dest, RegLocation rl_src1,
                           RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenOrLong for Arm";
}

void ArmMir2Lir::GenXorLong(RegLocation rl_dest, RegLocation rl_src1,
                            RegLocation rl_src2) {
  LOG(FATAL) << "Unexpected use of GenXorLong for Arm";
}

/*
 * Generate array load
 */
void ArmMir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_dest, int scale) {
  RegisterClass reg_class = oat_reg_class_by_size(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  bool constant_index = rl_index.is_const;
  rl_array = LoadValue(rl_array, kCoreReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  if (rl_dest.wide) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  /* null object? */
  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  int reg_len = INVALID_REG;
  if (needs_range_check) {
    reg_len = AllocTemp();
    /* Get len */
    LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
  }
  if (rl_dest.wide || rl_dest.fp || constant_index) {
    int reg_ptr;
    if (constant_index) {
      reg_ptr = rl_array.low_reg;  // NOTE: must not alter reg_ptr in constant case.
    } else {
      // No special indexed operation, lea + load w/ displacement
      reg_ptr = AllocTemp();
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
                       EncodeShift(kArmLsl, scale));
      FreeTemp(rl_index.low_reg);
    }
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      if (constant_index) {
        GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
      } else {
        GenRegRegCheck(kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
      }
      FreeTemp(reg_len);
    }
    if (rl_dest.wide) {
      LoadBaseDispWide(reg_ptr, data_offset, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
      if (!constant_index) {
        FreeTemp(reg_ptr);
      }
      StoreValueWide(rl_dest, rl_result);
    } else {
      LoadBaseDisp(reg_ptr, data_offset, rl_result.low_reg, size, INVALID_SREG);
      if (!constant_index) {
        FreeTemp(reg_ptr);
      }
      StoreValue(rl_dest, rl_result);
    }
  } else {
    // Offset base, then use indexed load
    int reg_ptr = AllocTemp();
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
    FreeTemp(rl_array.low_reg);
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      // TODO: change kCondCS to a more meaningful name, is the sense of
      // carry-set/clear flipped?
      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
      FreeTemp(reg_len);
    }
    LoadBaseIndexed(reg_ptr, rl_index.low_reg, rl_result.low_reg, scale, size);
    FreeTemp(reg_ptr);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Generate array store
 */
void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                             RegLocation rl_index, RegLocation rl_src, int scale) {
  RegisterClass reg_class = oat_reg_class_by_size(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  bool constant_index = rl_index.is_const;

  if (rl_src.wide) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  rl_array = LoadValue(rl_array, kCoreReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  int reg_ptr;
  if (constant_index) {
    reg_ptr = rl_array.low_reg;
  } else if (IsTemp(rl_array.low_reg)) {
    Clobber(rl_array.low_reg);
    reg_ptr = rl_array.low_reg;
  } else {
    reg_ptr = AllocTemp();
  }

  /* null object? */
  GenNullCheck(rl_array.s_reg_low, rl_array.low_reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  int reg_len = INVALID_REG;
  if (needs_range_check) {
    reg_len = AllocTemp();
    // NOTE: max live temps(4) here.
    /* Get len */
    LoadWordDisp(rl_array.low_reg, len_offset, reg_len);
  }
  /* at this point, reg_ptr points to array, 2 live temps */
  if (rl_src.wide || rl_src.fp || constant_index) {
    if (rl_src.wide) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (!constant_index) {
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.low_reg, rl_index.low_reg,
                       EncodeShift(kArmLsl, scale));
    }
    if (needs_range_check) {
      if (constant_index) {
        GenImmedCheck(kCondLs, reg_len, mir_graph_->ConstantValue(rl_index), kThrowConstantArrayBounds);
      } else {
        GenRegRegCheck(kCondLs, reg_len, rl_index.low_reg, kThrowArrayBounds);
      }
      FreeTemp(reg_len);
    }

    if (rl_src.wide) {
      StoreBaseDispWide(reg_ptr, data_offset, rl_src.low_reg, rl_src.high_reg);
    } else {
      StoreBaseDisp(reg_ptr, data_offset, rl_src.low_reg, size);
    }
  } else {
    /* reg_ptr -> array data */
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.low_reg, data_offset);
    rl_src = LoadValue(rl_src, reg_class);
    if (needs_range_check) {
      GenRegRegCheck(kCondCs, rl_index.low_reg, reg_len, kThrowArrayBounds);
      FreeTemp(reg_len);
    }
    StoreBaseIndexed(reg_ptr, rl_index.low_reg, rl_src.low_reg,
                     scale, size);
  }
  if (!constant_index) {
    FreeTemp(reg_ptr);
  }
}

/*
 * Generate array store of an object reference, with element type check and
 * GC card mark.
 */
void ArmMir2Lir::GenArrayObjPut(int opt_flags, RegLocation rl_array,
                                RegLocation rl_index, RegLocation rl_src, int scale) {
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value();

  FlushAllRegs();  // Use explicit registers
  LockCallTemps();

  int r_value = TargetReg(kArg0);  // Register holding value
  int r_array_class = TargetReg(kArg1);  // Register holding array's Class
  int r_array = TargetReg(kArg2);  // Register holding array
  int r_index = TargetReg(kArg3);  // Register holding index into array

  LoadValueDirectFixed(rl_array, r_array);  // Grab array
  LoadValueDirectFixed(rl_src, r_value);  // Grab value
  LoadValueDirectFixed(rl_index, r_index);  // Grab index

  GenNullCheck(rl_array.s_reg_low, r_array, opt_flags);  // NPE?

  // Store of null?
  LIR* null_value_check = OpCmpImmBranch(kCondEq, r_value, 0, NULL);

  // Get the array's class.
  LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
  CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElement), r_value,
                          r_array_class, true);
  // Redo LoadValues in case they didn't survive the call.
  LoadValueDirectFixed(rl_array, r_array);  // Reload array
  LoadValueDirectFixed(rl_index, r_index);  // Reload index
  LoadValueDirectFixed(rl_src, r_value);  // Reload value
  r_array_class = INVALID_REG;

  // Branch here if value to be stored == null
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_value_check->target = target;

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  int reg_len = INVALID_REG;
  if (needs_range_check) {
    reg_len = TargetReg(kArg1);
    LoadWordDisp(r_array, len_offset, reg_len);  // Get len
  }
  /* r_ptr -> array data */
  int r_ptr = AllocTemp();
  OpRegRegImm(kOpAdd, r_ptr, r_array, data_offset);
  if (needs_range_check) {
    GenRegRegCheck(kCondCs, r_index, reg_len, kThrowArrayBounds);
  }
  StoreBaseIndexed(r_ptr, r_index, r_value, scale, kWord);
  FreeTemp(r_ptr);
  FreeTemp(r_index);
  if (!mir_graph_->IsConstantNullRef(rl_src)) {
    MarkGCCard(r_value, r_array);
  }
}

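// Long shift by a compile-time constant.  A zero shift is a straight copy;
// shifts of 32 or more reduce to moves and single shifts of one half;
// otherwise each half is shifted and the bits that cross the word boundary
// are ORed in.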
void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) {
  rl_src = LoadValueWide(rl_src, kCoreReg);
  // Per spec, we only care about low 6 bits of shift amount.
  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
  if (shift_amount == 0) {
    StoreValueWide(rl_dest, rl_src);
    return;
  }
  if (BadOverlap(rl_src, rl_dest)) {
    GenShiftOpLong(opcode, rl_dest, rl_src, rl_shift);
    return;
  }
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      if (shift_amount == 1) {
        OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, rl_src.low_reg);
        OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, rl_src.high_reg);
      } else if (shift_amount == 32) {
        OpRegCopy(rl_result.high_reg, rl_src.low_reg);
        LoadConstant(rl_result.low_reg, 0);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpLsl, rl_result.high_reg, rl_src.low_reg, shift_amount - 32);
        LoadConstant(rl_result.low_reg, 0);
      } else {
        OpRegRegImm(kOpLsl, rl_result.high_reg, rl_src.high_reg, shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.high_reg, rl_result.high_reg, rl_src.low_reg,
                         EncodeShift(kArmLsr, 32 - shift_amount));
        OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, shift_amount);
      }
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      if (shift_amount == 32) {
        OpRegCopy(rl_result.low_reg, rl_src.high_reg);
        OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpAsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
        OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, 31);
      } else {
        int t_reg = AllocTemp();
        OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
                         EncodeShift(kArmLsl, 32 - shift_amount));
        FreeTemp(t_reg);
        OpRegRegImm(kOpAsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
      }
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      if (shift_amount == 32) {
        OpRegCopy(rl_result.low_reg, rl_src.high_reg);
        LoadConstant(rl_result.high_reg, 0);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpLsr, rl_result.low_reg, rl_src.high_reg, shift_amount - 32);
        LoadConstant(rl_result.high_reg, 0);
      } else {
        int t_reg = AllocTemp();
        OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.low_reg, t_reg, rl_src.high_reg,
                         EncodeShift(kArmLsl, 32 - shift_amount));
        FreeTemp(t_reg);
        OpRegRegImm(kOpLsr, rl_result.high_reg, rl_src.high_reg, shift_amount);
      }
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  StoreValueWide(rl_dest, rl_result);
}

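// Long arithmetic where one operand is a 64-bit constant.  Commutative ops
// normalize the constant to rl_src2; add/sub fall back to the generic path
// unless both halves of the constant fit modified-immediate encodings.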
void ArmMir2Lir::GenArithImmOpLong(Instruction::Code opcode,
                                   RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
  if ((opcode == Instruction::SUB_LONG_2ADDR) || (opcode == Instruction::SUB_LONG)) {
    if (!rl_src2.is_const) {
      // Don't bother with special handling for subtract from immediate.
      GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
      return;
    }
  } else {
    // Normalize
    if (!rl_src2.is_const) {
      DCHECK(rl_src1.is_const);
      RegLocation rl_temp = rl_src1;
      rl_src1 = rl_src2;
      rl_src2 = rl_temp;
    }
  }
  if (BadOverlap(rl_src1, rl_dest)) {
    GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
    return;
  }
  DCHECK(rl_src2.is_const);
  int64_t val = mir_graph_->ConstantValueWide(rl_src2);
  uint32_t val_lo = Low32Bits(val);
  uint32_t val_hi = High32Bits(val);
  int32_t mod_imm_lo = ModifiedImmediate(val_lo);
  int32_t mod_imm_hi = ModifiedImmediate(val_hi);

  // Only a subset of add/sub immediate instructions set carry - so bail if we don't fit
  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if ((mod_imm_lo < 0) || (mod_imm_hi < 0)) {
        GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      break;
    default:
      break;
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // NOTE: once we've done the EvalLoc on dest, we can no longer bail.
  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      NewLIR3(kThumb2AddRRI8, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
      NewLIR3(kThumb2AdcRRI8, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if ((val_lo != 0) || (rl_result.low_reg != rl_src1.low_reg)) {
        OpRegRegImm(kOpOr, rl_result.low_reg, rl_src1.low_reg, val_lo);
      }
      if ((val_hi != 0) || (rl_result.high_reg != rl_src1.high_reg)) {
        OpRegRegImm(kOpOr, rl_result.high_reg, rl_src1.high_reg, val_hi);
      }
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      OpRegRegImm(kOpXor, rl_result.low_reg, rl_src1.low_reg, val_lo);
      OpRegRegImm(kOpXor, rl_result.high_reg, rl_src1.high_reg, val_hi);
      break;
    case Instruction::AND_LONG:
    case Instruction::AND_LONG_2ADDR:
      if ((val_lo != 0xffffffff) || (rl_result.low_reg != rl_src1.low_reg)) {
        OpRegRegImm(kOpAnd, rl_result.low_reg, rl_src1.low_reg, val_lo);
      }
      if ((val_hi != 0xffffffff) || (rl_result.high_reg != rl_src1.high_reg)) {
        OpRegRegImm(kOpAnd, rl_result.high_reg, rl_src1.high_reg, val_hi);
      }
      break;
    case Instruction::SUB_LONG_2ADDR:
    case Instruction::SUB_LONG:
      NewLIR3(kThumb2SubRRI8, rl_result.low_reg, rl_src1.low_reg, mod_imm_lo);
      NewLIR3(kThumb2SbcRRI8, rl_result.high_reg, rl_src1.high_reg, mod_imm_hi);
      break;
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  StoreValueWide(rl_dest, rl_result);
}

}  // namespace art