/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Arm64 ISA. */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"

namespace art {

LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
  OpRegReg(kOpCmp, src1, src2);
  return OpCondBranch(cond, target);
}

LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
  LOG(FATAL) << "Unexpected use of OpIT for Arm64";
  return NULL;
}

void Arm64Mir2Lir::OpEndIT(LIR* it) {
  LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}

/*
 * 64-bit 3way compare function.
 *     cmp   xA, xB
 *     csinc wC, wzr, wzr, eq  // wC = (xA == xB) ? 0 : 1
 *     csneg wC, wC, wC, ge    // wC = (xA >= xB) ? wC : -wC
 */
void Arm64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);

  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondEq);
  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_result.reg.GetReg(),
          rl_result.reg.GetReg(), kArmCondGe);
  StoreValue(rl_dest, rl_result);
}
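
// Worked example of the sequence above (illustrative): for xA = 5, xB = 7 the
// compare leaves EQ and GE false, so csinc produces 1 and csneg flips it to -1.
// For xA == xB, csinc produces 0 and csneg keeps it because GE holds. For
// xA = 7, xB = 5 both steps leave +1, giving the usual -1/0/+1 result.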

void Arm64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                  RegLocation rl_src1, RegLocation rl_shift) {
  OpKind op = kOpBkpt;
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      op = kOpLsl;
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      op = kOpAsr;
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Unexpected case: " << opcode;
  }
  rl_shift = LoadValue(rl_shift, kCoreReg);
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
  StoreValueWide(rl_dest, rl_result);
}

static constexpr bool kUseDeltaEncodingInGenSelect = false;

void Arm64Mir2Lir::GenSelect(int32_t true_val, int32_t false_val, ConditionCode ccode,
                             RegStorage rs_dest, int result_reg_class) {
  if (false_val == 0 ||               // 0 is better as first operand.
      true_val == 1 ||                // Potentially Csinc.
      true_val == -1 ||               // Potentially Csinv.
      true_val == false_val + 1) {    // Potentially Csinc.
    ccode = NegateComparison(ccode);
    std::swap(true_val, false_val);
  }

  ArmConditionCode code = ArmConditionEncoding(ccode);

  int opcode;                                      // The opcode.
  RegStorage left_op = RegStorage::InvalidReg();   // The operands.
  RegStorage right_op = RegStorage::InvalidReg();  // The operands.

  bool is_wide = rs_dest.Is64Bit();

  RegStorage zero_reg = is_wide ? rs_xzr : rs_wzr;

  if (true_val == 0) {
    left_op = zero_reg;
  } else {
    left_op = rs_dest;
    LoadConstantNoClobber(rs_dest, true_val);
  }
  if (false_val == 1) {
    right_op = zero_reg;
    opcode = kA64Csinc4rrrc;
  } else if (false_val == -1) {
    right_op = zero_reg;
    opcode = kA64Csinv4rrrc;
  } else if (false_val == true_val + 1) {
    right_op = left_op;
    opcode = kA64Csinc4rrrc;
  } else if (false_val == -true_val) {
    right_op = left_op;
    opcode = kA64Csneg4rrrc;
  } else if (false_val == ~true_val) {
    right_op = left_op;
    opcode = kA64Csinv4rrrc;
  } else if (true_val == 0) {
    // left_op is zero_reg.
    right_op = rs_dest;
    LoadConstantNoClobber(rs_dest, false_val);
    opcode = kA64Csel4rrrc;
  } else {
    // Generic case.
    RegStorage t_reg2 = AllocTypedTemp(false, result_reg_class);
    if (is_wide) {
      if (t_reg2.Is32Bit()) {
        t_reg2 = As64BitReg(t_reg2);
      }
    } else {
      if (t_reg2.Is64Bit()) {
        t_reg2 = As32BitReg(t_reg2);
      }
    }

    if (kUseDeltaEncodingInGenSelect) {
      int32_t delta = false_val - true_val;
      uint32_t abs_val = delta < 0 ? -delta : delta;

      if (abs_val < 0x1000) {  // TODO: Replace with InexpensiveConstant with opcode.
        // Can encode as immediate to an add.
        right_op = t_reg2;
        OpRegRegImm(kOpAdd, t_reg2, left_op, delta);
      }
    }

    // Load as constant.
    if (!right_op.Valid()) {
      LoadConstantNoClobber(t_reg2, false_val);
      right_op = t_reg2;
    }

    opcode = kA64Csel4rrrc;
  }

  DCHECK(left_op.Valid() && right_op.Valid());
  NewLIR4(is_wide ? WIDE(opcode) : opcode, rs_dest.GetReg(), left_op.GetReg(), right_op.GetReg(),
          code);
}
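
// Informal summary of the conditional-select forms used above (A64 semantics):
//   csel  d, n, m, c  ->  d = c ? n : m
//   csinc d, n, m, c  ->  d = c ? n : m + 1
//   csinv d, n, m, c  ->  d = c ? n : ~m
//   csneg d, n, m, c  ->  d = c ? n : -m
// So, for example, a (cond ? 1 : 0) select is first normalized to
// (!cond ? 0 : 1) and then emitted as a single "csinc wDest, wzr, wzr, !cond"
// with no constant loads at all.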

void Arm64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                                    int32_t true_val, int32_t false_val, RegStorage rs_dest,
                                    int dest_reg_class) {
  DCHECK(rs_dest.Valid());
  OpRegReg(kOpCmp, left_op, right_op);
  GenSelect(true_val, false_val, code, rs_dest, dest_reg_class);
}

void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  rl_src = LoadValue(rl_src, rl_src.ref ? kRefReg : kCoreReg);
  // rl_src may be aliased with rl_result/rl_dest, so do compare early.
  OpRegImm(kOpCmp, rl_src.reg, 0);

  RegLocation rl_dest = mir_graph_->GetDest(mir);

  // The kMirOpSelect has two variants, one for constants and one for moves.
  if (mir->ssa_rep->num_uses == 1) {
    RegLocation rl_result = EvalLoc(rl_dest, rl_dest.ref ? kRefReg : kCoreReg, true);
    GenSelect(mir->dalvikInsn.vB, mir->dalvikInsn.vC, mir->meta.ccode, rl_result.reg,
              rl_dest.ref ? kRefReg : kCoreReg);
    StoreValue(rl_dest, rl_result);
  } else {
    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];

    RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;
    rl_true = LoadValue(rl_true, result_reg_class);
    rl_false = LoadValue(rl_false, result_reg_class);
    RegLocation rl_result = EvalLoc(rl_dest, result_reg_class, true);

    bool is_wide = rl_dest.ref || rl_dest.wide;
    int opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
    NewLIR4(opcode, rl_result.reg.GetReg(),
            rl_true.reg.GetReg(), rl_false.reg.GetReg(), ArmConditionEncoding(mir->meta.ccode));
    StoreValue(rl_dest, rl_result);
  }
}

void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  // Normalize such that if either operand is constant, src2 will be constant.
  ConditionCode ccode = mir->meta.ccode;
  if (rl_src1.is_const) {
    std::swap(rl_src1, rl_src2);
    ccode = FlipComparisonOrder(ccode);
  }

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);

  if (rl_src2.is_const) {
    // TODO: Optimize for rl_src1.is_const? (Does happen in the boot image at the moment.)

    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    // Special handling using cbz & cbnz.
    if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
      OpCmpImmBranch(ccode, rl_src1.reg, 0, taken);
      OpCmpImmBranch(NegateComparison(ccode), rl_src1.reg, 0, not_taken);
      return;
    }

    // Only handle Imm if src2 is not already in a register.
    rl_src2 = UpdateLocWide(rl_src2);
    if (rl_src2.location != kLocPhysReg) {
      OpRegImm64(kOpCmp, rl_src1.reg, val);
      OpCondBranch(ccode, taken);
      OpCondBranch(NegateComparison(ccode), not_taken);
      return;
    }
  }

  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  OpCondBranch(ccode, taken);
  OpCondBranch(NegateComparison(ccode), not_taken);
}

/*
 * Generate a register comparison to an immediate and branch. Caller
 * is responsible for setting branch target field.
 */
LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                                  LIR* target) {
  LIR* branch = nullptr;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
  if (check_value == 0) {
    if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
      ArmOpcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
      ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
      branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
    } else if (arm_cond == kArmCondLs) {
      // kArmCondLs is an unsigned less or equal. A comparison r <= 0 is then the same as cbz.
      // This case happens for a bounds check of array[0].
      ArmOpcode opcode = kA64Cbz2rt;
      ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
      branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
    }
  }

  if (branch == nullptr) {
    OpRegImm(kOpCmp, reg, check_value);
    branch = NewLIR2(kA64B2ct, arm_cond, 0);
  }

  branch->target = target;
  return branch;
}
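
// Example of the fast path above (illustrative): a zero test such as
// "if (x != 0)" on a 32-bit value becomes a single "cbnz wX, <target>" instead
// of "cmp wX, #0" followed by "b.ne <target>". The unsigned less-or-equal (ls)
// case also collapses to cbz because, against zero, "unsigned <= 0" can only
// hold when the value is exactly 0.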

LIR* Arm64Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg,
                                     RegStorage base_reg, int offset, int check_value,
                                     LIR* target, LIR** compare) {
  DCHECK(compare == nullptr);
  // It is possible that temp register is 64-bit. (ArgReg or RefReg)
  // Always compare 32-bit value no matter what temp_reg is.
  if (temp_reg.Is64Bit()) {
    temp_reg = As32BitReg(temp_reg);
  }
  Load32Disp(base_reg, offset, temp_reg);
  LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
  return branch;
}

LIR* Arm64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
  bool dest_is_fp = r_dest.IsFloat();
  bool src_is_fp = r_src.IsFloat();
  ArmOpcode opcode = kA64Brk1d;
  LIR* res;

  if (LIKELY(dest_is_fp == src_is_fp)) {
    if (LIKELY(!dest_is_fp)) {
      DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());

      // Core/core copy.
      // Copies involving the sp register require a different instruction.
      opcode = UNLIKELY(A64_REG_IS_SP(r_dest.GetReg())) ? kA64Add4RRdT : kA64Mov2rr;

      // TODO(Arm64): kA64Add4RRdT formally has 4 args, but is used as a 2 args instruction.
      //   This currently works because the other arguments are set to 0 by default. We should
      //   rather introduce an alias kA64Mov2RR.

      // core/core copy. Do a x/x copy only if both registers are x.
      if (r_dest.Is64Bit() && r_src.Is64Bit()) {
        opcode = WIDE(opcode);
      }
    } else {
      // Float/float copy.
      bool dest_is_double = r_dest.IsDouble();
      bool src_is_double = r_src.IsDouble();

      // We do not do float/double or double/float casts here.
      DCHECK_EQ(dest_is_double, src_is_double);

      // Homogeneous float/float copy.
      opcode = (dest_is_double) ? FWIDE(kA64Fmov2ff) : kA64Fmov2ff;
    }
  } else {
    // Inhomogeneous register copy.
    if (dest_is_fp) {
      if (r_dest.IsDouble()) {
        opcode = kA64Fmov2Sx;
      } else {
        r_src = Check32BitReg(r_src);
        opcode = kA64Fmov2sw;
      }
    } else {
      if (r_src.IsDouble()) {
        opcode = kA64Fmov2xS;
      } else {
        r_dest = Check32BitReg(r_dest);
        opcode = kA64Fmov2ws;
      }
    }
  }

  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());

  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }

  return res;
}

void Arm64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
  if (r_dest != r_src) {
    LIR* res = OpRegCopyNoInsert(r_dest, r_src);
    AppendLIR(res);
  }
}

void Arm64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
  OpRegCopy(r_dest, r_src);
}

// Table of magic divisors
struct MagicTable {
  int magic64_base;
  int magic64_eor;
  uint64_t magic64;
  uint32_t magic32;
  uint32_t shift;
  DividePattern pattern;
};

static const MagicTable magic_table[] = {
  {   0,      0,                  0,          0, 0, DivideNone},  // 0
  {   0,      0,                  0,          0, 0, DivideNone},  // 1
  {   0,      0,                  0,          0, 0, DivideNone},  // 2
  {0x3c,     -1, 0x5555555555555556, 0x55555556, 0, Divide3},     // 3
  {   0,      0,                  0,          0, 0, DivideNone},  // 4
  {0xf9,     -1, 0x6666666666666667, 0x66666667, 1, Divide5},     // 5
  {0x7c, 0x1041, 0x2AAAAAAAAAAAAAAB, 0x2AAAAAAB, 0, Divide3},     // 6
  {  -1,     -1, 0x924924924924924A, 0x92492493, 2, Divide7},     // 7
  {   0,      0,                  0,          0, 0, DivideNone},  // 8
  {  -1,     -1, 0x38E38E38E38E38E4, 0x38E38E39, 1, Divide5},     // 9
  {0xf9,     -1, 0x6666666666666667, 0x66666667, 2, Divide5},     // 10
  {  -1,     -1, 0x2E8BA2E8BA2E8BA3, 0x2E8BA2E9, 1, Divide5},     // 11
  {0x7c, 0x1041, 0x2AAAAAAAAAAAAAAB, 0x2AAAAAAB, 1, Divide5},     // 12
  {  -1,     -1, 0x4EC4EC4EC4EC4EC5, 0x4EC4EC4F, 2, Divide5},     // 13
  {  -1,     -1, 0x924924924924924A, 0x92492493, 3, Divide7},     // 14
  {0x78,     -1, 0x8888888888888889, 0x88888889, 3, Divide7},     // 15
};
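
// Reading the table (informal note): magic32/magic64 are the reciprocal
// multipliers from Hacker's Delight 10-4 for the divisor named in the trailing
// comment, shift is the extra arithmetic shift applied after the high-half
// multiply, and pattern selects the fix-up sequence used below. Where
// magic64_base/magic64_eor are non-negative they are A64 logical-immediate
// encodings that let the 64-bit magic constant be materialized in two
// instructions (orr plus eor, or orr plus add #1) instead of a full
// multi-instruction constant load.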

// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTemp();
  LoadConstant(r_magic, magic_table[lit].magic32);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage r_long_mul = AllocTemp();
  NewLIR4(kA64Smaddl4xwwx, As64BitReg(r_long_mul).GetReg(),
          r_magic.GetReg(), rl_src.reg.GetReg(), rxzr);
  switch (pattern) {
    case Divide3:
      OpRegRegImm(kOpLsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul), 32);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul),
                  32 + magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide7:
      OpRegRegRegShift(kOpAdd, As64BitReg(r_long_mul), As64BitReg(rl_src.reg),
                       As64BitReg(r_long_mul), EncodeShift(kA64Lsr, 32));
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
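
// Sketch of what the Divide3 pattern above emits for "w1 / 3" (register
// choices here are illustrative only):
//   (load w16 with 0x55555556, the magic32 entry for 3)
//   smaddl x17, w16, w1, xzr       // x17 = magic * w1, full 64-bit product
//   lsr    x17, x17, #32           // keep the high 32 bits of the product
//   sub    w0, w17, w1, asr #31    // sign correction: round toward zero
// For w1 = 10 this computes (0x55555556 * 10) >> 32 = 3 and subtracts 0,
// giving 3; for w1 = -10 it yields -4 - (-1) = -3.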

bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div,
                                        RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTempWide();
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  RegStorage r_long_mul = AllocTempWide();

  if (magic_table[lit].magic64_base >= 0) {
    // Check that the entry in the table is correct.
    if (kIsDebugBuild) {
      uint64_t reconstructed_imm;
      uint64_t base = DecodeLogicalImmediate(/*is_wide*/true, magic_table[lit].magic64_base);
      if (magic_table[lit].magic64_eor >= 0) {
        uint64_t eor = DecodeLogicalImmediate(/*is_wide*/true, magic_table[lit].magic64_eor);
        reconstructed_imm = base ^ eor;
      } else {
        reconstructed_imm = base + 1;
      }
      DCHECK_EQ(reconstructed_imm, magic_table[lit].magic64) << " for literal " << lit;
    }

    // Load the magic constant in two instructions.
    NewLIR3(WIDE(kA64Orr3Rrl), r_magic.GetReg(), rxzr, magic_table[lit].magic64_base);
    if (magic_table[lit].magic64_eor >= 0) {
      NewLIR3(WIDE(kA64Eor3Rrl), r_magic.GetReg(), r_magic.GetReg(),
              magic_table[lit].magic64_eor);
    } else {
      NewLIR4(WIDE(kA64Add4RRdT), r_magic.GetReg(), r_magic.GetReg(), 1, 0);
    }
  } else {
    LoadConstantWide(r_magic, magic_table[lit].magic64);
  }

  NewLIR3(kA64Smulh3xxx, r_long_mul.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
  switch (pattern) {
    case Divide3:
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    case Divide7:
      OpRegRegReg(kOpAdd, r_long_mul, rl_src.reg, r_long_mul);
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValueWide(rl_dest, rl_result);
  return true;
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Arm64Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
  return HandleEasyDivRem64(dalvik_opcode, is_div, rl_src, rl_dest, static_cast<int>(lit));
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Arm64Mir2Lir::HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
  const bool is_64bit = rl_dest.wide;
  const int nbits = (is_64bit) ? 64 : 32;

  if (lit < 2) {
    return false;
  }
  if (!IsPowerOfTwo(lit)) {
    if (is_64bit) {
      return SmallLiteralDivRem64(dalvik_opcode, is_div, rl_src, rl_dest, lit);
    } else {
      return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, static_cast<int32_t>(lit));
    }
  }
  int k = LowestSetBit(lit);
  if (k >= nbits - 2) {
    // Avoid special cases.
    return false;
  }

  RegLocation rl_result;
  RegStorage t_reg;
  if (is_64bit) {
    rl_src = LoadValueWide(rl_src, kCoreReg);
    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
    t_reg = AllocTempWide();
  } else {
    rl_src = LoadValue(rl_src, kCoreReg);
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    t_reg = AllocTemp();
  }

  int shift = EncodeShift(kA64Lsr, nbits - k);
  if (is_div) {
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, shift);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, nbits - 1);
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, t_reg, shift);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    }
  } else {
    if (lit == 2) {
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, shift);
      OpRegRegImm64(kOpAnd, t_reg, t_reg, lit - 1);
      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg, rl_src.reg, shift);
    } else {
      RegStorage t_reg2 = (is_64bit) ? AllocTempWide() : AllocTemp();
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, nbits - 1);
      OpRegRegRegShift(kOpAdd, t_reg2, rl_src.reg, t_reg, shift);
      OpRegRegImm64(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg2, t_reg, shift);
    }
  }

  if (is_64bit) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    StoreValue(rl_dest, rl_result);
  }
  return true;
}
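
// Illustrative expansion of the power-of-two division path above for "w1 / 4"
// (k = 2, 32-bit, register names made up):
//   asr w16, w1, #31               // w16 = 0 or -1 (sign mask)
//   add w16, w1, w16, lsr #30      // adds lit - 1 (here 3) only for negative inputs
//   asr w0, w16, #2                // arithmetic shift now truncates toward zero
// For w1 = -7 this computes (-7 + 3) >> 2 = -1, matching Java's round-toward-zero
// division, whereas a plain "asr #2" alone would give -2.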

bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
  return false;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Put the literal in a temp.
  RegStorage lit_temp = AllocTemp();
  LoadConstant(lit_temp, lit);
  // Use the generic case for div/rem with arg2 in a register.
  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
  FreeTemp(lit_temp);

  return rl_result;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                    RegLocation rl_src2, bool is_div, bool check_zero) {
  LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
                                    bool is_div) {
  CHECK_EQ(r_src1.Is64Bit(), r_src2.Is64Bit());

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    OpRegRegReg(kOpDiv, rl_result.reg, r_src1, r_src2);
  } else {
    // temp = r_src1 / r_src2
    // dest = r_src1 - temp * r_src2
    RegStorage temp;
    ArmOpcode wide;
    if (rl_result.reg.Is64Bit()) {
      temp = AllocTempWide();
      wide = WIDE(0);
    } else {
      temp = AllocTemp();
      wide = UNWIDE(0);
    }
    OpRegRegReg(kOpDiv, temp, r_src1, r_src2);
    NewLIR4(kA64Msub4rrrr | wide, rl_result.reg.GetReg(), temp.GetReg(),
            r_src1.GetReg(), r_src2.GetReg());
    FreeTemp(temp);
  }
  return rl_result;
}

bool Arm64Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
  RegLocation rl_src = info->args[0];
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTargetWide(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage sign_reg = AllocTempWide();
  // abs(x) = y<=x>>63, (x+y)^y.
  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
  OpRegReg(kOpXor, rl_result.reg, sign_reg);
  StoreValueWide(rl_dest, rl_result);
  return true;
}
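
// Worked example of the branch-free abs above: for x = -5, y = x >> 63 = -1,
// so (x + y) ^ y = (-6) ^ -1 = 5; for non-negative x, y = 0 and the add/xor
// leave x unchanged. Note that abs(Long.MIN_VALUE) wraps back to itself, which
// is exactly what the Java semantics require.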

bool Arm64Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = (is_long) ? info->args[2] : info->args[1];
  rl_src1 = (is_long) ? LoadValueWide(rl_src1, kCoreReg) : LoadValue(rl_src1, kCoreReg);
  rl_src2 = (is_long) ? LoadValueWide(rl_src2, kCoreReg) : LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = (is_long) ? InlineTargetWide(info) : InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4((is_long) ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc, rl_result.reg.GetReg(),
          rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), (is_min) ? kArmCondLt : kArmCondGt);
  (is_long) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);
  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
  if (size == k64) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Arm64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  RegLocation rl_src_value = info->args[2];  // [size] value
  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);

  RegLocation rl_value;
  if (size == k64) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    rl_value = LoadValue(rl_src_value, kCoreReg);
  }
  StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
  return true;
}

bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  RegLocation rl_src_expected = info->args[4];  // int, long or Object
  // If is_long, high half is in info->args[5]
  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
  // If is_long, high half is in info->args[7]
  RegLocation rl_dest = InlineTarget(info);  // boolean place for result

  // Load Object and offset
  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
  RegLocation rl_offset = LoadValueWide(rl_src_offset, kCoreReg);

  RegLocation rl_new_value;
  RegLocation rl_expected;
  if (is_long) {
    rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
    rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
  } else {
    rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
    rl_expected = LoadValue(rl_src_expected, is_object ? kRefReg : kCoreReg);
  }

  if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
    // Mark card for object assuming new value is stored.
    MarkGCCard(rl_new_value.reg, rl_object.reg);
  }

  RegStorage r_ptr = AllocTempRef();
  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);

  // Free now unneeded rl_object and rl_offset to give more temps.
  ClobberSReg(rl_object.s_reg_low);
  FreeTemp(rl_object.reg);
  ClobberSReg(rl_offset.s_reg_low);
  FreeTemp(rl_offset.reg);

  // do {
  //   tmp = [r_ptr] - expected;
  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
  // result = tmp != 0;

  RegStorage r_tmp;
  RegStorage r_tmp_stored;
  RegStorage rl_new_value_stored = rl_new_value.reg;
  ArmOpcode wide = UNWIDE(0);
  if (is_long) {
    r_tmp_stored = r_tmp = AllocTempWide();
    wide = WIDE(0);
  } else if (is_object) {
    // References use 64-bit registers, but are stored as compressed 32-bit values.
    // This means r_tmp_stored != r_tmp.
    r_tmp = AllocTempRef();
    r_tmp_stored = As32BitReg(r_tmp);
    rl_new_value_stored = As32BitReg(rl_new_value_stored);
  } else {
    r_tmp_stored = r_tmp = AllocTemp();
  }

  RegStorage r_tmp32 = (r_tmp.Is32Bit()) ? r_tmp : As32BitReg(r_tmp);
  LIR* loop = NewLIR0(kPseudoTargetLabel);
  NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
  OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  LIR* early_exit = OpCondBranch(kCondNe, NULL);
  NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
  NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  OpCondBranch(kCondNe, loop);

  LIR* exit_loop = NewLIR0(kPseudoTargetLabel);
  early_exit->target = exit_loop;

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondNe);

  FreeTemp(r_tmp);  // Now unneeded.
  FreeTemp(r_ptr);  // Now unneeded.

  StoreValue(rl_dest, rl_result);

  return true;
}
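
// Informal summary of the exclusive-access loop above: ldaxr performs an
// acquire load that also marks the address for exclusive access; if the loaded
// value does not equal the expected one we branch out with the flags still
// indicating "not equal". Otherwise stlxr attempts a store-release, writing 0
// to the status register on success, and the cmp/b.ne retries the loop if
// another thread intervened. The final "csinc wRes, wzr, wzr, ne" therefore
// materializes 1 on success and 0 on failure.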

LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  return RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp), reg.GetReg(), 0, 0, 0, 0, target);
}

LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
  return NULL;
}

void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                 RegLocation rl_result, int lit,
                                                 int first_bit, int second_bit) {
  OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
                   EncodeShift(kA64Lsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
  }
}

void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
  LOG(FATAL) << "Unexpected use of GenDivZero for Arm64";
}

// Test suspend flag, return target of taken suspend branch
LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
  NewLIR3(kA64Subs3rRd, rwSUSPEND, rwSUSPEND, 1);
  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
}

// Decrement register and branch on condition
LIR* Arm64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
  // Combine sub & test using sub setflags encoding here. We need to make sure a
  // subtract form that sets carry is used, so generate explicitly.
  // TODO: might be best to add a new op, kOpSubs, and handle it generically.
  ArmOpcode opcode = reg.Is64Bit() ? WIDE(kA64Subs3rRd) : UNWIDE(kA64Subs3rRd);
  NewLIR3(opcode, reg.GetReg(), reg.GetReg(), 1);  // For value == 1, this should set flags.
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  return OpCondBranch(c_code, target);
}

bool Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will generate one.
  LIR* barrier = last_lir_insn_;

  int dmb_flavor;
  // TODO: revisit Arm barrier kinds
  switch (barrier_kind) {
    case kAnyStore: dmb_flavor = kISH; break;
    case kLoadAny: dmb_flavor = kISH; break;
        // We conjecture that kISHLD is insufficient. It is documented
        // to provide LoadLoad | StoreStore ordering. But if this were used
        // to implement volatile loads, we suspect that the lack of store
        // atomicity on ARM would cause us to allow incorrect results for
        // the canonical IRIW example. But we're not sure.
        // We should be using acquire loads instead.
    case kStoreStore: dmb_flavor = kISHST; break;
    case kAnyAny: dmb_flavor = kISH; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }

  bool ret = false;

  // If the same barrier already exists, don't generate another.
  if (barrier == nullptr
      || (barrier->opcode != kA64Dmb1B || barrier->operands[0] != dmb_flavor)) {
    barrier = NewLIR1(kA64Dmb1B, dmb_flavor);
    ret = true;
  }

  // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = &kEncodeAll;
  return ret;
#else
  return false;
#endif
}
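
// Quick reference for the mapping above (descriptive only): kAnyAny, kAnyStore
// and kLoadAny are all emitted as "dmb ish" (a full inner-shareable barrier),
// while kStoreStore is relaxed to "dmb ishst", which only orders stores against
// later stores. The elision check means back-to-back barriers of the same
// flavor collapse into a single dmb instruction.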

void Arm64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  NewLIR4(WIDE(kA64Sbfm4rrdd), rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0, 31);
  StoreValueWide(rl_dest, rl_result);
}
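
// Note (informal): "sbfm xd, xn, #0, #31" is the canonical encoding of
// "sxtw xd, wn", i.e. it sign-extends the low 32 bits into the full 64-bit
// register, which is exactly what the int-to-long conversion needs.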
885
886void Arm64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
887 RegLocation rl_src1, RegLocation rl_src2, bool is_div) {
Matteo Franchin7c6c2ac2014-07-01 18:03:08 +0100888 if (rl_src2.is_const) {
889 DCHECK(rl_src2.wide);
890 int64_t lit = mir_graph_->ConstantValueWide(rl_src2);
891 if (HandleEasyDivRem64(opcode, is_div, rl_src1, rl_dest, lit)) {
892 return;
893 }
894 }
895
Serban Constantinescued65c5e2014-05-22 15:10:18 +0100896 RegLocation rl_result;
897 rl_src1 = LoadValueWide(rl_src1, kCoreReg);
898 rl_src2 = LoadValueWide(rl_src2, kCoreReg);
899 GenDivZeroCheck(rl_src2.reg);
900 rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, is_div);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100901 StoreValueWide(rl_dest, rl_result);
902}
903
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100904void Arm64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
905 RegLocation rl_src2) {
906 RegLocation rl_result;
Serban Constantinescued65c5e2014-05-22 15:10:18 +0100907
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100908 rl_src1 = LoadValueWide(rl_src1, kCoreReg);
909 rl_src2 = LoadValueWide(rl_src2, kCoreReg);
910 rl_result = EvalLocWide(rl_dest, kCoreReg, true);
Serban Constantinescued65c5e2014-05-22 15:10:18 +0100911 OpRegRegRegShift(op, rl_result.reg, rl_src1.reg, rl_src2.reg, ENCODE_NO_SHIFT);
912 StoreValueWide(rl_dest, rl_result);
913}
914
915void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
916 RegLocation rl_result;
917
918 rl_src = LoadValueWide(rl_src, kCoreReg);
919 rl_result = EvalLocWide(rl_dest, kCoreReg, true);
920 OpRegRegShift(kOpNeg, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
921 StoreValueWide(rl_dest, rl_result);
922}
923
924void Arm64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
925 RegLocation rl_result;
926
927 rl_src = LoadValueWide(rl_src, kCoreReg);
928 rl_result = EvalLocWide(rl_dest, kCoreReg, true);
929 OpRegRegShift(kOpMvn, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100930 StoreValueWide(rl_dest, rl_result);
931}
932
Matteo Franchin43ec8732014-03-31 15:00:14 +0100933void Arm64Mir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100934 RegLocation rl_src1, RegLocation rl_src2) {
935 GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100936}
937
938void Arm64Mir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100939 RegLocation rl_src2) {
940 GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100941}
942
943void Arm64Mir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
944 RegLocation rl_src2) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100945 GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100946}
947
948void Arm64Mir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
949 RegLocation rl_src2) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100950 GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100951}
952
953void Arm64Mir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
954 RegLocation rl_src2) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100955 GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100956}
957
958void Arm64Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
959 RegLocation rl_src2) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100960 GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100961}
962
963/*
964 * Generate array load
965 */
966void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
967 RegLocation rl_index, RegLocation rl_dest, int scale) {
968 RegisterClass reg_class = RegClassBySize(size);
969 int len_offset = mirror::Array::LengthOffset().Int32Value();
970 int data_offset;
971 RegLocation rl_result;
972 bool constant_index = rl_index.is_const;
buzbeea0cd2d72014-06-01 09:33:49 -0700973 rl_array = LoadValue(rl_array, kRefReg);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100974 if (!constant_index) {
975 rl_index = LoadValue(rl_index, kCoreReg);
976 }
977
978 if (rl_dest.wide) {
979 data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
980 } else {
981 data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
982 }
983
984 // If index is constant, just fold it into the data offset
985 if (constant_index) {
986 data_offset += mir_graph_->ConstantValue(rl_index) << scale;
987 }
988
989 /* null object? */
990 GenNullCheck(rl_array.reg, opt_flags);
991
992 bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
993 RegStorage reg_len;
994 if (needs_range_check) {
995 reg_len = AllocTemp();
996 /* Get len */
997 Load32Disp(rl_array.reg, len_offset, reg_len);
998 MarkPossibleNullPointerException(opt_flags);
999 } else {
1000 ForceImplicitNullCheck(rl_array.reg, opt_flags);
1001 }
1002 if (rl_dest.wide || rl_dest.fp || constant_index) {
1003 RegStorage reg_ptr;
1004 if (constant_index) {
1005 reg_ptr = rl_array.reg; // NOTE: must not alter reg_ptr in constant case.
1006 } else {
1007 // No special indexed operation, lea + load w/ displacement
buzbeea0cd2d72014-06-01 09:33:49 -07001008 reg_ptr = AllocTempRef();
buzbee33ae5582014-06-12 14:56:32 -07001009 OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
1010 EncodeShift(kA64Lsl, scale));
Matteo Franchin43ec8732014-03-31 15:00:14 +01001011 FreeTemp(rl_index.reg);
1012 }
1013 rl_result = EvalLoc(rl_dest, reg_class, true);
1014
1015 if (needs_range_check) {
1016 if (constant_index) {
1017 GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
1018 } else {
1019 GenArrayBoundsCheck(rl_index.reg, reg_len);
1020 }
1021 FreeTemp(reg_len);
1022 }
Andreas Gampe3c12c512014-06-24 18:46:29 +00001023 if (rl_result.ref) {
1024 LoadRefDisp(reg_ptr, data_offset, rl_result.reg, kNotVolatile);
1025 } else {
1026 LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, kNotVolatile);
1027 }
Vladimir Marko455759b2014-05-06 20:49:36 +01001028 MarkPossibleNullPointerException(opt_flags);
1029 if (!constant_index) {
1030 FreeTemp(reg_ptr);
1031 }
Matteo Franchin43ec8732014-03-31 15:00:14 +01001032 if (rl_dest.wide) {
Matteo Franchin43ec8732014-03-31 15:00:14 +01001033 StoreValueWide(rl_dest, rl_result);
1034 } else {
Matteo Franchin43ec8732014-03-31 15:00:14 +01001035 StoreValue(rl_dest, rl_result);
1036 }
1037 } else {
1038 // Offset base, then use indexed load
buzbeea0cd2d72014-06-01 09:33:49 -07001039 RegStorage reg_ptr = AllocTempRef();
Matteo Franchin43ec8732014-03-31 15:00:14 +01001040 OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
1041 FreeTemp(rl_array.reg);
1042 rl_result = EvalLoc(rl_dest, reg_class, true);
1043
1044 if (needs_range_check) {
1045 GenArrayBoundsCheck(rl_index.reg, reg_len);
1046 FreeTemp(reg_len);
1047 }
Andreas Gampe3c12c512014-06-24 18:46:29 +00001048 if (rl_result.ref) {
Matteo Franchin255e0142014-07-04 13:50:41 +01001049 LoadRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale);
Andreas Gampe3c12c512014-06-24 18:46:29 +00001050 } else {
1051 LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
1052 }
Matteo Franchin43ec8732014-03-31 15:00:14 +01001053 MarkPossibleNullPointerException(opt_flags);
1054 FreeTemp(reg_ptr);
1055 StoreValue(rl_dest, rl_result);
1056 }
1057}
1058
1059/*
1060 * Generate array store
1061 *
1062 */
1063void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
1064 RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
1065 RegisterClass reg_class = RegClassBySize(size);
1066 int len_offset = mirror::Array::LengthOffset().Int32Value();
1067 bool constant_index = rl_index.is_const;
1068
1069 int data_offset;
1070 if (size == k64 || size == kDouble) {
1071 data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
1072 } else {
1073 data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
1074 }
1075
1076 // If index is constant, just fold it into the data offset.
1077 if (constant_index) {
1078 data_offset += mir_graph_->ConstantValue(rl_index) << scale;
1079 }
1080
buzbeea0cd2d72014-06-01 09:33:49 -07001081 rl_array = LoadValue(rl_array, kRefReg);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001082 if (!constant_index) {
1083 rl_index = LoadValue(rl_index, kCoreReg);
1084 }
1085
1086 RegStorage reg_ptr;
1087 bool allocated_reg_ptr_temp = false;
1088 if (constant_index) {
1089 reg_ptr = rl_array.reg;
1090 } else if (IsTemp(rl_array.reg) && !card_mark) {
1091 Clobber(rl_array.reg);
1092 reg_ptr = rl_array.reg;
1093 } else {
1094 allocated_reg_ptr_temp = true;
buzbeea0cd2d72014-06-01 09:33:49 -07001095 reg_ptr = AllocTempRef();
Matteo Franchin43ec8732014-03-31 15:00:14 +01001096 }
1097
1098 /* null object? */
1099 GenNullCheck(rl_array.reg, opt_flags);
1100
1101 bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
1102 RegStorage reg_len;
1103 if (needs_range_check) {
1104 reg_len = AllocTemp();
1105 // NOTE: max live temps(4) here.
1106 /* Get len */
1107 Load32Disp(rl_array.reg, len_offset, reg_len);
1108 MarkPossibleNullPointerException(opt_flags);
1109 } else {
1110 ForceImplicitNullCheck(rl_array.reg, opt_flags);
1111 }
1112 /* at this point, reg_ptr points to array, 2 live temps */
1113 if (rl_src.wide || rl_src.fp || constant_index) {
1114 if (rl_src.wide) {
1115 rl_src = LoadValueWide(rl_src, reg_class);
1116 } else {
1117 rl_src = LoadValue(rl_src, reg_class);
1118 }
1119 if (!constant_index) {
buzbee33ae5582014-06-12 14:56:32 -07001120 OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
1121 EncodeShift(kA64Lsl, scale));
Matteo Franchin43ec8732014-03-31 15:00:14 +01001122 }
1123 if (needs_range_check) {
1124 if (constant_index) {
1125 GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
1126 } else {
1127 GenArrayBoundsCheck(rl_index.reg, reg_len);
1128 }
1129 FreeTemp(reg_len);
1130 }
Andreas Gampe3c12c512014-06-24 18:46:29 +00001131 if (rl_src.ref) {
1132 StoreRefDisp(reg_ptr, data_offset, rl_src.reg, kNotVolatile);
1133 } else {
1134 StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size, kNotVolatile);
1135 }
Matteo Franchin43ec8732014-03-31 15:00:14 +01001136 MarkPossibleNullPointerException(opt_flags);
1137 } else {
1138 /* reg_ptr -> array data */
1139 OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
1140 rl_src = LoadValue(rl_src, reg_class);
1141 if (needs_range_check) {
1142 GenArrayBoundsCheck(rl_index.reg, reg_len);
1143 FreeTemp(reg_len);
1144 }
Andreas Gampe3c12c512014-06-24 18:46:29 +00001145 if (rl_src.ref) {
Matteo Franchin255e0142014-07-04 13:50:41 +01001146 StoreRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale);
Andreas Gampe3c12c512014-06-24 18:46:29 +00001147 } else {
1148 StoreBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale, size);
1149 }
Matteo Franchin43ec8732014-03-31 15:00:14 +01001150 MarkPossibleNullPointerException(opt_flags);
1151 }
1152 if (allocated_reg_ptr_temp) {
1153 FreeTemp(reg_ptr);
1154 }
1155 if (card_mark) {
1156 MarkGCCard(rl_src.reg, rl_array.reg);
1157 }
1158}
1159
Matteo Franchin43ec8732014-03-31 15:00:14 +01001160void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
Matteo Franchin7c6c2ac2014-07-01 18:03:08 +01001161 RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) {
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001162 OpKind op = kOpBkpt;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001163 // Per spec, we only care about low 6 bits of shift amount.
1164 int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001165 rl_src = LoadValueWide(rl_src, kCoreReg);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001166 if (shift_amount == 0) {
1167 StoreValueWide(rl_dest, rl_src);
1168 return;
1169 }
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001170
1171 RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001172 switch (opcode) {
1173 case Instruction::SHL_LONG:
1174 case Instruction::SHL_LONG_2ADDR:
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001175 op = kOpLsl;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001176 break;
1177 case Instruction::SHR_LONG:
1178 case Instruction::SHR_LONG_2ADDR:
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001179 op = kOpAsr;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001180 break;
1181 case Instruction::USHR_LONG:
1182 case Instruction::USHR_LONG_2ADDR:
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001183 op = kOpLsr;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001184 break;
1185 default:
1186      LOG(FATAL) << "Unexpected case: " << opcode;
1187 }
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001188 OpRegRegImm(op, rl_result.reg, rl_src.reg, shift_amount);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001189 StoreValueWide(rl_dest, rl_result);
1190}
1191
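// Long arithmetic with a constant operand. For commutative ops the constant may arrive in either source; it is normalized into rl_src2 below.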
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001192void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
1193 RegLocation rl_src1, RegLocation rl_src2) {
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001194 if ((opcode == Instruction::SUB_LONG) || (opcode == Instruction::SUB_LONG_2ADDR)) {
Matteo Franchin43ec8732014-03-31 15:00:14 +01001195 if (!rl_src2.is_const) {
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001196 return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001197 }
1198 } else {
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001199    // Commutativity: ensure the constant operand ends up in rl_src2.
Matteo Franchin43ec8732014-03-31 15:00:14 +01001200 if (!rl_src2.is_const) {
1201 DCHECK(rl_src1.is_const);
1202 std::swap(rl_src1, rl_src2);
1203 }
1204 }
Matteo Franchin43ec8732014-03-31 15:00:14 +01001205 DCHECK(rl_src2.is_const);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001206
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001207 OpKind op = kOpBkpt;
1208 int64_t val = mir_graph_->ConstantValueWide(rl_src2);
1209
Matteo Franchin43ec8732014-03-31 15:00:14 +01001210 switch (opcode) {
1211 case Instruction::ADD_LONG:
1212 case Instruction::ADD_LONG_2ADDR:
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001213 op = kOpAdd;
1214 break;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001215 case Instruction::SUB_LONG:
1216 case Instruction::SUB_LONG_2ADDR:
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001217 op = kOpSub;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001218 break;
1219 case Instruction::AND_LONG:
1220 case Instruction::AND_LONG_2ADDR:
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001221 op = kOpAnd;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001222 break;
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001223 case Instruction::OR_LONG:
1224 case Instruction::OR_LONG_2ADDR:
1225 op = kOpOr;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001226 break;
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001227 case Instruction::XOR_LONG:
1228 case Instruction::XOR_LONG_2ADDR:
1229 op = kOpXor;
1230 break;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001231 default:
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001232      LOG(FATAL) << "Unexpected opcode: " << opcode;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001233 }
Serban Constantinescued65c5e2014-05-22 15:10:18 +01001234
1235 rl_src1 = LoadValueWide(rl_src1, kCoreReg);
1236 RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
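  // OpRegRegImm64 is expected to encode val directly when possible and otherwise materialize it in a temp register.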
Zheng Xue2eb29e2014-06-12 10:22:33 +08001237 OpRegRegImm64(op, rl_result.reg, rl_src1.reg, val);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001238 StoreValueWide(rl_dest, rl_result);
1239}
1240
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001241/**
1242 * @brief Split a register list into pairs or single registers.
1243 *
1244 * Given a list of registers in @p reg_mask, split the list into pairs. Use as follows:
1245 * @code
1246 * int reg1 = -1, reg2 = -1;
1247 * while (reg_mask) {
1248 * reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
1249 * if (UNLIKELY(reg2 < 0)) {
1250 * // Single register in reg1.
1251 * } else {
1252 * // Pair in reg1, reg2.
1253 * }
1254 * }
1255 * @endcode
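 * @note reg1 must be initialized to -1 before the first call; the running register index is carried in *reg1 between calls.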
1256 */
1257uint32_t Arm64Mir2Lir::GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
1258 // Find first register.
1259 int first_bit_set = __builtin_ctz(reg_mask) + 1;
1260 int reg = *reg1 + first_bit_set;
1261 reg_mask >>= first_bit_set;
1262
1263 if (LIKELY(reg_mask)) {
1264 // Save the first register, find the second and use the pair opcode.
1265 int second_bit_set = __builtin_ctz(reg_mask) + 1;
1266 *reg2 = reg;
1267 reg_mask >>= second_bit_set;
1268 *reg1 = reg + second_bit_set;
1269 return reg_mask;
1270 }
1271
1272 // Use the single opcode, as we just have one register.
1273 *reg1 = reg;
1274 *reg2 = -1;
1275 return reg_mask;
1276}
1277
1278void Arm64Mir2Lir::UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
1279 int reg1 = -1, reg2 = -1;
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001280 const int reg_log2_size = 3;
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001281
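  // Load offsets are encoded in 8-byte units, so convert the byte offset to a doubleword index and step by two slots per pair.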
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001282 for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001283     reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
1284 if (UNLIKELY(reg2 < 0)) {
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001285 NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001286 } else {
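      // ldp takes a 7-bit scaled immediate, so the doubleword offset must stay within [-64, 63].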
buzbeeb5860fb2014-06-21 15:31:01 -07001287 DCHECK_LE(offset, 63);
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001288 NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
1289 RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001290 }
1291 }
1292}
1293
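// Mirror of UnSpillCoreRegs: stores the core registers in reg_mask, pairing them with stp where possible.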
1294void Arm64Mir2Lir::SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
1295 int reg1 = -1, reg2 = -1;
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001296 const int reg_log2_size = 3;
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001297
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001298 for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001299     reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
1300 if (UNLIKELY(reg2 < 0)) {
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001301 NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001302 } else {
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001303 NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
1304 RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
1305 }
1306 }
1307}
1308
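// The FP/SIMD save area uses the same pairwise scheme on the 64-bit D registers, with ldr/ldp.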
1309void Arm64Mir2Lir::UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
1310 int reg1 = -1, reg2 = -1;
1311 const int reg_log2_size = 3;
1312
1313 for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
1314     reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
1315 if (UNLIKELY(reg2 < 0)) {
1316 NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
1317 } else {
1318 NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
1319 RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
1320 }
1321 }
1322}
1323
1324// TODO(Arm64): consider using ld1 and st1?
1325void Arm64Mir2Lir::SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
1326 int reg1 = -1, reg2 = -1;
1327 const int reg_log2_size = 3;
1328
1329 for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
1330     reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
1331 if (UNLIKELY(reg2 < 0)) {
1332 NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
1333 } else {
1334 NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
1335 RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001336 }
1337 }
1338}
1339
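// Inline intrinsic for bit reversal: a single rbit instruction handles both the 32-bit and 64-bit cases.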
Serban Constantinescu23abec92014-07-02 16:13:38 +01001340bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
1341 ArmOpcode wide = (size == k64) ? WIDE(0) : UNWIDE(0);
1342 RegLocation rl_src_i = info->args[0];
1343 RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info); // result reg
1344 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1345 RegLocation rl_i = (size == k64) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
1346 NewLIR2(kA64Rbit2rr | wide, rl_result.reg.GetReg(), rl_i.reg.GetReg());
1347 (size == k64) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
1348 return true;
1349}
1350
Matteo Franchin43ec8732014-03-31 15:00:14 +01001351} // namespace art