/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Arm64 ISA. */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array-inl.h"
#include "utils.h"

namespace art {

LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
  OpRegReg(kOpCmp, src1, src2);
  return OpCondBranch(cond, target);
}

LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
  UNUSED(ccode, guide);
  LOG(FATAL) << "Unexpected use of OpIT for Arm64";
  UNREACHABLE();
}

void Arm64Mir2Lir::OpEndIT(LIR* it) {
  UNUSED(it);
  LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}

/*
 * 64-bit 3way compare function.
 *     cmp   xA, xB
 *     csinc wC, wzr, wzr, eq  // wC = (xA == xB) ? 0 : 1
 *     csneg wC, wC, wC, ge    // wC = (xA >= xB) ? wC : -wC
 */
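// Illustrative walk-through: for xA = 5, xB = 3 the cmp clears Z and leaves
// "ge" true, so csinc produces wC = 1 and csneg keeps it, yielding +1; swapping
// the inputs yields -1, and equal inputs yield 0.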
void Arm64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);

  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondEq);
  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_result.reg.GetReg(),
          rl_result.reg.GetReg(), kArmCondGe);
  StoreValue(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                  RegLocation rl_src1, RegLocation rl_shift) {
  OpKind op = kOpBkpt;
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      op = kOpLsl;
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      op = kOpAsr;
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Unexpected case: " << opcode;
  }
  rl_shift = LoadValue(rl_shift, kCoreReg);
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
  StoreValueWide(rl_dest, rl_result);
}

static constexpr bool kUseDeltaEncodingInGenSelect = false;

void Arm64Mir2Lir::GenSelect(int32_t true_val, int32_t false_val, ConditionCode ccode,
                             RegStorage rs_dest, int result_reg_class) {
  if (false_val == 0 ||               // 0 is better as first operand.
      true_val == 1 ||                // Potentially Csinc.
      true_val == -1 ||               // Potentially Csinv.
      true_val == false_val + 1) {    // Potentially Csinc.
    ccode = NegateComparison(ccode);
    std::swap(true_val, false_val);
  }
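  // Illustrative example of the normalization above: select(cond, 5, 0) becomes
  // select(!cond, 0, 5), so the zero register can serve as the first operand.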

  ArmConditionCode code = ArmConditionEncoding(ccode);

  int opcode;                                      // The opcode.
  RegStorage left_op = RegStorage::InvalidReg();   // The operands.
  RegStorage right_op = RegStorage::InvalidReg();  // The operands.

  bool is_wide = rs_dest.Is64Bit();

  RegStorage zero_reg = is_wide ? rs_xzr : rs_wzr;

  if (true_val == 0) {
    left_op = zero_reg;
  } else {
    left_op = rs_dest;
    LoadConstantNoClobber(rs_dest, true_val);
  }
  if (false_val == 1) {
    right_op = zero_reg;
    opcode = kA64Csinc4rrrc;
  } else if (false_val == -1) {
    right_op = zero_reg;
    opcode = kA64Csinv4rrrc;
  } else if (false_val == true_val + 1) {
    right_op = left_op;
    opcode = kA64Csinc4rrrc;
  } else if (false_val == -true_val) {
    right_op = left_op;
    opcode = kA64Csneg4rrrc;
  } else if (false_val == ~true_val) {
    right_op = left_op;
    opcode = kA64Csinv4rrrc;
  } else if (true_val == 0) {
    // left_op is zero_reg.
    right_op = rs_dest;
    LoadConstantNoClobber(rs_dest, false_val);
    opcode = kA64Csel4rrrc;
  } else {
    // Generic case.
    RegStorage t_reg2 = AllocTypedTemp(false, result_reg_class);
    if (is_wide) {
      if (t_reg2.Is32Bit()) {
        t_reg2 = As64BitReg(t_reg2);
      }
    } else {
      if (t_reg2.Is64Bit()) {
        t_reg2 = As32BitReg(t_reg2);
      }
    }

    if (kUseDeltaEncodingInGenSelect) {
      int32_t delta = false_val - true_val;
      uint32_t abs_val = delta < 0 ? -delta : delta;

      if (abs_val < 0x1000) {  // TODO: Replace with InexpensiveConstant with opcode.
        // Can encode as immediate to an add.
        right_op = t_reg2;
        OpRegRegImm(kOpAdd, t_reg2, left_op, delta);
      }
    }

    // Load as constant.
    if (!right_op.Valid()) {
      LoadConstantNoClobber(t_reg2, false_val);
      right_op = t_reg2;
    }

    opcode = kA64Csel4rrrc;
  }

  DCHECK(left_op.Valid() && right_op.Valid());
  NewLIR4(is_wide ? WIDE(opcode) : opcode, rs_dest.GetReg(), left_op.GetReg(), right_op.GetReg(),
          code);
}

void Arm64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                                    int32_t true_val, int32_t false_val, RegStorage rs_dest,
                                    RegisterClass dest_reg_class) {
  DCHECK(rs_dest.Valid());
  OpRegReg(kOpCmp, left_op, right_op);
  GenSelect(true_val, false_val, code, rs_dest, dest_reg_class);
}

void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  UNUSED(bb);
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  rl_src = LoadValue(rl_src, rl_src.ref ? kRefReg : kCoreReg);
  // rl_src may be aliased with rl_result/rl_dest, so do compare early.
  OpRegImm(kOpCmp, rl_src.reg, 0);

  RegLocation rl_dest = mir_graph_->GetDest(mir);

  // The kMirOpSelect has two variants, one for constants and one for moves.
  if (mir->ssa_rep->num_uses == 1) {
    RegLocation rl_result = EvalLoc(rl_dest, rl_dest.ref ? kRefReg : kCoreReg, true);
    GenSelect(mir->dalvikInsn.vB, mir->dalvikInsn.vC, mir->meta.ccode, rl_result.reg,
              rl_dest.ref ? kRefReg : kCoreReg);
    StoreValue(rl_dest, rl_result);
  } else {
    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];

    RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;
    rl_true = LoadValue(rl_true, result_reg_class);
    rl_false = LoadValue(rl_false, result_reg_class);
    RegLocation rl_result = EvalLoc(rl_dest, result_reg_class, true);

    bool is_wide = rl_dest.ref || rl_dest.wide;
    int opcode = is_wide ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc;
    NewLIR4(opcode, rl_result.reg.GetReg(),
            rl_true.reg.GetReg(), rl_false.reg.GetReg(), ArmConditionEncoding(mir->meta.ccode));
    StoreValue(rl_dest, rl_result);
  }
}

void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  // Normalize such that if either operand is constant, src2 will be constant.
  ConditionCode ccode = mir->meta.ccode;
  if (rl_src1.is_const) {
    std::swap(rl_src1, rl_src2);
    ccode = FlipComparisonOrder(ccode);
  }

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);

  if (rl_src2.is_const) {
    // TODO: Optimize for rl_src1.is_const? (Does happen in the boot image at the moment.)

    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    // Special handling using cbz & cbnz.
    if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
      OpCmpImmBranch(ccode, rl_src1.reg, 0, taken);
      OpCmpImmBranch(NegateComparison(ccode), rl_src1.reg, 0, not_taken);
      return;
    }

    // Only handle Imm if src2 is not already in a register.
    rl_src2 = UpdateLocWide(rl_src2);
    if (rl_src2.location != kLocPhysReg) {
      OpRegImm64(kOpCmp, rl_src1.reg, val);
      OpCondBranch(ccode, taken);
      OpCondBranch(NegateComparison(ccode), not_taken);
      return;
    }
  }

  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  OpCondBranch(ccode, taken);
  OpCondBranch(NegateComparison(ccode), not_taken);
}

/*
 * Generate a register comparison to an immediate and branch. Caller
 * is responsible for setting branch target field.
 */
LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                                  LIR* target) {
  LIR* branch = nullptr;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
  if (check_value == 0) {
    if (arm_cond == kArmCondEq || arm_cond == kArmCondNe) {
      A64Opcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
      A64Opcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
      branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
    } else if (arm_cond == kArmCondLs) {
      // kArmCondLs is an unsigned less or equal. A comparison r <= 0 is then the same as cbz.
      // This case happens for a bounds check of array[0].
      A64Opcode opcode = kA64Cbz2rt;
      A64Opcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
      branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
    } else if (arm_cond == kArmCondLt || arm_cond == kArmCondGe) {
      A64Opcode opcode = (arm_cond == kArmCondLt) ? kA64Tbnz3rht : kA64Tbz3rht;
      A64Opcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
      int value = reg.Is64Bit() ? 63 : 31;
      branch = NewLIR3(opcode | wide, reg.GetReg(), value, 0);
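      // (For a signed compare against zero, "lt" holds exactly when the sign
      //  bit -- bit 31 or 63 -- is set, so tbnz/tbz on that bit stands in for a
      //  full compare-and-branch.)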
    }
  }

  if (branch == nullptr) {
    OpRegImm(kOpCmp, reg, check_value);
    branch = NewLIR2(kA64B2ct, arm_cond, 0);
  }

  branch->target = target;
  return branch;
}

LIR* Arm64Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg,
                                     RegStorage base_reg, int offset, int check_value,
                                     LIR* target, LIR** compare) {
  DCHECK(compare == nullptr);
  // The temp register may be 64-bit (an ArgReg or RefReg).
  // Always compare a 32-bit value, no matter what temp_reg is.
  if (temp_reg.Is64Bit()) {
    temp_reg = As32BitReg(temp_reg);
  }
  Load32Disp(base_reg, offset, temp_reg);
  LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
  return branch;
}

LIR* Arm64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
  bool dest_is_fp = r_dest.IsFloat();
  bool src_is_fp = r_src.IsFloat();
  A64Opcode opcode = kA64Brk1d;
  LIR* res;

  if (LIKELY(dest_is_fp == src_is_fp)) {
    if (LIKELY(!dest_is_fp)) {
      DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());

      // Core/core copy.
      // Copies involving the sp register require a different instruction.
      opcode = UNLIKELY(A64_REG_IS_SP(r_dest.GetReg())) ? kA64Add4RRdT : kA64Mov2rr;

      // TODO(Arm64): kA64Add4RRdT formally has 4 args, but is used as a 2 args instruction.
      //   This currently works because the other arguments are set to 0 by default. We should
      //   rather introduce an alias kA64Mov2RR.

      // core/core copy. Do a x/x copy only if both registers are x.
      if (r_dest.Is64Bit() && r_src.Is64Bit()) {
        opcode = WIDE(opcode);
      }
    } else {
      // Float/float copy.
      bool dest_is_double = r_dest.IsDouble();
      bool src_is_double = r_src.IsDouble();

      // We do not do float/double or double/float casts here.
      DCHECK_EQ(dest_is_double, src_is_double);

      // Homogeneous float/float copy.
      opcode = (dest_is_double) ? WIDE(kA64Fmov2ff) : kA64Fmov2ff;
    }
  } else {
    // Inhomogeneous register copy.
    if (dest_is_fp) {
      if (r_dest.IsDouble()) {
        opcode = kA64Fmov2Sx;
      } else {
        r_src = Check32BitReg(r_src);
        opcode = kA64Fmov2sw;
      }
    } else {
      if (r_src.IsDouble()) {
        opcode = kA64Fmov2xS;
      } else {
        r_dest = Check32BitReg(r_dest);
        opcode = kA64Fmov2ws;
      }
    }
  }

  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());

  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }

  return res;
}

void Arm64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
  if (r_dest != r_src) {
    LIR* res = OpRegCopyNoInsert(r_dest, r_src);
    AppendLIR(res);
  }
}

void Arm64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
  OpRegCopy(r_dest, r_src);
}

// Table of magic divisors
struct MagicTable {
  int magic64_base;
  int magic64_eor;
  uint64_t magic64;
  uint32_t magic32;
  uint32_t shift;
  DividePattern pattern;
};

static const MagicTable magic_table[] = {
  {   0,      0,                  0,          0, 0, DivideNone},  // 0
  {   0,      0,                  0,          0, 0, DivideNone},  // 1
  {   0,      0,                  0,          0, 0, DivideNone},  // 2
  {0x3c,     -1, 0x5555555555555556, 0x55555556, 0, Divide3},     // 3
  {   0,      0,                  0,          0, 0, DivideNone},  // 4
  {0xf9,     -1, 0x6666666666666667, 0x66666667, 1, Divide5},     // 5
  {0x7c, 0x1041, 0x2AAAAAAAAAAAAAAB, 0x2AAAAAAB, 0, Divide3},     // 6
  {  -1,     -1, 0x924924924924924A, 0x92492493, 2, Divide7},     // 7
  {   0,      0,                  0,          0, 0, DivideNone},  // 8
  {  -1,     -1, 0x38E38E38E38E38E4, 0x38E38E39, 1, Divide5},     // 9
  {0xf9,     -1, 0x6666666666666667, 0x66666667, 2, Divide5},     // 10
  {  -1,     -1, 0x2E8BA2E8BA2E8BA3, 0x2E8BA2E9, 1, Divide5},     // 11
  {0x7c, 0x1041, 0x2AAAAAAAAAAAAAAB, 0x2AAAAAAB, 1, Divide5},     // 12
  {  -1,     -1, 0x4EC4EC4EC4EC4EC5, 0x4EC4EC4F, 2, Divide5},     // 13
  {  -1,     -1, 0x924924924924924A, 0x92492493, 3, Divide7},     // 14
  {0x78,     -1, 0x8888888888888889, 0x88888889, 3, Divide7},     // 15
};

// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
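// Illustrative check of the 32-bit entries: for division by 3 the magic number
// 0x55555556 is roughly 2^32 / 3, so the high 32 bits of x * magic give
// floor(x / 3) for x >= 0. E.g. x = 9: 9 * 0x55555556 = 0x300000006, whose high
// word is 3. The final subtract of (x >> 31) corrects the result for negative x.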
bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int lit) {
  UNUSED(dalvik_opcode);
  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTemp();
  LoadConstant(r_magic, magic_table[lit].magic32);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage r_long_mul = AllocTemp();
  NewLIR3(kA64Smull3xww, As64BitReg(r_long_mul).GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
  switch (pattern) {
    case Divide3:
      OpRegRegImm(kOpLsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul), 32);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul),
                  32 + magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide7:
      OpRegRegRegShift(kOpAdd, As64BitReg(r_long_mul), As64BitReg(rl_src.reg),
                       As64BitReg(r_long_mul), EncodeShift(kA64Lsr, 32));
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div,
                                        RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
  UNUSED(dalvik_opcode);
  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTempWide();
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  RegStorage r_long_mul = AllocTempWide();

  if (magic_table[lit].magic64_base >= 0) {
    // Check that the entry in the table is correct.
    if (kIsDebugBuild) {
      uint64_t reconstructed_imm;
      uint64_t base = DecodeLogicalImmediate(/*is_wide*/true, magic_table[lit].magic64_base);
      if (magic_table[lit].magic64_eor >= 0) {
        uint64_t eor = DecodeLogicalImmediate(/*is_wide*/true, magic_table[lit].magic64_eor);
        reconstructed_imm = base ^ eor;
      } else {
        reconstructed_imm = base + 1;
      }
      DCHECK_EQ(reconstructed_imm, magic_table[lit].magic64);
    }

    // Load the magic constant in two instructions.
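    // (For lit == 3, for example, the magic number 0x5555555555555556 is not a
    //  valid logical immediate, but 0x5555555555555555 is, so it is materialized
    //  with orr and then incremented with add.)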
    NewLIR3(WIDE(kA64Orr3Rrl), r_magic.GetReg(), rxzr, magic_table[lit].magic64_base);
    if (magic_table[lit].magic64_eor >= 0) {
      NewLIR3(WIDE(kA64Eor3Rrl), r_magic.GetReg(), r_magic.GetReg(),
              magic_table[lit].magic64_eor);
    } else {
      NewLIR4(WIDE(kA64Add4RRdT), r_magic.GetReg(), r_magic.GetReg(), 1, 0);
    }
  } else {
    LoadConstantWide(r_magic, magic_table[lit].magic64);
  }

  NewLIR3(kA64Smulh3xxx, r_long_mul.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg());
  switch (pattern) {
    case Divide3:
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    case Divide7:
      OpRegRegReg(kOpAdd, r_long_mul, rl_src.reg, r_long_mul);
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 63));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValueWide(rl_dest, rl_result);
  return true;
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Arm64Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
  return HandleEasyDivRem64(dalvik_opcode, is_div, rl_src, rl_dest, static_cast<int>(lit));
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Arm64Mir2Lir::HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
  const bool is_64bit = rl_dest.wide;
  const int nbits = (is_64bit) ? 64 : 32;

  if (lit < 2) {
    return false;
  }
  if (!IsPowerOfTwo(lit)) {
    if (is_64bit) {
      return SmallLiteralDivRem64(dalvik_opcode, is_div, rl_src, rl_dest, lit);
    } else {
      return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, static_cast<int32_t>(lit));
    }
  }
  int k = LowestSetBit(lit);
  if (k >= nbits - 2) {
    // Avoid special cases.
    return false;
  }

  RegLocation rl_result;
  RegStorage t_reg;
  if (is_64bit) {
    rl_src = LoadValueWide(rl_src, kCoreReg);
    rl_result = EvalLocWide(rl_dest, kCoreReg, true);
    t_reg = AllocTempWide();
  } else {
    rl_src = LoadValue(rl_src, kCoreReg);
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    t_reg = AllocTemp();
  }

  int shift = EncodeShift(kA64Lsr, nbits - k);
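  // For signed division by 2^k, adding (x >>> (nbits - k)) -- i.e. 2^k - 1 when
  // x is negative, 0 otherwise -- before the arithmetic shift rounds the result
  // toward zero. Worked example (k = 1, 32-bit): x = -7 gives -7 + 1 = -6, and
  // -6 >> 1 = -3 == trunc(-7 / 2).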
  if (is_div) {
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, shift);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, nbits - 1);
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, t_reg, shift);
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    }
  } else {
    if (lit == 2) {
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, shift);
      OpRegRegImm64(kOpAnd, t_reg, t_reg, lit - 1);
      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg, rl_src.reg, shift);
    } else {
      RegStorage t_reg2 = (is_64bit) ? AllocTempWide() : AllocTemp();
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, nbits - 1);
      OpRegRegRegShift(kOpAdd, t_reg2, rl_src.reg, t_reg, shift);
      OpRegRegImm64(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg2, t_reg, shift);
    }
  }

  if (is_64bit) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  UNUSED(rl_src, rl_dest, lit);
  LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
  UNREACHABLE();
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
                                       bool is_div) {
  UNUSED(rl_dest, rl_src1, lit, is_div);
  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
  UNREACHABLE();
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Put the literal in a temp.
  RegStorage lit_temp = AllocTemp();
  LoadConstant(lit_temp, lit);
  // Use the generic case for div/rem with arg2 in a register.
  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
  FreeTemp(lit_temp);

  return rl_result;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                    RegLocation rl_src2, bool is_div, int flags) {
  UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
  LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
  UNREACHABLE();
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
                                    bool is_div) {
  CHECK_EQ(r_src1.Is64Bit(), r_src2.Is64Bit());

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    OpRegRegReg(kOpDiv, rl_result.reg, r_src1, r_src2);
  } else {
    // temp = r_src1 / r_src2
    // dest = r_src1 - temp * r_src2
    RegStorage temp;
    A64Opcode wide;
    if (rl_result.reg.Is64Bit()) {
      temp = AllocTempWide();
      wide = WIDE(0);
    } else {
      temp = AllocTemp();
      wide = UNWIDE(0);
    }
    OpRegRegReg(kOpDiv, temp, r_src1, r_src2);
    NewLIR4(kA64Msub4rrrr | wide, rl_result.reg.GetReg(), temp.GetReg(),
            r_src2.GetReg(), r_src1.GetReg());
    FreeTemp(temp);
  }
  return rl_result;
}

bool Arm64Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
  RegLocation rl_src = info->args[0];
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Compare the source value with zero. Write the negated value to the result if
  // negative, otherwise write the original value.
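  // (csneg with "pl" keeps the source when N is clear; e.g. for src = -5 the
  //  cmp sets N, the condition fails, and the negation 5 is written instead.)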
  OpRegImm(kOpCmp, rl_src.reg, 0);
  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg(),
          kArmCondPl);
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
  RegLocation rl_src = info->args[0];
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTargetWide(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Compare the source value with zero. Write the negated value to the result if
  // negative, otherwise write the original value.
  OpRegImm(kOpCmp, rl_src.reg, 0);
  NewLIR4(WIDE(kA64Csneg4rrrc), rl_result.reg.GetReg(), rl_src.reg.GetReg(),
          rl_src.reg.GetReg(), kArmCondPl);
  StoreValueWide(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = (is_long) ? info->args[2] : info->args[1];
  rl_src1 = (is_long) ? LoadValueWide(rl_src1, kCoreReg) : LoadValue(rl_src1, kCoreReg);
  rl_src2 = (is_long) ? LoadValueWide(rl_src2, kCoreReg) : LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = (is_long) ? InlineTargetWide(info) : InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4((is_long) ? WIDE(kA64Csel4rrrc) : kA64Csel4rrrc, rl_result.reg.GetReg(),
          rl_src1.reg.GetReg(), rl_src2.reg.GetReg(), (is_min) ? kArmCondLt : kArmCondGt);
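  // (After the cmp, csel picks src1 when the condition holds: "lt" yields
  //  min(src1, src2), "gt" yields max. E.g. min(2, 7) selects 2.)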
  (is_long) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);
  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
  if (size == k64) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Arm64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  RegLocation rl_src_value = info->args[2];  // [size] value
  RegLocation rl_address = LoadValueWide(rl_src_address, kCoreReg);

  RegLocation rl_value;
  if (size == k64) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    rl_value = LoadValue(rl_src_value, kCoreReg);
  }
  StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
  return true;
}

bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  RegLocation rl_src_expected = info->args[4];  // int, long or Object
  // If is_long, high half is in info->args[5]
  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
  // If is_long, high half is in info->args[7]
  RegLocation rl_dest = InlineTarget(info);  // boolean place for result

  // Load Object and offset
  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
  RegLocation rl_offset = LoadValueWide(rl_src_offset, kCoreReg);

  RegLocation rl_new_value;
  RegLocation rl_expected;
  if (is_long) {
    rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
    rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
  } else {
    rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
    rl_expected = LoadValue(rl_src_expected, is_object ? kRefReg : kCoreReg);
  }

  if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
    // Mark card for object assuming new value is stored.
    MarkGCCard(rl_new_value.reg, rl_object.reg);
  }

  RegStorage r_ptr = AllocTempRef();
  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);

  // Free now unneeded rl_object and rl_offset to give more temps.
  ClobberSReg(rl_object.s_reg_low);
  FreeTemp(rl_object.reg);
  ClobberSReg(rl_offset.s_reg_low);
  FreeTemp(rl_offset.reg);

  // do {
  //   tmp = [r_ptr] - expected;
  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
  // result = tmp != 0;
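  // Note: kA64Ldaxr/kA64Stlxr below are a load-acquire/store-release exclusive
  // pair; stlxr writes 0 to its status register on success, so the loop retries
  // while the status register is non-zero.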

  RegStorage r_tmp;
  RegStorage r_tmp_stored;
  RegStorage rl_new_value_stored = rl_new_value.reg;
  A64Opcode wide = UNWIDE(0);
  if (is_long) {
    r_tmp_stored = r_tmp = AllocTempWide();
    wide = WIDE(0);
  } else if (is_object) {
    // References use 64-bit registers, but are stored as compressed 32-bit values.
    // This means r_tmp_stored != r_tmp.
    r_tmp = AllocTempRef();
    r_tmp_stored = As32BitReg(r_tmp);
    rl_new_value_stored = As32BitReg(rl_new_value_stored);
  } else {
    r_tmp_stored = r_tmp = AllocTemp();
  }

  RegStorage r_tmp32 = (r_tmp.Is32Bit()) ? r_tmp : As32BitReg(r_tmp);
  LIR* loop = NewLIR0(kPseudoTargetLabel);
  NewLIR2(kA64Ldaxr2rX | wide, r_tmp_stored.GetReg(), r_ptr.GetReg());
  OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  LIR* early_exit = OpCondBranch(kCondNe, NULL);
  NewLIR3(kA64Stlxr3wrX | wide, r_tmp32.GetReg(), rl_new_value_stored.GetReg(), r_ptr.GetReg());
  NewLIR3(kA64Cmp3RdT, r_tmp32.GetReg(), 0, ENCODE_NO_SHIFT);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  OpCondBranch(kCondNe, loop);

  LIR* exit_loop = NewLIR0(kPseudoTargetLabel);
  early_exit->target = exit_loop;

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondNe);

  FreeTemp(r_tmp);  // Now unneeded.
  FreeTemp(r_ptr);  // Now unneeded.

  StoreValue(rl_dest, rl_result);

  return true;
}

bool Arm64Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
  constexpr int kLargeArrayThreshold = 512;

  RegLocation rl_src = info->args[0];
  RegLocation rl_src_pos = info->args[1];
  RegLocation rl_dst = info->args[2];
  RegLocation rl_dst_pos = info->args[3];
  RegLocation rl_length = info->args[4];
  // Compile-time check; handle the exceptional cases in the non-inline method to
  // reduce the related meta-data.
  if ((rl_src_pos.is_const && (mir_graph_->ConstantValue(rl_src_pos) < 0)) ||
      (rl_dst_pos.is_const && (mir_graph_->ConstantValue(rl_dst_pos) < 0)) ||
      (rl_length.is_const && (mir_graph_->ConstantValue(rl_length) < 0))) {
    return false;
  }

  ClobberCallerSave();
  LockCallTemps();  // Prepare for explicit register usage.
  RegStorage rs_src = rs_x0;
  RegStorage rs_dst = rs_x1;
  LoadValueDirectFixed(rl_src, rs_src);
  LoadValueDirectFixed(rl_dst, rs_dst);

  // Handle null pointer exception in slow-path.
  LIR* src_check_branch = OpCmpImmBranch(kCondEq, rs_src, 0, nullptr);
  LIR* dst_check_branch = OpCmpImmBranch(kCondEq, rs_dst, 0, nullptr);
  // Handle potential overlapping in slow-path.
  // TUNING: Support overlapping cases.
  LIR* src_dst_same = OpCmpBranch(kCondEq, rs_src, rs_dst, nullptr);
  // Handle exception or big length in slow-path.
  RegStorage rs_length = rs_w2;
  LoadValueDirectFixed(rl_length, rs_length);
  LIR* len_neg_or_too_big = OpCmpImmBranch(kCondHi, rs_length, kLargeArrayThreshold, nullptr);
  // Src bounds check.
  RegStorage rs_src_pos = rs_w3;
  RegStorage rs_arr_length = rs_w4;
  LoadValueDirectFixed(rl_src_pos, rs_src_pos);
  LIR* src_pos_negative = OpCmpImmBranch(kCondLt, rs_src_pos, 0, nullptr);
  Load32Disp(rs_src, mirror::Array::LengthOffset().Int32Value(), rs_arr_length);
  OpRegReg(kOpSub, rs_arr_length, rs_src_pos);
  LIR* src_bad_len = OpCmpBranch(kCondLt, rs_arr_length, rs_length, nullptr);
  // Dst bounds check.
  RegStorage rs_dst_pos = rs_w5;
  LoadValueDirectFixed(rl_dst_pos, rs_dst_pos);
  LIR* dst_pos_negative = OpCmpImmBranch(kCondLt, rs_dst_pos, 0, nullptr);
  Load32Disp(rs_dst, mirror::Array::LengthOffset().Int32Value(), rs_arr_length);
  OpRegReg(kOpSub, rs_arr_length, rs_dst_pos);
  LIR* dst_bad_len = OpCmpBranch(kCondLt, rs_arr_length, rs_length, nullptr);

  // Everything is checked now.
  // Set rs_src to the address of the first element to be copied.
  rs_src_pos = As64BitReg(rs_src_pos);
  OpRegImm(kOpAdd, rs_src, mirror::Array::DataOffset(2).Int32Value());
  OpRegRegImm(kOpLsl, rs_src_pos, rs_src_pos, 1);
  OpRegReg(kOpAdd, rs_src, rs_src_pos);
  // Set rs_dst to the address of the first element to be copied.
  rs_dst_pos = As64BitReg(rs_dst_pos);
  OpRegImm(kOpAdd, rs_dst, mirror::Array::DataOffset(2).Int32Value());
  OpRegRegImm(kOpLsl, rs_dst_pos, rs_dst_pos, 1);
  OpRegReg(kOpAdd, rs_dst, rs_dst_pos);

  // rs_arr_length won't be used anymore.
  RegStorage rs_tmp = rs_arr_length;
  // Use 64-bit view since rs_length will be used as index.
  rs_length = As64BitReg(rs_length);
  OpRegRegImm(kOpLsl, rs_length, rs_length, 1);
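  // The element count is now a byte count (chars are 2 bytes). Bit 1 of the
  // count selects a single odd-element copy, bit 2 a two-element copy, and the
  // remaining multiple of 8 bytes is drained by the four-element loop below.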

  // Copy one element.
  LIR* jmp_to_copy_two = NewLIR3(WIDE(kA64Tbz3rht), rs_length.GetReg(), 1, 0);
  OpRegImm(kOpSub, rs_length, 2);
  LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, kSignedHalf);
  StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, kSignedHalf);

  // Copy two elements.
  LIR *copy_two = NewLIR0(kPseudoTargetLabel);
  LIR* jmp_to_copy_four = NewLIR3(WIDE(kA64Tbz3rht), rs_length.GetReg(), 2, 0);
  OpRegImm(kOpSub, rs_length, 4);
  LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, k32);
  StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, k32);

  // Copy four elements.
  LIR *copy_four = NewLIR0(kPseudoTargetLabel);
  LIR* jmp_to_ret = OpCmpImmBranch(kCondEq, rs_length, 0, nullptr);
  LIR *begin_loop = NewLIR0(kPseudoTargetLabel);
  OpRegImm(kOpSub, rs_length, 8);
  rs_tmp = As64BitReg(rs_tmp);
  LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, k64);
  StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, k64);
  LIR* jmp_to_loop = OpCmpImmBranch(kCondNe, rs_length, 0, nullptr);
  LIR* loop_finished = OpUnconditionalBranch(nullptr);

  LIR *check_failed = NewLIR0(kPseudoTargetLabel);
  LIR* launchpad_branch = OpUnconditionalBranch(nullptr);
  LIR* return_point = NewLIR0(kPseudoTargetLabel);

  src_check_branch->target = check_failed;
  dst_check_branch->target = check_failed;
  src_dst_same->target = check_failed;
  len_neg_or_too_big->target = check_failed;
  src_pos_negative->target = check_failed;
  src_bad_len->target = check_failed;
  dst_pos_negative->target = check_failed;
  dst_bad_len->target = check_failed;
  jmp_to_copy_two->target = copy_two;
  jmp_to_copy_four->target = copy_four;
  jmp_to_ret->target = return_point;
  jmp_to_loop->target = begin_loop;
  loop_finished->target = return_point;

  AddIntrinsicSlowPath(info, launchpad_branch, return_point);
  ClobberCallerSave();  // We must clobber everything because the slow path will return here.

  return true;
}

LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  return RawLIR(current_dalvik_offset_, kA64Ldr2rp, As32BitReg(reg).GetReg(), 0, 0, 0, 0, target);
}

LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
  UNUSED(r_base, count);
  LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
  UNREACHABLE();
}

LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
  UNUSED(r_base, count);
  LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
  UNREACHABLE();
}

void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                 RegLocation rl_result, int lit ATTRIBUTE_UNUSED,
                                                 int first_bit, int second_bit) {
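  // Multiplies by a constant with exactly two bits set:
  //   x * ((1 << second_bit) + (1 << first_bit))
  //     == ((x << (second_bit - first_bit)) + x) << first_bit.
  // Worked example (illustrative): lit = 10 = 0b1010, first_bit = 1,
  // second_bit = 3: ((x << 2) + x) << 1 == 5x << 1 == 10x.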
  OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
                   EncodeShift(kA64Lsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
  }
}

void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unexpected use of GenDivZero for Arm64";
}

// Test suspend flag, return target of taken suspend branch
LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
  NewLIR3(kA64Subs3rRd, rwSUSPEND, rwSUSPEND, 1);
  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
}

// Decrement register and branch on condition
LIR* Arm64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
  // Combine sub & test using sub setflags encoding here. We need to make sure a
  // subtract form that sets carry is used, so generate explicitly.
  // TODO: might be best to add a new op, kOpSubs, and handle it generically.
  A64Opcode opcode = reg.Is64Bit() ? WIDE(kA64Subs3rRd) : UNWIDE(kA64Subs3rRd);
  NewLIR3(opcode, reg.GetReg(), reg.GetReg(), 1);  // For value == 1, this should set flags.
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  return OpCondBranch(c_code, target);
}

bool Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will
  // generate one.
  LIR* barrier = last_lir_insn_;

  int dmb_flavor;
  // TODO: revisit Arm barrier kinds
  switch (barrier_kind) {
    case kAnyStore: dmb_flavor = kISH; break;
    case kLoadAny: dmb_flavor = kISH; break;
        // We conjecture that kISHLD is insufficient. It is documented
        // to provide LoadLoad | StoreStore ordering. But if this were used
        // to implement volatile loads, we suspect that the lack of store
        // atomicity on ARM would cause us to allow incorrect results for
        // the canonical IRIW example. But we're not sure.
        // We should be using acquire loads instead.
    case kStoreStore: dmb_flavor = kISHST; break;
    case kAnyAny: dmb_flavor = kISH; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }

  bool ret = false;

  // If the same barrier already exists, don't generate another.
  if (barrier == nullptr
      || (barrier->opcode != kA64Dmb1B || barrier->operands[0] != dmb_flavor)) {
    barrier = NewLIR1(kA64Dmb1B, dmb_flavor);
    ret = true;
  }

  // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = &kEncodeAll;
  return ret;
#else
  return false;
#endif
}

void Arm64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  NewLIR4(WIDE(kA64Sbfm4rrdd), rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0, 31);
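  // (Sbfm with immr = 0, imms = 31 is the sxtw alias: it sign-extends the low
  //  32 bits of the source into the 64-bit destination.)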
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2, bool is_div,
                                 int flags) {
  if (rl_src2.is_const) {
    DCHECK(rl_src2.wide);
    int64_t lit = mir_graph_->ConstantValueWide(rl_src2);
    if (HandleEasyDivRem64(opcode, is_div, rl_src1, rl_dest, lit)) {
      return;
    }
  }

  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  if ((flags & MIR_IGNORE_DIV_ZERO_CHECK) == 0) {
    GenDivZeroCheck(rl_src2.reg);
  }
  rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, is_div);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
  RegLocation rl_result;

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegRegShift(op, rl_result.reg, rl_src1.reg, rl_src2.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpNeg, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpMvn, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                  RegLocation rl_src1, RegLocation rl_src2, int flags) {
  switch (opcode) {
    case Instruction::NOT_LONG:
      GenNotLong(rl_dest, rl_src2);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
      return;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
      return;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
      return;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ true, flags);
      return;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      GenDivRemLong(opcode, rl_dest, rl_src1, rl_src2, /*is_div*/ false, flags);
      return;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
      return;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
      return;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
      return;
    case Instruction::NEG_LONG: {
      GenNegLong(rl_dest, rl_src2);
      return;
    }
    default:
      LOG(FATAL) << "Invalid long arith op";
      return;
  }
}
1127
/*
 * Generate array load
 */
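// For a constant index the scaled element offset is folded into the displacement of a single
// load; otherwise the data pointer is materialized in a temp and a register-offset (indexed)
// load is used.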
void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                               RegLocation rl_index, RegLocation rl_dest, int scale) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  bool constant_index = rl_index.is_const;
  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  if (rl_dest.wide) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  if (constant_index) {
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      FreeTemp(reg_len);
    }
    // Fold the constant index into the data offset.
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
    if (rl_result.ref) {
      LoadRefDisp(rl_array.reg, data_offset, rl_result.reg, kNotVolatile);
    } else {
      LoadBaseDisp(rl_array.reg, data_offset, rl_result.reg, size, kNotVolatile);
    }
  } else {
    // Offset base, then use indexed load.
    RegStorage reg_ptr = AllocTempRef();
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    FreeTemp(rl_array.reg);
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    if (rl_result.ref) {
      LoadRefIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale);
    } else {
      LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
    }
    FreeTemp(reg_ptr);
  }
  if (rl_dest.wide) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Generate array store
 */
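// When card_mark is set (i.e. for reference stores), MarkGCCard is emitted after the store so
// that the GC's card table records that this array may now contain a reference that must be
// rescanned.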
void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                               RegLocation rl_index, RegLocation rl_src, int scale,
                               bool card_mark) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  bool constant_index = rl_index.is_const;

  int data_offset;
  if (size == k64 || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  RegStorage reg_ptr;
  bool allocated_reg_ptr_temp = false;
  if (constant_index) {
    reg_ptr = rl_array.reg;
  } else if (IsTemp(rl_array.reg) && !card_mark) {
    Clobber(rl_array.reg);
    reg_ptr = rl_array.reg;
  } else {
    allocated_reg_ptr_temp = true;
    reg_ptr = AllocTempRef();
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    // NOTE: max live temps(4) here.
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  /* at this point, reg_ptr points to array, 2 live temps */
  if (rl_src.wide) {
    rl_src = LoadValueWide(rl_src, reg_class);
  } else {
    rl_src = LoadValue(rl_src, reg_class);
  }
  if (constant_index) {
    if (needs_range_check) {
      GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      FreeTemp(reg_len);
    }
    // Fold the constant index into the data offset.
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
    if (rl_src.ref) {
      StoreRefDisp(reg_ptr, data_offset, rl_src.reg, kNotVolatile);
    } else {
      StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size, kNotVolatile);
    }
  } else {
    /* reg_ptr -> array data */
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    if (rl_src.ref) {
      StoreRefIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale);
    } else {
      StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
    }
  }
  if (allocated_reg_ptr_temp) {
    FreeTemp(reg_ptr);
  }
  if (card_mark) {
    MarkGCCard(rl_src.reg, rl_array.reg);
  }
}

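// Long shifts by a known amount: the dex spec masks the shift to its low 6 bits, a shift of
// zero degenerates to a plain copy, and anything else is a single lsl/asr/lsr with an
// immediate.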
void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                     RegLocation rl_src, RegLocation rl_shift,
                                     int flags ATTRIBUTE_UNUSED) {
  OpKind op = kOpBkpt;
  // Per spec, we only care about low 6 bits of shift amount.
  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
  rl_src = LoadValueWide(rl_src, kCoreReg);
  if (shift_amount == 0) {
    StoreValueWide(rl_dest, rl_src);
    return;
  }

  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      op = kOpLsl;
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      op = kOpAsr;
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  OpRegRegImm(op, rl_result.reg, rl_src.reg, shift_amount);
  StoreValueWide(rl_dest, rl_result);
}

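// Long arithmetic where one operand is a constant. add/and/or/xor are commutative, so a
// constant on the left is swapped to the right; sub is not, so a non-constant right operand
// falls back to the generic register-register path.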
void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                     RegLocation rl_src1, RegLocation rl_src2, int flags) {
  OpKind op = kOpBkpt;
  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      op = kOpSub;
      break;
    case Instruction::AND_LONG:
    case Instruction::AND_LONG_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      op = kOpXor;
      break;
    default:
      LOG(FATAL) << "Unexpected opcode";
  }

  if (op == kOpSub) {
    if (!rl_src2.is_const) {
      return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2, flags);
    }
  } else {
    // Commutativity: move the constant into the second operand.
    if (!rl_src2.is_const) {
      DCHECK(rl_src1.is_const);
      std::swap(rl_src1, rl_src2);
    }
  }
  DCHECK(rl_src2.is_const);
  int64_t val = mir_graph_->ConstantValueWide(rl_src2);

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegImm64(op, rl_result.reg, rl_src1.reg, val);
  StoreValueWide(rl_dest, rl_result);
}

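// Pops the lowest set bit of reg_mask: *reg is advanced by the bit position plus one (callers
// start it at -1, so it accumulates to the absolute register number) and the mask is shifted
// past the consumed bit.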
static uint32_t ExtractReg(uint32_t reg_mask, int* reg) {
  // Find first register.
  int first_bit_set = CTZ(reg_mask) + 1;
  *reg = *reg + first_bit_set;
  reg_mask >>= first_bit_set;
  return reg_mask;
}

/**
 * @brief Split a register list into pairs of registers.
 *
 * Given a list of registers in @p reg_mask, split the list into pairs. Use as follows:
 * @code
 *   int reg1 = -1, reg2 = -1;
 *   while (reg_mask) {
 *     reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
 *     if (UNLIKELY(reg2 < 0)) {
 *       // Single register in reg1.
 *     } else {
 *       // Pair in reg1, reg2.
 *     }
 *   }
 * @endcode
 */
static uint32_t GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
  // Find first register.
  int first_bit_set = CTZ(reg_mask) + 1;
  int reg = *reg1 + first_bit_set;
  reg_mask >>= first_bit_set;

  if (LIKELY(reg_mask)) {
    // Save the first register, find the second and use the pair opcode.
    int second_bit_set = CTZ(reg_mask) + 1;
    *reg2 = reg;
    reg_mask >>= second_bit_set;
    *reg1 = reg + second_bit_set;
    return reg_mask;
  }

  // Use the single opcode, as we just have one register.
  *reg1 = reg;
  *reg2 = -1;
  return reg_mask;
}

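// The spill/unspill helpers below take a byte offset but immediately rescale it to 8-byte
// slots, the unit the scaled-displacement str/stp/ldr/ldp encodings used here expect; each
// loop iteration consumes one register (str/ldr) or a pair (stp/ldp).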
static void SpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

// TODO(Arm64): consider using ld1 and st1?
static void SpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      m2l->NewLIR3(WIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
                   offset);
    } else {
      m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
                   RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

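// Variant 1: drop the whole frame with a single sub, then store the spills at its top with
// plain str/stp, the FP block sitting just below the core block. Only valid while every
// resulting offset still fits the scaled immediate.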
static int SpillRegsPreSub(Arm64Mir2Lir* m2l, uint32_t core_reg_mask, uint32_t fp_reg_mask,
                           int frame_size) {
  m2l->OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size);

  int core_count = POPCOUNT(core_reg_mask);

  if (fp_reg_mask != 0) {
    // Spill FP regs.
    int fp_count = POPCOUNT(fp_reg_mask);
    int spill_offset = frame_size - (core_count + fp_count) * kArm64PointerSize;
    SpillFPRegs(m2l, rs_sp, spill_offset, fp_reg_mask);
  }

  if (core_reg_mask != 0) {
    // Spill core regs.
    int spill_offset = frame_size - (core_count * kArm64PointerSize);
    SpillCoreRegs(m2l, rs_sp, spill_offset, core_reg_mask);
  }

  return frame_size;
}

static int SpillRegsPreIndexed(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
                               uint32_t fp_reg_mask) {
  // Spill both core and fp regs at the same time.
  // The very first instruction will be an stp with pre-indexed address, moving the stack pointer
  // down. From then on, we fill upwards. This will generate overall the same number of instructions
  // as the specialized code above in most cases (exception being odd number of core and even
  // non-zero fp spills), but is more flexible, as the offsets are guaranteed small.
  //
  // Some demonstrative fill cases: (c) = core, (f) = fp
  // cc    44   cc    44   cc    22   cc    33   fc => 1[1/2]
  // fc => 23   fc => 23   ff => 11   ff => 22
  // ff    11    f    11    f    11
  //
  int reg1 = -1, reg2 = -1;
  int core_count = POPCOUNT(core_reg_mask);
  int fp_count = POPCOUNT(fp_reg_mask);

  int combined = fp_count + core_count;
  int all_offset = RoundUp(combined, 2);  // Needs to be 16B = 2-reg aligned.

  int cur_offset = 2;  // What's the starting offset after the first stp? We expect the base slot
                       // to be filled.

  // First figure out whether the bottom is FP or core.
  if (fp_count > 0) {
    // Some FP spills.
    //
    // Four cases: (d0 is dummy to fill up stp)
    // 1) Single FP, even number of core -> stp d0, fp_reg
    // 2) Single FP, odd number of core -> stp fp_reg, d0
    // 3) More FP, even number combined -> stp fp_reg1, fp_reg2
    // 4) More FP, odd number combined -> stp d0, fp_reg
    if (fp_count == 1) {
      fp_reg_mask = ExtractReg(fp_reg_mask, &reg1);
      DCHECK_EQ(fp_reg_mask, 0U);
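      // In both branches below the lone FP register is written twice by the pre-indexed stp;
      // the duplicate copy merely pads the 16-byte slot (the "d0 dummy" of the comment above).
      // The branches differ only in which half of that slot a later core reg will fill.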
      if (core_count % 2 == 0) {
        m2l->NewLIR4(WIDE(kA64StpPre4ffXD),
                     RegStorage::FloatSolo64(reg1).GetReg(),
                     RegStorage::FloatSolo64(reg1).GetReg(),
                     base.GetReg(), -all_offset);
      } else {
        m2l->NewLIR4(WIDE(kA64StpPre4ffXD),
                     RegStorage::FloatSolo64(reg1).GetReg(),
                     RegStorage::FloatSolo64(reg1).GetReg(),
                     base.GetReg(), -all_offset);
        cur_offset = 0;  // That core reg needs to go into the upper half.
      }
    } else {
      if (combined % 2 == 0) {
        fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
        m2l->NewLIR4(WIDE(kA64StpPre4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
                     RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), -all_offset);
      } else {
        fp_reg_mask = ExtractReg(fp_reg_mask, &reg1);
        m2l->NewLIR4(WIDE(kA64StpPre4ffXD), rs_d0.GetReg(), RegStorage::FloatSolo64(reg1).GetReg(),
                     base.GetReg(), -all_offset);
      }
    }
  } else {
    // No FP spills.
    //
    // Two cases:
    // 1) Even number of core -> stp core1, core2
    // 2) Odd number of core -> stp xzr, core1
    if (core_count % 2 == 1) {
      core_reg_mask = ExtractReg(core_reg_mask, &reg1);
      m2l->NewLIR4(WIDE(kA64StpPre4rrXD), rs_xzr.GetReg(),
                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), -all_offset);
    } else {
      core_reg_mask = GenPairWise(core_reg_mask, &reg1, &reg2);
      m2l->NewLIR4(WIDE(kA64StpPre4rrXD), RegStorage::Solo64(reg2).GetReg(),
                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), -all_offset);
    }
  }

  if (fp_count != 0) {
    for (; fp_reg_mask != 0;) {
      // Have some FP regs to do.
      fp_reg_mask = GenPairWise(fp_reg_mask, &reg1, &reg2);
      if (UNLIKELY(reg2 < 0)) {
        m2l->NewLIR3(WIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
                     cur_offset);
        // Do not increment offset here, as the second half will be filled by a core reg.
      } else {
        m2l->NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
                     RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), cur_offset);
        cur_offset += 2;
      }
    }

    // Reset counting.
    reg1 = -1;

    // If there is an odd number of core registers, we need to store the bottom now.
    if (core_count % 2 == 1) {
      core_reg_mask = ExtractReg(core_reg_mask, &reg1);
      m2l->NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(),
                   cur_offset + 1);
      cur_offset += 2;  // Half-slot filled now.
    }
  }

  // Spill the rest of the core regs. There is guaranteed to be an even number of them left.
  DCHECK_EQ(POPCOUNT(core_reg_mask) % 2, 0);
  for (; core_reg_mask != 0; cur_offset += 2) {
    core_reg_mask = GenPairWise(core_reg_mask, &reg1, &reg2);
    m2l->NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
                 RegStorage::Solo64(reg1).GetReg(), base.GetReg(), cur_offset);
  }

  DCHECK_EQ(cur_offset, all_offset);

  return all_offset * 8;
}

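// The 63 below is the largest positive value of the signed 7-bit scaled immediate that the
// 64-bit stp/ldp forms can encode, i.e. 63 slots of 8 bytes.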
int Arm64Mir2Lir::SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
                            int frame_size) {
  // If the frame size is small enough that all offsets would fit into the immediates, use that
  // setup, as it decrements sp early (kind of instruction scheduling), and is not worse
  // instruction-count wise than the complicated code below.
  //
  // This case is also optimal when we have an odd number of core spills, and an even (non-zero)
  // number of fp spills.
  if ((RoundUp(frame_size, 8) / 8 <= 63)) {
    return SpillRegsPreSub(this, core_reg_mask, fp_reg_mask, frame_size);
  } else {
    return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask);
  }
}

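// Counterparts of SpillCoreRegs/SpillFPRegs above: same pairing and slot-scaling scheme, with
// ldr/ldp instead of str/stp.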
static void UnSpillCoreRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      m2l->NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      DCHECK_LE(offset, 63);
      m2l->NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
                   RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

static void UnSpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, &reg1, &reg2);
    if (UNLIKELY(reg2 < 0)) {
      m2l->NewLIR3(WIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(),
                   offset);
    } else {
      m2l->NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
                   RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

void Arm64Mir2Lir::UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
                               int frame_size) {
  DCHECK_EQ(base, rs_sp);
  // Restore saves and drop stack frame.
  // 2 versions:
  //
  // 1. (Original): Try to address directly, then drop the whole frame.
  //                Limitation: ldp is a 7b signed immediate.
  //
  // 2. (New): Drop the non-save-part. Then do similar to original, which is now guaranteed to be
  //           in range. Then drop the rest.
  //
  // TODO: In methods with few spills but huge frame, it would be better to do non-immediate loads
  //       in variant 1.

  // "Magic" constant, 63 (max signed 7b) * 8.
  static constexpr int kMaxFramesizeForOffset = 63 * kArm64PointerSize;

  const int num_core_spills = POPCOUNT(core_reg_mask);
  const int num_fp_spills = POPCOUNT(fp_reg_mask);

  int early_drop = 0;

  if (frame_size > kMaxFramesizeForOffset) {
    // Second variant. Drop the frame part.

    // TODO: Always use the first formula, as num_fp_spills would be zero?
    if (fp_reg_mask != 0) {
      early_drop = frame_size - kArm64PointerSize * (num_fp_spills + num_core_spills);
    } else {
      early_drop = frame_size - kArm64PointerSize * num_core_spills;
    }

    // Drop needs to be 16B aligned, so that SP keeps aligned.
    early_drop = RoundDown(early_drop, 16);

    OpRegImm64(kOpAdd, rs_sp, early_drop);
  }

  // Unspill.
  if (fp_reg_mask != 0) {
    int offset = frame_size - early_drop - kArm64PointerSize * (num_fp_spills + num_core_spills);
    UnSpillFPRegs(this, rs_sp, offset, fp_reg_mask);
  }
  if (core_reg_mask != 0) {
    int offset = frame_size - early_drop - kArm64PointerSize * num_core_spills;
    UnSpillCoreRegs(this, rs_sp, offset, core_reg_mask);
  }

  // Drop the (rest of the) frame.
  OpRegImm64(kOpAdd, rs_sp, frame_size - early_drop);
}

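// Inlined intrinsic for bit reversal (Integer.reverse / Long.reverse): a single rbit does the
// whole job, with the w- or x-register form selected by operand size.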
bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
  A64Opcode wide = IsWide(size) ? WIDE(0) : UNWIDE(0);
  RegLocation rl_src_i = info->args[0];
  RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegLocation rl_i = IsWide(size) ? LoadValueWide(rl_src_i, kCoreReg)
                                  : LoadValue(rl_src_i, kCoreReg);
  NewLIR2(kA64Rbit2rr | wide, rl_result.reg.GetReg(), rl_i.reg.GetReg());
  IsWide(size) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
  return true;
}

}  // namespace art