/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Arm64 ISA. */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"

namespace art {

LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
  OpRegReg(kOpCmp, src1, src2);
  return OpCondBranch(cond, target);
}

LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
  LOG(FATAL) << "Unexpected use of OpIT for Arm64";
  return NULL;
}

void Arm64Mir2Lir::OpEndIT(LIR* it) {
  LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}

/*
 * 64-bit 3-way compare function.
 *     cmp   xA, xB
 *     csinc wC, wzr, wzr, eq  // wC = (xA == xB) ? 0 : 1
 *     csneg wC, wC, wC, ge    // wC = (xA >= xB) ? wC : -wC
 */
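// The sequence above leaves -1, 0 or +1 in wC: 0 when xA == xB, +1 when xA > xB
// (csneg keeps wC) and -1 when xA < xB (csneg negates it).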
void Arm64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);

  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondEq);
  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_result.reg.GetReg(),
          rl_result.reg.GetReg(), kArmCondGe);
  StoreValue(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                  RegLocation rl_src1, RegLocation rl_shift) {
  OpKind op = kOpBkpt;
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      op = kOpLsl;
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      op = kOpAsr;
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Unexpected case: " << opcode;
  }
  rl_shift = LoadValue(rl_shift, kCoreReg);
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegReg(op, rl_result.reg, rl_src1.reg, As64BitReg(rl_shift.reg));
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  RegLocation rl_result;
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  RegisterClass src_reg_class = rl_src.ref ? kRefReg : kCoreReg;
  RegisterClass result_reg_class = rl_dest.ref ? kRefReg : kCoreReg;
  rl_src = LoadValue(rl_src, src_reg_class);
  ArmConditionCode code = ArmConditionEncoding(mir->meta.ccode);

  RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
  RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
  rl_true = LoadValue(rl_true, result_reg_class);
  rl_false = LoadValue(rl_false, result_reg_class);
  rl_result = EvalLoc(rl_dest, result_reg_class, true);
  OpRegImm(kOpCmp, rl_src.reg, 0);
  NewLIR4(kA64Csel4rrrc, rl_result.reg.GetReg(), rl_true.reg.GetReg(),
          rl_false.reg.GetReg(), code);
  StoreValue(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  // Normalize such that if either operand is constant, src2 will be constant.
  ConditionCode ccode = mir->meta.ccode;
  if (rl_src1.is_const) {
    std::swap(rl_src1, rl_src2);
    ccode = FlipComparisonOrder(ccode);
  }

  if (rl_src2.is_const) {
    rl_src2 = UpdateLocWide(rl_src2);
    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    // Special handling using cbz & cbnz.
    if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
      OpCmpImmBranch(ccode, rl_src1.reg, 0, taken);
      OpCmpImmBranch(NegateComparison(ccode), rl_src1.reg, 0, not_taken);
      return;
    // Only handle Imm if src2 is not already in a register.
    } else if (rl_src2.location != kLocPhysReg) {
      OpRegImm64(kOpCmp, rl_src1.reg, val);
      OpCondBranch(ccode, taken);
      OpCondBranch(NegateComparison(ccode), not_taken);
      return;
    }
  }

  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  OpCondBranch(ccode, taken);
  OpCondBranch(NegateComparison(ccode), not_taken);
}

/*
 * Generate a register comparison to an immediate and branch. Caller
 * is responsible for setting branch target field.
 */
LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                                  LIR* target) {
  LIR* branch;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
  if (check_value == 0 && (arm_cond == kArmCondEq || arm_cond == kArmCondNe)) {
    ArmOpcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
    ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
    branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
  } else {
    OpRegImm(kOpCmp, reg, check_value);
    branch = NewLIR2(kA64B2ct, arm_cond, 0);
  }
  branch->target = target;
  return branch;
}

LIR* Arm64Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg,
                                     RegStorage base_reg, int offset, int check_value,
                                     LIR* target) {
  // The temp register may be 64-bit (an ArgReg or RefReg).
  // Always compare a 32-bit value, no matter what temp_reg is.
  if (temp_reg.Is64Bit()) {
    temp_reg = As32BitReg(temp_reg);
  }
  Load32Disp(base_reg, offset, temp_reg);
  LIR* branch = OpCmpImmBranch(cond, temp_reg, check_value, target);
  return branch;
}

LIR* Arm64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
  bool dest_is_fp = r_dest.IsFloat();
  bool src_is_fp = r_src.IsFloat();
  ArmOpcode opcode = kA64Brk1d;
  LIR* res;

  if (LIKELY(dest_is_fp == src_is_fp)) {
    if (LIKELY(!dest_is_fp)) {
      DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());

      // Core/core copy.
      // Copies involving the sp register require a different instruction.
      opcode = UNLIKELY(A64_REG_IS_SP(r_dest.GetReg())) ? kA64Add4RRdT : kA64Mov2rr;

      // TODO(Arm64): kA64Add4RRdT formally has 4 args, but is used here as a 2-arg instruction.
      //   This currently works because the other arguments default to 0. We should instead
      //   introduce an alias kA64Mov2RR.

      // core/core copy. Do an x/x copy only if both registers are x.
      if (r_dest.Is64Bit() && r_src.Is64Bit()) {
        opcode = WIDE(opcode);
      }
    } else {
      // Float/float copy.
      bool dest_is_double = r_dest.IsDouble();
      bool src_is_double = r_src.IsDouble();

      // We do not do float/double or double/float casts here.
      DCHECK_EQ(dest_is_double, src_is_double);

      // Homogeneous float/float copy.
      opcode = (dest_is_double) ? FWIDE(kA64Fmov2ff) : kA64Fmov2ff;
    }
  } else {
    // Inhomogeneous register copy.
    if (dest_is_fp) {
      if (r_dest.IsDouble()) {
        opcode = kA64Fmov2Sx;
      } else {
        r_src = Check32BitReg(r_src);
        opcode = kA64Fmov2sw;
      }
    } else {
      if (r_src.IsDouble()) {
        opcode = kA64Fmov2xS;
      } else {
        r_dest = Check32BitReg(r_dest);
        opcode = kA64Fmov2ws;
      }
    }
  }

  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());

  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }

  return res;
}

void Arm64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
  if (r_dest != r_src) {
    LIR* res = OpRegCopyNoInsert(r_dest, r_src);
    AppendLIR(res);
  }
}

void Arm64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
  OpRegCopy(r_dest, r_src);
}

// Table of magic divisors
struct MagicTable {
  uint32_t magic;
  uint32_t shift;
  DividePattern pattern;
};

static const MagicTable magic_table[] = {
  {0, 0, DivideNone},        // 0
  {0, 0, DivideNone},        // 1
  {0, 0, DivideNone},        // 2
  {0x55555556, 0, Divide3},  // 3
  {0, 0, DivideNone},        // 4
  {0x66666667, 1, Divide5},  // 5
  {0x2AAAAAAB, 0, Divide3},  // 6
  {0x92492493, 2, Divide7},  // 7
  {0, 0, DivideNone},        // 8
  {0x38E38E39, 1, Divide5},  // 9
  {0x66666667, 2, Divide5},  // 10
  {0x2E8BA2E9, 1, Divide5},  // 11
  {0x2AAAAAAB, 1, Divide5},  // 12
  {0x4EC4EC4F, 2, Divide5},  // 13
  {0x92492493, 3, Divide7},  // 14
  {0x88888889, 3, Divide7},  // 15
};

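// Illustrative example (division by 3): the magic constant 0x55555556 is
// (2^32 + 2) / 3, so for a 32-bit dividend n the high 32 bits of the signed
// product 0x55555556 * n already hold n / 3 when n is non-negative; the trailing
// "sub ..., asr #31" in the patterns below adds 1 for negative n to produce the
// truncated quotient.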
// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int lit) {
  if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTemp();
  LoadConstant(r_magic, magic_table[lit].magic);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage r_long_mul = AllocTemp();
  NewLIR4(kA64Smaddl4xwwx, As64BitReg(r_long_mul).GetReg(),
          r_magic.GetReg(), rl_src.reg.GetReg(), rxzr);
  switch (pattern) {
    case Divide3:
      OpRegRegImm(kOpLsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul), 32);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, As64BitReg(r_long_mul), As64BitReg(r_long_mul),
                  32 + magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    case Divide7:
      OpRegRegRegShift(kOpAdd, As64BitReg(r_long_mul), As64BitReg(rl_src.reg),
                       As64BitReg(r_long_mul), EncodeShift(kA64Lsr, 32));
      OpRegRegImm(kOpAsr, r_long_mul, r_long_mul, magic_table[lit].shift);
      OpRegRegRegShift(kOpSub, rl_result.reg, r_long_mul, rl_src.reg, EncodeShift(kA64Asr, 31));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
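// For powers of two (lit == 2^k) the code below uses the usual round-toward-zero
// trick: the sign mask (src >> 31) is shifted right logically by (32 - k), yielding
// (2^k - 1) for negative inputs and 0 otherwise, and is added to the dividend before
// the arithmetic shift (for lit == 2 the same correction is folded into a single
// add), so that e.g. -7 / 4 gives -1 rather than -2.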
bool Arm64Mir2Lir::HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                    RegLocation rl_src, RegLocation rl_dest, int lit) {
  if (lit < 2) {
    return false;
  }
  if (!IsPowerOfTwo(lit)) {
    return SmallLiteralDivRem(dalvik_opcode, is_div, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    RegStorage t_reg = AllocTemp();
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, EncodeShift(kA64Lsr, 32 - k));
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, t_reg, EncodeShift(kA64Lsr, 32 - k));
      OpRegRegImm(kOpAsr, rl_result.reg, t_reg, k);
    }
  } else {
    RegStorage t_reg = AllocTemp();
    if (lit == 2) {
      OpRegRegRegShift(kOpAdd, t_reg, rl_src.reg, rl_src.reg, EncodeShift(kA64Lsr, 32 - k));
      OpRegRegImm(kOpAnd, t_reg, t_reg, lit - 1);
      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg, rl_src.reg, EncodeShift(kA64Lsr, 32 - k));
    } else {
      RegStorage t_reg2 = AllocTemp();
      OpRegRegImm(kOpAsr, t_reg, rl_src.reg, 31);
      OpRegRegRegShift(kOpAdd, t_reg2, rl_src.reg, t_reg, EncodeShift(kA64Lsr, 32 - k));
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegRegShift(kOpSub, rl_result.reg, t_reg2, t_reg, EncodeShift(kA64Lsr, 32 - k));
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
  return false;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                    RegLocation rl_src2, bool is_div, bool check_zero) {
  LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Put the literal in a temp.
  RegStorage lit_temp = AllocTemp();
  LoadConstant(lit_temp, lit);
  // Use the generic case for div/rem with arg2 in a register.
  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
  FreeTemp(lit_temp);

  return rl_result;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
                                    bool is_div) {
  CHECK_EQ(r_src1.Is64Bit(), r_src2.Is64Bit());

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    OpRegRegReg(kOpDiv, rl_result.reg, r_src1, r_src2);
  } else {
    // temp = r_src1 / r_src2
    // dest = r_src1 - temp * r_src2
    RegStorage temp;
    ArmOpcode wide;
    if (rl_result.reg.Is64Bit()) {
      temp = AllocTempWide();
      wide = WIDE(0);
    } else {
      temp = AllocTemp();
      wide = UNWIDE(0);
    }
    OpRegRegReg(kOpDiv, temp, r_src1, r_src2);
    NewLIR4(kA64Msub4rrrr | wide, rl_result.reg.GetReg(), temp.GetReg(),
            r_src1.GetReg(), r_src2.GetReg());
    FreeTemp(temp);
  }
  return rl_result;
}

bool Arm64Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
  RegLocation rl_src = info->args[0];
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTargetWide(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage sign_reg = AllocTempWide();
  // abs(x): y = x >> 63 (arithmetic), abs = (x + y) ^ y.
  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
  OpRegReg(kOpXor, rl_result.reg, sign_reg);
  StoreValueWide(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = info->args[1];
  rl_src1 = LoadValue(rl_src1, kCoreReg);
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4(kA64Csel4rrrc, rl_result.reg.GetReg(), rl_src1.reg.GetReg(),
          rl_src2.reg.GetReg(), (is_min) ? kArmCondLt : kArmCondGt);
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1] ?
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);  // kRefReg
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size, kNotVolatile);
  if (size == k64) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Arm64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
  RegLocation rl_src_address = info->args[0];  // long address
  rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
  RegLocation rl_src_value = info->args[2];  // [size] value
  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);  // kRefReg

  RegLocation rl_value;
  if (size == k64) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    rl_value = LoadValue(rl_src_value, kCoreReg);
  }
  StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size, kNotVolatile);
  return true;
}

void Arm64Mir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale, int offset) {
  LOG(FATAL) << "Unexpected use of OpLea for Arm64";
}

void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
  UNIMPLEMENTED(FATAL) << "Should not be used.";
}

void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm64";
}

bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  ArmOpcode wide = is_long ? WIDE(0) : UNWIDE(0);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]  // TODO: do we really need this
  RegLocation rl_src_expected = info->args[4];  // int, long or Object
  // If is_long, high half is in info->args[5]
  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
  // If is_long, high half is in info->args[7]
  RegLocation rl_dest = InlineTarget(info);  // boolean place for result

  // Load Object and offset
  RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kRefReg);

  RegLocation rl_new_value;
  RegLocation rl_expected;
  if (is_long) {
    rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
    rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
  } else {
    rl_new_value = LoadValue(rl_src_new_value, is_object ? kRefReg : kCoreReg);
    rl_expected = LoadValue(rl_src_expected, is_object ? kRefReg : kCoreReg);
  }

  if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
    // Mark card for object assuming new value is stored.
    MarkGCCard(rl_new_value.reg, rl_object.reg);
  }

  RegStorage r_ptr = AllocTempRef();
  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);

  // Free now unneeded rl_object and rl_offset to give more temps.
  ClobberSReg(rl_object.s_reg_low);
  FreeTemp(rl_object.reg);
  ClobberSReg(rl_offset.s_reg_low);
  FreeTemp(rl_offset.reg);

  // do {
  //   tmp = [r_ptr] - expected;
  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
  // result = tmp != 0;
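  // The loop relies on the AArch64 exclusive pair: ldaxr (load-acquire exclusive)
  // reads the current value and stlxr (store-release exclusive) writes the new one,
  // producing a non-zero status if the exclusive monitor was lost, in which case we
  // branch back and retry.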

  RegStorage r_tmp;
  if (is_long) {
    r_tmp = AllocTempWide();
  } else if (is_object) {
    r_tmp = AllocTempRef();
  } else {
    r_tmp = AllocTemp();
  }

  LIR* loop = NewLIR0(kPseudoTargetLabel);
  NewLIR2(kA64Ldaxr2rX | wide, r_tmp.GetReg(), r_ptr.GetReg());
  OpRegReg(kOpCmp, r_tmp, rl_expected.reg);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  LIR* early_exit = OpCondBranch(kCondNe, NULL);

  NewLIR3(kA64Stlxr3wrX | wide, As32BitReg(r_tmp).GetReg(), rl_new_value.reg.GetReg(), r_ptr.GetReg());
  NewLIR3(kA64Cmp3RdT, As32BitReg(r_tmp).GetReg(), 0, ENCODE_NO_SHIFT);
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  OpCondBranch(kCondNe, loop);

  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  LIR* exit = NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondNe);
  early_exit->target = exit;

  FreeTemp(r_tmp);  // Now unneeded.
  FreeTemp(r_ptr);  // Now unneeded.

  StoreValue(rl_dest, rl_result);

  return true;
}

LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
  return RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp), reg.GetReg(), 0, 0, 0, 0, target);
}

LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
  return NULL;
}

void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                 RegLocation rl_result, int lit,
                                                 int first_bit, int second_bit) {
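  // lit is expected to have exactly two bits set (at first_bit and second_bit), so
  // src * lit == (src + (src << (second_bit - first_bit))) << first_bit.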
  OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg, EncodeShift(kA64Lsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
  }
}

void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
  LOG(FATAL) << "Unexpected use of GenDivZeroCheckWide for Arm64";
}

// Test suspend flag, return target of taken suspend branch
LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
  NewLIR3(kA64Subs3rRd, rwSUSPEND, rwSUSPEND, 1);
  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
}

// Decrement register and branch on condition
LIR* Arm64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
  // Combine sub & test using sub setflags encoding here. We need to make sure a
  // subtract form that sets carry is used, so generate explicitly.
  // TODO: might be best to add a new op, kOpSubs, and handle it generically.
  ArmOpcode opcode = reg.Is64Bit() ? WIDE(kA64Subs3rRd) : UNWIDE(kA64Subs3rRd);
  NewLIR3(opcode, reg.GetReg(), reg.GetReg(), 1);  // For value == 1, this should set flags.
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  return OpCondBranch(c_code, target);
}

bool Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off by using the last LIR as the barrier. If it is not sufficient, generate a new one.
  LIR* barrier = last_lir_insn_;

  int dmb_flavor;
  // TODO: revisit Arm barrier kinds
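  // kISH requests a full "dmb ish" (orders all accesses within the inner shareable
  // domain); kISHST requests "dmb ishst", which only orders store->store.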
  switch (barrier_kind) {
    case kLoadStore: dmb_flavor = kISH; break;
    case kLoadLoad: dmb_flavor = kISH; break;
    case kStoreStore: dmb_flavor = kISHST; break;
    case kStoreLoad: dmb_flavor = kISH; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }

  bool ret = false;

  // If the same barrier already exists, don't generate another.
  if (barrier == nullptr
      || (barrier->opcode != kA64Dmb1B || barrier->operands[0] != dmb_flavor)) {
    barrier = NewLIR1(kA64Dmb1B, dmb_flavor);
    ret = true;
  }

  // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = &kEncodeAll;
  return ret;
#else
  return false;
#endif
}

void Arm64Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
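  // SBFM Xd, Xn, #0, #31 is the canonical encoding of "sxtw Xd, Wn": it sign-extends
  // the 32-bit source into the 64-bit result register.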
  NewLIR4(WIDE(kA64Sbfm4rrdd), rl_result.reg.GetReg(), As64BitReg(rl_src.reg).GetReg(), 0, 31);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenDivRemLong(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2, bool is_div) {
  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  GenDivZeroCheck(rl_src2.reg);
  rl_result = GenDivRem(rl_dest, rl_src1.reg, rl_src2.reg, is_div);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
  RegLocation rl_result;

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegRegShift(op, rl_result.reg, rl_src1.reg, rl_src2.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpNeg, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenNotLong(RegLocation rl_dest, RegLocation rl_src) {
  RegLocation rl_result;

  rl_src = LoadValueWide(rl_src, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegShift(kOpMvn, rl_result.reg, rl_src.reg, ENCODE_NO_SHIFT);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_src2) {
  GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
  GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
}

/*
 * Generate array load
 */
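// Wide, floating-point, and constant-index accesses compute the element address
// (or fold the constant index into the displacement) and use a base+offset load;
// all other cases offset the base to the data area and use a scaled
// register-offset load.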
void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                               RegLocation rl_index, RegLocation rl_dest, int scale) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  bool constant_index = rl_index.is_const;
  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  if (rl_dest.wide) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  if (rl_dest.wide || rl_dest.fp || constant_index) {
    RegStorage reg_ptr;
    if (constant_index) {
      reg_ptr = rl_array.reg;  // NOTE: must not alter reg_ptr in constant case.
    } else {
      // No special indexed operation, lea + load w/ displacement
      reg_ptr = AllocTempRef();
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
                       EncodeShift(kA64Lsl, scale));
      FreeTemp(rl_index.reg);
    }
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }
    if (rl_result.ref) {
      LoadRefDisp(reg_ptr, data_offset, rl_result.reg, kNotVolatile);
    } else {
      LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size, kNotVolatile);
    }
    MarkPossibleNullPointerException(opt_flags);
    if (!constant_index) {
      FreeTemp(reg_ptr);
    }
    if (rl_dest.wide) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    // Offset base, then use indexed load
    RegStorage reg_ptr = AllocTempRef();
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    FreeTemp(rl_array.reg);
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    if (rl_result.ref) {
      LoadRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg);
    } else {
      LoadBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_result.reg, scale, size);
    }
    MarkPossibleNullPointerException(opt_flags);
    FreeTemp(reg_ptr);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Generate array store
 *
 */
void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                               RegLocation rl_index, RegLocation rl_src, int scale, bool card_mark) {
  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  bool constant_index = rl_index.is_const;

  int data_offset;
  if (size == k64 || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  rl_array = LoadValue(rl_array, kRefReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  RegStorage reg_ptr;
  bool allocated_reg_ptr_temp = false;
  if (constant_index) {
    reg_ptr = rl_array.reg;
  } else if (IsTemp(rl_array.reg) && !card_mark) {
    Clobber(rl_array.reg);
    reg_ptr = rl_array.reg;
  } else {
    allocated_reg_ptr_temp = true;
    reg_ptr = AllocTempRef();
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    // NOTE: max live temps(4) here.
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  /* at this point, reg_ptr points to array, 2 live temps */
  if (rl_src.wide || rl_src.fp || constant_index) {
    if (rl_src.wide) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (!constant_index) {
      OpRegRegRegShift(kOpAdd, reg_ptr, rl_array.reg, As64BitReg(rl_index.reg),
                       EncodeShift(kA64Lsl, scale));
    }
    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }
    if (rl_src.ref) {
      StoreRefDisp(reg_ptr, data_offset, rl_src.reg, kNotVolatile);
    } else {
      StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size, kNotVolatile);
    }
    MarkPossibleNullPointerException(opt_flags);
  } else {
    /* reg_ptr -> array data */
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    rl_src = LoadValue(rl_src, reg_class);
    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    if (rl_src.ref) {
      StoreRefIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg);
    } else {
      StoreBaseIndexed(reg_ptr, As64BitReg(rl_index.reg), rl_src.reg, scale, size);
    }
    MarkPossibleNullPointerException(opt_flags);
  }
  if (allocated_reg_ptr_temp) {
    FreeTemp(reg_ptr);
  }
  if (card_mark) {
    MarkGCCard(rl_src.reg, rl_array.reg);
  }
}

void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                     RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift) {
  OpKind op = kOpBkpt;
  // Per spec, we only care about low 6 bits of shift amount.
  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
  rl_src = LoadValueWide(rl_src, kCoreReg);
  if (shift_amount == 0) {
    StoreValueWide(rl_dest, rl_src);
    return;
  }

  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      op = kOpLsl;
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      op = kOpAsr;
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  OpRegRegImm(op, rl_result.reg, rl_src.reg, shift_amount);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                     RegLocation rl_src1, RegLocation rl_src2) {
  if ((opcode == Instruction::SUB_LONG) || (opcode == Instruction::SUB_LONG_2ADDR)) {
    if (!rl_src2.is_const) {
      return GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
    }
  } else {
    // Commutativity: make sure the constant operand ends up in rl_src2.
    if (!rl_src2.is_const) {
      DCHECK(rl_src1.is_const);
      std::swap(rl_src1, rl_src2);
    }
  }
  DCHECK(rl_src2.is_const);

  OpKind op = kOpBkpt;
  int64_t val = mir_graph_->ConstantValueWide(rl_src2);

  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      op = kOpSub;
      break;
    case Instruction::AND_LONG:
    case Instruction::AND_LONG_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      op = kOpXor;
      break;
    default:
      LOG(FATAL) << "Unexpected opcode";
  }

  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegImm64(op, rl_result.reg, rl_src1.reg, val);
  StoreValueWide(rl_dest, rl_result);
}

/**
 * @brief Split a register list into pairs or single registers.
 *
 * Given a list of registers in @p reg_mask, split the list into pairs. Use as follows:
 * @code
 *   int reg1 = -1, reg2 = -1;
 *   while (reg_mask) {
 *     reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
 *     if (UNLIKELY(reg2 < 0)) {
 *       // Single register in reg1.
 *     } else {
 *       // Pair in reg1, reg2.
 *     }
 *   }
 * @endcode
 */
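// Note: *reg1 carries state between calls: the bit offsets in reg_mask are relative
// to the last register number handed out, so callers must initialize *reg1 to -1
// before the first call (as in the spill/unspill loops below).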
uint32_t Arm64Mir2Lir::GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
  // Find first register.
  int first_bit_set = __builtin_ctz(reg_mask) + 1;
  int reg = *reg1 + first_bit_set;
  reg_mask >>= first_bit_set;

  if (LIKELY(reg_mask)) {
    // Save the first register, find the second and use the pair opcode.
    int second_bit_set = __builtin_ctz(reg_mask) + 1;
    *reg2 = reg;
    reg_mask >>= second_bit_set;
    *reg1 = reg + second_bit_set;
    return reg_mask;
  }

  // Use the single opcode, as we just have one register.
  *reg1 = reg;
  *reg2 = -1;
  return reg_mask;
}

void Arm64Mir2Lir::UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo64(reg2).GetReg(),
              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

void Arm64Mir2Lir::SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo64(reg2).GetReg(),
              RegStorage::Solo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

void Arm64Mir2Lir::UnSpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(FWIDE(kA64Ldr3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Ldp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

// TODO(Arm64): consider using ld1 and st1?
void Arm64Mir2Lir::SpillFPRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int reg_log2_size = 3;

  for (offset = (offset >> reg_log2_size); reg_mask; offset += 2) {
    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
    if (UNLIKELY(reg2 < 0)) {
      NewLIR3(FWIDE(kA64Str3fXD), RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    } else {
      NewLIR4(WIDE(kA64Stp4ffXD), RegStorage::FloatSolo64(reg2).GetReg(),
              RegStorage::FloatSolo64(reg1).GetReg(), base.GetReg(), offset);
    }
  }
}

}  // namespace art