/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Arm64 ISA. */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"

namespace art {

LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) {
  OpRegReg(kOpCmp, src1, src2);
  return OpCondBranch(cond, target);
}

// TODO(Arm64): remove this.
LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
  LOG(FATAL) << "Unexpected use of OpIT for Arm64";
  return NULL;
}

void Arm64Mir2Lir::OpEndIT(LIR* it) {
  LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}

/*
 * 64-bit 3way compare function.
 *     cmp   xA, xB
 *     csinc wC, wzr, wzr, eq  // wC = (xA == xB) ? 0 : 1
 *     csneg wC, wC, wC, ge    // wC = (xA >= xB) ? wC : -wC
 */
void Arm64Mir2Lir::GenCmpLong(RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);

  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  NewLIR4(kA64Csinc4rrrc, rl_result.reg.GetReg(), rwzr, rwzr, kArmCondEq);
  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_result.reg.GetReg(),
          rl_result.reg.GetReg(), kArmCondGe);
  StoreValue(rl_dest, rl_result);
}
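
// A sketch of why the pair above implements the dex cmp-long contract
// (assuming standard A64 conditional-select semantics): csinc selects wzr
// when eq holds and wzr + 1 otherwise, so wC is 0 iff xA == xB; csneg then
// keeps wC when ge holds and negates it otherwise, giving
//   xA > xB -> 1,  xA == xB -> 0,  xA < xB -> -1.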

void Arm64Mir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
                                            int64_t val, ConditionCode ccode) {
  LIR* taken = &block_label_list_[bb->taken];
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);

  if (val == 0 && (ccode == kCondEq || ccode == kCondNe)) {
    ArmOpcode opcode = (ccode == kCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
    LIR* branch = NewLIR2(WIDE(opcode), rl_src1.reg.GetLowReg(), 0);
    branch->target = taken;
  } else {
    OpRegImm64(kOpCmp, rl_src1.reg, val, /*is_wide*/true);
    OpCondBranch(ccode, taken);
  }
}

void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
  // TODO(Arm64): implement this.
  UNIMPLEMENTED(FATAL);
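
  // Note: everything below is inherited from the Thumb2 back end and is built
  // around IT blocks, which A64 does not have (OpIT above is a fatal stub).
  // Until this is rewritten, presumably in terms of csel/csinc, the body is
  // unreachable scaffolding kept for reference.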

  RegLocation rl_result;
  RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
  RegLocation rl_dest = mir_graph_->GetDest(mir);
  rl_src = LoadValue(rl_src, kCoreReg);
  ConditionCode ccode = mir->meta.ccode;
  if (mir->ssa_rep->num_uses == 1) {
    // CONST case
    int true_val = mir->dalvikInsn.vB;
    int false_val = mir->dalvikInsn.vC;
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    // Change kCondNe to kCondEq for the special cases below.
    if (ccode == kCondNe) {
      ccode = kCondEq;
      std::swap(true_val, false_val);
    }
    bool cheap_false_val = InexpensiveConstantInt(false_val);
    if (cheap_false_val && ccode == kCondEq && (true_val == 0 || true_val == -1)) {
      OpRegRegImm(kOpSub, rl_result.reg, rl_src.reg, -true_val);
      DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
      OpIT(true_val == 0 ? kCondNe : kCondUge, "");
      LoadConstant(rl_result.reg, false_val);
      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact.
    } else if (cheap_false_val && ccode == kCondEq && true_val == 1) {
      OpRegRegImm(kOpRsub, rl_result.reg, rl_src.reg, 1);
      DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
      OpIT(kCondLs, "");
      LoadConstant(rl_result.reg, false_val);
      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact.
    } else if (cheap_false_val && InexpensiveConstantInt(true_val)) {
      OpRegImm(kOpCmp, rl_src.reg, 0);
      OpIT(ccode, "E");
      LoadConstant(rl_result.reg, true_val);
      LoadConstant(rl_result.reg, false_val);
      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact.
    } else {
      // Unlikely case - could be tuned.
      RegStorage t_reg1 = AllocTemp();
      RegStorage t_reg2 = AllocTemp();
      LoadConstant(t_reg1, true_val);
      LoadConstant(t_reg2, false_val);
      OpRegImm(kOpCmp, rl_src.reg, 0);
      OpIT(ccode, "E");
      OpRegCopy(rl_result.reg, t_reg1);
      OpRegCopy(rl_result.reg, t_reg2);
      GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact.
    }
  } else {
    // MOVE case
    RegLocation rl_true = mir_graph_->reg_location_[mir->ssa_rep->uses[1]];
    RegLocation rl_false = mir_graph_->reg_location_[mir->ssa_rep->uses[2]];
    rl_true = LoadValue(rl_true, kCoreReg);
    rl_false = LoadValue(rl_false, kCoreReg);
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegImm(kOpCmp, rl_src.reg, 0);
    if (rl_result.reg.GetReg() == rl_true.reg.GetReg()) {  // Is the "true" case already in place?
      OpIT(NegateComparison(ccode), "");
      OpRegCopy(rl_result.reg, rl_false.reg);
    } else if (rl_result.reg.GetReg() == rl_false.reg.GetReg()) {  // False case in place?
      OpIT(ccode, "");
      OpRegCopy(rl_result.reg, rl_true.reg);
    } else {  // Normal - select between the two.
      OpIT(ccode, "E");
      OpRegCopy(rl_result.reg, rl_true.reg);
      OpRegCopy(rl_result.reg, rl_false.reg);
    }
    GenBarrier();  // Add a scheduling barrier to keep the IT shadow intact.
  }
  StoreValue(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
  // TODO(Arm64): implement this.
  UNIMPLEMENTED(FATAL);

  RegLocation rl_src1 = mir_graph_->GetSrcWide(mir, 0);
  RegLocation rl_src2 = mir_graph_->GetSrcWide(mir, 2);
  // Normalize such that if either operand is constant, src2 will be constant.
  ConditionCode ccode = mir->meta.ccode;
  if (rl_src1.is_const) {
    std::swap(rl_src1, rl_src2);
    ccode = FlipComparisonOrder(ccode);
  }
  if (rl_src2.is_const) {
    RegLocation rl_temp = UpdateLocWide(rl_src2);
    // Do special compare/branch against simple const operand if not already in registers.
    int64_t val = mir_graph_->ConstantValueWide(rl_src2);
    if ((rl_temp.location != kLocPhysReg)
        /*&& ((ModifiedImmediate(Low32Bits(val)) >= 0) && (ModifiedImmediate(High32Bits(val)) >= 0))*/) {
      GenFusedLongCmpImmBranch(bb, rl_src1, val, ccode);
      return;
    }
  }
  LIR* taken = &block_label_list_[bb->taken];
  LIR* not_taken = &block_label_list_[bb->fall_through];
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  OpRegReg(kOpCmp, rl_src1.reg.GetHigh(), rl_src2.reg.GetHigh());
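  // The high words decide the signed outcome; only when they are equal do we
  // fall through to the low words, which must then be compared as unsigned
  // quantities. That is why the signed conditions are remapped to their
  // unsigned counterparts (kCondLt -> kCondUlt, etc.) before the final branch.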
  switch (ccode) {
    case kCondEq:
      OpCondBranch(kCondNe, not_taken);
      break;
    case kCondNe:
      OpCondBranch(kCondNe, taken);
      break;
    case kCondLt:
      OpCondBranch(kCondLt, taken);
      OpCondBranch(kCondGt, not_taken);
      ccode = kCondUlt;
      break;
    case kCondLe:
      OpCondBranch(kCondLt, taken);
      OpCondBranch(kCondGt, not_taken);
      ccode = kCondLs;
      break;
    case kCondGt:
      OpCondBranch(kCondGt, taken);
      OpCondBranch(kCondLt, not_taken);
      ccode = kCondHi;
      break;
    case kCondGe:
      OpCondBranch(kCondGt, taken);
      OpCondBranch(kCondLt, not_taken);
      ccode = kCondUge;
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpRegReg(kOpCmp, rl_src1.reg.GetLow(), rl_src2.reg.GetLow());
  OpCondBranch(ccode, taken);
}

/*
 * Generate a register comparison to an immediate and branch. Caller
 * is responsible for setting branch target field.
 */
LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                                  LIR* target) {
  LIR* branch;
  ArmConditionCode arm_cond = ArmConditionEncoding(cond);
  if (check_value == 0 && (arm_cond == kArmCondEq || arm_cond == kArmCondNe)) {
    ArmOpcode opcode = (arm_cond == kArmCondEq) ? kA64Cbz2rt : kA64Cbnz2rt;
    branch = NewLIR2(opcode, reg.GetReg(), 0);
  } else {
    OpRegImm(kOpCmp, reg, check_value);
    branch = NewLIR2(kA64B2ct, arm_cond, 0);
  }
  branch->target = target;
  return branch;
}
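
// Encoding note on the fast path above: A64 cbz/cbnz carry a 19-bit signed
// word offset (roughly +/-1 MiB of reach); out-of-range targets are assumed
// to be fixed up later by the LIR assembly pass.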

LIR* Arm64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) {
  bool dest_is_fp = r_dest.IsFloat();
  bool src_is_fp = r_src.IsFloat();
  ArmOpcode opcode = kA64Brk1d;
  LIR* res;

  if (LIKELY(dest_is_fp == src_is_fp)) {
    if (LIKELY(!dest_is_fp)) {
      // Core/core copy.
      // Copies involving the sp register require a different instruction.
      opcode = UNLIKELY(A64_REG_IS_SP(r_dest.GetReg())) ? kA64Add4RRdT : kA64Mov2rr;

      // TODO(Arm64): kA64Add4RRdT formally has 4 args, but is used as a 2 args instruction.
      //   This currently works because the other arguments are set to 0 by default. We should
      //   rather introduce an alias kA64Mov2RR.

      // Do an x/x copy only if both registers are x.
      if (r_dest.Is64Bit() && r_src.Is64Bit()) {
        opcode = WIDE(opcode);
      }
    } else {
      // Float/float copy.
      bool dest_is_double = r_dest.IsDouble();
      bool src_is_double = r_src.IsDouble();

      // We do not do float/double or double/float casts here.
      DCHECK_EQ(dest_is_double, src_is_double);

      // Homogeneous float/float copy.
      opcode = (dest_is_double) ? FWIDE(kA64Fmov2ff) : kA64Fmov2ff;
    }
  } else {
    // Inhomogeneous register copy.
    if (dest_is_fp) {
      if (r_dest.IsDouble()) {
        opcode = kA64Fmov2Sx;
      } else {
        DCHECK(r_src.IsSingle());
        opcode = kA64Fmov2sw;
      }
    } else {
      if (r_src.IsDouble()) {
        opcode = kA64Fmov2xS;
      } else {
        DCHECK(r_dest.Is32Bit());
        opcode = kA64Fmov2ws;
      }
    }
  }

  res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());

  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }

  return res;
}

void Arm64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) {
  if (r_dest != r_src) {
    LIR* res = OpRegCopyNoInsert(r_dest, r_src);
    AppendLIR(res);
  }
}

void Arm64Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
  OpRegCopy(r_dest, r_src);
}

// Table of magic divisors.
struct MagicTable {
  uint32_t magic;
  uint32_t shift;
  DividePattern pattern;
};

static const MagicTable magic_table[] = {
  {0, 0, DivideNone},        // 0
  {0, 0, DivideNone},        // 1
  {0, 0, DivideNone},        // 2
  {0x55555556, 0, Divide3},  // 3
  {0, 0, DivideNone},        // 4
  {0x66666667, 1, Divide5},  // 5
  {0x2AAAAAAB, 0, Divide3},  // 6
  {0x92492493, 2, Divide7},  // 7
  {0, 0, DivideNone},        // 8
  {0x38E38E39, 1, Divide5},  // 9
  {0x66666667, 2, Divide5},  // 10
  {0x2E8BA2E9, 1, Divide5},  // 11
  {0x2AAAAAAB, 1, Divide5},  // 12
  {0x4EC4EC4F, 2, Divide5},  // 13
  {0x92492493, 3, Divide7},  // 14
  {0x88888889, 3, Divide7},  // 15
};
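
// A worked instance of the reciprocal-multiply trick (a sketch following
// Hacker's Delight 10-4): for n / 5 the table supplies magic = 0x66666667 and
// shift = 1. Let hi be the high 32 bits of the 64-bit product magic * n; the
// quotient is then (hi >> 1) - (n >> 31), where the (n >> 31) term adds 1 for
// negative n so the division rounds toward zero.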

// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4).
bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                      RegLocation rl_src, RegLocation rl_dest, int lit) {
  // TODO(Arm64): fix this for Arm64. Note: it may be worth revisiting the magic table.
  //   It should be possible to subtract one from all its entries and use smaddl
  //   to counteract this. The advantage is that integers should then be easier to
  //   encode as logical immediates (0x55555555 rather than 0x55555556).
  UNIMPLEMENTED(FATAL);

  if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
    return false;
  }
  DividePattern pattern = magic_table[lit].pattern;
  if (pattern == DivideNone) {
    return false;
  }
  // Tuning: add rem patterns.
  if (!is_div) {
    return false;
  }

  RegStorage r_magic = AllocTemp();
  LoadConstant(r_magic, magic_table[lit].magic);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage r_hi = AllocTemp();
  RegStorage r_lo = AllocTemp();
  NewLIR4(kA64Smaddl4xwwx, r_lo.GetReg(), r_magic.GetReg(), rl_src.reg.GetReg(), rxzr);
  // Note: r_hi is still undefined here; the smaddl above produces the full
  // 64-bit product in r_lo, so extracting its high half is part of the TODO.
  switch (pattern) {
    case Divide3:
      OpRegRegRegShift(kOpSub, rl_result.reg.GetReg(), r_hi.GetReg(),
                       rl_src.reg.GetReg(), EncodeShift(kA64Asr, 31));
      break;
    case Divide5:
      OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
      OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo.GetReg(), r_hi.GetReg(),
                       EncodeShift(kA64Asr, magic_table[lit].shift));
      break;
    case Divide7:
      OpRegReg(kOpAdd, r_hi, rl_src.reg);
      OpRegRegImm(kOpAsr, r_lo, rl_src.reg, 31);
      OpRegRegRegShift(kOpRsub, rl_result.reg.GetReg(), r_lo.GetReg(), r_hi.GetReg(),
                       EncodeShift(kA64Asr, magic_table[lit].shift));
      break;
    default:
      LOG(FATAL) << "Unexpected pattern: " << pattern;
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
  LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
  return false;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                    RegLocation rl_src2, bool is_div, bool check_zero) {
  LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
                                       bool is_div) {
  LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
  return rl_dest;
}

RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

  // Put the literal in a temp.
  RegStorage lit_temp = AllocTemp();
  LoadConstant(lit_temp, lit);
  // Use the generic case for div/rem with arg2 in a register.
  // TODO: The literal temp can be freed earlier during a modulus to reduce reg pressure.
  rl_result = GenDivRem(rl_result, reg1, lit_temp, is_div);
  FreeTemp(lit_temp);

  return rl_result;
}

RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg1, RegStorage reg2,
                                    bool is_div) {
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_div) {
    // Simple case, use sdiv instruction.
    OpRegRegReg(kOpDiv, rl_result.reg, reg1, reg2);
  } else {
    // Remainder case, use the following code:
    //   temp = reg1 / reg2      - integer division
    //   temp = temp * reg2
    //   dest = reg1 - temp

    RegStorage temp = AllocTemp();
    OpRegRegReg(kOpDiv, temp, reg1, reg2);
    OpRegReg(kOpMul, temp, reg2);
    OpRegRegReg(kOpSub, rl_result.reg, reg1, temp);
    FreeTemp(temp);
  }

  return rl_result;
}

bool Arm64Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
  // TODO(Arm64): implement this.
  UNIMPLEMENTED(FATAL);

  DCHECK_EQ(cu_->instruction_set, kThumb2);
  RegLocation rl_src1 = info->args[0];
  RegLocation rl_src2 = info->args[1];
  rl_src1 = LoadValue(rl_src1, kCoreReg);
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegReg(kOpCmp, rl_src1.reg, rl_src2.reg);
  OpIT((is_min) ? kCondGt : kCondLt, "E");
  OpRegReg(kOpMov, rl_result.reg, rl_src2.reg);
  OpRegReg(kOpMov, rl_result.reg, rl_src1.reg);
  GenBarrier();
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Arm64Mir2Lir::GenInlinedPeek(CallInfo* info, OpSize size) {
  // TODO(Arm64): implement this.
  UNIMPLEMENTED(WARNING);

  RegLocation rl_src_address = info->args[0];  // long address
  rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (size == k64) {
    // Fake unaligned LDRD by two unaligned LDR instructions on ARMv7 with SCTLR.A set to 0.
    if (rl_address.reg.GetReg() != rl_result.reg.GetLowReg()) {
      LoadWordDisp(rl_address.reg, 0, rl_result.reg.GetLow());
      LoadWordDisp(rl_address.reg, 4, rl_result.reg.GetHigh());
    } else {
      LoadWordDisp(rl_address.reg, 4, rl_result.reg.GetHigh());
      LoadWordDisp(rl_address.reg, 0, rl_result.reg.GetLow());
    }
    StoreValueWide(rl_dest, rl_result);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    // Unaligned load with LDR and LDRSH is allowed on ARMv7 with SCTLR.A set to 0.
    LoadBaseDisp(rl_address.reg, 0, rl_result.reg, size);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Arm64Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
  // TODO(Arm64): implement this.
  UNIMPLEMENTED(WARNING);

  RegLocation rl_src_address = info->args[0];  // long address
  rl_src_address = NarrowRegLoc(rl_src_address);  // ignore high half in info->args[1]
  RegLocation rl_src_value = info->args[2];  // [size] value
  RegLocation rl_address = LoadValue(rl_src_address, kCoreReg);
  if (size == k64) {
    // Fake unaligned STRD by two unaligned STR instructions on ARMv7 with SCTLR.A set to 0.
    RegLocation rl_value = LoadValueWide(rl_src_value, kCoreReg);
    StoreBaseDisp(rl_address.reg, 0, rl_value.reg.GetLow(), k32);
    StoreBaseDisp(rl_address.reg, 4, rl_value.reg.GetHigh(), k32);
  } else {
    DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
    // Unaligned store with STR and STRH is allowed on ARMv7 with SCTLR.A set to 0.
    RegLocation rl_value = LoadValue(rl_src_value, kCoreReg);
    StoreBaseDisp(rl_address.reg, 0, rl_value.reg, size);
  }
  return true;
}

void Arm64Mir2Lir::OpLea(RegStorage r_base, RegStorage reg1, RegStorage reg2, int scale,
                         int offset) {
  LOG(FATAL) << "Unexpected use of OpLea for Arm64";
}

void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<4> offset, int val) {
  UNIMPLEMENTED(FATAL) << "Should not be used.";
}

void Arm64Mir2Lir::OpTlsCmp(ThreadOffset<8> offset, int val) {
  LOG(FATAL) << "Unexpected use of OpTlsCmp for Arm64";
}

bool Arm64Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
  // TODO(Arm64): implement this.
  UNIMPLEMENTED(WARNING);

  DCHECK_EQ(cu_->instruction_set, kThumb2);
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object - known non-null
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
  RegLocation rl_src_expected = info->args[4];  // int, long or Object
  // If is_long, high half is in info->args[5]
  RegLocation rl_src_new_value = info->args[is_long ? 6 : 5];  // int, long or Object
  // If is_long, high half is in info->args[7]
  RegLocation rl_dest = InlineTarget(info);  // boolean place for result

  // We have only 5 temporary registers available and actually only 4 if the InlineTarget
  // above locked one of the temps. For a straightforward CAS64 we need 7 registers:
  // r_ptr (1), new_value (2), expected(2) and ldrexd result (2). If neither expected nor
  // new_value is in a non-temp core register we shall reload them in the ldrex/strex loop
  // into the same temps, reducing the number of required temps down to 5. We shall work
  // around the potentially locked temp by using LR for r_ptr, unconditionally.
  // TODO: Pass information about the need for more temps to the stack frame generation
  // code so that we can rely on being able to allocate enough temps.
  DCHECK(!GetRegInfo(rs_rA64_LR)->IsTemp());
  MarkTemp(rs_rA64_LR);
  FreeTemp(rs_rA64_LR);
  LockTemp(rs_rA64_LR);
  bool load_early = true;
  if (is_long) {
    RegStorage expected_reg = rl_src_expected.reg.IsPair() ? rl_src_expected.reg.GetLow() :
        rl_src_expected.reg;
    RegStorage new_val_reg = rl_src_new_value.reg.IsPair() ? rl_src_new_value.reg.GetLow() :
        rl_src_new_value.reg;
    bool expected_is_core_reg = rl_src_expected.location == kLocPhysReg && !expected_reg.IsFloat();
    bool new_value_is_core_reg = rl_src_new_value.location == kLocPhysReg && !new_val_reg.IsFloat();
    bool expected_is_good_reg = expected_is_core_reg && !IsTemp(expected_reg);
    bool new_value_is_good_reg = new_value_is_core_reg && !IsTemp(new_val_reg);

    if (!expected_is_good_reg && !new_value_is_good_reg) {
      // None of expected/new_value is non-temp reg, need to load both late
      load_early = false;
      // Make sure they are not in the temp regs and the load will not be skipped.
      if (expected_is_core_reg) {
        FlushRegWide(rl_src_expected.reg);
        ClobberSReg(rl_src_expected.s_reg_low);
        ClobberSReg(GetSRegHi(rl_src_expected.s_reg_low));
        rl_src_expected.location = kLocDalvikFrame;
      }
      if (new_value_is_core_reg) {
        FlushRegWide(rl_src_new_value.reg);
        ClobberSReg(rl_src_new_value.s_reg_low);
        ClobberSReg(GetSRegHi(rl_src_new_value.s_reg_low));
        rl_src_new_value.location = kLocDalvikFrame;
      }
    }
  }

  // Release store semantics, get the barrier out of the way. TODO: revisit
  GenMemBarrier(kStoreLoad);

  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_new_value;
  if (!is_long) {
    rl_new_value = LoadValue(rl_src_new_value, kCoreReg);
  } else if (load_early) {
    rl_new_value = LoadValueWide(rl_src_new_value, kCoreReg);
  }

  if (is_object && !mir_graph_->IsConstantNullRef(rl_new_value)) {
    // Mark card for object assuming new value is stored.
    MarkGCCard(rl_new_value.reg, rl_object.reg);
  }

  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);

  RegStorage r_ptr = rs_rA64_LR;
  OpRegRegReg(kOpAdd, r_ptr, rl_object.reg, rl_offset.reg);

  // Free now unneeded rl_object and rl_offset to give more temps.
  ClobberSReg(rl_object.s_reg_low);
  FreeTemp(rl_object.reg);
  ClobberSReg(rl_offset.s_reg_low);
  FreeTemp(rl_offset.reg);

  RegLocation rl_expected;
  if (!is_long) {
    rl_expected = LoadValue(rl_src_expected, kCoreReg);
  } else if (load_early) {
    rl_expected = LoadValueWide(rl_src_expected, kCoreReg);
  } else {
    // NOTE: partially defined rl_expected & rl_new_value - but we just want the regs.
    int low_reg = AllocTemp().GetReg();
    int high_reg = AllocTemp().GetReg();
    rl_new_value.reg = RegStorage(RegStorage::k64BitPair, low_reg, high_reg);
    rl_expected = rl_new_value;
  }

  // do {
  //   tmp = [r_ptr] - expected;
  // } while (tmp == 0 && failure([r_ptr] <- r_new_value));
  // result = tmp != 0;
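
  // Exclusive-monitor background (A64 semantics; the code below still has the
  // Thumb2-derived ldrex/strex shape): ldxr claims the exclusive monitor and
  // stxr writes 0 to its status register on success and 1 on failure, so a
  // CAS loop retries while the status is non-zero.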

  RegStorage r_tmp = AllocTemp();
  LIR* target = NewLIR0(kPseudoTargetLabel);

  if (is_long) {
    RegStorage r_tmp_high = AllocTemp();
    if (!load_early) {
      LoadValueDirectWide(rl_src_expected, rl_expected.reg);
    }
    NewLIR3(kA64Ldxr2rX, r_tmp.GetReg(), r_tmp_high.GetReg(), r_ptr.GetReg());
    OpRegReg(kOpSub, r_tmp, rl_expected.reg.GetLow());
    OpRegReg(kOpSub, r_tmp_high, rl_expected.reg.GetHigh());
    if (!load_early) {
      LoadValueDirectWide(rl_src_new_value, rl_new_value.reg);
    }

    LIR* branch1 = OpCmpImmBranch(kCondNe, r_tmp, 0, NULL);
    LIR* branch2 = OpCmpImmBranch(kCondNe, r_tmp_high, 0, NULL);
    NewLIR4(WIDE(kA64Stxr3wrX) /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetReg(),
            rl_new_value.reg.GetHighReg(), r_ptr.GetReg());
    LIR* target2 = NewLIR0(kPseudoTargetLabel);
    branch1->target = target2;
    branch2->target = target2;
    FreeTemp(r_tmp_high);  // Now unneeded.

  } else {
    NewLIR3(kA64Ldxr2rX, r_tmp.GetReg(), r_ptr.GetReg(), 0);
    OpRegReg(kOpSub, r_tmp, rl_expected.reg);
    DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
    OpIT(kCondEq, "T");
    NewLIR4(kA64Stxr3wrX /* eq */, r_tmp.GetReg(), rl_new_value.reg.GetReg(), r_ptr.GetReg(), 0);
  }

  // Still one conditional left from OpIT(kCondEq, "T") from either branch.
  OpRegImm(kOpCmp /* eq */, r_tmp, 1);
  OpCondBranch(kCondEq, target);

  if (!load_early) {
    FreeTemp(rl_expected.reg);  // Now unneeded.
  }

  // result := (tmp1 != 0) ? 0 : 1;
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpRegRegImm(kOpRsub, rl_result.reg, r_tmp, 1);
  DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
  OpIT(kCondUlt, "");
  LoadConstant(rl_result.reg, 0);  /* cc */
  FreeTemp(r_tmp);  // Now unneeded.

  StoreValue(rl_dest, rl_result);

  // Now, restore lr to its non-temp status.
  Clobber(rs_rA64_LR);
  UnmarkTemp(rs_rA64_LR);
  return true;
}

LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
  return RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp), reg.GetReg(), 0, 0, 0, 0, target);
}

LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
  LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
  return NULL;
}

void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
                                                 RegLocation rl_result, int lit,
                                                 int first_bit, int second_bit) {
  OpRegRegRegShift(kOpAdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg(),
                   EncodeShift(kA64Lsl, second_bit - first_bit));
  if (first_bit != 0) {
    OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
  }
}
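
// Example of the shift-add decomposition above (a sketch): a multiply by
// lit = 10 = 0b1010 arrives with first_bit = 1 and second_bit = 3, so the
// code emits (src + (src << 2)) << 1, i.e. (5 * src) * 2 = 10 * src.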

void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
  DCHECK(reg.IsPair());  // TODO: support k64BitSolo.
  OpRegImm64(kOpCmp, reg, 0, /*is_wide*/true);
  GenDivZeroCheck(kCondEq);
}

// TODO(Arm64): the function below should go.
// Test suspend flag, return target of taken suspend branch.
LIR* Arm64Mir2Lir::OpTestSuspend(LIR* target) {
  NewLIR3(kA64Subs3rRd, rA64_SUSPEND, rA64_SUSPEND, 1);
  return OpCondBranch((target == NULL) ? kCondEq : kCondNe, target);
}

// Decrement register and branch on condition.
LIR* Arm64Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) {
  // Combine sub & test using sub setflags encoding here.
  OpRegRegImm(kOpSub, reg, reg, 1);  // For value == 1, this should set flags.
  DCHECK(last_lir_insn_->u.m.def_mask & ENCODE_CCODE);
  return OpCondBranch(c_code, target);
}

void Arm64Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
#if ANDROID_SMP != 0
  // Start off with using the last LIR as the barrier. If it is not enough, then we will
  // generate one.
  LIR* barrier = last_lir_insn_;

  int dmb_flavor;
  // TODO: revisit Arm barrier kinds.
  switch (barrier_kind) {
    case kLoadStore: dmb_flavor = kISH; break;
    case kLoadLoad: dmb_flavor = kISH; break;
    case kStoreStore: dmb_flavor = kISHST; break;
    case kStoreLoad: dmb_flavor = kISH; break;
    default:
      LOG(FATAL) << "Unexpected MemBarrierKind: " << barrier_kind;
      dmb_flavor = kSY;  // quiet gcc.
      break;
  }

  // If the same barrier already exists, don't generate another.
  if (barrier == nullptr
      || (barrier->opcode != kA64Dmb1B || barrier->operands[0] != dmb_flavor)) {
    barrier = NewLIR1(kA64Dmb1B, dmb_flavor);
  }

  // At this point we must have a memory barrier. Mark it as a scheduling barrier as well.
  DCHECK(!barrier->flags.use_def_invalid);
  barrier->u.m.def_mask = ENCODE_ALL;
#endif
}
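
// Background on the flavors chosen above (A64 semantics): "dmb ish" is a full
// barrier over the inner-shareable domain, while "dmb ishst" orders only
// store-store, which is why kStoreStore alone maps to kISHST.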

void Arm64Mir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
  rl_src = LoadValueWide(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  RegStorage z_reg = AllocTemp();
  LoadConstantNoClobber(z_reg, 0);
  // Check for destructive overlap.
  if (rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
    RegStorage t_reg = AllocTemp();
    OpRegCopy(t_reg, rl_src.reg.GetHigh());  // Save the high word before the sub clobbers it.
    OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
    OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(kOpSub, rl_result.reg.GetLow(), z_reg, rl_src.reg.GetLow());
    OpRegRegReg(kOpSbc, rl_result.reg.GetHigh(), z_reg, rl_src.reg.GetHigh());
  }
  FreeTemp(z_reg);
  StoreValueWide(rl_dest, rl_result);
}

void Arm64Mir2Lir::GenLongOp(OpKind op, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
  RegLocation rl_result;
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLocWide(rl_dest, kCoreReg, true);
  OpRegRegRegShift(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg(),
                   ENCODE_NO_SHIFT, /*is_wide*/ true);
  StoreValueWide(rl_dest, rl_result);
}
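
// Note the contrast with the arm32 back end: there, 64-bit arithmetic needs an
// adds/adc (or subs/sbc) pair over register pairs, whereas GenLongOp above
// emits one wide A64 instruction, e.g. kOpAdd with is_wide set becomes a
// single "add xD, xN, xM".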

void Arm64Mir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_src2) {
  GenLongOp(kOpMul, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpAdd, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpSub, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenAndLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpAnd, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenOrLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                             RegLocation rl_src2) {
  GenLongOp(kOpOr, rl_dest, rl_src1, rl_src2);
}

void Arm64Mir2Lir::GenXorLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
                              RegLocation rl_src2) {
  GenLongOp(kOpXor, rl_dest, rl_src1, rl_src2);
}

/*
 * Generate array load.
 */
void Arm64Mir2Lir::GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                               RegLocation rl_index, RegLocation rl_dest, int scale) {
  // TODO(Arm64): check this.
  UNIMPLEMENTED(WARNING);

  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  int data_offset;
  RegLocation rl_result;
  bool constant_index = rl_index.is_const;
  rl_array = LoadValue(rl_array, kCoreReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  if (rl_dest.wide) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  if (rl_dest.wide || rl_dest.fp || constant_index) {
    RegStorage reg_ptr;
    if (constant_index) {
      reg_ptr = rl_array.reg;  // NOTE: must not alter reg_ptr in constant case.
    } else {
      // No special indexed operation, lea + load w/ displacement.
      reg_ptr = AllocTemp();
      OpRegRegRegShift(kOpAdd, reg_ptr.GetReg(), rl_array.reg.GetReg(), rl_index.reg.GetReg(),
                       EncodeShift(kA64Lsl, scale));
      FreeTemp(rl_index.reg);
    }
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }
    LoadBaseDisp(reg_ptr, data_offset, rl_result.reg, size);
    MarkPossibleNullPointerException(opt_flags);
    if (!constant_index) {
      FreeTemp(reg_ptr);
    }
    if (rl_dest.wide) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    // Offset base, then use indexed load.
    RegStorage reg_ptr = AllocTemp();
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    FreeTemp(rl_array.reg);
    rl_result = EvalLoc(rl_dest, reg_class, true);

    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    LoadBaseIndexed(reg_ptr, rl_index.reg, rl_result.reg, scale, size);
    MarkPossibleNullPointerException(opt_flags);
    FreeTemp(reg_ptr);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Generate array store.
 */
void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                               RegLocation rl_index, RegLocation rl_src, int scale,
                               bool card_mark) {
  // TODO(Arm64): check this.
  UNIMPLEMENTED(WARNING);

  RegisterClass reg_class = RegClassBySize(size);
  int len_offset = mirror::Array::LengthOffset().Int32Value();
  bool constant_index = rl_index.is_const;

  int data_offset;
  if (size == k64 || size == kDouble) {
    data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Int32Value();
  } else {
    data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Int32Value();
  }

  // If index is constant, just fold it into the data offset.
  if (constant_index) {
    data_offset += mir_graph_->ConstantValue(rl_index) << scale;
  }

  rl_array = LoadValue(rl_array, kCoreReg);
  if (!constant_index) {
    rl_index = LoadValue(rl_index, kCoreReg);
  }

  RegStorage reg_ptr;
  bool allocated_reg_ptr_temp = false;
  if (constant_index) {
    reg_ptr = rl_array.reg;
  } else if (IsTemp(rl_array.reg) && !card_mark) {
    Clobber(rl_array.reg);
    reg_ptr = rl_array.reg;
  } else {
    allocated_reg_ptr_temp = true;
    reg_ptr = AllocTemp();
  }

  /* null object? */
  GenNullCheck(rl_array.reg, opt_flags);

  bool needs_range_check = (!(opt_flags & MIR_IGNORE_RANGE_CHECK));
  RegStorage reg_len;
  if (needs_range_check) {
    reg_len = AllocTemp();
    // NOTE: max live temps(4) here.
    /* Get len */
    Load32Disp(rl_array.reg, len_offset, reg_len);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    ForceImplicitNullCheck(rl_array.reg, opt_flags);
  }
  /* at this point, reg_ptr points to array, 2 live temps */
  if (rl_src.wide || rl_src.fp || constant_index) {
    if (rl_src.wide) {
      rl_src = LoadValueWide(rl_src, reg_class);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
    }
    if (!constant_index) {
      OpRegRegRegShift(kOpAdd, reg_ptr.GetReg(), rl_array.reg.GetReg(), rl_index.reg.GetReg(),
                       EncodeShift(kA64Lsl, scale));
    }
    if (needs_range_check) {
      if (constant_index) {
        GenArrayBoundsCheck(mir_graph_->ConstantValue(rl_index), reg_len);
      } else {
        GenArrayBoundsCheck(rl_index.reg, reg_len);
      }
      FreeTemp(reg_len);
    }

    StoreBaseDisp(reg_ptr, data_offset, rl_src.reg, size);
    MarkPossibleNullPointerException(opt_flags);
  } else {
    /* reg_ptr -> array data */
    OpRegRegImm(kOpAdd, reg_ptr, rl_array.reg, data_offset);
    rl_src = LoadValue(rl_src, reg_class);
    if (needs_range_check) {
      GenArrayBoundsCheck(rl_index.reg, reg_len);
      FreeTemp(reg_len);
    }
    StoreBaseIndexed(reg_ptr, rl_index.reg, rl_src.reg, scale, size);
    MarkPossibleNullPointerException(opt_flags);
  }
  if (allocated_reg_ptr_temp) {
    FreeTemp(reg_ptr);
  }
  if (card_mark) {
    MarkGCCard(rl_src.reg, rl_array.reg);
  }
}


void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
                                     RegLocation rl_dest, RegLocation rl_src,
                                     RegLocation rl_shift) {
  // TODO(Arm64): check this.
  UNIMPLEMENTED(WARNING);

  rl_src = LoadValueWide(rl_src, kCoreReg);
  // Per spec, we only care about low 6 bits of shift amount.
  int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
  if (shift_amount == 0) {
    StoreValueWide(rl_dest, rl_src);
    return;
  }
  if (BadOverlap(rl_src, rl_dest)) {
    GenShiftOpLong(opcode, rl_dest, rl_src, rl_shift);
    return;
  }
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      if (shift_amount == 1) {
        OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), rl_src.reg.GetLow());
        OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), rl_src.reg.GetHigh());
      } else if (shift_amount == 32) {
        OpRegCopy(rl_result.reg.GetHigh(), rl_src.reg);
        LoadConstant(rl_result.reg.GetLow(), 0);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetLow(), shift_amount - 32);
        LoadConstant(rl_result.reg.GetLow(), 0);
      } else {
        OpRegRegImm(kOpLsl, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.reg.GetHighReg(), rl_result.reg.GetHighReg(),
                         rl_src.reg.GetLowReg(), EncodeShift(kA64Lsr, 32 - shift_amount));
        OpRegRegImm(kOpLsl, rl_result.reg.GetLow(), rl_src.reg.GetLow(), shift_amount);
      }
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      if (shift_amount == 32) {
        OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 31);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpAsr, rl_result.reg.GetLow(), rl_src.reg.GetHigh(), shift_amount - 32);
        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 31);
      } else {
        RegStorage t_reg = AllocTemp();
        OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.reg.GetLowReg(), t_reg.GetReg(),
                         rl_src.reg.GetHighReg(), EncodeShift(kA64Lsl, 32 - shift_amount));
        FreeTemp(t_reg);
        OpRegRegImm(kOpAsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
      }
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      if (shift_amount == 32) {
        OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetHigh());
        LoadConstant(rl_result.reg.GetHigh(), 0);
      } else if (shift_amount > 31) {
        OpRegRegImm(kOpLsr, rl_result.reg.GetLow(), rl_src.reg.GetHigh(), shift_amount - 32);
        LoadConstant(rl_result.reg.GetHigh(), 0);
      } else {
        RegStorage t_reg = AllocTemp();
        OpRegRegImm(kOpLsr, t_reg, rl_src.reg.GetLow(), shift_amount);
        OpRegRegRegShift(kOpOr, rl_result.reg.GetLowReg(), t_reg.GetReg(),
                         rl_src.reg.GetHighReg(), EncodeShift(kA64Lsl, 32 - shift_amount));
        FreeTemp(t_reg);
        OpRegRegImm(kOpLsr, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), shift_amount);
      }
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  StoreValueWide(rl_dest, rl_result);
}
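
// A worked case for the pair-based decomposition above (note that, per the
// TODO, this is still the 32-bit register-pair shape rather than a single
// wide A64 shift): SHL_LONG by 40 takes the "shift_amount > 31" arm and
// produces hi = lo << 8, lo = 0, since only bits 32..63 of the result can
// be non-zero.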

void Arm64Mir2Lir::GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                     RegLocation rl_src1, RegLocation rl_src2) {
  // TODO(Arm64): implement this.
  UNIMPLEMENTED(WARNING);

  if ((opcode == Instruction::SUB_LONG_2ADDR) || (opcode == Instruction::SUB_LONG)) {
    if (!rl_src2.is_const) {
      // Don't bother with special handling for subtract from immediate.
      GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
      return;
    }
  } else {
    // Normalize.
    if (!rl_src2.is_const) {
      DCHECK(rl_src1.is_const);
      std::swap(rl_src1, rl_src2);
    }
  }
  if (BadOverlap(rl_src1, rl_dest)) {
    GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
    return;
  }
  DCHECK(rl_src2.is_const);
  // TODO(Arm64): implement this.
  // int64_t val = mir_graph_->ConstantValueWide(rl_src2);
  int32_t mod_imm_lo = -1;  // ModifiedImmediate(val_lo);
  int32_t mod_imm_hi = -1;  // ModifiedImmediate(val_hi);

  // Only a subset of add/sub immediate instructions set carry - so bail if we don't fit.
  switch (opcode) {
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if ((mod_imm_lo < 0) || (mod_imm_hi < 0)) {
        GenArithOpLong(opcode, rl_dest, rl_src1, rl_src2);
        return;
      }
      break;
    default:
      break;
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // NOTE: once we've done the EvalLoc on dest, we can no longer bail.
  switch (opcode) {
#if 0
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      NewLIR3(kThumb2AddRRI8M, rl_result.reg.GetLowReg(), rl_src1.reg.GetLowReg(), mod_imm_lo);
      NewLIR3(kThumb2AdcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if ((val_lo != 0) || (rl_result.reg.GetLowReg() != rl_src1.reg.GetLowReg())) {
        OpRegRegImm(kOpOr, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
      }
      if ((val_hi != 0) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
        OpRegRegImm(kOpOr, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
      }
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      OpRegRegImm(kOpXor, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
      OpRegRegImm(kOpXor, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
      break;
    case Instruction::AND_LONG:
    case Instruction::AND_LONG_2ADDR:
      if ((val_lo != 0xffffffff) || (rl_result.reg.GetLowReg() != rl_src1.reg.GetLowReg())) {
        OpRegRegImm(kOpAnd, rl_result.reg.GetLow(), rl_src1.reg.GetLow(), val_lo);
      }
      if ((val_hi != 0xffffffff) || (rl_result.reg.GetHighReg() != rl_src1.reg.GetHighReg())) {
        OpRegRegImm(kOpAnd, rl_result.reg.GetHigh(), rl_src1.reg.GetHigh(), val_hi);
      }
      break;
    case Instruction::SUB_LONG_2ADDR:
    case Instruction::SUB_LONG:
      NewLIR3(kThumb2SubRRI8M, rl_result.reg.GetLowReg(), rl_src1.reg.GetLowReg(), mod_imm_lo);
      NewLIR3(kThumb2SbcRRI8M, rl_result.reg.GetHighReg(), rl_src1.reg.GetHighReg(), mod_imm_hi);
      break;
#endif
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  StoreValueWide(rl_dest, rl_result);
}

/**
 * @brief Split a list of registers into pairs (plus a possible final single).
 *
 * Given a list of registers in @p reg_mask, split the list in pairs. Use as follows:
 * @code
 * int reg1 = -1, reg2 = -1;
 * while (reg_mask) {
 *   reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
 *   if (UNLIKELY(reg2 < 0)) {
 *     // Single register in reg1.
 *   } else {
 *     // Pair in reg1, reg2.
 *   }
 * }
 * @endcode
 */
uint32_t Arm64Mir2Lir::GenPairWise(uint32_t reg_mask, int* reg1, int* reg2) {
  // Find first register.
  int first_bit_set = __builtin_ctz(reg_mask) + 1;
  int reg = *reg1 + first_bit_set;
  reg_mask >>= first_bit_set;

  if (LIKELY(reg_mask)) {
    // Save the first register, find the second and use the pair opcode.
    int second_bit_set = __builtin_ctz(reg_mask) + 1;
    *reg2 = reg;
    reg_mask >>= second_bit_set;
    *reg1 = reg + second_bit_set;
    return reg_mask;
  }

  // Use the single opcode, as we just have one register.
  *reg1 = reg;
  *reg2 = -1;
  return reg_mask;
}
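
// One walk through GenPairWise (a sketch, starting from reg1 = reg2 = -1 as
// in the usage snippet above): reg_mask = 0b1101 (registers 0, 2 and 3) first
// returns the pair reg2 = 0, reg1 = 2 with mask 0b1 left over; the next call
// returns the single reg1 = 3. Note that reg1 carries the running register
// position between calls, which is why the callers below keep it across loop
// iterations.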

void Arm64Mir2Lir::UnSpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int pop_log2_size = 3;

  for (offset = (offset >> pop_log2_size) - 1; reg_mask; offset--) {
    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
    if (UNLIKELY(reg2 < 0)) {
      // TODO(Arm64): replace Solo32 with Solo64, once rxN are defined properly.
      NewLIR3(WIDE(kA64Ldr3rXD), RegStorage::Solo32(reg1).GetReg(), base.GetReg(), offset);
    } else {
      // TODO(Arm64): replace Solo32 with Solo64 (twice below), once rxN are defined properly.
      NewLIR4(WIDE(kA64Ldp4rrXD), RegStorage::Solo32(reg1).GetReg(),
              RegStorage::Solo32(reg2).GetReg(), base.GetReg(), offset);
    }
  }
}

void Arm64Mir2Lir::SpillCoreRegs(RegStorage base, int offset, uint32_t reg_mask) {
  int reg1 = -1, reg2 = -1;
  const int pop_log2_size = 3;

  for (offset = (offset >> pop_log2_size) - 1; reg_mask; offset--) {
    reg_mask = GenPairWise(reg_mask, & reg1, & reg2);
    if (UNLIKELY(reg2 < 0)) {
      // TODO(Arm64): replace Solo32 with Solo64, once rxN are defined properly.
      NewLIR3(WIDE(kA64Str3rXD), RegStorage::Solo32(reg1).GetReg(), base.GetReg(), offset);
    } else {
      // TODO(Arm64): replace Solo32 with Solo64 (twice below), once rxN are defined properly.
      NewLIR4(WIDE(kA64Stp4rrXD), RegStorage::Solo32(reg1).GetReg(),
              RegStorage::Solo32(reg2).GetReg(), base.GetReg(), offset);
    }
  }
}

}  // namespace art