/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"

namespace art {

/* This file contains codegen for the A64 ISA. */

static int32_t EncodeImmSingle(uint32_t bits) {
  /*
   * Valid values will have the form:
   *
   *   aBbb.bbbc.defg.h000.0000.0000.0000.0000
   *
   * where B = not(b). In other words, if b == 1, then B == 0 and vice versa.
   */

  // bits[18..0] are cleared.
  if ((bits & 0x0007ffff) != 0)
    return -1;

  // bits[29..25] are all set or all cleared.
  uint32_t b_pattern = (bits >> 16) & 0x3e00;
  if (b_pattern != 0 && b_pattern != 0x3e00)
    return -1;

  // bit[30] and bit[29] are opposite.
  if (((bits ^ (bits << 1)) & 0x40000000) == 0)
    return -1;

  // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
  return (bit7 | bit6 | bit5_to_0);
}
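
// As an illustration of the encoding above: 1.0f has bit pattern 0x3f800000,
// giving a=0, b=1 (so B=0), cd=11 and efgh=0000, so EncodeImmSingle() returns
// 0x70, which is the imm8 operand of "fmov s0, #1.0".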

static int32_t EncodeImmDouble(uint64_t bits) {
  /*
   * Valid values will have the form:
   *
   *   aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
   *   0000.0000.0000.0000.0000.0000.0000.0000
   *
   * where B = not(b).
   */

  // bits[47..0] are cleared.
  if ((bits & UINT64_C(0xffffffffffff)) != 0)
    return -1;

  // bits[61..54] are all set or all cleared.
  uint32_t b_pattern = (bits >> 48) & 0x3fc0;
  if (b_pattern != 0 && b_pattern != 0x3fc0)
    return -1;

  // bit[62] and bit[61] are opposite.
  if (((bits ^ (bits << 1)) & UINT64_C(0x4000000000000000)) == 0)
    return -1;

  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
  return (bit7 | bit6 | bit5_to_0);
}
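
// Similarly, 1.0 (double) has bit pattern 0x3ff0000000000000: a=0, b=1 (B=0),
// cd=11, efgh=0000, so EncodeImmDouble() also returns 0x70 ("fmov d0, #1.0").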

LIR* Arm64Mir2Lir::LoadFPConstantValue(int r_dest, int32_t value) {
  DCHECK(RegStorage::IsSingle(r_dest));
  if (value == 0) {
    return NewLIR2(kA64Fmov2sw, r_dest, rwzr);
  } else {
    int32_t encoded_imm = EncodeImmSingle((uint32_t)value);
    if (encoded_imm >= 0) {
      return NewLIR2(kA64Fmov2fI, r_dest, encoded_imm);
    }
  }

  LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
  if (data_target == NULL) {
    data_target = AddWordData(&literal_list_, value);
  }

  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kA64Ldr2fp,
                            r_dest, 0, 0, 0, 0, data_target);
  AppendLIR(load_pc_rel);
  return load_pc_rel;
}

LIR* Arm64Mir2Lir::LoadFPConstantValueWide(int r_dest, int64_t value) {
  DCHECK(RegStorage::IsDouble(r_dest));
  if (value == 0) {
    return NewLIR2(kA64Fmov2Sx, r_dest, rwzr);
  } else {
    int32_t encoded_imm = EncodeImmDouble(value);
    if (encoded_imm >= 0) {
      return NewLIR2(FWIDE(kA64Fmov2fI), r_dest, encoded_imm);
    }
  }

  // No short form - load from the literal pool.
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
  if (data_target == NULL) {
    data_target = AddWideData(&literal_list_, val_lo, val_hi);
  }

  DCHECK(RegStorage::IsFloat(r_dest));
  ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
  LIR* load_pc_rel = RawLIR(current_dalvik_offset_, FWIDE(kA64Ldr2fp),
                            r_dest, 0, 0, 0, 0, data_target);
  AppendLIR(load_pc_rel);
  return load_pc_rel;
}

static int CountLeadingZeros(bool is_wide, uint64_t value) {
  return (is_wide) ? __builtin_clzl(value) : __builtin_clz((uint32_t)value);
}

static int CountTrailingZeros(bool is_wide, uint64_t value) {
  return (is_wide) ? __builtin_ctzl(value) : __builtin_ctz((uint32_t)value);
}

static int CountSetBits(bool is_wide, uint64_t value) {
  return ((is_wide) ?
          __builtin_popcountl(value) : __builtin_popcount((uint32_t)value));
}

/**
 * @brief Try encoding an immediate in the form required by logical instructions.
 *
 * @param is_wide Whether @p value is a 64-bit (as opposed to 32-bit) value.
 * @param value An integer to be encoded. This is interpreted as 64-bit if @p is_wide is true and as
 *   32-bit if @p is_wide is false.
 * @return A non-negative integer containing the encoded immediate or -1 if the encoding failed.
 * @note This is the inverse of Arm64Mir2Lir::DecodeLogicalImmediate().
 */
int Arm64Mir2Lir::EncodeLogicalImmediate(bool is_wide, uint64_t value) {
  unsigned n, imm_s, imm_r;

  // Logical immediates are encoded using parameters n, imm_s and imm_r using
  // the following table:
  //
  //    N   imms    immr   size        S             R
  //    1  ssssss  rrrrrr   64    UInt(ssssss)  UInt(rrrrrr)
  //    0  0sssss  xrrrrr   32    UInt(sssss)   UInt(rrrrr)
  //    0  10ssss  xxrrrr   16    UInt(ssss)    UInt(rrrr)
  //    0  110sss  xxxrrr    8    UInt(sss)     UInt(rrr)
  //    0  1110ss  xxxxrr    4    UInt(ss)      UInt(rr)
  //    0  11110s  xxxxxr    2    UInt(s)       UInt(r)
  //    (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //
  // To test if an arbitrary immediate can be encoded using this scheme, an
  // iterative algorithm is used.

  // 1. If the value has all set or all clear bits, it can't be encoded.
  if (value == 0 || value == ~UINT64_C(0) ||
      (!is_wide && (uint32_t)value == ~UINT32_C(0))) {
    return -1;
  }

  unsigned lead_zero = CountLeadingZeros(is_wide, value);
  unsigned lead_one = CountLeadingZeros(is_wide, ~value);
  unsigned trail_zero = CountTrailingZeros(is_wide, value);
  unsigned trail_one = CountTrailingZeros(is_wide, ~value);
  unsigned set_bits = CountSetBits(is_wide, value);

  // The fixed bits in the immediate s field.
  // If width == 64 (X reg), start at 0xFFFFFF80.
  // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
  // widths won't be executed.
  unsigned width = (is_wide) ? 64 : 32;
  int imm_s_fixed = (is_wide) ? -128 : -64;
  int imm_s_mask = 0x3f;

  for (;;) {
    // 2. If the value is two bits wide, it can be encoded.
    if (width == 2) {
      n = 0;
      imm_s = 0x3C;
      imm_r = (value & 3) - 1;
      break;
    }

    n = (width == 64) ? 1 : 0;
    imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
    if ((lead_zero + set_bits) == width) {
      imm_r = 0;
    } else {
      imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
    }

    // 3. If the sum of leading zeros, trailing zeros and set bits is
    //    equal to the bit width of the value, it can be encoded.
    if (lead_zero + trail_zero + set_bits == width) {
      break;
    }

    // 4. If the sum of leading ones, trailing ones and unset bits in the
    //    value is equal to the bit width of the value, it can be encoded.
    if (lead_one + trail_one + (width - set_bits) == width) {
      break;
    }

    // 5. If the most-significant half of the bitwise value is equal to
    //    the least-significant half, return to step 2 using the
    //    least-significant half of the value.
    uint64_t mask = (UINT64_C(1) << (width >> 1)) - 1;
    if ((value & mask) == ((value >> (width >> 1)) & mask)) {
      width >>= 1;
      set_bits >>= 1;
      imm_s_fixed >>= 1;
      continue;
    }

    // 6. Otherwise, the value can't be encoded.
    return -1;
  }

  return (n << 12 | imm_r << 6 | imm_s);
}
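
// As a concrete illustration: for the 32-bit value 0x0000ff00 the loop stops at
// step 3 (16 leading zeros + 8 trailing zeros + 8 set bits == 32), giving
// n=0, imm_r=24, imm_s=7, so the function returns 0x607, i.e. the N:immr:imms
// fields used by "and w0, w1, #0xff00".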

bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value) {
  return false;  // (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
}

bool Arm64Mir2Lir::InexpensiveConstantFloat(int32_t value) {
  return EncodeImmSingle(value) >= 0;
}

bool Arm64Mir2Lir::InexpensiveConstantLong(int64_t value) {
  return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
}

bool Arm64Mir2Lir::InexpensiveConstantDouble(int64_t value) {
  return EncodeImmDouble(value) >= 0;
}

/*
 * Load an immediate using a single instruction when possible; otherwise
 * use a pair of movz and movk instructions.
 *
 * No additional register clobbering operation is performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp or
 * 2) The codegen is under fixed register usage
 */
LIR* Arm64Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
  LIR* res;

  if (r_dest.IsFloat()) {
    return LoadFPConstantValue(r_dest.GetReg(), value);
  }

  // Loading SP/ZR with an immediate is not supported.
  DCHECK_NE(r_dest.GetReg(), rwsp);
  DCHECK_NE(r_dest.GetReg(), rwzr);

  // Compute how many movk, movz instructions are needed to load the value.
  uint16_t high_bits = High16Bits(value);
  uint16_t low_bits = Low16Bits(value);

  bool low_fast = ((uint16_t)(low_bits + 1) <= 1);
  bool high_fast = ((uint16_t)(high_bits + 1) <= 1);

  if (LIKELY(low_fast || high_fast)) {
    // 1 instruction is enough to load the immediate.
    if (LIKELY(low_bits == high_bits)) {
      // Value is either 0 or -1: we can just use wzr.
      ArmOpcode opcode = LIKELY(low_bits == 0) ? kA64Mov2rr : kA64Mvn2rr;
      res = NewLIR2(opcode, r_dest.GetReg(), rwzr);
    } else {
      uint16_t uniform_bits, useful_bits;
      int shift;

      if (LIKELY(high_fast)) {
        shift = 0;
        uniform_bits = high_bits;
        useful_bits = low_bits;
      } else {
        shift = 1;
        uniform_bits = low_bits;
        useful_bits = high_bits;
      }

      if (UNLIKELY(uniform_bits != 0)) {
        res = NewLIR3(kA64Movn3rdM, r_dest.GetReg(), ~useful_bits, shift);
      } else {
        res = NewLIR3(kA64Movz3rdM, r_dest.GetReg(), useful_bits, shift);
      }
    }
  } else {
    // movk, movz require 2 instructions. Try detecting logical immediates.
    int log_imm = EncodeLogicalImmediate(/*is_wide=*/false, value);
    if (log_imm >= 0) {
      res = NewLIR3(kA64Orr3Rrl, r_dest.GetReg(), rwzr, log_imm);
    } else {
      // Use 2 instructions.
      res = NewLIR3(kA64Movz3rdM, r_dest.GetReg(), low_bits, 0);
      NewLIR3(kA64Movk3rdM, r_dest.GetReg(), high_bits, 1);
    }
  }

  return res;
}
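
// A few illustrative cases: 0x12345678 becomes "movz w0, #0x5678" plus
// "movk w0, #0x1234, lsl #16"; 0xffff1234 needs only "movn w0, #0xedcb"; and a
// repeating pattern such as 0x00ff00ff, which neither half-word shortcut covers,
// is emitted as a single "orr w0, wzr, #0x00ff00ff" via EncodeLogicalImmediate().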

LIR* Arm64Mir2Lir::OpUnconditionalBranch(LIR* target) {
  LIR* res = NewLIR1(kA64B1t, 0 /* offset to be patched during assembly */);
  res->target = target;
  return res;
}

LIR* Arm64Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
  LIR* branch = NewLIR2(kA64B2ct, ArmConditionEncoding(cc),
                        0 /* offset to be patched */);
  branch->target = target;
  return branch;
}

LIR* Arm64Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
  ArmOpcode opcode = kA64Brk1d;
  switch (op) {
    case kOpBlx:
      opcode = kA64Blr1x;
      break;
    // TODO(Arm64): port kThumbBx.
    // case kOpBx:
    //   opcode = kThumbBx;
    //   break;
    default:
      LOG(FATAL) << "Bad opcode " << op;
  }
  return NewLIR1(opcode, r_dest_src.GetReg());
}

LIR* Arm64Mir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift) {
  ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
  CHECK_EQ(r_dest_src1.Is64Bit(), r_src2.Is64Bit());
  ArmOpcode opcode = kA64Brk1d;

  switch (op) {
    case kOpCmn:
      opcode = kA64Cmn3rro;
      break;
    case kOpCmp:
      opcode = kA64Cmp3rro;
      break;
    case kOpMov:
      opcode = kA64Mov2rr;
      break;
    case kOpMvn:
      opcode = kA64Mvn2rr;
      break;
    case kOpNeg:
      opcode = kA64Neg3rro;
      break;
    case kOpTst:
      opcode = kA64Tst3rro;
      break;
    case kOpRev:
      DCHECK_EQ(shift, 0);
      // Binary, but rm is encoded twice.
      return NewLIR3(kA64Rev2rr | wide, r_dest_src1.GetReg(), r_src2.GetReg(), r_src2.GetReg());
      break;
    case kOpRevsh:
      // Binary, but rm is encoded twice.
      return NewLIR3(kA64Rev162rr | wide, r_dest_src1.GetReg(), r_src2.GetReg(), r_src2.GetReg());
      break;
    case kOp2Byte:
      DCHECK_EQ(shift, ENCODE_NO_SHIFT);
      // "sbfx r1, r2, #imm1, #imm2" is "sbfm r1, r2, #imm1, #(imm1 + imm2 - 1)".
      // For now we use sbfm directly.
      return NewLIR4(kA64Sbfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 7);
    case kOp2Short:
      DCHECK_EQ(shift, ENCODE_NO_SHIFT);
      // For now we use sbfm rather than its alias, sbfx.
      return NewLIR4(kA64Sbfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 15);
    case kOp2Char:
      // "ubfx r1, r2, #imm1, #imm2" is "ubfm r1, r2, #imm1, #(imm1 + imm2 - 1)".
      // For now we use ubfm directly.
      DCHECK_EQ(shift, ENCODE_NO_SHIFT);
      return NewLIR4(kA64Ubfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 15);
    default:
      return OpRegRegRegShift(op, r_dest_src1, r_dest_src1, r_src2, shift);
  }

  DCHECK(!IsPseudoLirOp(opcode));
  if (EncodingMap[opcode].flags & IS_BINARY_OP) {
    DCHECK_EQ(shift, ENCODE_NO_SHIFT);
    return NewLIR2(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg());
  } else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
    ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
    if (kind == kFmtShift) {
      return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(), shift);
    }
  }

  LOG(FATAL) << "Unexpected encoding operand count";
  return NULL;
}
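
// Note: the sbfm/ubfm immediates used above correspond to the usual aliases,
// e.g. kOp2Byte emits "sbfm w0, w1, #0, #7" (i.e. "sxtb w0, w1"), kOp2Short
// emits "sbfm w0, w1, #0, #15" ("sxth") and kOp2Char emits
// "ubfm w0, w1, #0, #15" ("uxth").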

LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int extend) {
  ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
  ArmOpcode opcode = kA64Brk1d;

  switch (op) {
    case kOpCmn:
      opcode = kA64Cmn3Rre;
      break;
    case kOpCmp:
      opcode = kA64Cmp3Rre;
      break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }

  DCHECK(!IsPseudoLirOp(opcode));
  if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
    ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
    if (kind == kFmtExtend) {
      return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(), extend);
    }
  }

  LOG(FATAL) << "Unexpected encoding operand count";
  return NULL;
}

LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
  /* RegReg operations with SP in first parameter need extended register instruction form.
   * Only CMN and CMP instructions are implemented.
   */
  if (r_dest_src1 == rs_rA64_SP) {
    return OpRegRegExtend(op, r_dest_src1, r_src2, ENCODE_NO_EXTEND);
  } else {
    return OpRegRegShift(op, r_dest_src1, r_src2, ENCODE_NO_SHIFT);
  }
}

LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
  UNIMPLEMENTED(FATAL);
  return nullptr;
}

LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
  UNIMPLEMENTED(FATAL);
  return nullptr;
}

LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
  LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
                                    RegStorage r_src2, int shift) {
  ArmOpcode opcode = kA64Brk1d;

  switch (op) {
    case kOpAdd:
      opcode = kA64Add4rrro;
      break;
    case kOpSub:
      opcode = kA64Sub4rrro;
      break;
    // case kOpRsub:
    //   opcode = kA64RsubWWW;
    //   break;
    case kOpAdc:
      opcode = kA64Adc3rrr;
      break;
    case kOpAnd:
      opcode = kA64And4rrro;
      break;
    case kOpXor:
      opcode = kA64Eor4rrro;
      break;
    case kOpMul:
      opcode = kA64Mul3rrr;
      break;
    case kOpDiv:
      opcode = kA64Sdiv3rrr;
      break;
    case kOpOr:
      opcode = kA64Orr4rrro;
      break;
    case kOpSbc:
      opcode = kA64Sbc3rrr;
      break;
    case kOpLsl:
      opcode = kA64Lsl3rrr;
      break;
    case kOpLsr:
      opcode = kA64Lsr3rrr;
      break;
    case kOpAsr:
      opcode = kA64Asr3rrr;
      break;
    case kOpRor:
      opcode = kA64Ror3rrr;
      break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }

  // The instructions above belong to two kinds:
  // - 4-operands instructions, where the last operand is a shift/extend immediate,
  // - 3-operands instructions with no shift/extend.
  ArmOpcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;
  CHECK_EQ(r_dest.Is64Bit(), r_src1.Is64Bit());
  CHECK_EQ(r_dest.Is64Bit(), r_src2.Is64Bit());
  if (EncodingMap[opcode].flags & IS_QUAD_OP) {
    DCHECK(!IsExtendEncoding(shift));
    return NewLIR4(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), shift);
  } else {
    DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
    DCHECK_EQ(shift, ENCODE_NO_SHIFT);
    return NewLIR3(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
  }
}

LIR* Arm64Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
  return OpRegRegRegShift(op, r_dest, r_src1, r_src2, ENCODE_NO_SHIFT);
}

// Should this be taking an int64_t value?
LIR* Arm64Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
  LIR* res;
  bool neg = (value < 0);
  int64_t abs_value = (neg) ? -value : value;
  ArmOpcode opcode = kA64Brk1d;
  ArmOpcode alt_opcode = kA64Brk1d;
  int32_t log_imm = -1;
  bool is_wide = r_dest.Is64Bit();
  ArmOpcode wide = (is_wide) ? WIDE(0) : UNWIDE(0);

  switch (op) {
    case kOpLsl: {
      // "lsl w1, w2, #imm" is an alias of "ubfm w1, w2, #(-imm MOD 32), #(31-imm)"
      // and "lsl x1, x2, #imm" of "ubfm x1, x2, #(-imm MOD 64), #(63-imm)".
      // For now, we just use ubfm directly.
      int max_value = (is_wide) ? 63 : 31;
      return NewLIR4(kA64Ubfm4rrdd | wide, r_dest.GetReg(), r_src1.GetReg(),
                     (-value) & max_value, max_value - value);
    }
    case kOpLsr:
      return NewLIR3(kA64Lsr3rrd | wide, r_dest.GetReg(), r_src1.GetReg(), value);
    case kOpAsr:
      return NewLIR3(kA64Asr3rrd | wide, r_dest.GetReg(), r_src1.GetReg(), value);
    case kOpRor:
      // "ror r1, r2, #imm" is an alias of "extr r1, r2, r2, #imm".
      // For now, we just use extr directly.
      return NewLIR4(kA64Extr4rrrd | wide, r_dest.GetReg(), r_src1.GetReg(), r_src1.GetReg(),
                     value);
    case kOpAdd:
      neg = !neg;
      // Note: intentional fallthrough
    case kOpSub:
      // Add and sub below read/write sp rather than xzr.
      if (abs_value < 0x1000) {
        opcode = (neg) ? kA64Add4RRdT : kA64Sub4RRdT;
        return NewLIR4(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), abs_value, 0);
      } else if ((abs_value & UINT64_C(0xfff)) == 0 && ((abs_value >> 12) < 0x1000)) {
        opcode = (neg) ? kA64Add4RRdT : kA64Sub4RRdT;
        return NewLIR4(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), abs_value >> 12, 1);
      } else {
        log_imm = -1;
        alt_opcode = (neg) ? kA64Add4rrro : kA64Sub4rrro;
      }
      break;
    // case kOpRsub:
    //   opcode = kThumb2RsubRRI8M;
    //   alt_opcode = kThumb2RsubRRR;
    //   break;
    case kOpAdc:
      log_imm = -1;
      alt_opcode = kA64Adc3rrr;
      break;
    case kOpSbc:
      log_imm = -1;
      alt_opcode = kA64Sbc3rrr;
      break;
    case kOpOr:
      log_imm = EncodeLogicalImmediate(is_wide, value);
      opcode = kA64Orr3Rrl;
      alt_opcode = kA64Orr4rrro;
      break;
    case kOpAnd:
      log_imm = EncodeLogicalImmediate(is_wide, value);
      opcode = kA64And3Rrl;
      alt_opcode = kA64And4rrro;
      break;
    case kOpXor:
      log_imm = EncodeLogicalImmediate(is_wide, value);
      opcode = kA64Eor3Rrl;
      alt_opcode = kA64Eor4rrro;
      break;
    case kOpMul:
      // TUNING: power of 2, shift & add
      log_imm = -1;
      alt_opcode = kA64Mul3rrr;
      break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
  }

  if (log_imm >= 0) {
    return NewLIR3(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), log_imm);
  } else {
    RegStorage r_scratch = AllocTemp();
    LoadConstant(r_scratch, value);
    if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
      res = NewLIR4(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), 0);
    else
      res = NewLIR3(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
    FreeTemp(r_scratch);
    return res;
  }
}
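
// For instance, a kOpAnd with immediate 0xff00 on a w register can take the
// logical-immediate path and emit "and w0, w1, #0xff00", while a kOpAdd with
// 0x5000 hits the shifted 12-bit case and emits "add w0, w1, #0x5, lsl #12".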

LIR* Arm64Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
  return OpRegImm64(op, r_dest_src1, static_cast<int64_t>(value));
}

LIR* Arm64Mir2Lir::OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value) {
  ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
  ArmOpcode opcode = kA64Brk1d;
  ArmOpcode neg_opcode = kA64Brk1d;
  bool shift;
  bool neg = (value < 0);
  uint64_t abs_value = (neg) ? -value : value;

  if (LIKELY(abs_value < 0x1000)) {
    // abs_value is a 12-bit immediate.
    shift = false;
  } else if ((abs_value & UINT64_C(0xfff)) == 0 && ((abs_value >> 12) < 0x1000)) {
    // abs_value is a shifted 12-bit immediate.
    shift = true;
    abs_value >>= 12;
  } else {
    RegStorage r_tmp = AllocTemp();
    LIR* res = LoadConstant(r_tmp, value);
    OpRegReg(op, r_dest_src1, r_tmp);
    FreeTemp(r_tmp);
    return res;
  }

  switch (op) {
    case kOpAdd:
      neg_opcode = kA64Sub4RRdT;
      opcode = kA64Add4RRdT;
      break;
    case kOpSub:
      neg_opcode = kA64Add4RRdT;
      opcode = kA64Sub4RRdT;
      break;
    case kOpCmp:
      neg_opcode = kA64Cmn3RdT;
      opcode = kA64Cmp3RdT;
      break;
    default:
      LOG(FATAL) << "Bad op-kind in OpRegImm: " << op;
      break;
  }

  if (UNLIKELY(neg))
    opcode = neg_opcode;

  if (EncodingMap[opcode].flags & IS_QUAD_OP)
    return NewLIR4(opcode | wide, r_dest_src1.GetReg(), r_dest_src1.GetReg(), abs_value,
                   (shift) ? 1 : 0);
  else
    return NewLIR3(opcode | wide, r_dest_src1.GetReg(), abs_value, (shift) ? 1 : 0);
}
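
// A negative immediate simply flips the opcode: kOpAdd with -16 on an x
// register becomes "sub x0, x0, #0x10" and kOpCmp with -1 becomes
// "cmn x0, #0x1"; values that are not (possibly shifted) 12-bit immediates go
// through a scratch register and OpRegReg() instead.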

LIR* Arm64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
  if (r_dest.IsFloat()) {
    return LoadFPConstantValueWide(r_dest.GetReg(), value);
  } else {
    // TODO(Arm64): check whether we can load the immediate with a short form.
    //   e.g. via movz, movk or via logical immediate.

    // No short form - load from the literal pool.
    int32_t val_lo = Low32Bits(value);
    int32_t val_hi = High32Bits(value);
    LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
    if (data_target == NULL) {
      data_target = AddWideData(&literal_list_, val_lo, val_hi);
    }

    ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
    LIR* res = RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp),
                      r_dest.GetReg(), 0, 0, 0, 0, data_target);
    AppendLIR(res);
    return res;
  }
}

int Arm64Mir2Lir::EncodeShift(int shift_type, int amount) {
  return ((shift_type & 0x3) << 7) | (amount & 0x1f);
}

int Arm64Mir2Lir::EncodeExtend(int extend_type, int amount) {
  return (1 << 6) | ((extend_type & 0x7) << 3) | (amount & 0x7);
}

bool Arm64Mir2Lir::IsExtendEncoding(int encoded_value) {
  return ((1 << 6) & encoded_value) != 0;
}
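
// In this packed form, bit 6 distinguishes the two encodings: shift operands
// store the shift type in bits [8:7] and the amount in bits [4:0], while extend
// operands set bit 6, store the extend type in bits [5:3] and the amount in
// bits [2:0].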

LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                   int scale, OpSize size) {
  LIR* load;
  int expected_scale = 0;
  ArmOpcode opcode = kA64Brk1d;

  if (r_dest.IsFloat()) {
    if (r_dest.IsDouble()) {
      DCHECK(size == k64 || size == kDouble);
      expected_scale = 3;
      opcode = FWIDE(kA64Ldr4fXxG);
    } else {
      DCHECK(r_dest.IsSingle());
      DCHECK(size == k32 || size == kSingle);
      expected_scale = 2;
      opcode = kA64Ldr4fXxG;
    }

    DCHECK(scale == 0 || scale == expected_scale);
    return NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(),
                   (scale != 0) ? 1 : 0);
  }

  switch (size) {
    case kDouble:
    case kWord:
    case k64:
      opcode = WIDE(kA64Ldr4rXxG);
      expected_scale = 3;
      break;
    case kSingle:
    case k32:
    case kReference:
      opcode = kA64Ldr4rXxG;
      expected_scale = 2;
      break;
    case kUnsignedHalf:
      opcode = kA64Ldrh4wXxd;
      expected_scale = 1;
      break;
    case kSignedHalf:
      opcode = kA64Ldrsh4rXxd;
      expected_scale = 1;
      break;
    case kUnsignedByte:
      opcode = kA64Ldrb3wXx;
      break;
    case kSignedByte:
      opcode = kA64Ldrsb3rXx;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  if (UNLIKELY(expected_scale == 0)) {
    // This is a tertiary op (e.g. ldrb, ldrsb), it does not support scale.
    DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U);
    DCHECK_EQ(scale, 0);
    load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
  } else {
    DCHECK(scale == 0 || scale == expected_scale);
    load = NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(),
                   (scale != 0) ? 1 : 0);
  }

  return load;
}
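
// Note that the final operand of the four-operand forms above is effectively a
// boolean: 0 selects an unscaled index ("ldr w0, [x1, x2]") and 1 shifts the
// index left by the access size ("ldr w0, [x1, x2, lsl #2]"), which is why only
// scale == 0 or scale == expected_scale is accepted.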

LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                    int scale, OpSize size) {
  LIR* store;
  int expected_scale = 0;
  ArmOpcode opcode = kA64Brk1d;

  if (r_src.IsFloat()) {
    if (r_src.IsDouble()) {
      DCHECK(size == k64 || size == kDouble);
      expected_scale = 3;
      opcode = FWIDE(kA64Str4fXxG);
    } else {
      DCHECK(r_src.IsSingle());
      DCHECK(size == k32 || size == kSingle);
      expected_scale = 2;
      opcode = kA64Str4fXxG;
    }

    DCHECK(scale == 0 || scale == expected_scale);
    return NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(),
                   (scale != 0) ? 1 : 0);
  }

  switch (size) {
    case kDouble:  // Intentional fall-through.
    case kWord:    // Intentional fall-through.
    case k64:
      opcode = WIDE(kA64Str4rXxG);
      expected_scale = 3;
      break;
    case kSingle:  // Intentional fall-through.
    case k32:      // Intentional fall-through.
    case kReference:
      opcode = kA64Str4rXxG;
      expected_scale = 2;
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = kA64Strh4wXxd;
      expected_scale = 1;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kA64Strb3wXx;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  if (UNLIKELY(expected_scale == 0)) {
    // This is a tertiary op (e.g. strb), it does not support scale.
    DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U);
    DCHECK_EQ(scale, 0);
    store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg());
  } else {
    store = NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(),
                    (scale != 0) ? 1 : 0);
  }

  return store;
}

/*
 * Load value from base + displacement. Optionally perform null check
 * on base (which must have an associated s_reg and MIR). If not
 * performing null check, incoming MIR can be null.
 */
LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
                                    OpSize size) {
  LIR* load = NULL;
  ArmOpcode opcode = kA64Brk1d;
  ArmOpcode alt_opcode = kA64Brk1d;
  int scale = 0;

  switch (size) {
    case kDouble:  // Intentional fall-through.
    case kWord:    // Intentional fall-through.
    case k64:
      scale = 3;
      if (r_dest.IsFloat()) {
        DCHECK(r_dest.IsDouble());
        opcode = FWIDE(kA64Ldr3fXD);
        alt_opcode = FWIDE(kA64Ldur3fXd);
      } else {
        opcode = WIDE(kA64Ldr3rXD);
        alt_opcode = WIDE(kA64Ldur3rXd);
      }
      break;
    case kSingle:  // Intentional fall-through.
    case k32:      // Intentional fall-through.
    case kReference:
      scale = 2;
      if (r_dest.IsFloat()) {
        DCHECK(r_dest.IsSingle());
        opcode = kA64Ldr3fXD;
      } else {
        opcode = kA64Ldr3rXD;
      }
      break;
    case kUnsignedHalf:
      scale = 1;
      opcode = kA64Ldrh3wXF;
      break;
    case kSignedHalf:
      scale = 1;
      opcode = kA64Ldrsh3rXF;
      break;
    case kUnsignedByte:
      opcode = kA64Ldrb3wXd;
      break;
    case kSignedByte:
      opcode = kA64Ldrsb3rXd;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  bool displacement_is_aligned = (displacement & ((1 << scale) - 1)) == 0;
  int scaled_disp = displacement >> scale;
  if (displacement_is_aligned && scaled_disp >= 0 && scaled_disp < 4096) {
    // Can use scaled load.
    load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), scaled_disp);
  } else if (alt_opcode != kA64Brk1d && IS_SIGNED_IMM9(displacement)) {
    // Can use unscaled load.
    load = NewLIR3(alt_opcode, r_dest.GetReg(), r_base.GetReg(), displacement);
  } else {
    // Use long sequence.
    RegStorage r_scratch = AllocTemp();
    LoadConstant(r_scratch, displacement);
    load = LoadBaseIndexed(r_base, r_scratch, r_dest, 0, size);
    FreeTemp(r_scratch);
  }

  // TODO: in future may need to differentiate Dalvik accesses w/ spills
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    DCHECK(r_base == rs_rA64_SP);
    AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
  }
  return load;
}
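
// Displacement handling in practice: an 8-byte load at offset 0x100 uses the
// scaled form ("ldr x0, [x1, #0x100]"), a small negative or unaligned offset
// such as -8 falls back to the unscaled form ("ldur x0, [x1, #-8]"), and
// anything else is materialized into a temp and handled by LoadBaseIndexed().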

LIR* Arm64Mir2Lir::LoadBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_dest,
                                        OpSize size) {
  // LoadBaseDisp() will emit correct insn for atomic load on arm64
  // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
  return LoadBaseDisp(r_base, displacement, r_dest, size);
}

LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                                OpSize size) {
  return LoadBaseDispBody(r_base, displacement, r_dest, size);
}

LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
                                     OpSize size) {
  LIR* store = NULL;
  ArmOpcode opcode = kA64Brk1d;
  ArmOpcode alt_opcode = kA64Brk1d;
  int scale = 0;

  switch (size) {
    case kDouble:  // Intentional fall-through.
    case kWord:    // Intentional fall-through.
    case k64:
      scale = 3;
      if (r_src.IsFloat()) {
        DCHECK(r_src.IsDouble());
        opcode = FWIDE(kA64Str3fXD);
        alt_opcode = FWIDE(kA64Stur3fXd);
      } else {
        opcode = FWIDE(kA64Str3rXD);
        alt_opcode = FWIDE(kA64Stur3rXd);
      }
      break;
    case kSingle:  // Intentional fall-through.
    case k32:      // Intentional fall-through.
    case kReference:
      scale = 2;
      if (r_src.IsFloat()) {
        DCHECK(r_src.IsSingle());
        opcode = kA64Str3fXD;
      } else {
        opcode = kA64Str3rXD;
      }
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      scale = 1;
      opcode = kA64Strh3wXF;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kA64Strb3wXd;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  bool displacement_is_aligned = (displacement & ((1 << scale) - 1)) == 0;
  int scaled_disp = displacement >> scale;
  if (displacement_is_aligned && scaled_disp >= 0 && scaled_disp < 4096) {
    // Can use scaled store.
    store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), scaled_disp);
  } else if (alt_opcode != kA64Brk1d && IS_SIGNED_IMM9(displacement)) {
    // Can use unscaled store.
    store = NewLIR3(alt_opcode, r_src.GetReg(), r_base.GetReg(), displacement);
  } else {
    // Use long sequence.
    RegStorage r_scratch = AllocTemp();
    LoadConstant(r_scratch, displacement);
    store = StoreBaseIndexed(r_base, r_scratch, r_src, 0, size);
    FreeTemp(r_scratch);
  }

  // TODO: In future, may need to differentiate Dalvik & spill accesses.
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    DCHECK(r_base == rs_rA64_SP);
    AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
  }
  return store;
}

LIR* Arm64Mir2Lir::StoreBaseDispVolatile(RegStorage r_base, int displacement, RegStorage r_src,
                                         OpSize size) {
  // StoreBaseDisp() will emit correct insn for atomic store on arm64
  // assuming r_src is correctly prepared using RegClassForFieldLoadStore().
  return StoreBaseDisp(r_base, displacement, r_src, size);
}

LIR* Arm64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                                 OpSize size) {
  return StoreBaseDispBody(r_base, displacement, r_src, size);
}

LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
  LOG(FATAL) << "Unexpected use of OpFpRegCopy for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
  UNIMPLEMENTED(FATAL) << "Should not be used.";
  return nullptr;
}

LIR* Arm64Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
  LOG(FATAL) << "Unexpected use of OpThreadMem for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
  LOG(FATAL) << "Unexpected use of OpMem for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                        int displacement, RegStorage r_src, OpSize size) {
  LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
  LOG(FATAL) << "Unexpected use of OpRegMem for Arm64";
  return NULL;
}

LIR* Arm64Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                       int displacement, RegStorage r_dest, OpSize size) {
  LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm64";
  return NULL;
}

}  // namespace art