blob: cd1840ae0a73dbe571645c0e73894ea829b06fdf [file] [log] [blame]
Matteo Franchin43ec8732014-03-31 15:00:14 +01001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "arm64_lir.h"
18#include "codegen_arm64.h"
19#include "dex/quick/mir_to_lir-inl.h"
buzbeeb5860fb2014-06-21 15:31:01 -070020#include "dex/reg_storage_eq.h"
Matteo Franchin43ec8732014-03-31 15:00:14 +010021
22namespace art {
23
Matteo Franchine45fb9e2014-05-06 10:10:30 +010024/* This file contains codegen for the A64 ISA. */
Matteo Franchin43ec8732014-03-31 15:00:14 +010025
// Returns the 8-bit "imm8" encoding of a 32-bit float bit-pattern for use with
// fmov (immediate), or -1 if the value is not representable in that form.
int32_t Arm64Mir2Lir::EncodeImmSingle(uint32_t bits) {
  /*
   * Valid values will have the form:
   *
   *   aBbb.bbbc.defg.h000.0000.0000.0000.0000
   *
   * where B = not(b). In other words, if b == 1, then B == 0 and viceversa.
   */

  // bits[18..0] are cleared (mask 0x7ffff covers bits 18..0; bit 19 is 'h').
  if ((bits & 0x0007ffff) != 0)
    return -1;

  // bits[29..25] are all set or all cleared.
  uint32_t b_pattern = (bits >> 16) & 0x3e00;
  if (b_pattern != 0 && b_pattern != 0x3e00)
    return -1;

  // bit[30] and bit[29] are opposite.
  if (((bits ^ (bits << 1)) & 0x40000000) == 0)
    return -1;

  // Assemble the 8-bit immediate: a b cdefgh.
  // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
  return (bit7 | bit6 | bit5_to_0);
}
57
// Returns the 8-bit "imm8" encoding of a 64-bit double bit-pattern for use with
// fmov (immediate), or -1 if the value is not representable in that form.
int32_t Arm64Mir2Lir::EncodeImmDouble(uint64_t bits) {
  /*
   * Valid values will have the form:
   *
   *   aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
   *   0000.0000.0000.0000.0000.0000.0000.0000
   *
   * where B = not(b).
   */

  // bits[47..0] are cleared.
  if ((bits & UINT64_C(0xffffffffffff)) != 0)
    return -1;

  // bits[61..54] are all set or all cleared.
  uint32_t b_pattern = (bits >> 48) & 0x3fc0;
  if (b_pattern != 0 && b_pattern != 0x3fc0)
    return -1;

  // bit[62] and bit[61] are opposite.
  if (((bits ^ (bits << 1)) & UINT64_C(0x4000000000000000)) == 0)
    return -1;

  // Assemble the 8-bit immediate: a b cdefgh.
  // bit7: a000.0000
  uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
  // bit6: 0b00.0000
  uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
  // bit5_to_0: 00cd.efgh
  uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
  return (bit7 | bit6 | bit5_to_0);
}
89
Serban Constantinescu63999682014-07-15 17:44:21 +010090size_t Arm64Mir2Lir::GetLoadStoreSize(LIR* lir) {
91 bool opcode_is_wide = IS_WIDE(lir->opcode);
92 ArmOpcode opcode = UNWIDE(lir->opcode);
93 DCHECK(!IsPseudoLirOp(opcode));
94 const ArmEncodingMap *encoder = &EncodingMap[opcode];
95 uint32_t bits = opcode_is_wide ? encoder->xskeleton : encoder->wskeleton;
96 return (bits >> 30);
97}
98
99size_t Arm64Mir2Lir::GetInstructionOffset(LIR* lir) {
100 size_t offset = lir->operands[2];
101 uint64_t check_flags = GetTargetInstFlags(lir->opcode);
102 DCHECK((check_flags & IS_LOAD) || (check_flags & IS_STORE));
103 if (check_flags & SCALED_OFFSET_X0) {
104 DCHECK(check_flags & IS_TERTIARY_OP);
105 offset = offset * (1 << GetLoadStoreSize(lir));
106 }
107 return offset;
108}
109
Matteo Franchinc41e6dc2014-06-13 19:16:28 +0100110LIR* Arm64Mir2Lir::LoadFPConstantValue(RegStorage r_dest, int32_t value) {
111 DCHECK(r_dest.IsSingle());
Matteo Franchin43ec8732014-03-31 15:00:14 +0100112 if (value == 0) {
Matteo Franchinc41e6dc2014-06-13 19:16:28 +0100113 return NewLIR2(kA64Fmov2sw, r_dest.GetReg(), rwzr);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100114 } else {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100115 int32_t encoded_imm = EncodeImmSingle((uint32_t)value);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100116 if (encoded_imm >= 0) {
Matteo Franchinc41e6dc2014-06-13 19:16:28 +0100117 return NewLIR2(kA64Fmov2fI, r_dest.GetReg(), encoded_imm);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100118 }
119 }
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100120
Matteo Franchin43ec8732014-03-31 15:00:14 +0100121 LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
122 if (data_target == NULL) {
Andreas Gampef9879272014-06-18 23:19:07 -0700123 // Wide, as we need 8B alignment.
124 data_target = AddWideData(&literal_list_, value, 0);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100125 }
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100126
Vladimir Marko8dea81c2014-06-06 14:50:36 +0100127 ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100128 LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kA64Ldr2fp,
Matteo Franchinc41e6dc2014-06-13 19:16:28 +0100129 r_dest.GetReg(), 0, 0, 0, 0, data_target);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100130 AppendLIR(load_pc_rel);
131 return load_pc_rel;
132}
133
Matteo Franchinc41e6dc2014-06-13 19:16:28 +0100134LIR* Arm64Mir2Lir::LoadFPConstantValueWide(RegStorage r_dest, int64_t value) {
135 DCHECK(r_dest.IsDouble());
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100136 if (value == 0) {
Matteo Franchinc41e6dc2014-06-13 19:16:28 +0100137 return NewLIR2(kA64Fmov2Sx, r_dest.GetReg(), rxzr);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100138 } else {
139 int32_t encoded_imm = EncodeImmDouble(value);
140 if (encoded_imm >= 0) {
Matteo Franchinc41e6dc2014-06-13 19:16:28 +0100141 return NewLIR2(FWIDE(kA64Fmov2fI), r_dest.GetReg(), encoded_imm);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100142 }
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100143 }
144
145 // No short form - load from the literal pool.
146 int32_t val_lo = Low32Bits(value);
147 int32_t val_hi = High32Bits(value);
148 LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
149 if (data_target == NULL) {
150 data_target = AddWideData(&literal_list_, val_lo, val_hi);
151 }
152
Vladimir Marko8dea81c2014-06-06 14:50:36 +0100153 ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100154 LIR* load_pc_rel = RawLIR(current_dalvik_offset_, FWIDE(kA64Ldr2fp),
Matteo Franchinc41e6dc2014-06-13 19:16:28 +0100155 r_dest.GetReg(), 0, 0, 0, 0, data_target);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100156 AppendLIR(load_pc_rel);
157 return load_pc_rel;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100158}
159
// Counts leading zero bits of |value|, interpreted as 64-bit when |is_wide|
// is true and as its low 32 bits otherwise. Undefined for a zero input
// (the gcc/clang clz builtins require a non-zero argument).
static int CountLeadingZeros(bool is_wide, uint64_t value) {
  if (is_wide) {
    return __builtin_clzll(value);
  }
  return __builtin_clz(static_cast<uint32_t>(value));
}
Matteo Franchin43ec8732014-03-31 15:00:14 +0100163
// Counts trailing zero bits of |value|, interpreted as 64-bit when |is_wide|
// is true and as its low 32 bits otherwise. Undefined for a zero input
// (the gcc/clang ctz builtins require a non-zero argument).
static int CountTrailingZeros(bool is_wide, uint64_t value) {
  if (is_wide) {
    return __builtin_ctzll(value);
  }
  return __builtin_ctz(static_cast<uint32_t>(value));
}
167
// Counts the set bits (population count) of |value|, interpreted as 64-bit
// when |is_wide| is true and as its low 32 bits otherwise.
static int CountSetBits(bool is_wide, uint64_t value) {
  if (is_wide) {
    return __builtin_popcountll(value);
  }
  return __builtin_popcount(static_cast<uint32_t>(value));
}
172
/**
 * @brief Try encoding an immediate in the form required by logical instructions.
 *
 * @param is_wide Whether @p value is a 64-bit (as opposed to 32-bit) value.
 * @param value An integer to be encoded. This is interpreted as 64-bit if @p is_wide is true and as
 *   32-bit if @p is_wide is false.
 * @return A non-negative integer containing the encoded immediate (N:immr:imms packed as
 *   bit 12, bits 11..6 and bits 5..0 respectively) or -1 if the encoding failed.
 * @note This is the inverse of Arm64Mir2Lir::DecodeLogicalImmediate().
 */
int Arm64Mir2Lir::EncodeLogicalImmediate(bool is_wide, uint64_t value) {
  unsigned n, imm_s, imm_r;

  // Logical immediates are encoded using parameters n, imm_s and imm_r using
  // the following table:
  //
  //    N   imms    immr    size        S             R
  //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  //    (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //
  // To test if an arbitary immediate can be encoded using this scheme, an
  // iterative algorithm is used.
  //

  // 1. If the value has all set or all clear bits, it can't be encoded.
  //    This also guards the clz/ctz builtins below against zero arguments.
  if (value == 0 || value == ~UINT64_C(0) ||
      (!is_wide && (uint32_t)value == ~UINT32_C(0))) {
    return -1;
  }

  unsigned lead_zero = CountLeadingZeros(is_wide, value);
  unsigned lead_one = CountLeadingZeros(is_wide, ~value);
  unsigned trail_zero = CountTrailingZeros(is_wide, value);
  unsigned trail_one = CountTrailingZeros(is_wide, ~value);
  unsigned set_bits = CountSetBits(is_wide, value);

  // The fixed bits in the immediate s field.
  // If width == 64 (X reg), start at 0xFFFFFF80.
  // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
  // widths won't be executed.
  unsigned width = (is_wide) ? 64 : 32;
  int imm_s_fixed = (is_wide) ? -128 : -64;
  int imm_s_mask = 0x3f;

  for (;;) {
    // 2. If the value is two bits wide, it can be encoded.
    if (width == 2) {
      n = 0;
      imm_s = 0x3C;
      imm_r = (value & 3) - 1;  // value & 3 is 1 or 2 here (0 and 3 excluded above).
      break;
    }

    n = (width == 64) ? 1 : 0;
    imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
    if ((lead_zero + set_bits) == width) {
      imm_r = 0;
    } else {
      imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
    }

    // 3. If the sum of leading zeros, trailing zeros and set bits is
    //    equal to the bit width of the value, it can be encoded.
    if (lead_zero + trail_zero + set_bits == width) {
      break;
    }

    // 4. If the sum of leading ones, trailing ones and unset bits in the
    //    value is equal to the bit width of the value, it can be encoded.
    if (lead_one + trail_one + (width - set_bits) == width) {
      break;
    }

    // 5. If the most-significant half of the bitwise value is equal to
    //    the least-significant half, return to step 2 using the
    //    least-significant half of the value.
    uint64_t mask = (UINT64_C(1) << (width >> 1)) - 1;
    if ((value & mask) == ((value >> (width >> 1)) & mask)) {
      width >>= 1;
      set_bits >>= 1;
      imm_s_fixed >>= 1;
      continue;
    }

    // 6. Otherwise, the value can't be encoded.
    return -1;
  }

  return (n << 12 | imm_r << 6 | imm_s);
}
271
// Reports whether a 32-bit constant is cheap to rematerialize. Currently a
// conservative stub: always claims "expensive" so constants get pooled/cached.
// The disabled expression shows the intended arm-style check.
bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value) {
  return false;  // (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
}
275
276bool Arm64Mir2Lir::InexpensiveConstantFloat(int32_t value) {
277 return EncodeImmSingle(value) >= 0;
278}
279
280bool Arm64Mir2Lir::InexpensiveConstantLong(int64_t value) {
281 return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
282}
283
284bool Arm64Mir2Lir::InexpensiveConstantDouble(int64_t value) {
285 return EncodeImmDouble(value) >= 0;
286}
287
/*
 * Load an immediate using one single instruction when possible; otherwise
 * use a pair of movz and movk instructions.
 *
 * No additional register clobbering operation performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp or
 * 2) The codegen is under fixed register usage
 */
LIR* Arm64Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
  LIR* res;

  // FP destinations have their own path (fmov / literal pool).
  if (r_dest.IsFloat()) {
    return LoadFPConstantValue(r_dest, value);
  }

  // 64-bit destinations are delegated to the wide variant.
  if (r_dest.Is64Bit()) {
    return LoadConstantWide(r_dest, value);
  }

  // Loading SP/ZR with an immediate is not supported.
  DCHECK(!A64_REG_IS_SP(r_dest.GetReg()));
  DCHECK(!A64_REG_IS_ZR(r_dest.GetReg()));

  // Compute how many movk, movz instructions are needed to load the value.
  uint16_t high_bits = High16Bits(value);
  uint16_t low_bits = Low16Bits(value);

  // A halfword is "fast" when it is 0x0000 or 0xffff: (h + 1) wraps to 0 or 1.
  bool low_fast = ((uint16_t)(low_bits + 1) <= 1);
  bool high_fast = ((uint16_t)(high_bits + 1) <= 1);

  if (LIKELY(low_fast || high_fast)) {
    // 1 instruction is enough to load the immediate.
    if (LIKELY(low_bits == high_bits)) {
      // Value is either 0 or -1: we can just use wzr.
      ArmOpcode opcode = LIKELY(low_bits == 0) ? kA64Mov2rr : kA64Mvn2rr;
      res = NewLIR2(opcode, r_dest.GetReg(), rwzr);
    } else {
      // One halfword is background (0x0000/0xffff), the other carries data.
      uint16_t uniform_bits, useful_bits;
      int shift;

      if (LIKELY(high_fast)) {
        shift = 0;
        uniform_bits = high_bits;
        useful_bits = low_bits;
      } else {
        shift = 1;
        uniform_bits = low_bits;
        useful_bits = high_bits;
      }

      // movn for an all-ones background, movz for an all-zeros background.
      if (UNLIKELY(uniform_bits != 0)) {
        res = NewLIR3(kA64Movn3rdM, r_dest.GetReg(), ~useful_bits, shift);
      } else {
        res = NewLIR3(kA64Movz3rdM, r_dest.GetReg(), useful_bits, shift);
      }
    }
  } else {
    // movk, movz require 2 instructions. Try detecting logical immediates.
    int log_imm = EncodeLogicalImmediate(/*is_wide=*/false, value);
    if (log_imm >= 0) {
      res = NewLIR3(kA64Orr3Rrl, r_dest.GetReg(), rwzr, log_imm);
    } else {
      // Use 2 instructions.
      res = NewLIR3(kA64Movz3rdM, r_dest.GetReg(), low_bits, 0);
      NewLIR3(kA64Movk3rdM, r_dest.GetReg(), high_bits, 1);
    }
  }

  return res;
}
358
Matteo Franchinc41e6dc2014-06-13 19:16:28 +0100359// TODO: clean up the names. LoadConstantWide() should really be LoadConstantNoClobberWide().
360LIR* Arm64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
361 // Maximum number of instructions to use for encoding the immediate.
362 const int max_num_ops = 2;
363
364 if (r_dest.IsFloat()) {
365 return LoadFPConstantValueWide(r_dest, value);
366 }
367
368 DCHECK(r_dest.Is64Bit());
369
370 // Loading SP/ZR with an immediate is not supported.
371 DCHECK(!A64_REG_IS_SP(r_dest.GetReg()));
372 DCHECK(!A64_REG_IS_ZR(r_dest.GetReg()));
373
374 if (LIKELY(value == INT64_C(0) || value == INT64_C(-1))) {
375 // value is either 0 or -1: we can just use xzr.
376 ArmOpcode opcode = LIKELY(value == 0) ? WIDE(kA64Mov2rr) : WIDE(kA64Mvn2rr);
377 return NewLIR2(opcode, r_dest.GetReg(), rxzr);
378 }
379
380 // At least one in value's halfwords is not 0x0, nor 0xffff: find out how many.
381 int num_0000_halfwords = 0;
382 int num_ffff_halfwords = 0;
383 uint64_t uvalue = static_cast<uint64_t>(value);
384 for (int shift = 0; shift < 64; shift += 16) {
385 uint16_t halfword = static_cast<uint16_t>(uvalue >> shift);
386 if (halfword == 0)
387 num_0000_halfwords++;
388 else if (halfword == UINT16_C(0xffff))
389 num_ffff_halfwords++;
390 }
391 int num_fast_halfwords = std::max(num_0000_halfwords, num_ffff_halfwords);
392
393 if (num_fast_halfwords < 3) {
394 // A single movz/movn is not enough. Try the logical immediate route.
395 int log_imm = EncodeLogicalImmediate(/*is_wide=*/true, value);
396 if (log_imm >= 0) {
397 return NewLIR3(WIDE(kA64Orr3Rrl), r_dest.GetReg(), rxzr, log_imm);
398 }
399 }
400
401 if (num_fast_halfwords >= 4 - max_num_ops) {
402 // We can encode the number using a movz/movn followed by one or more movk.
403 ArmOpcode op;
404 uint16_t background;
405 LIR* res = nullptr;
406
407 // Decide whether to use a movz or a movn.
408 if (num_0000_halfwords >= num_ffff_halfwords) {
409 op = WIDE(kA64Movz3rdM);
410 background = 0;
411 } else {
412 op = WIDE(kA64Movn3rdM);
413 background = 0xffff;
414 }
415
416 // Emit the first instruction (movz, movn).
417 int shift;
418 for (shift = 0; shift < 4; shift++) {
419 uint16_t halfword = static_cast<uint16_t>(uvalue >> (shift << 4));
420 if (halfword != background) {
421 res = NewLIR3(op, r_dest.GetReg(), halfword ^ background, shift);
422 break;
423 }
424 }
425
426 // Emit the movk instructions.
427 for (shift++; shift < 4; shift++) {
428 uint16_t halfword = static_cast<uint16_t>(uvalue >> (shift << 4));
429 if (halfword != background) {
430 NewLIR3(WIDE(kA64Movk3rdM), r_dest.GetReg(), halfword, shift);
431 }
432 }
433 return res;
434 }
435
436 // Use the literal pool.
437 int32_t val_lo = Low32Bits(value);
438 int32_t val_hi = High32Bits(value);
439 LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
440 if (data_target == NULL) {
441 data_target = AddWideData(&literal_list_, val_lo, val_hi);
442 }
443
444 ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
445 LIR *res = RawLIR(current_dalvik_offset_, WIDE(kA64Ldr2rp),
446 r_dest.GetReg(), 0, 0, 0, 0, data_target);
447 AppendLIR(res);
448 return res;
449}
450
Matteo Franchin43ec8732014-03-31 15:00:14 +0100451LIR* Arm64Mir2Lir::OpUnconditionalBranch(LIR* target) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100452 LIR* res = NewLIR1(kA64B1t, 0 /* offset to be patched during assembly */);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100453 res->target = target;
454 return res;
455}
456
457LIR* Arm64Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100458 LIR* branch = NewLIR2(kA64B2ct, ArmConditionEncoding(cc),
459 0 /* offset to be patched */);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100460 branch->target = target;
461 return branch;
462}
463
464LIR* Arm64Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100465 ArmOpcode opcode = kA64Brk1d;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100466 switch (op) {
467 case kOpBlx:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100468 opcode = kA64Blr1x;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100469 break;
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100470 // TODO(Arm64): port kThumbBx.
471 // case kOpBx:
472 // opcode = kThumbBx;
473 // break;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100474 default:
475 LOG(FATAL) << "Bad opcode " << op;
476 }
477 return NewLIR1(opcode, r_dest_src.GetReg());
478}
479
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100480LIR* Arm64Mir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r_src2, int shift) {
481 ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
482 CHECK_EQ(r_dest_src1.Is64Bit(), r_src2.Is64Bit());
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100483 ArmOpcode opcode = kA64Brk1d;
484
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100485 switch (op) {
Matteo Franchin43ec8732014-03-31 15:00:14 +0100486 case kOpCmn:
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100487 opcode = kA64Cmn3rro;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100488 break;
489 case kOpCmp:
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100490 opcode = kA64Cmp3rro;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100491 break;
492 case kOpMov:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100493 opcode = kA64Mov2rr;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100494 break;
495 case kOpMvn:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100496 opcode = kA64Mvn2rr;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100497 break;
498 case kOpNeg:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100499 opcode = kA64Neg3rro;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100500 break;
501 case kOpTst:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100502 opcode = kA64Tst3rro;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100503 break;
504 case kOpRev:
505 DCHECK_EQ(shift, 0);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100506 // Binary, but rm is encoded twice.
Serban Constantinescu169489b2014-06-11 16:43:35 +0100507 return NewLIR2(kA64Rev2rr | wide, r_dest_src1.GetReg(), r_src2.GetReg());
Matteo Franchin43ec8732014-03-31 15:00:14 +0100508 break;
509 case kOpRevsh:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100510 // Binary, but rm is encoded twice.
Zheng Xua3fe7422014-07-09 14:03:15 +0800511 NewLIR2(kA64Rev162rr | wide, r_dest_src1.GetReg(), r_src2.GetReg());
512 // "sxth r1, r2" is "sbfm r1, r2, #0, #15"
513 return NewLIR4(kA64Sbfm4rrdd | wide, r_dest_src1.GetReg(), r_dest_src1.GetReg(), 0, 15);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100514 break;
515 case kOp2Byte:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100516 DCHECK_EQ(shift, ENCODE_NO_SHIFT);
517 // "sbfx r1, r2, #imm1, #imm2" is "sbfm r1, r2, #imm1, #(imm1 + imm2 - 1)".
518 // For now we use sbfm directly.
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100519 return NewLIR4(kA64Sbfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 7);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100520 case kOp2Short:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100521 DCHECK_EQ(shift, ENCODE_NO_SHIFT);
522 // For now we use sbfm rather than its alias, sbfx.
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100523 return NewLIR4(kA64Sbfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 15);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100524 case kOp2Char:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100525 // "ubfx r1, r2, #imm1, #imm2" is "ubfm r1, r2, #imm1, #(imm1 + imm2 - 1)".
526 // For now we use ubfm directly.
527 DCHECK_EQ(shift, ENCODE_NO_SHIFT);
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100528 return NewLIR4(kA64Ubfm4rrdd | wide, r_dest_src1.GetReg(), r_src2.GetReg(), 0, 15);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100529 default:
Serban Constantinescued65c5e2014-05-22 15:10:18 +0100530 return OpRegRegRegShift(op, r_dest_src1, r_dest_src1, r_src2, shift);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100531 }
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100532
Matteo Franchin43ec8732014-03-31 15:00:14 +0100533 DCHECK(!IsPseudoLirOp(opcode));
534 if (EncodingMap[opcode].flags & IS_BINARY_OP) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100535 DCHECK_EQ(shift, ENCODE_NO_SHIFT);
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100536 return NewLIR2(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg());
Matteo Franchin43ec8732014-03-31 15:00:14 +0100537 } else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100538 ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100539 if (kind == kFmtShift) {
540 return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(), shift);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100541 }
Matteo Franchin43ec8732014-03-31 15:00:14 +0100542 }
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100543
544 LOG(FATAL) << "Unexpected encoding operand count";
545 return NULL;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100546}
547
Zheng Xucedee472014-07-01 09:53:22 +0800548LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage r_src2,
549 A64RegExtEncodings ext, uint8_t amount) {
Stuart Monteithf8ec48e2014-06-06 17:05:08 +0100550 ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
551 ArmOpcode opcode = kA64Brk1d;
552
553 switch (op) {
554 case kOpCmn:
555 opcode = kA64Cmn3Rre;
556 break;
557 case kOpCmp:
558 opcode = kA64Cmp3Rre;
559 break;
Zheng Xucedee472014-07-01 09:53:22 +0800560 case kOpAdd:
561 // Note: intentional fallthrough
562 case kOpSub:
563 return OpRegRegRegExtend(op, r_dest_src1, r_dest_src1, r_src2, ext, amount);
564 break;
Stuart Monteithf8ec48e2014-06-06 17:05:08 +0100565 default:
566 LOG(FATAL) << "Bad Opcode: " << opcode;
567 break;
568 }
569
570 DCHECK(!IsPseudoLirOp(opcode));
571 if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
572 ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
573 if (kind == kFmtExtend) {
Zheng Xucedee472014-07-01 09:53:22 +0800574 return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(),
575 EncodeExtend(ext, amount));
Stuart Monteithf8ec48e2014-06-06 17:05:08 +0100576 }
577 }
578
579 LOG(FATAL) << "Unexpected encoding operand count";
580 return NULL;
581}
582
Matteo Franchin43ec8732014-03-31 15:00:14 +0100583LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
Stuart Monteithf8ec48e2014-06-06 17:05:08 +0100584 /* RegReg operations with SP in first parameter need extended register instruction form.
Zheng Xucedee472014-07-01 09:53:22 +0800585 * Only CMN, CMP, ADD & SUB instructions are implemented.
Stuart Monteithf8ec48e2014-06-06 17:05:08 +0100586 */
Zheng Xubaa7c882014-06-30 14:26:50 +0800587 if (r_dest_src1 == rs_sp) {
Zheng Xucedee472014-07-01 09:53:22 +0800588 return OpRegRegExtend(op, r_dest_src1, r_src2, kA64Uxtx, 0);
Stuart Monteithf8ec48e2014-06-06 17:05:08 +0100589 } else {
590 return OpRegRegShift(op, r_dest_src1, r_src2, ENCODE_NO_SHIFT);
591 }
Matteo Franchin43ec8732014-03-31 15:00:14 +0100592}
593
// Register<-memory move with an explicit MoveType: not implemented for the
// arm64 backend; aborts if ever reached.
LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
  UNIMPLEMENTED(FATAL);
  return nullptr;
}
598
// Memory<-register move with an explicit MoveType: not implemented for the
// arm64 backend; aborts if ever reached.
LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
  UNIMPLEMENTED(FATAL);
  return nullptr;
}
603
604LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100605 LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm64";
Matteo Franchin43ec8732014-03-31 15:00:14 +0100606 return NULL;
607}
608
// Emits a three-register operation r_dest = r_src1 <op> (r_src2, shift).
// Width (w/x) follows r_dest; all three registers must agree. Ops mapping to
// 4-operand encodings take |shift| as the last operand; 3-operand encodings
// require shift == ENCODE_NO_SHIFT.
LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
                                    RegStorage r_src2, int shift) {
  ArmOpcode opcode = kA64Brk1d;

  switch (op) {
    case kOpAdd:
      opcode = kA64Add4rrro;
      break;
    case kOpSub:
      opcode = kA64Sub4rrro;
      break;
    // case kOpRsub:
    //   opcode = kA64RsubWWW;
    //   break;
    case kOpAdc:
      opcode = kA64Adc3rrr;
      break;
    case kOpAnd:
      opcode = kA64And4rrro;
      break;
    case kOpXor:
      opcode = kA64Eor4rrro;
      break;
    case kOpMul:
      opcode = kA64Mul3rrr;
      break;
    case kOpDiv:
      opcode = kA64Sdiv3rrr;
      break;
    case kOpOr:
      opcode = kA64Orr4rrro;
      break;
    case kOpSbc:
      opcode = kA64Sbc3rrr;
      break;
    case kOpLsl:
      opcode = kA64Lsl3rrr;
      break;
    case kOpLsr:
      opcode = kA64Lsr3rrr;
      break;
    case kOpAsr:
      opcode = kA64Asr3rrr;
      break;
    case kOpRor:
      opcode = kA64Ror3rrr;
      break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }

  // The instructions above belong to two kinds:
  // - 4-operands instructions, where the last operand is a shift/extend immediate,
  // - 3-operands instructions with no shift/extend.
  ArmOpcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;
  CHECK_EQ(r_dest.Is64Bit(), r_src1.Is64Bit());
  CHECK_EQ(r_dest.Is64Bit(), r_src2.Is64Bit());
  if (EncodingMap[opcode].flags & IS_QUAD_OP) {
    DCHECK(!IsExtendEncoding(shift));
    return NewLIR4(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), shift);
  } else {
    DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
    DCHECK_EQ(shift, ENCODE_NO_SHIFT);
    return NewLIR3(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
  }
}
676
// Emits r_dest = r_src1 <op> extend(r_src2, ext, amount) for kOpAdd/kOpSub.
// Width (w/x) follows r_dest; for a wide op a 32-bit r_src2 is up-converted
// to its 64-bit register name (an encoding convention of this backend, not of
// the architecture).
LIR* Arm64Mir2Lir::OpRegRegRegExtend(OpKind op, RegStorage r_dest, RegStorage r_src1,
                                     RegStorage r_src2, A64RegExtEncodings ext, uint8_t amount) {
  ArmOpcode opcode = kA64Brk1d;

  switch (op) {
    case kOpAdd:
      opcode = kA64Add4RRre;
      break;
    case kOpSub:
      opcode = kA64Sub4RRre;
      break;
    default:
      LOG(FATAL) << "Unimplemented opcode: " << op;
      break;
  }
  ArmOpcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;

  if (r_dest.Is64Bit()) {
    CHECK(r_src1.Is64Bit());

    // dest determines whether the op is wide or not. Up-convert src2 when necessary.
    // Note: this is not according to aarch64 specifications, but our encoding.
    if (!r_src2.Is64Bit()) {
      r_src2 = As64BitReg(r_src2);
    }
  } else {
    CHECK(!r_src1.Is64Bit());
    CHECK(!r_src2.Is64Bit());
  }

  // Sanity checks.
  //   1) Amount is in the range 0..4
  CHECK_LE(amount, 4);

  return NewLIR4(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(),
                 EncodeExtend(ext, amount));
}
714
// Convenience wrapper: three-register op with no shift applied to r_src2.
LIR* Arm64Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, RegStorage r_src2) {
  return OpRegRegRegShift(op, r_dest, r_src1, r_src2, ENCODE_NO_SHIFT);
}
718
719LIR* Arm64Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
Zheng Xue2eb29e2014-06-12 10:22:33 +0800720 return OpRegRegImm64(op, r_dest, r_src1, static_cast<int64_t>(value));
721}
722
723LIR* Arm64Mir2Lir::OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1, int64_t value) {
Matteo Franchin43ec8732014-03-31 15:00:14 +0100724 LIR* res;
725 bool neg = (value < 0);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100726 int64_t abs_value = (neg) ? -value : value;
727 ArmOpcode opcode = kA64Brk1d;
728 ArmOpcode alt_opcode = kA64Brk1d;
729 int32_t log_imm = -1;
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100730 bool is_wide = r_dest.Is64Bit();
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100731 ArmOpcode wide = (is_wide) ? WIDE(0) : UNWIDE(0);
Andreas Gampe9f975bf2014-06-18 17:45:32 -0700732 int info = 0;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100733
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100734 switch (op) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100735 case kOpLsl: {
736 // "lsl w1, w2, #imm" is an alias of "ubfm w1, w2, #(-imm MOD 32), #(31-imm)"
Zheng Xu2d41a652014-06-09 11:05:31 +0800737 // and "lsl x1, x2, #imm" of "ubfm x1, x2, #(-imm MOD 64), #(63-imm)".
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100738 // For now, we just use ubfm directly.
Zheng Xu2d41a652014-06-09 11:05:31 +0800739 int max_value = (is_wide) ? 63 : 31;
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100740 return NewLIR4(kA64Ubfm4rrdd | wide, r_dest.GetReg(), r_src1.GetReg(),
Zheng Xu2d41a652014-06-09 11:05:31 +0800741 (-value) & max_value, max_value - value);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100742 }
Matteo Franchin43ec8732014-03-31 15:00:14 +0100743 case kOpLsr:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100744 return NewLIR3(kA64Lsr3rrd | wide, r_dest.GetReg(), r_src1.GetReg(), value);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100745 case kOpAsr:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100746 return NewLIR3(kA64Asr3rrd | wide, r_dest.GetReg(), r_src1.GetReg(), value);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100747 case kOpRor:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100748 // "ror r1, r2, #imm" is an alias of "extr r1, r2, r2, #imm".
749 // For now, we just use extr directly.
750 return NewLIR4(kA64Extr4rrrd | wide, r_dest.GetReg(), r_src1.GetReg(), r_src1.GetReg(),
751 value);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100752 case kOpAdd:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100753 neg = !neg;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100754 // Note: intentional fallthrough
755 case kOpSub:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100756 // Add and sub below read/write sp rather than xzr.
757 if (abs_value < 0x1000) {
758 opcode = (neg) ? kA64Add4RRdT : kA64Sub4RRdT;
759 return NewLIR4(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), abs_value, 0);
760 } else if ((abs_value & UINT64_C(0xfff)) == 0 && ((abs_value >> 12) < 0x1000)) {
761 opcode = (neg) ? kA64Add4RRdT : kA64Sub4RRdT;
762 return NewLIR4(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), abs_value >> 12, 1);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100763 } else {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100764 log_imm = -1;
Vladimir Marko903989d2014-07-01 17:21:18 +0100765 alt_opcode = (op == kOpAdd) ? kA64Add4RRre : kA64Sub4RRre;
Andreas Gampe47b31aa2014-06-19 01:10:07 -0700766 info = EncodeExtend(is_wide ? kA64Uxtx : kA64Uxtw, 0);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100767 }
768 break;
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100769 // case kOpRsub:
770 // opcode = kThumb2RsubRRI8M;
771 // alt_opcode = kThumb2RsubRRR;
772 // break;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100773 case kOpAdc:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100774 log_imm = -1;
775 alt_opcode = kA64Adc3rrr;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100776 break;
777 case kOpSbc:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100778 log_imm = -1;
779 alt_opcode = kA64Sbc3rrr;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100780 break;
781 case kOpOr:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100782 log_imm = EncodeLogicalImmediate(is_wide, value);
783 opcode = kA64Orr3Rrl;
784 alt_opcode = kA64Orr4rrro;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100785 break;
786 case kOpAnd:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100787 log_imm = EncodeLogicalImmediate(is_wide, value);
788 opcode = kA64And3Rrl;
789 alt_opcode = kA64And4rrro;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100790 break;
791 case kOpXor:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100792 log_imm = EncodeLogicalImmediate(is_wide, value);
793 opcode = kA64Eor3Rrl;
794 alt_opcode = kA64Eor4rrro;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100795 break;
796 case kOpMul:
797 // TUNING: power of 2, shift & add
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100798 log_imm = -1;
799 alt_opcode = kA64Mul3rrr;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100800 break;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100801 default:
802 LOG(FATAL) << "Bad opcode: " << op;
803 }
804
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100805 if (log_imm >= 0) {
806 return NewLIR3(opcode | wide, r_dest.GetReg(), r_src1.GetReg(), log_imm);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100807 } else {
Andreas Gampe9f975bf2014-06-18 17:45:32 -0700808 RegStorage r_scratch;
Andreas Gampe47b31aa2014-06-19 01:10:07 -0700809 if (is_wide) {
Zheng Xue2eb29e2014-06-12 10:22:33 +0800810 r_scratch = AllocTempWide();
811 LoadConstantWide(r_scratch, value);
812 } else {
813 r_scratch = AllocTemp();
814 LoadConstant(r_scratch, value);
815 }
Matteo Franchin43ec8732014-03-31 15:00:14 +0100816 if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
Andreas Gampe9f975bf2014-06-18 17:45:32 -0700817 res = NewLIR4(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), info);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100818 else
Zheng Xue2eb29e2014-06-12 10:22:33 +0800819 res = NewLIR3(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
Matteo Franchin43ec8732014-03-31 15:00:14 +0100820 FreeTemp(r_scratch);
821 return res;
822 }
823}
824
Matteo Franchin43ec8732014-03-31 15:00:14 +0100825LIR* Arm64Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
Serban Constantinescued65c5e2014-05-22 15:10:18 +0100826 return OpRegImm64(op, r_dest_src1, static_cast<int64_t>(value));
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100827}
828
Serban Constantinescued65c5e2014-05-22 15:10:18 +0100829LIR* Arm64Mir2Lir::OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value) {
830 ArmOpcode wide = (r_dest_src1.Is64Bit()) ? WIDE(0) : UNWIDE(0);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100831 ArmOpcode opcode = kA64Brk1d;
832 ArmOpcode neg_opcode = kA64Brk1d;
833 bool shift;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100834 bool neg = (value < 0);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100835 uint64_t abs_value = (neg) ? -value : value;
836
837 if (LIKELY(abs_value < 0x1000)) {
838 // abs_value is a 12-bit immediate.
839 shift = false;
840 } else if ((abs_value & UINT64_C(0xfff)) == 0 && ((abs_value >> 12) < 0x1000)) {
841 // abs_value is a shifted 12-bit immediate.
842 shift = true;
843 abs_value >>= 12;
Zheng Xue2eb29e2014-06-12 10:22:33 +0800844 } else if (LIKELY(abs_value < 0x1000000 && (op == kOpAdd || op == kOpSub))) {
845 // Note: It is better to use two ADD/SUB instead of loading a number to a temp register.
846 // This works for both normal registers and SP.
847 // For a frame size == 0x2468, it will be encoded as:
848 // sub sp, #0x2000
849 // sub sp, #0x468
850 if (neg) {
851 op = (op == kOpAdd) ? kOpSub : kOpAdd;
852 }
853 OpRegImm64(op, r_dest_src1, abs_value & (~INT64_C(0xfff)));
854 return OpRegImm64(op, r_dest_src1, abs_value & 0xfff);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100855 } else {
Zheng Xue2eb29e2014-06-12 10:22:33 +0800856 RegStorage r_tmp;
857 LIR* res;
858 if (IS_WIDE(wide)) {
859 r_tmp = AllocTempWide();
860 res = LoadConstantWide(r_tmp, value);
861 } else {
862 r_tmp = AllocTemp();
863 res = LoadConstant(r_tmp, value);
864 }
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100865 OpRegReg(op, r_dest_src1, r_tmp);
866 FreeTemp(r_tmp);
867 return res;
868 }
869
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100870 switch (op) {
Matteo Franchin43ec8732014-03-31 15:00:14 +0100871 case kOpAdd:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100872 neg_opcode = kA64Sub4RRdT;
873 opcode = kA64Add4RRdT;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100874 break;
875 case kOpSub:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100876 neg_opcode = kA64Add4RRdT;
877 opcode = kA64Sub4RRdT;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100878 break;
879 case kOpCmp:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100880 neg_opcode = kA64Cmn3RdT;
881 opcode = kA64Cmp3RdT;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100882 break;
883 default:
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100884 LOG(FATAL) << "Bad op-kind in OpRegImm: " << op;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100885 break;
886 }
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100887
888 if (UNLIKELY(neg))
889 opcode = neg_opcode;
890
891 if (EncodingMap[opcode].flags & IS_QUAD_OP)
892 return NewLIR4(opcode | wide, r_dest_src1.GetReg(), r_dest_src1.GetReg(), abs_value,
893 (shift) ? 1 : 0);
894 else
895 return NewLIR3(opcode | wide, r_dest_src1.GetReg(), abs_value, (shift) ? 1 : 0);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100896}
897
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100898int Arm64Mir2Lir::EncodeShift(int shift_type, int amount) {
Zheng Xucedee472014-07-01 09:53:22 +0800899 DCHECK_EQ(shift_type & 0x3, shift_type);
900 DCHECK_EQ(amount & 0x3f, amount);
Matteo Franchinc61b3c92014-06-18 11:52:47 +0100901 return ((shift_type & 0x3) << 7) | (amount & 0x3f);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100902}
903
904int Arm64Mir2Lir::EncodeExtend(int extend_type, int amount) {
Zheng Xucedee472014-07-01 09:53:22 +0800905 DCHECK_EQ(extend_type & 0x7, extend_type);
906 DCHECK_EQ(amount & 0x7, amount);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100907 return (1 << 6) | ((extend_type & 0x7) << 3) | (amount & 0x7);
908}
909
910bool Arm64Mir2Lir::IsExtendEncoding(int encoded_value) {
911 return ((1 << 6) & encoded_value) != 0;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100912}
913
/*
 * Load from [r_base + (r_index << scale)] into r_dest.
 *
 * scale must be 0 (no scaling) or match the access size's natural scale
 * (3 for 64-bit, 2 for 32-bit, 1 for halfword); byte loads support no scale.
 * A 32-bit r_index is first sign-extended to 64 bits with an explicit sxtw.
 */
LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                   int scale, OpSize size) {
  LIR* load;
  int expected_scale = 0;  // Natural scale for the chosen opcode; 0 => no scale allowed.
  ArmOpcode opcode = kA64Brk1d;
  r_base = Check64BitReg(r_base);

  // TODO(Arm64): The sign extension of r_index should be carried out by using an extended
  // register offset load (rather than doing the sign extension in a separate instruction).
  if (r_index.Is32Bit()) {
    // Assemble: ``sxtw xN, wN''.
    r_index = As64BitReg(r_index);
    NewLIR4(WIDE(kA64Sbfm4rrdd), r_index.GetReg(), r_index.GetReg(), 0, 31);
  }

  if (r_dest.IsFloat()) {
    // FP destinations take the ldr (register offset) FP forms directly.
    if (r_dest.IsDouble()) {
      DCHECK(size == k64 || size == kDouble);
      expected_scale = 3;
      opcode = FWIDE(kA64Ldr4fXxG);
    } else {
      DCHECK(r_dest.IsSingle());
      DCHECK(size == k32 || size == kSingle);
      expected_scale = 2;
      opcode = kA64Ldr4fXxG;
    }

    DCHECK(scale == 0 || scale == expected_scale);
    return NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(),
                   (scale != 0) ? 1 : 0);
  }

  // Core-register loads: select opcode and natural scale by access size.
  switch (size) {
    case kDouble:
    case kWord:
    case k64:
      r_dest = Check64BitReg(r_dest);
      opcode = WIDE(kA64Ldr4rXxG);
      expected_scale = 3;
      break;
    case kSingle:     // Intentional fall-through.
    case k32:         // Intentional fall-through.
    case kReference:
      r_dest = Check32BitReg(r_dest);
      opcode = kA64Ldr4rXxG;
      expected_scale = 2;
      break;
    case kUnsignedHalf:
      r_dest = Check32BitReg(r_dest);
      opcode = kA64Ldrh4wXxd;
      expected_scale = 1;
      break;
    case kSignedHalf:
      r_dest = Check32BitReg(r_dest);
      opcode = kA64Ldrsh4rXxd;
      expected_scale = 1;
      break;
    case kUnsignedByte:
      r_dest = Check32BitReg(r_dest);
      opcode = kA64Ldrb3wXx;
      break;
    case kSignedByte:
      r_dest = Check32BitReg(r_dest);
      opcode = kA64Ldrsb3rXx;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  if (UNLIKELY(expected_scale == 0)) {
    // This is a tertiary op (e.g. ldrb, ldrsb); it does not support scale.
    DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U);
    DCHECK_EQ(scale, 0);
    load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
  } else {
    DCHECK(scale == 0 || scale == expected_scale);
    // Final operand is a flag: scale the index by the access size or not.
    load = NewLIR4(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(),
                   (scale != 0) ? 1 : 0);
  }

  return load;
}
996
Matteo Franchin255e0142014-07-04 13:50:41 +0100997LIR* Arm64Mir2Lir::LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
998 int scale) {
999 return LoadBaseIndexed(r_base, r_index, As32BitReg(r_dest), scale, kReference);
Andreas Gampe3c12c512014-06-24 18:46:29 +00001000}
1001
/*
 * Store r_src to [r_base + (r_index << scale)].
 *
 * Mirrors LoadBaseIndexed: scale must be 0 or the access size's natural
 * scale; byte stores support no scale. A 32-bit r_index is first
 * sign-extended to 64 bits with an explicit sxtw.
 */
LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                    int scale, OpSize size) {
  LIR* store;
  int expected_scale = 0;  // Natural scale for the chosen opcode; 0 => no scale allowed.
  ArmOpcode opcode = kA64Brk1d;
  r_base = Check64BitReg(r_base);

  // TODO(Arm64): The sign extension of r_index should be carried out by using an extended
  // register offset store (rather than doing the sign extension in a separate instruction).
  if (r_index.Is32Bit()) {
    // Assemble: ``sxtw xN, wN''.
    r_index = As64BitReg(r_index);
    NewLIR4(WIDE(kA64Sbfm4rrdd), r_index.GetReg(), r_index.GetReg(), 0, 31);
  }

  if (r_src.IsFloat()) {
    // FP sources take the str (register offset) FP forms directly.
    if (r_src.IsDouble()) {
      DCHECK(size == k64 || size == kDouble);
      expected_scale = 3;
      opcode = FWIDE(kA64Str4fXxG);
    } else {
      DCHECK(r_src.IsSingle());
      DCHECK(size == k32 || size == kSingle);
      expected_scale = 2;
      opcode = kA64Str4fXxG;
    }

    DCHECK(scale == 0 || scale == expected_scale);
    return NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(),
                   (scale != 0) ? 1 : 0);
  }

  // Core-register stores: select opcode and natural scale by access size.
  // Signed and unsigned variants use the same store instruction.
  switch (size) {
    case kDouble:     // Intentional fall-through.
    case kWord:       // Intentional fall-through.
    case k64:
      r_src = Check64BitReg(r_src);
      opcode = WIDE(kA64Str4rXxG);
      expected_scale = 3;
      break;
    case kSingle:     // Intentional fall-through.
    case k32:         // Intentional fall-through.
    case kReference:
      r_src = Check32BitReg(r_src);
      opcode = kA64Str4rXxG;
      expected_scale = 2;
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      r_src = Check32BitReg(r_src);
      opcode = kA64Strh4wXxd;
      expected_scale = 1;
      break;
    case kUnsignedByte:
    case kSignedByte:
      r_src = Check32BitReg(r_src);
      opcode = kA64Strb3wXx;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  if (UNLIKELY(expected_scale == 0)) {
    // This is a tertiary op (e.g. strb); it does not support scale.
    DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U);
    DCHECK_EQ(scale, 0);
    store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg());
  } else {
    // Final operand is a flag: scale the index by the access size or not.
    store = NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(),
                    (scale != 0) ? 1 : 0);
  }

  return store;
}
1076
Matteo Franchin255e0142014-07-04 13:50:41 +01001077LIR* Arm64Mir2Lir::StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
1078 int scale) {
1079 return StoreBaseIndexed(r_base, r_index, As32BitReg(r_src), scale, kReference);
Andreas Gampe3c12c512014-06-24 18:46:29 +00001080}
1081
/*
 * Load value from base + displacement into r_dest.
 *
 * Picks the best available addressing mode: a scaled unsigned 12-bit offset
 * (ldr #imm12), an unscaled signed 9-bit offset (ldur) when that form exists
 * for the size, or — failing both — materializes the displacement into a
 * temp register and issues a register-offset load.
 */
LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
                                    OpSize size) {
  LIR* load = NULL;
  ArmOpcode opcode = kA64Brk1d;
  ArmOpcode alt_opcode = kA64Brk1d;  // Unscaled (ldur) variant, where one exists.
  int scale = 0;

  switch (size) {
    case kDouble:     // Intentional fall-through.
    case kWord:       // Intentional fall-through.
    case k64:
      r_dest = Check64BitReg(r_dest);
      scale = 3;
      if (r_dest.IsFloat()) {
        DCHECK(r_dest.IsDouble());
        opcode = FWIDE(kA64Ldr3fXD);
        alt_opcode = FWIDE(kA64Ldur3fXd);
      } else {
        opcode = WIDE(kA64Ldr3rXD);
        alt_opcode = WIDE(kA64Ldur3rXd);
      }
      break;
    case kSingle:     // Intentional fall-through.
    case k32:         // Intentional fall-through.
    case kReference:
      r_dest = Check32BitReg(r_dest);
      scale = 2;
      if (r_dest.IsFloat()) {
        DCHECK(r_dest.IsSingle());
        opcode = kA64Ldr3fXD;
      } else {
        opcode = kA64Ldr3rXD;
      }
      break;
    case kUnsignedHalf:
      scale = 1;
      opcode = kA64Ldrh3wXF;
      break;
    case kSignedHalf:
      scale = 1;
      opcode = kA64Ldrsh3rXF;
      break;
    case kUnsignedByte:
      opcode = kA64Ldrb3wXd;
      break;
    case kSignedByte:
      opcode = kA64Ldrsb3rXd;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  bool displacement_is_aligned = (displacement & ((1 << scale) - 1)) == 0;
  int scaled_disp = displacement >> scale;
  if (displacement_is_aligned && scaled_disp >= 0 && scaled_disp < 4096) {
    // Can use scaled load.
    load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), scaled_disp);
  } else if (alt_opcode != kA64Brk1d && IS_SIGNED_IMM9(displacement)) {
    // Can use unscaled load.
    load = NewLIR3(alt_opcode, r_dest.GetReg(), r_base.GetReg(), displacement);
  } else {
    // Use long sequence.
    // TODO: cleaner support for index/displacement registers? Not a reference, but must match width.
    RegStorage r_scratch = AllocTempWide();
    LoadConstantWide(r_scratch, displacement);
    load = LoadBaseIndexed(r_base, r_scratch, r_dest, 0, size);
    FreeTemp(r_scratch);
  }

  // TODO: in future may need to differentiate Dalvik accesses w/ spills
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    // Dalvik register accesses are always SP-relative.
    DCHECK(r_base == rs_sp);
    AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
  }
  return load;
}
1163
Andreas Gampe3c12c512014-06-24 18:46:29 +00001164LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
1165 OpSize size, VolatileKind is_volatile) {
Vladimir Marko674744e2014-04-24 15:18:26 +01001166 // LoadBaseDisp() will emit correct insn for atomic load on arm64
1167 // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
Andreas Gampe3c12c512014-06-24 18:46:29 +00001168
1169 LIR* load = LoadBaseDispBody(r_base, displacement, r_dest, size);
1170
1171 if (UNLIKELY(is_volatile == kVolatile)) {
Hans Boehm48f5c472014-06-27 14:50:10 -07001172 // TODO: This should generate an acquire load instead of the barrier.
1173 GenMemBarrier(kLoadAny);
Andreas Gampe3c12c512014-06-24 18:46:29 +00001174 }
1175
1176 return load;
Vladimir Marko674744e2014-04-24 15:18:26 +01001177}
1178
Andreas Gampe3c12c512014-06-24 18:46:29 +00001179LIR* Arm64Mir2Lir::LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
1180 VolatileKind is_volatile) {
1181 return LoadBaseDisp(r_base, displacement, As32BitReg(r_dest), kReference, is_volatile);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001182}
1183
Matteo Franchin43ec8732014-03-31 15:00:14 +01001184LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001185 OpSize size) {
Matteo Franchin43ec8732014-03-31 15:00:14 +01001186 LIR* store = NULL;
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001187 ArmOpcode opcode = kA64Brk1d;
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001188 ArmOpcode alt_opcode = kA64Brk1d;
1189 int scale = 0;
1190
Matteo Franchin43ec8732014-03-31 15:00:14 +01001191 switch (size) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001192 case kDouble: // Intentional fall-through.
1193 case kWord: // Intentional fall-through.
Matteo Franchin43ec8732014-03-31 15:00:14 +01001194 case k64:
Andreas Gampe3c12c512014-06-24 18:46:29 +00001195 r_src = Check64BitReg(r_src);
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001196 scale = 3;
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001197 if (r_src.IsFloat()) {
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001198 DCHECK(r_src.IsDouble());
1199 opcode = FWIDE(kA64Str3fXD);
1200 alt_opcode = FWIDE(kA64Stur3fXd);
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001201 } else {
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001202 opcode = FWIDE(kA64Str3rXD);
1203 alt_opcode = FWIDE(kA64Stur3rXd);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001204 }
1205 break;
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001206 case kSingle: // Intentional fall-through.
1207 case k32: // Intentional fall-trough.
Matteo Franchin255e0142014-07-04 13:50:41 +01001208 case kReference:
Andreas Gampe3c12c512014-06-24 18:46:29 +00001209 r_src = Check32BitReg(r_src);
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001210 scale = 2;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001211 if (r_src.IsFloat()) {
1212 DCHECK(r_src.IsSingle());
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001213 opcode = kA64Str3fXD;
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001214 } else {
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001215 opcode = kA64Str3rXD;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001216 }
1217 break;
1218 case kUnsignedHalf:
1219 case kSignedHalf:
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001220 scale = 1;
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001221 opcode = kA64Strh3wXF;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001222 break;
1223 case kUnsignedByte:
1224 case kSignedByte:
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001225 opcode = kA64Strb3wXd;
Matteo Franchin43ec8732014-03-31 15:00:14 +01001226 break;
1227 default:
1228 LOG(FATAL) << "Bad size: " << size;
1229 }
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001230
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001231 bool displacement_is_aligned = (displacement & ((1 << scale) - 1)) == 0;
1232 int scaled_disp = displacement >> scale;
1233 if (displacement_is_aligned && scaled_disp >= 0 && scaled_disp < 4096) {
1234 // Can use scaled store.
1235 store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), scaled_disp);
1236 } else if (alt_opcode != kA64Brk1d && IS_SIGNED_IMM9(displacement)) {
1237 // Can use unscaled store.
1238 store = NewLIR3(alt_opcode, r_src.GetReg(), r_base.GetReg(), displacement);
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001239 } else {
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001240 // Use long sequence.
buzbee33ae5582014-06-12 14:56:32 -07001241 RegStorage r_scratch = AllocTempWide();
1242 LoadConstantWide(r_scratch, displacement);
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001243 store = StoreBaseIndexed(r_base, r_scratch, r_src, 0, size);
Matteo Franchine45fb9e2014-05-06 10:10:30 +01001244 FreeTemp(r_scratch);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001245 }
1246
Matteo Franchinbc6d1972014-05-13 12:33:28 +01001247 // TODO: In future, may need to differentiate Dalvik & spill accesses.
Vladimir Marko8dea81c2014-06-06 14:50:36 +01001248 if (mem_ref_type_ == ResourceMask::kDalvikReg) {
Zheng Xubaa7c882014-06-30 14:26:50 +08001249 DCHECK(r_base == rs_sp);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001250 AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
Matteo Franchin43ec8732014-03-31 15:00:14 +01001251 }
1252 return store;
1253}
1254
Andreas Gampe3c12c512014-06-24 18:46:29 +00001255LIR* Arm64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
1256 OpSize size, VolatileKind is_volatile) {
Hans Boehm48f5c472014-06-27 14:50:10 -07001257 // TODO: This should generate a release store and no barriers.
Andreas Gampe3c12c512014-06-24 18:46:29 +00001258 if (UNLIKELY(is_volatile == kVolatile)) {
Hans Boehm48f5c472014-06-27 14:50:10 -07001259 // Ensure that prior accesses become visible to other threads first.
1260 GenMemBarrier(kAnyStore);
Andreas Gampe3c12c512014-06-24 18:46:29 +00001261 }
1262
Vladimir Marko674744e2014-04-24 15:18:26 +01001263 // StoreBaseDisp() will emit correct insn for atomic store on arm64
1264 // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
Andreas Gampe3c12c512014-06-24 18:46:29 +00001265
1266 LIR* store = StoreBaseDispBody(r_base, displacement, r_src, size);
1267
1268 if (UNLIKELY(is_volatile == kVolatile)) {
Hans Boehm48f5c472014-06-27 14:50:10 -07001269 // Preserve order with respect to any subsequent volatile loads.
1270 // We need StoreLoad, but that generally requires the most expensive barrier.
1271 GenMemBarrier(kAnyAny);
Andreas Gampe3c12c512014-06-24 18:46:29 +00001272 }
1273
1274 return store;
Vladimir Marko674744e2014-04-24 15:18:26 +01001275}
1276
Andreas Gampe3c12c512014-06-24 18:46:29 +00001277LIR* Arm64Mir2Lir::StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
1278 VolatileKind is_volatile) {
1279 return StoreBaseDisp(r_base, displacement, As32BitReg(r_src), kReference, is_volatile);
Matteo Franchin43ec8732014-03-31 15:00:14 +01001280}
1281
// Not used on A64; reaching this is a compiler bug.
LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
  LOG(FATAL) << "Unexpected use of OpFpRegCopy for Arm64";
  return NULL;
}
1286
// Not used on A64; reaching this is a compiler bug.
LIR* Arm64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
  LOG(FATAL) << "Unexpected use of OpMem for Arm64";
  return NULL;
}
1291
// Emits the actual trampoline call/jump. The target address is expected to be
// in r_tgt already; the `trampoline` enum itself is unused on this path.
LIR* Arm64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
  return OpReg(op, r_tgt);
}
1295
1296} // namespace art