/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_arm64.h"

#include <inttypes.h>

#include <string>

#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"

namespace art {

static constexpr RegStorage core_regs_arr[] =
    {rs_w0, rs_w1, rs_w2, rs_w3, rs_w4, rs_w5, rs_w6, rs_w7,
     rs_w8, rs_w9, rs_w10, rs_w11, rs_w12, rs_w13, rs_w14, rs_w15,
     rs_w16, rs_w17, rs_w18, rs_w19, rs_w20, rs_w21, rs_w22, rs_w23,
     rs_w24, rs_w25, rs_w26, rs_w27, rs_w28, rs_w29, rs_w30, rs_w31,
     rs_wzr};
static constexpr RegStorage core64_regs_arr[] =
    {rs_x0, rs_x1, rs_x2, rs_x3, rs_x4, rs_x5, rs_x6, rs_x7,
     rs_x8, rs_x9, rs_x10, rs_x11, rs_x12, rs_x13, rs_x14, rs_x15,
     rs_x16, rs_x17, rs_x18, rs_x19, rs_x20, rs_x21, rs_x22, rs_x23,
     rs_x24, rs_x25, rs_x26, rs_x27, rs_x28, rs_x29, rs_x30, rs_x31,
     rs_xzr};
static constexpr RegStorage sp_regs_arr[] =
    {rs_f0, rs_f1, rs_f2, rs_f3, rs_f4, rs_f5, rs_f6, rs_f7,
     rs_f8, rs_f9, rs_f10, rs_f11, rs_f12, rs_f13, rs_f14, rs_f15,
     rs_f16, rs_f17, rs_f18, rs_f19, rs_f20, rs_f21, rs_f22, rs_f23,
     rs_f24, rs_f25, rs_f26, rs_f27, rs_f28, rs_f29, rs_f30, rs_f31};
static constexpr RegStorage dp_regs_arr[] =
    {rs_d0, rs_d1, rs_d2, rs_d3, rs_d4, rs_d5, rs_d6, rs_d7,
     rs_d8, rs_d9, rs_d10, rs_d11, rs_d12, rs_d13, rs_d14, rs_d15,
     rs_d16, rs_d17, rs_d18, rs_d19, rs_d20, rs_d21, rs_d22, rs_d23,
     rs_d24, rs_d25, rs_d26, rs_d27, rs_d28, rs_d29, rs_d30, rs_d31};
// Note: we are not able to call C functions directly, since rs_xSELF is a special register that
// needs to be preserved but would be scratched by native functions following AAPCS64.
static constexpr RegStorage reserved_regs_arr[] =
    {rs_wSUSPEND, rs_wSELF, rs_wsp, rs_wLR, rs_wzr};
static constexpr RegStorage reserved64_regs_arr[] =
    {rs_xSUSPEND, rs_xSELF, rs_sp, rs_xLR, rs_xzr};
static constexpr RegStorage core_temps_arr[] =
    {rs_w0, rs_w1, rs_w2, rs_w3, rs_w4, rs_w5, rs_w6, rs_w7,
     rs_w8, rs_w9, rs_w10, rs_w11, rs_w12, rs_w13, rs_w14, rs_w15, rs_w16,
     rs_w17};
static constexpr RegStorage core64_temps_arr[] =
    {rs_x0, rs_x1, rs_x2, rs_x3, rs_x4, rs_x5, rs_x6, rs_x7,
     rs_x8, rs_x9, rs_x10, rs_x11, rs_x12, rs_x13, rs_x14, rs_x15, rs_x16,
     rs_x17};
static constexpr RegStorage sp_temps_arr[] =
    {rs_f0, rs_f1, rs_f2, rs_f3, rs_f4, rs_f5, rs_f6, rs_f7,
     rs_f16, rs_f17, rs_f18, rs_f19, rs_f20, rs_f21, rs_f22, rs_f23,
     rs_f24, rs_f25, rs_f26, rs_f27, rs_f28, rs_f29, rs_f30, rs_f31};
static constexpr RegStorage dp_temps_arr[] =
    {rs_d0, rs_d1, rs_d2, rs_d3, rs_d4, rs_d5, rs_d6, rs_d7,
     rs_d16, rs_d17, rs_d18, rs_d19, rs_d20, rs_d21, rs_d22, rs_d23,
     rs_d24, rs_d25, rs_d26, rs_d27, rs_d28, rs_d29, rs_d30, rs_d31};

static constexpr ArrayRef<const RegStorage> core_regs(core_regs_arr);
static constexpr ArrayRef<const RegStorage> core64_regs(core64_regs_arr);
static constexpr ArrayRef<const RegStorage> sp_regs(sp_regs_arr);
static constexpr ArrayRef<const RegStorage> dp_regs(dp_regs_arr);
static constexpr ArrayRef<const RegStorage> reserved_regs(reserved_regs_arr);
static constexpr ArrayRef<const RegStorage> reserved64_regs(reserved64_regs_arr);
static constexpr ArrayRef<const RegStorage> core_temps(core_temps_arr);
static constexpr ArrayRef<const RegStorage> core64_temps(core64_temps_arr);
static constexpr ArrayRef<const RegStorage> sp_temps(sp_temps_arr);
static constexpr ArrayRef<const RegStorage> dp_temps(dp_temps_arr);

RegLocation Arm64Mir2Lir::LocCReturn() {
  return arm_loc_c_return;
}

RegLocation Arm64Mir2Lir::LocCReturnRef() {
  return arm_loc_c_return_ref;
}

RegLocation Arm64Mir2Lir::LocCReturnWide() {
  return arm_loc_c_return_wide;
}

RegLocation Arm64Mir2Lir::LocCReturnFloat() {
  return arm_loc_c_return_float;
}

RegLocation Arm64Mir2Lir::LocCReturnDouble() {
  return arm_loc_c_return_double;
}

// Return a target-dependent special register.
RegStorage Arm64Mir2Lir::TargetReg(SpecialTargetRegister reg) {
  RegStorage res_reg = RegStorage::InvalidReg();
  switch (reg) {
    case kSelf: res_reg = rs_wSELF; break;
    case kSuspend: res_reg = rs_wSUSPEND; break;
    case kLr: res_reg = rs_wLR; break;
    case kPc: res_reg = RegStorage::InvalidReg(); break;
    case kSp: res_reg = rs_wsp; break;
    case kArg0: res_reg = rs_w0; break;
    case kArg1: res_reg = rs_w1; break;
    case kArg2: res_reg = rs_w2; break;
    case kArg3: res_reg = rs_w3; break;
    case kArg4: res_reg = rs_w4; break;
    case kArg5: res_reg = rs_w5; break;
    case kArg6: res_reg = rs_w6; break;
    case kArg7: res_reg = rs_w7; break;
    case kFArg0: res_reg = rs_f0; break;
    case kFArg1: res_reg = rs_f1; break;
    case kFArg2: res_reg = rs_f2; break;
    case kFArg3: res_reg = rs_f3; break;
    case kFArg4: res_reg = rs_f4; break;
    case kFArg5: res_reg = rs_f5; break;
    case kFArg6: res_reg = rs_f6; break;
    case kFArg7: res_reg = rs_f7; break;
    case kRet0: res_reg = rs_w0; break;
    case kRet1: res_reg = rs_w1; break;
    case kInvokeTgt: res_reg = rs_wLR; break;
    case kHiddenArg: res_reg = rs_wIP1; break;
    case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
    case kCount: res_reg = RegStorage::InvalidReg(); break;
    default: res_reg = RegStorage::InvalidReg();
  }
  return res_reg;
}

/*
 * Decode the register id. This routine makes assumptions on the encoding made by RegStorage.
 */
ResourceMask Arm64Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
  // TODO(Arm64): this function depends too much on the internal RegStorage encoding. Refactor.

  // Check if the shape mask is zero (i.e. invalid).
  if (UNLIKELY(reg == rs_wzr || reg == rs_xzr)) {
    // The zero register is not a true register. It is just an immediate zero.
    return kEncodeNone;
  }

  return ResourceMask::Bit(
      // FP register starts at bit position 32.
      (reg.IsFloat() ? kArm64FPReg0 : 0) + reg.GetRegNum());
}
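// For illustration (added example, not from the original source): with the mapping above, a core
// register such as w3/x3 occupies resource bit 3, an FP register such as f0/d0 occupies bit
// kArm64FPReg0 (32), and wzr/xzr contribute no resource bits at all.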

ResourceMask Arm64Mir2Lir::GetPCUseDefEncoding() const {
  // Note: On arm64 we cannot set the pc except with branch instructions, which are regarded as a
  // kind of barrier. All other instructions only use the pc, which creates no dependency between
  // them, so it is fine to just return kEncodeNone here.
  return kEncodeNone;
}

// Arm64 specific setup. TODO: inline?
void Arm64Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
                                            ResourceMask* use_mask, ResourceMask* def_mask) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  DCHECK(!lir->flags.use_def_invalid);

  // Note: REG_USE_PC is ignored, for the same reason as in GetPCUseDefEncoding().
  // These flags are somewhat uncommon - bypass if we can.
  if ((flags & (REG_DEF_SP | REG_USE_SP | REG_DEF_LR)) != 0) {
    if (flags & REG_DEF_SP) {
      def_mask->SetBit(kArm64RegSP);
    }

    if (flags & REG_USE_SP) {
      use_mask->SetBit(kArm64RegSP);
    }

    if (flags & REG_DEF_LR) {
      def_mask->SetBit(kArm64RegLR);
    }
  }
}

ArmConditionCode Arm64Mir2Lir::ArmConditionEncoding(ConditionCode ccode) {
  ArmConditionCode res;
  switch (ccode) {
    case kCondEq: res = kArmCondEq; break;
    case kCondNe: res = kArmCondNe; break;
    case kCondCs: res = kArmCondCs; break;
    case kCondCc: res = kArmCondCc; break;
    case kCondUlt: res = kArmCondCc; break;
    case kCondUge: res = kArmCondCs; break;
    case kCondMi: res = kArmCondMi; break;
    case kCondPl: res = kArmCondPl; break;
    case kCondVs: res = kArmCondVs; break;
    case kCondVc: res = kArmCondVc; break;
    case kCondHi: res = kArmCondHi; break;
    case kCondLs: res = kArmCondLs; break;
    case kCondGe: res = kArmCondGe; break;
    case kCondLt: res = kArmCondLt; break;
    case kCondGt: res = kArmCondGt; break;
    case kCondLe: res = kArmCondLe; break;
    case kCondAl: res = kArmCondAl; break;
    case kCondNv: res = kArmCondNv; break;
    default:
      LOG(FATAL) << "Bad condition code " << ccode;
      res = static_cast<ArmConditionCode>(0);  // Quiet gcc
  }
  return res;
}

static const char *shift_names[4] = {
  "lsl",
  "lsr",
  "asr",
  "ror"
};

static const char* extend_names[8] = {
  "uxtb",
  "uxth",
  "uxtw",
  "uxtx",
  "sxtb",
  "sxth",
  "sxtw",
  "sxtx",
};

/* Decode and print a register extension (e.g. ", uxtb #1") */
static void DecodeRegExtendOrShift(int operand, char *buf, size_t buf_size) {
  if ((operand & (1 << 6)) == 0) {
    const char *shift_name = shift_names[(operand >> 7) & 0x3];
    int amount = operand & 0x3f;
    snprintf(buf, buf_size, ", %s #%d", shift_name, amount);
  } else {
    const char *extend_name = extend_names[(operand >> 3) & 0x7];
    int amount = operand & 0x7;
    if (amount == 0) {
      snprintf(buf, buf_size, ", %s", extend_name);
    } else {
      snprintf(buf, buf_size, ", %s #%d", extend_name, amount);
    }
  }
}
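// For illustration (added example, not from the original source): with the operand layout above,
// (1 << 7) | 4 has bit 6 clear and decodes as the shift ", lsr #4", while (1 << 6) | (4 << 3)
// has bit 6 set with a zero amount and decodes as the extend ", sxtb".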

#define BIT_MASK(w) ((UINT64_C(1) << (w)) - UINT64_C(1))

static uint64_t RotateRight(uint64_t value, unsigned rotate, unsigned width) {
  DCHECK_LE(width, 64U);
  rotate &= 63;
  value = value & BIT_MASK(width);
  return ((value & BIT_MASK(rotate)) << (width - rotate)) | (value >> rotate);
}

static uint64_t RepeatBitsAcrossReg(bool is_wide, uint64_t value, unsigned width) {
  unsigned i;
  unsigned reg_size = (is_wide) ? 64 : 32;
  uint64_t result = value & BIT_MASK(width);
  for (i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  DCHECK_EQ(i, reg_size);
  return result;
}
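// For illustration (added example, not from the original source): RepeatBitsAcrossReg(false, 0x1, 2)
// replicates the 2-bit pattern 0b01 across 32 bits, giving 0x55555555, and
// RepeatBitsAcrossReg(true, 0xff, 8) gives 0xffffffffffffffff.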

/**
 * @brief Decode an immediate in the form required by logical instructions.
 *
 * @param is_wide Whether @p value encodes a 64-bit (as opposed to 32-bit) immediate.
 * @param value The encoded logical immediate that is to be decoded.
 * @return The decoded logical immediate.
 * @note This is the inverse of Arm64Mir2Lir::EncodeLogicalImmediate().
 */
uint64_t Arm64Mir2Lir::DecodeLogicalImmediate(bool is_wide, int value) {
  unsigned n = (value >> 12) & 0x01;
  unsigned imm_r = (value >> 6) & 0x3f;
  unsigned imm_s = (value >> 0) & 0x3f;

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr   size        S             R
  //  1  ssssss  rrrrrr   64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr   32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr   16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr    8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr    4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr    2    UInt(s)       UInt(r)
  // (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.

  if (n == 1) {
    DCHECK_NE(imm_s, 0x3fU);
    uint64_t bits = BIT_MASK(imm_s + 1);
    return RotateRight(bits, imm_r, 64);
  } else {
    DCHECK_NE((imm_s >> 1), 0x1fU);
    for (unsigned width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        unsigned mask = (unsigned)(width - 1);
        DCHECK_NE((imm_s & mask), mask);
        uint64_t bits = BIT_MASK((imm_s & mask) + 1);
        return RepeatBitsAcrossReg(is_wide, RotateRight(bits, imm_r & mask, width), width);
      }
    }
  }
  return 0;
}
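// Worked examples (added for illustration, not from the original source): the encoding 0x1000
// (N=1, immr=0, imms=0) decodes to the 64-bit immediate 0x1, and the encoding 0x03c (N=0, immr=0,
// imms=0b111100) decodes, for a 32-bit destination, to the repeated 2-bit pattern 0x55555555.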

/**
 * @brief Decode an 8-bit single precision floating point number encoded with EncodeImmSingle().
 */
static float DecodeImmSingle(uint8_t small_float) {
  int mantissa = (small_float & 0x0f) + 0x10;
  int sign = ((small_float & 0x80) == 0) ? 1 : -1;
  float signed_mantissa = static_cast<float>(sign*mantissa);
  int exponent = (((small_float >> 4) & 0x7) + 4) & 0x7;
  return signed_mantissa*static_cast<float>(1 << exponent)*0.0078125f;
}
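// For illustration (added example, not from the original source): with the decoding above,
// DecodeImmSingle(0x00) yields 2.0f and DecodeImmSingle(0x70) yields 1.0f.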

static const char* cc_names[] = {"eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
                                 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"};
/*
 * Interpret a format string and build a string no longer than size
 * See format key in assemble_arm64.cc.
 */
std::string Arm64Mir2Lir::BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) {
  std::string buf;
  const char* fmt_end = &fmt[strlen(fmt)];
  char tbuf[256];
  const char* name;
  char nc;
  while (fmt < fmt_end) {
    int operand;
    if (*fmt == '!') {
      fmt++;
      DCHECK_LT(fmt, fmt_end);
      nc = *fmt++;
      if (nc == '!') {
        strcpy(tbuf, "!");
      } else {
        DCHECK_LT(fmt, fmt_end);
        DCHECK_LT(static_cast<unsigned>(nc-'0'), 4U);
        operand = lir->operands[nc-'0'];
        switch (*fmt++) {
          case 'e': {
              // Omit ", uxtw #0" in strings like "add w0, w1, w3, uxtw #0" and
              // ", uxtx #0" in strings like "add x0, x1, x3, uxtx #0"
              int omittable = ((IS_WIDE(lir->opcode)) ? EncodeExtend(kA64Uxtx, 0) :
                               EncodeExtend(kA64Uxtw, 0));
              if (LIKELY(operand == omittable)) {
                strcpy(tbuf, "");
              } else {
                DecodeRegExtendOrShift(operand, tbuf, arraysize(tbuf));
              }
            }
            break;
          case 'o':
            // Omit ", lsl #0"
            if (LIKELY(operand == EncodeShift(kA64Lsl, 0))) {
              strcpy(tbuf, "");
            } else {
              DecodeRegExtendOrShift(operand, tbuf, arraysize(tbuf));
            }
            break;
          case 'B':
            switch (operand) {
              case kSY:
                name = "sy";
                break;
              case kST:
                name = "st";
                break;
              case kISH:
                name = "ish";
                break;
              case kISHST:
                name = "ishst";
                break;
              case kNSH:
                name = "nsh";
                break;
              case kNSHST:
                name = "nshst";
                break;
              default:
                name = "DecodeError2";
                break;
            }
            strcpy(tbuf, name);
            break;
          case 's':
            snprintf(tbuf, arraysize(tbuf), "s%d", operand & RegStorage::kRegNumMask);
            break;
          case 'S':
            snprintf(tbuf, arraysize(tbuf), "d%d", operand & RegStorage::kRegNumMask);
            break;
          case 'f':
            snprintf(tbuf, arraysize(tbuf), "%c%d", (IS_FWIDE(lir->opcode)) ? 'd' : 's',
                     operand & RegStorage::kRegNumMask);
            break;
          case 'l': {
              bool is_wide = IS_WIDE(lir->opcode);
              uint64_t imm = DecodeLogicalImmediate(is_wide, operand);
              snprintf(tbuf, arraysize(tbuf), "%" PRId64 " (%#" PRIx64 ")", imm, imm);
            }
            break;
          case 'I':
            snprintf(tbuf, arraysize(tbuf), "%f", DecodeImmSingle(operand));
            break;
          case 'M':
            if (LIKELY(operand == 0))
              strcpy(tbuf, "");
            else
              snprintf(tbuf, arraysize(tbuf), ", lsl #%d", 16*operand);
            break;
          case 'd':
            snprintf(tbuf, arraysize(tbuf), "%d", operand);
            break;
          case 'w':
            if (LIKELY(operand != rwzr))
              snprintf(tbuf, arraysize(tbuf), "w%d", operand & RegStorage::kRegNumMask);
            else
              strcpy(tbuf, "wzr");
            break;
          case 'W':
            if (LIKELY(operand != rwsp))
              snprintf(tbuf, arraysize(tbuf), "w%d", operand & RegStorage::kRegNumMask);
            else
              strcpy(tbuf, "wsp");
            break;
          case 'x':
            if (LIKELY(operand != rxzr))
              snprintf(tbuf, arraysize(tbuf), "x%d", operand & RegStorage::kRegNumMask);
            else
              strcpy(tbuf, "xzr");
            break;
          case 'X':
            if (LIKELY(operand != rsp))
              snprintf(tbuf, arraysize(tbuf), "x%d", operand & RegStorage::kRegNumMask);
            else
              strcpy(tbuf, "sp");
            break;
          case 'D':
            snprintf(tbuf, arraysize(tbuf), "%d", operand*((IS_WIDE(lir->opcode)) ? 8 : 4));
            break;
          case 'E':
            snprintf(tbuf, arraysize(tbuf), "%d", operand*4);
            break;
          case 'F':
            snprintf(tbuf, arraysize(tbuf), "%d", operand*2);
            break;
          case 'G':
            if (LIKELY(operand == 0))
              strcpy(tbuf, "");
            else
              strcpy(tbuf, (IS_WIDE(lir->opcode)) ? ", lsl #3" : ", lsl #2");
            break;
          case 'c':
            strcpy(tbuf, cc_names[operand]);
            break;
          case 't':
            snprintf(tbuf, arraysize(tbuf), "0x%08" PRIxPTR " (L%p)",
                     reinterpret_cast<uintptr_t>(base_addr) + lir->offset + (operand << 2),
                     lir->target);
            break;
          case 'r': {
              bool is_wide = IS_WIDE(lir->opcode);
              if (LIKELY(operand != rwzr && operand != rxzr)) {
                snprintf(tbuf, arraysize(tbuf), "%c%d", (is_wide) ? 'x' : 'w',
                         operand & RegStorage::kRegNumMask);
              } else {
                strcpy(tbuf, (is_wide) ? "xzr" : "wzr");
              }
            }
            break;
          case 'R': {
              bool is_wide = IS_WIDE(lir->opcode);
              if (LIKELY(operand != rwsp && operand != rsp)) {
                snprintf(tbuf, arraysize(tbuf), "%c%d", (is_wide) ? 'x' : 'w',
                         operand & RegStorage::kRegNumMask);
              } else {
                strcpy(tbuf, (is_wide) ? "sp" : "wsp");
              }
            }
            break;
          case 'p':
            snprintf(tbuf, arraysize(tbuf), ".+%d (addr %#" PRIxPTR ")", 4*operand,
                     reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4*operand);
            break;
          case 'T':
            if (LIKELY(operand == 0))
              strcpy(tbuf, "");
            else if (operand == 1)
              strcpy(tbuf, ", lsl #12");
            else
              strcpy(tbuf, ", DecodeError3");
            break;
          default:
            strcpy(tbuf, "DecodeError1");
            break;
        }
        buf += tbuf;
      }
    } else {
      buf += *fmt++;
    }
  }
  return buf;
}

void Arm64Mir2Lir::DumpResourceMask(LIR* arm_lir, const ResourceMask& mask, const char* prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask.Equals(kEncodeAll)) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    for (i = 0; i < kArm64RegEnd; i++) {
      if (mask.HasBit(i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask.HasBit(ResourceMask::kCCode)) {
      strcat(buf, "cc ");
    }
    if (mask.HasBit(ResourceMask::kFPStatus)) {
      strcat(buf, "fpcc ");
    }

    /* Memory bits */
    if (arm_lir && (mask.HasBit(ResourceMask::kDalvikReg))) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(arm_lir->flags.alias_info),
               DECODE_ALIAS_INFO_WIDE(arm_lir->flags.alias_info) ? "(+1)" : "");
    }
    if (mask.HasBit(ResourceMask::kLiteral)) {
      strcat(buf, "lit ");
    }

    if (mask.HasBit(ResourceMask::kHeapRef)) {
      strcat(buf, "heap ");
    }
    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
      strcat(buf, "noalias ");
    }
  }
  if (buf[0]) {
    LOG(INFO) << prefix << ": " << buf;
  }
}

bool Arm64Mir2Lir::IsUnconditionalBranch(LIR* lir) {
  return (lir->opcode == kA64B1t);
}

RegisterClass Arm64Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
  if (UNLIKELY(is_volatile)) {
    // On arm64, fp register load/store is atomic only for single bytes.
    if (size != kSignedByte && size != kUnsignedByte) {
      return (size == kReference) ? kRefReg : kCoreReg;
    }
  }
  return RegClassBySize(size);
}

Arm64Mir2Lir::Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Mir2Lir(cu, mir_graph, arena) {
  // Sanity check - make sure encoding map lines up.
  for (int i = 0; i < kA64Last; i++) {
    if (UNWIDE(Arm64Mir2Lir::EncodingMap[i].opcode) != i) {
      LOG(FATAL) << "Encoding order for " << Arm64Mir2Lir::EncodingMap[i].name
                 << " is wrong: expecting " << i << ", seeing "
                 << static_cast<int>(Arm64Mir2Lir::EncodingMap[i].opcode);
    }
  }
}

Mir2Lir* Arm64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                            ArenaAllocator* const arena) {
  return new Arm64Mir2Lir(cu, mir_graph, arena);
}

void Arm64Mir2Lir::CompilerInitializeRegAlloc() {
  reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs, core64_regs, sp_regs, dp_regs,
                                        reserved_regs, reserved64_regs, core_temps, core64_temps,
                                        sp_temps, dp_temps);

  // Target-specific adjustments.
  // Alias single precision float registers to corresponding double registers.
  GrowableArray<RegisterInfo*>::Iterator fp_it(&reg_pool_->sp_regs_);
  for (RegisterInfo* info = fp_it.Next(); info != nullptr; info = fp_it.Next()) {
    int fp_reg_num = info->GetReg().GetRegNum();
    RegStorage dp_reg = RegStorage::FloatSolo64(fp_reg_num);
    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    // Double precision register's master storage should refer to itself.
    DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
    // Redirect single precision's master storage to master.
    info->SetMaster(dp_reg_info);
    // Singles should show a single 32-bit mask bit, at first referring to the low half.
    DCHECK_EQ(info->StorageMask(), 0x1U);
  }

  // Alias 32bit W registers to corresponding 64bit X registers.
  GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
  for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
    int x_reg_num = info->GetReg().GetRegNum();
    RegStorage x_reg = RegStorage::Solo64(x_reg_num);
    RegisterInfo* x_reg_info = GetRegInfo(x_reg);
    // 64bit X register's master storage should refer to itself.
    DCHECK_EQ(x_reg_info, x_reg_info->Master());
    // Redirect 32bit W master storage to 64bit X.
    info->SetMaster(x_reg_info);
    // 32bit W should show a single 32-bit mask bit, at first referring to the low half.
    DCHECK_EQ(info->StorageMask(), 0x1U);
  }

  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
  // TODO: adjust when we roll to hard float calling convention.
  reg_pool_->next_core_reg_ = 2;
  reg_pool_->next_sp_reg_ = 0;
  reg_pool_->next_dp_reg_ = 0;
}

/*
 * TUNING: is true leaf? Can't just use METHOD_IS_LEAF to determine as some
 * instructions might call out to C/assembly helper functions. Until
 * machinery is in place, always spill lr.
 */

void Arm64Mir2Lir::AdjustSpillMask() {
  core_spill_mask_ |= (1 << rs_xLR.GetRegNum());
  num_core_spills_++;
}

/* Clobber all regs that might be used by an external C call */
void Arm64Mir2Lir::ClobberCallerSave() {
  Clobber(rs_x0);
  Clobber(rs_x1);
  Clobber(rs_x2);
  Clobber(rs_x3);
  Clobber(rs_x4);
  Clobber(rs_x5);
  Clobber(rs_x6);
  Clobber(rs_x7);
  Clobber(rs_x8);
  Clobber(rs_x9);
  Clobber(rs_x10);
  Clobber(rs_x11);
  Clobber(rs_x12);
  Clobber(rs_x13);
  Clobber(rs_x14);
  Clobber(rs_x15);
  Clobber(rs_x16);
  Clobber(rs_x17);
  Clobber(rs_x30);

  Clobber(rs_f0);
  Clobber(rs_f1);
  Clobber(rs_f2);
  Clobber(rs_f3);
  Clobber(rs_f4);
  Clobber(rs_f5);
  Clobber(rs_f6);
  Clobber(rs_f7);
  Clobber(rs_f16);
  Clobber(rs_f17);
  Clobber(rs_f18);
  Clobber(rs_f19);
  Clobber(rs_f20);
  Clobber(rs_f21);
  Clobber(rs_f22);
  Clobber(rs_f23);
  Clobber(rs_f24);
  Clobber(rs_f25);
  Clobber(rs_f26);
  Clobber(rs_f27);
  Clobber(rs_f28);
  Clobber(rs_f29);
  Clobber(rs_f30);
  Clobber(rs_f31);
}

RegLocation Arm64Mir2Lir::GetReturnWideAlt() {
  RegLocation res = LocCReturnWide();
  res.reg.SetReg(rx2);
  res.reg.SetHighReg(rx3);
  Clobber(rs_x2);
  Clobber(rs_x3);
  MarkInUse(rs_x2);
  MarkInUse(rs_x3);
  MarkWide(res.reg);
  return res;
}

RegLocation Arm64Mir2Lir::GetReturnAlt() {
  RegLocation res = LocCReturn();
  res.reg.SetReg(rx1);
  Clobber(rs_x1);
  MarkInUse(rs_x1);
  return res;
}

/* To be used when explicitly managing register use */
void Arm64Mir2Lir::LockCallTemps() {
  // TODO: needs cleanup.
  LockTemp(rs_x0);
  LockTemp(rs_x1);
  LockTemp(rs_x2);
  LockTemp(rs_x3);
  LockTemp(rs_x4);
  LockTemp(rs_x5);
  LockTemp(rs_x6);
  LockTemp(rs_x7);
  LockTemp(rs_f0);
  LockTemp(rs_f1);
  LockTemp(rs_f2);
  LockTemp(rs_f3);
  LockTemp(rs_f4);
  LockTemp(rs_f5);
  LockTemp(rs_f6);
  LockTemp(rs_f7);
}

/* To be used when explicitly managing register use */
void Arm64Mir2Lir::FreeCallTemps() {
  // TODO: needs cleanup.
  FreeTemp(rs_x0);
  FreeTemp(rs_x1);
  FreeTemp(rs_x2);
  FreeTemp(rs_x3);
  FreeTemp(rs_x4);
  FreeTemp(rs_x5);
  FreeTemp(rs_x6);
  FreeTemp(rs_x7);
  FreeTemp(rs_f0);
  FreeTemp(rs_f1);
  FreeTemp(rs_f2);
  FreeTemp(rs_f3);
  FreeTemp(rs_f4);
  FreeTemp(rs_f5);
  FreeTemp(rs_f6);
  FreeTemp(rs_f7);
}

RegStorage Arm64Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
  // TODO(Arm64): use LoadWordDisp instead.
  //   e.g. LoadWordDisp(rs_rA64_SELF, offset.Int32Value(), rs_rA64_LR);
  LoadBaseDisp(rs_xSELF, GetThreadOffset<8>(trampoline).Int32Value(), rs_xLR, k64, kNotVolatile);
  return rs_xLR;
}

LIR* Arm64Mir2Lir::CheckSuspendUsingLoad() {
  RegStorage tmp = rs_x0;
  LoadWordDisp(rs_xSELF, Thread::ThreadSuspendTriggerOffset<8>().Int32Value(), tmp);
  LIR* load2 = LoadWordDisp(tmp, 0, tmp);
  return load2;
}

uint64_t Arm64Mir2Lir::GetTargetInstFlags(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return Arm64Mir2Lir::EncodingMap[UNWIDE(opcode)].flags;
}

const char* Arm64Mir2Lir::GetTargetInstName(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return Arm64Mir2Lir::EncodingMap[UNWIDE(opcode)].name;
}

const char* Arm64Mir2Lir::GetTargetInstFmt(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return Arm64Mir2Lir::EncodingMap[UNWIDE(opcode)].fmt;
}

RegStorage Arm64Mir2Lir::InToRegStorageArm64Mapper::GetNextReg(bool is_double_or_float,
                                                               bool is_wide,
                                                               bool is_ref) {
  const RegStorage coreArgMappingToPhysicalReg[] =
      {rs_x1, rs_x2, rs_x3, rs_x4, rs_x5, rs_x6, rs_x7};
  const int coreArgMappingToPhysicalRegSize =
      sizeof(coreArgMappingToPhysicalReg) / sizeof(RegStorage);
  const RegStorage fpArgMappingToPhysicalReg[] =
      {rs_f0, rs_f1, rs_f2, rs_f3, rs_f4, rs_f5, rs_f6, rs_f7};
  const int fpArgMappingToPhysicalRegSize =
      sizeof(fpArgMappingToPhysicalReg) / sizeof(RegStorage);

  RegStorage result = RegStorage::InvalidReg();
  if (is_double_or_float) {
    if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
      DCHECK(!is_ref);
      result = fpArgMappingToPhysicalReg[cur_fp_reg_++];
      if (result.Valid()) {
        // TODO: switching between widths remains a bit ugly. Better way?
        int res_reg = result.GetReg();
        result = is_wide ? RegStorage::FloatSolo64(res_reg) : RegStorage::FloatSolo32(res_reg);
      }
    }
  } else {
    if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
      result = coreArgMappingToPhysicalReg[cur_core_reg_++];
      if (result.Valid()) {
        // TODO: switching between widths remains a bit ugly. Better way?
        int res_reg = result.GetReg();
        DCHECK(!(is_wide && is_ref));
        result = (is_wide || is_ref) ? RegStorage::Solo64(res_reg) : RegStorage::Solo32(res_reg);
      }
    }
  }
  return result;
}
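// For illustration (added example, not from the original source): for an argument list such as
// (long, float, object), this mapper would hand out x1 for the long, the first FP argument
// register (s0, or d0 if wide) for the float, and x2 held as a 64-bit reference for the object;
// x0 is not in the core table, presumably because it carries the ArtMethod* (see FlushIns below).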

RegStorage Arm64Mir2Lir::InToRegStorageMapping::Get(int in_position) {
  DCHECK(IsInitialized());
  auto res = mapping_.find(in_position);
  return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
}

void Arm64Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count,
                                                     InToRegStorageMapper* mapper) {
  DCHECK(mapper != nullptr);
  max_mapped_in_ = -1;
  is_there_stack_mapped_ = false;
  for (int in_position = 0; in_position < count; in_position++) {
    RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp,
                                        arg_locs[in_position].wide,
                                        arg_locs[in_position].ref);
    if (reg.Valid()) {
      mapping_[in_position] = reg;
      if (arg_locs[in_position].wide) {
        // We covered 2 args, so skip the next one
        in_position++;
      }
      max_mapped_in_ = std::max(max_mapped_in_, in_position);
    } else {
      is_there_stack_mapped_ = true;
    }
  }
  initialized_ = true;
}

// Deprecated. Use the new mechanism.
// TODO(Arm64): reuse info in QuickArgumentVisitor?
857static RegStorage GetArgPhysicalReg(RegLocation* loc, int* num_gpr_used, int* num_fpr_used,
858 OpSize* op_size) {
859 if (loc->fp) {
860 int n = *num_fpr_used;
861 if (n < 8) {
862 *num_fpr_used = n + 1;
863 RegStorage::RegStorageKind reg_kind;
864 if (loc->wide) {
865 *op_size = kDouble;
866 reg_kind = RegStorage::k64BitSolo;
867 } else {
868 *op_size = kSingle;
869 reg_kind = RegStorage::k32BitSolo;
870 }
871 return RegStorage(RegStorage::kValid | reg_kind | RegStorage::kFloatingPoint | n);
872 }
873 } else {
874 int n = *num_gpr_used;
buzbee33ae5582014-06-12 14:56:32 -0700875 if (n < 8) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100876 *num_gpr_used = n + 1;
buzbeeb5860fb2014-06-21 15:31:01 -0700877 if (loc->wide || loc->ref) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100878 *op_size = k64;
879 return RegStorage::Solo64(n);
880 } else {
881 *op_size = k32;
882 return RegStorage::Solo32(n);
883 }
884 }
885 }
Ian Rogers54874942014-06-10 16:31:03 -0700886 *op_size = kWord;
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100887 return RegStorage::InvalidReg();
888}
889
RegStorage Arm64Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
  if (!in_to_reg_storage_mapping_.IsInitialized()) {
    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
    RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];

    InToRegStorageArm64Mapper mapper;
    in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
  }
  return in_to_reg_storage_mapping_.Get(arg_num);
}

/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame. Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments
 * with one location record per word of argument.
 */
void Arm64Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  int num_gpr_used = 1;
  int num_fpr_used = 0;

  /*
   * Dummy up a RegLocation for the incoming StackReference<mirror::ArtMethod>
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */
  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.reg = TargetReg(kArg0, kRef);
  rl_src.home = false;
  MarkLive(rl_src);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush
  if (rl_method.location == kLocPhysReg) {
    StoreRefDisp(TargetPtrReg(kSp), 0, rl_src.reg, kNotVolatile);
  }

  if (cu_->num_ins == 0) {
    return;
  }

  // Handle dalvik registers.
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
  for (int i = 0; i < cu_->num_ins; i++) {
    RegLocation* t_loc = &ArgLocs[i];
    OpSize op_size;
    RegStorage reg = GetArgPhysicalReg(t_loc, &num_gpr_used, &num_fpr_used, &op_size);

    if (reg.Valid()) {
      // If arriving in register.

      // We have already updated the arg location with promoted info,
      // so we can rely on it.
      if (t_loc->location == kLocPhysReg) {
        // Just copy it.
        OpRegCopy(t_loc->reg, reg);
      } else {
        // Needs flush.
        if (t_loc->ref) {
          StoreRefDisp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), reg, kNotVolatile);
        } else {
          StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
                        kNotVolatile);
        }
      }
    } else {
      // If arriving in frame & promoted.
      if (t_loc->location == kLocPhysReg) {
        if (t_loc->ref) {
          LoadRefDisp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
        } else {
          LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(start_vreg + i), t_loc->reg,
                       t_loc->wide ? k64 : k32, kNotVolatile);
        }
      }
    }
    if (t_loc->wide) {
      // Increment i to skip the next one.
      i++;
    }
    // if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
    //   OpRegCopy(RegStorage::Solo32(v_map->core_reg), reg);
    // } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
    //   OpRegCopy(RegStorage::Solo32(v_map->fp_reg), reg);
    // } else {
    //   StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, op_size, kNotVolatile);
    //   if (reg.Is64Bit()) {
    //     if (SRegOffset(start_vreg + i) + 4 != SRegOffset(start_vreg + i + 1)) {
    //       LOG(FATAL) << "64 bit value stored in non-consecutive 4 bytes slots";
    //     }
    //     i += 1;
    //   }
    // }
    // } else {
    //   // If arriving in frame & promoted
    //   if (v_map->core_location == kLocPhysReg) {
    //     LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
    //                  RegStorage::Solo32(v_map->core_reg));
    //   }
    //   if (v_map->fp_location == kLocPhysReg) {
    //     LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->fp_reg));
    //   }
    // }
  }
}

/*
 * Load up to 5 arguments, the first three of which will be in
 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer.
 */
int Arm64Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
                                       int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
                                       const MethodReference& target_method,
                                       uint32_t vtable_idx, uintptr_t direct_code,
                                       uintptr_t direct_method, InvokeType type, bool skip_this) {
  return GenDalvikArgsRange(info,
                            call_state, pcrLabel, next_call_insn,
                            target_method,
                            vtable_idx, direct_code,
                            direct_method, type, skip_this);
}

/*
 * May have 0+ arguments (also used for jumbo). Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying. This
 * applies to arg3 and above (see below).
 *
 * FIXME: update comments.
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *
 */
int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                     LIR** pcrLabel, NextCallInsn next_call_insn,
                                     const MethodReference& target_method,
                                     uint32_t vtable_idx, uintptr_t direct_code,
                                     uintptr_t direct_method, InvokeType type, bool skip_this) {
  /* If no arguments, just return */
  if (info->num_arg_words == 0)
    return call_state;

  const int start_index = skip_this ? 1 : 0;

  InToRegStorageArm64Mapper mapper;
  InToRegStorageMapping in_to_reg_storage_mapping;
  in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
  const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
  int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + 1);

  // First of all, check whether it makes sense to use bulk copying.
  // Bulk copying is done only for the range case.
  // TODO: make a constant instead of 2
  if (info->is_range && regs_left_to_pass_via_stack >= 2) {
    // Scan the rest of the args - if in phys_reg flush to memory
    for (int next_arg = last_mapped_in + 1; next_arg < info->num_arg_words;) {
      RegLocation loc = info->args[next_arg];
      if (loc.wide) {
        loc = UpdateLocWide(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
        }
        next_arg += 2;
      } else {
        loc = UpdateLoc(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          if (loc.ref) {
            StoreRefDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
          } else {
            StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32,
                          kNotVolatile);
          }
        }
        next_arg++;
      }
    }

    // Logic below assumes that Method pointer is at offset zero from SP.
    DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);

    // The rest can be copied together
    int start_offset = SRegOffset(info->args[last_mapped_in + 1].s_reg_low);
    int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + 1,
                                                   cu_->instruction_set);

    int current_src_offset = start_offset;
    int current_dest_offset = outs_offset;

    // Only dalvik regs are accessed in this loop; no next_call_insn() calls.
    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    while (regs_left_to_pass_via_stack > 0) {
      /*
       * TODO: Improve by adding block copy for large number of arguments. This
       * should be done, if possible, as a target-depending helper. For now, just
       * copy a Dalvik vreg at a time.
       */
      // Moving 32-bits via general purpose register.
      size_t bytes_to_move = sizeof(uint32_t);

      // Instead of allocating a new temp, simply reuse one of the registers being used
      // for argument passing.
      RegStorage temp = TargetReg(kArg3, kNotWide);

      // Now load the argument VR and store to the outs.
      Load32Disp(TargetPtrReg(kSp), current_src_offset, temp);
      Store32Disp(TargetPtrReg(kSp), current_dest_offset, temp);

      current_src_offset += bytes_to_move;
      current_dest_offset += bytes_to_move;
      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
    }
    DCHECK_EQ(regs_left_to_pass_via_stack, 0);
  }

  // Now handle the arguments that did not get mapped to registers.
  if (in_to_reg_storage_mapping.IsThereStackMapped()) {
    RegStorage regWide = TargetReg(kArg3, kWide);
    for (int i = start_index; i <= last_mapped_in + regs_left_to_pass_via_stack; i++) {
      RegLocation rl_arg = info->args[i];
      rl_arg = UpdateRawLoc(rl_arg);
      RegStorage reg = in_to_reg_storage_mapping.Get(i);
      if (!reg.Valid()) {
        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);

        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          if (rl_arg.wide) {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
            } else {
              LoadValueDirectWideFixed(rl_arg, regWide);
              StoreBaseDisp(TargetPtrReg(kSp), out_offset, regWide, k64, kNotVolatile);
            }
          } else {
            if (rl_arg.location == kLocPhysReg) {
              if (rl_arg.ref) {
                StoreRefDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, kNotVolatile);
              } else {
                StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
              }
            } else {
              if (rl_arg.ref) {
                RegStorage regSingle = TargetReg(kArg2, kRef);
                LoadValueDirectFixed(rl_arg, regSingle);
                StoreRefDisp(TargetPtrReg(kSp), out_offset, regSingle, kNotVolatile);
              } else {
                RegStorage regSingle = TargetReg(kArg2, kNotWide);
                LoadValueDirectFixed(rl_arg, regSingle);
                StoreBaseDisp(TargetPtrReg(kSp), out_offset, regSingle, k32, kNotVolatile);
              }
            }
          }
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      if (rl_arg.wide) {
        i++;
      }
    }
  }

  // Finish with mapped registers
  for (int i = start_index; i <= last_mapped_in; i++) {
    RegLocation rl_arg = info->args[i];
    rl_arg = UpdateRawLoc(rl_arg);
    RegStorage reg = in_to_reg_storage_mapping.Get(i);
    if (reg.Valid()) {
      if (rl_arg.wide) {
        LoadValueDirectWideFixed(rl_arg, reg);
      } else {
        LoadValueDirectFixed(rl_arg, reg);
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
    if (rl_arg.wide) {
      i++;
    }
  }

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
    } else {
      *pcrLabel = nullptr;
      // In lieu of generating a check for kArg1 being null, we need to
      // perform a load when doing implicit checks.
      RegStorage tmp = AllocTemp();
      Load32Disp(TargetReg(kArg1, kRef), 0, tmp);
      MarkPossibleNullPointerException(info->opt_flags);
      FreeTemp(tmp);
    }
  }
  return call_state;
}

}  // namespace art