blob: be3cd8e142074b9550d14d4b518d470914fb4123 [file] [log] [blame]
Matteo Franchin43ec8732014-03-31 15:00:14 +01001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "codegen_arm64.h"
18
19#include <inttypes.h>
20
21#include <string>
22
23#include "dex/compiler_internals.h"
24#include "dex/quick/mir_to_lir-inl.h"
buzbeeb5860fb2014-06-21 15:31:01 -070025#include "dex/reg_storage_eq.h"
Matteo Franchin43ec8732014-03-31 15:00:14 +010026
27namespace art {
28
// Register pools used to initialize the Arm64 register allocator.
// Note: the zero registers (wzr/xzr) appear in the core pools but are also
// listed as reserved, so the allocator never hands them out.
static constexpr RegStorage core_regs_arr[] =
    {rs_w0, rs_w1, rs_w2, rs_w3, rs_w4, rs_w5, rs_w6, rs_w7,
     rs_w8, rs_w9, rs_w10, rs_w11, rs_w12, rs_w13, rs_w14, rs_w15,
     rs_w16, rs_w17, rs_w18, rs_w19, rs_w20, rs_w21, rs_w22, rs_w23,
     rs_w24, rs_w25, rs_w26, rs_w27, rs_w28, rs_w29, rs_w30, rs_w31,
     rs_wzr};
static constexpr RegStorage core64_regs_arr[] =
    {rs_x0, rs_x1, rs_x2, rs_x3, rs_x4, rs_x5, rs_x6, rs_x7,
     rs_x8, rs_x9, rs_x10, rs_x11, rs_x12, rs_x13, rs_x14, rs_x15,
     rs_x16, rs_x17, rs_x18, rs_x19, rs_x20, rs_x21, rs_x22, rs_x23,
     rs_x24, rs_x25, rs_x26, rs_x27, rs_x28, rs_x29, rs_x30, rs_x31,
     rs_xzr};
static constexpr RegStorage sp_regs_arr[] =
    {rs_f0, rs_f1, rs_f2, rs_f3, rs_f4, rs_f5, rs_f6, rs_f7,
     rs_f8, rs_f9, rs_f10, rs_f11, rs_f12, rs_f13, rs_f14, rs_f15,
     rs_f16, rs_f17, rs_f18, rs_f19, rs_f20, rs_f21, rs_f22, rs_f23,
     rs_f24, rs_f25, rs_f26, rs_f27, rs_f28, rs_f29, rs_f30, rs_f31};
static constexpr RegStorage dp_regs_arr[] =
    {rs_d0, rs_d1, rs_d2, rs_d3, rs_d4, rs_d5, rs_d6, rs_d7,
     rs_d8, rs_d9, rs_d10, rs_d11, rs_d12, rs_d13, rs_d14, rs_d15,
     rs_d16, rs_d17, rs_d18, rs_d19, rs_d20, rs_d21, rs_d22, rs_d23,
     rs_d24, rs_d25, rs_d26, rs_d27, rs_d28, rs_d29, rs_d30, rs_d31};
// Registers withheld from allocation: suspend counter, thread (self) pointer,
// stack pointer, link register and the zero register.
static constexpr RegStorage reserved_regs_arr[] =
    {rs_wSUSPEND, rs_wSELF, rs_wsp, rs_wLR, rs_wzr};
static constexpr RegStorage reserved64_regs_arr[] =
    {rs_xSUSPEND, rs_xSELF, rs_sp, rs_xLR, rs_xzr};
// TUNING: Are there too many temp registers and too few promotion targets?
// This definition needs to match runtime.cc, the quick entry assembly and the JNI compiler.
// Note: we are not able to call a C function directly if it does not match the C ABI.
// Currently, rs_rA64_SELF is not a callee-save register, which does not match the C ABI.
static constexpr RegStorage core_temps_arr[] =
    {rs_w0, rs_w1, rs_w2, rs_w3, rs_w4, rs_w5, rs_w6, rs_w7,
     rs_w8, rs_w9, rs_w10, rs_w11, rs_w12, rs_w13, rs_w14, rs_w15, rs_w16,
     rs_w17};
static constexpr RegStorage core64_temps_arr[] =
    {rs_x0, rs_x1, rs_x2, rs_x3, rs_x4, rs_x5, rs_x6, rs_x7,
     rs_x8, rs_x9, rs_x10, rs_x11, rs_x12, rs_x13, rs_x14, rs_x15, rs_x16,
     rs_x17};
// FP temps exclude f8-f15/d8-d15: those are the callee-save FP registers.
static constexpr RegStorage sp_temps_arr[] =
    {rs_f0, rs_f1, rs_f2, rs_f3, rs_f4, rs_f5, rs_f6, rs_f7,
     rs_f16, rs_f17, rs_f18, rs_f19, rs_f20, rs_f21, rs_f22, rs_f23,
     rs_f24, rs_f25, rs_f26, rs_f27, rs_f28, rs_f29, rs_f30, rs_f31};
static constexpr RegStorage dp_temps_arr[] =
    {rs_d0, rs_d1, rs_d2, rs_d3, rs_d4, rs_d5, rs_d6, rs_d7,
     rs_d16, rs_d17, rs_d18, rs_d19, rs_d20, rs_d21, rs_d22, rs_d23,
     rs_d24, rs_d25, rs_d26, rs_d27, rs_d28, rs_d29, rs_d30, rs_d31};

// ArrayRef views over the pools above; these are what the RegisterPool
// constructor consumes (see CompilerInitializeRegAlloc()).
static constexpr ArrayRef<const RegStorage> core_regs(core_regs_arr);
static constexpr ArrayRef<const RegStorage> core64_regs(core64_regs_arr);
static constexpr ArrayRef<const RegStorage> sp_regs(sp_regs_arr);
static constexpr ArrayRef<const RegStorage> dp_regs(dp_regs_arr);
static constexpr ArrayRef<const RegStorage> reserved_regs(reserved_regs_arr);
static constexpr ArrayRef<const RegStorage> reserved64_regs(reserved64_regs_arr);
static constexpr ArrayRef<const RegStorage> core_temps(core_temps_arr);
static constexpr ArrayRef<const RegStorage> core64_temps(core64_temps_arr);
static constexpr ArrayRef<const RegStorage> sp_temps(sp_temps_arr);
static constexpr ArrayRef<const RegStorage> dp_temps(dp_temps_arr);
Matteo Franchin43ec8732014-03-31 15:00:14 +010086
// Location of the 32-bit core C return value (w0).
RegLocation Arm64Mir2Lir::LocCReturn() {
  return arm_loc_c_return;
}
90
// Location of an object-reference C return value.
RegLocation Arm64Mir2Lir::LocCReturnRef() {
  return arm_loc_c_return_ref;
}
94
// Location of a 64-bit core C return value.
RegLocation Arm64Mir2Lir::LocCReturnWide() {
  return arm_loc_c_return_wide;
}
98
// Location of a single-precision FP C return value (s0).
RegLocation Arm64Mir2Lir::LocCReturnFloat() {
  return arm_loc_c_return_float;
}
102
// Location of a double-precision FP C return value (d0).
RegLocation Arm64Mir2Lir::LocCReturnDouble() {
  return arm_loc_c_return_double;
}
106
// Return a target-dependent special register.
// Registers that Arm64 does not expose (kPc, kHiddenFpArg, kCount, or any
// unknown value) map to InvalidReg().
RegStorage Arm64Mir2Lir::TargetReg(SpecialTargetRegister reg) {
  RegStorage res_reg = RegStorage::InvalidReg();
  switch (reg) {
    case kSelf: res_reg = rs_xSELF; break;
    case kSuspend: res_reg = rs_xSUSPEND; break;
    case kLr: res_reg = rs_xLR; break;
    case kPc: res_reg = RegStorage::InvalidReg(); break;  // PC is not directly addressable.
    case kSp: res_reg = rs_sp; break;
    case kArg0: res_reg = rs_x0; break;
    case kArg1: res_reg = rs_x1; break;
    case kArg2: res_reg = rs_x2; break;
    case kArg3: res_reg = rs_x3; break;
    case kArg4: res_reg = rs_x4; break;
    case kArg5: res_reg = rs_x5; break;
    case kArg6: res_reg = rs_x6; break;
    case kArg7: res_reg = rs_x7; break;
    case kFArg0: res_reg = rs_f0; break;
    case kFArg1: res_reg = rs_f1; break;
    case kFArg2: res_reg = rs_f2; break;
    case kFArg3: res_reg = rs_f3; break;
    case kFArg4: res_reg = rs_f4; break;
    case kFArg5: res_reg = rs_f5; break;
    case kFArg6: res_reg = rs_f6; break;
    case kFArg7: res_reg = rs_f7; break;
    case kRet0: res_reg = rs_x0; break;
    case kRet1: res_reg = rs_x1; break;
    case kInvokeTgt: res_reg = rs_xLR; break;
    case kHiddenArg: res_reg = rs_x12; break;
    case kHiddenFpArg: res_reg = RegStorage::InvalidReg(); break;
    case kCount: res_reg = RegStorage::InvalidReg(); break;
    default: res_reg = RegStorage::InvalidReg();
  }
  return res_reg;
}
142
/*
 * Decode the register id. This routine makes assumptions on the encoding made by RegStorage.
 * Returns a single-bit resource mask for |reg|: core registers at their
 * register number, FP registers offset by kArm64FPReg0. The zero register
 * contributes no resource at all.
 */
ResourceMask Arm64Mir2Lir::GetRegMaskCommon(const RegStorage& reg) const {
  // TODO(Arm64): this function depends too much on the internal RegStorage encoding. Refactor.

  // Check if the shape mask is zero (i.e. invalid).
  if (UNLIKELY(reg == rs_wzr || reg == rs_xzr)) {
    // The zero register is not a true register. It is just an immediate zero.
    return kEncodeNone;
  }

  return ResourceMask::Bit(
      // FP register starts at bit position 32.
      (reg.IsFloat() ? kArm64FPReg0 : 0) + reg.GetRegNum());
}
159
// The PC is not modeled as a trackable resource on Arm64, so it has no
// use/def encoding.
ResourceMask Arm64Mir2Lir::GetPCUseDefEncoding() const {
  return kEncodeNone;
}
163
// Arm64 specific setup. TODO: inline?:
// Adds the target-specific SP/LR resource bits to |use_mask|/|def_mask|
// based on the instruction's encoding |flags|.
void Arm64Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags,
                                            ResourceMask* use_mask, ResourceMask* def_mask) {
  DCHECK_EQ(cu_->instruction_set, kArm64);
  DCHECK(!lir->flags.use_def_invalid);

  // These flags are somewhat uncommon - bypass if we can.
  if ((flags & (REG_DEF_SP | REG_USE_SP | REG_DEF_LR)) != 0) {
    if (flags & REG_DEF_SP) {
      def_mask->SetBit(kArm64RegSP);
    }

    if (flags & REG_USE_SP) {
      use_mask->SetBit(kArm64RegSP);
    }

    if (flags & REG_DEF_LR) {
      def_mask->SetBit(kArm64RegLR);
    }
  }
}
185
186ArmConditionCode Arm64Mir2Lir::ArmConditionEncoding(ConditionCode ccode) {
187 ArmConditionCode res;
188 switch (ccode) {
189 case kCondEq: res = kArmCondEq; break;
190 case kCondNe: res = kArmCondNe; break;
191 case kCondCs: res = kArmCondCs; break;
192 case kCondCc: res = kArmCondCc; break;
193 case kCondUlt: res = kArmCondCc; break;
194 case kCondUge: res = kArmCondCs; break;
195 case kCondMi: res = kArmCondMi; break;
196 case kCondPl: res = kArmCondPl; break;
197 case kCondVs: res = kArmCondVs; break;
198 case kCondVc: res = kArmCondVc; break;
199 case kCondHi: res = kArmCondHi; break;
200 case kCondLs: res = kArmCondLs; break;
201 case kCondGe: res = kArmCondGe; break;
202 case kCondLt: res = kArmCondLt; break;
203 case kCondGt: res = kArmCondGt; break;
204 case kCondLe: res = kArmCondLe; break;
205 case kCondAl: res = kArmCondAl; break;
206 case kCondNv: res = kArmCondNv; break;
207 default:
208 LOG(FATAL) << "Bad condition code " << ccode;
209 res = static_cast<ArmConditionCode>(0); // Quiet gcc
210 }
211 return res;
212}
213
// Shift mnemonics, indexed by the 2-bit shift-type field produced by
// EncodeShift() (see DecodeRegExtendOrShift below).
static const char *shift_names[4] = {
  "lsl",
  "lsr",
  "asr",
  "ror"
};

// Extension mnemonics, indexed by the 3-bit extend-type field produced by
// EncodeExtend() (see DecodeRegExtendOrShift below).
static const char* extend_names[8] = {
  "uxtb",
  "uxth",
  "uxtw",
  "uxtx",
  "sxtb",
  "sxth",
  "sxtw",
  "sxtx",
};
231
232/* Decode and print a register extension (e.g. ", uxtb #1") */
233static void DecodeRegExtendOrShift(int operand, char *buf, size_t buf_size) {
234 if ((operand & (1 << 6)) == 0) {
235 const char *shift_name = shift_names[(operand >> 7) & 0x3];
236 int amount = operand & 0x3f;
237 snprintf(buf, buf_size, ", %s #%d", shift_name, amount);
238 } else {
239 const char *extend_name = extend_names[(operand >> 3) & 0x7];
240 int amount = operand & 0x7;
241 if (amount == 0) {
242 snprintf(buf, buf_size, ", %s", extend_name);
243 } else {
244 snprintf(buf, buf_size, ", %s #%d", extend_name, amount);
245 }
246 }
247}
248
249#define BIT_MASK(w) ((UINT64_C(1) << (w)) - UINT64_C(1))
250
251static uint64_t RotateRight(uint64_t value, unsigned rotate, unsigned width) {
252 DCHECK_LE(width, 64U);
253 rotate &= 63;
254 value = value & BIT_MASK(width);
255 return ((value & BIT_MASK(rotate)) << (width - rotate)) | (value >> rotate);
256}
257
// Replicate the low |width| bits of |value| across a 32-bit (or, if
// |is_wide|, 64-bit) register image. |width| must be a power of two that
// divides the register size; the DCHECK verifies the doubling loop lands
// exactly on reg_size.
static uint64_t RepeatBitsAcrossReg(bool is_wide, uint64_t value, unsigned width) {
  unsigned i;
  unsigned reg_size = (is_wide) ? 64 : 32;
  uint64_t result = value & BIT_MASK(width);
  for (i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  DCHECK_EQ(i, reg_size);
  return result;
}
268
/**
 * @brief Decode an immediate in the form required by logical instructions.
 *
 * @param is_wide Whether @p value encodes a 64-bit (as opposed to 32-bit) immediate.
 * @param value The encoded logical immediates that is to be decoded.
 * @return The decoded logical immediate.
 * @note This is the inverse of Arm64Mir2Lir::EncodeLogicalImmediate().
 */
uint64_t Arm64Mir2Lir::DecodeLogicalImmediate(bool is_wide, int value) {
  unsigned n = (value >> 12) & 0x01;
  unsigned imm_r = (value >> 6) & 0x3f;
  unsigned imm_s = (value >> 0) & 0x3f;

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  //  (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.

  if (n == 1) {
    DCHECK_NE(imm_s, 0x3fU);
    uint64_t bits = BIT_MASK(imm_s + 1);
    return RotateRight(bits, imm_r, 64);
  } else {
    DCHECK_NE((imm_s >> 1), 0x1fU);
    // The leading zero in imm_s selects the element size (32 down to 2 bits).
    for (unsigned width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        unsigned mask = (unsigned)(width - 1);
        DCHECK_NE((imm_s & mask), mask);
        uint64_t bits = BIT_MASK((imm_s & mask) + 1);
        return RepeatBitsAcrossReg(is_wide, RotateRight(bits, imm_r & mask, width), width);
      }
    }
  }
  // Unreachable for well-formed encodings (the loop above always finds a
  // zero bit when the DCHECK preconditions hold).
  return 0;
}
315
/**
 * @brief Decode an 8-bit single point number encoded with EncodeImmSingle().
 *
 * The byte packs a sign bit (bit 7), a biased 3-bit exponent (bits 4-6) and
 * a 4-bit fraction (bits 0-3) with an implicit leading one.
 */
static float DecodeImmSingle(uint8_t small_float) {
  const int fraction = (small_float & 0x0f) + 0x10;            // Implicit leading 1: [16, 31].
  const int exponent = (((small_float >> 4) & 0x7) + 4) & 0x7;  // Biased, wraps modulo 8.
  // fraction << exponent <= 31 << 7, exactly representable; 0.0078125f == 2^-7.
  const float magnitude = static_cast<float>(fraction << exponent) * 0.0078125f;
  return ((small_float & 0x80) != 0) ? -magnitude : magnitude;
}
326
// Condition-code mnemonics, indexed by ArmConditionCode (used by the 'c'
// format conversion in BuildInsnString).
static const char* cc_names[] = {"eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
                                 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"};
Matteo Franchin43ec8732014-03-31 15:00:14 +0100329/*
330 * Interpret a format string and build a string no longer than size
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100331 * See format key in assemble_arm64.cc.
Matteo Franchin43ec8732014-03-31 15:00:14 +0100332 */
333std::string Arm64Mir2Lir::BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) {
334 std::string buf;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100335 const char* fmt_end = &fmt[strlen(fmt)];
336 char tbuf[256];
337 const char* name;
338 char nc;
339 while (fmt < fmt_end) {
340 int operand;
341 if (*fmt == '!') {
342 fmt++;
343 DCHECK_LT(fmt, fmt_end);
344 nc = *fmt++;
345 if (nc == '!') {
346 strcpy(tbuf, "!");
347 } else {
348 DCHECK_LT(fmt, fmt_end);
349 DCHECK_LT(static_cast<unsigned>(nc-'0'), 4U);
350 operand = lir->operands[nc-'0'];
351 switch (*fmt++) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100352 case 'e': {
353 // Omit ", uxtw #0" in strings like "add w0, w1, w3, uxtw #0" and
354 // ", uxtx #0" in strings like "add x0, x1, x3, uxtx #0"
355 int omittable = ((IS_WIDE(lir->opcode)) ? EncodeExtend(kA64Uxtw, 0) :
356 EncodeExtend(kA64Uxtw, 0));
357 if (LIKELY(operand == omittable)) {
358 strcpy(tbuf, "");
359 } else {
360 DecodeRegExtendOrShift(operand, tbuf, arraysize(tbuf));
361 }
362 }
363 break;
364 case 'o':
365 // Omit ", lsl #0"
366 if (LIKELY(operand == EncodeShift(kA64Lsl, 0))) {
Matteo Franchin43ec8732014-03-31 15:00:14 +0100367 strcpy(tbuf, "");
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100368 } else {
369 DecodeRegExtendOrShift(operand, tbuf, arraysize(tbuf));
Matteo Franchin43ec8732014-03-31 15:00:14 +0100370 }
371 break;
372 case 'B':
373 switch (operand) {
374 case kSY:
375 name = "sy";
376 break;
377 case kST:
378 name = "st";
379 break;
380 case kISH:
381 name = "ish";
382 break;
383 case kISHST:
384 name = "ishst";
385 break;
386 case kNSH:
387 name = "nsh";
388 break;
389 case kNSHST:
390 name = "shst";
391 break;
392 default:
393 name = "DecodeError2";
394 break;
395 }
396 strcpy(tbuf, name);
397 break;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100398 case 's':
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100399 snprintf(tbuf, arraysize(tbuf), "s%d", operand & RegStorage::kRegNumMask);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100400 break;
401 case 'S':
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100402 snprintf(tbuf, arraysize(tbuf), "d%d", operand & RegStorage::kRegNumMask);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100403 break;
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100404 case 'f':
405 snprintf(tbuf, arraysize(tbuf), "%c%d", (IS_FWIDE(lir->opcode)) ? 'd' : 's',
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100406 operand & RegStorage::kRegNumMask);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100407 break;
408 case 'l': {
409 bool is_wide = IS_WIDE(lir->opcode);
410 uint64_t imm = DecodeLogicalImmediate(is_wide, operand);
411 snprintf(tbuf, arraysize(tbuf), "%" PRId64 " (%#" PRIx64 ")", imm, imm);
412 }
413 break;
414 case 'I':
415 snprintf(tbuf, arraysize(tbuf), "%f", DecodeImmSingle(operand));
Matteo Franchin43ec8732014-03-31 15:00:14 +0100416 break;
417 case 'M':
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100418 if (LIKELY(operand == 0))
419 strcpy(tbuf, "");
420 else
421 snprintf(tbuf, arraysize(tbuf), ", lsl #%d", 16*operand);
422 break;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100423 case 'd':
424 snprintf(tbuf, arraysize(tbuf), "%d", operand);
425 break;
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100426 case 'w':
427 if (LIKELY(operand != rwzr))
428 snprintf(tbuf, arraysize(tbuf), "w%d", operand & RegStorage::kRegNumMask);
429 else
430 strcpy(tbuf, "wzr");
431 break;
432 case 'W':
433 if (LIKELY(operand != rwsp))
434 snprintf(tbuf, arraysize(tbuf), "w%d", operand & RegStorage::kRegNumMask);
435 else
436 strcpy(tbuf, "wsp");
437 break;
438 case 'x':
439 if (LIKELY(operand != rxzr))
440 snprintf(tbuf, arraysize(tbuf), "x%d", operand & RegStorage::kRegNumMask);
441 else
442 strcpy(tbuf, "xzr");
443 break;
444 case 'X':
445 if (LIKELY(operand != rsp))
446 snprintf(tbuf, arraysize(tbuf), "x%d", operand & RegStorage::kRegNumMask);
447 else
448 strcpy(tbuf, "sp");
449 break;
450 case 'D':
451 snprintf(tbuf, arraysize(tbuf), "%d", operand*((IS_WIDE(lir->opcode)) ? 8 : 4));
Matteo Franchin43ec8732014-03-31 15:00:14 +0100452 break;
453 case 'E':
454 snprintf(tbuf, arraysize(tbuf), "%d", operand*4);
455 break;
456 case 'F':
457 snprintf(tbuf, arraysize(tbuf), "%d", operand*2);
458 break;
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100459 case 'G':
460 if (LIKELY(operand == 0))
461 strcpy(tbuf, "");
462 else
463 strcpy(tbuf, (IS_WIDE(lir->opcode)) ? ", lsl #3" : ", lsl #2");
464 break;
Matteo Franchin43ec8732014-03-31 15:00:14 +0100465 case 'c':
466 strcpy(tbuf, cc_names[operand]);
467 break;
468 case 't':
469 snprintf(tbuf, arraysize(tbuf), "0x%08" PRIxPTR " (L%p)",
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100470 reinterpret_cast<uintptr_t>(base_addr) + lir->offset + (operand << 2),
Matteo Franchin43ec8732014-03-31 15:00:14 +0100471 lir->target);
472 break;
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100473 case 'r': {
474 bool is_wide = IS_WIDE(lir->opcode);
475 if (LIKELY(operand != rwzr && operand != rxzr)) {
476 snprintf(tbuf, arraysize(tbuf), "%c%d", (is_wide) ? 'x' : 'w',
477 operand & RegStorage::kRegNumMask);
478 } else {
479 strcpy(tbuf, (is_wide) ? "xzr" : "wzr");
480 }
481 }
Matteo Franchin43ec8732014-03-31 15:00:14 +0100482 break;
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100483 case 'R': {
484 bool is_wide = IS_WIDE(lir->opcode);
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100485 if (LIKELY(operand != rwsp && operand != rsp)) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100486 snprintf(tbuf, arraysize(tbuf), "%c%d", (is_wide) ? 'x' : 'w',
487 operand & RegStorage::kRegNumMask);
488 } else {
489 strcpy(tbuf, (is_wide) ? "sp" : "wsp");
490 }
491 }
Matteo Franchin43ec8732014-03-31 15:00:14 +0100492 break;
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100493 case 'p':
494 snprintf(tbuf, arraysize(tbuf), ".+%d (addr %#" PRIxPTR ")", 4*operand,
495 reinterpret_cast<uintptr_t>(base_addr) + lir->offset + 4*operand);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100496 break;
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100497 case 'T':
498 if (LIKELY(operand == 0))
499 strcpy(tbuf, "");
500 else if (operand == 1)
501 strcpy(tbuf, ", lsl #12");
502 else
503 strcpy(tbuf, ", DecodeError3");
Matteo Franchin43ec8732014-03-31 15:00:14 +0100504 break;
505 default:
506 strcpy(tbuf, "DecodeError1");
507 break;
508 }
509 buf += tbuf;
510 }
511 } else {
512 buf += *fmt++;
513 }
514 }
515 return buf;
516}
517
// Pretty-print the resource |mask| to the log, prefixed with |prefix|.
// |arm_lir| is only consulted for Dalvik-register alias info and may be null.
void Arm64Mir2Lir::DumpResourceMask(LIR* arm_lir, const ResourceMask& mask, const char* prefix) {
  char buf[256];
  buf[0] = 0;

  if (mask.Equals(kEncodeAll)) {
    strcpy(buf, "all");
  } else {
    char num[8];
    int i;

    // Individual register bits first.
    for (i = 0; i < kArm64RegEnd; i++) {
      if (mask.HasBit(i)) {
        snprintf(num, arraysize(num), "%d ", i);
        strcat(buf, num);
      }
    }

    if (mask.HasBit(ResourceMask::kCCode)) {
      strcat(buf, "cc ");
    }
    if (mask.HasBit(ResourceMask::kFPStatus)) {
      strcat(buf, "fpcc ");
    }

    /* Memory bits */
    if (arm_lir && (mask.HasBit(ResourceMask::kDalvikReg))) {
      snprintf(buf + strlen(buf), arraysize(buf) - strlen(buf), "dr%d%s",
               DECODE_ALIAS_INFO_REG(arm_lir->flags.alias_info),
               DECODE_ALIAS_INFO_WIDE(arm_lir->flags.alias_info) ? "(+1)" : "");
    }
    if (mask.HasBit(ResourceMask::kLiteral)) {
      strcat(buf, "lit ");
    }

    if (mask.HasBit(ResourceMask::kHeapRef)) {
      strcat(buf, "heap ");
    }
    if (mask.HasBit(ResourceMask::kMustNotAlias)) {
      strcat(buf, "noalias ");
    }
  }
  if (buf[0]) {
    LOG(INFO) << prefix << ": " << buf;
  }
}
563
// True iff |lir| is the A64 unconditional branch opcode.
bool Arm64Mir2Lir::IsUnconditionalBranch(LIR* lir) {
  return (lir->opcode == kA64B1t);
}
567
// Arm64 handles volatile loads/stores of every size directly.
bool Arm64Mir2Lir::SupportsVolatileLoadStore(OpSize size) {
  return true;
}
571
572RegisterClass Arm64Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
573 if (UNLIKELY(is_volatile)) {
574 // On arm64, fp register load/store is atomic only for single bytes.
575 if (size != kSignedByte && size != kUnsignedByte) {
buzbeea0cd2d72014-06-01 09:33:49 -0700576 return (size == kReference) ? kRefReg : kCoreReg;
Vladimir Marko674744e2014-04-24 15:18:26 +0100577 }
578 }
579 return RegClassBySize(size);
580}
581
// Construct the Arm64 code generator. The loop is a startup sanity check
// that EncodingMap entries appear in opcode order.
Arm64Mir2Lir::Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
    : Mir2Lir(cu, mir_graph, arena) {
  // Sanity check - make sure encoding map lines up.
  for (int i = 0; i < kA64Last; i++) {
    if (UNWIDE(Arm64Mir2Lir::EncodingMap[i].opcode) != i) {
      LOG(FATAL) << "Encoding order for " << Arm64Mir2Lir::EncodingMap[i].name
                 << " is wrong: expecting " << i << ", seeing "
                 << static_cast<int>(Arm64Mir2Lir::EncodingMap[i].opcode);
    }
  }
}
593
// Factory entry point used by the compiler driver to create the Arm64 backend.
Mir2Lir* Arm64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
                            ArenaAllocator* const arena) {
  return new Arm64Mir2Lir(cu, mir_graph, arena);
}
598
// Build the register pool and set up aliasing between register views: each
// single-precision float aliases the low half of its double, and each 32-bit
// W register aliases the low half of its X register.
void Arm64Mir2Lir::CompilerInitializeRegAlloc() {
  reg_pool_ = new (arena_) RegisterPool(this, arena_, core_regs, core64_regs, sp_regs, dp_regs,
                                        reserved_regs, reserved64_regs, core_temps, core64_temps,
                                        sp_temps, dp_temps);

  // Target-specific adjustments.
  // Alias single precision float registers to corresponding double registers.
  GrowableArray<RegisterInfo*>::Iterator fp_it(&reg_pool_->sp_regs_);
  for (RegisterInfo* info = fp_it.Next(); info != nullptr; info = fp_it.Next()) {
    int fp_reg_num = info->GetReg().GetRegNum();
    RegStorage dp_reg = RegStorage::FloatSolo64(fp_reg_num);
    RegisterInfo* dp_reg_info = GetRegInfo(dp_reg);
    // Double precision register's master storage should refer to itself.
    DCHECK_EQ(dp_reg_info, dp_reg_info->Master());
    // Redirect single precision's master storage to master.
    info->SetMaster(dp_reg_info);
    // Singles should show a single 32-bit mask bit, at first referring to the low half.
    DCHECK_EQ(info->StorageMask(), 0x1U);
  }

  // Alias 32bit W registers to corresponding 64bit X registers.
  GrowableArray<RegisterInfo*>::Iterator w_it(&reg_pool_->core_regs_);
  for (RegisterInfo* info = w_it.Next(); info != nullptr; info = w_it.Next()) {
    int x_reg_num = info->GetReg().GetRegNum();
    RegStorage x_reg = RegStorage::Solo64(x_reg_num);
    RegisterInfo* x_reg_info = GetRegInfo(x_reg);
    // 64bit X register's master storage should refer to itself.
    DCHECK_EQ(x_reg_info, x_reg_info->Master());
    // Redirect 32bit W master storage to 64bit X.
    info->SetMaster(x_reg_info);
    // 32bit W should show a single 32-bit mask bit, at first referring to the low half.
    DCHECK_EQ(info->StorageMask(), 0x1U);
  }

  // Don't start allocating temps at r0/s0/d0 or you may clobber return regs in early-exit methods.
  // TODO: adjust when we roll to hard float calling convention.
  reg_pool_->next_core_reg_ = 2;
  reg_pool_->next_sp_reg_ = 0;
  reg_pool_->next_dp_reg_ = 0;
}
639
/*
 * TUNING: is true leaf?  Can't just use METHOD_IS_LEAF to determine as some
 * instructions might call out to C/assembly helper functions.  Until
 * machinery is in place, always spill lr.
 */

// Unconditionally add the link register to the core spill mask (see TUNING
// note above).
void Arm64Mir2Lir::AdjustSpillMask() {
  core_spill_mask_ |= (1 << rs_xLR.GetRegNum());
  num_core_spills_++;
}
650
Matteo Franchin43ec8732014-03-31 15:00:14 +0100651/* Clobber all regs that might be used by an external C call */
652void Arm64Mir2Lir::ClobberCallerSave() {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100653 Clobber(rs_x0);
654 Clobber(rs_x1);
655 Clobber(rs_x2);
656 Clobber(rs_x3);
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100657 Clobber(rs_x4);
658 Clobber(rs_x5);
659 Clobber(rs_x6);
660 Clobber(rs_x7);
661 Clobber(rs_x8);
662 Clobber(rs_x9);
663 Clobber(rs_x10);
664 Clobber(rs_x11);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100665 Clobber(rs_x12);
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100666 Clobber(rs_x13);
667 Clobber(rs_x14);
668 Clobber(rs_x15);
669 Clobber(rs_x16);
670 Clobber(rs_x17);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100671 Clobber(rs_x30);
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100672
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100673 Clobber(rs_f0);
674 Clobber(rs_f1);
675 Clobber(rs_f2);
676 Clobber(rs_f3);
677 Clobber(rs_f4);
678 Clobber(rs_f5);
679 Clobber(rs_f6);
680 Clobber(rs_f7);
Matteo Franchinbc6d1972014-05-13 12:33:28 +0100681 Clobber(rs_f16);
682 Clobber(rs_f17);
683 Clobber(rs_f18);
684 Clobber(rs_f19);
685 Clobber(rs_f20);
686 Clobber(rs_f21);
687 Clobber(rs_f22);
688 Clobber(rs_f23);
689 Clobber(rs_f24);
690 Clobber(rs_f25);
691 Clobber(rs_f26);
692 Clobber(rs_f27);
693 Clobber(rs_f28);
694 Clobber(rs_f29);
695 Clobber(rs_f30);
696 Clobber(rs_f31);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100697}
698
// Alternate wide return location (x2/x3 instead of x0/x1); marks both
// registers clobbered and in use.
RegLocation Arm64Mir2Lir::GetReturnWideAlt() {
  RegLocation res = LocCReturnWide();
  res.reg.SetReg(rx2);
  res.reg.SetHighReg(rx3);
  Clobber(rs_x2);
  Clobber(rs_x3);
  MarkInUse(rs_x2);
  MarkInUse(rs_x3);
  MarkWide(res.reg);
  return res;
}
710
// Alternate return location (x1 instead of x0); marks the register
// clobbered and in use.
RegLocation Arm64Mir2Lir::GetReturnAlt() {
  RegLocation res = LocCReturn();
  res.reg.SetReg(rx1);
  Clobber(rs_x1);
  MarkInUse(rs_x1);
  return res;
}
718
719/* To be used when explicitly managing register use */
720void Arm64Mir2Lir::LockCallTemps() {
buzbee33ae5582014-06-12 14:56:32 -0700721 // TODO: needs cleanup.
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100722 LockTemp(rs_x0);
723 LockTemp(rs_x1);
724 LockTemp(rs_x2);
725 LockTemp(rs_x3);
buzbee33ae5582014-06-12 14:56:32 -0700726 LockTemp(rs_x4);
727 LockTemp(rs_x5);
728 LockTemp(rs_x6);
729 LockTemp(rs_x7);
730 LockTemp(rs_f0);
731 LockTemp(rs_f1);
732 LockTemp(rs_f2);
733 LockTemp(rs_f3);
734 LockTemp(rs_f4);
735 LockTemp(rs_f5);
736 LockTemp(rs_f6);
737 LockTemp(rs_f7);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100738}
739
740/* To be used when explicitly managing register use */
741void Arm64Mir2Lir::FreeCallTemps() {
buzbee33ae5582014-06-12 14:56:32 -0700742 // TODO: needs cleanup.
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100743 FreeTemp(rs_x0);
744 FreeTemp(rs_x1);
745 FreeTemp(rs_x2);
746 FreeTemp(rs_x3);
buzbee33ae5582014-06-12 14:56:32 -0700747 FreeTemp(rs_x4);
748 FreeTemp(rs_x5);
749 FreeTemp(rs_x6);
750 FreeTemp(rs_x7);
751 FreeTemp(rs_f0);
752 FreeTemp(rs_f1);
753 FreeTemp(rs_f2);
754 FreeTemp(rs_f3);
755 FreeTemp(rs_f4);
756 FreeTemp(rs_f5);
757 FreeTemp(rs_f6);
758 FreeTemp(rs_f7);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100759}
760
// 32-bit thread-offset variant: never valid on arm64 (a 64-bit target);
// only the ThreadOffset<8> overload below may be used.
RegStorage Arm64Mir2Lir::LoadHelper(ThreadOffset<4> offset) {
  UNIMPLEMENTED(FATAL) << "Should not be called.";
  return RegStorage::InvalidReg();
}
765
// Loads the 64-bit entrypoint stored at |offset| within the current
// Thread (rs_xSELF) into lr, and returns lr as the register now holding
// the helper's address.
RegStorage Arm64Mir2Lir::LoadHelper(ThreadOffset<8> offset) {
  // TODO(Arm64): use LoadWordDisp instead.
  //   e.g. LoadWordDisp(rs_rA64_SELF, offset.Int32Value(), rs_rA64_LR);
  LoadBaseDisp(rs_xSELF, offset.Int32Value(), rs_xLR, k64, kNotVolatile);
  return rs_xLR;
}
772
773LIR* Arm64Mir2Lir::CheckSuspendUsingLoad() {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100774 RegStorage tmp = rs_x0;
Zheng Xubaa7c882014-06-30 14:26:50 +0800775 LoadWordDisp(rs_xSELF, Thread::ThreadSuspendTriggerOffset<8>().Int32Value(), tmp);
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100776 LIR* load2 = LoadWordDisp(tmp, 0, tmp);
Matteo Franchin43ec8732014-03-31 15:00:14 +0100777 return load2;
778}
779
// Returns the encoder flags for |opcode|. Pseudo-LIR opcodes have no
// encoding entry, hence the DCHECK. UNWIDE maps a wide-variant opcode
// back to its base form so both share one EncodingMap entry.
uint64_t Arm64Mir2Lir::GetTargetInstFlags(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return Arm64Mir2Lir::EncodingMap[UNWIDE(opcode)].flags;
}
784
// Returns the mnemonic for |opcode| (real instructions only; see the
// flags accessor above for the UNWIDE rationale).
const char* Arm64Mir2Lir::GetTargetInstName(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return Arm64Mir2Lir::EncodingMap[UNWIDE(opcode)].name;
}
789
// Returns the disassembly format string for |opcode| from the encoding
// table (real instructions only).
const char* Arm64Mir2Lir::GetTargetInstFmt(int opcode) {
  DCHECK(!IsPseudoLirOp(opcode));
  return Arm64Mir2Lir::EncodingMap[UNWIDE(opcode)].fmt;
}
794
buzbee33ae5582014-06-12 14:56:32 -0700795RegStorage Arm64Mir2Lir::InToRegStorageArm64Mapper::GetNextReg(bool is_double_or_float,
Zheng Xu949cd972014-06-23 18:33:08 +0800796 bool is_wide,
797 bool is_ref) {
buzbee33ae5582014-06-12 14:56:32 -0700798 const RegStorage coreArgMappingToPhysicalReg[] =
799 {rs_x1, rs_x2, rs_x3, rs_x4, rs_x5, rs_x6, rs_x7};
800 const int coreArgMappingToPhysicalRegSize =
801 sizeof(coreArgMappingToPhysicalReg) / sizeof(RegStorage);
802 const RegStorage fpArgMappingToPhysicalReg[] =
803 {rs_f0, rs_f1, rs_f2, rs_f3, rs_f4, rs_f5, rs_f6, rs_f7};
804 const int fpArgMappingToPhysicalRegSize =
805 sizeof(fpArgMappingToPhysicalReg) / sizeof(RegStorage);
806
807 RegStorage result = RegStorage::InvalidReg();
808 if (is_double_or_float) {
809 if (cur_fp_reg_ < fpArgMappingToPhysicalRegSize) {
Zheng Xu949cd972014-06-23 18:33:08 +0800810 DCHECK(!is_ref);
buzbee33ae5582014-06-12 14:56:32 -0700811 result = fpArgMappingToPhysicalReg[cur_fp_reg_++];
812 if (result.Valid()) {
813 // TODO: switching between widths remains a bit ugly. Better way?
814 int res_reg = result.GetReg();
815 result = is_wide ? RegStorage::FloatSolo64(res_reg) : RegStorage::FloatSolo32(res_reg);
816 }
817 }
818 } else {
819 if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
820 result = coreArgMappingToPhysicalReg[cur_core_reg_++];
821 if (result.Valid()) {
822 // TODO: switching between widths remains a bit ugly. Better way?
823 int res_reg = result.GetReg();
Zheng Xu949cd972014-06-23 18:33:08 +0800824 DCHECK(!(is_wide && is_ref));
825 result = (is_wide || is_ref) ? RegStorage::Solo64(res_reg) : RegStorage::Solo32(res_reg);
buzbee33ae5582014-06-12 14:56:32 -0700826 }
827 }
828 }
829 return result;
830}
831
832RegStorage Arm64Mir2Lir::InToRegStorageMapping::Get(int in_position) {
833 DCHECK(IsInitialized());
834 auto res = mapping_.find(in_position);
835 return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
836}
837
838void Arm64Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int count,
839 InToRegStorageMapper* mapper) {
840 DCHECK(mapper != nullptr);
841 max_mapped_in_ = -1;
842 is_there_stack_mapped_ = false;
843 for (int in_position = 0; in_position < count; in_position++) {
Zheng Xu949cd972014-06-23 18:33:08 +0800844 RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp,
845 arg_locs[in_position].wide,
846 arg_locs[in_position].ref);
buzbee33ae5582014-06-12 14:56:32 -0700847 if (reg.Valid()) {
848 mapping_[in_position] = reg;
Zheng Xu949cd972014-06-23 18:33:08 +0800849 if (arg_locs[in_position].wide) {
buzbee33ae5582014-06-12 14:56:32 -0700850 // We covered 2 args, so skip the next one
851 in_position++;
852 }
Zheng Xu949cd972014-06-23 18:33:08 +0800853 max_mapped_in_ = std::max(max_mapped_in_, in_position);
buzbee33ae5582014-06-12 14:56:32 -0700854 } else {
855 is_there_stack_mapped_ = true;
856 }
857 }
858 initialized_ = true;
859}
860
861
862// Deprecate. Use the new mechanism.
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100863// TODO(Arm64): reuse info in QuickArgumentVisitor?
864static RegStorage GetArgPhysicalReg(RegLocation* loc, int* num_gpr_used, int* num_fpr_used,
865 OpSize* op_size) {
866 if (loc->fp) {
867 int n = *num_fpr_used;
868 if (n < 8) {
869 *num_fpr_used = n + 1;
870 RegStorage::RegStorageKind reg_kind;
871 if (loc->wide) {
872 *op_size = kDouble;
873 reg_kind = RegStorage::k64BitSolo;
874 } else {
875 *op_size = kSingle;
876 reg_kind = RegStorage::k32BitSolo;
877 }
878 return RegStorage(RegStorage::kValid | reg_kind | RegStorage::kFloatingPoint | n);
879 }
880 } else {
881 int n = *num_gpr_used;
buzbee33ae5582014-06-12 14:56:32 -0700882 if (n < 8) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100883 *num_gpr_used = n + 1;
buzbeeb5860fb2014-06-21 15:31:01 -0700884 if (loc->wide || loc->ref) {
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100885 *op_size = k64;
886 return RegStorage::Solo64(n);
887 } else {
888 *op_size = k32;
889 return RegStorage::Solo32(n);
890 }
891 }
892 }
Ian Rogers54874942014-06-10 16:31:03 -0700893 *op_size = kWord;
Matteo Franchine45fb9e2014-05-06 10:10:30 +0100894 return RegStorage::InvalidReg();
895}
896
// Returns the physical register assigned to in-argument |arg_num|, or an
// invalid RegStorage if that argument is passed on the stack. The mapping
// is built lazily on first use from the method's in RegLocations.
RegStorage Arm64Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
  if (!in_to_reg_storage_mapping_.IsInitialized()) {
    // The ins occupy the highest-numbered Dalvik vregs of the frame.
    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
    RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];

    InToRegStorageArm64Mapper mapper;
    in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
  }
  return in_to_reg_storage_mapping_.Get(arg_num);
}
907
908
/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame. Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments
 * with one location record per word of argument.
 */
void Arm64Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  // Core argument allocation starts at 1: slot 0 (kArg0) holds the
  // incoming method reference, handled separately below.
  int num_gpr_used = 1;
  int num_fpr_used = 0;

  /*
   * Dummy up a RegLocation for the incoming StackReference<mirror::ArtMethod>
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */
  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.reg = TargetReg(kArg0);
  rl_src.home = false;
  MarkLive(rl_src);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush
  if (rl_method.location == kLocPhysReg) {
    StoreRefDisp(TargetReg(kSp), 0, TargetReg(kArg0), kNotVolatile);
  }

  if (cu_->num_ins == 0) {
    return;
  }

  // Handle dalvik registers.
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  // The ins occupy the highest-numbered Dalvik vregs of the frame.
  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
  for (int i = 0; i < cu_->num_ins; i++) {
    RegLocation* t_loc = &ArgLocs[i];
    OpSize op_size;
    // Ask the (deprecated) per-argument allocator which physical register,
    // if any, this in-argument arrived in.
    RegStorage reg = GetArgPhysicalReg(t_loc, &num_gpr_used, &num_fpr_used, &op_size);

    if (reg.Valid()) {
      // If arriving in register.

      // We have already updated the arg location with promoted info
      // so we can be based on it.
      if (t_loc->location == kLocPhysReg) {
        // Just copy it.
        OpRegCopy(t_loc->reg, reg);
      } else {
        // Needs flush.
        if (t_loc->ref) {
          StoreRefDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, kNotVolatile);
        } else {
          StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, t_loc->wide ? k64 : k32,
                        kNotVolatile);
        }
      }
    } else {
      // If arriving in frame & promoted.
      if (t_loc->location == kLocPhysReg) {
        if (t_loc->ref) {
          LoadRefDisp(TargetReg(kSp), SRegOffset(start_vreg + i), t_loc->reg, kNotVolatile);
        } else {
          LoadBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), t_loc->reg,
                       t_loc->wide ? k64 : k32, kNotVolatile);
        }
      }
    }
    if (t_loc->wide) {
      // Increment i to skip the next one.
      i++;
    }
    // NOTE(review): dead code kept from an earlier version of this port;
    // candidate for deletion.
    // if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
    //   OpRegCopy(RegStorage::Solo32(v_map->core_reg), reg);
    // } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
    //   OpRegCopy(RegStorage::Solo32(v_map->fp_reg), reg);
    // } else {
    //   StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i), reg, op_size, kNotVolatile);
    //   if (reg.Is64Bit()) {
    //     if (SRegOffset(start_vreg + i) + 4 != SRegOffset(start_vreg + i + 1)) {
    //       LOG(FATAL) << "64 bit value stored in non-consecutive 4 bytes slots";
    //     }
    //     i += 1;
    //   }
    // }
    // } else {
    //   // If arriving in frame & promoted
    //   if (v_map->core_location == kLocPhysReg) {
    //     LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
    //                  RegStorage::Solo32(v_map->core_reg));
    //   }
    //   if (v_map->fp_location == kLocPhysReg) {
    //     LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i), RegStorage::Solo32(v_map->fp_reg));
    //   }
    // }
  }
}
1005
buzbee33ae5582014-06-12 14:56:32 -07001006/*
1007 * Load up to 5 arguments, the first three of which will be in
1008 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer,
1009 * and as part of the load sequence, it must be replaced with
1010 * the target method pointer.
1011 */
1012int Arm64Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
1013 int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
1014 const MethodReference& target_method,
1015 uint32_t vtable_idx, uintptr_t direct_code,
1016 uintptr_t direct_method, InvokeType type, bool skip_this) {
1017 return GenDalvikArgsRange(info,
1018 call_state, pcrLabel, next_call_insn,
1019 target_method,
1020 vtable_idx, direct_code,
1021 direct_method, type, skip_this);
1022}
1023
/*
 * May have 0+ arguments (also used for jumbo). Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying. This
 * applies to arg3 and above (see below).
 *
 * FIXME: update comments.
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *
 */
int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                     LIR** pcrLabel, NextCallInsn next_call_insn,
                                     const MethodReference& target_method,
                                     uint32_t vtable_idx, uintptr_t direct_code,
                                     uintptr_t direct_method, InvokeType type, bool skip_this) {
  /* If no arguments, just return */
  if (info->num_arg_words == 0)
    return call_state;

  const int start_index = skip_this ? 1 : 0;

  // Decide, per argument, whether it travels in a register or on the stack.
  InToRegStorageArm64Mapper mapper;
  InToRegStorageMapping in_to_reg_storage_mapping;
  in_to_reg_storage_mapping.Initialize(info->args, info->num_arg_words, &mapper);
  const int last_mapped_in = in_to_reg_storage_mapping.GetMaxMappedIn();
  int regs_left_to_pass_via_stack = info->num_arg_words - (last_mapped_in + 1);

  // First of all, check whether it makes sense to use bulk copying.
  // Bulk copying is done only for the range case.
  // TODO: make a constant instead of 2
  if (info->is_range && regs_left_to_pass_via_stack >= 2) {
    // Scan the rest of the args - if in phys_reg flush to memory
    for (int next_arg = last_mapped_in + 1; next_arg < info->num_arg_words;) {
      RegLocation loc = info->args[next_arg];
      if (loc.wide) {
        loc = UpdateLocWide(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
        }
        next_arg += 2;
      } else {
        loc = UpdateLoc(loc);
        if (loc.location == kLocPhysReg) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          if (loc.ref) {
            StoreRefDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
          } else {
            StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32, kNotVolatile);
          }
        }
        next_arg++;
      }
    }

    // Logic below assumes that Method pointer is at offset zero from SP.
    DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);

    // The rest can be copied together
    int start_offset = SRegOffset(info->args[last_mapped_in + 1].s_reg_low);
    int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + 1,
                                                   cu_->instruction_set);

    int current_src_offset = start_offset;
    int current_dest_offset = outs_offset;

    // Only dalvik regs are accessed in this loop; no next_call_insn() calls.
    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    while (regs_left_to_pass_via_stack > 0) {
      /*
       * TODO: Improve by adding block copy for large number of arguments.  This
       * should be done, if possible, as a target-depending helper.  For now, just
       * copy a Dalvik vreg at a time.
       */
      // Moving 32-bits via general purpose register.
      size_t bytes_to_move = sizeof(uint32_t);

      // Instead of allocating a new temp, simply reuse one of the registers being used
      // for argument passing.
      RegStorage temp = TargetReg(kArg3, false);

      // Now load the argument VR and store to the outs.
      Load32Disp(TargetReg(kSp), current_src_offset, temp);
      Store32Disp(TargetReg(kSp), current_dest_offset, temp);

      current_src_offset += bytes_to_move;
      current_dest_offset += bytes_to_move;
      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
    }
    DCHECK_EQ(regs_left_to_pass_via_stack, 0);
  }

  // Now handle rest not registers if they are
  if (in_to_reg_storage_mapping.IsThereStackMapped()) {
    // kArg2/kArg3 double as scratch registers for the stores; they are
    // reloaded with their real argument values in the final loop below.
    RegStorage regSingle = TargetReg(kArg2);
    RegStorage regWide = RegStorage::Solo64(TargetReg(kArg3).GetReg());
    for (int i = start_index; i <= last_mapped_in + regs_left_to_pass_via_stack; i++) {
      RegLocation rl_arg = info->args[i];
      rl_arg = UpdateRawLoc(rl_arg);
      RegStorage reg = in_to_reg_storage_mapping.Get(i);
      if (!reg.Valid()) {
        // Stack-passed argument: store it to its out slot, directly from its
        // current physical register when it has one, otherwise via a scratch.
        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);

        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          if (rl_arg.wide) {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
            } else {
              LoadValueDirectWideFixed(rl_arg, regWide);
              StoreBaseDisp(TargetReg(kSp), out_offset, regWide, k64, kNotVolatile);
            }
          } else {
            if (rl_arg.location == kLocPhysReg) {
              if (rl_arg.ref) {
                StoreRefDisp(TargetReg(kSp), out_offset, rl_arg.reg, kNotVolatile);
              } else {
                StoreBaseDisp(TargetReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
              }
            } else {
              if (rl_arg.ref) {
                LoadValueDirectFixed(rl_arg, regSingle);
                StoreRefDisp(TargetReg(kSp), out_offset, regSingle, kNotVolatile);
              } else {
                LoadValueDirectFixed(rl_arg, As32BitReg(regSingle));
                StoreBaseDisp(TargetReg(kSp), out_offset, As32BitReg(regSingle), k32, kNotVolatile);
              }
            }
          }
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      if (rl_arg.wide) {
        i++;
      }
    }
  }

  // Finish with mapped registers
  for (int i = start_index; i <= last_mapped_in; i++) {
    RegLocation rl_arg = info->args[i];
    rl_arg = UpdateRawLoc(rl_arg);
    RegStorage reg = in_to_reg_storage_mapping.Get(i);
    if (reg.Valid()) {
      if (rl_arg.wide) {
        LoadValueDirectWideFixed(rl_arg, reg);
      } else {
        LoadValueDirectFixed(rl_arg, reg);
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
    if (rl_arg.wide) {
      i++;
    }
  }

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    // Null-check the receiver (kArg1) for instance invokes.
    if (cu_->compiler_driver->GetCompilerOptions().GetExplicitNullChecks()) {
      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1), info->opt_flags);
    } else {
      *pcrLabel = nullptr;
      // In lieu of generating a check for kArg1 being null, we need to
      // perform a load when doing implicit checks.
      RegStorage tmp = AllocTemp();
      Load32Disp(TargetReg(kArg1), 0, tmp);
      MarkPossibleNullPointerException(info->opt_flags);
      FreeTemp(tmp);
    }
  }
  return call_state;
}
1206
Matteo Franchin43ec8732014-03-31 15:00:14 +01001207} // namespace art