/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_x86.h"

#include "base/logging.h"
#include "dex/mir_graph.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/dataflow_iterator-inl.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/reg_storage_eq.h"
#include "driver/compiler_driver.h"
#include "x86_lir.h"

namespace art {

/* This file contains codegen for the X86 ISA */

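// Copies a value between the FP and/or core register files, picking the SSE move
// variant from the operand classes: movsd for double<->double, movss for
// single<->single, and movd (kX86MovdxrRR / kX86MovdrxRR) when exactly one side
// is a core register.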
LIR* X86Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
  int opcode;
  /* Must both be DOUBLE or both not DOUBLE. */
  DCHECK(r_dest.IsFloat() || r_src.IsFloat());
  DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble());
  if (r_dest.IsDouble()) {
    opcode = kX86MovsdRR;
  } else {
    if (r_dest.IsSingle()) {
      if (r_src.IsSingle()) {
        opcode = kX86MovssRR;
      } else {  // Fpr <- Gpr
        opcode = kX86MovdxrRR;
      }
    } else {  // Gpr <- Fpr
      DCHECK(r_src.IsSingle()) << "Raw: 0x" << std::hex << r_src.GetRawBits();
      opcode = kX86MovdrxRR;
    }
  }
  DCHECK_NE((EncodingMap[opcode].flags & IS_BINARY_OP), 0ULL);
  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
  if (r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

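// Queries used to decide whether a constant can be materialized cheaply inline
// (rather than, e.g., being placed in the literal pool): any 32-/64-bit integer
// immediate can be encoded directly on x86, while FP constants are treated as
// inexpensive only when they are zero (a zero can be produced with an xor).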
bool X86Mir2Lir::InexpensiveConstantInt(int32_t value ATTRIBUTE_UNUSED) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
  return value == 0;
}

bool X86Mir2Lir::InexpensiveConstantLong(int64_t value ATTRIBUTE_UNUSED) {
  return true;
}

bool X86Mir2Lir::InexpensiveConstantDouble(int64_t value) {
  return value == 0;
}

/*
 * Load an immediate using a shortcut if possible; otherwise
 * grab from the per-translation literal pool. If target is
 * a high register, build constant into a low register and copy.
 *
 * No additional register clobbering operation is performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp or
 * 2) The codegen is under fixed register usage
 */
LIR* X86Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
  RegStorage r_dest_save = r_dest;
  if (r_dest.IsFloat()) {
    if (value == 0) {
      return NewLIR2(kX86XorpsRR, r_dest.GetReg(), r_dest.GetReg());
    }
    r_dest = AllocTemp();
  }

  LIR *res;
  if (value == 0) {
    res = NewLIR2(kX86Xor32RR, r_dest.GetReg(), r_dest.GetReg());
  } else {
    // Note, there is no byte immediate form of a 32 bit immediate move.
    // 64-bit immediates are not supported by the LIR structure.
    res = NewLIR2(kX86Mov32RI, r_dest.GetReg(), value);
  }

  if (r_dest_save.IsFloat()) {
    NewLIR2(kX86MovdxrRR, r_dest_save.GetReg(), r_dest.GetReg());
    FreeTemp(r_dest);
  }

  return res;
}

LIR* X86Mir2Lir::OpUnconditionalBranch(LIR* target) {
  LIR* res = NewLIR1(kX86Jmp8, 0 /* offset to be patched during assembly */);
  res->target = target;
  return res;
}

LIR* X86Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
  LIR* branch = NewLIR2(kX86Jcc8, 0 /* offset to be patched */,
                        X86ConditionEncoding(cc));
  branch->target = target;
  return branch;
}

LIR* X86Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpNeg: opcode = r_dest_src.Is64Bit() ? kX86Neg64R : kX86Neg32R; break;
    case kOpNot: opcode = r_dest_src.Is64Bit() ? kX86Not64R : kX86Not32R; break;
    case kOpRev: opcode = r_dest_src.Is64Bit() ? kX86Bswap64R : kX86Bswap32R; break;
    case kOpBlx: opcode = kX86CallR; break;
    default:
      LOG(FATAL) << "Bad case in OpReg " << op;
  }
  return NewLIR1(opcode, r_dest_src.GetReg());
}

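// Applies a two-address ALU operation with an immediate operand. Where the ISA
// provides one, the sign-extended 8-bit immediate form is chosen for small
// values; some kinds (kOp2Byte/kOp2Short/kOp2Char/kOpNeg) are folded into a
// plain move of the adjusted constant.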
LIR* X86Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
  X86OpCode opcode = kX86Bkpt;
  bool byte_imm = IS_SIMM8(value);
  DCHECK(!r_dest_src1.IsFloat());
  if (r_dest_src1.Is64Bit()) {
    switch (op) {
      case kOpAdd: opcode = byte_imm ? kX86Add64RI8 : kX86Add64RI; break;
      case kOpSub: opcode = byte_imm ? kX86Sub64RI8 : kX86Sub64RI; break;
      case kOpLsl: opcode = kX86Sal64RI; break;
      case kOpLsr: opcode = kX86Shr64RI; break;
      case kOpAsr: opcode = kX86Sar64RI; break;
      case kOpCmp: opcode = byte_imm ? kX86Cmp64RI8 : kX86Cmp64RI; break;
      default:
        LOG(FATAL) << "Bad case in OpRegImm (64-bit) " << op;
    }
  } else {
    switch (op) {
      case kOpLsl: opcode = kX86Sal32RI; break;
      case kOpLsr: opcode = kX86Shr32RI; break;
      case kOpAsr: opcode = kX86Sar32RI; break;
      case kOpAdd: opcode = byte_imm ? kX86Add32RI8 : kX86Add32RI; break;
      case kOpOr:  opcode = byte_imm ? kX86Or32RI8 : kX86Or32RI; break;
      case kOpAdc: opcode = byte_imm ? kX86Adc32RI8 : kX86Adc32RI; break;
      // case kOpSbb: opcode = kX86Sbb32RI; break;
      case kOpAnd: opcode = byte_imm ? kX86And32RI8 : kX86And32RI; break;
      case kOpSub: opcode = byte_imm ? kX86Sub32RI8 : kX86Sub32RI; break;
      case kOpXor: opcode = byte_imm ? kX86Xor32RI8 : kX86Xor32RI; break;
      case kOpCmp: opcode = byte_imm ? kX86Cmp32RI8 : kX86Cmp32RI; break;
      case kOpMov:
        /*
         * Moving the constant zero into a register can be specialized as an xor of the register.
         * However, that sets eflags while the move does not. For that reason here, always do
         * the move and if the caller is flexible, they should be calling LoadConstantNoClobber instead.
         */
        opcode = kX86Mov32RI;
        break;
      case kOpMul:
        opcode = byte_imm ? kX86Imul32RRI8 : kX86Imul32RRI;
        return NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), value);
      case kOp2Byte:
        opcode = kX86Mov32RI;
        value = static_cast<int8_t>(value);
        break;
      case kOp2Short:
        opcode = kX86Mov32RI;
        value = static_cast<int16_t>(value);
        break;
      case kOp2Char:
        opcode = kX86Mov32RI;
        value = static_cast<uint16_t>(value);
        break;
      case kOpNeg:
        opcode = kX86Mov32RI;
        value = -value;
        break;
      default:
        LOG(FATAL) << "Bad case in OpRegImm " << op;
    }
  }
  return NewLIR2(opcode, r_dest_src1.GetReg(), value);
}

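// Register-to-register ALU operation. Unary kinds (mvn/neg/rev/revsh) are
// expanded into a copy followed by the corresponding single-register op; shift
// kinds require the count to already be in rCX; kOp2Byte falls back to a shift
// pair when the source register has no byte-addressable form.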
LIR* X86Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
  bool is64Bit = r_dest_src1.Is64Bit();
  X86OpCode opcode = kX86Nop;
  bool src2_must_be_cx = false;
  switch (op) {
    // X86 unary opcodes
    case kOpMvn:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNot, r_dest_src1);
    case kOpNeg:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpNeg, r_dest_src1);
    case kOpRev:
      OpRegCopy(r_dest_src1, r_src2);
      return OpReg(kOpRev, r_dest_src1);
    case kOpRevsh:
      OpRegCopy(r_dest_src1, r_src2);
      OpReg(kOpRev, r_dest_src1);
      return OpRegImm(kOpAsr, r_dest_src1, 16);
    // X86 binary opcodes
    case kOpSub: opcode = is64Bit ? kX86Sub64RR : kX86Sub32RR; break;
    case kOpSbc: opcode = is64Bit ? kX86Sbb64RR : kX86Sbb32RR; break;
    case kOpLsl: opcode = is64Bit ? kX86Sal64RC : kX86Sal32RC; src2_must_be_cx = true; break;
    case kOpLsr: opcode = is64Bit ? kX86Shr64RC : kX86Shr32RC; src2_must_be_cx = true; break;
    case kOpAsr: opcode = is64Bit ? kX86Sar64RC : kX86Sar32RC; src2_must_be_cx = true; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64RR : kX86Mov32RR; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64RR : kX86Cmp32RR; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64RR : kX86Add32RR; break;
    case kOpAdc: opcode = is64Bit ? kX86Adc64RR : kX86Adc32RR; break;
    case kOpAnd: opcode = is64Bit ? kX86And64RR : kX86And32RR; break;
    case kOpOr:  opcode = is64Bit ? kX86Or64RR : kX86Or32RR; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64RR : kX86Xor32RR; break;
    case kOp2Byte:
      // TODO: there are several instances of this check. A utility function perhaps?
      // TODO: Similar to Arm's reg < 8 check. Perhaps add attribute checks to RegStorage?
      // Use shifts instead of a byte operand if the source can't be byte accessed.
      if (r_src2.GetRegNum() >= rs_rX86_SP_32.GetRegNum()) {
        NewLIR2(is64Bit ? kX86Mov64RR : kX86Mov32RR, r_dest_src1.GetReg(), r_src2.GetReg());
        NewLIR2(is64Bit ? kX86Sal64RI : kX86Sal32RI, r_dest_src1.GetReg(), is64Bit ? 56 : 24);
        return NewLIR2(is64Bit ? kX86Sar64RI : kX86Sar32RI, r_dest_src1.GetReg(),
                       is64Bit ? 56 : 24);
      } else {
        opcode = is64Bit ? kX86Bkpt : kX86Movsx8RR;
      }
      break;
    case kOp2Short: opcode = is64Bit ? kX86Bkpt : kX86Movsx16RR; break;
    case kOp2Char: opcode = is64Bit ? kX86Bkpt : kX86Movzx16RR; break;
    case kOpMul: opcode = is64Bit ? kX86Bkpt : kX86Imul32RR; break;
    default:
      LOG(FATAL) << "Bad case in OpRegReg " << op;
      break;
  }
  CHECK(!src2_must_be_cx || r_src2.GetReg() == rs_rCX.GetReg());
  return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}

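// Loads from [r_base + offset] into a core or XMM register according to the
// requested MoveType; the CHECKs verify that the destination register class
// matches the move kind.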
LIR* X86Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
  DCHECK(!r_base.IsFloat());
  X86OpCode opcode = kX86Nop;
  int dest = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
  switch (move_type) {
    case kMov8GP:
      CHECK(!r_dest.IsFloat());
      opcode = kX86Mov8RM;
      break;
    case kMov16GP:
      CHECK(!r_dest.IsFloat());
      opcode = kX86Mov16RM;
      break;
    case kMov32GP:
      CHECK(!r_dest.IsFloat());
      opcode = kX86Mov32RM;
      break;
    case kMov32FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovssRM;
      break;
    case kMov64FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovsdRM;
      break;
    case kMovU128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovupsRM;
      break;
    case kMovA128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovapsRM;
      break;
    case kMovLo128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovlpsRM;
      break;
    case kMovHi128FP:
      CHECK(r_dest.IsFloat());
      opcode = kX86MovhpsRM;
      break;
    case kMov64GP:
    case kMovLo64FP:
    case kMovHi64FP:
    default:
      LOG(FATAL) << "Bad case in OpMovRegMem";
      break;
  }

  return NewLIR3(opcode, dest, r_base.GetReg(), offset);
}

LIR* X86Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
  DCHECK(!r_base.IsFloat());
  int src = r_src.IsPair() ? r_src.GetLowReg() : r_src.GetReg();

  X86OpCode opcode = kX86Nop;
  switch (move_type) {
    case kMov8GP:
      CHECK(!r_src.IsFloat());
      opcode = kX86Mov8MR;
      break;
    case kMov16GP:
      CHECK(!r_src.IsFloat());
      opcode = kX86Mov16MR;
      break;
    case kMov32GP:
      CHECK(!r_src.IsFloat());
      opcode = kX86Mov32MR;
      break;
    case kMov32FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovssMR;
      break;
    case kMov64FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovsdMR;
      break;
    case kMovU128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovupsMR;
      break;
    case kMovA128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovapsMR;
      break;
    case kMovLo128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovlpsMR;
      break;
    case kMovHi128FP:
      CHECK(r_src.IsFloat());
      opcode = kX86MovhpsMR;
      break;
    case kMov64GP:
    case kMovLo64FP:
    case kMovHi64FP:
    default:
      LOG(FATAL) << "Bad case in OpMovMemReg";
      break;
  }

  return NewLIR3(opcode, r_base.GetReg(), offset, src);
}

LIR* X86Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
  // The only conditional reg to reg operation supported is Cmov.
  DCHECK_EQ(op, kOpCmov);
  DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
  return NewLIR3(r_dest.Is64Bit() ? kX86Cmov64RRC : kX86Cmov32RRC, r_dest.GetReg(),
                 r_src.GetReg(), X86ConditionEncoding(cc));
}

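// ALU operation with a memory source operand at [r_base + offset]. When the
// reference is into the Dalvik frame (mem_ref_type_ == kDalvikReg), the access
// is additionally annotated with the vreg being read.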
LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegStorage r_base, int offset) {
  bool is64Bit = r_dest.Is64Bit();
  X86OpCode opcode = kX86Nop;
  switch (op) {
    // X86 binary opcodes
    case kOpSub: opcode = is64Bit ? kX86Sub64RM : kX86Sub32RM; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64RM : kX86Mov32RM; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64RM : kX86Cmp32RM; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64RM : kX86Add32RM; break;
    case kOpAnd: opcode = is64Bit ? kX86And64RM : kX86And32RM; break;
    case kOpOr:  opcode = is64Bit ? kX86Or64RM : kX86Or32RM; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64RM : kX86Xor32RM; break;
    case kOp2Byte: opcode = kX86Movsx8RM; break;
    case kOp2Short: opcode = kX86Movsx16RM; break;
    case kOp2Char: opcode = kX86Movzx16RM; break;
    case kOpMul:
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), offset);
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    DCHECK_EQ(r_base, cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32);
    AnnotateDalvikRegAccess(l, offset >> 2, true /* is_load */, false /* is_64bit */);
  }
  return l;
}

LIR* X86Mir2Lir::OpMemReg(OpKind op, RegLocation rl_dest, int r_value) {
  DCHECK_NE(rl_dest.location, kLocPhysReg);
  int displacement = SRegOffset(rl_dest.s_reg_low);
  bool is64Bit = rl_dest.wide != 0;
  X86OpCode opcode = kX86Nop;
  switch (op) {
    case kOpSub: opcode = is64Bit ? kX86Sub64MR : kX86Sub32MR; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64MR : kX86Mov32MR; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64MR : kX86Cmp32MR; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64MR : kX86Add32MR; break;
    case kOpAnd: opcode = is64Bit ? kX86And64MR : kX86And32MR; break;
    case kOpOr:  opcode = is64Bit ? kX86Or64MR : kX86Or32MR; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64MR : kX86Xor32MR; break;
    case kOpLsl: opcode = is64Bit ? kX86Sal64MC : kX86Sal32MC; break;
    case kOpLsr: opcode = is64Bit ? kX86Shr64MC : kX86Shr32MC; break;
    case kOpAsr: opcode = is64Bit ? kX86Sar64MC : kX86Sar32MC; break;
    default:
      LOG(FATAL) << "Bad case in OpMemReg " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, rs_rX86_SP_32.GetReg(), displacement, r_value);
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
    AnnotateDalvikRegAccess(l, displacement >> 2, false /* is_load */, is64Bit /* is_64bit */);
  }
  return l;
}

LIR* X86Mir2Lir::OpRegMem(OpKind op, RegStorage r_dest, RegLocation rl_value) {
  DCHECK_NE(rl_value.location, kLocPhysReg);
  bool is64Bit = r_dest.Is64Bit();
  int displacement = SRegOffset(rl_value.s_reg_low);
  X86OpCode opcode = kX86Nop;
  switch (op) {
    case kOpSub: opcode = is64Bit ? kX86Sub64RM : kX86Sub32RM; break;
    case kOpMov: opcode = is64Bit ? kX86Mov64RM : kX86Mov32RM; break;
    case kOpCmp: opcode = is64Bit ? kX86Cmp64RM : kX86Cmp32RM; break;
    case kOpAdd: opcode = is64Bit ? kX86Add64RM : kX86Add32RM; break;
    case kOpAnd: opcode = is64Bit ? kX86And64RM : kX86And32RM; break;
    case kOpOr:  opcode = is64Bit ? kX86Or64RM : kX86Or32RM; break;
    case kOpXor: opcode = is64Bit ? kX86Xor64RM : kX86Xor32RM; break;
    case kOpMul: opcode = is64Bit ? kX86Bkpt : kX86Imul32RM; break;
    default:
      LOG(FATAL) << "Bad case in OpRegMem " << op;
      break;
  }
  LIR *l = NewLIR3(opcode, r_dest.GetReg(), rs_rX86_SP_32.GetReg(), displacement);
  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    AnnotateDalvikRegAccess(l, displacement >> 2, true /* is_load */, is64Bit /* is_64bit */);
  }
  return l;
}

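// Three-operand form built from x86's two-address instructions: when possible
// an add is emitted as a single lea, otherwise one source is copied into the
// destination and the two-operand ops are reused, taking care with
// non-commutative kinds when the destination aliases the second source.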
LIR* X86Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
                             RegStorage r_src2) {
  bool is64Bit = r_dest.Is64Bit();
  if (r_dest != r_src1 && r_dest != r_src2) {
    if (op == kOpAdd) {  // lea special case, except can't encode rbp as base
      if (r_src1 == r_src2) {
        OpRegCopy(r_dest, r_src1);
        return OpRegImm(kOpLsl, r_dest, 1);
      } else if (r_src1 != rs_rBP) {
        return NewLIR5(is64Bit ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
                       r_src1.GetReg() /* base */, r_src2.GetReg() /* index */,
                       0 /* scale */, 0 /* disp */);
      } else {
        return NewLIR5(is64Bit ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
                       r_src2.GetReg() /* base */, r_src1.GetReg() /* index */,
                       0 /* scale */, 0 /* disp */);
      }
    } else {
      OpRegCopy(r_dest, r_src1);
      return OpRegReg(op, r_dest, r_src2);
    }
  } else if (r_dest == r_src1) {
    return OpRegReg(op, r_dest, r_src2);
  } else {  // r_dest == r_src2
    switch (op) {
      case kOpSub:  // non-commutative
        OpReg(kOpNeg, r_dest);
        op = kOpAdd;
        break;
      case kOpSbc:
      case kOpLsl: case kOpLsr: case kOpAsr: case kOpRor: {
        RegStorage t_reg = AllocTemp();
        OpRegCopy(t_reg, r_src1);
        OpRegReg(op, t_reg, r_src2);
        LIR* res = OpRegCopyNoInsert(r_dest, t_reg);
        AppendLIR(res);
        FreeTemp(t_reg);
        return res;
      }
      case kOpAdd:  // commutative
      case kOpOr:
      case kOpAdc:
      case kOpAnd:
      case kOpXor:
      case kOpMul:
        break;
      default:
        LOG(FATAL) << "Bad case in OpRegRegReg " << op;
    }
    return OpRegReg(op, r_dest, r_src1);
  }
}

LIR* X86Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src, int value) {
  if (op == kOpMul && !cu_->target64) {
    X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
    return NewLIR3(opcode, r_dest.GetReg(), r_src.GetReg(), value);
  } else if (op == kOpAnd && !cu_->target64) {
    if (value == 0xFF && r_src.Low4()) {
      return NewLIR2(kX86Movzx8RR, r_dest.GetReg(), r_src.GetReg());
    } else if (value == 0xFFFF) {
      return NewLIR2(kX86Movzx16RR, r_dest.GetReg(), r_src.GetReg());
    }
  }
  if (r_dest != r_src) {
    if ((false) && op == kOpLsl && value >= 0 && value <= 3) {  // lea shift special case
      // TODO: fix bug in LEA encoding when disp == 0
      return NewLIR5(kX86Lea32RA, r_dest.GetReg(), r5sib_no_base /* base */,
                     r_src.GetReg() /* index */, value /* scale */, 0 /* disp */);
    } else if (op == kOpAdd) {  // lea add special case
      return NewLIR5(r_dest.Is64Bit() ? kX86Lea64RA : kX86Lea32RA, r_dest.GetReg(),
                     r_src.GetReg() /* base */, rs_rX86_SP_32.GetReg()/*r4sib_no_index*/ /* index */,
                     0 /* scale */, value /* disp */);
    }
    OpRegCopy(r_dest, r_src);
  }
  return OpRegImm(op, r_dest, value);
}

LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<4> thread_offset) {
  DCHECK_EQ(kX86, cu_->instruction_set);
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallT; break;
    case kOpBx: opcode = kX86JmpT; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR1(opcode, thread_offset.Int32Value());
}

LIR* X86Mir2Lir::OpThreadMem(OpKind op, ThreadOffset<8> thread_offset) {
  DCHECK_EQ(kX86_64, cu_->instruction_set);
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallT; break;
    case kOpBx: opcode = kX86JmpT; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR1(opcode, thread_offset.Int32Value());
}

LIR* X86Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
  X86OpCode opcode = kX86Bkpt;
  switch (op) {
    case kOpBlx: opcode = kX86CallM; break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  return NewLIR2(opcode, r_base.GetReg(), disp);
}

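// Materializes a 64-bit constant. Core registers use xor/mov (including the
// 64-bit immediate forms on x86-64); double constants other than zero are
// loaded from the literal pool when a PC-relative base (or RIP addressing on
// 64-bit) is available, and are otherwise assembled in XMM via punpckldq or
// copied over from a core temp.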
LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  int32_t low_reg_val = r_dest.IsPair() ? r_dest.GetLowReg() : r_dest.GetReg();
  LIR *res;
  bool is_fp = r_dest.IsFloat();
  // TODO: clean this up once we fully recognize 64-bit storage containers.
  if (is_fp) {
    DCHECK(r_dest.IsDouble());
    if (value == 0) {
      return NewLIR2(kX86XorpdRR, low_reg_val, low_reg_val);
    } else if (pc_rel_base_reg_.Valid() || cu_->target64) {
      // We will load the value from the literal area.
      LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
      if (data_target == nullptr) {
        data_target = AddWideData(&literal_list_, val_lo, val_hi);
      }

      // Load the proper value from the literal area.
      // We don't know the proper offset for the value, so pick one that
      // will force a 4 byte offset. We will fix this up in the assembler
      // later to have the right value.
      ScopedMemRefType mem_ref_type(this, ResourceMask::kLiteral);
      if (cu_->target64) {
        res = NewLIR3(kX86MovsdRM, low_reg_val, kRIPReg, 256 /* bogus */);
      } else {
        // Get the PC to a register and get the anchor.
        LIR* anchor;
        RegStorage r_pc = GetPcAndAnchor(&anchor);

        res = LoadBaseDisp(r_pc, kDummy32BitOffset, RegStorage::FloatSolo64(low_reg_val),
                           kDouble, kNotVolatile);
        res->operands[4] = WrapPointer(anchor);
        if (IsTemp(r_pc)) {
          FreeTemp(r_pc);
        }
      }
      res->target = data_target;
      res->flags.fixup = kFixupLoad;
    } else {
      if (r_dest.IsPair()) {
        if (val_lo == 0) {
          res = NewLIR2(kX86XorpsRR, low_reg_val, low_reg_val);
        } else {
          res = LoadConstantNoClobber(RegStorage::FloatSolo32(low_reg_val), val_lo);
        }
        if (val_hi != 0) {
          RegStorage r_dest_hi = AllocTempDouble();
          LoadConstantNoClobber(r_dest_hi, val_hi);
          NewLIR2(kX86PunpckldqRR, low_reg_val, r_dest_hi.GetReg());
          FreeTemp(r_dest_hi);
        }
      } else {
        RegStorage r_temp = AllocTypedTempWide(false, kCoreReg);
        res = LoadConstantWide(r_temp, value);
        OpRegCopyWide(r_dest, r_temp);
        FreeTemp(r_temp);
      }
    }
  } else {
    if (r_dest.IsPair()) {
      res = LoadConstantNoClobber(r_dest.GetLow(), val_lo);
      LoadConstantNoClobber(r_dest.GetHigh(), val_hi);
    } else {
      if (value == 0) {
        res = NewLIR2(kX86Xor64RR, r_dest.GetReg(), r_dest.GetReg());
      } else if (value >= INT_MIN && value <= INT_MAX) {
        res = NewLIR2(kX86Mov64RI32, r_dest.GetReg(), val_lo);
      } else {
        res = NewLIR3(kX86Mov64RI64, r_dest.GetReg(), val_hi, val_lo);
      }
    }
  }
  return res;
}

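// Common load helper for [base + index * scale + displacement]. Wide loads on
// 32-bit targets may be split into a pair of 32-bit loads; the ordering (and a
// temp register, if needed) is chosen so that the base/index registers are not
// overwritten before they are used.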
LIR* X86Mir2Lir::LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                     int displacement, RegStorage r_dest, OpSize size) {
  LIR *load = nullptr;
  LIR *load2 = nullptr;
  bool is_array = r_index.Valid();
  bool pair = r_dest.IsPair();
  bool is64bit = ((size == k64) || (size == kDouble));
  X86OpCode opcode = kX86Nop;
  switch (size) {
    case k64:
    case kDouble:
      if (r_dest.IsFloat()) {
        opcode = is_array ? kX86MovsdRA : kX86MovsdRM;
      } else if (!pair) {
        opcode = is_array ? kX86Mov64RA : kX86Mov64RM;
      } else {
        opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      }
      // TODO: double store is to unaligned address
      DCHECK_ALIGNED(displacement, 4);
      break;
    case kWord:
      if (cu_->target64) {
        opcode = is_array ? kX86Mov64RA : kX86Mov64RM;
        CHECK_EQ(is_array, false);
        CHECK_EQ(r_dest.IsFloat(), false);
        break;
      }
      FALLTHROUGH_INTENDED;  // else fall-through to k32 case
    case k32:
    case kSingle:
    case kReference:  // TODO: update for reference decompression on 64-bit targets.
      opcode = is_array ? kX86Mov32RA : kX86Mov32RM;
      if (r_dest.IsFloat()) {
        opcode = is_array ? kX86MovssRA : kX86MovssRM;
        DCHECK(r_dest.IsFloat());
      }
      DCHECK_ALIGNED(displacement, 4);
      break;
    case kUnsignedHalf:
      opcode = is_array ? kX86Movzx16RA : kX86Movzx16RM;
      DCHECK_ALIGNED(displacement, 2);
      break;
    case kSignedHalf:
      opcode = is_array ? kX86Movsx16RA : kX86Movsx16RM;
      DCHECK_ALIGNED(displacement, 2);
      break;
    case kUnsignedByte:
      opcode = is_array ? kX86Movzx8RA : kX86Movzx8RM;
      break;
    case kSignedByte:
      opcode = is_array ? kX86Movsx8RA : kX86Movsx8RM;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseIndexedDispBody";
  }

  if (!is_array) {
    if (!pair) {
      load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
    } else {
      DCHECK(!r_dest.IsFloat());  // Make sure we're not still using a pair here.
      if (r_base == r_dest.GetLow()) {
        load = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
                       displacement + HIWORD_OFFSET);
        load2 = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
      } else {
        load = NewLIR3(opcode, r_dest.GetLowReg(), r_base.GetReg(), displacement + LOWORD_OFFSET);
        load2 = NewLIR3(opcode, r_dest.GetHighReg(), r_base.GetReg(),
                        displacement + HIWORD_OFFSET);
      }
    }
    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
      DCHECK_EQ(r_base, cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32);
      AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              true /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
                                true /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      load = NewLIR5(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                     displacement + LOWORD_OFFSET);
    } else {
      DCHECK(!r_dest.IsFloat());  // Make sure we're not still using a pair here.
      if (r_base == r_dest.GetLow()) {
        if (r_dest.GetHigh() == r_index) {
          // We can't use either register for the first load.
          RegStorage temp = AllocTemp();
          load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + HIWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + LOWORD_OFFSET);
          OpRegCopy(r_dest.GetHigh(), temp);
          FreeTemp(temp);
        } else {
          load = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + HIWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + LOWORD_OFFSET);
        }
      } else {
        if (r_dest.GetLow() == r_index) {
          // We can't use either register for the first load.
          RegStorage temp = AllocTemp();
          load = NewLIR5(opcode, temp.GetReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
          OpRegCopy(r_dest.GetLow(), temp);
          FreeTemp(temp);
        } else {
          load = NewLIR5(opcode, r_dest.GetLowReg(), r_base.GetReg(), r_index.GetReg(), scale,
                         displacement + LOWORD_OFFSET);
          load2 = NewLIR5(opcode, r_dest.GetHighReg(), r_base.GetReg(), r_index.GetReg(), scale,
                          displacement + HIWORD_OFFSET);
        }
      }
    }
  }

  // Always return first load generated as this might cause a fault if base is null.
  return load;
}

/* Load value from base + scaled index. */
LIR* X86Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                 int scale, OpSize size) {
  return LoadBaseIndexedDisp(r_base, r_index, scale, 0, r_dest, size);
}

LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                              OpSize size, VolatileKind is_volatile) {
  // LoadBaseDisp() will emit correct insn for atomic load on x86
  // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().

  LIR* load = LoadBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_dest,
                                  size);

  if (UNLIKELY(is_volatile == kVolatile)) {
    GenMemBarrier(kLoadAny);  // Only a scheduling barrier.
  }

  return load;
}

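// Common store helper, the mirror image of LoadBaseIndexedDisp. When the MIR
// carries the MIR_STORE_NON_TEMPORAL hint, eligible 32/64-bit moves are
// rewritten to their movnti forms.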
LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
                                      int displacement, RegStorage r_src, OpSize size,
                                      int opt_flags) {
  LIR *store = nullptr;
  LIR *store2 = nullptr;
  bool is_array = r_index.Valid();
  bool pair = r_src.IsPair();
  bool is64bit = (size == k64) || (size == kDouble);
  bool consider_non_temporal = false;

  X86OpCode opcode = kX86Nop;
  switch (size) {
    case k64:
      consider_non_temporal = true;
      FALLTHROUGH_INTENDED;
    case kDouble:
      if (r_src.IsFloat()) {
        opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
      } else if (!pair) {
        opcode = is_array ? kX86Mov64AR : kX86Mov64MR;
      } else {
        opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      }
      // TODO: double store is to unaligned address
      DCHECK_ALIGNED(displacement, 4);
      break;
    case kWord:
      if (cu_->target64) {
        opcode = is_array ? kX86Mov64AR : kX86Mov64MR;
        CHECK_EQ(is_array, false);
        CHECK_EQ(r_src.IsFloat(), false);
        consider_non_temporal = true;
        break;
      }
      FALLTHROUGH_INTENDED;  // else fall-through to k32 case
    case k32:
    case kSingle:
    case kReference:
      opcode = is_array ? kX86Mov32AR : kX86Mov32MR;
      if (r_src.IsFloat()) {
        opcode = is_array ? kX86MovssAR : kX86MovssMR;
        DCHECK(r_src.IsSingle());
      }
      DCHECK_ALIGNED(displacement, 4);
      consider_non_temporal = true;
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = is_array ? kX86Mov16AR : kX86Mov16MR;
      DCHECK_ALIGNED(displacement, 2);
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = is_array ? kX86Mov8AR : kX86Mov8MR;
      break;
    default:
      LOG(FATAL) << "Bad case in StoreBaseIndexedDispBody";
  }

  // Handle non temporal hint here.
  if (consider_non_temporal && ((opt_flags & MIR_STORE_NON_TEMPORAL) != 0)) {
    switch (opcode) {
      // We currently only handle 32/64 bit moves here.
      case kX86Mov64AR:
        opcode = kX86Movnti64AR;
        break;
      case kX86Mov64MR:
        opcode = kX86Movnti64MR;
        break;
      case kX86Mov32AR:
        opcode = kX86Movnti32AR;
        break;
      case kX86Mov32MR:
        opcode = kX86Movnti32MR;
        break;
      default:
        // Do nothing here.
        break;
    }
  }

  if (!is_array) {
    if (!pair) {
      store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetReg());
    } else {
      DCHECK(!r_src.IsFloat());  // Make sure we're not still using a pair here.
      store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetLowReg());
      store2 = NewLIR3(opcode, r_base.GetReg(), displacement + HIWORD_OFFSET, r_src.GetHighReg());
    }
    if (mem_ref_type_ == ResourceMask::kDalvikReg) {
      DCHECK_EQ(r_base, cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32);
      AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
                              false /* is_load */, is64bit);
      if (pair) {
        AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
                                false /* is_load */, is64bit);
      }
    }
  } else {
    if (!pair) {
      store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                      displacement + LOWORD_OFFSET, r_src.GetReg());
    } else {
      DCHECK(!r_src.IsFloat());  // Make sure we're not still using a pair here.
      store = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                      displacement + LOWORD_OFFSET, r_src.GetLowReg());
      store2 = NewLIR5(opcode, r_base.GetReg(), r_index.GetReg(), scale,
                       displacement + HIWORD_OFFSET, r_src.GetHighReg());
    }
  }
  return store;
}

/* Store value to base + scaled index. */
LIR* X86Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                  int scale, OpSize size) {
  return StoreBaseIndexedDisp(r_base, r_index, scale, 0, r_src, size);
}

LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
                               VolatileKind is_volatile) {
  if (UNLIKELY(is_volatile == kVolatile)) {
    GenMemBarrier(kAnyStore);  // Only a scheduling barrier.
  }

  // StoreBaseDisp() will emit correct insn for atomic store on x86
  // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
  // x86 only allows registers EAX-EDX to be used as byte registers; if the input src is not
  // valid, allocate a temp.
  bool allocated_temp = false;
  if (size == kUnsignedByte || size == kSignedByte) {
    if (!cu_->target64 && !r_src.Low4()) {
      RegStorage r_input = r_src;
      r_src = AllocateByteRegister();
      OpRegCopy(r_src, r_input);
      allocated_temp = true;
    }
  }

  LIR* store = StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size);

  if (UNLIKELY(is_volatile == kVolatile)) {
    // A volatile load might follow the volatile store so insert a StoreLoad barrier.
    // This does require a fence, even on x86.
    GenMemBarrier(kAnyAny);
  }

  if (allocated_temp) {
    FreeTemp(r_src);
  }

  return store;
}

LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond,
                                   // Comparison performed directly with memory.
                                   RegStorage temp_reg ATTRIBUTE_UNUSED,
                                   RegStorage base_reg,
                                   int offset,
                                   int check_value,
                                   LIR* target,
                                   LIR** compare) {
  LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
                      offset, check_value);
  if (compare != nullptr) {
    *compare = inst;
  }
  LIR* branch = OpCondBranch(cond, target);
  return branch;
}

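// On 32-bit x86 this pass scans the MIR for instructions that will need the
// PC-relative base register (double constants, large packed switches, vector
// constants, byte-wide packed shifts/multiplies and certain intrinsics) and
// boosts the promotion weight of pc_rel_temp_ accordingly.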
void X86Mir2Lir::AnalyzeMIR(RefCounts* core_counts, MIR* mir, uint32_t weight) {
  if (cu_->target64) {
    Mir2Lir::AnalyzeMIR(core_counts, mir, weight);
    return;
  }

  int opcode = mir->dalvikInsn.opcode;
  bool uses_pc_rel_load = false;
  switch (opcode) {
    // Instructions referencing doubles.
    case Instruction::CMPL_DOUBLE:
    case Instruction::CMPG_DOUBLE:
    case Instruction::NEG_DOUBLE:
    case Instruction::ADD_DOUBLE:
    case Instruction::SUB_DOUBLE:
    case Instruction::MUL_DOUBLE:
    case Instruction::DIV_DOUBLE:
    case Instruction::REM_DOUBLE:
    case Instruction::ADD_DOUBLE_2ADDR:
    case Instruction::SUB_DOUBLE_2ADDR:
    case Instruction::MUL_DOUBLE_2ADDR:
    case Instruction::DIV_DOUBLE_2ADDR:
    case Instruction::REM_DOUBLE_2ADDR:
    case kMirOpFusedCmplDouble:
    case kMirOpFusedCmpgDouble:
      uses_pc_rel_load = AnalyzeFPInstruction(opcode, mir);
      break;

    // Packed switch needs the PC-relative pointer if it's large.
    case Instruction::PACKED_SWITCH:
      if (mir_graph_->GetTable(mir, mir->dalvikInsn.vB)[1] > kSmallSwitchThreshold) {
        uses_pc_rel_load = true;
      }
      break;

    case kMirOpConstVector:
      uses_pc_rel_load = true;
      break;
    case kMirOpPackedMultiply:
    case kMirOpPackedShiftLeft:
    case kMirOpPackedSignedShiftRight:
    case kMirOpPackedUnsignedShiftRight:
      {
        // Byte emulation requires constants from the literal pool.
        OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
        if (opsize == kSignedByte || opsize == kUnsignedByte) {
          uses_pc_rel_load = true;
        }
      }
      break;

    case Instruction::INVOKE_STATIC:
    case Instruction::INVOKE_STATIC_RANGE:
      if (mir_graph_->GetMethodLoweringInfo(mir).IsIntrinsic()) {
        uses_pc_rel_load = AnalyzeInvokeStaticIntrinsic(mir);
        break;
      }
      FALLTHROUGH_INTENDED;
    default:
      Mir2Lir::AnalyzeMIR(core_counts, mir, weight);
      break;
  }

  if (uses_pc_rel_load) {
    DCHECK(pc_rel_temp_ != nullptr);
    core_counts[SRegToPMap(pc_rel_temp_->s_reg_low)].count += weight;
  }
}

bool X86Mir2Lir::AnalyzeFPInstruction(int opcode, MIR* mir) {
  DCHECK(!cu_->target64);
  // Look at all the uses, and see if they are double constants.
  uint64_t attrs = MIRGraph::GetDataFlowAttributes(static_cast<Instruction::Code>(opcode));
  int next_sreg = 0;
  if (attrs & DF_UA) {
    if (attrs & DF_A_WIDE) {
      if (AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg))) {
        return true;
      }
      next_sreg += 2;
    } else {
      next_sreg++;
    }
  }
  if (attrs & DF_UB) {
    if (attrs & DF_B_WIDE) {
      if (AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg))) {
        return true;
      }
      next_sreg += 2;
    } else {
      next_sreg++;
    }
  }
  if (attrs & DF_UC) {
    if (attrs & DF_C_WIDE) {
      if (AnalyzeDoubleUse(mir_graph_->GetSrcWide(mir, next_sreg))) {
        return true;
      }
    }
  }
  return false;
}

inline bool X86Mir2Lir::AnalyzeDoubleUse(RegLocation use) {
  // If this is a double literal, we will want it in the literal pool on 32b platforms.
  DCHECK(!cu_->target64);
  return use.is_const;
}

bool X86Mir2Lir::AnalyzeInvokeStaticIntrinsic(MIR* mir) {
  // 64 bit RIP addressing doesn't need this analysis.
  DCHECK(!cu_->target64);

  // Retrieve the type of the intrinsic.
  MethodReference method_ref = mir_graph_->GetMethodLoweringInfo(mir).GetTargetMethod();
  DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
  DexFileMethodInliner* method_inliner =
      cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(method_ref.dex_file);
  InlineMethod method;
  bool is_intrinsic = method_inliner->IsIntrinsic(method_ref.dex_method_index, &method);
  DCHECK(is_intrinsic);

  switch (method.opcode) {
    case kIntrinsicAbsDouble:
    case kIntrinsicMinMaxDouble:
      return true;
    default:
      return false;
  }
}

RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc) {
  loc = UpdateLoc(loc);
  if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
    if (GetRegInfo(loc.reg)->IsTemp()) {
      Clobber(loc.reg);
      FreeTemp(loc.reg);
      loc.reg = RegStorage::InvalidReg();
      loc.location = kLocDalvikFrame;
    }
  }
  DCHECK(CheckCorePoolSanity());
  return loc;
}

RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc) {
  loc = UpdateLocWide(loc);
  if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
    if (GetRegInfo(loc.reg)->IsTemp()) {
      Clobber(loc.reg);
      FreeTemp(loc.reg);
      loc.reg = RegStorage::InvalidReg();
      loc.location = kLocDalvikFrame;
    }
  }
  DCHECK(CheckCorePoolSanity());
  return loc;
}

LIR* X86Mir2Lir::InvokeTrampoline(OpKind op,
                                  // Call to absolute memory location doesn't
                                  // need a temporary target register.
                                  RegStorage r_tgt ATTRIBUTE_UNUSED,
                                  QuickEntrypointEnum trampoline) {
  if (cu_->target64) {
    return OpThreadMem(op, GetThreadOffset<8>(trampoline));
  } else {
    return OpThreadMem(op, GetThreadOffset<4>(trampoline));
  }
}

void X86Mir2Lir::CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs) {
  // Start with the default counts.
  Mir2Lir::CountRefs(core_counts, fp_counts, num_regs);

  if (pc_rel_temp_ != nullptr) {
    // Now, if the dex cache array base temp is used only once outside any loops (weight = 1),
    // avoid the promotion, otherwise boost the weight by factor 2 because the full PC-relative
    // load sequence is 3 instructions long and by promoting the PC base we save 2 instructions
    // per use.
    int p_map_idx = SRegToPMap(pc_rel_temp_->s_reg_low);
    if (core_counts[p_map_idx].count == 1) {
      core_counts[p_map_idx].count = 0;
    } else {
      core_counts[p_map_idx].count *= 2;
    }
  }
}

void X86Mir2Lir::DoPromotion() {
  if (!cu_->target64) {
    pc_rel_temp_ = mir_graph_->GetNewCompilerTemp(kCompilerTempBackend, false);
  }

  Mir2Lir::DoPromotion();

  if (pc_rel_temp_ != nullptr) {
    // Now, if the dex cache array base temp is promoted, remember the register but
    // always remove the temp's stack location to avoid unnecessarily bloating the stack.
    pc_rel_base_reg_ = mir_graph_->reg_location_[pc_rel_temp_->s_reg_low].reg;
    DCHECK(!pc_rel_base_reg_.Valid() || !pc_rel_base_reg_.IsFloat());
    mir_graph_->RemoveLastCompilerTemp(kCompilerTempBackend, false, pc_rel_temp_);
    pc_rel_temp_ = nullptr;
  }
}

}  // namespace art