/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include "arm_lir.h"
18#include "codegen_arm.h"
19#include "dex/quick/mir_to_lir-inl.h"
20
21namespace art {
22
/* This file contains codegen for the Thumb ISA. */
24
/*
 * Determine whether the 32-bit pattern in value can be encoded as a
 * Thumb2 VFP single-precision immediate (a:bcdefgh form).  Returns the
 * encoded 8-bit value, or -1 when the pattern is not representable.
 */
static int EncodeImmSingle(int value)
{
  // Decompose the word: sign, exponent top bits, high mantissa "slice",
  // and the low bits that must all be zero.
  int sign = (value & 0x80000000) >> 31;       // bit 31
  int exp_top = (value & 0x40000000) >> 30;    // bit 30
  int exp_next = (value & 0x20000000) >> 29;   // bit 29
  int exp_smear = (value & 0x3e000000) >> 25;  // bits 29..25
  int slice = (value & 0x01f80000) >> 19;      // bits 24..19
  int low_bits = (value & 0x0007ffff);         // bits 18..0

  if (low_bits != 0) {
    return -1;  // Low mantissa bits cannot be expressed.
  }
  // Bits 29..25 must all replicate bit 29, and bit 30 must be its
  // complement.
  if (exp_next) {
    if ((exp_top != 0) || (exp_smear != 0x1f)) {
      return -1;
    }
  } else {
    if ((exp_top != 1) || (exp_smear != 0x0)) {
      return -1;
    }
  }
  return (sign << 7) | (exp_next << 6) | slice;
}
46
/*
 * Determine whether value can be encoded as a Thumb2 floating point
 * immediate. If not, return -1. If so return encoded 8-bit value.
 */
static int EncodeImmDouble(int64_t value)
{
  // Decompose the doubleword analogously to the single-precision case.
  int sign = (value & 0x8000000000000000ll) >> 63;       // bit 63
  int exp_top = (value & 0x4000000000000000ll) >> 62;    // bit 62
  int exp_next = (value & 0x2000000000000000ll) >> 61;   // bit 61
  int exp_smear = (value & 0x3fc0000000000000ll) >> 54;  // bits 61..54
  int slice = (value & 0x003f000000000000ll) >> 48;      // bits 53..48
  uint64_t low_bits = (value & 0x0000ffffffffffffll);    // bits 47..0

  if (low_bits != 0) {
    return -1;  // Low mantissa bits cannot be expressed.
  }
  // Bits 61..54 must be eight copies of bit 61, with bit 62 its
  // complement.
  if (exp_next ? ((exp_top != 0) || (exp_smear != 0xff))
               : ((exp_top != 1) || (exp_smear != 0x0))) {
    return -1;
  }
  return (sign << 7) | (exp_next << 6) | slice;
}
72
73LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value)
74{
75 DCHECK(ARM_SINGLEREG(r_dest));
76 if (value == 0) {
77 // TODO: we need better info about the target CPU. a vector exclusive or
78 // would probably be better here if we could rely on its existance.
79 // Load an immediate +2.0 (which encodes to 0)
80 NewLIR2(kThumb2Vmovs_IMM8, r_dest, 0);
81 // +0.0 = +2.0 - +2.0
82 return NewLIR3(kThumb2Vsubs, r_dest, r_dest, r_dest);
83 } else {
84 int encoded_imm = EncodeImmSingle(value);
85 if (encoded_imm >= 0) {
86 return NewLIR2(kThumb2Vmovs_IMM8, r_dest, encoded_imm);
87 }
88 }
89 LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
90 if (data_target == NULL) {
91 data_target = AddWordData(&literal_list_, value);
92 }
93 LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kThumb2Vldrs,
94 r_dest, r15pc, 0, 0, 0, data_target);
95 SetMemRefType(load_pc_rel, true, kLiteral);
96 load_pc_rel->alias_info = reinterpret_cast<uintptr_t>(data_target);
97 AppendLIR(load_pc_rel);
98 return load_pc_rel;
99}
100
/* Count the leading zero bits of val (returns 32 when val == 0). */
static int LeadingZeros(uint32_t val)
{
  int remaining = 32;
  // Binary search: shift away the top half, quarter, ... of the word;
  // whenever the high part is non-zero, keep it and narrow the range.
  for (int step = 16; step != 0; step >>= 1) {
    uint32_t high = val >> step;
    if (high != 0) {
      remaining -= step;
      val = high;
    }
  }
  // val has been reduced to 0 or 1 here; subtract it off.
  return remaining - val;
}
119
/*
 * Determine whether value can be encoded as a Thumb2 modified
 * immediate. If not, return -1. If so, return i:imm3:a:bcdefgh form.
 */
int ArmMir2Lir::ModifiedImmediate(uint32_t value)
{
  int z_leading;
  int z_trailing;
  uint32_t b0 = value & 0xff;  // low byte, reused for the replication checks

  /* Note: case of value==0 must use 0:000:0:0000000 encoding */
  if (value <= 0xFF)
    return b0;  // 0:000:a:bcdefgh
  // Byte replicated in half-words: 0x00XY00XY.
  if (value == ((b0 << 16) | b0))
    return (0x1 << 8) | b0; /* 0:001:a:bcdefgh */
  // Byte replicated in all four lanes: 0xXYXYXYXY.
  if (value == ((b0 << 24) | (b0 << 16) | (b0 << 8) | b0))
    return (0x3 << 8) | b0; /* 0:011:a:bcdefgh */
  // Byte replicated in the odd lanes: 0xXY00XY00.
  b0 = (value >> 8) & 0xff;
  if (value == ((b0 << 24) | (b0 << 8)))
    return (0x2 << 8) | b0; /* 0:010:a:bcdefgh */
  /* Can we do it with rotation? */
  z_leading = LeadingZeros(value);
  // ~value & (value - 1) has ones exactly in value's trailing-zero
  // positions, so this counts trailing zeros.
  z_trailing = 32 - LeadingZeros(~value & (value - 1));
  /* A run of eight or fewer active bits? */
  if ((z_leading + z_trailing) < 24)
    return -1; /* No - bail */
  /* left-justify the constant, discarding msb (known to be 1) */
  value <<= z_leading + 1;
  /* Create bcdefgh */
  value >>= 25;
  /* Put it all together */
  return value | ((0x8 + z_leading) << 7); /* [01000..11111]:bcdefgh */
}
153
154bool ArmMir2Lir::InexpensiveConstantInt(int32_t value)
155{
156 return (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
157}
158
159bool ArmMir2Lir::InexpensiveConstantFloat(int32_t value)
160{
161 return EncodeImmSingle(value) >= 0;
162}
163
164bool ArmMir2Lir::InexpensiveConstantLong(int64_t value)
165{
166 return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
167}
168
169bool ArmMir2Lir::InexpensiveConstantDouble(int64_t value)
170{
171 return EncodeImmDouble(value) >= 0;
172}
173
174/*
175 * Load a immediate using a shortcut if possible; otherwise
176 * grab from the per-translation literal pool.
177 *
178 * No additional register clobbering operation performed. Use this version when
179 * 1) r_dest is freshly returned from AllocTemp or
180 * 2) The codegen is under fixed register usage
181 */
182LIR* ArmMir2Lir::LoadConstantNoClobber(int r_dest, int value)
183{
184 LIR* res;
185 int mod_imm;
186
187 if (ARM_FPREG(r_dest)) {
188 return LoadFPConstantValue(r_dest, value);
189 }
190
191 /* See if the value can be constructed cheaply */
192 if (ARM_LOWREG(r_dest) && (value >= 0) && (value <= 255)) {
193 return NewLIR2(kThumbMovImm, r_dest, value);
194 }
195 /* Check Modified immediate special cases */
196 mod_imm = ModifiedImmediate(value);
197 if (mod_imm >= 0) {
198 res = NewLIR2(kThumb2MovImmShift, r_dest, mod_imm);
199 return res;
200 }
201 mod_imm = ModifiedImmediate(~value);
202 if (mod_imm >= 0) {
203 res = NewLIR2(kThumb2MvnImm12, r_dest, mod_imm);
204 return res;
205 }
206 /* 16-bit immediate? */
207 if ((value & 0xffff) == value) {
208 res = NewLIR2(kThumb2MovImm16, r_dest, value);
209 return res;
210 }
211 /* Do a low/high pair */
212 res = NewLIR2(kThumb2MovImm16, r_dest, Low16Bits(value));
213 NewLIR2(kThumb2MovImm16H, r_dest, High16Bits(value));
214 return res;
215}
216
217LIR* ArmMir2Lir::OpUnconditionalBranch(LIR* target)
218{
219 LIR* res = NewLIR1(kThumbBUncond, 0 /* offset to be patched during assembly*/);
220 res->target = target;
221 return res;
222}
223
224LIR* ArmMir2Lir::OpCondBranch(ConditionCode cc, LIR* target)
225{
226 LIR* branch = NewLIR2(kThumb2BCond, 0 /* offset to be patched */,
227 ArmConditionEncoding(cc));
228 branch->target = target;
229 return branch;
230}
231
232LIR* ArmMir2Lir::OpReg(OpKind op, int r_dest_src)
233{
234 ArmOpcode opcode = kThumbBkpt;
235 switch (op) {
236 case kOpBlx:
237 opcode = kThumbBlxR;
238 break;
239 default:
240 LOG(FATAL) << "Bad opcode " << op;
241 }
242 return NewLIR1(opcode, r_dest_src);
243}
244
/*
 * Two-register op where r_dest_src1 is both a source and the
 * destination, with an optional encoded shift applied to r_src2.
 * Prefers the 16-bit Thumb encoding when both operands are low
 * registers and no shift was requested; otherwise uses the 32-bit
 * Thumb2 form.  The tail dispatches on the selected encoding's operand
 * count from EncodingMap.
 */
LIR* ArmMir2Lir::OpRegRegShift(OpKind op, int r_dest_src1, int r_src2,
                               int shift)
{
  // 16-bit Thumb encodings exist only for shift-free ops on low regs.
  bool thumb_form = ((shift == 0) && ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2));
  ArmOpcode opcode = kThumbBkpt;
  switch (op) {
    case kOpAdc:
      opcode = (thumb_form) ? kThumbAdcRR : kThumb2AdcRRR;
      break;
    case kOpAnd:
      opcode = (thumb_form) ? kThumbAndRR : kThumb2AndRRR;
      break;
    case kOpBic:
      opcode = (thumb_form) ? kThumbBicRR : kThumb2BicRRR;
      break;
    case kOpCmn:
      DCHECK_EQ(shift, 0);  // cmn has no shifted 16-bit form here
      opcode = (thumb_form) ? kThumbCmnRR : kThumb2CmnRR;
      break;
    case kOpCmp:
      // 16-bit cmp has distinct encodings per low/high register mix.
      if (thumb_form)
        opcode = kThumbCmpRR;
      else if ((shift == 0) && !ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
        opcode = kThumbCmpHH;
      else if ((shift == 0) && ARM_LOWREG(r_dest_src1))
        opcode = kThumbCmpLH;
      else if (shift == 0)
        opcode = kThumbCmpHL;
      else
        opcode = kThumb2CmpRR;
      break;
    case kOpXor:
      opcode = (thumb_form) ? kThumbEorRR : kThumb2EorRRR;
      break;
    case kOpMov:
      DCHECK_EQ(shift, 0);
      // mov likewise has per-register-bank 16-bit encodings.
      if (ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2))
        opcode = kThumbMovRR;
      else if (!ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
        opcode = kThumbMovRR_H2H;
      else if (ARM_LOWREG(r_dest_src1))
        opcode = kThumbMovRR_H2L;
      else
        opcode = kThumbMovRR_L2H;
      break;
    case kOpMul:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbMul : kThumb2MulRRR;
      break;
    case kOpMvn:
      opcode = (thumb_form) ? kThumbMvn : kThumb2MnvRR;
      break;
    case kOpNeg:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbNeg : kThumb2NegRR;
      break;
    case kOpOr:
      opcode = (thumb_form) ? kThumbOrr : kThumb2OrrRRR;
      break;
    case kOpSbc:
      opcode = (thumb_form) ? kThumbSbc : kThumb2SbcRRR;
      break;
    case kOpTst:
      opcode = (thumb_form) ? kThumbTst : kThumb2TstRR;
      break;
    case kOpLsl:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbLslRR : kThumb2LslRRR;
      break;
    case kOpLsr:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbLsrRR : kThumb2LsrRRR;
      break;
    case kOpAsr:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbAsrRR : kThumb2AsrRRR;
      break;
    case kOpRor:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbRorRR : kThumb2RorRRR;
      break;
    case kOpAdd:
      opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
      break;
    case kOpSub:
      opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
      break;
    // Sign/zero extensions are emitted directly as bitfield extracts.
    case kOp2Byte:
      DCHECK_EQ(shift, 0);
      return NewLIR4(kThumb2Sbfx, r_dest_src1, r_src2, 0, 8);
    case kOp2Short:
      DCHECK_EQ(shift, 0);
      return NewLIR4(kThumb2Sbfx, r_dest_src1, r_src2, 0, 16);
    case kOp2Char:
      DCHECK_EQ(shift, 0);
      return NewLIR4(kThumb2Ubfx, r_dest_src1, r_src2, 0, 16);
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  DCHECK_GE(static_cast<int>(opcode), 0);
  // Dispatch by the encoding's operand count; three-operand forms carry
  // either a shift or a duplicated destination.
  if (EncodingMap[opcode].flags & IS_BINARY_OP)
    return NewLIR2(opcode, r_dest_src1, r_src2);
  else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
    if (EncodingMap[opcode].field_loc[2].kind == kFmtShift)
      return NewLIR3(opcode, r_dest_src1, r_src2, shift);
    else
      return NewLIR3(opcode, r_dest_src1, r_dest_src1, r_src2);
  } else if (EncodingMap[opcode].flags & IS_QUAD_OP)
    return NewLIR4(opcode, r_dest_src1, r_dest_src1, r_src2, shift);
  else {
    LOG(FATAL) << "Unexpected encoding operand count";
    return NULL;
  }
}
360
361LIR* ArmMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2)
362{
363 return OpRegRegShift(op, r_dest_src1, r_src2, 0);
364}
365
366LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, int r_dest, int r_src1,
367 int r_src2, int shift)
368{
369 ArmOpcode opcode = kThumbBkpt;
370 bool thumb_form = (shift == 0) && ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1) &&
371 ARM_LOWREG(r_src2);
372 switch (op) {
373 case kOpAdd:
374 opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
375 break;
376 case kOpSub:
377 opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
378 break;
379 case kOpRsub:
380 opcode = kThumb2RsubRRR;
381 break;
382 case kOpAdc:
383 opcode = kThumb2AdcRRR;
384 break;
385 case kOpAnd:
386 opcode = kThumb2AndRRR;
387 break;
388 case kOpBic:
389 opcode = kThumb2BicRRR;
390 break;
391 case kOpXor:
392 opcode = kThumb2EorRRR;
393 break;
394 case kOpMul:
395 DCHECK_EQ(shift, 0);
396 opcode = kThumb2MulRRR;
397 break;
398 case kOpOr:
399 opcode = kThumb2OrrRRR;
400 break;
401 case kOpSbc:
402 opcode = kThumb2SbcRRR;
403 break;
404 case kOpLsl:
405 DCHECK_EQ(shift, 0);
406 opcode = kThumb2LslRRR;
407 break;
408 case kOpLsr:
409 DCHECK_EQ(shift, 0);
410 opcode = kThumb2LsrRRR;
411 break;
412 case kOpAsr:
413 DCHECK_EQ(shift, 0);
414 opcode = kThumb2AsrRRR;
415 break;
416 case kOpRor:
417 DCHECK_EQ(shift, 0);
418 opcode = kThumb2RorRRR;
419 break;
420 default:
421 LOG(FATAL) << "Bad opcode: " << op;
422 break;
423 }
424 DCHECK_GE(static_cast<int>(opcode), 0);
425 if (EncodingMap[opcode].flags & IS_QUAD_OP)
426 return NewLIR4(opcode, r_dest, r_src1, r_src2, shift);
427 else {
428 DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
429 return NewLIR3(opcode, r_dest, r_src1, r_src2);
430 }
431}
432
433LIR* ArmMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2)
434{
435 return OpRegRegRegShift(op, r_dest, r_src1, r_src2, 0);
436}
437
/*
 * r_dest = r_src1 <op> value.  Picks the shortest immediate encoding
 * available for op/value; when no immediate form fits, the constant is
 * materialized in a scratch register and a register-register form
 * (alt_opcode) is used instead.
 */
LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value)
{
  LIR* res;
  bool neg = (value < 0);
  int abs_value = (neg) ? -value : value;
  ArmOpcode opcode = kThumbBkpt;
  ArmOpcode alt_opcode = kThumbBkpt;  // register-register fallback form
  bool all_low_regs = (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1));
  int mod_imm = ModifiedImmediate(value);
  int mod_imm_neg = ModifiedImmediate(-value);

  switch (op) {
    // Shift ops encode the amount directly in the instruction.
    case kOpLsl:
      if (all_low_regs)
        return NewLIR3(kThumbLslRRI5, r_dest, r_src1, value);
      else
        return NewLIR3(kThumb2LslRRI5, r_dest, r_src1, value);
    case kOpLsr:
      if (all_low_regs)
        return NewLIR3(kThumbLsrRRI5, r_dest, r_src1, value);
      else
        return NewLIR3(kThumb2LsrRRI5, r_dest, r_src1, value);
    case kOpAsr:
      if (all_low_regs)
        return NewLIR3(kThumbAsrRRI5, r_dest, r_src1, value);
      else
        return NewLIR3(kThumb2AsrRRI5, r_dest, r_src1, value);
    case kOpRor:
      return NewLIR3(kThumb2RorRRI5, r_dest, r_src1, value);
    case kOpAdd:
      // sp/pc-relative adds with word-aligned offsets up to 1020 have
      // dedicated 16-bit encodings (offset stored word-scaled).
      if (ARM_LOWREG(r_dest) && (r_src1 == r13sp) &&
          (value <= 1020) && ((value & 0x3)==0)) {
        return NewLIR3(kThumbAddSpRel, r_dest, r_src1, value >> 2);
      } else if (ARM_LOWREG(r_dest) && (r_src1 == r15pc) &&
                 (value <= 1020) && ((value & 0x3)==0)) {
        return NewLIR3(kThumbAddPcRel, r_dest, r_src1, value >> 2);
      }
      // Note: intentional fallthrough
    case kOpSub:
      if (all_low_regs && ((abs_value & 0x7) == abs_value)) {
        // 3-bit immediate form; a negative value flips add<->sub and
        // uses the magnitude.
        if (op == kOpAdd)
          opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3;
        else
          opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
        return NewLIR3(opcode, r_dest, r_src1, abs_value);
      } else if ((abs_value & 0xff) == abs_value) {
        // NOTE(review): guard only admits 8 bits although the RRI12
        // encodings accept 12 - conservative but correct.
        if (op == kOpAdd)
          opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
        else
          opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
        return NewLIR3(opcode, r_dest, r_src1, abs_value);
      }
      if (mod_imm_neg >= 0) {
        // The negated value encodes: use it with the opposite op.
        op = (op == kOpAdd) ? kOpSub : kOpAdd;
        mod_imm = mod_imm_neg;
      }
      if (op == kOpSub) {
        opcode = kThumb2SubRRI8;
        alt_opcode = kThumb2SubRRR;
      } else {
        opcode = kThumb2AddRRI8;
        alt_opcode = kThumb2AddRRR;
      }
      break;
    case kOpRsub:
      opcode = kThumb2RsubRRI8;
      alt_opcode = kThumb2RsubRRR;
      break;
    case kOpAdc:
      opcode = kThumb2AdcRRI8;
      alt_opcode = kThumb2AdcRRR;
      break;
    case kOpSbc:
      opcode = kThumb2SbcRRI8;
      alt_opcode = kThumb2SbcRRR;
      break;
    case kOpOr:
      opcode = kThumb2OrrRRI8;
      alt_opcode = kThumb2OrrRRR;
      break;
    case kOpAnd:
      opcode = kThumb2AndRRI8;
      alt_opcode = kThumb2AndRRR;
      break;
    case kOpXor:
      opcode = kThumb2EorRRI8;
      alt_opcode = kThumb2EorRRR;
      break;
    case kOpMul:
      //TUNING: power of 2, shift & add
      mod_imm = -1;  // force the register-register fallback below
      alt_opcode = kThumb2MulRRR;
      break;
    case kOpCmp: {
      // Deliberately shadows the outer mod_imm/res; cmp returns early.
      int mod_imm = ModifiedImmediate(value);
      LIR* res;
      if (mod_imm >= 0) {
        res = NewLIR2(kThumb2CmpRI12, r_src1, mod_imm);
      } else {
        int r_tmp = AllocTemp();
        res = LoadConstant(r_tmp, value);
        OpRegReg(kOpCmp, r_src1, r_tmp);
        FreeTemp(r_tmp);
      }
      return res;
    }
    default:
      LOG(FATAL) << "Bad opcode: " << op;
  }

  if (mod_imm >= 0) {
    return NewLIR3(opcode, r_dest, r_src1, mod_imm);
  } else {
    // No immediate form fits: build the constant in a scratch register
    // and use the register-register fallback.
    int r_scratch = AllocTemp();
    LoadConstant(r_scratch, value);
    if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
      res = NewLIR4(alt_opcode, r_dest, r_src1, r_scratch, 0);
    else
      res = NewLIR3(alt_opcode, r_dest, r_src1, r_scratch);
    FreeTemp(r_scratch);
    return res;
  }
}
561
562/* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
563LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value)
564{
565 bool neg = (value < 0);
566 int abs_value = (neg) ? -value : value;
567 bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1));
568 ArmOpcode opcode = kThumbBkpt;
569 switch (op) {
570 case kOpAdd:
571 if ( !neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
572 DCHECK_EQ((value & 0x3), 0);
573 return NewLIR1(kThumbAddSpI7, value >> 2);
574 } else if (short_form) {
575 opcode = (neg) ? kThumbSubRI8 : kThumbAddRI8;
576 }
577 break;
578 case kOpSub:
579 if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
580 DCHECK_EQ((value & 0x3), 0);
581 return NewLIR1(kThumbSubSpI7, value >> 2);
582 } else if (short_form) {
583 opcode = (neg) ? kThumbAddRI8 : kThumbSubRI8;
584 }
585 break;
586 case kOpCmp:
587 if (ARM_LOWREG(r_dest_src1) && short_form)
588 opcode = (short_form) ? kThumbCmpRI8 : kThumbCmpRR;
589 else if (ARM_LOWREG(r_dest_src1))
590 opcode = kThumbCmpRR;
591 else {
592 short_form = false;
593 opcode = kThumbCmpHL;
594 }
595 break;
596 default:
597 /* Punt to OpRegRegImm - if bad case catch it there */
598 short_form = false;
599 break;
600 }
601 if (short_form)
602 return NewLIR2(opcode, r_dest_src1, abs_value);
603 else {
604 return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
605 }
606}
607
608LIR* ArmMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value)
609{
610 LIR* res = NULL;
611 int32_t val_lo = Low32Bits(value);
612 int32_t val_hi = High32Bits(value);
613 int target_reg = S2d(r_dest_lo, r_dest_hi);
614 if (ARM_FPREG(r_dest_lo)) {
615 if ((val_lo == 0) && (val_hi == 0)) {
616 // TODO: we need better info about the target CPU. a vector exclusive or
617 // would probably be better here if we could rely on its existance.
618 // Load an immediate +2.0 (which encodes to 0)
619 NewLIR2(kThumb2Vmovd_IMM8, target_reg, 0);
620 // +0.0 = +2.0 - +2.0
621 res = NewLIR3(kThumb2Vsubd, target_reg, target_reg, target_reg);
622 } else {
623 int encoded_imm = EncodeImmDouble(value);
624 if (encoded_imm >= 0) {
625 res = NewLIR2(kThumb2Vmovd_IMM8, target_reg, encoded_imm);
626 }
627 }
628 } else {
629 if ((InexpensiveConstantInt(val_lo) && (InexpensiveConstantInt(val_hi)))) {
630 res = LoadConstantNoClobber(r_dest_lo, val_lo);
631 LoadConstantNoClobber(r_dest_hi, val_hi);
632 }
633 }
634 if (res == NULL) {
635 // No short form - load from the literal pool.
636 LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
637 if (data_target == NULL) {
638 data_target = AddWideData(&literal_list_, val_lo, val_hi);
639 }
640 if (ARM_FPREG(r_dest_lo)) {
641 res = RawLIR(current_dalvik_offset_, kThumb2Vldrd,
642 target_reg, r15pc, 0, 0, 0, data_target);
643 } else {
644 res = RawLIR(current_dalvik_offset_, kThumb2LdrdPcRel8,
645 r_dest_lo, r_dest_hi, r15pc, 0, 0, data_target);
646 }
647 SetMemRefType(res, true, kLiteral);
648 res->alias_info = reinterpret_cast<uintptr_t>(data_target);
649 AppendLIR(res);
650 }
651 return res;
652}
653
654int ArmMir2Lir::EncodeShift(int code, int amount) {
655 return ((amount & 0x1f) << 2) | code;
656}
657
658LIR* ArmMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest,
659 int scale, OpSize size)
660{
661 bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_dest);
662 LIR* load;
663 ArmOpcode opcode = kThumbBkpt;
664 bool thumb_form = (all_low_regs && (scale == 0));
665 int reg_ptr;
666
667 if (ARM_FPREG(r_dest)) {
668 if (ARM_SINGLEREG(r_dest)) {
669 DCHECK((size == kWord) || (size == kSingle));
670 opcode = kThumb2Vldrs;
671 size = kSingle;
672 } else {
673 DCHECK(ARM_DOUBLEREG(r_dest));
674 DCHECK((size == kLong) || (size == kDouble));
675 DCHECK_EQ((r_dest & 0x1), 0);
676 opcode = kThumb2Vldrd;
677 size = kDouble;
678 }
679 } else {
680 if (size == kSingle)
681 size = kWord;
682 }
683
684 switch (size) {
685 case kDouble: // fall-through
686 case kSingle:
687 reg_ptr = AllocTemp();
688 if (scale) {
689 NewLIR4(kThumb2AddRRR, reg_ptr, rBase, r_index,
690 EncodeShift(kArmLsl, scale));
691 } else {
692 OpRegRegReg(kOpAdd, reg_ptr, rBase, r_index);
693 }
694 load = NewLIR3(opcode, r_dest, reg_ptr, 0);
695 FreeTemp(reg_ptr);
696 return load;
697 case kWord:
698 opcode = (thumb_form) ? kThumbLdrRRR : kThumb2LdrRRR;
699 break;
700 case kUnsignedHalf:
701 opcode = (thumb_form) ? kThumbLdrhRRR : kThumb2LdrhRRR;
702 break;
703 case kSignedHalf:
704 opcode = (thumb_form) ? kThumbLdrshRRR : kThumb2LdrshRRR;
705 break;
706 case kUnsignedByte:
707 opcode = (thumb_form) ? kThumbLdrbRRR : kThumb2LdrbRRR;
708 break;
709 case kSignedByte:
710 opcode = (thumb_form) ? kThumbLdrsbRRR : kThumb2LdrsbRRR;
711 break;
712 default:
713 LOG(FATAL) << "Bad size: " << size;
714 }
715 if (thumb_form)
716 load = NewLIR3(opcode, r_dest, rBase, r_index);
717 else
718 load = NewLIR4(opcode, r_dest, rBase, r_index, scale);
719
720 return load;
721}
722
723LIR* ArmMir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
724 int scale, OpSize size)
725{
726 bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_src);
727 LIR* store = NULL;
728 ArmOpcode opcode = kThumbBkpt;
729 bool thumb_form = (all_low_regs && (scale == 0));
730 int reg_ptr;
731
732 if (ARM_FPREG(r_src)) {
733 if (ARM_SINGLEREG(r_src)) {
734 DCHECK((size == kWord) || (size == kSingle));
735 opcode = kThumb2Vstrs;
736 size = kSingle;
737 } else {
738 DCHECK(ARM_DOUBLEREG(r_src));
739 DCHECK((size == kLong) || (size == kDouble));
740 DCHECK_EQ((r_src & 0x1), 0);
741 opcode = kThumb2Vstrd;
742 size = kDouble;
743 }
744 } else {
745 if (size == kSingle)
746 size = kWord;
747 }
748
749 switch (size) {
750 case kDouble: // fall-through
751 case kSingle:
752 reg_ptr = AllocTemp();
753 if (scale) {
754 NewLIR4(kThumb2AddRRR, reg_ptr, rBase, r_index,
755 EncodeShift(kArmLsl, scale));
756 } else {
757 OpRegRegReg(kOpAdd, reg_ptr, rBase, r_index);
758 }
759 store = NewLIR3(opcode, r_src, reg_ptr, 0);
760 FreeTemp(reg_ptr);
761 return store;
762 case kWord:
763 opcode = (thumb_form) ? kThumbStrRRR : kThumb2StrRRR;
764 break;
765 case kUnsignedHalf:
766 case kSignedHalf:
767 opcode = (thumb_form) ? kThumbStrhRRR : kThumb2StrhRRR;
768 break;
769 case kUnsignedByte:
770 case kSignedByte:
771 opcode = (thumb_form) ? kThumbStrbRRR : kThumb2StrbRRR;
772 break;
773 default:
774 LOG(FATAL) << "Bad size: " << size;
775 }
776 if (thumb_form)
777 store = NewLIR3(opcode, r_src, rBase, r_index);
778 else
779 store = NewLIR4(opcode, r_src, rBase, r_index, scale);
780
781 return store;
782}
783
784/*
785 * Load value from base + displacement. Optionally perform null check
786 * on base (which must have an associated s_reg and MIR). If not
787 * performing null check, incoming MIR can be null.
788 */
789LIR* ArmMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest,
790 int r_dest_hi, OpSize size, int s_reg)
791{
792 LIR* load = NULL;
793 ArmOpcode opcode = kThumbBkpt;
794 bool short_form = false;
795 bool thumb2Form = (displacement < 4092 && displacement >= 0);
796 bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_dest));
797 int encoded_disp = displacement;
798 bool is64bit = false;
799 bool already_generated = false;
800 switch (size) {
801 case kDouble:
802 case kLong:
803 is64bit = true;
804 if (ARM_FPREG(r_dest)) {
805 if (ARM_SINGLEREG(r_dest)) {
806 DCHECK(ARM_FPREG(r_dest_hi));
807 r_dest = S2d(r_dest, r_dest_hi);
808 }
809 opcode = kThumb2Vldrd;
810 if (displacement <= 1020) {
811 short_form = true;
812 encoded_disp >>= 2;
813 }
814 break;
815 } else {
816 if (displacement <= 1020) {
817 load = NewLIR4(kThumb2LdrdI8, r_dest, r_dest_hi, rBase, displacement >> 2);
818 } else {
819 load = LoadBaseDispBody(rBase, displacement, r_dest,
820 -1, kWord, s_reg);
821 LoadBaseDispBody(rBase, displacement + 4, r_dest_hi,
822 -1, kWord, INVALID_SREG);
823 }
824 already_generated = true;
825 }
826 case kSingle:
827 case kWord:
828 if (ARM_FPREG(r_dest)) {
829 opcode = kThumb2Vldrs;
830 if (displacement <= 1020) {
831 short_form = true;
832 encoded_disp >>= 2;
833 }
834 break;
835 }
836 if (ARM_LOWREG(r_dest) && (rBase == r15pc) &&
837 (displacement <= 1020) && (displacement >= 0)) {
838 short_form = true;
839 encoded_disp >>= 2;
840 opcode = kThumbLdrPcRel;
841 } else if (ARM_LOWREG(r_dest) && (rBase == r13sp) &&
842 (displacement <= 1020) && (displacement >= 0)) {
843 short_form = true;
844 encoded_disp >>= 2;
845 opcode = kThumbLdrSpRel;
846 } else if (all_low_regs && displacement < 128 && displacement >= 0) {
847 DCHECK_EQ((displacement & 0x3), 0);
848 short_form = true;
849 encoded_disp >>= 2;
850 opcode = kThumbLdrRRI5;
851 } else if (thumb2Form) {
852 short_form = true;
853 opcode = kThumb2LdrRRI12;
854 }
855 break;
856 case kUnsignedHalf:
857 if (all_low_regs && displacement < 64 && displacement >= 0) {
858 DCHECK_EQ((displacement & 0x1), 0);
859 short_form = true;
860 encoded_disp >>= 1;
861 opcode = kThumbLdrhRRI5;
862 } else if (displacement < 4092 && displacement >= 0) {
863 short_form = true;
864 opcode = kThumb2LdrhRRI12;
865 }
866 break;
867 case kSignedHalf:
868 if (thumb2Form) {
869 short_form = true;
870 opcode = kThumb2LdrshRRI12;
871 }
872 break;
873 case kUnsignedByte:
874 if (all_low_regs && displacement < 32 && displacement >= 0) {
875 short_form = true;
876 opcode = kThumbLdrbRRI5;
877 } else if (thumb2Form) {
878 short_form = true;
879 opcode = kThumb2LdrbRRI12;
880 }
881 break;
882 case kSignedByte:
883 if (thumb2Form) {
884 short_form = true;
885 opcode = kThumb2LdrsbRRI12;
886 }
887 break;
888 default:
889 LOG(FATAL) << "Bad size: " << size;
890 }
891
892 if (!already_generated) {
893 if (short_form) {
894 load = NewLIR3(opcode, r_dest, rBase, encoded_disp);
895 } else {
896 int reg_offset = AllocTemp();
897 LoadConstant(reg_offset, encoded_disp);
898 load = LoadBaseIndexed(rBase, reg_offset, r_dest, 0, size);
899 FreeTemp(reg_offset);
900 }
901 }
902
903 // TODO: in future may need to differentiate Dalvik accesses w/ spills
904 if (rBase == rARM_SP) {
905 AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, is64bit);
906 }
907 return load;
908}
909
910LIR* ArmMir2Lir::LoadBaseDisp(int rBase, int displacement, int r_dest,
911 OpSize size, int s_reg)
912{
913 return LoadBaseDispBody(rBase, displacement, r_dest, -1, size, s_reg);
914}
915
916LIR* ArmMir2Lir::LoadBaseDispWide(int rBase, int displacement, int r_dest_lo,
917 int r_dest_hi, int s_reg)
918{
919 return LoadBaseDispBody(rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg);
920}
921
922
/*
 * Store r_src (pair r_src/r_src_hi for 64-bit sizes) to
 * base + displacement, choosing the shortest available encoding and
 * falling back to an indexed store through a scratch register when no
 * immediate-offset form fits.
 */
LIR* ArmMir2Lir::StoreBaseDispBody(int rBase, int displacement,
                                   int r_src, int r_src_hi, OpSize size) {
  LIR* store = NULL;
  ArmOpcode opcode = kThumbBkpt;
  bool short_form = false;
  // Thumb2 12-bit immediate offset covers [0, 4092).
  bool thumb2Form = (displacement < 4092 && displacement >= 0);
  bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_src));
  int encoded_disp = displacement;
  bool is64bit = false;
  bool already_generated = false;
  switch (size) {
    case kLong:
    case kDouble:
      is64bit = true;
      if (!ARM_FPREG(r_src)) {
        if (displacement <= 1020) {
          // strd takes a word-scaled 8-bit offset.
          store = NewLIR4(kThumb2StrdI8, r_src, r_src_hi, rBase, displacement >> 2);
        } else {
          // Offset too large for strd: emit two word stores.
          store = StoreBaseDispBody(rBase, displacement, r_src, -1, kWord);
          StoreBaseDispBody(rBase, displacement + 4, r_src_hi, -1, kWord);
        }
        already_generated = true;
      } else {
        if (ARM_SINGLEREG(r_src)) {
          DCHECK(ARM_FPREG(r_src_hi));
          r_src = S2d(r_src, r_src_hi);  // combine into a double reg
        }
        opcode = kThumb2Vstrd;
        if (displacement <= 1020) {
          short_form = true;
          encoded_disp >>= 2;  // vstr offset is word-scaled
        }
      }
      break;
    case kSingle:
    case kWord:
      if (ARM_FPREG(r_src)) {
        DCHECK(ARM_SINGLEREG(r_src));
        opcode = kThumb2Vstrs;
        if (displacement <= 1020) {
          short_form = true;
          encoded_disp >>= 2;
        }
        break;
      }
      // 16-bit sp-relative form, then the generic 16- and 32-bit forms.
      if (ARM_LOWREG(r_src) && (rBase == r13sp) &&
          (displacement <= 1020) && (displacement >= 0)) {
        short_form = true;
        encoded_disp >>= 2;
        opcode = kThumbStrSpRel;
      } else if (all_low_regs && displacement < 128 && displacement >= 0) {
        DCHECK_EQ((displacement & 0x3), 0);
        short_form = true;
        encoded_disp >>= 2;
        opcode = kThumbStrRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2StrRRI12;
      }
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      if (all_low_regs && displacement < 64 && displacement >= 0) {
        DCHECK_EQ((displacement & 0x1), 0);
        short_form = true;
        encoded_disp >>= 1;  // halfword-scaled
        opcode = kThumbStrhRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2StrhRRI12;
      }
      break;
    case kUnsignedByte:
    case kSignedByte:
      if (all_low_regs && displacement < 32 && displacement >= 0) {
        short_form = true;
        opcode = kThumbStrbRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2StrbRRI12;
      }
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }
  if (!already_generated) {
    if (short_form) {
      store = NewLIR3(opcode, r_src, rBase, encoded_disp);
    } else {
      // No immediate form fits: materialize the offset and use an
      // indexed store.
      int r_scratch = AllocTemp();
      LoadConstant(r_scratch, encoded_disp);
      store = StoreBaseIndexed(rBase, r_scratch, r_src, 0, size);
      FreeTemp(r_scratch);
    }
  }

  // TODO: In future, may need to differentiate Dalvik & spill accesses
  if (rBase == rARM_SP) {
    AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, is64bit);
  }
  return store;
}
1025
1026LIR* ArmMir2Lir::StoreBaseDisp(int rBase, int displacement, int r_src,
1027 OpSize size)
1028{
1029 return StoreBaseDispBody(rBase, displacement, r_src, -1, size);
1030}
1031
1032LIR* ArmMir2Lir::StoreBaseDispWide(int rBase, int displacement,
1033 int r_src_lo, int r_src_hi)
1034{
1035 return StoreBaseDispBody(rBase, displacement, r_src_lo, r_src_hi, kLong);
1036}
1037
1038LIR* ArmMir2Lir::OpFpRegCopy(int r_dest, int r_src)
1039{
1040 int opcode;
1041 DCHECK_EQ(ARM_DOUBLEREG(r_dest), ARM_DOUBLEREG(r_src));
1042 if (ARM_DOUBLEREG(r_dest)) {
1043 opcode = kThumb2Vmovd;
1044 } else {
1045 if (ARM_SINGLEREG(r_dest)) {
1046 opcode = ARM_SINGLEREG(r_src) ? kThumb2Vmovs : kThumb2Fmsr;
1047 } else {
1048 DCHECK(ARM_SINGLEREG(r_src));
1049 opcode = kThumb2Fmrs;
1050 }
1051 }
1052 LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
1053 if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
1054 res->flags.is_nop = true;
1055 }
1056 return res;
1057}
1058
1059LIR* ArmMir2Lir::OpThreadMem(OpKind op, int thread_offset)
1060{
1061 LOG(FATAL) << "Unexpected use of OpThreadMem for Arm";
1062 return NULL;
1063}
1064
1065LIR* ArmMir2Lir::OpMem(OpKind op, int rBase, int disp)
1066{
1067 LOG(FATAL) << "Unexpected use of OpMem for Arm";
1068 return NULL;
1069}
1070
1071LIR* ArmMir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
1072 int displacement, int r_src, int r_src_hi, OpSize size,
1073 int s_reg)
1074{
1075 LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm";
1076 return NULL;
1077}
1078
1079LIR* ArmMir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, int offset)
1080{
1081 LOG(FATAL) << "Unexpected use of OpRegMem for Arm";
1082 return NULL;
1083}
1084
1085LIR* ArmMir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale,
1086 int displacement, int r_dest, int r_dest_hi, OpSize size,
1087 int s_reg)
1088{
1089 LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm";
1090 return NULL;
1091}
1092
1093} // namespace art