/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_mips64.h"

#include "arch/mips64/instruction_set_features_mips64.h"
#include "base/logging.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "dex/reg_storage_eq.h"
#include "driver/compiler_driver.h"
#include "mips64_lir.h"

namespace art {

/* This file contains codegen for the MIPS64 ISA. */

LIR* Mips64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
  int opcode;
  // Must be both DOUBLE or both not DOUBLE.
  DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit());
  if (r_dest.Is64Bit()) {
    if (r_dest.IsDouble()) {
      if (r_src.IsDouble()) {
        opcode = kMips64Fmovd;
      } else {
        // Note the operands are swapped for the dmtc1 instr.
        RegStorage t_opnd = r_src;
        r_src = r_dest;
        r_dest = t_opnd;
        opcode = kMips64Dmtc1;
      }
    } else {
      DCHECK(r_src.IsDouble());
      opcode = kMips64Dmfc1;
    }
  } else {
    if (r_dest.IsSingle()) {
      if (r_src.IsSingle()) {
        opcode = kMips64Fmovs;
      } else {
        // Note the operands are swapped for the mtc1 instr.
        RegStorage t_opnd = r_src;
        r_src = r_dest;
        r_dest = t_opnd;
        opcode = kMips64Mtc1;
      }
    } else {
      DCHECK(r_src.IsSingle());
      opcode = kMips64Mfc1;
    }
  }
  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg());
  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}
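
// Illustrative note (not from the original source): mtc1/dmtc1 encode the GPR
// first and the FPR second ("dmtc1 rt, fs" copies GPR rt into FPR fs), so a
// GPR -> FPR copy is emitted as "dmtc1 <src GPR>, <dst FPR>"; swapping r_dest
// and r_src above lines the LIR operands up with that encoding.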

bool Mips64Mir2Lir::InexpensiveConstantInt(int32_t value) {
  // For encodings, see LoadConstantNoClobber below.
  return ((value == 0) || IsUint<16>(value) || IsInt<16>(value));
}

bool Mips64Mir2Lir::InexpensiveConstantFloat(int32_t value) {
  UNUSED(value);
  return false;  // TUNING
}

bool Mips64Mir2Lir::InexpensiveConstantLong(int64_t value) {
  UNUSED(value);
  return false;  // TUNING
}

bool Mips64Mir2Lir::InexpensiveConstantDouble(int64_t value) {
  UNUSED(value);
  return false;  // TUNING
}

/*
 * Load an immediate using a shortcut if possible; otherwise
 * grab from the per-translation literal pool. If target is
 * a high register, build the constant into a low register and copy.
 *
 * No additional register clobbering operation is performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp or
 * 2) the codegen is under fixed register usage
 */
LIR* Mips64Mir2Lir::LoadConstantNoClobber(RegStorage r_dest, int value) {
  LIR *res;

  RegStorage r_dest_save = r_dest;
  int is_fp_reg = r_dest.IsFloat();
  if (is_fp_reg) {
    DCHECK(r_dest.IsSingle());
    r_dest = AllocTemp();
  }

  // See if the value can be constructed cheaply.
  if (value == 0) {
    res = NewLIR2(kMips64Move, r_dest.GetReg(), rZERO);
  } else if (IsUint<16>(value)) {
    // Use OR with (unsigned) immediate to encode 16b unsigned int.
    res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZERO, value);
  } else if (IsInt<16>(value)) {
    // Use ADD with (signed) immediate to encode 16b signed int.
    res = NewLIR3(kMips64Addiu, r_dest.GetReg(), rZERO, value);
  } else {
    res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
    if (value & 0xffff) {
      NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
    }
  }

  if (is_fp_reg) {
    NewLIR2(kMips64Mtc1, r_dest.GetReg(), r_dest_save.GetReg());
    FreeTemp(r_dest);
  }

  return res;
}
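
// Worked example (illustrative, not from the original source): 0x12345678
// fits none of the 16-bit forms, so it takes the two-instruction lui/ori
// path above:
//   lui t0, 0x1234        # t0 = 0x12340000
//   ori t0, t0, 0x5678    # t0 = 0x12345678
// Small values take one instruction: 0xabcd is IsUint<16> ("ori t0, zero,
// 0xabcd") and -42 is IsInt<16> ("addiu t0, zero, -42").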

LIR* Mips64Mir2Lir::OpUnconditionalBranch(LIR* target) {
  LIR* res = NewLIR1(kMips64B, 0 /* offset to be patched during assembly */);
  res->target = target;
  return res;
}

LIR* Mips64Mir2Lir::OpReg(OpKind op, RegStorage r_dest_src) {
  Mips64OpCode opcode = kMips64Nop;
  switch (op) {
    case kOpBlx:
      opcode = kMips64Jalr;
      break;
    case kOpBx:
      // "jalr zero, reg" is a plain indirect jump; nothing is linked.
      return NewLIR2(kMips64Jalr, rZERO, r_dest_src.GetReg());
    default:
      LOG(FATAL) << "Bad case in OpReg";
  }
  return NewLIR2(opcode, rRAd, r_dest_src.GetReg());
}

LIR* Mips64Mir2Lir::OpRegImm(OpKind op, RegStorage r_dest_src1, int value) {
  LIR *res;
  bool neg = (value < 0);
  int abs_value = (neg) ? -value : value;
  bool short_form = (abs_value & 0xff) == abs_value;
  bool is64bit = r_dest_src1.Is64Bit();
  RegStorage r_scratch;
  Mips64OpCode opcode = kMips64Nop;
  switch (op) {
    case kOpAdd:
    case kOpSub:
      return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
    default:
      LOG(FATAL) << "Bad case in OpRegImm";
  }
  if (short_form) {
    res = NewLIR2(opcode, r_dest_src1.GetReg(), abs_value);
  } else {
    if (is64bit) {
      r_scratch = AllocTempWide();
      res = LoadConstantWide(r_scratch, value);
    } else {
      r_scratch = AllocTemp();
      res = LoadConstant(r_scratch, value);
    }
    if (op == kOpCmp) {
      NewLIR2(opcode, r_dest_src1.GetReg(), r_scratch.GetReg());
    } else {
      NewLIR3(opcode, r_dest_src1.GetReg(), r_dest_src1.GetReg(), r_scratch.GetReg());
    }
  }
  return res;
}

LIR* Mips64Mir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest,
                                RegStorage r_src1, RegStorage r_src2) {
  Mips64OpCode opcode = kMips64Nop;
  bool is64bit = r_dest.Is64Bit() || r_src1.Is64Bit() || r_src2.Is64Bit();

  switch (op) {
    case kOpAdd:
      if (is64bit) {
        opcode = kMips64Daddu;
      } else {
        opcode = kMips64Addu;
      }
      break;
    case kOpSub:
      if (is64bit) {
        opcode = kMips64Dsubu;
      } else {
        opcode = kMips64Subu;
      }
      break;
    case kOpAnd:
      opcode = kMips64And;
      break;
    case kOpMul:
      opcode = kMips64Mul;
      break;
    case kOpOr:
      opcode = kMips64Or;
      break;
    case kOpXor:
      opcode = kMips64Xor;
      break;
    case kOpLsl:
      if (is64bit) {
        opcode = kMips64Dsllv;
      } else {
        opcode = kMips64Sllv;
      }
      break;
    case kOpLsr:
      if (is64bit) {
        opcode = kMips64Dsrlv;
      } else {
        opcode = kMips64Srlv;
      }
      break;
    case kOpAsr:
      if (is64bit) {
        opcode = kMips64Dsrav;
      } else {
        opcode = kMips64Srav;
      }
      break;
    case kOpAdc:
    case kOpSbc:
      LOG(FATAL) << "No carry bit on MIPS64";
      break;
    default:
      LOG(FATAL) << "Bad case in OpRegRegReg";
      break;
  }
  return NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
}

LIR* Mips64Mir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) {
  LIR *res;
  Mips64OpCode opcode = kMips64Nop;
  bool short_form = true;
  bool is64bit = r_dest.Is64Bit() || r_src1.Is64Bit();

  switch (op) {
    case kOpAdd:
      if (is64bit) {
        if (IS_SIMM16(value)) {
          opcode = kMips64Daddiu;
        } else {
          short_form = false;
          opcode = kMips64Daddu;
        }
      } else {
        if (IS_SIMM16(value)) {
          opcode = kMips64Addiu;
        } else {
          short_form = false;
          opcode = kMips64Addu;
        }
      }
      break;
    case kOpSub:
      if (is64bit) {
        if (IS_SIMM16(-value)) {
          value = -value;
          opcode = kMips64Daddiu;
        } else {
          short_form = false;
          opcode = kMips64Dsubu;
        }
      } else {
        if (IS_SIMM16(-value)) {
          value = -value;
          opcode = kMips64Addiu;
        } else {
          short_form = false;
          opcode = kMips64Subu;
        }
      }
      break;
    case kOpLsl:
      if (is64bit) {
        DCHECK(value >= 0 && value <= 63);
        if (value >= 0 && value <= 31) {
          opcode = kMips64Dsll;
        } else {
          // dsll32 shifts by (sa + 32); encode only the remainder.
          opcode = kMips64Dsll32;
          value = value - 32;
        }
      } else {
        DCHECK(value >= 0 && value <= 31);
        opcode = kMips64Sll;
      }
      break;
    case kOpLsr:
      if (is64bit) {
        DCHECK(value >= 0 && value <= 63);
        if (value >= 0 && value <= 31) {
          opcode = kMips64Dsrl;
        } else {
          opcode = kMips64Dsrl32;
          value = value - 32;
        }
      } else {
        DCHECK(value >= 0 && value <= 31);
        opcode = kMips64Srl;
      }
      break;
    case kOpAsr:
      if (is64bit) {
        DCHECK(value >= 0 && value <= 63);
        if (value >= 0 && value <= 31) {
          opcode = kMips64Dsra;
        } else {
          opcode = kMips64Dsra32;
          value = value - 32;
        }
      } else {
        DCHECK(value >= 0 && value <= 31);
        opcode = kMips64Sra;
      }
      break;
    case kOpAnd:
      if (IS_UIMM16(value)) {
        opcode = kMips64Andi;
      } else {
        short_form = false;
        opcode = kMips64And;
      }
      break;
    case kOpOr:
      if (IS_UIMM16(value)) {
        opcode = kMips64Ori;
      } else {
        short_form = false;
        opcode = kMips64Or;
      }
      break;
    case kOpXor:
      if (IS_UIMM16(value)) {
        opcode = kMips64Xori;
      } else {
        short_form = false;
        opcode = kMips64Xor;
      }
      break;
    case kOpMul:
      short_form = false;
      opcode = kMips64Mul;
      break;
    default:
      LOG(FATAL) << "Bad case in OpRegRegImm";
      break;
  }

  if (short_form) {
    res = NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), value);
  } else {
    if (r_dest != r_src1) {
      // The constant can be built directly in r_dest; no scratch register needed.
      res = LoadConstant(r_dest, value);
      NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_dest.GetReg());
    } else {
      if (is64bit) {
        RegStorage r_scratch = AllocTempWide();
        res = LoadConstantWide(r_scratch, value);
        NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
      } else {
        RegStorage r_scratch = AllocTemp();
        res = LoadConstant(r_scratch, value);
        NewLIR3(opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
      }
    }
  }
  return res;
}
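
// Illustrative example (not from the original source): kOpAdd with an
// immediate such as 0x12345 does not fit SIMM16, so with r_dest != r_src1
// the constant is built directly in the destination and then added:
//   lui   t0, 0x1         # t0 = 0x10000
//   ori   t0, t0, 0x2345  # t0 = 0x12345
//   daddu t0, t1, t0      # 64-bit operands select the d-form opcode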

LIR* Mips64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) {
  Mips64OpCode opcode = kMips64Nop;
  LIR *res;
  switch (op) {
    case kOpMov:
      opcode = kMips64Move;
      break;
    case kOpMvn:
      return NewLIR3(kMips64Nor, r_dest_src1.GetReg(), r_src2.GetReg(), rZEROd);
    case kOpNeg:
      if (r_dest_src1.Is64Bit()) {
        return NewLIR3(kMips64Dsubu, r_dest_src1.GetReg(), rZEROd, r_src2.GetReg());
      } else {
        return NewLIR3(kMips64Subu, r_dest_src1.GetReg(), rZERO, r_src2.GetReg());
      }
    case kOpAdd:
    case kOpAnd:
    case kOpMul:
    case kOpOr:
    case kOpSub:
    case kOpXor:
      return OpRegRegReg(op, r_dest_src1, r_dest_src1, r_src2);
    case kOp2Byte:
      res = NewLIR2(kMips64Seb, r_dest_src1.GetReg(), r_src2.GetReg());
      return res;
    case kOp2Short:
      res = NewLIR2(kMips64Seh, r_dest_src1.GetReg(), r_src2.GetReg());
      return res;
    case kOp2Char:
      return NewLIR3(kMips64Andi, r_dest_src1.GetReg(), r_src2.GetReg(), 0xFFFF);
    default:
      LOG(FATAL) << "Bad case in OpRegReg";
      UNREACHABLE();
  }
  return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}

LIR* Mips64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
                                MoveType move_type) {
  UNUSED(r_dest, r_base, offset, move_type);
  UNIMPLEMENTED(FATAL);
  UNREACHABLE();
}

LIR* Mips64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset,
                                RegStorage r_src, MoveType move_type) {
  UNUSED(r_base, offset, r_src, move_type);
  UNIMPLEMENTED(FATAL);
  UNREACHABLE();
}

LIR* Mips64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc,
                                 RegStorage r_dest, RegStorage r_src) {
  UNUSED(op, cc, r_dest, r_src);
  LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS64";
  UNREACHABLE();
}

LIR* Mips64Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
  LIR *res = nullptr;
  DCHECK(r_dest.Is64Bit());
  RegStorage r_dest_save = r_dest;
  int is_fp_reg = r_dest.IsFloat();
  if (is_fp_reg) {
    DCHECK(r_dest.IsDouble());
    r_dest = AllocTemp();
  }

  // lui/ori produce a sign-extended 32-bit result, so when bit 31 of the
  // value is set, the dahi/dati immediates below need a +1 carry correction.
  int bit31 = (value & UINT64_C(0x80000000)) != 0;

  // Loads with 1 instruction.
  if (IsUint<16>(value)) {
    res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, value);
  } else if (IsInt<16>(value)) {
    res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, value);
  } else if ((value & 0xFFFF) == 0 && IsInt<16>(value >> 16)) {
    res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
  } else if (IsInt<32>(value)) {
    // Loads with 2 instructions.
    res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
    NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
  } else if ((value & 0xFFFF0000) == 0 && IsInt<16>(value >> 32)) {
    res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, value);
    NewLIR2(kMips64Dahi, r_dest.GetReg(), value >> 32);
  } else if ((value & UINT64_C(0xFFFFFFFF0000)) == 0) {
    res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, value);
    NewLIR2(kMips64Dati, r_dest.GetReg(), value >> 48);
  } else if ((value & 0xFFFF) == 0 && (value >> 32) >= (-32768 - bit31) &&
             (value >> 32) <= (32767 - bit31)) {
    res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
    NewLIR2(kMips64Dahi, r_dest.GetReg(), (value >> 32) + bit31);
  } else if ((value & 0xFFFF) == 0 && ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
    res = NewLIR2(kMips64Lui, r_dest.GetReg(), value >> 16);
    NewLIR2(kMips64Dati, r_dest.GetReg(), (value >> 48) + bit31);
  } else {
    // Strip trailing zero bits; if what remains fits a 1-2 instruction load,
    // materialize that and shift it back into place.
    int64_t tmp = value;
    int shift_cnt = 0;
    while ((tmp & 1) == 0) {
      tmp >>= 1;
      shift_cnt++;
    }

    if (IsUint<16>(tmp)) {
      res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, tmp);
      NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
              shift_cnt & 0x1F);
    } else if (IsInt<16>(tmp)) {
      res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
      NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
              shift_cnt & 0x1F);
    } else if (IsInt<32>(tmp)) {
      // Loads with 3 instructions.
      res = NewLIR2(kMips64Lui, r_dest.GetReg(), tmp >> 16);
      NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), tmp);
      NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
              shift_cnt & 0x1F);
    } else {
      // Retry with the low 16 bits handled by a trailing ori.
      tmp = value >> 16;
      shift_cnt = 16;
      while ((tmp & 1) == 0) {
        tmp >>= 1;
        shift_cnt++;
      }

      if (IsUint<16>(tmp)) {
        res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, tmp);
        NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
                shift_cnt & 0x1F);
        NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
      } else if (IsInt<16>(tmp)) {
        res = NewLIR3(kMips64Daddiu, r_dest.GetReg(), rZEROd, tmp);
        NewLIR3((shift_cnt < 32) ? kMips64Dsll : kMips64Dsll32, r_dest.GetReg(), r_dest.GetReg(),
                shift_cnt & 0x1F);
        NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), value);
      } else {
        // General case: loads with 3-4 instructions (lui/ori for the low
        // 32 bits, then dahi/dati with carry corrections for the high bits).
        uint64_t tmp2 = value;
        if (((tmp2 >> 16) & 0xFFFF) != 0 || (tmp2 & 0xFFFFFFFF) == 0) {
          res = NewLIR2(kMips64Lui, r_dest.GetReg(), tmp2 >> 16);
        }
        if ((tmp2 & 0xFFFF) != 0) {
          if (res) {
            NewLIR3(kMips64Ori, r_dest.GetReg(), r_dest.GetReg(), tmp2);
          } else {
            res = NewLIR3(kMips64Ori, r_dest.GetReg(), rZEROd, tmp2);
          }
        }
        if (bit31) {
          tmp2 += UINT64_C(0x100000000);
        }
        if (((tmp2 >> 32) & 0xFFFF) != 0) {
          NewLIR2(kMips64Dahi, r_dest.GetReg(), tmp2 >> 32);
        }
        if (tmp2 & UINT64_C(0x800000000000)) {
          tmp2 += UINT64_C(0x1000000000000);
        }
        if ((tmp2 >> 48) != 0) {
          NewLIR2(kMips64Dati, r_dest.GetReg(), tmp2 >> 48);
        }
      }
    }
  }

  if (is_fp_reg) {
    NewLIR2(kMips64Dmtc1, r_dest.GetReg(), r_dest_save.GetReg());
    FreeTemp(r_dest);
  }

  return res;
}
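
// Worked example (illustrative, not from the original source): loading
// 0x0123456789abcdef takes the general 4-instruction path:
//   lui  t0, 0x89ab         # t0 = 0xffffffff89ab0000 (sign-extended)
//   ori  t0, t0, 0xcdef     # t0 = 0xffffffff89abcdef
//   dahi t0, 0x4568         # 0x4567 + 1 carry fix: t0 = 0x0000456789abcdef
//   dati t0, 0x0123         # t0 = 0x0123456789abcdef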

/* Load value from base + scaled index. */
LIR* Mips64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                                    int scale, OpSize size) {
  LIR *first = nullptr;
  LIR *res;
  RegStorage t_reg;
  Mips64OpCode opcode = kMips64Nop;
  bool is64bit = r_dest.Is64Bit();
  if (is64bit) {
    t_reg = AllocTempWide();
  } else {
    t_reg = AllocTemp();
  }

  if (r_dest.IsFloat()) {
    DCHECK(r_dest.IsSingle());
    DCHECK((size == k32) || (size == kSingle) || (size == kReference));
    size = kSingle;
  } else if (is64bit) {
    size = k64;
  } else {
    if (size == kSingle) {
      size = k32;
    }
  }

  if (!scale) {
    if (is64bit) {
      first = NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
    } else {
      first = NewLIR3(kMips64Addu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
    }
  } else {
    first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
    NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
  }

  switch (size) {
    case k64:
      opcode = kMips64Ld;
      break;
    case kSingle:
      opcode = kMips64Flwc1;
      break;
    case k32:
    case kReference:
      opcode = kMips64Lw;
      break;
    case kUnsignedHalf:
      opcode = kMips64Lhu;
      break;
    case kSignedHalf:
      opcode = kMips64Lh;
      break;
    case kUnsignedByte:
      opcode = kMips64Lbu;
      break;
    case kSignedByte:
      opcode = kMips64Lb;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseIndexed";
  }

  res = NewLIR3(opcode, r_dest.GetReg(), 0, t_reg.GetReg());
  FreeTemp(t_reg);
  return (first) ? first : res;
}
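
// Illustrative example (not from the original source): a 32-bit element load
// r_dest = *(r_base + (r_index << scale)) with scale == 2 emits:
//   sll   t0, a1, 2    # scaled index in a temp (32-bit index register)
//   daddu t0, a0, t0   # t0 = base + scaled index
//   lw    v0, 0(t0)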

/* Store value at base + scaled index. */
LIR* Mips64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                     int scale, OpSize size) {
  LIR *first = nullptr;
  Mips64OpCode opcode = kMips64Nop;
  RegStorage t_reg = AllocTemp();

  if (r_src.IsFloat()) {
    DCHECK(r_src.IsSingle());
    DCHECK((size == k32) || (size == kSingle) || (size == kReference));
    size = kSingle;
  } else {
    if (size == kSingle) {
      size = k32;
    }
  }

  if (!scale) {
    first = NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), r_index.GetReg());
  } else {
    first = OpRegRegImm(kOpLsl, t_reg, r_index, scale);
    NewLIR3(kMips64Daddu, t_reg.GetReg(), r_base.GetReg(), t_reg.GetReg());
  }

  switch (size) {
    case kSingle:
      opcode = kMips64Fswc1;
      break;
    case k32:
    case kReference:
      opcode = kMips64Sw;
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = kMips64Sh;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kMips64Sb;
      break;
    default:
      LOG(FATAL) << "Bad case in StoreBaseIndexed";
  }
  NewLIR3(opcode, r_src.GetReg(), 0, t_reg.GetReg());
  return first;
}

// FIXME: don't split r_dest into 2 containers.
/*
 * Load value from base + displacement. Optionally perform null check
 * on base (which must have an associated s_reg and MIR). If not
 * performing null check, incoming MIR can be null. IMPORTANT: this
 * code must not allocate any new temps. If a new register is needed
 * and base and dest are the same, spill some other register to
 * rlp and then restore.
 */
LIR* Mips64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStorage r_dest,
                                     OpSize size) {
  LIR *res;
  LIR *load = nullptr;
  Mips64OpCode opcode = kMips64Nop;
  bool short_form = IS_SIMM16(displacement);

  switch (size) {
    case k64:
    case kDouble:
      r_dest = Check64BitReg(r_dest);
      if (!r_dest.IsFloat()) {
        opcode = kMips64Ld;
      } else {
        opcode = kMips64Fldc1;
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case k32:
    case kSingle:
    case kReference:
      opcode = kMips64Lw;
      if (r_dest.IsFloat()) {
        opcode = kMips64Flwc1;
        DCHECK(r_dest.IsSingle());
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
      opcode = kMips64Lhu;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kSignedHalf:
      opcode = kMips64Lh;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
      opcode = kMips64Lbu;
      break;
    case kSignedByte:
      opcode = kMips64Lb;
      break;
    default:
      LOG(FATAL) << "Bad case in LoadBaseDispBody";
  }

  if (short_form) {
    load = res = NewLIR3(opcode, r_dest.GetReg(), displacement, r_base.GetReg());
  } else {
    RegStorage r_tmp = (r_base == r_dest) ? AllocTemp() : r_dest;
    res = OpRegRegImm(kOpAdd, r_tmp, r_base, displacement);
    load = NewLIR3(opcode, r_dest.GetReg(), 0, r_tmp.GetReg());
    if (r_tmp != r_dest) {
      FreeTemp(r_tmp);
    }
  }

  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    DCHECK_EQ(r_base, rs_rMIPS64_SP);
    AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, r_dest.Is64Bit());
  }
  return res;
}
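
// Illustrative example (not from the original source): a displacement such as
// 0x12340 fails IS_SIMM16, so the address is computed first; when r_base and
// r_dest differ, r_dest itself doubles as the scratch register:
//   lui   v0, 0x1         # build the displacement in r_dest (see OpRegRegImm)
//   ori   v0, v0, 0x2340
//   daddu v0, a0, v0      # v0 = base + displacement
//   lw    v0, 0(v0)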

LIR* Mips64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                                 OpSize size, VolatileKind is_volatile) {
  if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble) &&
      displacement & 0x7)) {
    // TODO: use lld/scd instructions for Mips64.
    // The displacement is not 8-byte aligned, so a plain ld is not guaranteed
    // atomic; do the 64-bit load through the atomic helper.
    return GenAtomic64Load(r_base, displacement, r_dest);
  }

  // TODO: base this on target.
  if (size == kWord) {
    size = k64;
  }
  LIR* load;
  load = LoadBaseDispBody(r_base, displacement, r_dest, size);

  if (UNLIKELY(is_volatile == kVolatile)) {
    GenMemBarrier(kLoadAny);
  }

  return load;
}

// FIXME: don't split r_src into 2 containers.
LIR* Mips64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
                                      OpSize size) {
  LIR *res;
  LIR *store = nullptr;
  Mips64OpCode opcode = kMips64Nop;
  bool short_form = IS_SIMM16(displacement);

  switch (size) {
    case k64:
    case kDouble:
      r_src = Check64BitReg(r_src);
      if (!r_src.IsFloat()) {
        opcode = kMips64Sd;
      } else {
        opcode = kMips64Fsdc1;
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case k32:
    case kSingle:
    case kReference:
      opcode = kMips64Sw;
      if (r_src.IsFloat()) {
        opcode = kMips64Fswc1;
        DCHECK(r_src.IsSingle());
      }
      DCHECK_EQ((displacement & 0x3), 0);
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = kMips64Sh;
      DCHECK_EQ((displacement & 0x1), 0);
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = kMips64Sb;
      break;
    default:
      LOG(FATAL) << "Bad case in StoreBaseDispBody";
  }

  if (short_form) {
    store = res = NewLIR3(opcode, r_src.GetReg(), displacement, r_base.GetReg());
  } else {
    RegStorage r_scratch = AllocTemp();
    res = OpRegRegImm(kOpAdd, r_scratch, r_base, displacement);
    store = NewLIR3(opcode, r_src.GetReg(), 0, r_scratch.GetReg());
    FreeTemp(r_scratch);
  }

  if (mem_ref_type_ == ResourceMask::kDalvikReg) {
    DCHECK_EQ(r_base, rs_rMIPS64_SP);
    AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, r_src.Is64Bit());
  }

  return res;
}

LIR* Mips64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                                  OpSize size, VolatileKind is_volatile) {
  if (is_volatile == kVolatile) {
    // Ensure that prior accesses become visible to other threads first.
    GenMemBarrier(kAnyStore);
  }

  LIR* store;
  if (UNLIKELY(is_volatile == kVolatile && (size == k64 || size == kDouble) &&
      displacement & 0x7)) {
    // TODO: use lld/scd instructions for Mips64.
    // Do atomic 64-bit store.
    store = GenAtomic64Store(r_base, displacement, r_src);
  } else {
    // TODO: base this on target.
    if (size == kWord) {
      size = k64;
    }
    store = StoreBaseDispBody(r_base, displacement, r_src, size);
  }

  if (UNLIKELY(is_volatile == kVolatile)) {
    // Preserve order with respect to any subsequent volatile loads.
    // We need StoreLoad, but that generally requires the most expensive barrier.
    GenMemBarrier(kAnyAny);
  }

  return store;
}
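
// Illustrative sequence (an assumption about how GenMemBarrier lowers on
// MIPS64, where each barrier kind maps to a sync): a volatile 32-bit store
// is therefore bracketed by two barriers:
//   sync             # kAnyStore: prior accesses complete before the store
//   sw   a1, 0(a0)
//   sync             # kAnyAny: conservative stand-in for StoreLoad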

LIR* Mips64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
  UNUSED(op, r_base, disp);
  LOG(FATAL) << "Unexpected use of OpMem for MIPS64";
  UNREACHABLE();
}

LIR* Mips64Mir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
  UNUSED(cc, target);
  LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS64";
  UNREACHABLE();
}

LIR* Mips64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
  UNUSED(trampoline);  // The address of the trampoline is already loaded into r_tgt.
  return OpReg(op, r_tgt);
}

}  // namespace art