/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "assembler_mips64.h"

#include "base/casts.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "memory_region.h"
#include "thread.h"

namespace art {
namespace mips64 {

void Mips64Assembler::Emit(int32_t value) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<int32_t>(value);
}

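// An R-type encoding packs opcode(6) | rs(5) | rt(5) | rd(5) | shamt(5) | funct(6)
// into a single 32-bit word. For example, Addu(V0, A0, A1) emits
//   0 << 26 | 4 << 21 | 5 << 16 | 2 << 11 | 0 << 6 | 0x21 == 0x00851021,
// which disassembles as "addu $v0, $a0, $a1".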
void Mips64Assembler::EmitR(int opcode, GpuRegister rs, GpuRegister rt, GpuRegister rd,
                            int shamt, int funct) {
  CHECK_NE(rs, kNoGpuRegister);
  CHECK_NE(rt, kNoGpuRegister);
  CHECK_NE(rd, kNoGpuRegister);
  int32_t encoding = opcode << kOpcodeShift |
                     static_cast<int32_t>(rs) << kRsShift |
                     static_cast<int32_t>(rt) << kRtShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     shamt << kShamtShift |
                     funct;
  Emit(encoding);
}

void Mips64Assembler::EmitI(int opcode, GpuRegister rs, GpuRegister rt, uint16_t imm) {
  CHECK_NE(rs, kNoGpuRegister);
  CHECK_NE(rt, kNoGpuRegister);
  int32_t encoding = opcode << kOpcodeShift |
                     static_cast<int32_t>(rs) << kRsShift |
                     static_cast<int32_t>(rt) << kRtShift |
                     imm;
  Emit(encoding);
}

void Mips64Assembler::EmitJ(int opcode, int address) {
  int32_t encoding = opcode << kOpcodeShift |
                     address;
  Emit(encoding);
}

void Mips64Assembler::EmitFR(int opcode, int fmt, FpuRegister ft, FpuRegister fs, FpuRegister fd,
                             int funct) {
  CHECK_NE(ft, kNoFpuRegister);
  CHECK_NE(fs, kNoFpuRegister);
  CHECK_NE(fd, kNoFpuRegister);
  int32_t encoding = opcode << kOpcodeShift |
                     fmt << kFmtShift |
                     static_cast<int32_t>(ft) << kFtShift |
                     static_cast<int32_t>(fs) << kFsShift |
                     static_cast<int32_t>(fd) << kFdShift |
                     funct;
  Emit(encoding);
}

void Mips64Assembler::EmitFI(int opcode, int fmt, FpuRegister rt, uint16_t imm) {
  CHECK_NE(rt, kNoFpuRegister);
  int32_t encoding = opcode << kOpcodeShift |
                     fmt << kFmtShift |
                     static_cast<int32_t>(rt) << kRtShift |
                     imm;
  Emit(encoding);
}

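// Branches to unbound labels are linked through the code itself: an unbound
// Label records the buffer position of the most recent branch that targets it,
// and that branch's offset field temporarily holds the link to the previous
// such branch. Bind() later walks this chain and patches in the real
// displacements.
//
// Illustrative usage sketch only (not part of the original file):
//   Label skip;
//   assembler->EmitBranch(T0, ZERO, &skip, /* equal */ true);  // beq t0, zero, skip
//   ...                                                        // runs when t0 != 0
//   assembler->Bind(&skip, /* is_jump */ false);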
void Mips64Assembler::EmitBranch(GpuRegister rt, GpuRegister rs, Label* label, bool equal) {
  int offset;
  if (label->IsBound()) {
    offset = label->Position() - buffer_.Size();
  } else {
    // Use the offset field of the branch instruction for linking the sites.
    offset = label->position_;
    label->LinkTo(buffer_.Size());
  }
  if (equal) {
    Beq(rt, rs, (offset >> 2) & kBranchOffsetMask);
  } else {
    Bne(rt, rs, (offset >> 2) & kBranchOffsetMask);
  }
}

void Mips64Assembler::EmitJump(Label* label, bool link) {
  int offset;
  if (label->IsBound()) {
    offset = label->Position() - buffer_.Size();
  } else {
    // Use the offset field of the jump instruction for linking the sites.
    offset = label->position_;
    label->LinkTo(buffer_.Size());
  }
  if (link) {
    Jal((offset >> 2) & kJumpOffsetMask);
  } else {
    J((offset >> 2) & kJumpOffsetMask);
  }
}

int32_t Mips64Assembler::EncodeBranchOffset(int offset, int32_t inst, bool is_jump) {
  CHECK_ALIGNED(offset, 4);
  CHECK(IsInt(POPCOUNT(kBranchOffsetMask), offset)) << offset;

  // Properly preserve only the bits supported in the instruction.
  offset >>= 2;
  if (is_jump) {
    offset &= kJumpOffsetMask;
    return (inst & ~kJumpOffsetMask) | offset;
  } else {
    offset &= kBranchOffsetMask;
    return (inst & ~kBranchOffsetMask) | offset;
  }
}

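// The shift pairs below sign-extend the stored field and convert it back from
// words to bytes in one step: for the 26-bit jump field, << 6 moves its sign
// bit to bit 31 and the arithmetic >> 4 sign-extends while leaving a net << 2;
// likewise << 16 / >> 14 for the 16-bit branch field.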
int Mips64Assembler::DecodeBranchOffset(int32_t inst, bool is_jump) {
  // Sign-extend, then left-shift by 2.
  if (is_jump) {
    return (((inst & kJumpOffsetMask) << 6) >> 4);
  } else {
    return (((inst & kBranchOffsetMask) << 16) >> 14);
  }
}

void Mips64Assembler::Bind(Label* label, bool is_jump) {
  CHECK(!label->IsBound());
  int bound_pc = buffer_.Size();
  while (label->IsLinked()) {
    int32_t position = label->Position();
    int32_t next = buffer_.Load<int32_t>(position);
    int32_t offset = is_jump ? bound_pc - position : bound_pc - position - 4;
    int32_t encoded = Mips64Assembler::EncodeBranchOffset(offset, next, is_jump);
    buffer_.Store<int32_t>(position, encoded);
    label->position_ = Mips64Assembler::DecodeBranchOffset(next, is_jump);
  }
  label->BindTo(bound_pc);
}

void Mips64Assembler::Add(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x20);
}

void Mips64Assembler::Addi(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x8, rs, rt, imm16);
}

void Mips64Assembler::Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x21);
}

void Mips64Assembler::Addiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x9, rs, rt, imm16);
}

void Mips64Assembler::Daddiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x19, rs, rt, imm16);
}

void Mips64Assembler::Sub(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x22);
}

void Mips64Assembler::Subu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x23);
}

void Mips64Assembler::Mult(GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x18);
}

void Mips64Assembler::Multu(GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x19);
}

void Mips64Assembler::Div(GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x1a);
}

void Mips64Assembler::Divu(GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x1b);
}

void Mips64Assembler::And(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x24);
}

void Mips64Assembler::Andi(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xc, rs, rt, imm16);
}

void Mips64Assembler::Or(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x25);
}

void Mips64Assembler::Ori(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xd, rs, rt, imm16);
}

void Mips64Assembler::Xor(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x26);
}

void Mips64Assembler::Xori(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xe, rs, rt, imm16);
}

void Mips64Assembler::Nor(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x27);
}

// For sll/srl/sra the shifted value (second argument) is encoded in the
// instruction's rt field; the rs field must be zero.
void Mips64Assembler::Sll(GpuRegister rd, GpuRegister rs, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rs, rd, shamt, 0x00);
}

void Mips64Assembler::Srl(GpuRegister rd, GpuRegister rs, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rs, rd, shamt, 0x02);
}

void Mips64Assembler::Sra(GpuRegister rd, GpuRegister rs, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rs, rd, shamt, 0x03);
}

void Mips64Assembler::Sllv(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x04);
}

void Mips64Assembler::Srlv(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x06);
}

void Mips64Assembler::Srav(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x07);
}

void Mips64Assembler::Lb(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x20, rs, rt, imm16);
}

void Mips64Assembler::Lh(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x21, rs, rt, imm16);
}

void Mips64Assembler::Lw(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x23, rs, rt, imm16);
}

void Mips64Assembler::Ld(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x37, rs, rt, imm16);
}

void Mips64Assembler::Lbu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x24, rs, rt, imm16);
}

void Mips64Assembler::Lhu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x25, rs, rt, imm16);
}

void Mips64Assembler::Lui(GpuRegister rt, uint16_t imm16) {
  EmitI(0xf, static_cast<GpuRegister>(0), rt, imm16);
}

void Mips64Assembler::Mfhi(GpuRegister rd) {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), rd, 0, 0x10);
}

void Mips64Assembler::Mflo(GpuRegister rd) {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), rd, 0, 0x12);
}

void Mips64Assembler::Sb(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x28, rs, rt, imm16);
}

void Mips64Assembler::Sh(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x29, rs, rt, imm16);
}

void Mips64Assembler::Sw(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x2b, rs, rt, imm16);
}

void Mips64Assembler::Sd(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x3f, rs, rt, imm16);
}

void Mips64Assembler::Slt(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x2a);
}

void Mips64Assembler::Sltu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x2b);
}

void Mips64Assembler::Slti(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xa, rs, rt, imm16);
}

void Mips64Assembler::Sltiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xb, rs, rt, imm16);
}

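// Every branch and jump below emits a trailing Nop() to fill the MIPS
// branch-delay slot, so callers never have to schedule one themselves (at the
// cost of a wasted issue slot).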
void Mips64Assembler::Beq(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x4, rs, rt, imm16);
  Nop();
}

void Mips64Assembler::Bne(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x5, rs, rt, imm16);
  Nop();
}

void Mips64Assembler::J(uint32_t address) {
  EmitJ(0x2, address);
  Nop();
}

void Mips64Assembler::Jal(uint32_t address) {
  EmitJ(0x3, address);  // jal uses opcode 0x3 (j is 0x2).
  Nop();
}

void Mips64Assembler::Jr(GpuRegister rs) {
  EmitR(0, rs, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), 0, 0x09);  // Jalr zero, rs
  Nop();
}

void Mips64Assembler::Jalr(GpuRegister rs) {
  EmitR(0, rs, static_cast<GpuRegister>(0), RA, 0, 0x09);
  Nop();
}

void Mips64Assembler::AddS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x0);
}

void Mips64Assembler::SubS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x1);
}

void Mips64Assembler::MulS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x2);
}

void Mips64Assembler::DivS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x3);
}

void Mips64Assembler::AddD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x0);
}

void Mips64Assembler::SubD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x1);
}

void Mips64Assembler::MulD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x2);
}

void Mips64Assembler::DivD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x3);
}

void Mips64Assembler::MovS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x6);
}

void Mips64Assembler::MovD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x6);
}

void Mips64Assembler::Mfc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x00, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}

void Mips64Assembler::Mtc1(FpuRegister ft, GpuRegister rs) {
  // As with mfc1, the GPR goes in the rt field and the FPU register in the fs field.
  EmitFR(0x11, 0x04, static_cast<FpuRegister>(rs), ft, static_cast<FpuRegister>(0), 0x0);
}

void Mips64Assembler::Lwc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x31, rs, static_cast<GpuRegister>(ft), imm16);
}

void Mips64Assembler::Ldc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x35, rs, static_cast<GpuRegister>(ft), imm16);
}

void Mips64Assembler::Swc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x39, rs, static_cast<GpuRegister>(ft), imm16);
}

void Mips64Assembler::Sdc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x3d, rs, static_cast<GpuRegister>(ft), imm16);
}

void Mips64Assembler::Break() {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
        static_cast<GpuRegister>(0), 0, 0xD);
}

void Mips64Assembler::Nop() {
  EmitR(0x0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
        static_cast<GpuRegister>(0), 0, 0x0);
}

void Mips64Assembler::Move(GpuRegister rt, GpuRegister rs) {
  EmitI(0x19, rs, rt, 0);  // Daddiu rt, rs, 0.
}

void Mips64Assembler::Clear(GpuRegister rt) {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), rt, 0, 0x20);
}

void Mips64Assembler::Not(GpuRegister rt, GpuRegister rs) {
  EmitR(0, static_cast<GpuRegister>(0), rs, rt, 0, 0x27);
}

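// Mul, Div and Rem are two-instruction macros in the classic (pre-R6) style:
// mult/div deposit their results in the HI/LO registers, and mflo/mfhi then
// move the product, quotient or remainder into rd.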
void Mips64Assembler::Mul(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  Mult(rs, rt);
  Mflo(rd);
}

void Mips64Assembler::Div(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  Div(rs, rt);
  Mflo(rd);
}

void Mips64Assembler::Rem(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  Div(rs, rt);
  Mfhi(rd);
}

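// Despite their names, AddConstant64 and LoadImmediate64 only accept values
// that fit the signed 16-bit daddiu immediate; the CHECKs below enforce this.
// A hypothetical sketch of materializing a wider 32-bit constant (not
// provided by this assembler) would need a two-instruction sequence such as:
//   lui  rt, upper16(value)
//   ori  rt, rt, lower16(value)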
void Mips64Assembler::AddConstant64(GpuRegister rt, GpuRegister rs, int32_t value) {
  CHECK((value >= -32768) && (value <= 32766));
  Daddiu(rt, rs, value);
}

void Mips64Assembler::LoadImmediate64(GpuRegister rt, int32_t value) {
  CHECK((value >= -32768) && (value <= 32766));
  Daddiu(rt, ZERO, value);
}

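// The *FromOffset/*ToOffset helpers pass the offset straight into a single
// load or store instruction, so it must fit the signed 16-bit displacement
// field; LoadFpuFromOffset CHECKs this explicitly, the others assume it.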
void Mips64Assembler::LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base,
                                     int32_t offset) {
  switch (type) {
    case kLoadSignedByte:
      Lb(reg, base, offset);
      break;
    case kLoadUnsignedByte:
      Lbu(reg, base, offset);
      break;
    case kLoadSignedHalfword:
      Lh(reg, base, offset);
      break;
    case kLoadUnsignedHalfword:
      Lhu(reg, base, offset);
      break;
    case kLoadWord:
      Lw(reg, base, offset);
      break;
    case kLoadDoubleword:
      // TODO: alignment issues ???
      Ld(reg, base, offset);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

void Mips64Assembler::LoadFpuFromOffset(LoadOperandType type, FpuRegister reg, GpuRegister base,
                                        int32_t offset) {
  CHECK((offset >= -32768) && (offset <= 32766));
  switch (type) {
    case kLoadWord:
      Lwc1(reg, base, offset);
      break;
    case kLoadDoubleword:
      // TODO: alignment issues ???
      Ldc1(reg, base, offset);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

void Mips64Assembler::EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset,
                               size_t size) {
  Mips64ManagedRegister dst = m_dst.AsMips64();
  if (dst.IsNoRegister()) {
    CHECK_EQ(0u, size) << dst;
  } else if (dst.IsGpuRegister()) {
    if (size == 4) {
      CHECK_EQ(4u, size) << dst;
      LoadFromOffset(kLoadWord, dst.AsGpuRegister(), src_register, src_offset);
    } else if (size == 8) {
      CHECK_EQ(8u, size) << dst;
      LoadFromOffset(kLoadDoubleword, dst.AsGpuRegister(), src_register, src_offset);
    } else {
      UNIMPLEMENTED(FATAL) << "We only support Load() of size 4 and 8";
    }
  } else if (dst.IsFpuRegister()) {
    if (size == 4) {
      CHECK_EQ(4u, size) << dst;
      LoadFpuFromOffset(kLoadWord, dst.AsFpuRegister(), src_register, src_offset);
    } else if (size == 8) {
      CHECK_EQ(8u, size) << dst;
      LoadFpuFromOffset(kLoadDoubleword, dst.AsFpuRegister(), src_register, src_offset);
    } else {
      UNIMPLEMENTED(FATAL) << "We only support Load() of size 4 and 8";
    }
  }
}

void Mips64Assembler::StoreToOffset(StoreOperandType type, GpuRegister reg, GpuRegister base,
                                    int32_t offset) {
  switch (type) {
    case kStoreByte:
      Sb(reg, base, offset);
      break;
    case kStoreHalfword:
      Sh(reg, base, offset);
      break;
    case kStoreWord:
      Sw(reg, base, offset);
      break;
    case kStoreDoubleword:
      // TODO: alignment issues ???
      Sd(reg, base, offset);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

void Mips64Assembler::StoreFpuToOffset(StoreOperandType type, FpuRegister reg, GpuRegister base,
                                       int32_t offset) {
  switch (type) {
    case kStoreWord:
      Swc1(reg, base, offset);
      break;
    case kStoreDoubleword:
      Sdc1(reg, base, offset);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

constexpr size_t kFramePointerSize = 8;

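// BuildFrame lays out the stack as follows (SP already decremented by
// frame_size): RA is stored at [SP + frame_size - 8], the callee-save
// registers below it, the ArtMethod* reference at [SP + 0], and any entry
// spills are written just above the frame, starting at
// [SP + frame_size + sizeof(StackReference<mirror::ArtMethod>)].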
void Mips64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                                 const std::vector<ManagedRegister>& callee_save_regs,
                                 const ManagedRegisterEntrySpills& entry_spills) {
  CHECK_ALIGNED(frame_size, kStackAlignment);

  // Increase frame to required size.
  IncreaseFrameSize(frame_size);

  // Push callee saves and return address.
  int stack_offset = frame_size - kFramePointerSize;
  StoreToOffset(kStoreDoubleword, RA, SP, stack_offset);
  for (int i = callee_save_regs.size() - 1; i >= 0; --i) {
    stack_offset -= kFramePointerSize;
    GpuRegister reg = callee_save_regs.at(i).AsMips64().AsGpuRegister();
    StoreToOffset(kStoreDoubleword, reg, SP, stack_offset);
  }

  // Write out Method*.
  StoreToOffset(kStoreWord, method_reg.AsMips64().AsGpuRegister(), SP, 0);

  // Write out entry spills.
  int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>);
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    Mips64ManagedRegister reg = entry_spills.at(i).AsMips64();
    ManagedRegisterSpill spill = entry_spills.at(i);
    int32_t size = spill.getSize();
    if (reg.IsNoRegister()) {
      // Only increment stack offset.
      offset += size;
    } else if (reg.IsFpuRegister()) {
      StoreFpuToOffset((size == 4) ? kStoreWord : kStoreDoubleword, reg.AsFpuRegister(), SP, offset);
      offset += size;
    } else if (reg.IsGpuRegister()) {
      StoreToOffset((size == 4) ? kStoreWord : kStoreDoubleword, reg.AsGpuRegister(), SP, offset);
      offset += size;
    }
  }
}

void Mips64Assembler::RemoveFrame(size_t frame_size,
                                  const std::vector<ManagedRegister>& callee_save_regs) {
  CHECK_ALIGNED(frame_size, kStackAlignment);

  // Pop callee saves and return address.
  int stack_offset = frame_size - (callee_save_regs.size() * kFramePointerSize) - kFramePointerSize;
  for (size_t i = 0; i < callee_save_regs.size(); ++i) {
    GpuRegister reg = callee_save_regs.at(i).AsMips64().AsGpuRegister();
    LoadFromOffset(kLoadDoubleword, reg, SP, stack_offset);
    stack_offset += kFramePointerSize;
  }
  LoadFromOffset(kLoadDoubleword, RA, SP, stack_offset);

  // Decrease frame to required size.
  DecreaseFrameSize(frame_size);

  // Then jump to the return address.
  Jr(RA);
}

void Mips64Assembler::IncreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant64(SP, SP, -adjust);
}

void Mips64Assembler::DecreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant64(SP, SP, adjust);
}

void Mips64Assembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
  Mips64ManagedRegister src = msrc.AsMips64();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsGpuRegister()) {
    CHECK(size == 4 || size == 8) << size;
    if (size == 8) {
      StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
    } else if (size == 4) {
      StoreToOffset(kStoreWord, src.AsGpuRegister(), SP, dest.Int32Value());
    } else {
      UNIMPLEMENTED(FATAL) << "We only support Store() of size 4 and 8";
    }
  } else if (src.IsFpuRegister()) {
    CHECK(size == 4 || size == 8) << size;
    if (size == 8) {
      StoreFpuToOffset(kStoreDoubleword, src.AsFpuRegister(), SP, dest.Int32Value());
    } else if (size == 4) {
      StoreFpuToOffset(kStoreWord, src.AsFpuRegister(), SP, dest.Int32Value());
    } else {
      UNIMPLEMENTED(FATAL) << "We only support Store() of size 4 and 8";
    }
  }
}

void Mips64Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
  Mips64ManagedRegister src = msrc.AsMips64();
  CHECK(src.IsGpuRegister());
  StoreToOffset(kStoreWord, src.AsGpuRegister(), SP, dest.Int32Value());
}

void Mips64Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  Mips64ManagedRegister src = msrc.AsMips64();
  CHECK(src.IsGpuRegister());
  StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
}

void Mips64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                                            ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadImmediate64(scratch.AsGpuRegister(), imm);
  StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
}

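// The *Thread64 variants address memory relative to S1, which this code uses
// as the Thread* register (see GetCurrentThread below), whereas the plain
// variants address memory relative to SP.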
void Mips64Assembler::StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
                                               ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadImmediate64(scratch.AsGpuRegister(), imm);
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, dest.Int32Value());
}

void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
                                                 FrameOffset fr_offs,
                                                 ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  AddConstant64(scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
}

void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs) {
  StoreToOffset(kStoreDoubleword, SP, S1, thr_offs.Int32Value());
}

void Mips64Assembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
                                    FrameOffset in_off, ManagedRegister mscratch) {
  Mips64ManagedRegister src = msrc.AsMips64();
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), SP, in_off.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value() + 8);
}

void Mips64Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
  return EmitLoad(mdest, SP, src.Int32Value(), size);
}

void Mips64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> src, size_t size) {
  return EmitLoad(mdest, S1, src.Int32Value(), size);
}

void Mips64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister());
  LoadFromOffset(kLoadWord, dest.AsGpuRegister(), SP, src.Int32Value());
}

void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
                              MemberOffset offs) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister());
  LoadFromOffset(kLoadWord, dest.AsGpuRegister(),
                 base.AsMips64().AsGpuRegister(), offs.Int32Value());
  if (kPoisonHeapReferences) {
    Subu(dest.AsGpuRegister(), ZERO, dest.AsGpuRegister());
  }
}

void Mips64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
                                 Offset offs) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister()) << dest;
  LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(),
                 base.AsMips64().AsGpuRegister(), offs.Int32Value());
}

void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest,
                                             ThreadOffset<8> offs) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister());
  LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(), S1, offs.Int32Value());
}

void Mips64Assembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no sign extension necessary for mips64";
}

void Mips64Assembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no zero extension necessary for mips64";
}

void Mips64Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  Mips64ManagedRegister src = msrc.AsMips64();
  if (!dest.Equals(src)) {
    if (dest.IsGpuRegister()) {
      CHECK(src.IsGpuRegister()) << src;
      Move(dest.AsGpuRegister(), src.AsGpuRegister());
    } else if (dest.IsFpuRegister()) {
      CHECK(src.IsFpuRegister()) << src;
      if (size == 4) {
        MovS(dest.AsFpuRegister(), src.AsFpuRegister());
      } else if (size == 8) {
        MovD(dest.AsFpuRegister(), src.AsFpuRegister());
      } else {
        UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
      }
    }
  }
}

void Mips64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
                              ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP, src.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
}

void Mips64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
                                             ThreadOffset<8> thr_offs,
                                             ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
}

void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs,
                                           FrameOffset fr_offs,
                                           ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
                 SP, fr_offs.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(),
                S1, thr_offs.Int32Value());
}

void Mips64Assembler::Copy(FrameOffset dest, FrameOffset src,
                           ManagedRegister mscratch, size_t size) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Mips64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                           ManagedRegister mscratch, size_t size) {
  GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch, src_base.AsMips64().AsGpuRegister(),
                   src_offset.Int32Value());
    StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch, src_base.AsMips64().AsGpuRegister(),
                   src_offset.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch, SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Mips64Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
                           ManagedRegister mscratch, size_t size) {
  GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch, dest_base.AsMips64().AsGpuRegister(),
                  dest_offset.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch, SP, src.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch, dest_base.AsMips64().AsGpuRegister(),
                  dest_offset.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Mips64Assembler::Copy(FrameOffset /*dest*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
                           ManagedRegister /*mscratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no mips64 implementation";
}

void Mips64Assembler::Copy(ManagedRegister dest, Offset dest_offset,
                           ManagedRegister src, Offset src_offset,
                           ManagedRegister mscratch, size_t size) {
  GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch, src.AsMips64().AsGpuRegister(), src_offset.Int32Value());
    StoreToOffset(kStoreWord, scratch, dest.AsMips64().AsGpuRegister(), dest_offset.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch, src.AsMips64().AsGpuRegister(),
                   src_offset.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch, dest.AsMips64().AsGpuRegister(),
                  dest_offset.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Mips64Assembler::Copy(FrameOffset /*dest*/, Offset /*dest_offset*/,
                           FrameOffset /*src*/, Offset /*src_offset*/,
                           ManagedRegister /*mscratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no mips64 implementation";
}

void Mips64Assembler::MemoryBarrier(ManagedRegister) {
  UNIMPLEMENTED(FATAL) << "no mips64 implementation";
}

void Mips64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
                                             FrameOffset handle_scope_offset,
                                             ManagedRegister min_reg, bool null_allowed) {
  Mips64ManagedRegister out_reg = mout_reg.AsMips64();
  Mips64ManagedRegister in_reg = min_reg.AsMips64();
  CHECK(in_reg.IsNoRegister() || in_reg.IsGpuRegister()) << in_reg;
  CHECK(out_reg.IsGpuRegister()) << out_reg;
  if (null_allowed) {
    Label null_arg;
    // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
    if (in_reg.IsNoRegister()) {
      LoadFromOffset(kLoadWord, out_reg.AsGpuRegister(),
                     SP, handle_scope_offset.Int32Value());
      in_reg = out_reg;
    }
    if (!out_reg.Equals(in_reg)) {
      LoadImmediate64(out_reg.AsGpuRegister(), 0);
    }
    EmitBranch(in_reg.AsGpuRegister(), ZERO, &null_arg, true);
    AddConstant64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
    Bind(&null_arg, false);
  } else {
    AddConstant64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
  }
}

void Mips64Assembler::CreateHandleScopeEntry(FrameOffset out_off,
                                             FrameOffset handle_scope_offset,
                                             ManagedRegister mscratch,
                                             bool null_allowed) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  if (null_allowed) {
    Label null_arg;
    LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP,
                   handle_scope_offset.Int32Value());
    // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
    EmitBranch(scratch.AsGpuRegister(), ZERO, &null_arg, true);
    AddConstant64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
    Bind(&null_arg, false);
  } else {
    AddConstant64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
  }
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, out_off.Int32Value());
}

// Given a handle scope entry, load the associated reference.
void Mips64Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
                                                   ManagedRegister min_reg) {
  Mips64ManagedRegister out_reg = mout_reg.AsMips64();
  Mips64ManagedRegister in_reg = min_reg.AsMips64();
  CHECK(out_reg.IsGpuRegister()) << out_reg;
  CHECK(in_reg.IsGpuRegister()) << in_reg;
  Label null_arg;
  if (!out_reg.Equals(in_reg)) {
    LoadImmediate64(out_reg.AsGpuRegister(), 0);
  }
  EmitBranch(in_reg.AsGpuRegister(), ZERO, &null_arg, true);
  LoadFromOffset(kLoadDoubleword, out_reg.AsGpuRegister(),
                 in_reg.AsGpuRegister(), 0);
  Bind(&null_arg, false);
}

void Mips64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void Mips64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void Mips64Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister mscratch) {
  Mips64ManagedRegister base = mbase.AsMips64();
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(base.IsGpuRegister()) << base;
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
                 base.AsGpuRegister(), offset.Int32Value());
  Jalr(scratch.AsGpuRegister());
  // TODO: place reference map on call
}

void Mips64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  // Call *(*(SP + base) + offset)
  LoadFromOffset(kLoadWord, scratch.AsGpuRegister(),
                 SP, base.Int32Value());
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
                 scratch.AsGpuRegister(), offset.Int32Value());
  Jalr(scratch.AsGpuRegister());
  // TODO: place reference map on call
}

void Mips64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegister /*mscratch*/) {
  UNIMPLEMENTED(FATAL) << "no mips64 implementation";
}

void Mips64Assembler::GetCurrentThread(ManagedRegister tr) {
  Move(tr.AsMips64().AsGpuRegister(), S1);
}

void Mips64Assembler::GetCurrentThread(FrameOffset offset,
                                       ManagedRegister /*mscratch*/) {
  StoreToOffset(kStoreDoubleword, S1, SP, offset.Int32Value());
}

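// ExceptionPoll loads Thread::exception_ (via S1) and branches to an
// out-of-line Mips64ExceptionSlowPath when it is non-null. The slow path,
// emitted later when the buffer's slow paths are flushed, undoes any stack
// adjustment and jumps to the pDeliverException entrypoint through T9; that
// call never returns.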
void Mips64Assembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  Mips64ExceptionSlowPath* slow = new Mips64ExceptionSlowPath(scratch, stack_adjust);
  buffer_.EnqueueSlowPath(slow);
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
                 S1, Thread::ExceptionOffset<8>().Int32Value());
  EmitBranch(scratch.AsGpuRegister(), ZERO, slow->Entry(), false);
}

void Mips64ExceptionSlowPath::Emit(Assembler* sasm) {
  Mips64Assembler* sp_asm = down_cast<Mips64Assembler*>(sasm);
#define __ sp_asm->
  __ Bind(&entry_, false);
  if (stack_adjust_ != 0) {  // Fix up the frame.
    __ DecreaseFrameSize(stack_adjust_);
  }
  // Pass exception object as argument.
  // Don't care about preserving A0 as this call won't return.
  __ Move(A0, scratch_.AsGpuRegister());
  // Set up call to Thread::Current()->pDeliverException.
  __ LoadFromOffset(kLoadDoubleword, T9, S1,
                    QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value());
  __ Jr(T9);
  // Call never returns.
  __ Break();
#undef __
}

}  // namespace mips64
}  // namespace art