/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "assembler_mips64.h"

#include "base/casts.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "memory_region.h"
#include "thread.h"

namespace art {
namespace mips64 {

void Mips64Assembler::Emit(int32_t value) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<int32_t>(value);
}

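// Instruction encoding helpers. Each one assembles a single 32-bit MIPS64
// instruction word from its fields:
//   EmitR:  R-type     - opcode(6) | rs(5) | rt(5) | rd(5) | shamt(5) | funct(6)
//   EmitI:  I-type     - opcode(6) | rs(5) | rt(5) | imm(16)
//   EmitJ:  J-type     - opcode(6) | target/offset(26)
//   EmitFR: FPU R-type - COP1 opcode(6) | fmt(5) | ft(5) | fs(5) | fd(5) | funct(6)
//   EmitFI: FPU I-type - COP1 opcode(6) | fmt(5) | rt(5) | imm(16)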
void Mips64Assembler::EmitR(int opcode, GpuRegister rs, GpuRegister rt, GpuRegister rd,
                            int shamt, int funct) {
  CHECK_NE(rs, kNoGpuRegister);
  CHECK_NE(rt, kNoGpuRegister);
  CHECK_NE(rd, kNoGpuRegister);
  int32_t encoding = opcode << kOpcodeShift |
                     static_cast<int32_t>(rs) << kRsShift |
                     static_cast<int32_t>(rt) << kRtShift |
                     static_cast<int32_t>(rd) << kRdShift |
                     shamt << kShamtShift |
                     funct;
  Emit(encoding);
}

void Mips64Assembler::EmitI(int opcode, GpuRegister rs, GpuRegister rt, uint16_t imm) {
  CHECK_NE(rs, kNoGpuRegister);
  CHECK_NE(rt, kNoGpuRegister);
  int32_t encoding = opcode << kOpcodeShift |
                     static_cast<int32_t>(rs) << kRsShift |
                     static_cast<int32_t>(rt) << kRtShift |
                     imm;
  Emit(encoding);
}

void Mips64Assembler::EmitJ(int opcode, int address) {
  int32_t encoding = opcode << kOpcodeShift |
                     address;
  Emit(encoding);
}

void Mips64Assembler::EmitFR(int opcode, int fmt, FpuRegister ft, FpuRegister fs, FpuRegister fd,
                             int funct) {
  CHECK_NE(ft, kNoFpuRegister);
  CHECK_NE(fs, kNoFpuRegister);
  CHECK_NE(fd, kNoFpuRegister);
  int32_t encoding = opcode << kOpcodeShift |
                     fmt << kFmtShift |
                     static_cast<int32_t>(ft) << kFtShift |
                     static_cast<int32_t>(fs) << kFsShift |
                     static_cast<int32_t>(fd) << kFdShift |
                     funct;
  Emit(encoding);
}

void Mips64Assembler::EmitFI(int opcode, int fmt, FpuRegister rt, uint16_t imm) {
  CHECK_NE(rt, kNoFpuRegister);
  int32_t encoding = opcode << kOpcodeShift |
                     fmt << kFmtShift |
                     static_cast<int32_t>(rt) << kRtShift |
                     imm;
  Emit(encoding);
}

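// Emits a conditional branch (BEQ/BNE) to a label. If the label is not yet
// bound, the 16-bit offset field of the emitted instruction is used to link
// this site into the label's chain of pending branches; Bind() later walks the
// chain and patches in the real offsets.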
void Mips64Assembler::EmitBranch(GpuRegister rt, GpuRegister rs, Label* label, bool equal) {
  int offset;
  if (label->IsBound()) {
    offset = label->Position() - buffer_.Size();
  } else {
    // Use the offset field of the branch instruction for linking the sites.
    offset = label->position_;
    label->LinkTo(buffer_.Size());
  }
  if (equal) {
    Beq(rt, rs, (offset >> 2) & kBranchOffsetMask);
  } else {
    Bne(rt, rs, (offset >> 2) & kBranchOffsetMask);
  }
}

void Mips64Assembler::EmitJump(Label* label, bool link) {
  int offset;
  if (label->IsBound()) {
    offset = label->Position() - buffer_.Size();
  } else {
    // Use the offset field of the jump instruction for linking the sites.
    offset = label->position_;
    label->LinkTo(buffer_.Size());
  }
  if (link) {
    Jal((offset >> 2) & kJumpOffsetMask);
  } else {
    J((offset >> 2) & kJumpOffsetMask);
  }
}

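// Branch offsets are kept as word (instruction) offsets: 16 bits for
// conditional branches, 26 bits for J/JAL. Encode drops the low two bits of the
// byte offset and masks the result into the instruction's offset field; Decode
// reverses this by sign-extending the field and shifting left by two to recover
// a byte offset.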
int32_t Mips64Assembler::EncodeBranchOffset(int offset, int32_t inst, bool is_jump) {
  CHECK_ALIGNED(offset, 4);
  CHECK(IsInt(POPCOUNT(kBranchOffsetMask), offset)) << offset;

  // Properly preserve only the bits supported in the instruction.
  offset >>= 2;
  if (is_jump) {
    offset &= kJumpOffsetMask;
    return (inst & ~kJumpOffsetMask) | offset;
  } else {
    offset &= kBranchOffsetMask;
    return (inst & ~kBranchOffsetMask) | offset;
  }
}

int Mips64Assembler::DecodeBranchOffset(int32_t inst, bool is_jump) {
  // Sign-extend, then left-shift by 2.
  if (is_jump) {
    return (((inst & kJumpOffsetMask) << 6) >> 4);
  } else {
    return (((inst & kBranchOffsetMask) << 16) >> 14);
  }
}

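// Binds the label to the current buffer position and walks its chain of linked
// sites, rewriting each instruction's offset field to target the bound
// position. Conditional branch offsets are relative to the instruction after
// the branch, hence the extra -4 in the non-jump case.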
void Mips64Assembler::Bind(Label* label, bool is_jump) {
  CHECK(!label->IsBound());
  int bound_pc = buffer_.Size();
  while (label->IsLinked()) {
    int32_t position = label->Position();
    int32_t next = buffer_.Load<int32_t>(position);
    int32_t offset = is_jump ? bound_pc - position : bound_pc - position - 4;
    int32_t encoded = Mips64Assembler::EncodeBranchOffset(offset, next, is_jump);
    buffer_.Store<int32_t>(position, encoded);
    label->position_ = Mips64Assembler::DecodeBranchOffset(next, is_jump);
  }
  label->BindTo(bound_pc);
}

void Mips64Assembler::Add(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x20);
}

void Mips64Assembler::Addi(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x8, rs, rt, imm16);
}

void Mips64Assembler::Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x21);
}

void Mips64Assembler::Addiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x9, rs, rt, imm16);
}

void Mips64Assembler::Daddiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x19, rs, rt, imm16);
}

void Mips64Assembler::Sub(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x22);
}

void Mips64Assembler::Subu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x23);
}

void Mips64Assembler::Mult(GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x18);
}

void Mips64Assembler::Multu(GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x19);
}

void Mips64Assembler::Div(GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x1a);
}

void Mips64Assembler::Divu(GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x1b);
}

void Mips64Assembler::And(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x24);
}

void Mips64Assembler::Andi(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xc, rs, rt, imm16);
}

void Mips64Assembler::Or(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x25);
}

void Mips64Assembler::Ori(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xd, rs, rt, imm16);
}

void Mips64Assembler::Xor(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x26);
}

void Mips64Assembler::Xori(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xe, rs, rt, imm16);
}

void Mips64Assembler::Nor(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x27);
}

void Mips64Assembler::Sll(GpuRegister rd, GpuRegister rs, int shamt) {
  EmitR(0, rs, static_cast<GpuRegister>(0), rd, shamt, 0x00);
}

void Mips64Assembler::Srl(GpuRegister rd, GpuRegister rs, int shamt) {
  EmitR(0, rs, static_cast<GpuRegister>(0), rd, shamt, 0x02);
}

void Mips64Assembler::Sra(GpuRegister rd, GpuRegister rs, int shamt) {
  EmitR(0, rs, static_cast<GpuRegister>(0), rd, shamt, 0x03);
}

void Mips64Assembler::Sllv(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x04);
}

void Mips64Assembler::Srlv(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x06);
}

void Mips64Assembler::Srav(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x07);
}

void Mips64Assembler::Lb(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x20, rs, rt, imm16);
}

void Mips64Assembler::Lh(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x21, rs, rt, imm16);
}

void Mips64Assembler::Lw(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x23, rs, rt, imm16);
}

void Mips64Assembler::Ld(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x37, rs, rt, imm16);
}

void Mips64Assembler::Lbu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x24, rs, rt, imm16);
}

void Mips64Assembler::Lhu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x25, rs, rt, imm16);
}

void Mips64Assembler::Lui(GpuRegister rt, uint16_t imm16) {
  EmitI(0xf, static_cast<GpuRegister>(0), rt, imm16);
}

void Mips64Assembler::Mfhi(GpuRegister rd) {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), rd, 0, 0x10);
}

void Mips64Assembler::Mflo(GpuRegister rd) {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), rd, 0, 0x12);
}

void Mips64Assembler::Sb(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x28, rs, rt, imm16);
}

void Mips64Assembler::Sh(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x29, rs, rt, imm16);
}

void Mips64Assembler::Sw(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x2b, rs, rt, imm16);
}

void Mips64Assembler::Sd(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x3f, rs, rt, imm16);
}

void Mips64Assembler::Slt(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x2a);
}

void Mips64Assembler::Sltu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x2b);
}

void Mips64Assembler::Slti(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xa, rs, rt, imm16);
}

void Mips64Assembler::Sltiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xb, rs, rt, imm16);
}

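// The branch and jump emitters below insert a NOP into the delay slot, so
// callers never have to schedule one themselves.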
void Mips64Assembler::Beq(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x4, rs, rt, imm16);
  Nop();
}

void Mips64Assembler::Bne(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x5, rs, rt, imm16);
  Nop();
}

void Mips64Assembler::J(uint32_t address) {
  EmitJ(0x2, address);
  Nop();
}

void Mips64Assembler::Jal(uint32_t address) {
  EmitJ(0x3, address);
  Nop();
}

void Mips64Assembler::Jr(GpuRegister rs) {
  EmitR(0, rs, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), 0, 0x09);  // Jalr zero, rs
  Nop();
}

void Mips64Assembler::Jalr(GpuRegister rs) {
  EmitR(0, rs, static_cast<GpuRegister>(0), RA, 0, 0x09);
  Nop();
}

void Mips64Assembler::AddS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x0);
}

void Mips64Assembler::SubS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x1);
}

void Mips64Assembler::MulS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x2);
}

void Mips64Assembler::DivS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x3);
}

void Mips64Assembler::AddD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(ft), static_cast<FpuRegister>(fs),
         static_cast<FpuRegister>(fd), 0x0);
}

void Mips64Assembler::SubD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(ft), static_cast<FpuRegister>(fs),
         static_cast<FpuRegister>(fd), 0x1);
}

void Mips64Assembler::MulD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(ft), static_cast<FpuRegister>(fs),
         static_cast<FpuRegister>(fd), 0x2);
}

void Mips64Assembler::DivD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(ft), static_cast<FpuRegister>(fs),
         static_cast<FpuRegister>(fd), 0x3);
}

void Mips64Assembler::MovS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x6);
}

void Mips64Assembler::MovD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), static_cast<FpuRegister>(fs),
         static_cast<FpuRegister>(fd), 0x6);
}

void Mips64Assembler::Mfc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x00, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}

void Mips64Assembler::Mtc1(FpuRegister ft, GpuRegister rs) {
  EmitFR(0x11, 0x04, ft, static_cast<FpuRegister>(rs), static_cast<FpuRegister>(0), 0x0);
}

void Mips64Assembler::Lwc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x31, rs, static_cast<GpuRegister>(ft), imm16);
}

void Mips64Assembler::Ldc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x35, rs, static_cast<GpuRegister>(ft), imm16);
}

void Mips64Assembler::Swc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x39, rs, static_cast<GpuRegister>(ft), imm16);
}

void Mips64Assembler::Sdc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x3d, rs, static_cast<GpuRegister>(ft), imm16);
}

void Mips64Assembler::Break() {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
        static_cast<GpuRegister>(0), 0, 0xD);
}

void Mips64Assembler::Nop() {
  EmitR(0x0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
        static_cast<GpuRegister>(0), 0, 0x0);
}

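// Pseudo-instructions: Move is a DADDIU with a zero immediate, Clear is an ADD
// from the zero register, and Not is a NOR against the zero register.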
void Mips64Assembler::Move(GpuRegister rt, GpuRegister rs) {
  EmitI(0x19, rs, rt, 0);  // Daddiu
}

void Mips64Assembler::Clear(GpuRegister rt) {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), rt, 0, 0x20);
}

void Mips64Assembler::Not(GpuRegister rt, GpuRegister rs) {
  EmitR(0, static_cast<GpuRegister>(0), rs, rt, 0, 0x27);
}

void Mips64Assembler::Mul(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  Mult(rs, rt);
  Mflo(rd);
}

void Mips64Assembler::Div(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  Div(rs, rt);
  Mflo(rd);
}

void Mips64Assembler::Rem(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  Div(rs, rt);
  Mfhi(rd);
}

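// Note: these helpers only accept constants that fit in a single signed 16-bit
// DADDIU immediate; larger values are rejected by the CHECKs below.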
void Mips64Assembler::AddConstant64(GpuRegister rt, GpuRegister rs, int32_t value) {
  CHECK((value >= -32768) && (value <= 32766));
  Daddiu(rt, rs, value);
}

void Mips64Assembler::LoadImmediate64(GpuRegister rt, int32_t value) {
  CHECK((value >= -32768) && (value <= 32766));
  Daddiu(rt, ZERO, value);
}

void Mips64Assembler::LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base,
                                     int32_t offset) {
  switch (type) {
    case kLoadSignedByte:
      Lb(reg, base, offset);
      break;
    case kLoadUnsignedByte:
      Lbu(reg, base, offset);
      break;
    case kLoadSignedHalfword:
      Lh(reg, base, offset);
      break;
    case kLoadUnsignedHalfword:
      Lhu(reg, base, offset);
      break;
    case kLoadWord:
      Lw(reg, base, offset);
      break;
    case kLoadDoubleword:
      // TODO: alignment issues ???
      Ld(reg, base, offset);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

void Mips64Assembler::LoadFpuFromOffset(LoadOperandType type, FpuRegister reg, GpuRegister base,
                                        int32_t offset) {
  CHECK((offset >= -32768) && (offset <= 32766));
  switch (type) {
    case kLoadWord:
      Lwc1(reg, base, offset);
      break;
    case kLoadDoubleword:
      // TODO: alignment issues ???
      Ldc1(reg, base, offset);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

void Mips64Assembler::EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset,
                               size_t size) {
  Mips64ManagedRegister dst = m_dst.AsMips64();
  if (dst.IsNoRegister()) {
    CHECK_EQ(0u, size) << dst;
  } else if (dst.IsGpuRegister()) {
    if (size == 4) {
      CHECK_EQ(4u, size) << dst;
      LoadFromOffset(kLoadWord, dst.AsGpuRegister(), src_register, src_offset);
    } else if (size == 8) {
      CHECK_EQ(8u, size) << dst;
      LoadFromOffset(kLoadDoubleword, dst.AsGpuRegister(), src_register, src_offset);
    } else {
      UNIMPLEMENTED(FATAL) << "We only support Load() of size 4 and 8";
    }
  } else if (dst.IsFpuRegister()) {
    if (size == 4) {
      CHECK_EQ(4u, size) << dst;
      LoadFpuFromOffset(kLoadWord, dst.AsFpuRegister(), src_register, src_offset);
    } else if (size == 8) {
      CHECK_EQ(8u, size) << dst;
      LoadFpuFromOffset(kLoadDoubleword, dst.AsFpuRegister(), src_register, src_offset);
    } else {
      UNIMPLEMENTED(FATAL) << "We only support Load() of size 4 and 8";
    }
  }
}

void Mips64Assembler::StoreToOffset(StoreOperandType type, GpuRegister reg, GpuRegister base,
                                    int32_t offset) {
  switch (type) {
    case kStoreByte:
      Sb(reg, base, offset);
      break;
    case kStoreHalfword:
      Sh(reg, base, offset);
      break;
    case kStoreWord:
      Sw(reg, base, offset);
      break;
    case kStoreDoubleword:
      // TODO: alignment issues ???
      Sd(reg, base, offset);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

void Mips64Assembler::StoreFpuToOffset(StoreOperandType type, FpuRegister reg, GpuRegister base,
                                       int32_t offset) {
  switch (type) {
    case kStoreWord:
      Swc1(reg, base, offset);
      break;
    case kStoreDoubleword:
      Sdc1(reg, base, offset);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

static dwarf::Reg DWARFReg(GpuRegister reg) {
  return dwarf::Reg::Mips64Core(static_cast<int>(reg));
}

constexpr size_t kFramePointerSize = 8;

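// Frame layout produced by BuildFrame (offsets from the new SP):
//   frame_size - 8 : RA
//   below RA       : callee-save registers, one doubleword each
//   0              : StackReference<ArtMethod> for the current method (32-bit word)
// Entry spills are written above the frame, starting at
// frame_size + sizeof(StackReference<ArtMethod>).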
void Mips64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                                 const std::vector<ManagedRegister>& callee_save_regs,
                                 const ManagedRegisterEntrySpills& entry_spills) {
  CHECK_ALIGNED(frame_size, kStackAlignment);

  // Increase frame to required size.
  IncreaseFrameSize(frame_size);

  // Push callee saves and return address.
  int stack_offset = frame_size - kFramePointerSize;
  StoreToOffset(kStoreDoubleword, RA, SP, stack_offset);
  cfi_.RelOffset(DWARFReg(RA), stack_offset);
  for (int i = callee_save_regs.size() - 1; i >= 0; --i) {
    stack_offset -= kFramePointerSize;
    GpuRegister reg = callee_save_regs.at(i).AsMips64().AsGpuRegister();
    StoreToOffset(kStoreDoubleword, reg, SP, stack_offset);
    cfi_.RelOffset(DWARFReg(reg), stack_offset);
  }

  // Write out Method*.
  StoreToOffset(kStoreWord, method_reg.AsMips64().AsGpuRegister(), SP, 0);

  // Write out entry spills.
  int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>);
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    Mips64ManagedRegister reg = entry_spills.at(i).AsMips64();
    ManagedRegisterSpill spill = entry_spills.at(i);
    int32_t size = spill.getSize();
    if (reg.IsNoRegister()) {
      // Only increment stack offset.
      offset += size;
    } else if (reg.IsFpuRegister()) {
      StoreFpuToOffset((size == 4) ? kStoreWord : kStoreDoubleword, reg.AsFpuRegister(), SP, offset);
      offset += size;
    } else if (reg.IsGpuRegister()) {
      StoreToOffset((size == 4) ? kStoreWord : kStoreDoubleword, reg.AsGpuRegister(), SP, offset);
      offset += size;
    }
  }
}

void Mips64Assembler::RemoveFrame(size_t frame_size,
                                  const std::vector<ManagedRegister>& callee_save_regs) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  cfi_.RememberState();

  // Pop callee saves and return address.
  int stack_offset = frame_size - (callee_save_regs.size() * kFramePointerSize) - kFramePointerSize;
  for (size_t i = 0; i < callee_save_regs.size(); ++i) {
    GpuRegister reg = callee_save_regs.at(i).AsMips64().AsGpuRegister();
    LoadFromOffset(kLoadDoubleword, reg, SP, stack_offset);
    cfi_.Restore(DWARFReg(reg));
    stack_offset += kFramePointerSize;
  }
  LoadFromOffset(kLoadDoubleword, RA, SP, stack_offset);
  cfi_.Restore(DWARFReg(RA));

  // Decrease frame to required size.
  DecreaseFrameSize(frame_size);

  // Then jump to the return address.
  Jr(RA);

  // The CFI should be restored for any code that follows the exit block.
  cfi_.RestoreState();
  cfi_.DefCFAOffset(frame_size);
}

void Mips64Assembler::IncreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant64(SP, SP, -adjust);
  cfi_.AdjustCFAOffset(adjust);
}

void Mips64Assembler::DecreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant64(SP, SP, adjust);
  cfi_.AdjustCFAOffset(-adjust);
}

void Mips64Assembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
  Mips64ManagedRegister src = msrc.AsMips64();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsGpuRegister()) {
    CHECK(size == 4 || size == 8) << size;
    if (size == 8) {
      StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
    } else if (size == 4) {
      StoreToOffset(kStoreWord, src.AsGpuRegister(), SP, dest.Int32Value());
    } else {
      UNIMPLEMENTED(FATAL) << "We only support Store() of size 4 and 8";
    }
  } else if (src.IsFpuRegister()) {
    CHECK(size == 4 || size == 8) << size;
    if (size == 8) {
      StoreFpuToOffset(kStoreDoubleword, src.AsFpuRegister(), SP, dest.Int32Value());
    } else if (size == 4) {
      StoreFpuToOffset(kStoreWord, src.AsFpuRegister(), SP, dest.Int32Value());
    } else {
      UNIMPLEMENTED(FATAL) << "We only support Store() of size 4 and 8";
    }
  }
}

void Mips64Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
  Mips64ManagedRegister src = msrc.AsMips64();
  CHECK(src.IsGpuRegister());
  StoreToOffset(kStoreWord, src.AsGpuRegister(), SP, dest.Int32Value());
}

void Mips64Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  Mips64ManagedRegister src = msrc.AsMips64();
  CHECK(src.IsGpuRegister());
  StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
}

void Mips64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                                            ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadImmediate64(scratch.AsGpuRegister(), imm);
  StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
}

void Mips64Assembler::StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
                                               ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadImmediate64(scratch.AsGpuRegister(), imm);
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, dest.Int32Value());
}

void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
                                                 FrameOffset fr_offs,
                                                 ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  AddConstant64(scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
}

void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs) {
  StoreToOffset(kStoreDoubleword, SP, S1, thr_offs.Int32Value());
}

void Mips64Assembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
                                    FrameOffset in_off, ManagedRegister mscratch) {
  Mips64ManagedRegister src = msrc.AsMips64();
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), SP, in_off.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value() + 8);
}

void Mips64Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
  return EmitLoad(mdest, SP, src.Int32Value(), size);
}

void Mips64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> src, size_t size) {
  return EmitLoad(mdest, S1, src.Int32Value(), size);
}

void Mips64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister());
  LoadFromOffset(kLoadWord, dest.AsGpuRegister(), SP, src.Int32Value());
}

void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base,
                              MemberOffset offs) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister() && dest.IsGpuRegister());
  LoadFromOffset(kLoadWord, dest.AsGpuRegister(),
                 base.AsMips64().AsGpuRegister(), offs.Int32Value());
  if (kPoisonHeapReferences) {
    Subu(dest.AsGpuRegister(), ZERO, dest.AsGpuRegister());
  }
}

void Mips64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
                                 Offset offs) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister() && dest.IsGpuRegister()) << dest;
  LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(),
                 base.AsMips64().AsGpuRegister(), offs.Int32Value());
}

void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest,
                                             ThreadOffset<8> offs) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister());
  LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(), S1, offs.Int32Value());
}

void Mips64Assembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no sign extension necessary for mips";
}

void Mips64Assembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no zero extension necessary for mips";
}

void Mips64Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  Mips64ManagedRegister src = msrc.AsMips64();
  if (!dest.Equals(src)) {
    if (dest.IsGpuRegister()) {
      CHECK(src.IsGpuRegister()) << src;
      Move(dest.AsGpuRegister(), src.AsGpuRegister());
    } else if (dest.IsFpuRegister()) {
      CHECK(src.IsFpuRegister()) << src;
      if (size == 4) {
        MovS(dest.AsFpuRegister(), src.AsFpuRegister());
      } else if (size == 8) {
        MovD(dest.AsFpuRegister(), src.AsFpuRegister());
      } else {
        UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
      }
    }
  }
}

void Mips64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
                              ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP, src.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
}

void Mips64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
                                             ThreadOffset<8> thr_offs,
                                             ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
}

void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs,
                                           FrameOffset fr_offs,
                                           ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
                 SP, fr_offs.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(),
                S1, thr_offs.Int32Value());
}

void Mips64Assembler::Copy(FrameOffset dest, FrameOffset src,
                           ManagedRegister mscratch, size_t size) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Mips64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                           ManagedRegister mscratch, size_t size) {
  GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch, src_base.AsMips64().AsGpuRegister(),
                   src_offset.Int32Value());
    StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch, src_base.AsMips64().AsGpuRegister(),
                   src_offset.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch, SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Mips64Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
                           ManagedRegister mscratch, size_t size) {
  GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch, dest_base.AsMips64().AsGpuRegister(),
                  dest_offset.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch, SP, src.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch, dest_base.AsMips64().AsGpuRegister(),
                  dest_offset.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Mips64Assembler::Copy(FrameOffset /*dest*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
                           ManagedRegister /*mscratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no mips64 implementation";
}

void Mips64Assembler::Copy(ManagedRegister dest, Offset dest_offset,
                           ManagedRegister src, Offset src_offset,
                           ManagedRegister mscratch, size_t size) {
  GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch, src.AsMips64().AsGpuRegister(), src_offset.Int32Value());
    StoreToOffset(kStoreWord, scratch, dest.AsMips64().AsGpuRegister(), dest_offset.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch, src.AsMips64().AsGpuRegister(),
                   src_offset.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch, dest.AsMips64().AsGpuRegister(),
                  dest_offset.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Mips64Assembler::Copy(FrameOffset /*dest*/, Offset /*dest_offset*/,
                           FrameOffset /*src*/, Offset /*src_offset*/,
                           ManagedRegister /*mscratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no mips64 implementation";
}

void Mips64Assembler::MemoryBarrier(ManagedRegister) {
  UNIMPLEMENTED(FATAL) << "no mips64 implementation";
}

void Mips64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
                                             FrameOffset handle_scope_offset,
                                             ManagedRegister min_reg, bool null_allowed) {
  Mips64ManagedRegister out_reg = mout_reg.AsMips64();
  Mips64ManagedRegister in_reg = min_reg.AsMips64();
  CHECK(in_reg.IsNoRegister() || in_reg.IsGpuRegister()) << in_reg;
  CHECK(out_reg.IsGpuRegister()) << out_reg;
  if (null_allowed) {
    Label null_arg;
    // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
    if (in_reg.IsNoRegister()) {
      LoadFromOffset(kLoadWord, out_reg.AsGpuRegister(),
                     SP, handle_scope_offset.Int32Value());
      in_reg = out_reg;
    }
    if (!out_reg.Equals(in_reg)) {
      LoadImmediate64(out_reg.AsGpuRegister(), 0);
    }
    EmitBranch(in_reg.AsGpuRegister(), ZERO, &null_arg, true);
    AddConstant64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
    Bind(&null_arg, false);
  } else {
    AddConstant64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
  }
}

void Mips64Assembler::CreateHandleScopeEntry(FrameOffset out_off,
                                             FrameOffset handle_scope_offset,
                                             ManagedRegister mscratch,
                                             bool null_allowed) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  if (null_allowed) {
    Label null_arg;
    LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP,
                   handle_scope_offset.Int32Value());
    // Null values get a handle scope entry value of 0. Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
    EmitBranch(scratch.AsGpuRegister(), ZERO, &null_arg, true);
    AddConstant64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
    Bind(&null_arg, false);
  } else {
    AddConstant64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
  }
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, out_off.Int32Value());
}

// Given a handle scope entry, load the associated reference.
void Mips64Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
                                                   ManagedRegister min_reg) {
  Mips64ManagedRegister out_reg = mout_reg.AsMips64();
  Mips64ManagedRegister in_reg = min_reg.AsMips64();
  CHECK(out_reg.IsGpuRegister()) << out_reg;
  CHECK(in_reg.IsGpuRegister()) << in_reg;
  Label null_arg;
  if (!out_reg.Equals(in_reg)) {
    LoadImmediate64(out_reg.AsGpuRegister(), 0);
  }
  EmitBranch(in_reg.AsGpuRegister(), ZERO, &null_arg, true);
  LoadFromOffset(kLoadDoubleword, out_reg.AsGpuRegister(),
                 in_reg.AsGpuRegister(), 0);
  Bind(&null_arg, false);
}

void Mips64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void Mips64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void Mips64Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister mscratch) {
  Mips64ManagedRegister base = mbase.AsMips64();
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(base.IsGpuRegister()) << base;
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
                 base.AsGpuRegister(), offset.Int32Value());
  Jalr(scratch.AsGpuRegister());
  // TODO: place reference map on call
}

void Mips64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  // Call *(*(SP + base) + offset)
  LoadFromOffset(kLoadWord, scratch.AsGpuRegister(),
                 SP, base.Int32Value());
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
                 scratch.AsGpuRegister(), offset.Int32Value());
  Jalr(scratch.AsGpuRegister());
  // TODO: place reference map on call
}

void Mips64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegister /*mscratch*/) {
  UNIMPLEMENTED(FATAL) << "no mips64 implementation";
}

void Mips64Assembler::GetCurrentThread(ManagedRegister tr) {
  Move(tr.AsMips64().AsGpuRegister(), S1);
}

void Mips64Assembler::GetCurrentThread(FrameOffset offset,
                                       ManagedRegister /*mscratch*/) {
  StoreToOffset(kStoreDoubleword, S1, SP, offset.Int32Value());
}

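// Polls for a pending exception: loads Thread::Current()->exception_ through
// the thread register (S1) and branches to an enqueued slow path when it is
// non-null. The slow path (below) fixes up the frame, moves the exception into
// A0 and tail-calls the pDeliverException entrypoint, which does not return.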
void Mips64Assembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  Mips64ExceptionSlowPath* slow = new Mips64ExceptionSlowPath(scratch, stack_adjust);
  buffer_.EnqueueSlowPath(slow);
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
                 S1, Thread::ExceptionOffset<8>().Int32Value());
  EmitBranch(scratch.AsGpuRegister(), ZERO, slow->Entry(), false);
}

void Mips64ExceptionSlowPath::Emit(Assembler* sasm) {
  Mips64Assembler* sp_asm = down_cast<Mips64Assembler*>(sasm);
#define __ sp_asm->
  __ Bind(&entry_, false);
  if (stack_adjust_ != 0) {  // Fix up the frame.
    __ DecreaseFrameSize(stack_adjust_);
  }
  // Pass exception object as argument.
  // Don't care about preserving A0 as this call won't return.
  __ Move(A0, scratch_.AsGpuRegister());
  // Set up call to Thread::Current()->pDeliverException.
  __ LoadFromOffset(kLoadDoubleword, T9, S1,
                    QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value());
  __ Jr(T9);
  // Call never returns.
  __ Break();
#undef __
}

}  // namespace mips64
}  // namespace art