/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_mips64.h"

#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "art_method.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "offsets.h"
#include "thread.h"
#include "utils/mips64/assembler_mips64.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"

namespace art {
namespace mips64 {

static constexpr int kCurrentMethodStackOffset = 0;
static constexpr GpuRegister kMethodRegisterArgument = A0;

// We need extra temporary/scratch registers (in addition to AT) in some cases.
static constexpr GpuRegister TMP = T8;
static constexpr FpuRegister FTMP = F8;

// ART Thread Register.
static constexpr GpuRegister TR = S1;

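// Returns the location used for a method's return value: integral and
// reference results come back in core register V0, floating-point results
// in FPU register F0.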
Location Mips64ReturnLocation(Primitive::Type return_type) {
  switch (return_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      return Location::RegisterLocation(V0);

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      return Location::FpuRegisterLocation(F0);

    case Primitive::kPrimVoid:
      return Location();
  }
  UNREACHABLE();
}

Location InvokeDexCallingConventionVisitorMIPS64::GetReturnLocation(Primitive::Type type) const {
  return Mips64ReturnLocation(type);
}

Location InvokeDexCallingConventionVisitorMIPS64::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}

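// Computes the location of the next managed-code argument. Note that the
// GPR and FPR indices advance in lockstep: an argument occupies both a core
// and a floating-point register position, so a float passed in FPR n also
// consumes GPR n (and vice versa). Arguments that don't fit in registers go
// to stack slots, for which space is reserved for all arguments.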
Location InvokeDexCallingConventionVisitorMIPS64::GetNextLocation(Primitive::Type type) {
  Location next_location;
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unexpected parameter type " << type;
  }

  if (Primitive::IsFloatingPointType(type) &&
      (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
    next_location = Location::FpuRegisterLocation(
        calling_convention.GetFpuRegisterAt(float_index_++));
    gp_index_++;
  } else if (!Primitive::IsFloatingPointType(type) &&
             (gp_index_ < calling_convention.GetNumberOfRegisters())) {
    next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index_++));
    float_index_++;
  } else {
    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
    next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                                 : Location::StackSlot(stack_offset);
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;

  // TODO: review

  // TODO: shouldn't we use a whole machine word per argument on the stack?
  // Implicit 4-byte method pointer (and such) will cause misalignment.

  return next_location;
}

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
  return Mips64ReturnLocation(type);
}

#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()

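// Slow paths are out-of-line code sequences emitted after the main method
// body. Fast-path code branches to a slow path's entry label on an uncommon
// condition (failed bounds check, unresolved class, etc.); non-fatal slow
// paths save live registers, call the corresponding Quick runtime
// entrypoint, and branch back to their exit label.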
class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction,
                            Location index_location,
                            Location length_location)
      : instruction_(instruction),
        index_location_(index_location),
        length_location_(length_location) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(index_location_,
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimInt,
                               length_location_,
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimInt);
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }

 private:
  HBoundsCheck* const instruction_;
  const Location index_location_;
  const Location length_location_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
};

class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
};

class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  LoadClassSlowPathMIPS64(HLoadClass* cls,
                          HInstruction* at,
                          uint32_t dex_pc,
                          bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                            : QUICK_ENTRY_POINT(pInitializeType);
    mips64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = at_->GetType();
      mips64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS64"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
};

class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit LoadStringSlowPathMIPS64(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
    Primitive::Type type = instruction_->GetType();
    mips64_codegen->MoveLocation(locations->Out(),
                                 calling_convention.GetReturnLocation(type),
                                 type);

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
};

class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : instruction_(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }

 private:
  HNullCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
};

class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(mips64_codegen->GetLabelOf(successor_));
    }
  }

  Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS64);
};

class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  TypeCheckSlowPathMIPS64(HInstruction* instruction,
                          Location class_to_check,
                          Location object_class,
                          uint32_t dex_pc)
      : instruction_(instruction),
        class_to_check_(class_to_check),
        object_class_(object_class),
        dex_pc_(dex_pc) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(class_to_check_,
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimNot,
                               object_class_,
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
                                    instruction_,
                                    dex_pc_,
                                    this);
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial,
                           uint32_t,
                           const mirror::Class*,
                           const mirror::Class*>();
    } else {
      DCHECK(instruction_->IsCheckCast());
      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; }

 private:
  HInstruction* const instruction_;
  const Location class_to_check_;
  const Location object_class_;
  uint32_t dex_pc_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
};

class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit DeoptimizationSlowPathMIPS64(HInstruction* instruction)
      : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
};

CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
                                         const Mips64InstructionSetFeatures& isa_features,
                                         const CompilerOptions& compiler_options)
    : CodeGenerator(graph,
                    kNumberOfGpuRegisters,
                    kNumberOfFpuRegisters,
                    0,  // kNumberOfRegisterPairs
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options),
      block_labels_(graph->GetArena(), 0),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      isa_features_(isa_features) {
  // Save RA (containing the return address) to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(RA));
}

#undef __
#define __ down_cast<Mips64Assembler*>(GetAssembler())->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()

void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
  CodeGenerator::Finalize(allocator);
}

Mips64Assembler* ParallelMoveResolverMIPS64::GetAssembler() const {
  return codegen_->GetAssembler();
}

void ParallelMoveResolverMIPS64::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
}

void ParallelMoveResolverMIPS64::EmitSwap(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->SwapLocations(move->GetDestination(), move->GetSource(), move->GetType());
}

void ParallelMoveResolverMIPS64::RestoreScratch(int reg) {
  // Pop reg
  __ Ld(GpuRegister(reg), SP, 0);
  __ DecreaseFrameSize(kMips64WordSize);
}

void ParallelMoveResolverMIPS64::SpillScratch(int reg) {
  // Push reg
  __ IncreaseFrameSize(kMips64WordSize);
  __ Sd(GpuRegister(reg), SP, 0);
}

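// Exchanges two stack slots of the same width (32- or 64-bit). This needs a
// second scratch register besides TMP; if none is free, V0 is temporarily
// spilled and all SP-relative offsets are adjusted by the spill size.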
void ParallelMoveResolverMIPS64::Exchange(int index1, int index2, bool double_slot) {
  LoadOperandType load_type = double_slot ? kLoadDoubleword : kLoadWord;
  StoreOperandType store_type = double_slot ? kStoreDoubleword : kStoreWord;
  // Allocate a scratch register other than TMP, if available.
  // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
  // automatically unspilled when the scratch scope object is destroyed).
  ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
  // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
  int stack_offset = ensure_scratch.IsSpilled() ? kMips64WordSize : 0;
  __ LoadFromOffset(load_type,
                    GpuRegister(ensure_scratch.GetRegister()),
                    SP,
                    index1 + stack_offset);
  __ LoadFromOffset(load_type,
                    TMP,
                    SP,
                    index2 + stack_offset);
  __ StoreToOffset(store_type,
                   GpuRegister(ensure_scratch.GetRegister()),
                   SP,
                   index2 + stack_offset);
  __ StoreToOffset(store_type, TMP, SP, index1 + stack_offset);
}

static dwarf::Reg DWARFReg(GpuRegister reg) {
  return dwarf::Reg::Mips64Core(static_cast<int>(reg));
}

// TODO: mapping of floating-point registers to DWARF

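// Frame layout established by GenerateFrameEntry (the stack grows down):
//   [SP + frame_size - spill_size, SP + frame_size): callee-saved GPR/FPR spills
//   ...
//   [SP + 0]: the current ArtMethod*
// When an overflow check is needed, the address below the reserved stack
// region is probed first, so a stack overflow faults here and is attributed
// to this method.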
void CodeGeneratorMIPS64::GenerateFrameEntry() {
  __ Bind(&frame_entry_label_);

  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod();

  if (do_overflow_check) {
    __ LoadFromOffset(kLoadWord,
                      ZERO,
                      SP,
                      -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64)));
    RecordPcInfo(nullptr, 0);
  }

  // TODO: anything related to T9/GP/GOT/PIC/.so's?

  if (HasEmptyFrame()) {
    return;
  }

  // Make sure the frame size isn't unreasonably large. Per the various APIs
  // it looks like it should always be less than 2GB in size, which allows
  // us to use 32-bit signed offsets from the stack pointer.
  if (GetFrameSize() > 0x7FFFFFFF)
    LOG(FATAL) << "Stack frame larger than 2GB";

  // Spill callee-saved registers.
  // Note that their cumulative size is small and they can be indexed using
  // 16-bit offsets.

  // TODO: increment/decrement SP in one step instead of two or remove this comment.

  uint32_t ofs = FrameEntrySpillSize();
  __ IncreaseFrameSize(ofs);

  for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
    GpuRegister reg = kCoreCalleeSaves[i];
    if (allocated_registers_.ContainsCoreRegister(reg)) {
      ofs -= kMips64WordSize;
      __ Sd(reg, SP, ofs);
      __ cfi().RelOffset(DWARFReg(reg), ofs);
    }
  }

  for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
    FpuRegister reg = kFpuCalleeSaves[i];
    if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
      ofs -= kMips64WordSize;
      __ Sdc1(reg, SP, ofs);
      // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
    }
  }

  // Allocate the rest of the frame and store the current method pointer
  // at its end.

  __ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());

  static_assert(IsInt<16>(kCurrentMethodStackOffset),
                "kCurrentMethodStackOffset must fit into int16_t");
  __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
}

void CodeGeneratorMIPS64::GenerateFrameExit() {
  __ cfi().RememberState();

  // TODO: anything related to T9/GP/GOT/PIC/.so's?

  if (!HasEmptyFrame()) {
    // Deallocate the rest of the frame.

    __ DecreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());

    // Restore callee-saved registers.
    // Note that their cumulative size is small and they can be indexed using
    // 16-bit offsets.

    // TODO: increment/decrement SP in one step instead of two or remove this comment.

    uint32_t ofs = 0;

    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
      FpuRegister reg = kFpuCalleeSaves[i];
      if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
        __ Ldc1(reg, SP, ofs);
        ofs += kMips64WordSize;
        // TODO: __ cfi().Restore(DWARFReg(reg));
      }
    }

    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
      GpuRegister reg = kCoreCalleeSaves[i];
      if (allocated_registers_.ContainsCoreRegister(reg)) {
        __ Ld(reg, SP, ofs);
        ofs += kMips64WordSize;
        __ cfi().Restore(DWARFReg(reg));
      }
    }

    DCHECK_EQ(ofs, FrameEntrySpillSize());
    __ DecreaseFrameSize(ofs);
  }

  __ Jr(RA);

  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}

void CodeGeneratorMIPS64::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

void CodeGeneratorMIPS64::MoveLocation(Location destination,
                                       Location source,
                                       Primitive::Type type) {
  if (source.Equals(destination)) {
    return;
  }

  // A valid move can always be inferred from the destination and source
  // locations. When moving from and to a register, the argument type can be
  // used to generate 32bit instead of 64bit moves.
  bool unspecified_type = (type == Primitive::kPrimVoid);
  DCHECK_EQ(unspecified_type, false);

  if (destination.IsRegister() || destination.IsFpuRegister()) {
    if (unspecified_type) {
      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
      if (source.IsStackSlot() ||
          (src_cst != nullptr && (src_cst->IsIntConstant()
                                  || src_cst->IsFloatConstant()
                                  || src_cst->IsNullConstant()))) {
        // For stack slots and 32bit constants, a 32bit type is appropriate.
        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
      } else {
        // If the source is a double stack slot or a 64bit constant, a 64bit
        // type is appropriate. Else the source is a register, and since the
        // type has not been specified, we choose a 64bit type to force a 64bit
        // move.
        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
      }
    }
    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
           (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
      // Move to GPR/FPR from stack
      LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
      if (Primitive::IsFloatingPointType(type)) {
        __ LoadFpuFromOffset(load_type,
                             destination.AsFpuRegister<FpuRegister>(),
                             SP,
                             source.GetStackIndex());
      } else {
        // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
        __ LoadFromOffset(load_type,
                          destination.AsRegister<GpuRegister>(),
                          SP,
                          source.GetStackIndex());
      }
    } else if (source.IsConstant()) {
      // Move to GPR/FPR from constant
      GpuRegister gpr = AT;
      if (!Primitive::IsFloatingPointType(type)) {
        gpr = destination.AsRegister<GpuRegister>();
      }
      if (type == Primitive::kPrimInt || type == Primitive::kPrimFloat) {
        __ LoadConst32(gpr, GetInt32ValueOf(source.GetConstant()->AsConstant()));
      } else {
        __ LoadConst64(gpr, GetInt64ValueOf(source.GetConstant()->AsConstant()));
      }
      if (type == Primitive::kPrimFloat) {
        __ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
      } else if (type == Primitive::kPrimDouble) {
        __ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
      }
    } else {
      if (destination.IsRegister()) {
        // Move to GPR from GPR
        __ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
      } else {
        // Move to FPR from FPR
        if (type == Primitive::kPrimFloat) {
          __ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
        } else {
          DCHECK_EQ(type, Primitive::kPrimDouble);
          __ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
        }
      }
    }
  } else {  // The destination is not a register. It must be a stack slot.
    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
          type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
             (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
      // Move to stack from GPR/FPR
      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
      if (source.IsRegister()) {
        __ StoreToOffset(store_type,
                         source.AsRegister<GpuRegister>(),
                         SP,
                         destination.GetStackIndex());
      } else {
        __ StoreFpuToOffset(store_type,
                            source.AsFpuRegister<FpuRegister>(),
                            SP,
                            destination.GetStackIndex());
      }
    } else if (source.IsConstant()) {
      // Move to stack from constant
      HConstant* src_cst = source.GetConstant();
      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
      if (destination.IsStackSlot()) {
        __ LoadConst32(TMP, GetInt32ValueOf(src_cst->AsConstant()));
      } else {
        __ LoadConst64(TMP, GetInt64ValueOf(src_cst->AsConstant()));
      }
      __ StoreToOffset(store_type, TMP, SP, destination.GetStackIndex());
    } else {
      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
      DCHECK_EQ(source.IsDoubleStackSlot(), destination.IsDoubleStackSlot());
      // Move to stack from stack
      if (destination.IsStackSlot()) {
        __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
        __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
      } else {
        __ LoadFromOffset(kLoadDoubleword, TMP, SP, source.GetStackIndex());
        __ StoreToOffset(kStoreDoubleword, TMP, SP, destination.GetStackIndex());
      }
    }
  }
}

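// Swaps two locations of the same kind: GPR<->GPR through TMP, FPR<->FPR
// through TMP and AT, register<->stack slot through TMP, and stack
// slot<->stack slot via the parallel move resolver's Exchange().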
void CodeGeneratorMIPS64::SwapLocations(Location loc1,
                                        Location loc2,
                                        Primitive::Type type ATTRIBUTE_UNUSED) {
  DCHECK(!loc1.IsConstant());
  DCHECK(!loc2.IsConstant());

  if (loc1.Equals(loc2)) {
    return;
  }

  bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
  bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
  bool is_fp_reg1 = loc1.IsFpuRegister();
  bool is_fp_reg2 = loc2.IsFpuRegister();

  if (loc2.IsRegister() && loc1.IsRegister()) {
    // Swap 2 GPRs
    GpuRegister r1 = loc1.AsRegister<GpuRegister>();
    GpuRegister r2 = loc2.AsRegister<GpuRegister>();
    __ Move(TMP, r2);
    __ Move(r2, r1);
    __ Move(r1, TMP);
  } else if (is_fp_reg2 && is_fp_reg1) {
    // Swap 2 FPRs
    FpuRegister r1 = loc1.AsFpuRegister<FpuRegister>();
    FpuRegister r2 = loc2.AsFpuRegister<FpuRegister>();
    // TODO: Can MOV.S/MOV.D be used here to save one instruction?
    // Need to distinguish float from double, right?
    __ Dmfc1(TMP, r2);
    __ Dmfc1(AT, r1);
    __ Dmtc1(TMP, r1);
    __ Dmtc1(AT, r2);
  } else if (is_slot1 != is_slot2) {
    // Swap GPR/FPR and stack slot
    Location reg_loc = is_slot1 ? loc2 : loc1;
    Location mem_loc = is_slot1 ? loc1 : loc2;
    LoadOperandType load_type = mem_loc.IsStackSlot() ? kLoadWord : kLoadDoubleword;
    StoreOperandType store_type = mem_loc.IsStackSlot() ? kStoreWord : kStoreDoubleword;
    // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
    __ LoadFromOffset(load_type, TMP, SP, mem_loc.GetStackIndex());
    if (reg_loc.IsFpuRegister()) {
      __ StoreFpuToOffset(store_type,
                          reg_loc.AsFpuRegister<FpuRegister>(),
                          SP,
                          mem_loc.GetStackIndex());
      // TODO: review this MTC1/DMTC1 move
      if (mem_loc.IsStackSlot()) {
        __ Mtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
      } else {
        DCHECK(mem_loc.IsDoubleStackSlot());
        __ Dmtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
      }
    } else {
      __ StoreToOffset(store_type, reg_loc.AsRegister<GpuRegister>(), SP, mem_loc.GetStackIndex());
      __ Move(reg_loc.AsRegister<GpuRegister>(), TMP);
    }
  } else if (is_slot1 && is_slot2) {
    move_resolver_.Exchange(loc1.GetStackIndex(),
                            loc2.GetStackIndex(),
                            loc1.IsDoubleStackSlot());
  } else {
    LOG(FATAL) << "Unimplemented swap between locations " << loc1 << " and " << loc2;
  }
}

void CodeGeneratorMIPS64::Move(HInstruction* instruction,
                               Location location,
                               HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type type = instruction->GetType();
  DCHECK_NE(type, Primitive::kPrimVoid);

  if (instruction->IsCurrentMethod()) {
    MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset), type);
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  } else if (instruction->IsIntConstant()
             || instruction->IsLongConstant()
             || instruction->IsNullConstant()) {
    if (location.IsRegister()) {
      // Move to GPR from constant
      GpuRegister dst = location.AsRegister<GpuRegister>();
      if (instruction->IsNullConstant() || instruction->IsIntConstant()) {
        __ LoadConst32(dst, GetInt32ValueOf(instruction->AsConstant()));
      } else {
        __ LoadConst64(dst, instruction->AsLongConstant()->GetValue());
      }
    } else {
      DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
      // Move to stack from constant
      if (location.IsStackSlot()) {
        __ LoadConst32(TMP, GetInt32ValueOf(instruction->AsConstant()));
        __ StoreToOffset(kStoreWord, TMP, SP, location.GetStackIndex());
      } else {
        __ LoadConst64(TMP, instruction->AsLongConstant()->GetValue());
        __ StoreToOffset(kStoreDoubleword, TMP, SP, location.GetStackIndex());
      }
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    MoveLocation(location, temp_location, type);
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    if (Primitive::Is64BitType(type)) {
      MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
    } else {
      MoveLocation(location, Location::StackSlot(stack_slot), type);
    }
  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    MoveLocation(location, locations->Out(), type);
  }
}

Location CodeGeneratorMIPS64::GetStackLocation(HLoadLocal* load) const {
  Primitive::Type type = load->GetType();

  switch (type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << type;
  }

  LOG(FATAL) << "Unreachable";
  return Location::NoLocation();
}

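// Write barrier for the garbage collector's card table: when a non-null
// reference `value` is stored into `object`, the card covering `object` is
// marked dirty. The byte written is the low byte of the biased card table
// base itself, saving a separate load of the dirty-card constant.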
void CodeGeneratorMIPS64::MarkGCCard(GpuRegister object, GpuRegister value) {
  Label done;
  GpuRegister card = AT;
  GpuRegister temp = TMP;
  __ Beqzc(value, &done);
  __ LoadFromOffset(kLoadDoubleword,
                    card,
                    TR,
                    Thread::CardTableOffset<kMips64WordSize>().Int32Value());
  __ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
  __ Daddu(temp, card, temp);
  __ Sb(card, temp, 0);
  __ Bind(&done);
}

void CodeGeneratorMIPS64::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const {
  // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
  blocked_core_registers_[ZERO] = true;
  blocked_core_registers_[K0] = true;
  blocked_core_registers_[K1] = true;
  blocked_core_registers_[GP] = true;
  blocked_core_registers_[SP] = true;
  blocked_core_registers_[RA] = true;

  // AT and TMP(T8) are used as temporary/scratch registers
  // (similar to how AT is used by MIPS assemblers).
  blocked_core_registers_[AT] = true;
  blocked_core_registers_[TMP] = true;
  blocked_fpu_registers_[FTMP] = true;

  // Reserve suspend and thread registers.
  blocked_core_registers_[S0] = true;
  blocked_core_registers_[TR] = true;

  // Reserve T9 for function calls
  blocked_core_registers_[T9] = true;

  // TODO: review; anything else?

  // TODO: make these two for's conditional on is_baseline once
  // all the issues with register saving/restoring are sorted out.
  for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
    blocked_core_registers_[kCoreCalleeSaves[i]] = true;
  }

  for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
    blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
  }
}

Location CodeGeneratorMIPS64::AllocateFreeRegister(Primitive::Type type) const {
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type)) {
    size_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFpuRegisters);
    return Location::FpuRegisterLocation(reg);
  } else {
    size_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfGpuRegisters);
    return Location::RegisterLocation(reg);
  }
}

size_t CodeGeneratorMIPS64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreToOffset(kStoreDoubleword, GpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFromOffset(kLoadDoubleword, GpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreFpuToOffset(kStoreDoubleword, FpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFpuFromOffset(kLoadDoubleword, FpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

void CodeGeneratorMIPS64::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << Mips64ManagedRegister::FromGpuRegister(GpuRegister(reg));
}

void CodeGeneratorMIPS64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << Mips64ManagedRegister::FromFpuRegister(FpuRegister(reg));
}

void CodeGeneratorMIPS64::InvokeRuntime(int32_t entry_point_offset,
                                        HInstruction* instruction,
                                        uint32_t dex_pc,
                                        SlowPathCode* slow_path) {
  ValidateInvokeRuntime(instruction, slow_path);
  // TODO: anything related to T9/GP/GOT/PIC/.so's?
  __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
  __ Jalr(T9);
  RecordPcInfo(instruction, dex_pc, slow_path);
}

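// Class initialization check: mirror::Class status values are ordered, with
// every status below kStatusInitialized meaning the class is not yet
// initialized. If the loaded status is below that threshold, branch to the
// slow path, which calls into the runtime.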
void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
                                                                      GpuRegister class_reg) {
  __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
  __ LoadConst32(AT, mirror::Class::kStatusInitialized);
  __ Bltc(TMP, AT, slow_path->GetEntryLabel());
  // TODO: barrier needed?
  __ Bind(slow_path->GetExitLabel());
}

void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
  __ Sync(0);  // only stype 0 is supported
}

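// Suspend check: poll the thread flags stored in the Thread object (via TR).
// If any flag is set, branch to the slow path, which calls pTestSuspend and
// then resumes either right here or at the given successor block.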
void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                          HBasicBlock* successor) {
  SuspendCheckSlowPathMIPS64* slow_path =
      new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS64(instruction, successor);
  codegen_->AddSlowPath(slow_path);

  __ LoadFromOffset(kLoadUnsignedHalfword,
                    TMP,
                    TR,
                    Thread::ThreadFlagsOffset<kMips64WordSize>().Int32Value());
  if (successor == nullptr) {
    __ Bnezc(TMP, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ Beqzc(TMP, codegen_->GetLabelOf(successor));
    __ B(slow_path->GetEntryLabel());
    // slow_path will return to GetLabelOf(successor).
  }
}

InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph,
                                                               CodeGeneratorMIPS64* codegen)
    : HGraphVisitor(graph),
      assembler_(codegen->GetAssembler()),
      codegen_(codegen) {}

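// For integral binary operations, a constant right-hand side can be encoded
// in the instruction itself when it fits the immediate field: ANDI/ORI/XORI
// take a 16-bit zero-extended immediate, ADDIU/DADDIU a 16-bit sign-extended
// one. Subtraction by a constant is emitted as an addition of its negation.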
void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
  DCHECK_EQ(instruction->InputCount(), 2U);
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Primitive::Type type = instruction->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      HInstruction* right = instruction->InputAt(1);
      bool can_use_imm = false;
      if (right->IsConstant()) {
        int64_t imm = CodeGenerator::GetInt64ValueOf(right->AsConstant());
        if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
          can_use_imm = IsUint<16>(imm);
        } else if (instruction->IsAdd()) {
          can_use_imm = IsInt<16>(imm);
        } else {
          DCHECK(instruction->IsSub());
          can_use_imm = IsInt<16>(-imm);
        }
      }
      if (can_use_imm)
        locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
      else
        locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      }
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
  }
}

void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      Location rhs_location = locations->InAt(1);

      GpuRegister rhs_reg = ZERO;
      int64_t rhs_imm = 0;
      bool use_imm = rhs_location.IsConstant();
      if (use_imm) {
        rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
      } else {
        rhs_reg = rhs_location.AsRegister<GpuRegister>();
      }

      if (instruction->IsAnd()) {
        if (use_imm)
          __ Andi(dst, lhs, rhs_imm);
        else
          __ And(dst, lhs, rhs_reg);
      } else if (instruction->IsOr()) {
        if (use_imm)
          __ Ori(dst, lhs, rhs_imm);
        else
          __ Or(dst, lhs, rhs_reg);
      } else if (instruction->IsXor()) {
        if (use_imm)
          __ Xori(dst, lhs, rhs_imm);
        else
          __ Xor(dst, lhs, rhs_reg);
      } else if (instruction->IsAdd()) {
        if (type == Primitive::kPrimInt) {
          if (use_imm)
            __ Addiu(dst, lhs, rhs_imm);
          else
            __ Addu(dst, lhs, rhs_reg);
        } else {
          if (use_imm)
            __ Daddiu(dst, lhs, rhs_imm);
          else
            __ Daddu(dst, lhs, rhs_reg);
        }
      } else {
        DCHECK(instruction->IsSub());
        if (type == Primitive::kPrimInt) {
          if (use_imm)
            __ Addiu(dst, lhs, -rhs_imm);
          else
            __ Subu(dst, lhs, rhs_reg);
        } else {
          if (use_imm)
            __ Daddiu(dst, lhs, -rhs_imm);
          else
            __ Dsubu(dst, lhs, rhs_reg);
        }
      }
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (instruction->IsAdd()) {
        if (type == Primitive::kPrimFloat)
          __ AddS(dst, lhs, rhs);
        else
          __ AddD(dst, lhs, rhs);
      } else if (instruction->IsSub()) {
        if (type == Primitive::kPrimFloat)
          __ SubS(dst, lhs, rhs);
        else
          __ SubD(dst, lhs, rhs);
      } else {
        LOG(FATAL) << "Unexpected floating-point binary operation";
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected binary operation type " << type;
  }
}

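// Shift amounts are masked to the operand width: the low 5 bits for 32-bit
// shifts, the low 6 bits for 64-bit shifts. Constant 64-bit shifts by 32 or
// more use the DSLL32/DSRA32/DSRL32 encodings, whose 5-bit shamt field holds
// (shift_value - 32).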
void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift type " << type;
  }
}

void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
  LocationSummary* locations = instr->GetLocations();
  Primitive::Type type = instr->GetType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      Location rhs_location = locations->InAt(1);

      GpuRegister rhs_reg = ZERO;
      int64_t rhs_imm = 0;
      bool use_imm = rhs_location.IsConstant();
      if (use_imm) {
        rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
      } else {
        rhs_reg = rhs_location.AsRegister<GpuRegister>();
      }

      if (use_imm) {
        uint32_t shift_value = (type == Primitive::kPrimInt)
            ? static_cast<uint32_t>(rhs_imm & kMaxIntShiftValue)
            : static_cast<uint32_t>(rhs_imm & kMaxLongShiftValue);

        if (type == Primitive::kPrimInt) {
          if (instr->IsShl()) {
            __ Sll(dst, lhs, shift_value);
          } else if (instr->IsShr()) {
            __ Sra(dst, lhs, shift_value);
          } else {
            __ Srl(dst, lhs, shift_value);
          }
        } else {
          if (shift_value < 32) {
            if (instr->IsShl()) {
              __ Dsll(dst, lhs, shift_value);
            } else if (instr->IsShr()) {
              __ Dsra(dst, lhs, shift_value);
            } else {
              __ Dsrl(dst, lhs, shift_value);
            }
          } else {
            shift_value -= 32;
            if (instr->IsShl()) {
              __ Dsll32(dst, lhs, shift_value);
            } else if (instr->IsShr()) {
              __ Dsra32(dst, lhs, shift_value);
            } else {
              __ Dsrl32(dst, lhs, shift_value);
            }
          }
        }
      } else {
        if (type == Primitive::kPrimInt) {
          if (instr->IsShl()) {
            __ Sllv(dst, lhs, rhs_reg);
          } else if (instr->IsShr()) {
            __ Srav(dst, lhs, rhs_reg);
          } else {
            __ Srlv(dst, lhs, rhs_reg);
          }
        } else {
          if (instr->IsShl()) {
            __ Dsllv(dst, lhs, rhs_reg);
          } else if (instr->IsShr()) {
            __ Dsrav(dst, lhs, rhs_reg);
          } else {
            __ Dsrlv(dst, lhs, rhs_reg);
          }
        }
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift operation type " << type;
  }
}

void LocationsBuilderMIPS64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

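// Array element accesses compute the address obj + data_offset + (index <<
// scale), where scale is log2 of the element size. A constant index is
// folded into the immediate offset of the memory operand; otherwise the
// scaled index is formed in TMP. Reference loads use kLoadUnsignedWord since
// heap references are 32-bit values on MIPS64.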
void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  Location index = locations->InAt(1);
  Primitive::Type type = instruction->GetType();

  switch (type) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
        __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
        __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      LoadOperandType load_type = (type == Primitive::kPrimNot) ? kLoadUnsignedWord : kLoadWord;
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(load_type, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(load_type, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFromOffset(kLoadDoubleword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFpuFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
        __ Daddu(TMP, obj, TMP);
        __ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  __ LoadFromOffset(kLoadWord, out, obj, offset);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

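// Storing a reference into an array may need a type check against the
// array's component type, so object stores are routed to the pAputObject
// runtime entrypoint, which also takes care of the write barrier; stores of
// primitive element types are emitted inline.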
1430void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) {
1431 Primitive::Type value_type = instruction->GetComponentType();
1432 bool is_object = value_type == Primitive::kPrimNot;
1433 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
1434 instruction,
1435 is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
1436 if (is_object) {
1437 InvokeRuntimeCallingConvention calling_convention;
1438 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
1439 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
1440 locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
1441 } else {
1442 locations->SetInAt(0, Location::RequiresRegister());
1443 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1444 if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
1445 locations->SetInAt(2, Location::RequiresFpuRegister());
1446 } else {
1447 locations->SetInAt(2, Location::RequiresRegister());
1448 }
1449 }
1450}
1451
1452void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
1453 LocationSummary* locations = instruction->GetLocations();
1454 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1455 Location index = locations->InAt(1);
1456 Primitive::Type value_type = instruction->GetComponentType();
1457 bool needs_runtime_call = locations->WillCall();
1458 bool needs_write_barrier =
1459 CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
1460
1461 switch (value_type) {
1462 case Primitive::kPrimBoolean:
1463 case Primitive::kPrimByte: {
1464 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
1465 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1466 if (index.IsConstant()) {
1467 size_t offset =
1468 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
1469 __ StoreToOffset(kStoreByte, value, obj, offset);
1470 } else {
1471 __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
1472 __ StoreToOffset(kStoreByte, value, TMP, data_offset);
1473 }
1474 break;
1475 }
1476
1477 case Primitive::kPrimShort:
1478 case Primitive::kPrimChar: {
1479 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
1480 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1481 if (index.IsConstant()) {
1482 size_t offset =
1483 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
1484 __ StoreToOffset(kStoreHalfword, value, obj, offset);
1485 } else {
1486 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
1487 __ Daddu(TMP, obj, TMP);
1488 __ StoreToOffset(kStoreHalfword, value, TMP, data_offset);
1489 }
1490 break;
1491 }
1492
1493 case Primitive::kPrimInt:
1494 case Primitive::kPrimNot: {
1495 if (!needs_runtime_call) {
1496 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
1497 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1498 if (index.IsConstant()) {
1499 size_t offset =
1500 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
1501 __ StoreToOffset(kStoreWord, value, obj, offset);
1502 } else {
1503 DCHECK(index.IsRegister()) << index;
1504 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
1505 __ Daddu(TMP, obj, TMP);
1506 __ StoreToOffset(kStoreWord, value, TMP, data_offset);
1507 }
1508 codegen_->MaybeRecordImplicitNullCheck(instruction);
1509 if (needs_write_barrier) {
1510 DCHECK_EQ(value_type, Primitive::kPrimNot);
1511 codegen_->MarkGCCard(obj, value);
1512 }
1513 } else {
1514 DCHECK_EQ(value_type, Primitive::kPrimNot);
1515 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
1516 instruction,
1517 instruction->GetDexPc(),
1518 nullptr);
1519 }
1520 break;
1521 }
1522
1523 case Primitive::kPrimLong: {
1524 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
1525 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1526 if (index.IsConstant()) {
1527 size_t offset =
1528 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
1529 __ StoreToOffset(kStoreDoubleword, value, obj, offset);
1530 } else {
1531 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
1532 __ Daddu(TMP, obj, TMP);
1533 __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset);
1534 }
1535 break;
1536 }
1537
1538 case Primitive::kPrimFloat: {
1539 uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
1540 FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
1541 DCHECK(locations->InAt(2).IsFpuRegister());
1542 if (index.IsConstant()) {
1543 size_t offset =
1544 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
1545 __ StoreFpuToOffset(kStoreWord, value, obj, offset);
1546 } else {
1547 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
1548 __ Daddu(TMP, obj, TMP);
1549 __ StoreFpuToOffset(kStoreWord, value, TMP, data_offset);
1550 }
1551 break;
1552 }
1553
1554 case Primitive::kPrimDouble: {
1555 uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
1556 FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
1557 DCHECK(locations->InAt(2).IsFpuRegister());
1558 if (index.IsConstant()) {
1559 size_t offset =
1560 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
1561 __ StoreFpuToOffset(kStoreDoubleword, value, obj, offset);
1562 } else {
1563 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
1564 __ Daddu(TMP, obj, TMP);
1565 __ StoreFpuToOffset(kStoreDoubleword, value, TMP, data_offset);
1566 }
1567 break;
1568 }
1569
1570 case Primitive::kPrimVoid:
1571 LOG(FATAL) << "Unreachable type " << instruction->GetType();
1572 UNREACHABLE();
1573 }
1574
1575 // Ints and objects are handled in the switch.
1576 if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
1577 codegen_->MaybeRecordImplicitNullCheck(instruction);
1578 }
1579}
1580
1581void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
1582 LocationSummary* locations =
1583 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1584 locations->SetInAt(0, Location::RequiresRegister());
1585 locations->SetInAt(1, Location::RequiresRegister());
1586 if (instruction->HasUses()) {
1587 locations->SetOut(Location::SameAsFirstInput());
1588 }
1589}
1590
1591void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
1592 LocationSummary* locations = instruction->GetLocations();
1593 BoundsCheckSlowPathMIPS64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(
1594 instruction,
1595 locations->InAt(0),
1596 locations->InAt(1));
1597 codegen_->AddSlowPath(slow_path);
1598
1599 GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
1600 GpuRegister length = locations->InAt(1).AsRegister<GpuRegister>();
1601
1602 // length is limited by the maximum positive signed 32-bit integer.
1603 // Unsigned comparison of length and index checks for index < 0
1604 // and for length <= index simultaneously.
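// E.g. index = -1 sign-extends to an all-ones register value; interpreted
// as unsigned it exceeds any valid length, so Bgeuc takes the slow path.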
1605 // MIPS R6 requires lhs != rhs for compact branches.
1606 if (index == length) {
1607 __ B(slow_path->GetEntryLabel());
1608 } else {
1609 __ Bgeuc(index, length, slow_path->GetEntryLabel());
1610 }
1611}
1612
1613void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
1614 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
1615 instruction,
1616 LocationSummary::kCallOnSlowPath);
1617 locations->SetInAt(0, Location::RequiresRegister());
1618 locations->SetInAt(1, Location::RequiresRegister());
1619 locations->AddTemp(Location::RequiresRegister());
1620}
1621
1622void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
1623 LocationSummary* locations = instruction->GetLocations();
1624 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1625 GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
1626 GpuRegister obj_cls = locations->GetTemp(0).AsRegister<GpuRegister>();
1627
1628 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(
1629 instruction,
1630 locations->InAt(1),
1631 Location::RegisterLocation(obj_cls),
1632 instruction->GetDexPc());
1633 codegen_->AddSlowPath(slow_path);
1634
1635 // TODO: avoid this check if we know obj is not null.
1636 __ Beqzc(obj, slow_path->GetExitLabel());
1637 // Compare the class of `obj` with `cls`.
1638 __ LoadFromOffset(kLoadUnsignedWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
1639 __ Bnec(obj_cls, cls, slow_path->GetEntryLabel());
1640 __ Bind(slow_path->GetExitLabel());
1641}
1642
1643void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
1644 LocationSummary* locations =
1645 new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
1646 locations->SetInAt(0, Location::RequiresRegister());
1647 if (check->HasUses()) {
1648 locations->SetOut(Location::SameAsFirstInput());
1649 }
1650}
1651
1652void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
1653 // We assume the class is not null.
1654 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
1655 check->GetLoadClass(),
1656 check,
1657 check->GetDexPc(),
1658 true);
1659 codegen_->AddSlowPath(slow_path);
1660 GenerateClassInitializationCheck(slow_path,
1661 check->GetLocations()->InAt(0).AsRegister<GpuRegister>());
1662}
1663
1664void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) {
1665 Primitive::Type in_type = compare->InputAt(0)->GetType();
1666
1667 LocationSummary::CallKind call_kind = Primitive::IsFloatingPointType(in_type)
1668 ? LocationSummary::kCall
1669 : LocationSummary::kNoCall;
1670
1671 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, call_kind);
1672
1673 switch (in_type) {
1674 case Primitive::kPrimLong:
1675 locations->SetInAt(0, Location::RequiresRegister());
1676 locations->SetInAt(1, Location::RequiresRegister());
1677 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1678 break;
1679
1680 case Primitive::kPrimFloat:
1681 case Primitive::kPrimDouble: {
1682 InvokeRuntimeCallingConvention calling_convention;
1683 locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
1684 locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
1685 locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
1686 break;
1687 }
1688
1689 default:
1690 LOG(FATAL) << "Unexpected type for compare operation " << in_type;
1691 }
1692}
1693
1694void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) {
1695 LocationSummary* locations = instruction->GetLocations();
1696 Primitive::Type in_type = instruction->InputAt(0)->GetType();
1697
1698 // 0 if: left == right
1699 // 1 if: left > right
1700 // -1 if: left < right
1701 switch (in_type) {
1702 case Primitive::kPrimLong: {
1703 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1704 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1705 GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
1706 // TODO: more efficient (direct) comparison with a constant
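// E.g. lhs = 5, rhs = 9: TMP = (5 < 9) = 1, dst = (9 < 5) = 0, so
// dst - TMP = -1; equal inputs give 0 - 0 = 0, and lhs > rhs gives 1.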
1707 __ Slt(TMP, lhs, rhs);
1708 __ Slt(dst, rhs, lhs);
1709 __ Subu(dst, dst, TMP);
1710 break;
1711 }
1712
1713 case Primitive::kPrimFloat:
1714 case Primitive::kPrimDouble: {
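// These entry points implement the Java fcmpg/dcmpg and fcmpl/dcmpl
// semantics: with gt bias an unordered (NaN) comparison yields 1, with
// lt bias it yields -1.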
1715 int32_t entry_point_offset;
1716 if (in_type == Primitive::kPrimFloat) {
1717 entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgFloat)
1718 : QUICK_ENTRY_POINT(pCmplFloat);
1719 } else {
1720 entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgDouble)
1721 : QUICK_ENTRY_POINT(pCmplDouble);
1722 }
1723 codegen_->InvokeRuntime(entry_point_offset, instruction, instruction->GetDexPc(), nullptr);
1724 break;
1725 }
1726
1727 default:
1728 LOG(FATAL) << "Unimplemented compare type " << in_type;
1729 }
1730}
1731
1732void LocationsBuilderMIPS64::VisitCondition(HCondition* instruction) {
1733 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1734 locations->SetInAt(0, Location::RequiresRegister());
1735 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1736 if (instruction->NeedsMaterialization()) {
1737 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1738 }
1739}
1740
1741void InstructionCodeGeneratorMIPS64::VisitCondition(HCondition* instruction) {
1742 if (!instruction->NeedsMaterialization()) {
1743 return;
1744 }
1745
1746 LocationSummary* locations = instruction->GetLocations();
1747
1748 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1749 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1750 Location rhs_location = locations->InAt(1);
1751
1752 GpuRegister rhs_reg = ZERO;
1753 int64_t rhs_imm = 0;
1754 bool use_imm = rhs_location.IsConstant();
1755 if (use_imm) {
1756 rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
1757 } else {
1758 rhs_reg = rhs_location.AsRegister<GpuRegister>();
1759 }
1760
1761 IfCondition if_cond = instruction->GetCondition();
1762
1763 switch (if_cond) {
1764 case kCondEQ:
1765 case kCondNE:
1766 if (use_imm && IsUint<16>(rhs_imm)) {
1767 __ Xori(dst, lhs, rhs_imm);
1768 } else {
1769 if (use_imm) {
1770 rhs_reg = TMP;
1771 __ LoadConst32(rhs_reg, rhs_imm);
1772 }
1773 __ Xor(dst, lhs, rhs_reg);
1774 }
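// dst now holds lhs ^ rhs, which is zero iff the operands are equal:
// sltiu dst, dst, 1 maps 0 -> 1 (EQ), while sltu dst, zero, dst maps any
// non-zero value -> 1 (NE).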
1775 if (if_cond == kCondEQ) {
1776 __ Sltiu(dst, dst, 1);
1777 } else {
1778 __ Sltu(dst, ZERO, dst);
1779 }
1780 break;
1781
1782 case kCondLT:
1783 case kCondGE:
1784 if (use_imm && IsInt<16>(rhs_imm)) {
1785 __ Slti(dst, lhs, rhs_imm);
1786 } else {
1787 if (use_imm) {
1788 rhs_reg = TMP;
1789 __ LoadConst32(rhs_reg, rhs_imm);
1790 }
1791 __ Slt(dst, lhs, rhs_reg);
1792 }
1793 if (if_cond == kCondGE) {
1794 // Simulate lhs >= rhs via !(lhs < rhs) since there's
1795 // only the slt instruction but no sge.
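// E.g. lhs = 3, rhs = 7: slt sets dst = 1 (3 < 7); xori flips it to 0,
// correctly evaluating (3 >= 7) as false.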
1796 __ Xori(dst, dst, 1);
1797 }
1798 break;
1799
1800 case kCondLE:
1801 case kCondGT:
1802 if (use_imm && IsInt<16>(rhs_imm + 1)) {
1803 // Simulate lhs <= rhs via lhs < rhs + 1.
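// E.g. lhs = 5, rhs = 5: slti computes (5 < 6) = 1, so lhs <= rhs holds.
// The IsInt<16>(rhs_imm + 1) guard above rules out immediate overflow.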
1804 __ Slti(dst, lhs, rhs_imm + 1);
1805 if (if_cond == kCondGT) {
1806 // Simulate lhs > rhs via !(lhs <= rhs) since there's
1807 // only the slti instruction but no sgti.
1808 __ Xori(dst, dst, 1);
1809 }
1810 } else {
1811 if (use_imm) {
1812 rhs_reg = TMP;
1813 __ LoadConst32(rhs_reg, rhs_imm);
1814 }
1815 __ Slt(dst, rhs_reg, lhs);
1816 if (if_cond == kCondLE) {
1817 // Simulate lhs <= rhs via !(rhs < lhs) since there's
1818 // only the slt instruction but no sle.
1819 __ Xori(dst, dst, 1);
1820 }
1821 }
1822 break;
1823 }
1824}
1825
1826void LocationsBuilderMIPS64::VisitDiv(HDiv* div) {
1827 LocationSummary* locations =
1828 new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
1829 switch (div->GetResultType()) {
1830 case Primitive::kPrimInt:
1831 case Primitive::kPrimLong:
1832 locations->SetInAt(0, Location::RequiresRegister());
1833 locations->SetInAt(1, Location::RequiresRegister());
1834 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1835 break;
1836
1837 case Primitive::kPrimFloat:
1838 case Primitive::kPrimDouble:
1839 locations->SetInAt(0, Location::RequiresFpuRegister());
1840 locations->SetInAt(1, Location::RequiresFpuRegister());
1841 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1842 break;
1843
1844 default:
1845 LOG(FATAL) << "Unexpected div type " << div->GetResultType();
1846 }
1847}
1848
1849void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) {
1850 Primitive::Type type = instruction->GetType();
1851 LocationSummary* locations = instruction->GetLocations();
1852
1853 switch (type) {
1854 case Primitive::kPrimInt:
1855 case Primitive::kPrimLong: {
1856 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1857 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1858 GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
1859 if (type == Primitive::kPrimInt)
1860 __ DivR6(dst, lhs, rhs);
1861 else
1862 __ Ddiv(dst, lhs, rhs);
1863 break;
1864 }
1865 case Primitive::kPrimFloat:
1866 case Primitive::kPrimDouble: {
1867 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
1868 FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
1869 FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
1870 if (type == Primitive::kPrimFloat)
1871 __ DivS(dst, lhs, rhs);
1872 else
1873 __ DivD(dst, lhs, rhs);
1874 break;
1875 }
1876 default:
1877 LOG(FATAL) << "Unexpected div type " << type;
1878 }
1879}
1880
1881void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
1882 LocationSummary* locations =
1883 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1884 locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
1885 if (instruction->HasUses()) {
1886 locations->SetOut(Location::SameAsFirstInput());
1887 }
1888}
1889
1890void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
1891 SlowPathCodeMIPS64* slow_path =
1892 new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS64(instruction);
1893 codegen_->AddSlowPath(slow_path);
1894 Location value = instruction->GetLocations()->InAt(0);
1895
1896 Primitive::Type type = instruction->GetType();
1897
1898 if ((type == Primitive::kPrimBoolean) || !Primitive::IsIntegralType(type)) {
1899 LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
1900 return;
1901 }
1902
1903 if (value.IsConstant()) {
1904 int64_t divisor = codegen_->GetInt64ValueOf(value.GetConstant()->AsConstant());
1905 if (divisor == 0) {
1906 __ B(slow_path->GetEntryLabel());
1907 } else {
1908 // A division by a non-zero constant is valid. We don't need to perform
1909 // any check, so simply fall through.
1910 }
1911 } else {
1912 __ Beqzc(value.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
1913 }
1914}
1915
1916void LocationsBuilderMIPS64::VisitDoubleConstant(HDoubleConstant* constant) {
1917 LocationSummary* locations =
1918 new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1919 locations->SetOut(Location::ConstantLocation(constant));
1920}
1921
1922void InstructionCodeGeneratorMIPS64::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
1923 // Will be generated at use site.
1924}
1925
1926void LocationsBuilderMIPS64::VisitExit(HExit* exit) {
1927 exit->SetLocations(nullptr);
1928}
1929
1930void InstructionCodeGeneratorMIPS64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
1931}
1932
1933void LocationsBuilderMIPS64::VisitFloatConstant(HFloatConstant* constant) {
1934 LocationSummary* locations =
1935 new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1936 locations->SetOut(Location::ConstantLocation(constant));
1937}
1938
1939void InstructionCodeGeneratorMIPS64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
1940 // Will be generated at use site.
1941}
1942
1943void InstructionCodeGeneratorMIPS64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
1944 DCHECK(!successor->IsExitBlock());
1945 HBasicBlock* block = got->GetBlock();
1946 HInstruction* previous = got->GetPrevious();
1947 HLoopInformation* info = block->GetLoopInformation();
1948
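// A goto on a loop back edge carries the loop's suspend check, giving the
// runtime a chance to suspend the thread (e.g. for GC) in long-running loops.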
1949 if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
1950 codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
1951 GenerateSuspendCheck(info->GetSuspendCheck(), successor);
1952 return;
1953 }
1954 if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
1955 GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
1956 }
1957 if (!codegen_->GoesToNextBlock(block, successor)) {
1958 __ B(codegen_->GetLabelOf(successor));
1959 }
1960}
1961
1962void LocationsBuilderMIPS64::VisitGoto(HGoto* got) {
1963 got->SetLocations(nullptr);
1964}
1965
1966void InstructionCodeGeneratorMIPS64::VisitGoto(HGoto* got) {
1967 HandleGoto(got, got->GetSuccessor());
1968}
1969
1970void LocationsBuilderMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
1971 try_boundary->SetLocations(nullptr);
1972}
1973
1974void InstructionCodeGeneratorMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
1975 HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
1976 if (!successor->IsExitBlock()) {
1977 HandleGoto(try_boundary, successor);
1978 }
1979}
1980
1981void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruction,
1982 Label* true_target,
1983 Label* false_target,
1984 Label* always_true_target) {
1985 HInstruction* cond = instruction->InputAt(0);
1986 HCondition* condition = cond->AsCondition();
1987
1988 if (cond->IsIntConstant()) {
1989 int32_t cond_value = cond->AsIntConstant()->GetValue();
1990 if (cond_value == 1) {
1991 if (always_true_target != nullptr) {
1992 __ B(always_true_target);
1993 }
1994 return;
1995 } else {
1996 DCHECK_EQ(cond_value, 0);
1997 }
1998 } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
1999 // The condition instruction has been materialized, compare the output to 0.
2000 Location cond_val = instruction->GetLocations()->InAt(0);
2001 DCHECK(cond_val.IsRegister());
2002 __ Bnezc(cond_val.AsRegister<GpuRegister>(), true_target);
2003 } else {
2004 // The condition instruction has not been materialized, use its inputs as
2005 // the comparison and its condition as the branch condition.
2006 GpuRegister lhs = condition->GetLocations()->InAt(0).AsRegister<GpuRegister>();
2007 Location rhs_location = condition->GetLocations()->InAt(1);
2008 GpuRegister rhs_reg = ZERO;
2009 int32_t rhs_imm = 0;
2010 bool use_imm = rhs_location.IsConstant();
2011 if (use_imm) {
2012 rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
2013 } else {
2014 rhs_reg = rhs_location.AsRegister<GpuRegister>();
2015 }
2016
2017 IfCondition if_cond = condition->GetCondition();
2018 if (use_imm && rhs_imm == 0) {
2019 switch (if_cond) {
2020 case kCondEQ:
2021 __ Beqzc(lhs, true_target);
2022 break;
2023 case kCondNE:
2024 __ Bnezc(lhs, true_target);
2025 break;
2026 case kCondLT:
2027 __ Bltzc(lhs, true_target);
2028 break;
2029 case kCondGE:
2030 __ Bgezc(lhs, true_target);
2031 break;
2032 case kCondLE:
2033 __ Blezc(lhs, true_target);
2034 break;
2035 case kCondGT:
2036 __ Bgtzc(lhs, true_target);
2037 break;
2038 }
2039 } else {
2040 if (use_imm) {
2041 rhs_reg = TMP;
2042 __ LoadConst32(rhs_reg, rhs_imm);
2043 }
2044 // It looks like we can get here with lhs == rhs. Should that be possible at all?
2045 // MIPS R6 requires lhs != rhs for compact branches.
2046 if (lhs == rhs_reg) {
2047 DCHECK(!use_imm);
2048 switch (if_cond) {
2049 case kCondEQ:
2050 case kCondGE:
2051 case kCondLE:
2052 // If lhs == rhs, a positive condition (EQ, GE, LE) always holds: branch unconditionally.
2053 __ B(true_target);
2054 break;
2055 case kCondNE:
2056 case kCondLT:
2057 case kCondGT:
2058 // If lhs == rhs, a negative condition (NE, LT, GT) never holds: emit nothing.
2059 break;
2060 }
2061 } else {
2062 switch (if_cond) {
2063 case kCondEQ:
2064 __ Beqc(lhs, rhs_reg, true_target);
2065 break;
2066 case kCondNE:
2067 __ Bnec(lhs, rhs_reg, true_target);
2068 break;
2069 case kCondLT:
2070 __ Bltc(lhs, rhs_reg, true_target);
2071 break;
2072 case kCondGE:
2073 __ Bgec(lhs, rhs_reg, true_target);
2074 break;
2075 case kCondLE:
2076 __ Bgec(rhs_reg, lhs, true_target);
2077 break;
2078 case kCondGT:
2079 __ Bltc(rhs_reg, lhs, true_target);
2080 break;
2081 }
2082 }
2083 }
2084 }
2085 if (false_target != nullptr) {
2086 __ B(false_target);
2087 }
2088}
2089
2090void LocationsBuilderMIPS64::VisitIf(HIf* if_instr) {
2091 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
2092 HInstruction* cond = if_instr->InputAt(0);
2093 if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
2094 locations->SetInAt(0, Location::RequiresRegister());
2095 }
2096}
2097
2098void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
2099 Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
2100 Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
2101 Label* always_true_target = true_target;
2102 if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
2103 if_instr->IfTrueSuccessor())) {
2104 always_true_target = nullptr;
2105 }
2106 if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
2107 if_instr->IfFalseSuccessor())) {
2108 false_target = nullptr;
2109 }
2110 GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
2111}
2112
2113void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
2114 LocationSummary* locations = new (GetGraph()->GetArena())
2115 LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
2116 HInstruction* cond = deoptimize->InputAt(0);
2117 DCHECK(cond->IsCondition());
2118 if (cond->AsCondition()->NeedsMaterialization()) {
2119 locations->SetInAt(0, Location::RequiresRegister());
2120 }
2121}
2122
2123void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
2124 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
2125 DeoptimizationSlowPathMIPS64(deoptimize);
2126 codegen_->AddSlowPath(slow_path);
2127 Label* slow_path_entry = slow_path->GetEntryLabel();
2128 GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
2129}
2130
2131void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
2132 const FieldInfo& field_info ATTRIBUTE_UNUSED) {
2133 LocationSummary* locations =
2134 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2135 locations->SetInAt(0, Location::RequiresRegister());
2136 if (Primitive::IsFloatingPointType(instruction->GetType())) {
2137 locations->SetOut(Location::RequiresFpuRegister());
2138 } else {
2139 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2140 }
2141}
2142
2143void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
2144 const FieldInfo& field_info) {
2145 Primitive::Type type = field_info.GetFieldType();
2146 LocationSummary* locations = instruction->GetLocations();
2147 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2148 LoadOperandType load_type = kLoadUnsignedByte;
2149 switch (type) {
2150 case Primitive::kPrimBoolean:
2151 load_type = kLoadUnsignedByte;
2152 break;
2153 case Primitive::kPrimByte:
2154 load_type = kLoadSignedByte;
2155 break;
2156 case Primitive::kPrimShort:
2157 load_type = kLoadSignedHalfword;
2158 break;
2159 case Primitive::kPrimChar:
2160 load_type = kLoadUnsignedHalfword;
2161 break;
2162 case Primitive::kPrimInt:
2163 case Primitive::kPrimFloat:
2164 load_type = kLoadWord;
2165 break;
2166 case Primitive::kPrimLong:
2167 case Primitive::kPrimDouble:
2168 load_type = kLoadDoubleword;
2169 break;
2170 case Primitive::kPrimNot:
2171 load_type = kLoadUnsignedWord;
2172 break;
2173 case Primitive::kPrimVoid:
2174 LOG(FATAL) << "Unreachable type " << type;
2175 UNREACHABLE();
2176 }
2177 if (!Primitive::IsFloatingPointType(type)) {
2178 DCHECK(locations->Out().IsRegister());
2179 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2180 __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
2181 } else {
2182 DCHECK(locations->Out().IsFpuRegister());
2183 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
2184 __ LoadFpuFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
2185 }
2186
2187 codegen_->MaybeRecordImplicitNullCheck(instruction);
2188 // TODO: memory barrier?
2189}
2190
2191void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
2192 const FieldInfo& field_info ATTRIBUTE_UNUSED) {
2193 LocationSummary* locations =
2194 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2195 locations->SetInAt(0, Location::RequiresRegister());
2196 if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
2197 locations->SetInAt(1, Location::RequiresFpuRegister());
2198 } else {
2199 locations->SetInAt(1, Location::RequiresRegister());
2200 }
2201}
2202
2203void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
2204 const FieldInfo& field_info) {
2205 Primitive::Type type = field_info.GetFieldType();
2206 LocationSummary* locations = instruction->GetLocations();
2207 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2208 StoreOperandType store_type = kStoreByte;
2209 switch (type) {
2210 case Primitive::kPrimBoolean:
2211 case Primitive::kPrimByte:
2212 store_type = kStoreByte;
2213 break;
2214 case Primitive::kPrimShort:
2215 case Primitive::kPrimChar:
2216 store_type = kStoreHalfword;
2217 break;
2218 case Primitive::kPrimInt:
2219 case Primitive::kPrimFloat:
2220 case Primitive::kPrimNot:
2221 store_type = kStoreWord;
2222 break;
2223 case Primitive::kPrimLong:
2224 case Primitive::kPrimDouble:
2225 store_type = kStoreDoubleword;
2226 break;
2227 case Primitive::kPrimVoid:
2228 LOG(FATAL) << "Unreachable type " << type;
2229 UNREACHABLE();
2230 }
2231 if (!Primitive::IsFloatingPointType(type)) {
2232 DCHECK(locations->InAt(1).IsRegister());
2233 GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
2234 __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
2235 } else {
2236 DCHECK(locations->InAt(1).IsFpuRegister());
2237 FpuRegister src = locations->InAt(1).AsFpuRegister<FpuRegister>();
2238 __ StoreFpuToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
2239 }
2240
2241 codegen_->MaybeRecordImplicitNullCheck(instruction);
2242 // TODO: memory barriers?
2243 if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
2244 DCHECK(locations->InAt(1).IsRegister());
2245 GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
2246 codegen_->MarkGCCard(obj, src);
2247 }
2248}
2249
2250void LocationsBuilderMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
2251 HandleFieldGet(instruction, instruction->GetFieldInfo());
2252}
2253
2254void InstructionCodeGeneratorMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
2255 HandleFieldGet(instruction, instruction->GetFieldInfo());
2256}
2257
2258void LocationsBuilderMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
2259 HandleFieldSet(instruction, instruction->GetFieldInfo());
2260}
2261
2262void InstructionCodeGeneratorMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
2263 HandleFieldSet(instruction, instruction->GetFieldInfo());
2264}
2265
2266void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
2267 LocationSummary::CallKind call_kind =
2268 instruction->IsClassFinal() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
2269 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
2270 locations->SetInAt(0, Location::RequiresRegister());
2271 locations->SetInAt(1, Location::RequiresRegister());
2272 // The output overlaps the inputs: `out` is clobbered with the loaded class while `cls` is still live.
2273 locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2274}
2275
2276void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
2277 LocationSummary* locations = instruction->GetLocations();
2278 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2279 GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
2280 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
2281
2282 Label done;
2283
2284 // Return 0 if `obj` is null.
2285 // TODO: Avoid this check if we know `obj` is not null.
2286 __ Move(out, ZERO);
2287 __ Beqzc(obj, &done);
2288
2289 // Compare the class of `obj` with `cls`.
2290 __ LoadFromOffset(kLoadUnsignedWord, out, obj, mirror::Object::ClassOffset().Int32Value());
2291 if (instruction->IsClassFinal()) {
2292 // Classes must be equal for the instanceof to succeed.
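// xor leaves 0 in `out` iff the classes match; sltiu out, out, 1 then
// converts 0 -> 1 (instanceof succeeds) and any non-zero value -> 0.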
2293 __ Xor(out, out, cls);
2294 __ Sltiu(out, out, 1);
2295 } else {
2296 // If the classes are not equal, we go into a slow path.
2297 DCHECK(locations->OnlyCallsOnSlowPath());
2298 SlowPathCodeMIPS64* slow_path =
2299 new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
2300 locations->InAt(1),
2301 locations->Out(),
2302 instruction->GetDexPc());
2303 codegen_->AddSlowPath(slow_path);
2304 __ Bnec(out, cls, slow_path->GetEntryLabel());
2305 __ LoadConst32(out, 1);
2306 __ Bind(slow_path->GetExitLabel());
2307 }
2308
2309 __ Bind(&done);
2310}
2311
2312void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) {
2313 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2314 locations->SetOut(Location::ConstantLocation(constant));
2315}
2316
2317void InstructionCodeGeneratorMIPS64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
2318 // Will be generated at use site.
2319}
2320
2321void LocationsBuilderMIPS64::VisitNullConstant(HNullConstant* constant) {
2322 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2323 locations->SetOut(Location::ConstantLocation(constant));
2324}
2325
2326void InstructionCodeGeneratorMIPS64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
2327 // Will be generated at use site.
2328}
2329
2330void LocationsBuilderMIPS64::HandleInvoke(HInvoke* invoke) {
2331 InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
2332 CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
2333}
2334
2335void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
2336 HandleInvoke(invoke);
2337 // The register T0 is required to be used for the hidden argument in
2338 // art_quick_imt_conflict_trampoline, so add the hidden argument.
2339 invoke->GetLocations()->AddTemp(Location::RegisterLocation(T0));
2340}
2341
2342void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
2343 // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
2344 GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
2345 uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
2346 invoke->GetImtIndex() % mirror::Class::kImtSize, kMips64PointerSize).Uint32Value();
2347 Location receiver = invoke->GetLocations()->InAt(0);
2348 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2349 Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);
2350
2351 // Set the hidden argument.
2352 __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<GpuRegister>(),
2353 invoke->GetDexMethodIndex());
2354
2355 // temp = object->GetClass();
2356 if (receiver.IsStackSlot()) {
2357 __ LoadFromOffset(kLoadUnsignedWord, temp, SP, receiver.GetStackIndex());
2358 __ LoadFromOffset(kLoadUnsignedWord, temp, temp, class_offset);
2359 } else {
2360 __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
2361 }
2362 codegen_->MaybeRecordImplicitNullCheck(invoke);
2363 // temp = temp->GetImtEntryAt(method_offset);
2364 __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
2365 // T9 = temp->GetEntryPoint();
2366 __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
2367 // T9();
2368 __ Jalr(T9);
2369 DCHECK(!codegen_->IsLeafMethod());
2370 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2371}
2372
2373void LocationsBuilderMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
2374 // TODO intrinsic function
2375 HandleInvoke(invoke);
2376}
2377
2378void LocationsBuilderMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2379 // When we do not run baseline, explicit clinit checks triggered by static
2380 // invokes must have been pruned by art::PrepareForRegisterAllocation.
2381 DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2382
2383 // TODO - intrinsic function
2384 HandleInvoke(invoke);
2385
2386 // SetupBlockedRegisters() already blocks registers S2-S8 because they are
2387 // clobbered elsewhere; reduce register pressure further by not allocating
2388 // a register for the current method pointer, as on the x86 baseline.
2389 // TODO: remove this once all the issues with register saving/restoring are
2390 // sorted out.
2391 LocationSummary* locations = invoke->GetLocations();
2392 Location location = locations->InAt(invoke->GetCurrentMethodInputIndex());
2393 if (location.IsUnallocated() && location.GetPolicy() == Location::kRequiresRegister) {
2394 locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::NoLocation());
2395 }
2396}
2397
2398static bool TryGenerateIntrinsicCode(HInvoke* invoke,
2399 CodeGeneratorMIPS64* codegen ATTRIBUTE_UNUSED) {
2400 if (invoke->GetLocations()->Intrinsified()) {
2401 // TODO - intrinsic function
2402 return true;
2403 }
2404 return false;
2405}
2406
2407void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
2408 // All registers are assumed to be correctly set up per the calling convention.
2409
2410 // TODO: Implement all kinds of calls:
2411 // 1) boot -> boot
2412 // 2) app -> boot
2413 // 3) app -> app
2414 //
2415 // Currently we implement the app -> app logic, which looks the method up in the dex cache of resolved methods.
2416
2417 if (invoke->IsStringInit()) {
2418 GpuRegister reg = temp.AsRegister<GpuRegister>();
2419 // temp = thread->string_init_entrypoint
2420 __ LoadFromOffset(kLoadDoubleword,
2421 reg,
2422 TR,
2423 invoke->GetStringInitOffset());
2424 // T9 = temp->entry_point_from_quick_compiled_code_;
2425 __ LoadFromOffset(kLoadDoubleword,
2426 T9,
2427 reg,
2428 ArtMethod::EntryPointFromQuickCompiledCodeOffset(
2429 kMips64WordSize).Int32Value());
2430 // T9()
2431 __ Jalr(T9);
2432 } else if (invoke->IsRecursive()) {
2433 __ Jalr(&frame_entry_label_, T9);
2434 } else {
2435 Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
2436 GpuRegister reg = temp.AsRegister<GpuRegister>();
2437 GpuRegister method_reg;
2438 if (current_method.IsRegister()) {
2439 method_reg = current_method.AsRegister<GpuRegister>();
2440 } else {
2441 // TODO: use the appropriate DCHECK() here if possible.
2442 // DCHECK(invoke->GetLocations()->Intrinsified());
2443 DCHECK(!current_method.IsValid());
2444 method_reg = reg;
2445 __ Ld(reg, SP, kCurrentMethodStackOffset);
2446 }
2447
2448 // temp = temp->dex_cache_resolved_methods_;
2449 __ LoadFromOffset(kLoadUnsignedWord,
2450 reg,
2451 method_reg,
2452 ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
2453 // temp = temp[index_in_cache]
2454 __ LoadFromOffset(kLoadDoubleword,
2455 reg,
2456 reg,
2457 CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex()));
2458 // T9 = temp[offset_of_quick_compiled_code]
2459 __ LoadFromOffset(kLoadDoubleword,
2460 T9,
2461 reg,
2462 ArtMethod::EntryPointFromQuickCompiledCodeOffset(
2463 kMips64WordSize).Int32Value());
2464 // T9()
2465 __ Jalr(T9);
2466 }
2467
2468 DCHECK(!IsLeafMethod());
2469}
2470
2471void InstructionCodeGeneratorMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2472 // When we do not run baseline, explicit clinit checks triggered by static
2473 // invokes must have been pruned by art::PrepareForRegisterAllocation.
2474 DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2475
2476 if (TryGenerateIntrinsicCode(invoke, codegen_)) {
2477 return;
2478 }
2479
2480 LocationSummary* locations = invoke->GetLocations();
2481 codegen_->GenerateStaticOrDirectCall(invoke,
2482 locations->HasTemps()
2483 ? locations->GetTemp(0)
2484 : Location::NoLocation());
2485 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2486}
2487
2488void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
2489 // TODO: Try to generate intrinsics code.
2490 LocationSummary* locations = invoke->GetLocations();
2491 Location receiver = locations->InAt(0);
2492 GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
2493 size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
2494 invoke->GetVTableIndex(), kMips64PointerSize).SizeValue();
2495 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2496 Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);
2497
2498 // temp = object->GetClass();
2499 DCHECK(receiver.IsRegister());
2500 __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
2501 codegen_->MaybeRecordImplicitNullCheck(invoke);
2502 // temp = temp->GetMethodAt(method_offset);
2503 __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
2504 // T9 = temp->GetEntryPoint();
2505 __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
2506 // T9();
2507 __ Jalr(T9);
2508 DCHECK(!codegen_->IsLeafMethod());
2509 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2510}
2511
2512void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
2513 LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
2514 : LocationSummary::kNoCall;
2515 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
2516 locations->SetInAt(0, Location::RequiresRegister());
2517 locations->SetOut(Location::RequiresRegister());
2518}
2519
2520void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
2521 LocationSummary* locations = cls->GetLocations();
2522 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
2523 GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
2524 if (cls->IsReferrersClass()) {
2525 DCHECK(!cls->CanCallRuntime());
2526 DCHECK(!cls->MustGenerateClinitCheck());
2527 __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
2528 ArtMethod::DeclaringClassOffset().Int32Value());
2529 } else {
2530 DCHECK(cls->CanCallRuntime());
2531 __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
2532 ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
2533 __ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
2534 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
2535 cls,
2536 cls,
2537 cls->GetDexPc(),
2538 cls->MustGenerateClinitCheck());
2539 codegen_->AddSlowPath(slow_path);
2540 __ Beqzc(out, slow_path->GetEntryLabel());
2541 if (cls->MustGenerateClinitCheck()) {
2542 GenerateClassInitializationCheck(slow_path, out);
2543 } else {
2544 __ Bind(slow_path->GetExitLabel());
2545 }
2546 }
2547}
2548
2549static int32_t GetExceptionTlsOffset() {
2550 return Thread::ExceptionOffset<kMips64WordSize>().Int32Value();
2551}
2552
2553void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
2554 LocationSummary* locations =
2555 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
2556 locations->SetOut(Location::RequiresRegister());
2557}
2558
2559void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) {
2560 GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>();
2561 __ LoadFromOffset(kLoadUnsignedWord, out, TR, GetExceptionTlsOffset());
2562}
2563
2564void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) {
2565 new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
2566}
2567
2568void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
2569 __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
2570}
2571
2572void LocationsBuilderMIPS64::VisitLoadLocal(HLoadLocal* load) {
2573 load->SetLocations(nullptr);
2574}
2575
2576void InstructionCodeGeneratorMIPS64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
2577 // Nothing to do, this is driven by the code generator.
2578}
2579
2580void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
2581 LocationSummary* locations =
2582 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
2583 locations->SetInAt(0, Location::RequiresRegister());
2584 locations->SetOut(Location::RequiresRegister());
2585}
2586
2587void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
2588 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
2589 codegen_->AddSlowPath(slow_path);
2590
2591 LocationSummary* locations = load->GetLocations();
2592 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
2593 GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
2594 __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
2595 ArtMethod::DeclaringClassOffset().Int32Value());
2596 __ LoadFromOffset(kLoadUnsignedWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
2597 __ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
2598 __ Beqzc(out, slow_path->GetEntryLabel());
2599 __ Bind(slow_path->GetExitLabel());
2600}
2601
2602void LocationsBuilderMIPS64::VisitLocal(HLocal* local) {
2603 local->SetLocations(nullptr);
2604}
2605
2606void InstructionCodeGeneratorMIPS64::VisitLocal(HLocal* local) {
2607 DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
2608}
2609
2610void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) {
2611 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2612 locations->SetOut(Location::ConstantLocation(constant));
2613}
2614
2615void InstructionCodeGeneratorMIPS64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
2616 // Will be generated at use site.
2617}
2618
2619void LocationsBuilderMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
2620 LocationSummary* locations =
2621 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2622 InvokeRuntimeCallingConvention calling_convention;
2623 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2624}
2625
2626void InstructionCodeGeneratorMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
2627 codegen_->InvokeRuntime(instruction->IsEnter()
2628 ? QUICK_ENTRY_POINT(pLockObject)
2629 : QUICK_ENTRY_POINT(pUnlockObject),
2630 instruction,
2631 instruction->GetDexPc(),
2632 nullptr);
2633 CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
2634}
2635
2636void LocationsBuilderMIPS64::VisitMul(HMul* mul) {
2637 LocationSummary* locations =
2638 new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2639 switch (mul->GetResultType()) {
2640 case Primitive::kPrimInt:
2641 case Primitive::kPrimLong:
2642 locations->SetInAt(0, Location::RequiresRegister());
2643 locations->SetInAt(1, Location::RequiresRegister());
2644 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2645 break;
2646
2647 case Primitive::kPrimFloat:
2648 case Primitive::kPrimDouble:
2649 locations->SetInAt(0, Location::RequiresFpuRegister());
2650 locations->SetInAt(1, Location::RequiresFpuRegister());
2651 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2652 break;
2653
2654 default:
2655 LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2656 }
2657}
2658
2659void InstructionCodeGeneratorMIPS64::VisitMul(HMul* instruction) {
2660 Primitive::Type type = instruction->GetType();
2661 LocationSummary* locations = instruction->GetLocations();
2662
2663 switch (type) {
2664 case Primitive::kPrimInt:
2665 case Primitive::kPrimLong: {
2666 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2667 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
2668 GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
2669 if (type == Primitive::kPrimInt)
2670 __ MulR6(dst, lhs, rhs);
2671 else
2672 __ Dmul(dst, lhs, rhs);
2673 break;
2674 }
2675 case Primitive::kPrimFloat:
2676 case Primitive::kPrimDouble: {
2677 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
2678 FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
2679 FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
2680 if (type == Primitive::kPrimFloat)
2681 __ MulS(dst, lhs, rhs);
2682 else
2683 __ MulD(dst, lhs, rhs);
2684 break;
2685 }
2686 default:
2687 LOG(FATAL) << "Unexpected mul type " << type;
2688 }
2689}
2690
2691void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) {
2692 LocationSummary* locations =
2693 new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
2694 switch (neg->GetResultType()) {
2695 case Primitive::kPrimInt:
2696 case Primitive::kPrimLong:
2697 locations->SetInAt(0, Location::RequiresRegister());
2698 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2699 break;
2700
2701 case Primitive::kPrimFloat:
2702 case Primitive::kPrimDouble:
2703 locations->SetInAt(0, Location::RequiresFpuRegister());
2704 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2705 break;
2706
2707 default:
2708 LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
2709 }
2710}
2711
2712void InstructionCodeGeneratorMIPS64::VisitNeg(HNeg* instruction) {
2713 Primitive::Type type = instruction->GetType();
2714 LocationSummary* locations = instruction->GetLocations();
2715
2716 switch (type) {
2717 case Primitive::kPrimInt:
2718 case Primitive::kPrimLong: {
2719 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2720 GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
2721 if (type == Primitive::kPrimInt)
2722 __ Subu(dst, ZERO, src);
2723 else
2724 __ Dsubu(dst, ZERO, src);
2725 break;
2726 }
2727 case Primitive::kPrimFloat:
2728 case Primitive::kPrimDouble: {
2729 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
2730 FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
2731 if (type == Primitive::kPrimFloat)
2732 __ NegS(dst, src);
2733 else
2734 __ NegD(dst, src);
2735 break;
2736 }
2737 default:
2738 LOG(FATAL) << "Unexpected neg type " << type;
2739 }
2740}
2741
2742void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
2743 LocationSummary* locations =
2744 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2745 InvokeRuntimeCallingConvention calling_convention;
2746 locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2747 locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
2748 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2749 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
2750}
2751
2752void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
2753 LocationSummary* locations = instruction->GetLocations();
2754 // Move the type index (a uint16_t value) to a register.
2755 __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
2756 codegen_->InvokeRuntime(
2757 GetThreadOffset<kMips64WordSize>(instruction->GetEntrypoint()).Int32Value(),
2758 instruction,
2759 instruction->GetDexPc(),
2760 nullptr);
2761 CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
2762}
2763
2764void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
2765 LocationSummary* locations =
2766 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2767 InvokeRuntimeCallingConvention calling_convention;
2768 locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2769 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2770 locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
2771}
2772
2773void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) {
2774 LocationSummary* locations = instruction->GetLocations();
2775 // Move the type index (a uint16_t value) to a register.
2776 __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
2777 codegen_->InvokeRuntime(
2778 GetThreadOffset<kMips64WordSize>(instruction->GetEntrypoint()).Int32Value(),
2779 instruction,
2780 instruction->GetDexPc(),
2781 nullptr);
2782 CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
2783}
2784
2785void LocationsBuilderMIPS64::VisitNot(HNot* instruction) {
2786 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2787 locations->SetInAt(0, Location::RequiresRegister());
2788 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2789}
2790
2791void InstructionCodeGeneratorMIPS64::VisitNot(HNot* instruction) {
2792 Primitive::Type type = instruction->GetType();
2793 LocationSummary* locations = instruction->GetLocations();
2794
2795 switch (type) {
2796 case Primitive::kPrimInt:
2797 case Primitive::kPrimLong: {
2798 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2799 GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
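// MIPS has no bitwise NOT instruction; nor dst, src, zero computes
// ~(src | 0), i.e. ~src.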
2800 __ Nor(dst, src, ZERO);
2801 break;
2802 }
2803
2804 default:
2805 LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
2806 }
2807}
2808
2809void LocationsBuilderMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
2810 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2811 locations->SetInAt(0, Location::RequiresRegister());
2812 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2813}
2814
2815void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
2816 LocationSummary* locations = instruction->GetLocations();
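// The input is a boolean (0 or 1), so xori with 1 flips it: 0 -> 1, 1 -> 0.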
2817 __ Xori(locations->Out().AsRegister<GpuRegister>(),
2818 locations->InAt(0).AsRegister<GpuRegister>(),
2819 1);
2820}
2821
2822void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
2823 LocationSummary* locations =
2824 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2825 locations->SetInAt(0, Location::RequiresRegister());
2826 if (instruction->HasUses()) {
2827 locations->SetOut(Location::SameAsFirstInput());
2828 }
2829}
2830
2831void InstructionCodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
2832 if (codegen_->CanMoveNullCheckToUser(instruction)) {
2833 return;
2834 }
2835 Location obj = instruction->GetLocations()->InAt(0);
2836
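// Load from the object into the zero register: if `obj` is null, this
// faults, and the fault handler turns the fault into a NullPointerException
// at the dex pc recorded below.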
2837 __ Lw(ZERO, obj.AsRegister<GpuRegister>(), 0);
2838 codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
2839}
2840
2841void InstructionCodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
2842 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS64(instruction);
2843 codegen_->AddSlowPath(slow_path);
2844
2845 Location obj = instruction->GetLocations()->InAt(0);
2846
2847 __ Beqzc(obj.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
2848}
2849
2850void InstructionCodeGeneratorMIPS64::VisitNullCheck(HNullCheck* instruction) {
2851 if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
2852 GenerateImplicitNullCheck(instruction);
2853 } else {
2854 GenerateExplicitNullCheck(instruction);
2855 }
2856}
2857
2858void LocationsBuilderMIPS64::VisitOr(HOr* instruction) {
2859 HandleBinaryOp(instruction);
2860}
2861
2862void InstructionCodeGeneratorMIPS64::VisitOr(HOr* instruction) {
2863 HandleBinaryOp(instruction);
2864}
2865
2866void LocationsBuilderMIPS64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
2867 LOG(FATAL) << "Unreachable";
2868}
2869
2870void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instruction) {
2871 codegen_->GetMoveResolver()->EmitNativeCode(instruction);
2872}
2873
2874void LocationsBuilderMIPS64::VisitParameterValue(HParameterValue* instruction) {
2875 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2876 Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
2877 if (location.IsStackSlot()) {
2878 location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2879 } else if (location.IsDoubleStackSlot()) {
2880 location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2881 }
2882 locations->SetOut(location);
2883}
2884
2885void InstructionCodeGeneratorMIPS64::VisitParameterValue(HParameterValue* instruction
2886 ATTRIBUTE_UNUSED) {
2887 // Nothing to do, the parameter is already at its location.
2888}
2889
2890void LocationsBuilderMIPS64::VisitCurrentMethod(HCurrentMethod* instruction) {
2891 LocationSummary* locations =
2892 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2893 locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
2894}
2895
2896void InstructionCodeGeneratorMIPS64::VisitCurrentMethod(HCurrentMethod* instruction
2897 ATTRIBUTE_UNUSED) {
2898 // Nothing to do, the method is already at its location.
2899}
2900
2901void LocationsBuilderMIPS64::VisitPhi(HPhi* instruction) {
2902 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2903 for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
2904 locations->SetInAt(i, Location::Any());
2905 }
2906 locations->SetOut(Location::Any());
2907}
2908
2909void InstructionCodeGeneratorMIPS64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
2910 LOG(FATAL) << "Unreachable";
2911}
2912
2913void LocationsBuilderMIPS64::VisitRem(HRem* rem) {
2914 Primitive::Type type = rem->GetResultType();
2915 LocationSummary::CallKind call_kind =
2916 Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
2917 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
2918
2919 switch (type) {
2920 case Primitive::kPrimInt:
2921 case Primitive::kPrimLong:
2922 locations->SetInAt(0, Location::RequiresRegister());
2923 locations->SetInAt(1, Location::RequiresRegister());
2924 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2925 break;
2926
2927 case Primitive::kPrimFloat:
2928 case Primitive::kPrimDouble: {
2929 InvokeRuntimeCallingConvention calling_convention;
2930 locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
2931 locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
2932 locations->SetOut(calling_convention.GetReturnLocation(type));
2933 break;
2934 }
2935
2936 default:
2937 LOG(FATAL) << "Unexpected rem type " << type;
2938 }
2939}
2940
2941void InstructionCodeGeneratorMIPS64::VisitRem(HRem* instruction) {
2942 Primitive::Type type = instruction->GetType();
2943 LocationSummary* locations = instruction->GetLocations();
2944
2945 switch (type) {
2946 case Primitive::kPrimInt:
2947 case Primitive::kPrimLong: {
2948 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2949 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
2950 GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
2951 if (type == Primitive::kPrimInt)
2952 __ ModR6(dst, lhs, rhs);
2953 else
2954 __ Dmod(dst, lhs, rhs);
2955 break;
2956 }
2957
2958 case Primitive::kPrimFloat:
2959 case Primitive::kPrimDouble: {
      int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
                                                             : QUICK_ENTRY_POINT(pFmod);
      codegen_->InvokeRuntime(entry_offset, instruction, instruction->GetDexPc(), nullptr);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void LocationsBuilderMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  memory_barrier->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
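  // All barrier kinds are currently lowered conservatively to the same MIPS
  // `sync` instruction by GenerateMemoryBarrier.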
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}

void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
  Primitive::Type return_type = ret->InputAt(0)->GetType();
  locations->SetInAt(0, Mips64ReturnLocation(return_type));
}

void InstructionCodeGeneratorMIPS64::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS64::VisitReturnVoid(HReturnVoid* ret) {
  ret->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorMIPS64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderMIPS64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorMIPS64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderMIPS64::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
  Primitive::Type field_type = store->InputAt(1)->GetType();
  switch (field_type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unimplemented local type " << field_type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}

void LocationsBuilderMIPS64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}

void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}

void LocationsBuilderMIPS64::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
  // Nothing to do, this is driven by the code generator.
}

void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorMIPS64::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}

void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type input_type = conversion->GetInputType();
  Primitive::Type result_type = conversion->GetResultType();
  DCHECK_NE(input_type, result_type);

  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
  }

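  // Only long-to-FP and FP-to-integral conversions go through runtime entry
  // points on MIPS64; every other conversion is generated inline.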
  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  if ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
      (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type))) {
    call_kind = LocationSummary::kCall;
  }

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);

  if (call_kind == LocationSummary::kNoCall) {
    if (Primitive::IsFloatingPointType(input_type)) {
      locations->SetInAt(0, Location::RequiresFpuRegister());
    } else {
      locations->SetInAt(0, Location::RequiresRegister());
    }

    if (Primitive::IsFloatingPointType(result_type)) {
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
    } else {
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
    }
  } else {
    InvokeRuntimeCallingConvention calling_convention;

    if (Primitive::IsFloatingPointType(input_type)) {
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
    } else {
      locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
    }

    locations->SetOut(calling_convention.GetReturnLocation(result_type));
  }
}

void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();

  DCHECK_NE(input_type, result_type);

  if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
    GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
    GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();

    switch (result_type) {
      case Primitive::kPrimChar:
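        // char is the only unsigned integral type, so conversion to char
        // zero-extends the low 16 bits.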
        __ Andi(dst, src, 0xFFFF);
        break;
      case Primitive::kPrimByte:
        // long is never converted into types narrower than int directly,
        // so SEB and SEH can be used without ever causing unpredictable results
        // on 64-bit inputs
        DCHECK(input_type != Primitive::kPrimLong);
        __ Seb(dst, src);
        break;
      case Primitive::kPrimShort:
        // long is never converted into types narrower than int directly,
        // so SEB and SEH can be used without ever causing unpredictable results
        // on 64-bit inputs
        DCHECK(input_type != Primitive::kPrimLong);
        __ Seh(dst, src);
        break;
      case Primitive::kPrimInt:
      case Primitive::kPrimLong:
        // Sign-extend 32-bit int into bits 32 through 63 for
        // int-to-long and long-to-int conversions
        __ Sll(dst, src, 0);
        break;

      default:
        LOG(FATAL) << "Unexpected type conversion from " << input_type
                   << " to " << result_type;
    }
  } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
    if (input_type != Primitive::kPrimLong) {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
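      // Move the 32-bit integer into the scratch FPU register, then convert
      // it to float/double in place.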
      __ Mtc1(src, FTMP);
      if (result_type == Primitive::kPrimFloat) {
        __ Cvtsw(dst, FTMP);
      } else {
        __ Cvtdw(dst, FTMP);
      }
    } else {
      int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
                                                                    : QUICK_ENTRY_POINT(pL2d);
      codegen_->InvokeRuntime(entry_offset,
                              conversion,
                              conversion->GetDexPc(),
                              nullptr);
    }
  } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
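    // These entry points truncate toward zero per Java semantics: NaN maps
    // to zero and out-of-range values saturate to the type's min/max.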
    int32_t entry_offset;
    if (result_type != Primitive::kPrimLong) {
      entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2iz)
                                                           : QUICK_ENTRY_POINT(pD2iz);
    } else {
      entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
                                                           : QUICK_ENTRY_POINT(pD2l);
    }
    codegen_->InvokeRuntime(entry_offset,
                            conversion,
                            conversion->GetDexPc(),
                            nullptr);
  } else if (Primitive::IsFloatingPointType(result_type) &&
             Primitive::IsFloatingPointType(input_type)) {
    FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
    FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
    if (result_type == Primitive::kPrimFloat) {
      __ Cvtsd(dst, src);
    } else {
      __ Cvtds(dst, src);
    }
  } else {
    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
               << " to " << result_type;
  }
}

void LocationsBuilderMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS64::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitFakeString(HFakeString* instruction) {
  DCHECK(codegen_->IsBaseline());
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
}

void InstructionCodeGeneratorMIPS64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
  DCHECK(codegen_->IsBaseline());
  // Will be generated at use site.
}

}  // namespace mips64
}  // namespace art