/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_mips64.h"

#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "art_method.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "offsets.h"
#include "thread.h"
#include "utils/mips64/assembler_mips64.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"

namespace art {
namespace mips64 {

static constexpr int kCurrentMethodStackOffset = 0;
static constexpr GpuRegister kMethodRegisterArgument = A0;

// We need extra temporary/scratch registers (in addition to AT) in some cases.
static constexpr GpuRegister TMP = T8;
static constexpr FpuRegister FTMP = F8;

// ART Thread Register.
static constexpr GpuRegister TR = S1;

Location Mips64ReturnLocation(Primitive::Type return_type) {
  switch (return_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      return Location::RegisterLocation(V0);

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      return Location::FpuRegisterLocation(F0);

    case Primitive::kPrimVoid:
      return Location();
  }
  UNREACHABLE();
}

Location InvokeDexCallingConventionVisitorMIPS64::GetReturnLocation(Primitive::Type type) const {
  return Mips64ReturnLocation(type);
}

Location InvokeDexCallingConventionVisitorMIPS64::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}

Location InvokeDexCallingConventionVisitorMIPS64::GetNextLocation(Primitive::Type type) {
  Location next_location;
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unexpected parameter type " << type;
  }

  if (Primitive::IsFloatingPointType(type) &&
      (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
    next_location = Location::FpuRegisterLocation(
        calling_convention.GetFpuRegisterAt(float_index_++));
    gp_index_++;
  } else if (!Primitive::IsFloatingPointType(type) &&
             (gp_index_ < calling_convention.GetNumberOfRegisters())) {
    next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index_++));
    float_index_++;
  } else {
    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
    next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                                 : Location::StackSlot(stack_offset);
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;

  // TODO: review

  // TODO: shouldn't we use a whole machine word per argument on the stack?
  // Implicit 4-byte method pointer (and such) will cause misalignment.

  return next_location;
}
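
// A worked example of the assignment above: the GPR and FPR indices advance
// in lockstep, so every argument consumes one register "slot" of either kind.
// Assuming the conventional ART MIPS64 argument registers (A1..A7 for core
// values and F13..F19 for floating point; the exact names are an assumption
// here, see the calling convention declaration), a signature (int, float,
// long) would be assigned:
//   arg0 (int)   -> A1  (gp_index_ 0 -> 1, float_index_ 0 -> 1)
//   arg1 (float) -> F14 (float_index_ 1 -> 2, gp_index_ 1 -> 2)
//   arg2 (long)  -> A3  (gp_index_ 2 -> 3, float_index_ 2 -> 3)
// Once both indices pass the register counts, arguments fall through to the
// stack slots computed from GetStackOffsetOf(stack_index_).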

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
  return Mips64ReturnLocation(type);
}

#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()

class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(0),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimInt,
                               locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimInt);
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }

 private:
  HBoundsCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
};

class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
};

class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  LoadClassSlowPathMIPS64(HLoadClass* cls,
                          HInstruction* at,
                          uint32_t dex_pc,
                          bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                            : QUICK_ENTRY_POINT(pInitializeType);
    mips64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = at_->GetType();
      mips64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS64"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
};

class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit LoadStringSlowPathMIPS64(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
    Primitive::Type type = instruction_->GetType();
    mips64_codegen->MoveLocation(locations->Out(),
                                 calling_convention.GetReturnLocation(type),
                                 type);

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
};

class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : instruction_(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }

 private:
  HNullCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
};

class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(mips64_codegen->GetLabelOf(successor_));
    }
  }

  Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS64);
};

class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit TypeCheckSlowPathMIPS64(HInstruction* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
                                                        : locations->Out();
    uint32_t dex_pc = instruction_->GetDexPc();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimNot,
                               object_class,
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
                                    instruction_,
                                    dex_pc,
                                    this);
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial,
                           uint32_t,
                           const mirror::Class*,
                           const mirror::Class*>();
    } else {
      DCHECK(instruction_->IsCheckCast());
      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this);
      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; }

 private:
  HInstruction* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
};

class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit DeoptimizationSlowPathMIPS64(HInstruction* instruction)
      : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
};

CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
                                         const Mips64InstructionSetFeatures& isa_features,
                                         const CompilerOptions& compiler_options,
                                         OptimizingCompilerStats* stats)
    : CodeGenerator(graph,
                    kNumberOfGpuRegisters,
                    kNumberOfFpuRegisters,
                    0,  // kNumberOfRegisterPairs
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options,
                    stats),
      block_labels_(graph->GetArena(), 0),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      isa_features_(isa_features) {
  // Save RA (containing the return address) to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(RA));
}

#undef __
#define __ down_cast<Mips64Assembler*>(GetAssembler())->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()

void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
  CodeGenerator::Finalize(allocator);
}

Mips64Assembler* ParallelMoveResolverMIPS64::GetAssembler() const {
  return codegen_->GetAssembler();
}

void ParallelMoveResolverMIPS64::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
}

void ParallelMoveResolverMIPS64::EmitSwap(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->SwapLocations(move->GetDestination(), move->GetSource(), move->GetType());
}

void ParallelMoveResolverMIPS64::RestoreScratch(int reg) {
  // Pop reg
  __ Ld(GpuRegister(reg), SP, 0);
  __ DecreaseFrameSize(kMips64WordSize);
}

void ParallelMoveResolverMIPS64::SpillScratch(int reg) {
  // Push reg
  __ IncreaseFrameSize(kMips64WordSize);
  __ Sd(GpuRegister(reg), SP, 0);
}

void ParallelMoveResolverMIPS64::Exchange(int index1, int index2, bool double_slot) {
  LoadOperandType load_type = double_slot ? kLoadDoubleword : kLoadWord;
  StoreOperandType store_type = double_slot ? kStoreDoubleword : kStoreWord;
  // Allocate a scratch register other than TMP, if available.
  // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
  // automatically unspilled when the scratch scope object is destroyed).
  ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
  // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
  int stack_offset = ensure_scratch.IsSpilled() ? kMips64WordSize : 0;
  __ LoadFromOffset(load_type,
                    GpuRegister(ensure_scratch.GetRegister()),
                    SP,
                    index1 + stack_offset);
  __ LoadFromOffset(load_type,
                    TMP,
                    SP,
                    index2 + stack_offset);
  __ StoreToOffset(store_type,
                   GpuRegister(ensure_scratch.GetRegister()),
                   SP,
                   index2 + stack_offset);
  __ StoreToOffset(store_type, TMP, SP, index1 + stack_offset);
}
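
// Note on the scratch handling above: the slot-to-slot swap needs two live
// registers at once. TMP is always reserved, and ScratchRegisterScope tries
// to find a second free one; if none is free it spills V0 below SP, which is
// why every slot offset is corrected by `stack_offset` while the scope is
// live. For example (assuming kMips64WordSize == 8), swapping the word slots
// at SP+16 and SP+24 with V0 spilled turns into accesses at SP+24 and SP+32.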

static dwarf::Reg DWARFReg(GpuRegister reg) {
  return dwarf::Reg::Mips64Core(static_cast<int>(reg));
}

// TODO: mapping of floating-point registers to DWARF

void CodeGeneratorMIPS64::GenerateFrameEntry() {
  __ Bind(&frame_entry_label_);

  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod();

  if (do_overflow_check) {
    __ LoadFromOffset(kLoadWord,
                      ZERO,
                      SP,
                      -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64)));
    RecordPcInfo(nullptr, 0);
  }

  // TODO: anything related to T9/GP/GOT/PIC/.so's?

  if (HasEmptyFrame()) {
    return;
  }

  // Make sure the frame size isn't unreasonably large. Per the various APIs
  // it looks like it should always be less than 2GB in size, which allows
  // us to use 32-bit signed offsets from the stack pointer.
  if (GetFrameSize() > 0x7FFFFFFF)
    LOG(FATAL) << "Stack frame larger than 2GB";

  // Spill callee-saved registers.
  // Note that their cumulative size is small and they can be indexed using
  // 16-bit offsets.

  // TODO: increment/decrement SP in one step instead of two or remove this comment.

  uint32_t ofs = FrameEntrySpillSize();
  __ IncreaseFrameSize(ofs);

  for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
    GpuRegister reg = kCoreCalleeSaves[i];
    if (allocated_registers_.ContainsCoreRegister(reg)) {
      ofs -= kMips64WordSize;
      __ Sd(reg, SP, ofs);
      __ cfi().RelOffset(DWARFReg(reg), ofs);
    }
  }

  for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
    FpuRegister reg = kFpuCalleeSaves[i];
    if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
      ofs -= kMips64WordSize;
      __ Sdc1(reg, SP, ofs);
      // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
    }
  }

  // Allocate the rest of the frame and store the current method pointer
  // at its end.

  __ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());

  static_assert(IsInt<16>(kCurrentMethodStackOffset),
                "kCurrentMethodStackOffset must fit into int16_t");
  __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
}
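
// A sketch of the resulting frame, assuming a non-leaf method whose allocated
// registers include one core and one FPU callee-save (hypothetical choices):
//
//   SP + GetFrameSize()  ->  caller's frame
//   ...                      core callee-saves (e.g. S2), spilled downwards
//   ...                      FPU callee-saves (e.g. F24)
//   ...                      locals / spills / outgoing arguments
//   SP + 0               ->  current ArtMethod* (kCurrentMethodStackOffset)
//
// The frame is grown in two IncreaseFrameSize() steps so that the register
// spills land within reach of 16-bit store offsets before the (possibly much
// larger) remainder of the frame is allocated.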

void CodeGeneratorMIPS64::GenerateFrameExit() {
  __ cfi().RememberState();

  // TODO: anything related to T9/GP/GOT/PIC/.so's?

  if (!HasEmptyFrame()) {
    // Deallocate the rest of the frame.

    __ DecreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());

    // Restore callee-saved registers.
    // Note that their cumulative size is small and they can be indexed using
    // 16-bit offsets.

    // TODO: increment/decrement SP in one step instead of two or remove this comment.

    uint32_t ofs = 0;

    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
      FpuRegister reg = kFpuCalleeSaves[i];
      if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
        __ Ldc1(reg, SP, ofs);
        ofs += kMips64WordSize;
        // TODO: __ cfi().Restore(DWARFReg(reg));
      }
    }

    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
      GpuRegister reg = kCoreCalleeSaves[i];
      if (allocated_registers_.ContainsCoreRegister(reg)) {
        __ Ld(reg, SP, ofs);
        ofs += kMips64WordSize;
        __ cfi().Restore(DWARFReg(reg));
      }
    }

    DCHECK_EQ(ofs, FrameEntrySpillSize());
    __ DecreaseFrameSize(ofs);
  }

  __ Jr(RA);

  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}

void CodeGeneratorMIPS64::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

void CodeGeneratorMIPS64::MoveLocation(Location destination,
                                       Location source,
                                       Primitive::Type type) {
  if (source.Equals(destination)) {
    return;
  }

  // A valid move can always be inferred from the destination and source
  // locations. When moving from and to a register, the argument type can be
  // used to generate 32bit instead of 64bit moves.
  bool unspecified_type = (type == Primitive::kPrimVoid);
  DCHECK_EQ(unspecified_type, false);

  if (destination.IsRegister() || destination.IsFpuRegister()) {
    if (unspecified_type) {
      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
      if (source.IsStackSlot() ||
          (src_cst != nullptr && (src_cst->IsIntConstant()
                                  || src_cst->IsFloatConstant()
                                  || src_cst->IsNullConstant()))) {
        // For stack slots and 32bit constants, a 32bit type is appropriate.
        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
      } else {
        // If the source is a double stack slot or a 64bit constant, a 64bit
        // type is appropriate. Else the source is a register, and since the
        // type has not been specified, we choose a 64bit type to force a 64bit
        // move.
        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
      }
    }
    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
           (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
      // Move to GPR/FPR from stack
      LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
      if (Primitive::IsFloatingPointType(type)) {
        __ LoadFpuFromOffset(load_type,
                             destination.AsFpuRegister<FpuRegister>(),
                             SP,
                             source.GetStackIndex());
      } else {
        // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
        __ LoadFromOffset(load_type,
                          destination.AsRegister<GpuRegister>(),
                          SP,
                          source.GetStackIndex());
      }
    } else if (source.IsConstant()) {
      // Move to GPR/FPR from constant
      GpuRegister gpr = AT;
      if (!Primitive::IsFloatingPointType(type)) {
        gpr = destination.AsRegister<GpuRegister>();
      }
      if (type == Primitive::kPrimInt || type == Primitive::kPrimFloat) {
        __ LoadConst32(gpr, GetInt32ValueOf(source.GetConstant()->AsConstant()));
      } else {
        __ LoadConst64(gpr, GetInt64ValueOf(source.GetConstant()->AsConstant()));
      }
      if (type == Primitive::kPrimFloat) {
        __ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
      } else if (type == Primitive::kPrimDouble) {
        __ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
      }
    } else {
      if (destination.IsRegister()) {
        // Move to GPR from GPR
        __ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
      } else {
        // Move to FPR from FPR
        if (type == Primitive::kPrimFloat) {
          __ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
        } else {
          DCHECK_EQ(type, Primitive::kPrimDouble);
          __ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
        }
      }
    }
  } else {  // The destination is not a register. It must be a stack slot.
    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
          type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
             (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
      // Move to stack from GPR/FPR
      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
      if (source.IsRegister()) {
        __ StoreToOffset(store_type,
                         source.AsRegister<GpuRegister>(),
                         SP,
                         destination.GetStackIndex());
      } else {
        __ StoreFpuToOffset(store_type,
                            source.AsFpuRegister<FpuRegister>(),
                            SP,
                            destination.GetStackIndex());
      }
    } else if (source.IsConstant()) {
      // Move to stack from constant
      HConstant* src_cst = source.GetConstant();
      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
      if (destination.IsStackSlot()) {
        __ LoadConst32(TMP, GetInt32ValueOf(src_cst->AsConstant()));
      } else {
        __ LoadConst64(TMP, GetInt64ValueOf(src_cst->AsConstant()));
      }
      __ StoreToOffset(store_type, TMP, SP, destination.GetStackIndex());
    } else {
      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
      DCHECK_EQ(source.IsDoubleStackSlot(), destination.IsDoubleStackSlot());
      // Move to stack from stack
      if (destination.IsStackSlot()) {
        __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
        __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
      } else {
        __ LoadFromOffset(kLoadDoubleword, TMP, SP, source.GetStackIndex());
        __ StoreToOffset(kStoreDoubleword, TMP, SP, destination.GetStackIndex());
      }
    }
  }
}

void CodeGeneratorMIPS64::SwapLocations(Location loc1,
                                        Location loc2,
                                        Primitive::Type type ATTRIBUTE_UNUSED) {
  DCHECK(!loc1.IsConstant());
  DCHECK(!loc2.IsConstant());

  if (loc1.Equals(loc2)) {
    return;
  }

  bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
  bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
  bool is_fp_reg1 = loc1.IsFpuRegister();
  bool is_fp_reg2 = loc2.IsFpuRegister();

  if (loc2.IsRegister() && loc1.IsRegister()) {
    // Swap 2 GPRs
    GpuRegister r1 = loc1.AsRegister<GpuRegister>();
    GpuRegister r2 = loc2.AsRegister<GpuRegister>();
    __ Move(TMP, r2);
    __ Move(r2, r1);
    __ Move(r1, TMP);
  } else if (is_fp_reg2 && is_fp_reg1) {
    // Swap 2 FPRs
    FpuRegister r1 = loc1.AsFpuRegister<FpuRegister>();
    FpuRegister r2 = loc2.AsFpuRegister<FpuRegister>();
    // TODO: Can MOV.S/MOV.D be used here to save one instruction?
    // Need to distinguish float from double, right?
    __ Dmfc1(TMP, r2);
    __ Dmfc1(AT, r1);
    __ Dmtc1(TMP, r1);
    __ Dmtc1(AT, r2);
  } else if (is_slot1 != is_slot2) {
    // Swap GPR/FPR and stack slot
    Location reg_loc = is_slot1 ? loc2 : loc1;
    Location mem_loc = is_slot1 ? loc1 : loc2;
    LoadOperandType load_type = mem_loc.IsStackSlot() ? kLoadWord : kLoadDoubleword;
    StoreOperandType store_type = mem_loc.IsStackSlot() ? kStoreWord : kStoreDoubleword;
    // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
    __ LoadFromOffset(load_type, TMP, SP, mem_loc.GetStackIndex());
    if (reg_loc.IsFpuRegister()) {
      __ StoreFpuToOffset(store_type,
                          reg_loc.AsFpuRegister<FpuRegister>(),
                          SP,
                          mem_loc.GetStackIndex());
      // TODO: review this MTC1/DMTC1 move
      if (mem_loc.IsStackSlot()) {
        __ Mtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
      } else {
        DCHECK(mem_loc.IsDoubleStackSlot());
        __ Dmtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
      }
    } else {
      __ StoreToOffset(store_type, reg_loc.AsRegister<GpuRegister>(), SP, mem_loc.GetStackIndex());
      __ Move(reg_loc.AsRegister<GpuRegister>(), TMP);
    }
  } else if (is_slot1 && is_slot2) {
    move_resolver_.Exchange(loc1.GetStackIndex(),
                            loc2.GetStackIndex(),
                            loc1.IsDoubleStackSlot());
  } else {
    LOG(FATAL) << "Unimplemented swap between locations " << loc1 << " and " << loc2;
  }
}

void CodeGeneratorMIPS64::Move(HInstruction* instruction,
                               Location location,
                               HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type type = instruction->GetType();
  DCHECK_NE(type, Primitive::kPrimVoid);

  if (instruction->IsCurrentMethod()) {
    MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset), type);
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  } else if (instruction->IsIntConstant()
             || instruction->IsLongConstant()
             || instruction->IsNullConstant()) {
    if (location.IsRegister()) {
      // Move to GPR from constant
      GpuRegister dst = location.AsRegister<GpuRegister>();
      if (instruction->IsNullConstant() || instruction->IsIntConstant()) {
        __ LoadConst32(dst, GetInt32ValueOf(instruction->AsConstant()));
      } else {
        __ LoadConst64(dst, instruction->AsLongConstant()->GetValue());
      }
    } else {
      DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
      // Move to stack from constant
      if (location.IsStackSlot()) {
        __ LoadConst32(TMP, GetInt32ValueOf(instruction->AsConstant()));
        __ StoreToOffset(kStoreWord, TMP, SP, location.GetStackIndex());
      } else {
        __ LoadConst64(TMP, instruction->AsLongConstant()->GetValue());
        __ StoreToOffset(kStoreDoubleword, TMP, SP, location.GetStackIndex());
      }
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    MoveLocation(location, temp_location, type);
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    if (Primitive::Is64BitType(type)) {
      MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
    } else {
      MoveLocation(location, Location::StackSlot(stack_slot), type);
    }
  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    MoveLocation(location, locations->Out(), type);
  }
}

Location CodeGeneratorMIPS64::GetStackLocation(HLoadLocal* load) const {
  Primitive::Type type = load->GetType();

  switch (type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << type;
  }

  LOG(FATAL) << "Unreachable";
  return Location::NoLocation();
}

void CodeGeneratorMIPS64::MarkGCCard(GpuRegister object, GpuRegister value) {
  Label done;
  GpuRegister card = AT;
  GpuRegister temp = TMP;
  __ Beqzc(value, &done);
  __ LoadFromOffset(kLoadDoubleword,
                    card,
                    TR,
                    Thread::CardTableOffset<kMips64WordSize>().Int32Value());
  __ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
  __ Daddu(temp, card, temp);
  __ Sb(card, temp, 0);
  __ Bind(&done);
}
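
// A worked example of the card marking above, assuming ART's 128-byte cards
// (gc::accounting::CardTable::kCardShift == 7): for object == 0x10000000,
//   card = biased card-table base, loaded from the Thread object via TR
//   temp = 0x10000000 >> 7   // = 0x00200000, the object's card index
//   temp = card + temp       // address of the card byte
//   *temp = (uint8_t)card    // low byte of the biased base marks the card dirty
// Storing the low byte of the biased base avoids materializing a separate
// "dirty" constant, and the Beqzc above skips the whole sequence for null
// values, which never need a write barrier.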

void CodeGeneratorMIPS64::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const {
  // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
  blocked_core_registers_[ZERO] = true;
  blocked_core_registers_[K0] = true;
  blocked_core_registers_[K1] = true;
  blocked_core_registers_[GP] = true;
  blocked_core_registers_[SP] = true;
  blocked_core_registers_[RA] = true;

  // AT and TMP(T8) are used as temporary/scratch registers
  // (similar to how AT is used by MIPS assemblers).
  blocked_core_registers_[AT] = true;
  blocked_core_registers_[TMP] = true;
  blocked_fpu_registers_[FTMP] = true;

  // Reserve suspend and thread registers.
  blocked_core_registers_[S0] = true;
  blocked_core_registers_[TR] = true;

  // Reserve T9 for function calls
  blocked_core_registers_[T9] = true;

  // TODO: review; anything else?

  // TODO: make these two for's conditional on is_baseline once
  // all the issues with register saving/restoring are sorted out.
  for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
    blocked_core_registers_[kCoreCalleeSaves[i]] = true;
  }

  for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
    blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
  }
}

Location CodeGeneratorMIPS64::AllocateFreeRegister(Primitive::Type type) const {
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type)) {
    size_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFpuRegisters);
    return Location::FpuRegisterLocation(reg);
  } else {
    size_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfGpuRegisters);
    return Location::RegisterLocation(reg);
  }
}

size_t CodeGeneratorMIPS64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreToOffset(kStoreDoubleword, GpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFromOffset(kLoadDoubleword, GpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreFpuToOffset(kStoreDoubleword, FpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFpuFromOffset(kLoadDoubleword, FpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

void CodeGeneratorMIPS64::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << Mips64ManagedRegister::FromGpuRegister(GpuRegister(reg));
}

void CodeGeneratorMIPS64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << Mips64ManagedRegister::FromFpuRegister(FpuRegister(reg));
}

void CodeGeneratorMIPS64::InvokeRuntime(int32_t entry_point_offset,
                                        HInstruction* instruction,
                                        uint32_t dex_pc,
                                        SlowPathCode* slow_path) {
  ValidateInvokeRuntime(instruction, slow_path);
  // TODO: anything related to T9/GP/GOT/PIC/.so's?
  __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
  __ Jalr(T9);
  RecordPcInfo(instruction, dex_pc, slow_path);
}

void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
                                                                      GpuRegister class_reg) {
  __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
  __ LoadConst32(AT, mirror::Class::kStatusInitialized);
  __ Bltc(TMP, AT, slow_path->GetEntryLabel());
  // TODO: barrier needed?
  __ Bind(slow_path->GetExitLabel());
}

void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
  __ Sync(0);  // only stype 0 is supported
}

void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                          HBasicBlock* successor) {
  SuspendCheckSlowPathMIPS64* slow_path =
      new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS64(instruction, successor);
  codegen_->AddSlowPath(slow_path);

  __ LoadFromOffset(kLoadUnsignedHalfword,
                    TMP,
                    TR,
                    Thread::ThreadFlagsOffset<kMips64WordSize>().Int32Value());
  if (successor == nullptr) {
    __ Bnezc(TMP, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ Beqzc(TMP, codegen_->GetLabelOf(successor));
    __ B(slow_path->GetEntryLabel());
    // slow_path will return to GetLabelOf(successor).
  }
}
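
// The check above reads the 16-bit thread-flags field (hence the unsigned
// halfword load) from the Thread object in TR; any non-zero value means a
// suspend or checkpoint request is pending. The fast path therefore costs one
// load and one compact branch, and only the rare flagged case pays for the
// pTestSuspend runtime call in the slow path.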

InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph,
                                                               CodeGeneratorMIPS64* codegen)
    : HGraphVisitor(graph),
      assembler_(codegen->GetAssembler()),
      codegen_(codegen) {}

void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
  DCHECK_EQ(instruction->InputCount(), 2U);
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Primitive::Type type = instruction->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      HInstruction* right = instruction->InputAt(1);
      bool can_use_imm = false;
      if (right->IsConstant()) {
        int64_t imm = CodeGenerator::GetInt64ValueOf(right->AsConstant());
        if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
          can_use_imm = IsUint<16>(imm);
        } else if (instruction->IsAdd()) {
          can_use_imm = IsInt<16>(imm);
        } else {
          DCHECK(instruction->IsSub());
          can_use_imm = IsInt<16>(-imm);
        }
      }
      if (can_use_imm)
        locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
      else
        locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
  }
}

void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      Location rhs_location = locations->InAt(1);

      GpuRegister rhs_reg = ZERO;
      int64_t rhs_imm = 0;
      bool use_imm = rhs_location.IsConstant();
      if (use_imm) {
        rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
      } else {
        rhs_reg = rhs_location.AsRegister<GpuRegister>();
      }

      if (instruction->IsAnd()) {
        if (use_imm)
          __ Andi(dst, lhs, rhs_imm);
        else
          __ And(dst, lhs, rhs_reg);
      } else if (instruction->IsOr()) {
        if (use_imm)
          __ Ori(dst, lhs, rhs_imm);
        else
          __ Or(dst, lhs, rhs_reg);
      } else if (instruction->IsXor()) {
        if (use_imm)
          __ Xori(dst, lhs, rhs_imm);
        else
          __ Xor(dst, lhs, rhs_reg);
      } else if (instruction->IsAdd()) {
        if (type == Primitive::kPrimInt) {
          if (use_imm)
            __ Addiu(dst, lhs, rhs_imm);
          else
            __ Addu(dst, lhs, rhs_reg);
        } else {
          if (use_imm)
            __ Daddiu(dst, lhs, rhs_imm);
          else
            __ Daddu(dst, lhs, rhs_reg);
        }
      } else {
        DCHECK(instruction->IsSub());
        if (type == Primitive::kPrimInt) {
          if (use_imm)
            __ Addiu(dst, lhs, -rhs_imm);
          else
            __ Subu(dst, lhs, rhs_reg);
        } else {
          if (use_imm)
            __ Daddiu(dst, lhs, -rhs_imm);
          else
            __ Dsubu(dst, lhs, rhs_reg);
        }
      }
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (instruction->IsAdd()) {
        if (type == Primitive::kPrimFloat)
          __ AddS(dst, lhs, rhs);
        else
          __ AddD(dst, lhs, rhs);
      } else if (instruction->IsSub()) {
        if (type == Primitive::kPrimFloat)
          __ SubS(dst, lhs, rhs);
        else
          __ SubD(dst, lhs, rhs);
      } else {
        LOG(FATAL) << "Unexpected floating-point binary operation";
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected binary operation type " << type;
  }
}

void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift type " << type;
  }
}

void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
  LocationSummary* locations = instr->GetLocations();
  Primitive::Type type = instr->GetType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      Location rhs_location = locations->InAt(1);

      GpuRegister rhs_reg = ZERO;
      int64_t rhs_imm = 0;
      bool use_imm = rhs_location.IsConstant();
      if (use_imm) {
        rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
      } else {
        rhs_reg = rhs_location.AsRegister<GpuRegister>();
      }

      if (use_imm) {
        uint32_t shift_value = (type == Primitive::kPrimInt)
            ? static_cast<uint32_t>(rhs_imm & kMaxIntShiftValue)
            : static_cast<uint32_t>(rhs_imm & kMaxLongShiftValue);

        if (type == Primitive::kPrimInt) {
          if (instr->IsShl()) {
            __ Sll(dst, lhs, shift_value);
          } else if (instr->IsShr()) {
            __ Sra(dst, lhs, shift_value);
          } else {
            __ Srl(dst, lhs, shift_value);
          }
        } else {
          if (shift_value < 32) {
            if (instr->IsShl()) {
              __ Dsll(dst, lhs, shift_value);
            } else if (instr->IsShr()) {
              __ Dsra(dst, lhs, shift_value);
            } else {
              __ Dsrl(dst, lhs, shift_value);
            }
          } else {
            shift_value -= 32;
            if (instr->IsShl()) {
              __ Dsll32(dst, lhs, shift_value);
            } else if (instr->IsShr()) {
              __ Dsra32(dst, lhs, shift_value);
            } else {
              __ Dsrl32(dst, lhs, shift_value);
            }
          }
        }
      } else {
        if (type == Primitive::kPrimInt) {
          if (instr->IsShl()) {
            __ Sllv(dst, lhs, rhs_reg);
          } else if (instr->IsShr()) {
            __ Srav(dst, lhs, rhs_reg);
          } else {
            __ Srlv(dst, lhs, rhs_reg);
          }
        } else {
          if (instr->IsShl()) {
            __ Dsllv(dst, lhs, rhs_reg);
          } else if (instr->IsShr()) {
            __ Dsrav(dst, lhs, rhs_reg);
          } else {
            __ Dsrlv(dst, lhs, rhs_reg);
          }
        }
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift operation type " << type;
  }
}
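
// Note on the split above: MIPS64 encodes immediate shift amounts in a 5-bit
// field, so 64-bit shifts by 32..63 must use the *32 instruction variants,
// which implicitly add 32. For example, a constant long shift `x << 42`
// becomes Dsll32(dst, lhs, 10), since (42 & kMaxLongShiftValue) - 32 == 10.
// The masks (0x1f for int, 0x3f for long, assuming the usual values of
// kMaxIntShiftValue and kMaxLongShiftValue) mirror Java's shift-distance
// semantics, so oversized constants wrap instead of being rejected.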

void LocationsBuilderMIPS64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  Location index = locations->InAt(1);
  Primitive::Type type = instruction->GetType();

  switch (type) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
        __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
        __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      LoadOperandType load_type = (type == Primitive::kPrimNot) ? kLoadUnsignedWord : kLoadWord;
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(load_type, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(load_type, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFromOffset(kLoadDoubleword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFpuFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
        __ Daddu(TMP, obj, TMP);
        __ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}
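
// Addressing pattern used above: a constant index folds into the immediate,
// so for a long[] the access a[3] becomes a single
//   LoadFromOffset(kLoadDoubleword, out, obj, 3 * 8 + data_offset);
// while a register index is scaled by the element size and added to the base:
//   Dsll(TMP, index_reg, TIMES_8);
//   Daddu(TMP, obj, TMP);
//   LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
// References use kLoadUnsignedWord because heap references are 32-bit values
// (see the DCHECK in the kPrimNot case) that must be zero-extended into a
// 64-bit GPR.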
1415
1416void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
1417 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1418 locations->SetInAt(0, Location::RequiresRegister());
1419 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1420}
1421
1422void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction) {
1423 LocationSummary* locations = instruction->GetLocations();
1424 uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
1425 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1426 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1427 __ LoadFromOffset(kLoadWord, out, obj, offset);
1428 codegen_->MaybeRecordImplicitNullCheck(instruction);
1429}
1430
1431void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) {
1432 Primitive::Type value_type = instruction->GetComponentType();
1433 bool is_object = value_type == Primitive::kPrimNot;
1434 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
1435 instruction,
1436 is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
1437 if (is_object) {
1438 InvokeRuntimeCallingConvention calling_convention;
1439 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
1440 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
1441 locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
1442 } else {
1443 locations->SetInAt(0, Location::RequiresRegister());
1444 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1445 if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
1446 locations->SetInAt(2, Location::RequiresFpuRegister());
1447 } else {
1448 locations->SetInAt(2, Location::RequiresRegister());
1449 }
1450 }
1451}
1452
1453void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
1454 LocationSummary* locations = instruction->GetLocations();
1455 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1456 Location index = locations->InAt(1);
1457 Primitive::Type value_type = instruction->GetComponentType();
1458 bool needs_runtime_call = locations->WillCall();
1459 bool needs_write_barrier =
1460 CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
1461
1462 switch (value_type) {
1463 case Primitive::kPrimBoolean:
1464 case Primitive::kPrimByte: {
1465 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
1466 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1467 if (index.IsConstant()) {
1468 size_t offset =
1469 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
1470 __ StoreToOffset(kStoreByte, value, obj, offset);
1471 } else {
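        // Byte-sized elements need no index scaling (TIMES_1).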
        __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
        __ StoreToOffset(kStoreByte, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ StoreToOffset(kStoreHalfword, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
        __ Daddu(TMP, obj, TMP);
        __ StoreToOffset(kStoreHalfword, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      if (!needs_runtime_call) {
        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
        GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
        if (index.IsConstant()) {
          size_t offset =
              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
          __ StoreToOffset(kStoreWord, value, obj, offset);
        } else {
          DCHECK(index.IsRegister()) << index;
          __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
          __ Daddu(TMP, obj, TMP);
          __ StoreToOffset(kStoreWord, value, TMP, data_offset);
        }
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        if (needs_write_barrier) {
          DCHECK_EQ(value_type, Primitive::kPrimNot);
          codegen_->MarkGCCard(obj, value);
        }
      } else {
        DCHECK_EQ(value_type, Primitive::kPrimNot);
        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                                instruction,
                                instruction->GetDexPc(),
                                nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreToOffset(kStoreDoubleword, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
      DCHECK(locations->InAt(2).IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreFpuToOffset(kStoreWord, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
        __ Daddu(TMP, obj, TMP);
        __ StoreFpuToOffset(kStoreWord, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
      DCHECK(locations->InAt(2).IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreFpuToOffset(kStoreDoubleword, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ StoreFpuToOffset(kStoreDoubleword, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }

  // For ints and objects, the implicit null check was already recorded in the switch above.
  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}

void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  BoundsCheckSlowPathMIPS64* slow_path =
      new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);

  GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister length = locations->InAt(1).AsRegister<GpuRegister>();

  // length is limited by the maximum positive signed 32-bit integer.
  // Unsigned comparison of length and index checks for index < 0
  // and for length <= index simultaneously.
  // Mips R6 requires lhs != rhs for compact branches.
  if (index == length) {
    __ B(slow_path->GetEntryLabel());
  } else {
    __ Bgeuc(index, length, slow_path->GetEntryLabel());
  }
}

void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction,
      LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  // Note that TypeCheckSlowPathMIPS64 uses this register too.
  locations->AddTemp(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
  GpuRegister obj_cls = locations->GetTemp(0).AsRegister<GpuRegister>();

  SlowPathCodeMIPS64* slow_path =
      new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);

  // TODO: avoid this check if we know obj is not null.
  __ Beqzc(obj, slow_path->GetExitLabel());
  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadUnsignedWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
  __ Bnec(obj_cls, cls, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  if (check->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
  // We assume the class is not null.
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
      check->GetLoadClass(),
      check,
      check->GetDexPc(),
      true);
  codegen_->AddSlowPath(slow_path);
  GenerateClassInitializationCheck(slow_path,
                                   check->GetLocations()->InAt(0).AsRegister<GpuRegister>());
}

void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) {
  Primitive::Type in_type = compare->InputAt(0)->GetType();

  LocationSummary::CallKind call_kind = Primitive::IsFloatingPointType(in_type)
      ? LocationSummary::kCall
      : LocationSummary::kNoCall;

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, call_kind);

  switch (in_type) {
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
      locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type in_type = instruction->InputAt(0)->GetType();

  // 0 if: left == right
  // 1 if: left > right
  // -1 if: left < right
  switch (in_type) {
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
      // TODO: more efficient (direct) comparison with a constant
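      // TMP = (lhs < rhs), dst = (lhs > rhs); dst - TMP yields -1, 0 or +1.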
      __ Slt(TMP, lhs, rhs);
      __ Slt(dst, rhs, lhs);
      __ Subu(dst, dst, TMP);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      int32_t entry_point_offset;
      if (in_type == Primitive::kPrimFloat) {
        entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgFloat)
                                                     : QUICK_ENTRY_POINT(pCmplFloat);
      } else {
        entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgDouble)
                                                     : QUICK_ENTRY_POINT(pCmplDouble);
      }
      codegen_->InvokeRuntime(entry_point_offset, instruction, instruction->GetDexPc(), nullptr);
      break;
    }

    default:
      LOG(FATAL) << "Unimplemented compare type " << in_type;
  }
}

void LocationsBuilderMIPS64::VisitCondition(HCondition* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (instruction->NeedsMaterialization()) {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorMIPS64::VisitCondition(HCondition* instruction) {
  if (!instruction->NeedsMaterialization()) {
    return;
  }

  LocationSummary* locations = instruction->GetLocations();

  GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
  GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
  Location rhs_location = locations->InAt(1);

  GpuRegister rhs_reg = ZERO;
  int64_t rhs_imm = 0;
  bool use_imm = rhs_location.IsConstant();
  if (use_imm) {
    rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
  } else {
    rhs_reg = rhs_location.AsRegister<GpuRegister>();
  }

  IfCondition if_cond = instruction->GetCondition();

  switch (if_cond) {
    case kCondEQ:
    case kCondNE:
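      // XOR the operands; the result is zero iff lhs == rhs.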
      if (use_imm && IsUint<16>(rhs_imm)) {
        __ Xori(dst, lhs, rhs_imm);
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Xor(dst, lhs, rhs_reg);
      }
      if (if_cond == kCondEQ) {
        __ Sltiu(dst, dst, 1);
      } else {
        __ Sltu(dst, ZERO, dst);
      }
      break;

    case kCondLT:
    case kCondGE:
      if (use_imm && IsInt<16>(rhs_imm)) {
        __ Slti(dst, lhs, rhs_imm);
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Slt(dst, lhs, rhs_reg);
      }
      if (if_cond == kCondGE) {
        // Simulate lhs >= rhs via !(lhs < rhs) since there's
        // only the slt instruction but no sge.
        __ Xori(dst, dst, 1);
      }
      break;

    case kCondLE:
    case kCondGT:
      if (use_imm && IsInt<16>(rhs_imm + 1)) {
        // Simulate lhs <= rhs via lhs < rhs + 1.
        __ Slti(dst, lhs, rhs_imm + 1);
        if (if_cond == kCondGT) {
          // Simulate lhs > rhs via !(lhs <= rhs) since there's
          // only the slti instruction but no sgti.
          __ Xori(dst, dst, 1);
        }
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Slt(dst, rhs_reg, lhs);
        if (if_cond == kCondLE) {
          // Simulate lhs <= rhs via !(rhs < lhs) since there's
          // only the slt instruction but no sle.
          __ Xori(dst, dst, 1);
        }
      }
      break;
  }
}

void LocationsBuilderMIPS64::VisitDiv(HDiv* div) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
  switch (div->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}

void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
      if (type == Primitive::kPrimInt)
        __ DivR6(dst, lhs, rhs);
      else
        __ Ddiv(dst, lhs, rhs);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (type == Primitive::kPrimFloat)
        __ DivS(dst, lhs, rhs);
      else
        __ DivD(dst, lhs, rhs);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected div type " << type;
  }
}

void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  SlowPathCodeMIPS64* slow_path =
      new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);
  Location value = instruction->GetLocations()->InAt(0);

  Primitive::Type type = instruction->GetType();

  if ((type == Primitive::kPrimBoolean) || !Primitive::IsIntegralType(type)) {
    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
    return;
  }

  if (value.IsConstant()) {
    int64_t divisor = codegen_->GetInt64ValueOf(value.GetConstant()->AsConstant());
    if (divisor == 0) {
      __ B(slow_path->GetEntryLabel());
    } else {
      // A division by a non-zero constant is valid. We don't need to perform
      // any check, so simply fall through.
    }
  } else {
    __ Beqzc(value.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
  }
}

void LocationsBuilderMIPS64::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderMIPS64::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}

void LocationsBuilderMIPS64::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void InstructionCodeGeneratorMIPS64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
  DCHECK(!successor->IsExitBlock());
  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();
  HLoopInformation* info = block->GetLoopInformation();

  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }
  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  if (!codegen_->GoesToNextBlock(block, successor)) {
    __ B(codegen_->GetLabelOf(successor));
  }
}

void LocationsBuilderMIPS64::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitGoto(HGoto* got) {
  HandleGoto(got, got->GetSuccessor());
}

void LocationsBuilderMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
  try_boundary->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
  if (!successor->IsExitBlock()) {
    HandleGoto(try_boundary, successor);
  }
}

void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruction,
                                                           Label* true_target,
                                                           Label* false_target,
                                                           Label* always_true_target) {
  HInstruction* cond = instruction->InputAt(0);
  HCondition* condition = cond->AsCondition();

  if (cond->IsIntConstant()) {
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ B(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
    }
  } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
    // The condition instruction has been materialized, compare the output to 0.
    Location cond_val = instruction->GetLocations()->InAt(0);
    DCHECK(cond_val.IsRegister());
    __ Bnezc(cond_val.AsRegister<GpuRegister>(), true_target);
  } else {
    // The condition instruction has not been materialized, use its inputs as
    // the comparison and its condition as the branch condition.
    GpuRegister lhs = condition->GetLocations()->InAt(0).AsRegister<GpuRegister>();
    Location rhs_location = condition->GetLocations()->InAt(1);
    GpuRegister rhs_reg = ZERO;
    int32_t rhs_imm = 0;
    bool use_imm = rhs_location.IsConstant();
    if (use_imm) {
      rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
    } else {
      rhs_reg = rhs_location.AsRegister<GpuRegister>();
    }

    IfCondition if_cond = condition->GetCondition();
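    // R6 provides compact branches that compare a register directly against zero.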
    if (use_imm && rhs_imm == 0) {
      switch (if_cond) {
        case kCondEQ:
          __ Beqzc(lhs, true_target);
          break;
        case kCondNE:
          __ Bnezc(lhs, true_target);
          break;
        case kCondLT:
          __ Bltzc(lhs, true_target);
          break;
        case kCondGE:
          __ Bgezc(lhs, true_target);
          break;
        case kCondLE:
          __ Blezc(lhs, true_target);
          break;
        case kCondGT:
          __ Bgtzc(lhs, true_target);
          break;
      }
    } else {
      if (use_imm) {
        rhs_reg = TMP;
        __ LoadConst32(rhs_reg, rhs_imm);
      }
      // It looks like we can get here with lhs == rhs. Should that be possible at all?
      // Mips R6 requires lhs != rhs for compact branches.
      if (lhs == rhs_reg) {
        DCHECK(!use_imm);
        switch (if_cond) {
          case kCondEQ:
          case kCondGE:
          case kCondLE:
            // if lhs == rhs for a positive condition, then it is a branch
            __ B(true_target);
            break;
          case kCondNE:
          case kCondLT:
          case kCondGT:
            // if lhs == rhs for a negative condition, then it is a NOP
            break;
        }
      } else {
        switch (if_cond) {
          case kCondEQ:
            __ Beqc(lhs, rhs_reg, true_target);
            break;
          case kCondNE:
            __ Bnec(lhs, rhs_reg, true_target);
            break;
          case kCondLT:
            __ Bltc(lhs, rhs_reg, true_target);
            break;
          case kCondGE:
            __ Bgec(lhs, rhs_reg, true_target);
            break;
          case kCondLE:
            __ Bgec(rhs_reg, lhs, true_target);
            break;
          case kCondGT:
            __ Bltc(rhs_reg, lhs, true_target);
            break;
        }
      }
    }
  }
  if (false_target != nullptr) {
    __ B(false_target);
  }
}

void LocationsBuilderMIPS64::VisitIf(HIf* if_instr) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
  HInstruction* cond = if_instr->InputAt(0);
  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
  Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
  Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
  Label* always_true_target = true_target;
  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
                                if_instr->IfTrueSuccessor())) {
    always_true_target = nullptr;
  }
  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
                                if_instr->IfFalseSuccessor())) {
    false_target = nullptr;
  }
  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
}

void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
  LocationSummary* locations = new (GetGraph()->GetArena())
      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
  HInstruction* cond = deoptimize->InputAt(0);
  DCHECK(cond->IsCondition());
  if (cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
      DeoptimizationSlowPathMIPS64(deoptimize);
  codegen_->AddSlowPath(slow_path);
  Label* slow_path_entry = slow_path->GetEntryLabel();
  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
}

void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
                                            const FieldInfo& field_info ATTRIBUTE_UNUSED) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister());
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
                                                    const FieldInfo& field_info) {
  Primitive::Type type = field_info.GetFieldType();
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  LoadOperandType load_type = kLoadUnsignedByte;
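  // Pick the load size and signedness matching the field type.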
  switch (type) {
    case Primitive::kPrimBoolean:
      load_type = kLoadUnsignedByte;
      break;
    case Primitive::kPrimByte:
      load_type = kLoadSignedByte;
      break;
    case Primitive::kPrimShort:
      load_type = kLoadSignedHalfword;
      break;
    case Primitive::kPrimChar:
      load_type = kLoadUnsignedHalfword;
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      load_type = kLoadWord;
      break;
    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      load_type = kLoadDoubleword;
      break;
    case Primitive::kPrimNot:
      load_type = kLoadUnsignedWord;
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
      UNREACHABLE();
  }
  if (!Primitive::IsFloatingPointType(type)) {
    DCHECK(locations->Out().IsRegister());
    GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
    __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
  } else {
    DCHECK(locations->Out().IsFpuRegister());
    FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
    __ LoadFpuFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
  }

  codegen_->MaybeRecordImplicitNullCheck(instruction);
  // TODO: memory barrier?
}

void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
                                            const FieldInfo& field_info ATTRIBUTE_UNUSED) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
    locations->SetInAt(1, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
                                                    const FieldInfo& field_info) {
  Primitive::Type type = field_info.GetFieldType();
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  StoreOperandType store_type = kStoreByte;
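  // Stores only need the right access size; signedness is irrelevant.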
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      store_type = kStoreByte;
      break;
    case Primitive::kPrimShort:
    case Primitive::kPrimChar:
      store_type = kStoreHalfword;
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
    case Primitive::kPrimNot:
      store_type = kStoreWord;
      break;
    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      store_type = kStoreDoubleword;
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
      UNREACHABLE();
  }
  if (!Primitive::IsFloatingPointType(type)) {
    DCHECK(locations->InAt(1).IsRegister());
    GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
    __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
  } else {
    DCHECK(locations->InAt(1).IsFpuRegister());
    FpuRegister src = locations->InAt(1).AsFpuRegister<FpuRegister>();
    __ StoreFpuToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
  }

  codegen_->MaybeRecordImplicitNullCheck(instruction);
  // TODO: memory barriers?
  if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
    DCHECK(locations->InAt(1).IsRegister());
    GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
    codegen_->MarkGCCard(obj, src);
  }
}

void LocationsBuilderMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary::CallKind call_kind =
      instruction->IsClassFinal() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  // The output overlaps the inputs.
  // Note that TypeCheckSlowPathMIPS64 uses this register too.
  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2276}
2277
2278void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
2279 LocationSummary* locations = instruction->GetLocations();
2280 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2281 GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
2282 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
2283
2284 Label done;
2285
2286 // Return 0 if `obj` is null.
2287 // TODO: Avoid this check if we know `obj` is not null.
2288 __ Move(out, ZERO);
2289 __ Beqzc(obj, &done);
2290
2291 // Compare the class of `obj` with `cls`.
2292 __ LoadFromOffset(kLoadUnsignedWord, out, obj, mirror::Object::ClassOffset().Int32Value());
2293 if (instruction->IsClassFinal()) {
2294 // Classes must be equal for the instanceof to succeed.
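    // (out ^ cls) is zero iff the classes are equal; sltiu then maps zero to 1.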
    __ Xor(out, out, cls);
    __ Sltiu(out, out, 1);
  } else {
    // If the classes are not equal, we go into a slow path.
    DCHECK(locations->OnlyCallsOnSlowPath());
    SlowPathCodeMIPS64* slow_path =
        new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
    codegen_->AddSlowPath(slow_path);
    __ Bnec(out, cls, slow_path->GetEntryLabel());
    __ LoadConst32(out, 1);
    __ Bind(slow_path->GetExitLabel());
  }

  __ Bind(&done);
}

void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderMIPS64::VisitNullConstant(HNullConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderMIPS64::HandleInvoke(HInvoke* invoke) {
  InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
}

void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
  // The register T0 is required to be used for the hidden argument in
  // art_quick_imt_conflict_trampoline, so add the hidden argument.
  invoke->GetLocations()->AddTemp(Location::RegisterLocation(T0));
}

void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
      invoke->GetImtIndex() % mirror::Class::kImtSize, kMips64PointerSize).Uint32Value();
  Location receiver = invoke->GetLocations()->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);

  // Set the hidden argument.
  __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<GpuRegister>(),
                 invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadUnsignedWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadUnsignedWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
  }
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetImtEntryAt(method_offset);
  __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
  // T9 = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
  // T9();
  __ Jalr(T9);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void LocationsBuilderMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  // TODO: intrinsic function
  HandleInvoke(invoke);
}

void LocationsBuilderMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  // TODO: intrinsic function
  HandleInvoke(invoke);

  // While SetupBlockedRegisters() blocks registers S2-S8 due to their
  // clobbering somewhere else, reduce further register pressure by avoiding
  // allocation of a register for the current method pointer like on x86 baseline.
  // TODO: remove this once all the issues with register saving/restoring are
  // sorted out.
  LocationSummary* locations = invoke->GetLocations();
  Location location = locations->InAt(invoke->GetCurrentMethodInputIndex());
  if (location.IsUnallocated() && location.GetPolicy() == Location::kRequiresRegister) {
    locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::NoLocation());
  }
}

static bool TryGenerateIntrinsicCode(HInvoke* invoke,
                                     CodeGeneratorMIPS64* codegen ATTRIBUTE_UNUSED) {
  if (invoke->GetLocations()->Intrinsified()) {
    // TODO: intrinsic function
    return true;
  }
  return false;
}

void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
  // All registers are assumed to be correctly set up per the calling convention.

  Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
  switch (invoke->GetMethodLoadKind()) {
    case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
      // temp = thread->string_init_entrypoint
      __ LoadFromOffset(kLoadDoubleword,
                        temp.AsRegister<GpuRegister>(),
                        TR,
                        invoke->GetStringInitOffset());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
      callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
      __ LoadConst64(temp.AsRegister<GpuRegister>(), invoke->GetMethodAddress());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
      // TODO: Implement this type. (Needs literal support.) At the moment, the
      // CompilerDriver will not direct the backend to use this type for MIPS.
      LOG(FATAL) << "Unsupported!";
      UNREACHABLE();
    case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
      // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
      FALLTHROUGH_INTENDED;
    case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
      Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
      GpuRegister reg = temp.AsRegister<GpuRegister>();
      GpuRegister method_reg;
      if (current_method.IsRegister()) {
        method_reg = current_method.AsRegister<GpuRegister>();
      } else {
        // TODO: use the appropriate DCHECK() here if possible.
        // DCHECK(invoke->GetLocations()->Intrinsified());
        DCHECK(!current_method.IsValid());
        method_reg = reg;
        __ Ld(reg, SP, kCurrentMethodStackOffset);
      }

      // temp = temp->dex_cache_resolved_methods_;
      __ LoadFromOffset(kLoadDoubleword,
                        reg,
                        method_reg,
                        ArtMethod::DexCacheResolvedMethodsOffset(kMips64PointerSize).Int32Value());
      // temp = temp[index_in_cache]
      uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
      __ LoadFromOffset(kLoadDoubleword,
                        reg,
                        reg,
                        CodeGenerator::GetCachePointerOffset(index_in_cache));
      break;
    }
  }

  switch (invoke->GetCodePtrLocation()) {
    case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
      __ Jalr(&frame_entry_label_, T9);
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
      // T9 = invoke->GetDirectCodePtr();
      __ LoadConst64(T9, invoke->GetDirectCodePtr());
      // T9()
      __ Jalr(T9);
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
      // TODO: Implement kCallPCRelative. For the moment, we fall back to kMethodCode.
      FALLTHROUGH_INTENDED;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
      // TODO: Implement kDirectCodeFixup. For the moment, we fall back to kMethodCode.
      FALLTHROUGH_INTENDED;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
      // T9 = callee_method->entry_point_from_quick_compiled_code_;
      __ LoadFromOffset(kLoadDoubleword,
                        T9,
                        callee_method.AsRegister<GpuRegister>(),
                        ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                            kMips64WordSize).Int32Value());
      // T9()
      __ Jalr(T9);
      break;
  }
  DCHECK(!IsLeafMethod());
}

void InstructionCodeGeneratorMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  LocationSummary* locations = invoke->GetLocations();
  codegen_->GenerateStaticOrDirectCall(invoke,
                                       locations->HasTemps()
                                           ? locations->GetTemp(0)
                                           : Location::NoLocation());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  // TODO: Try to generate intrinsics code.
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
  size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
      invoke->GetVTableIndex(), kMips64PointerSize).SizeValue();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);

  // temp = object->GetClass();
  DCHECK(receiver.IsRegister());
  __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
  // T9 = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
  // T9();
  __ Jalr(T9);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
  LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
                                                              : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
  LocationSummary* locations = cls->GetLocations();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
  if (cls->IsReferrersClass()) {
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
                      ArtMethod::DeclaringClassOffset().Int32Value());
  } else {
    DCHECK(cls->CanCallRuntime());
    __ LoadFromOffset(kLoadDoubleword, out, current_method,
                      ArtMethod::DexCacheResolvedTypesOffset(kMips64PointerSize).Int32Value());
    __ LoadFromOffset(
        kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
    // TODO: We will need a read barrier here.
    SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
        cls,
        cls,
        cls->GetDexPc(),
        cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    __ Beqzc(out, slow_path->GetEntryLabel());
    if (cls->MustGenerateClinitCheck()) {
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}

static int32_t GetExceptionTlsOffset() {
  return Thread::ExceptionOffset<kMips64WordSize>().Int32Value();
}

void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) {
  GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>();
  __ LoadFromOffset(kLoadUnsignedWord, out, TR, GetExceptionTlsOffset());
}

void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) {
  new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
}

void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
  __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
}

void LocationsBuilderMIPS64::VisitLoadLocal(HLoadLocal* load) {
  load->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
  // Nothing to do, this is driven by the code generator.
}

void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = load->GetLocations();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
  __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
                    ArtMethod::DeclaringClassOffset().Int32Value());
  __ LoadFromOffset(kLoadDoubleword, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
  __ LoadFromOffset(
      kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
  // TODO: We will need a read barrier here.
  __ Beqzc(out, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderMIPS64::VisitLocal(HLocal* local) {
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitLocal(HLocal* local) {
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}

void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
  codegen_->InvokeRuntime(instruction->IsEnter()
                              ? QUICK_ENTRY_POINT(pLockObject)
                              : QUICK_ENTRY_POINT(pUnlockObject),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
}

void LocationsBuilderMIPS64::VisitMul(HMul* mul) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

void InstructionCodeGeneratorMIPS64::VisitMul(HMul* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
      if (type == Primitive::kPrimInt)
        __ MulR6(dst, lhs, rhs);
      else
        __ Dmul(dst, lhs, rhs);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (type == Primitive::kPrimFloat)
        __ MulS(dst, lhs, rhs);
      else
        __ MulD(dst, lhs, rhs);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected mul type " << type;
  }
}

void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

void InstructionCodeGeneratorMIPS64::VisitNeg(HNeg* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
      if (type == Primitive::kPrimInt)
        __ Subu(dst, ZERO, src);
      else
        __ Dsubu(dst, ZERO, src);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
      if (type == Primitive::kPrimFloat)
        __ NegS(dst, src);
      else
        __ NegD(dst, src);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected neg type " << type;
  }
}

void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
}

void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  // Move a uint16_t value to a register.
  __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(
      GetThreadOffset<kMips64WordSize>(instruction->GetEntrypoint()).Int32Value(),
      instruction,
      instruction->GetDexPc(),
      nullptr);
  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
}

void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}

void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  // Move a uint16_t value to a register.
  __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(
      GetThreadOffset<kMips64WordSize>(instruction->GetEntrypoint()).Int32Value(),
      instruction,
      instruction->GetDexPc(),
      nullptr);
  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
}

void LocationsBuilderMIPS64::VisitNot(HNot* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorMIPS64::VisitNot(HNot* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
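      // nor(src, ZERO) computes the bitwise complement of src.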
      __ Nor(dst, src, ZERO);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
  }
}

void LocationsBuilderMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  __ Xori(locations->Out().AsRegister<GpuRegister>(),
          locations->InAt(0).AsRegister<GpuRegister>(),
          1);
}

void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

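  // Loading from a null object faults; the runtime's fault handler turns the
  // fault into a NullPointerException.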
  __ Lw(ZERO, obj.AsRegister<GpuRegister>(), 0);
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}

void InstructionCodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);

  Location obj = instruction->GetLocations()->InAt(0);

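  // BEQZC is an R6 compact branch (no delay slot): branch to the slow path
  // if the reference compares equal to zero, i.e. is null.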
  __ Beqzc(obj.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
}

void InstructionCodeGeneratorMIPS64::VisitNullCheck(HNullCheck* instruction) {
  if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
    GenerateImplicitNullCheck(instruction);
  } else {
    GenerateExplicitNullCheck(instruction);
  }
}

void LocationsBuilderMIPS64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instruction) {
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}

void LocationsBuilderMIPS64::VisitParameterValue(HParameterValue* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
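  // Stack offsets from the calling convention are relative to the caller's
  // frame; rebase them by this method's frame size so the same slot can be
  // addressed from the callee's stack pointer.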
  if (location.IsStackSlot()) {
    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  } else if (location.IsDoubleStackSlot()) {
    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  }
  locations->SetOut(location);
}

void InstructionCodeGeneratorMIPS64::VisitParameterValue(HParameterValue* instruction
                                                         ATTRIBUTE_UNUSED) {
  // Nothing to do, the parameter is already at its location.
}

void LocationsBuilderMIPS64::VisitCurrentMethod(HCurrentMethod* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}

void InstructionCodeGeneratorMIPS64::VisitCurrentMethod(HCurrentMethod* instruction
                                                        ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}

void LocationsBuilderMIPS64::VisitPhi(HPhi* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    locations->SetInAt(i, Location::Any());
  }
  locations->SetOut(Location::Any());
}

void InstructionCodeGeneratorMIPS64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS64::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();
  LocationSummary::CallKind call_kind =
      Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
      locations->SetOut(calling_convention.GetReturnLocation(type));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitRem(HRem* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
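      // MIPS64R6 has native remainder instructions: MOD for 32-bit and DMOD
      // for 64-bit operands; no divide/multiply/subtract sequence is needed.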
      if (type == Primitive::kPrimInt) {
        __ ModR6(dst, lhs, rhs);
      } else {
        __ Dmod(dst, lhs, rhs);
      }
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
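      // There is no FPU remainder instruction; call the fmodf/fmod runtime
      // entry points. The locations builder above pinned the inputs and the
      // result to the runtime calling convention.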
      int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
                                                             : QUICK_ENTRY_POINT(pFmod);
      codegen_->InvokeRuntime(entry_offset, instruction, instruction->GetDexPc(), nullptr);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void LocationsBuilderMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  memory_barrier->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}

void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
  Primitive::Type return_type = ret->InputAt(0)->GetType();
  locations->SetInAt(0, Mips64ReturnLocation(return_type));
}

void InstructionCodeGeneratorMIPS64::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS64::VisitReturnVoid(HReturnVoid* ret) {
  ret->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorMIPS64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderMIPS64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorMIPS64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderMIPS64::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
  Primitive::Type field_type = store->InputAt(1)->GetType();
  switch (field_type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unimplemented local type " << field_type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}

void LocationsBuilderMIPS64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}

void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}

void LocationsBuilderMIPS64::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
  // Nothing to do, this is driven by the code generator.
}

void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorMIPS64::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}

void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type input_type = conversion->GetInputType();
  Primitive::Type result_type = conversion->GetResultType();
  DCHECK_NE(input_type, result_type);

  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
  }

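  // Long-to-FP and FP-to-integer conversions are implemented as runtime
  // calls; all other conversions are generated inline.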
  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  if ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
      (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type))) {
    call_kind = LocationSummary::kCall;
  }

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);

  if (call_kind == LocationSummary::kNoCall) {
    if (Primitive::IsFloatingPointType(input_type)) {
      locations->SetInAt(0, Location::RequiresFpuRegister());
    } else {
      locations->SetInAt(0, Location::RequiresRegister());
    }

    if (Primitive::IsFloatingPointType(result_type)) {
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
    } else {
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
    }
  } else {
    InvokeRuntimeCallingConvention calling_convention;

    if (Primitive::IsFloatingPointType(input_type)) {
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
    } else {
      locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
    }

    locations->SetOut(calling_convention.GetReturnLocation(result_type));
  }
}

void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();

  DCHECK_NE(input_type, result_type);

  if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
    GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
    GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();

    switch (result_type) {
      case Primitive::kPrimChar:
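        // char is the only unsigned primitive type; zero-extend by masking
        // off everything above the low 16 bits.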
        __ Andi(dst, src, 0xFFFF);
        break;
      case Primitive::kPrimByte:
        // long is never converted into types narrower than int directly,
        // so SEB and SEH can be used without ever causing unpredictable results
        // on 64-bit inputs
        DCHECK(input_type != Primitive::kPrimLong);
        __ Seb(dst, src);
        break;
      case Primitive::kPrimShort:
        // long is never converted into types narrower than int directly,
        // so SEB and SEH can be used without ever causing unpredictable results
        // on 64-bit inputs
        DCHECK(input_type != Primitive::kPrimLong);
        __ Seh(dst, src);
        break;
      case Primitive::kPrimInt:
      case Primitive::kPrimLong:
        // Sign-extend 32-bit int into bits 32 through 63 for
        // int-to-long and long-to-int conversions
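        // On MIPS64, SLL operates on the low word and sign-extends its
        // 32-bit result into the full 64-bit register, so a shift by zero
        // is the canonical sign-extension idiom.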
        __ Sll(dst, src, 0);
        break;

      default:
        LOG(FATAL) << "Unexpected type conversion from " << input_type
                   << " to " << result_type;
    }
  } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
    if (input_type != Primitive::kPrimLong) {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
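      // Move the integer bits into the FPU scratch register, then convert
      // in place: CVT.S.W for a float result, CVT.D.W for a double.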
      __ Mtc1(src, FTMP);
      if (result_type == Primitive::kPrimFloat) {
        __ Cvtsw(dst, FTMP);
      } else {
        __ Cvtdw(dst, FTMP);
      }
    } else {
      int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
                                                                    : QUICK_ENTRY_POINT(pL2d);
      codegen_->InvokeRuntime(entry_offset,
                              conversion,
                              conversion->GetDexPc(),
                              nullptr);
    }
  } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
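    // FP-to-integer conversions are performed in the runtime; Java semantics
    // require the result to be rounded toward zero (truncated).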
    int32_t entry_offset;
    if (result_type != Primitive::kPrimLong) {
      entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2iz)
                                                           : QUICK_ENTRY_POINT(pD2iz);
    } else {
      entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
                                                           : QUICK_ENTRY_POINT(pD2l);
    }
    codegen_->InvokeRuntime(entry_offset,
                            conversion,
                            conversion->GetDexPc(),
                            nullptr);
  } else if (Primitive::IsFloatingPointType(result_type) &&
             Primitive::IsFloatingPointType(input_type)) {
    FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
    FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
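    // CVT.S.D narrows double to float; CVT.D.S widens float to double
    // (the widening direction is always exact).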
    if (result_type == Primitive::kPrimFloat) {
      __ Cvtsd(dst, src);
    } else {
      __ Cvtds(dst, src);
    }
  } else {
    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
               << " to " << result_type;
  }
}

void LocationsBuilderMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS64::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitFakeString(HFakeString* instruction) {
  DCHECK(codegen_->IsBaseline());
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
}

void InstructionCodeGeneratorMIPS64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
  DCHECK(codegen_->IsBaseline());
  // Will be generated at use site.
}

}  // namespace mips64
}  // namespace art