/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_mips64.h"

#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_mips64.h"
#include "art_method.h"
#include "code_generator_utils.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "offsets.h"
#include "thread.h"
#include "utils/mips64/assembler_mips64.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"

namespace art {
namespace mips64 {

static constexpr int kCurrentMethodStackOffset = 0;
static constexpr GpuRegister kMethodRegisterArgument = A0;

// We need extra temporary/scratch registers (in addition to AT) in some cases.
static constexpr GpuRegister TMP = T8;
static constexpr FpuRegister FTMP = F8;

// ART Thread Register.
static constexpr GpuRegister TR = S1;

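// Integral and reference results come back in V0 and floating-point results in F0,
// per the MIPS64 calling convention.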
Location Mips64ReturnLocation(Primitive::Type return_type) {
  switch (return_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      return Location::RegisterLocation(V0);

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      return Location::FpuRegisterLocation(F0);

    case Primitive::kPrimVoid:
      return Location();
  }
  UNREACHABLE();
}

Location InvokeDexCallingConventionVisitorMIPS64::GetReturnLocation(Primitive::Type type) const {
  return Mips64ReturnLocation(type);
}

Location InvokeDexCallingConventionVisitorMIPS64::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}

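// Computes the location of the next method argument. Note that GPR and FPR argument
// slots advance in lock-step (gp_index_ and float_index_ are bumped together): in the
// MIPS64 N64 convention an argument occupies a register slot position regardless of
// which register file it lands in.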
Location InvokeDexCallingConventionVisitorMIPS64::GetNextLocation(Primitive::Type type) {
  Location next_location;
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unexpected parameter type " << type;
  }

  if (Primitive::IsFloatingPointType(type) &&
      (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
    next_location = Location::FpuRegisterLocation(
        calling_convention.GetFpuRegisterAt(float_index_++));
    gp_index_++;
  } else if (!Primitive::IsFloatingPointType(type) &&
             (gp_index_ < calling_convention.GetNumberOfRegisters())) {
    next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index_++));
    float_index_++;
  } else {
    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
    next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                                 : Location::StackSlot(stack_offset);
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;

  // TODO: review

  // TODO: shouldn't we use a whole machine word per argument on the stack?
  // Implicit 4-byte method pointer (and such) will cause misalignment.

  return next_location;
}

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
  return Mips64ReturnLocation(type);
}

#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()

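// Slow paths: out-of-line code reached from fast-path branches. Each path binds its
// entry label, saves live registers when the exception may be caught in this method,
// moves operands into the runtime calling convention and calls the corresponding
// Quick entrypoint via InvokeRuntime().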
class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(0),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimInt,
                               locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimInt);
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }

 private:
  HBoundsCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
};

class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
};

class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  LoadClassSlowPathMIPS64(HLoadClass* cls,
                          HInstruction* at,
                          uint32_t dex_pc,
                          bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                            : QUICK_ENTRY_POINT(pInitializeType);
    mips64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = at_->GetType();
      mips64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS64"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
};

class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit LoadStringSlowPathMIPS64(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
    Primitive::Type type = instruction_->GetType();
    mips64_codegen->MoveLocation(locations->Out(),
                                 calling_convention.GetReturnLocation(type),
                                 type);

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
};

class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : instruction_(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }

 private:
  HNullCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
};

class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(mips64_codegen->GetLabelOf(successor_));
    }
  }

  Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS64);
};

class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit TypeCheckSlowPathMIPS64(HInstruction* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
                                                        : locations->Out();
    uint32_t dex_pc = instruction_->GetDexPc();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimNot,
                               object_class,
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
                                    instruction_,
                                    dex_pc,
                                    this);
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial,
                           uint32_t,
                           const mirror::Class*,
                           const mirror::Class*>();
    } else {
      DCHECK(instruction_->IsCheckCast());
      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this);
      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; }

 private:
  HInstruction* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
};

class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit DeoptimizationSlowPathMIPS64(HInstruction* instruction)
      : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
};

CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
                                         const Mips64InstructionSetFeatures& isa_features,
                                         const CompilerOptions& compiler_options,
                                         OptimizingCompilerStats* stats)
    : CodeGenerator(graph,
                    kNumberOfGpuRegisters,
                    kNumberOfFpuRegisters,
                    0,  // kNumberOfRegisterPairs
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options,
                    stats),
      block_labels_(graph->GetArena(), 0),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      isa_features_(isa_features) {
  // Save RA (containing the return address) to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(RA));
}

#undef __
#define __ down_cast<Mips64Assembler*>(GetAssembler())->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()

void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
  CodeGenerator::Finalize(allocator);
}

Mips64Assembler* ParallelMoveResolverMIPS64::GetAssembler() const {
  return codegen_->GetAssembler();
}

void ParallelMoveResolverMIPS64::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
}

void ParallelMoveResolverMIPS64::EmitSwap(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->SwapLocations(move->GetDestination(), move->GetSource(), move->GetType());
}

void ParallelMoveResolverMIPS64::RestoreScratch(int reg) {
  // Pop reg
  __ Ld(GpuRegister(reg), SP, 0);
  __ DecreaseFrameSize(kMips64WordSize);
}

void ParallelMoveResolverMIPS64::SpillScratch(int reg) {
  // Push reg
  __ IncreaseFrameSize(kMips64WordSize);
  __ Sd(GpuRegister(reg), SP, 0);
}

void ParallelMoveResolverMIPS64::Exchange(int index1, int index2, bool double_slot) {
  LoadOperandType load_type = double_slot ? kLoadDoubleword : kLoadWord;
  StoreOperandType store_type = double_slot ? kStoreDoubleword : kStoreWord;
  // Allocate a scratch register other than TMP, if available.
  // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
  // automatically unspilled when the scratch scope object is destroyed).
  ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
  // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
  int stack_offset = ensure_scratch.IsSpilled() ? kMips64WordSize : 0;
  __ LoadFromOffset(load_type,
                    GpuRegister(ensure_scratch.GetRegister()),
                    SP,
                    index1 + stack_offset);
  __ LoadFromOffset(load_type,
                    TMP,
                    SP,
                    index2 + stack_offset);
  __ StoreToOffset(store_type,
                   GpuRegister(ensure_scratch.GetRegister()),
                   SP,
                   index2 + stack_offset);
  __ StoreToOffset(store_type, TMP, SP, index1 + stack_offset);
}

static dwarf::Reg DWARFReg(GpuRegister reg) {
  return dwarf::Reg::Mips64Core(static_cast<int>(reg));
}

// TODO: mapping of floating-point registers to DWARF

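// Sketch of the frame this entry code builds (stack grows down; exact offsets depend
// on the method and on which callee saves were actually allocated):
//
//   +--------------------------+  <- SP on entry
//   | core callee saves        |  (stored first, at the higher offsets)
//   | FPU callee saves         |
//   +--------------------------+
//   | rest of the frame        |
//   | current ArtMethod*       |  <- SP, kCurrentMethodStackOffset == 0
//   +--------------------------+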
void CodeGeneratorMIPS64::GenerateFrameEntry() {
  __ Bind(&frame_entry_label_);

  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod();

  if (do_overflow_check) {
    __ LoadFromOffset(kLoadWord,
                      ZERO,
                      SP,
                      -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64)));
    RecordPcInfo(nullptr, 0);
  }

  // TODO: anything related to T9/GP/GOT/PIC/.so's?

  if (HasEmptyFrame()) {
    return;
  }

  // Make sure the frame size isn't unreasonably large. Per the various APIs
  // it looks like it should always be less than 2GB in size, which allows
  // us to use 32-bit signed offsets from the stack pointer.
  if (GetFrameSize() > 0x7FFFFFFF)
    LOG(FATAL) << "Stack frame larger than 2GB";

  // Spill callee-saved registers.
  // Note that their cumulative size is small and they can be indexed using
  // 16-bit offsets.

  // TODO: increment/decrement SP in one step instead of two or remove this comment.

  uint32_t ofs = FrameEntrySpillSize();
  __ IncreaseFrameSize(ofs);

  for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
    GpuRegister reg = kCoreCalleeSaves[i];
    if (allocated_registers_.ContainsCoreRegister(reg)) {
      ofs -= kMips64WordSize;
      __ Sd(reg, SP, ofs);
      __ cfi().RelOffset(DWARFReg(reg), ofs);
    }
  }

  for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
    FpuRegister reg = kFpuCalleeSaves[i];
    if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
      ofs -= kMips64WordSize;
      __ Sdc1(reg, SP, ofs);
      // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
    }
  }

  // Allocate the rest of the frame and store the current method pointer
  // at its end.

  __ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());

  static_assert(IsInt<16>(kCurrentMethodStackOffset),
                "kCurrentMethodStackOffset must fit into int16_t");
  __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
}

void CodeGeneratorMIPS64::GenerateFrameExit() {
  __ cfi().RememberState();

  // TODO: anything related to T9/GP/GOT/PIC/.so's?

  if (!HasEmptyFrame()) {
    // Deallocate the rest of the frame.

    __ DecreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());

    // Restore callee-saved registers.
    // Note that their cumulative size is small and they can be indexed using
    // 16-bit offsets.

    // TODO: increment/decrement SP in one step instead of two or remove this comment.

    uint32_t ofs = 0;

    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
      FpuRegister reg = kFpuCalleeSaves[i];
      if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
        __ Ldc1(reg, SP, ofs);
        ofs += kMips64WordSize;
        // TODO: __ cfi().Restore(DWARFReg(reg));
      }
    }

    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
      GpuRegister reg = kCoreCalleeSaves[i];
      if (allocated_registers_.ContainsCoreRegister(reg)) {
        __ Ld(reg, SP, ofs);
        ofs += kMips64WordSize;
        __ cfi().Restore(DWARFReg(reg));
      }
    }

    DCHECK_EQ(ofs, FrameEntrySpillSize());
    __ DecreaseFrameSize(ofs);
  }

  __ Jr(RA);

  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}

void CodeGeneratorMIPS64::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

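// Generic move between any two locations (GPR, FPR, stack slot or constant). The type
// selects 32-bit vs. 64-bit loads/stores; AT and TMP serve as intermediaries when no
// direct move exists (e.g. constant to FPR, stack to stack).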
void CodeGeneratorMIPS64::MoveLocation(Location destination,
                                       Location source,
                                       Primitive::Type type) {
  if (source.Equals(destination)) {
    return;
  }

  // A valid move can always be inferred from the destination and source
  // locations. When moving from and to a register, the argument type can be
  // used to generate 32bit instead of 64bit moves.
  bool unspecified_type = (type == Primitive::kPrimVoid);
  DCHECK_EQ(unspecified_type, false);

  if (destination.IsRegister() || destination.IsFpuRegister()) {
    if (unspecified_type) {
      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
      if (source.IsStackSlot() ||
          (src_cst != nullptr && (src_cst->IsIntConstant()
                                  || src_cst->IsFloatConstant()
                                  || src_cst->IsNullConstant()))) {
        // For stack slots and 32bit constants, a 32bit type is appropriate.
        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
      } else {
        // If the source is a double stack slot or a 64bit constant, a 64bit
        // type is appropriate. Else the source is a register, and since the
        // type has not been specified, we choose a 64bit type to force a 64bit
        // move.
        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
      }
    }
    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
           (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
      // Move to GPR/FPR from stack
      LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
      if (Primitive::IsFloatingPointType(type)) {
        __ LoadFpuFromOffset(load_type,
                             destination.AsFpuRegister<FpuRegister>(),
                             SP,
                             source.GetStackIndex());
      } else {
        // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
        __ LoadFromOffset(load_type,
                          destination.AsRegister<GpuRegister>(),
                          SP,
                          source.GetStackIndex());
      }
    } else if (source.IsConstant()) {
      // Move to GPR/FPR from constant
      GpuRegister gpr = AT;
      if (!Primitive::IsFloatingPointType(type)) {
        gpr = destination.AsRegister<GpuRegister>();
      }
      if (type == Primitive::kPrimInt || type == Primitive::kPrimFloat) {
        __ LoadConst32(gpr, GetInt32ValueOf(source.GetConstant()->AsConstant()));
      } else {
        __ LoadConst64(gpr, GetInt64ValueOf(source.GetConstant()->AsConstant()));
      }
      if (type == Primitive::kPrimFloat) {
        __ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
      } else if (type == Primitive::kPrimDouble) {
        __ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
      }
    } else {
      if (destination.IsRegister()) {
        // Move to GPR from GPR
        __ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
      } else {
        // Move to FPR from FPR
        if (type == Primitive::kPrimFloat) {
          __ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
        } else {
          DCHECK_EQ(type, Primitive::kPrimDouble);
          __ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
        }
      }
    }
  } else {  // The destination is not a register. It must be a stack slot.
    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
          type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
             (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
      // Move to stack from GPR/FPR
      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
      if (source.IsRegister()) {
        __ StoreToOffset(store_type,
                         source.AsRegister<GpuRegister>(),
                         SP,
                         destination.GetStackIndex());
      } else {
        __ StoreFpuToOffset(store_type,
                            source.AsFpuRegister<FpuRegister>(),
                            SP,
                            destination.GetStackIndex());
      }
    } else if (source.IsConstant()) {
      // Move to stack from constant
      HConstant* src_cst = source.GetConstant();
      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
      if (destination.IsStackSlot()) {
        __ LoadConst32(TMP, GetInt32ValueOf(src_cst->AsConstant()));
      } else {
        __ LoadConst64(TMP, GetInt64ValueOf(src_cst->AsConstant()));
      }
      __ StoreToOffset(store_type, TMP, SP, destination.GetStackIndex());
    } else {
      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
      DCHECK_EQ(source.IsDoubleStackSlot(), destination.IsDoubleStackSlot());
      // Move to stack from stack
      if (destination.IsStackSlot()) {
        __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
        __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
      } else {
        __ LoadFromOffset(kLoadDoubleword, TMP, SP, source.GetStackIndex());
        __ StoreToOffset(kStoreDoubleword, TMP, SP, destination.GetStackIndex());
      }
    }
  }
}

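// Swaps two locations using TMP as scratch (plus AT for the FPR/FPR case). A
// register<->stack swap reuses the value loaded into TMP; stack<->stack swaps are
// delegated to the parallel move resolver's Exchange().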
void CodeGeneratorMIPS64::SwapLocations(Location loc1,
                                        Location loc2,
                                        Primitive::Type type ATTRIBUTE_UNUSED) {
  DCHECK(!loc1.IsConstant());
  DCHECK(!loc2.IsConstant());

  if (loc1.Equals(loc2)) {
    return;
  }

  bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
  bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
  bool is_fp_reg1 = loc1.IsFpuRegister();
  bool is_fp_reg2 = loc2.IsFpuRegister();

  if (loc2.IsRegister() && loc1.IsRegister()) {
    // Swap 2 GPRs
    GpuRegister r1 = loc1.AsRegister<GpuRegister>();
    GpuRegister r2 = loc2.AsRegister<GpuRegister>();
    __ Move(TMP, r2);
    __ Move(r2, r1);
    __ Move(r1, TMP);
  } else if (is_fp_reg2 && is_fp_reg1) {
    // Swap 2 FPRs
    FpuRegister r1 = loc1.AsFpuRegister<FpuRegister>();
    FpuRegister r2 = loc2.AsFpuRegister<FpuRegister>();
    // TODO: Can MOV.S/MOV.D be used here to save one instruction?
    // Need to distinguish float from double, right?
    __ Dmfc1(TMP, r2);
    __ Dmfc1(AT, r1);
    __ Dmtc1(TMP, r1);
    __ Dmtc1(AT, r2);
  } else if (is_slot1 != is_slot2) {
    // Swap GPR/FPR and stack slot
    Location reg_loc = is_slot1 ? loc2 : loc1;
    Location mem_loc = is_slot1 ? loc1 : loc2;
    LoadOperandType load_type = mem_loc.IsStackSlot() ? kLoadWord : kLoadDoubleword;
    StoreOperandType store_type = mem_loc.IsStackSlot() ? kStoreWord : kStoreDoubleword;
    // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
    __ LoadFromOffset(load_type, TMP, SP, mem_loc.GetStackIndex());
    if (reg_loc.IsFpuRegister()) {
      __ StoreFpuToOffset(store_type,
                          reg_loc.AsFpuRegister<FpuRegister>(),
                          SP,
                          mem_loc.GetStackIndex());
      // TODO: review this MTC1/DMTC1 move
      if (mem_loc.IsStackSlot()) {
        __ Mtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
      } else {
        DCHECK(mem_loc.IsDoubleStackSlot());
        __ Dmtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
      }
    } else {
      __ StoreToOffset(store_type, reg_loc.AsRegister<GpuRegister>(), SP, mem_loc.GetStackIndex());
      __ Move(reg_loc.AsRegister<GpuRegister>(), TMP);
    }
  } else if (is_slot1 && is_slot2) {
    move_resolver_.Exchange(loc1.GetStackIndex(),
                            loc2.GetStackIndex(),
                            loc1.IsDoubleStackSlot());
  } else {
    LOG(FATAL) << "Unimplemented swap between locations " << loc1 << " and " << loc2;
  }
}

void CodeGeneratorMIPS64::Move(HInstruction* instruction,
                               Location location,
                               HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type type = instruction->GetType();
  DCHECK_NE(type, Primitive::kPrimVoid);

  if (instruction->IsCurrentMethod()) {
    MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset), type);
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  } else if (instruction->IsIntConstant()
             || instruction->IsLongConstant()
             || instruction->IsNullConstant()) {
    if (location.IsRegister()) {
      // Move to GPR from constant
      GpuRegister dst = location.AsRegister<GpuRegister>();
      if (instruction->IsNullConstant() || instruction->IsIntConstant()) {
        __ LoadConst32(dst, GetInt32ValueOf(instruction->AsConstant()));
      } else {
        __ LoadConst64(dst, instruction->AsLongConstant()->GetValue());
      }
    } else {
      DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
      // Move to stack from constant
      if (location.IsStackSlot()) {
        __ LoadConst32(TMP, GetInt32ValueOf(instruction->AsConstant()));
        __ StoreToOffset(kStoreWord, TMP, SP, location.GetStackIndex());
      } else {
        __ LoadConst64(TMP, instruction->AsLongConstant()->GetValue());
        __ StoreToOffset(kStoreDoubleword, TMP, SP, location.GetStackIndex());
      }
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    MoveLocation(location, temp_location, type);
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    if (Primitive::Is64BitType(type)) {
      MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
    } else {
      MoveLocation(location, Location::StackSlot(stack_slot), type);
    }
  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    MoveLocation(location, locations->Out(), type);
  }
}

void CodeGeneratorMIPS64::MoveConstant(Location location, int32_t value) {
  DCHECK(location.IsRegister());
  __ LoadConst32(location.AsRegister<GpuRegister>(), value);
}

Location CodeGeneratorMIPS64::GetStackLocation(HLoadLocal* load) const {
  Primitive::Type type = load->GetType();

  switch (type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << type;
  }

  LOG(FATAL) << "Unreachable";
  return Location::NoLocation();
}

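// Marks the GC card covering `object` when the stored `value` is non-null. Roughly:
//
//   if (value != null) {
//     card_base = thread->card_table_base;  // biased base, kept in AT
//     card_base[object >> kCardShift] = low byte of card_base;
//   }
//
// Storing the low byte of the biased base as the dirty value is the usual ART card
// table trick; it avoids materializing a separate constant.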
void CodeGeneratorMIPS64::MarkGCCard(GpuRegister object, GpuRegister value) {
  Label done;
  GpuRegister card = AT;
  GpuRegister temp = TMP;
  __ Beqzc(value, &done);
  __ LoadFromOffset(kLoadDoubleword,
                    card,
                    TR,
                    Thread::CardTableOffset<kMips64WordSize>().Int32Value());
  __ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
  __ Daddu(temp, card, temp);
  __ Sb(card, temp, 0);
  __ Bind(&done);
}

void CodeGeneratorMIPS64::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const {
  // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
  blocked_core_registers_[ZERO] = true;
  blocked_core_registers_[K0] = true;
  blocked_core_registers_[K1] = true;
  blocked_core_registers_[GP] = true;
  blocked_core_registers_[SP] = true;
  blocked_core_registers_[RA] = true;

  // AT and TMP(T8) are used as temporary/scratch registers
  // (similar to how AT is used by MIPS assemblers).
  blocked_core_registers_[AT] = true;
  blocked_core_registers_[TMP] = true;
  blocked_fpu_registers_[FTMP] = true;

  // Reserve suspend and thread registers.
  blocked_core_registers_[S0] = true;
  blocked_core_registers_[TR] = true;

  // Reserve T9 for function calls
  blocked_core_registers_[T9] = true;

  // TODO: review; anything else?

  // TODO: make these two for's conditional on is_baseline once
  // all the issues with register saving/restoring are sorted out.
  for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
    blocked_core_registers_[kCoreCalleeSaves[i]] = true;
  }

  for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
    blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
  }
}

Location CodeGeneratorMIPS64::AllocateFreeRegister(Primitive::Type type) const {
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type)) {
    size_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFpuRegisters);
    return Location::FpuRegisterLocation(reg);
  } else {
    size_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfGpuRegisters);
    return Location::RegisterLocation(reg);
  }
}

size_t CodeGeneratorMIPS64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreToOffset(kStoreDoubleword, GpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFromOffset(kLoadDoubleword, GpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreFpuToOffset(kStoreDoubleword, FpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFpuFromOffset(kLoadDoubleword, FpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

void CodeGeneratorMIPS64::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << GpuRegister(reg);
}

void CodeGeneratorMIPS64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << FpuRegister(reg);
}

void CodeGeneratorMIPS64::InvokeRuntime(QuickEntrypointEnum entrypoint,
                                        HInstruction* instruction,
                                        uint32_t dex_pc,
                                        SlowPathCode* slow_path) {
  InvokeRuntime(GetThreadOffset<kMips64WordSize>(entrypoint).Int32Value(),
                instruction,
                dex_pc,
                slow_path);
}

void CodeGeneratorMIPS64::InvokeRuntime(int32_t entry_point_offset,
                                        HInstruction* instruction,
                                        uint32_t dex_pc,
                                        SlowPathCode* slow_path) {
  ValidateInvokeRuntime(instruction, slow_path);
  // TODO: anything related to T9/GP/GOT/PIC/.so's?
  __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
  __ Jalr(T9);
  RecordPcInfo(instruction, dex_pc, slow_path);
}

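// Class initialization check: branch to the slow path while the 32-bit status word of
// the class is still below kStatusInitialized.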
void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
                                                                      GpuRegister class_reg) {
  __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
  __ LoadConst32(AT, mirror::Class::kStatusInitialized);
  __ Bltc(TMP, AT, slow_path->GetEntryLabel());
  // TODO: barrier needed?
  __ Bind(slow_path->GetExitLabel());
}

void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
  __ Sync(0);  // only stype 0 is supported
}

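// Suspend check: poll the 16-bit thread flags; any pending flag (suspend or checkpoint
// request) diverts into the slow path, which calls the pTestSuspend entrypoint.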
void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                          HBasicBlock* successor) {
  SuspendCheckSlowPathMIPS64* slow_path =
      new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS64(instruction, successor);
  codegen_->AddSlowPath(slow_path);

  __ LoadFromOffset(kLoadUnsignedHalfword,
                    TMP,
                    TR,
                    Thread::ThreadFlagsOffset<kMips64WordSize>().Int32Value());
  if (successor == nullptr) {
    __ Bnezc(TMP, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ Beqzc(TMP, codegen_->GetLabelOf(successor));
    __ B(slow_path->GetEntryLabel());
    // slow_path will return to GetLabelOf(successor).
  }
}

InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph,
                                                               CodeGeneratorMIPS64* codegen)
    : HGraphVisitor(graph),
      assembler_(codegen->GetAssembler()),
      codegen_(codegen) {}

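// An immediate right-hand side is only usable if it fits the instruction encoding:
// ANDI/ORI/XORI zero-extend a 16-bit immediate while ADDIU/DADDIU sign-extend one,
// hence IsUint<16> for the logical ops and IsInt<16> for add (and for the negated
// value for sub, which is emitted as an add of -imm).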
void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
  DCHECK_EQ(instruction->InputCount(), 2U);
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Primitive::Type type = instruction->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      HInstruction* right = instruction->InputAt(1);
      bool can_use_imm = false;
      if (right->IsConstant()) {
        int64_t imm = CodeGenerator::GetInt64ValueOf(right->AsConstant());
        if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
          can_use_imm = IsUint<16>(imm);
        } else if (instruction->IsAdd()) {
          can_use_imm = IsInt<16>(imm);
        } else {
          DCHECK(instruction->IsSub());
          can_use_imm = IsInt<16>(-imm);
        }
      }
      if (can_use_imm)
        locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
      else
        locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
  }
}

void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      Location rhs_location = locations->InAt(1);

      GpuRegister rhs_reg = ZERO;
      int64_t rhs_imm = 0;
      bool use_imm = rhs_location.IsConstant();
      if (use_imm) {
        rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
      } else {
        rhs_reg = rhs_location.AsRegister<GpuRegister>();
      }

      if (instruction->IsAnd()) {
        if (use_imm)
          __ Andi(dst, lhs, rhs_imm);
        else
          __ And(dst, lhs, rhs_reg);
      } else if (instruction->IsOr()) {
        if (use_imm)
          __ Ori(dst, lhs, rhs_imm);
        else
          __ Or(dst, lhs, rhs_reg);
      } else if (instruction->IsXor()) {
        if (use_imm)
          __ Xori(dst, lhs, rhs_imm);
        else
          __ Xor(dst, lhs, rhs_reg);
      } else if (instruction->IsAdd()) {
        if (type == Primitive::kPrimInt) {
          if (use_imm)
            __ Addiu(dst, lhs, rhs_imm);
          else
            __ Addu(dst, lhs, rhs_reg);
        } else {
          if (use_imm)
            __ Daddiu(dst, lhs, rhs_imm);
          else
            __ Daddu(dst, lhs, rhs_reg);
        }
      } else {
        DCHECK(instruction->IsSub());
        if (type == Primitive::kPrimInt) {
          if (use_imm)
            __ Addiu(dst, lhs, -rhs_imm);
          else
            __ Subu(dst, lhs, rhs_reg);
        } else {
          if (use_imm)
            __ Daddiu(dst, lhs, -rhs_imm);
          else
            __ Dsubu(dst, lhs, rhs_reg);
        }
      }
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (instruction->IsAdd()) {
        if (type == Primitive::kPrimFloat)
          __ AddS(dst, lhs, rhs);
        else
          __ AddD(dst, lhs, rhs);
      } else if (instruction->IsSub()) {
        if (type == Primitive::kPrimFloat)
          __ SubS(dst, lhs, rhs);
        else
          __ SubD(dst, lhs, rhs);
      } else {
        LOG(FATAL) << "Unexpected floating-point binary operation";
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected binary operation type " << type;
  }
}

void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift type " << type;
  }
}

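// MIPS64 encodes constant 64-bit shift distances in two instruction forms: DSLL/DSRA/
// DSRL take distances 0-31, and the "+32" forms (DSLL32/DSRA32/DSRL32) cover 32-63,
// hence the shift_value < 32 split below. Variable shifts (SLLV, DSLLV, ...) take the
// distance from a register and need no split.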
void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
  LocationSummary* locations = instr->GetLocations();
  Primitive::Type type = instr->GetType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      Location rhs_location = locations->InAt(1);

      GpuRegister rhs_reg = ZERO;
      int64_t rhs_imm = 0;
      bool use_imm = rhs_location.IsConstant();
      if (use_imm) {
        rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
      } else {
        rhs_reg = rhs_location.AsRegister<GpuRegister>();
      }

      if (use_imm) {
        uint32_t shift_value = (type == Primitive::kPrimInt)
            ? static_cast<uint32_t>(rhs_imm & kMaxIntShiftValue)
            : static_cast<uint32_t>(rhs_imm & kMaxLongShiftValue);

        if (type == Primitive::kPrimInt) {
          if (instr->IsShl()) {
            __ Sll(dst, lhs, shift_value);
          } else if (instr->IsShr()) {
            __ Sra(dst, lhs, shift_value);
          } else {
            __ Srl(dst, lhs, shift_value);
          }
        } else {
          if (shift_value < 32) {
            if (instr->IsShl()) {
              __ Dsll(dst, lhs, shift_value);
            } else if (instr->IsShr()) {
              __ Dsra(dst, lhs, shift_value);
            } else {
              __ Dsrl(dst, lhs, shift_value);
            }
          } else {
            shift_value -= 32;
            if (instr->IsShl()) {
              __ Dsll32(dst, lhs, shift_value);
            } else if (instr->IsShr()) {
              __ Dsra32(dst, lhs, shift_value);
            } else {
              __ Dsrl32(dst, lhs, shift_value);
            }
          }
        }
      } else {
        if (type == Primitive::kPrimInt) {
          if (instr->IsShl()) {
            __ Sllv(dst, lhs, rhs_reg);
          } else if (instr->IsShr()) {
            __ Srav(dst, lhs, rhs_reg);
          } else {
            __ Srlv(dst, lhs, rhs_reg);
          }
        } else {
          if (instr->IsShl()) {
            __ Dsllv(dst, lhs, rhs_reg);
          } else if (instr->IsShr()) {
            __ Dsrav(dst, lhs, rhs_reg);
          } else {
            __ Dsrlv(dst, lhs, rhs_reg);
          }
        }
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift operation type " << type;
  }
}

void LocationsBuilderMIPS64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

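// The element address is obj + data_offset + (index << scale), with the scale matching
// the element size (TIMES_1 .. TIMES_8). A constant index folds entirely into the load
// offset; otherwise TMP holds the scaled index added to the base.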
1298void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
1299 LocationSummary* locations = instruction->GetLocations();
1300 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1301 Location index = locations->InAt(1);
1302 Primitive::Type type = instruction->GetType();
1303
1304 switch (type) {
1305 case Primitive::kPrimBoolean: {
1306 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
1307 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1308 if (index.IsConstant()) {
1309 size_t offset =
1310 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
1311 __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
1312 } else {
1313 __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
1314 __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
1315 }
1316 break;
1317 }
1318
1319 case Primitive::kPrimByte: {
1320 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
1321 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1322 if (index.IsConstant()) {
1323 size_t offset =
1324 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
1325 __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
1326 } else {
1327 __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
1328 __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset);
1329 }
1330 break;
1331 }
1332
1333 case Primitive::kPrimShort: {
1334 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
1335 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1336 if (index.IsConstant()) {
1337 size_t offset =
1338 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
1339 __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
1340 } else {
1341 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
1342 __ Daddu(TMP, obj, TMP);
1343 __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset);
1344 }
1345 break;
1346 }
1347
1348 case Primitive::kPrimChar: {
1349 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
1350 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1351 if (index.IsConstant()) {
1352 size_t offset =
1353 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
1354 __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
1355 } else {
1356 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
1357 __ Daddu(TMP, obj, TMP);
1358 __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
1359 }
1360 break;
1361 }
1362
1363 case Primitive::kPrimInt:
1364 case Primitive::kPrimNot: {
1365 DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
1366 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
1367 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1368 LoadOperandType load_type = (type == Primitive::kPrimNot) ? kLoadUnsignedWord : kLoadWord;
1369 if (index.IsConstant()) {
1370 size_t offset =
1371 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
1372 __ LoadFromOffset(load_type, out, obj, offset);
1373 } else {
1374 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
1375 __ Daddu(TMP, obj, TMP);
1376 __ LoadFromOffset(load_type, out, TMP, data_offset);
1377 }
1378 break;
1379 }
1380
1381 case Primitive::kPrimLong: {
1382 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
1383 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1384 if (index.IsConstant()) {
1385 size_t offset =
1386 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
1387 __ LoadFromOffset(kLoadDoubleword, out, obj, offset);
1388 } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFpuFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
        __ Daddu(TMP, obj, TMP);
        __ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  __ LoadFromOffset(kLoadWord, out, obj, offset);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) {
  bool needs_runtime_call = instruction->NeedsTypeCheck();
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction,
      needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
  if (needs_runtime_call) {
    InvokeRuntimeCallingConvention calling_convention;
    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
    if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
      locations->SetInAt(2, Location::RequiresFpuRegister());
    } else {
      locations->SetInAt(2, Location::RequiresRegister());
    }
  }
}

void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  Location index = locations->InAt(1);
  Primitive::Type value_type = instruction->GetComponentType();
  bool needs_runtime_call = locations->WillCall();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());

  switch (value_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ StoreToOffset(kStoreByte, value, obj, offset);
      } else {
        __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
        __ StoreToOffset(kStoreByte, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ StoreToOffset(kStoreHalfword, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
        __ Daddu(TMP, obj, TMP);
        __ StoreToOffset(kStoreHalfword, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      if (!needs_runtime_call) {
        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
        GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
        if (index.IsConstant()) {
          size_t offset =
              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
          __ StoreToOffset(kStoreWord, value, obj, offset);
        } else {
          DCHECK(index.IsRegister()) << index;
          __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
          __ Daddu(TMP, obj, TMP);
          __ StoreToOffset(kStoreWord, value, TMP, data_offset);
        }
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        if (needs_write_barrier) {
          DCHECK_EQ(value_type, Primitive::kPrimNot);
          codegen_->MarkGCCard(obj, value);
        }
      } else {
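        // Note: for aput-object the runtime entrypoint performs the
        // component type check as well as the store and the write barrier.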
        DCHECK_EQ(value_type, Primitive::kPrimNot);
        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                                instruction,
                                instruction->GetDexPc(),
                                nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreToOffset(kStoreDoubleword, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
      DCHECK(locations->InAt(2).IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreFpuToOffset(kStoreWord, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
        __ Daddu(TMP, obj, TMP);
        __ StoreFpuToOffset(kStoreWord, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
      DCHECK(locations->InAt(2).IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreFpuToOffset(kStoreDoubleword, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ StoreFpuToOffset(kStoreDoubleword, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }

  // For ints and objects, MaybeRecordImplicitNullCheck was already called in the switch above.
  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}

void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  BoundsCheckSlowPathMIPS64* slow_path =
      new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);

  GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister length = locations->InAt(1).AsRegister<GpuRegister>();

  // length is limited by the maximum positive signed 32-bit integer.
  // Unsigned comparison of length and index checks for index < 0
  // and for length <= index simultaneously.
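  // For example, a negative index sign-extends to a very large unsigned
  // value, so the single unsigned branch below also rejects index < 0.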
  // Mips R6 requires lhs != rhs for compact branches.
  if (index == length) {
    __ B(slow_path->GetEntryLabel());
  } else {
    __ Bgeuc(index, length, slow_path->GetEntryLabel());
  }
}

void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction,
      LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  // Note that TypeCheckSlowPathMIPS64 uses this register too.
  locations->AddTemp(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
  GpuRegister obj_cls = locations->GetTemp(0).AsRegister<GpuRegister>();

  SlowPathCodeMIPS64* slow_path =
      new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);

  // TODO: avoid this check if we know obj is not null.
  __ Beqzc(obj, slow_path->GetExitLabel());
  // Compare the class of `obj` with `cls`.
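  // Note: heap references are 32 bits even in a 64-bit process, hence the
  // zero-extending kLoadUnsignedWord used for the class pointer below.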
  __ LoadFromOffset(kLoadUnsignedWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
  __ Bnec(obj_cls, cls, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  if (check->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
  // We assume the class is not null.
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
      check->GetLoadClass(),
      check,
      check->GetDexPc(),
      true);
  codegen_->AddSlowPath(slow_path);
  GenerateClassInitializationCheck(slow_path,
                                   check->GetLocations()->InAt(0).AsRegister<GpuRegister>());
}

void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) {
  Primitive::Type in_type = compare->InputAt(0)->GetType();

  LocationSummary::CallKind call_kind = Primitive::IsFloatingPointType(in_type)
      ? LocationSummary::kCall
      : LocationSummary::kNoCall;

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, call_kind);

  switch (in_type) {
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
      locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type in_type = instruction->InputAt(0)->GetType();

  // 0 if: left == right
  // 1 if: left > right
  // -1 if: left < right
  switch (in_type) {
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
      // TODO: more efficient (direct) comparison with a constant
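      // A small sketch of the values computed below:
      //   TMP = (lhs < rhs) ? 1 : 0
      //   dst = (rhs < lhs) ? 1 : 0
      //   dst = dst - TMP   // -1, 0 or 1, as specified above.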
      __ Slt(TMP, lhs, rhs);
      __ Slt(dst, rhs, lhs);
      __ Subu(dst, dst, TMP);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      int32_t entry_point_offset;
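      // Note: IsGtBias() selects the pCmpg* entrypoints, which return 1 when
      // either operand is NaN; the pCmpl* entrypoints return -1 instead,
      // mirroring the cmpg/cmpl dex bytecode semantics.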
      if (in_type == Primitive::kPrimFloat) {
        entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgFloat)
                                                     : QUICK_ENTRY_POINT(pCmplFloat);
      } else {
        entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgDouble)
                                                     : QUICK_ENTRY_POINT(pCmplDouble);
      }
      codegen_->InvokeRuntime(entry_point_offset, instruction, instruction->GetDexPc(), nullptr);
      break;
    }

    default:
      LOG(FATAL) << "Unimplemented compare type " << in_type;
  }
}

void LocationsBuilderMIPS64::VisitCondition(HCondition* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (instruction->NeedsMaterialization()) {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorMIPS64::VisitCondition(HCondition* instruction) {
  if (!instruction->NeedsMaterialization()) {
    return;
  }

  LocationSummary* locations = instruction->GetLocations();

  GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
  GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
  Location rhs_location = locations->InAt(1);

  GpuRegister rhs_reg = ZERO;
  int64_t rhs_imm = 0;
  bool use_imm = rhs_location.IsConstant();
  if (use_imm) {
    rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
  } else {
    rhs_reg = rhs_location.AsRegister<GpuRegister>();
  }

  IfCondition if_cond = instruction->GetCondition();

  switch (if_cond) {
    case kCondEQ:
    case kCondNE:
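      // lhs ^ rhs is zero iff lhs == rhs; the xor result in dst is then
      // reduced to a boolean: EQ materializes (dst < 1) unsigned, NE
      // materializes (0 < dst) unsigned.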
      if (use_imm && IsUint<16>(rhs_imm)) {
        __ Xori(dst, lhs, rhs_imm);
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Xor(dst, lhs, rhs_reg);
      }
      if (if_cond == kCondEQ) {
        __ Sltiu(dst, dst, 1);
      } else {
        __ Sltu(dst, ZERO, dst);
      }
      break;

    case kCondLT:
    case kCondGE:
      if (use_imm && IsInt<16>(rhs_imm)) {
        __ Slti(dst, lhs, rhs_imm);
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Slt(dst, lhs, rhs_reg);
      }
      if (if_cond == kCondGE) {
        // Simulate lhs >= rhs via !(lhs < rhs) since there's
        // only the slt instruction but no sge.
        __ Xori(dst, dst, 1);
      }
      break;

    case kCondLE:
    case kCondGT:
      if (use_imm && IsInt<16>(rhs_imm + 1)) {
        // Simulate lhs <= rhs via lhs < rhs + 1.
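        // Note: rhs_imm + 1 cannot overflow here, since rhs_imm holds an
        // int32 value in a 64-bit variable.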
        __ Slti(dst, lhs, rhs_imm + 1);
        if (if_cond == kCondGT) {
          // Simulate lhs > rhs via !(lhs <= rhs) since there's
          // only the slti instruction but no sgti.
          __ Xori(dst, dst, 1);
        }
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Slt(dst, rhs_reg, lhs);
        if (if_cond == kCondLE) {
          // Simulate lhs <= rhs via !(rhs < lhs) since there's
          // only the slt instruction but no sle.
          __ Xori(dst, dst, 1);
        }
      }
      break;
  }
}

void LocationsBuilderMIPS64::VisitDiv(HDiv* div) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
  switch (div->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}

void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
      if (type == Primitive::kPrimInt)
        __ DivR6(dst, lhs, rhs);
      else
        __ Ddiv(dst, lhs, rhs);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (type == Primitive::kPrimFloat)
        __ DivS(dst, lhs, rhs);
      else
        __ DivD(dst, lhs, rhs);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected div type " << type;
  }
}

void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  SlowPathCodeMIPS64* slow_path =
      new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);
  Location value = instruction->GetLocations()->InAt(0);

  Primitive::Type type = instruction->GetType();

  if ((type == Primitive::kPrimBoolean) || !Primitive::IsIntegralType(type)) {
    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
    return;
  }

  if (value.IsConstant()) {
    int64_t divisor = codegen_->GetInt64ValueOf(value.GetConstant()->AsConstant());
    if (divisor == 0) {
      __ B(slow_path->GetEntryLabel());
    } else {
      // A division by a non-zero constant is valid. We don't need to perform
      // any check, so simply fall through.
    }
  } else {
    __ Beqzc(value.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
  }
}

void LocationsBuilderMIPS64::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderMIPS64::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}

void LocationsBuilderMIPS64::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void InstructionCodeGeneratorMIPS64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
  DCHECK(!successor->IsExitBlock());
  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();
  HLoopInformation* info = block->GetLoopInformation();

  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }
  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  if (!codegen_->GoesToNextBlock(block, successor)) {
    __ B(codegen_->GetLabelOf(successor));
  }
}

void LocationsBuilderMIPS64::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitGoto(HGoto* got) {
  HandleGoto(got, got->GetSuccessor());
}

void LocationsBuilderMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
  try_boundary->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
  if (!successor->IsExitBlock()) {
    HandleGoto(try_boundary, successor);
  }
}

void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruction,
                                                           Label* true_target,
                                                           Label* false_target,
                                                           Label* always_true_target) {
  HInstruction* cond = instruction->InputAt(0);
  HCondition* condition = cond->AsCondition();

  if (cond->IsIntConstant()) {
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ B(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
    }
  } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
    // The condition instruction has been materialized, compare the output to 0.
    Location cond_val = instruction->GetLocations()->InAt(0);
    DCHECK(cond_val.IsRegister());
    __ Bnezc(cond_val.AsRegister<GpuRegister>(), true_target);
  } else {
    // The condition instruction has not been materialized, use its inputs as
    // the comparison and its condition as the branch condition.
    GpuRegister lhs = condition->GetLocations()->InAt(0).AsRegister<GpuRegister>();
    Location rhs_location = condition->GetLocations()->InAt(1);
    GpuRegister rhs_reg = ZERO;
    int32_t rhs_imm = 0;
    bool use_imm = rhs_location.IsConstant();
    if (use_imm) {
      rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
    } else {
      rhs_reg = rhs_location.AsRegister<GpuRegister>();
    }

    IfCondition if_cond = condition->GetCondition();
    if (use_imm && rhs_imm == 0) {
      switch (if_cond) {
        case kCondEQ:
          __ Beqzc(lhs, true_target);
          break;
        case kCondNE:
          __ Bnezc(lhs, true_target);
          break;
        case kCondLT:
          __ Bltzc(lhs, true_target);
          break;
        case kCondGE:
          __ Bgezc(lhs, true_target);
          break;
        case kCondLE:
          __ Blezc(lhs, true_target);
          break;
        case kCondGT:
          __ Bgtzc(lhs, true_target);
          break;
      }
    } else {
      if (use_imm) {
        rhs_reg = TMP;
        __ LoadConst32(rhs_reg, rhs_imm);
      }
      // It looks like we can get here with lhs == rhs. Should that be possible at all?
      // Mips R6 requires lhs != rhs for compact branches.
      if (lhs == rhs_reg) {
        DCHECK(!use_imm);
        switch (if_cond) {
          case kCondEQ:
          case kCondGE:
          case kCondLE:
            // If lhs == rhs for a positive condition, the branch is always taken.
            __ B(true_target);
            break;
          case kCondNE:
          case kCondLT:
          case kCondGT:
            // If lhs == rhs for a negative condition, the branch is never taken.
            break;
        }
      } else {
        switch (if_cond) {
          case kCondEQ:
            __ Beqc(lhs, rhs_reg, true_target);
            break;
          case kCondNE:
            __ Bnec(lhs, rhs_reg, true_target);
            break;
          case kCondLT:
            __ Bltc(lhs, rhs_reg, true_target);
            break;
          case kCondGE:
            __ Bgec(lhs, rhs_reg, true_target);
            break;
          case kCondLE:
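            // lhs <= rhs is implemented as rhs >= lhs.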
            __ Bgec(rhs_reg, lhs, true_target);
            break;
          case kCondGT:
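            // lhs > rhs is implemented as rhs < lhs.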
            __ Bltc(rhs_reg, lhs, true_target);
            break;
        }
      }
    }
  }
  if (false_target != nullptr) {
    __ B(false_target);
  }
}

void LocationsBuilderMIPS64::VisitIf(HIf* if_instr) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
  HInstruction* cond = if_instr->InputAt(0);
  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
  Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
  Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
  Label* always_true_target = true_target;
  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
                                if_instr->IfTrueSuccessor())) {
    always_true_target = nullptr;
  }
  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
                                if_instr->IfFalseSuccessor())) {
    false_target = nullptr;
  }
  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
}

void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
  LocationSummary* locations = new (GetGraph()->GetArena())
      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
  HInstruction* cond = deoptimize->InputAt(0);
  DCHECK(cond->IsCondition());
  if (cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
      DeoptimizationSlowPathMIPS64(deoptimize);
  codegen_->AddSlowPath(slow_path);
  Label* slow_path_entry = slow_path->GetEntryLabel();
  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
}

void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
                                            const FieldInfo& field_info ATTRIBUTE_UNUSED) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister());
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
                                                    const FieldInfo& field_info) {
  Primitive::Type type = field_info.GetFieldType();
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  LoadOperandType load_type = kLoadUnsignedByte;
  switch (type) {
    case Primitive::kPrimBoolean:
      load_type = kLoadUnsignedByte;
      break;
    case Primitive::kPrimByte:
      load_type = kLoadSignedByte;
      break;
    case Primitive::kPrimShort:
      load_type = kLoadSignedHalfword;
      break;
    case Primitive::kPrimChar:
      load_type = kLoadUnsignedHalfword;
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      load_type = kLoadWord;
      break;
    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      load_type = kLoadDoubleword;
      break;
    case Primitive::kPrimNot:
      load_type = kLoadUnsignedWord;
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
      UNREACHABLE();
  }
  if (!Primitive::IsFloatingPointType(type)) {
    DCHECK(locations->Out().IsRegister());
    GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
    __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
  } else {
    DCHECK(locations->Out().IsFpuRegister());
    FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
    __ LoadFpuFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
  }

  codegen_->MaybeRecordImplicitNullCheck(instruction);
  // TODO: memory barrier?
}

void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
                                            const FieldInfo& field_info ATTRIBUTE_UNUSED) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
    locations->SetInAt(1, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
                                                    const FieldInfo& field_info) {
  Primitive::Type type = field_info.GetFieldType();
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  StoreOperandType store_type = kStoreByte;
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      store_type = kStoreByte;
      break;
    case Primitive::kPrimShort:
    case Primitive::kPrimChar:
      store_type = kStoreHalfword;
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
    case Primitive::kPrimNot:
      store_type = kStoreWord;
      break;
    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      store_type = kStoreDoubleword;
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
      UNREACHABLE();
  }
  if (!Primitive::IsFloatingPointType(type)) {
    DCHECK(locations->InAt(1).IsRegister());
    GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
    __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
  } else {
    DCHECK(locations->InAt(1).IsFpuRegister());
    FpuRegister src = locations->InAt(1).AsFpuRegister<FpuRegister>();
    __ StoreFpuToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
  }

  codegen_->MaybeRecordImplicitNullCheck(instruction);
  // TODO: memory barriers?
  if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
    DCHECK(locations->InAt(1).IsRegister());
    GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
    codegen_->MarkGCCard(obj, src);
  }
}

void LocationsBuilderMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary::CallKind call_kind =
      instruction->IsExactCheck() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  // The output overlaps the inputs.
  // Note that TypeCheckSlowPathMIPS64 uses this register too.
  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}

void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();

  Label done;

  // Return 0 if `obj` is null.
  // TODO: Avoid this check if we know `obj` is not null.
  __ Move(out, ZERO);
  __ Beqzc(obj, &done);

  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadUnsignedWord, out, obj, mirror::Object::ClassOffset().Int32Value());
  if (instruction->IsExactCheck()) {
    // Classes must be equal for the instanceof to succeed.
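    // out ^ cls is zero iff the classes are equal; the unsigned (out < 1)
    // test below then materializes the result as a boolean.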
    __ Xor(out, out, cls);
    __ Sltiu(out, out, 1);
  } else {
    // If the classes are not equal, we go into a slow path.
    DCHECK(locations->OnlyCallsOnSlowPath());
    SlowPathCodeMIPS64* slow_path =
        new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
    codegen_->AddSlowPath(slow_path);
    __ Bnec(out, cls, slow_path->GetEntryLabel());
    __ LoadConst32(out, 1);
    __ Bind(slow_path->GetExitLabel());
  }

  __ Bind(&done);
}

void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderMIPS64::VisitNullConstant(HNullConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderMIPS64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
  // The trampoline uses the same calling convention as dex calling conventions,
  // except that instead of loading arg0/A0 with the target Method*, arg0/A0
  // contains the method_idx.
  HandleInvoke(invoke);
}

void InstructionCodeGeneratorMIPS64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
  codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
}

void LocationsBuilderMIPS64::HandleInvoke(HInvoke* invoke) {
  InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
}

void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
  // The register T0 is required to be used for the hidden argument in
  // art_quick_imt_conflict_trampoline, so add the hidden argument.
  invoke->GetLocations()->AddTemp(Location::RegisterLocation(T0));
}

void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
      invoke->GetImtIndex() % mirror::Class::kImtSize, kMips64PointerSize).Uint32Value();
  Location receiver = invoke->GetLocations()->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);

  // Set the hidden argument.
  __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<GpuRegister>(),
                 invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadUnsignedWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadUnsignedWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
  }
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetImtEntryAt(method_offset);
  __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
  // T9 = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
  // T9();
  __ Jalr(T9);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void LocationsBuilderMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  IntrinsicLocationsBuilderMIPS64 intrinsic(codegen_);
  if (intrinsic.TryDispatch(invoke)) {
    return;
  }

  HandleInvoke(invoke);
}

void LocationsBuilderMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  IntrinsicLocationsBuilderMIPS64 intrinsic(codegen_);
  if (intrinsic.TryDispatch(invoke)) {
    return;
  }

  HandleInvoke(invoke);

  // While SetupBlockedRegisters() blocks registers S2-S8 because they are
  // clobbered elsewhere, reduce further register pressure by avoiding
  // allocation of a register for the current method pointer like on x86 baseline.
  // TODO: remove this once all the issues with register saving/restoring are
  // sorted out.
  LocationSummary* locations = invoke->GetLocations();
  Location location = locations->InAt(invoke->GetCurrentMethodInputIndex());
  if (location.IsUnallocated() && location.GetPolicy() == Location::kRequiresRegister) {
    locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::NoLocation());
  }
}

static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
  if (invoke->GetLocations()->Intrinsified()) {
    IntrinsicCodeGeneratorMIPS64 intrinsic(codegen);
    intrinsic.Dispatch(invoke);
    return true;
  }
  return false;
}

void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
  // All registers are assumed to be correctly set up per the calling convention.

  Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
  switch (invoke->GetMethodLoadKind()) {
    case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
      // temp = thread->string_init_entrypoint
      __ LoadFromOffset(kLoadDoubleword,
                        temp.AsRegister<GpuRegister>(),
                        TR,
                        invoke->GetStringInitOffset());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
      callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
      __ LoadConst64(temp.AsRegister<GpuRegister>(), invoke->GetMethodAddress());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
      // TODO: Implement this type. (Needs literal support.) At the moment, the
      // CompilerDriver will not direct the backend to use this type for MIPS.
      LOG(FATAL) << "Unsupported!";
      UNREACHABLE();
    case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
      // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
      FALLTHROUGH_INTENDED;
    case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
      Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
      GpuRegister reg = temp.AsRegister<GpuRegister>();
      GpuRegister method_reg;
      if (current_method.IsRegister()) {
        method_reg = current_method.AsRegister<GpuRegister>();
      } else {
        // TODO: use the appropriate DCHECK() here if possible.
        // DCHECK(invoke->GetLocations()->Intrinsified());
        DCHECK(!current_method.IsValid());
        method_reg = reg;
        __ Ld(reg, SP, kCurrentMethodStackOffset);
      }

      // temp = temp->dex_cache_resolved_methods_;
      __ LoadFromOffset(kLoadDoubleword,
                        reg,
                        method_reg,
                        ArtMethod::DexCacheResolvedMethodsOffset(kMips64PointerSize).Int32Value());
      // temp = temp[index_in_cache]
      uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
      __ LoadFromOffset(kLoadDoubleword,
                        reg,
                        reg,
                        CodeGenerator::GetCachePointerOffset(index_in_cache));
      break;
    }
  }

  switch (invoke->GetCodePtrLocation()) {
    case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
      __ Jalr(&frame_entry_label_, T9);
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
      // T9 = invoke->GetDirectCodePtr();
      __ LoadConst64(T9, invoke->GetDirectCodePtr());
      // T9()
      __ Jalr(T9);
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
      // TODO: Implement kCallPCRelative. For the moment, we fall back to kCallArtMethod.
      FALLTHROUGH_INTENDED;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
      // TODO: Implement kCallDirectWithFixup. For the moment, we fall back to kCallArtMethod.
      FALLTHROUGH_INTENDED;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
      // T9 = callee_method->entry_point_from_quick_compiled_code_;
      __ LoadFromOffset(kLoadDoubleword,
                        T9,
                        callee_method.AsRegister<GpuRegister>(),
                        ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                            kMips64WordSize).Int32Value());
      // T9()
      __ Jalr(T9);
      break;
  }
  DCHECK(!IsLeafMethod());
}

void InstructionCodeGeneratorMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  LocationSummary* locations = invoke->GetLocations();
  codegen_->GenerateStaticOrDirectCall(invoke,
                                       locations->HasTemps()
                                           ? locations->GetTemp(0)
                                           : Location::NoLocation());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
  size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
      invoke->GetVTableIndex(), kMips64PointerSize).SizeValue();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);

  // temp = object->GetClass();
  DCHECK(receiver.IsRegister());
  __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
  // T9 = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
  // T9();
  __ Jalr(T9);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
  LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
                                                              : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
  LocationSummary* locations = cls->GetLocations();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
  if (cls->IsReferrersClass()) {
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
                      ArtMethod::DeclaringClassOffset().Int32Value());
  } else {
    DCHECK(cls->CanCallRuntime());
    __ LoadFromOffset(kLoadDoubleword, out, current_method,
                      ArtMethod::DexCacheResolvedTypesOffset(kMips64PointerSize).Int32Value());
    __ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
    // TODO: We will need a read barrier here.
    SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
        cls,
        cls,
        cls->GetDexPc(),
        cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    __ Beqzc(out, slow_path->GetEntryLabel());
    if (cls->MustGenerateClinitCheck()) {
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}

static int32_t GetExceptionTlsOffset() {
  return Thread::ExceptionOffset<kMips64WordSize>().Int32Value();
}

void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) {
  GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>();
  __ LoadFromOffset(kLoadUnsignedWord, out, TR, GetExceptionTlsOffset());
}

void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) {
  new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
}

void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
  __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
}

void LocationsBuilderMIPS64::VisitLoadLocal(HLoadLocal* load) {
  load->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
  // Nothing to do, this is driven by the code generator.
}

void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = load->GetLocations();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
  __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
                    ArtMethod::DeclaringClassOffset().Int32Value());
  __ LoadFromOffset(kLoadDoubleword, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
  __ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
  // TODO: We will need a read barrier here.
  __ Beqzc(out, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderMIPS64::VisitLocal(HLocal* local) {
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitLocal(HLocal* local) {
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}

void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
  codegen_->InvokeRuntime(instruction->IsEnter()
                              ? QUICK_ENTRY_POINT(pLockObject)
                              : QUICK_ENTRY_POINT(pUnlockObject),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
}

void LocationsBuilderMIPS64::VisitMul(HMul* mul) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

void InstructionCodeGeneratorMIPS64::VisitMul(HMul* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
      if (type == Primitive::kPrimInt)
        __ MulR6(dst, lhs, rhs);
      else
        __ Dmul(dst, lhs, rhs);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (type == Primitive::kPrimFloat)
        __ MulS(dst, lhs, rhs);
      else
        __ MulD(dst, lhs, rhs);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected mul type " << type;
  }
}

void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

void InstructionCodeGeneratorMIPS64::VisitNeg(HNeg* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
      if (type == Primitive::kPrimInt)
        __ Subu(dst, ZERO, src);
      else
        __ Dsubu(dst, ZERO, src);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
      if (type == Primitive::kPrimFloat)
        __ NegS(dst, src);
      else
        __ NegD(dst, src);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected neg type " << type;
  }
}

void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
}

void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  // Move a uint16_t value to a register.
  __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
}

void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}

void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  // Move a uint16_t value to a register.
2835 __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
Calin Juravle175dc732015-08-25 15:42:32 +01002836 codegen_->InvokeRuntime(instruction->GetEntrypoint(),
2837 instruction,
2838 instruction->GetDexPc(),
2839 nullptr);
Alexey Frunze4dda3372015-06-01 18:31:49 -07002840 CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
2841}
2842
2843void LocationsBuilderMIPS64::VisitNot(HNot* instruction) {
2844 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2845 locations->SetInAt(0, Location::RequiresRegister());
2846 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2847}
2848
2849void InstructionCodeGeneratorMIPS64::VisitNot(HNot* instruction) {
2850 Primitive::Type type = instruction->GetType();
2851 LocationSummary* locations = instruction->GetLocations();
2852
2853 switch (type) {
2854 case Primitive::kPrimInt:
2855 case Primitive::kPrimLong: {
2856 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2857 GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
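      // A single NOR against $zero implements bitwise NOT for both 32-bit
      // and 64-bit operands, since nor(src, 0) == ~(src | 0) == ~src.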
      __ Nor(dst, src, ZERO);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
  }
}

void LocationsBuilderMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
  LocationSummary* locations = instruction->GetLocations();
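  // Boolean inputs are materialized as 0 or 1, so XORing with immediate 1
  // flips the value: 0 ^ 1 == 1 and 1 ^ 1 == 0.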
  __ Xori(locations->Out().AsRegister<GpuRegister>(),
          locations->InAt(0).AsRegister<GpuRegister>(),
          1);
}

void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

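  // The load below targets $zero, so its result is discarded; it exists only
  // to fault when the object is null:
  //   lw $zero, 0($obj)
  // The resulting SIGSEGV is converted into a NullPointerException by the
  // runtime's fault handler, and RecordPcInfo lets the handler map the
  // faulting PC back to this instruction's dex pc.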
  __ Lw(ZERO, obj.AsRegister<GpuRegister>(), 0);
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}

void InstructionCodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);

  Location obj = instruction->GetLocations()->InAt(0);

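  // The explicit check is a single R6 compact branch, roughly:
  //   beqzc $obj, <NullCheckSlowPathMIPS64 entry>   # branch if obj == null
  // The slow path raises the NullPointerException.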
  __ Beqzc(obj.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
}

void InstructionCodeGeneratorMIPS64::VisitNullCheck(HNullCheck* instruction) {
  if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
    GenerateImplicitNullCheck(instruction);
  } else {
    GenerateExplicitNullCheck(instruction);
  }
}

void LocationsBuilderMIPS64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instruction) {
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}

void LocationsBuilderMIPS64::VisitParameterValue(HParameterValue* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
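  // Stack-passed parameters live in the caller's frame. The calling
  // convention computed their offsets relative to the caller's SP, so the
  // callee's frame size is added to address them relative to its own SP.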
  if (location.IsStackSlot()) {
    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  } else if (location.IsDoubleStackSlot()) {
    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  }
  locations->SetOut(location);
}

void InstructionCodeGeneratorMIPS64::VisitParameterValue(HParameterValue* instruction
                                                         ATTRIBUTE_UNUSED) {
  // Nothing to do, the parameter is already at its location.
}

void LocationsBuilderMIPS64::VisitCurrentMethod(HCurrentMethod* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}

void InstructionCodeGeneratorMIPS64::VisitCurrentMethod(HCurrentMethod* instruction
                                                        ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}

void LocationsBuilderMIPS64::VisitPhi(HPhi* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    locations->SetInAt(i, Location::Any());
  }
  locations->SetOut(Location::Any());
}

void InstructionCodeGeneratorMIPS64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS64::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();
  LocationSummary::CallKind call_kind =
      Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
      locations->SetOut(calling_convention.GetReturnLocation(type));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitRem(HRem* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
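      // MIPS64R6 computes the remainder directly into a GPR via MOD/DMOD;
      // there is no HI/LO register pair to read back as on pre-R6 cores.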
      if (type == Primitive::kPrimInt)
        __ ModR6(dst, lhs, rhs);
      else
        __ Dmod(dst, lhs, rhs);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
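      // MIPS has no floating-point remainder instruction, so the operation
      // is delegated to the fmodf/fmod runtime entrypoints, with operands
      // and result in the runtime calling convention's registers.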
      int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
                                                             : QUICK_ENTRY_POINT(pFmod);
      codegen_->InvokeRuntime(entry_offset, instruction, instruction->GetDexPc(), nullptr);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void LocationsBuilderMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  memory_barrier->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}

void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
  Primitive::Type return_type = ret->InputAt(0)->GetType();
  locations->SetInAt(0, Mips64ReturnLocation(return_type));
}

void InstructionCodeGeneratorMIPS64::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS64::VisitReturnVoid(HReturnVoid* ret) {
  ret->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorMIPS64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderMIPS64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorMIPS64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderMIPS64::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
  Primitive::Type field_type = store->InputAt(1)->GetType();
  switch (field_type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unimplemented local type " << field_type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}

void LocationsBuilderMIPS64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}

void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}

void LocationsBuilderMIPS64::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
  // Nothing to do, this is driven by the code generator.
}

void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorMIPS64::VisitThrow(HThrow* instruction) {
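  // pDeliverException does not return: it unwinds the stack to the nearest
  // matching catch handler, so no code is emitted after this call.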
  codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}

void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type input_type = conversion->GetInputType();
  Primitive::Type result_type = conversion->GetResultType();
  DCHECK_NE(input_type, result_type);

  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
  }

  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  if ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
      (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type))) {
    call_kind = LocationSummary::kCall;
  }

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);

  if (call_kind == LocationSummary::kNoCall) {
    if (Primitive::IsFloatingPointType(input_type)) {
      locations->SetInAt(0, Location::RequiresFpuRegister());
    } else {
      locations->SetInAt(0, Location::RequiresRegister());
    }

    if (Primitive::IsFloatingPointType(result_type)) {
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
    } else {
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
    }
  } else {
    InvokeRuntimeCallingConvention calling_convention;

    if (Primitive::IsFloatingPointType(input_type)) {
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
    } else {
      locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
    }

    locations->SetOut(calling_convention.GetReturnLocation(result_type));
  }
}

void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();

  DCHECK_NE(input_type, result_type);

  if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
    GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
    GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();

    switch (result_type) {
      case Primitive::kPrimChar:
        // char is unsigned, so zero-extend the low 16 bits.
        __ Andi(dst, src, 0xFFFF);
        break;
      case Primitive::kPrimByte:
        // long is never converted into types narrower than int directly,
        // so SEB and SEH can be used without ever causing unpredictable results
        // on 64-bit inputs.
        DCHECK(input_type != Primitive::kPrimLong);
        __ Seb(dst, src);
        break;
      case Primitive::kPrimShort:
        // long is never converted into types narrower than int directly,
        // so SEB and SEH can be used without ever causing unpredictable results
        // on 64-bit inputs.
        DCHECK(input_type != Primitive::kPrimLong);
        __ Seh(dst, src);
        break;
      case Primitive::kPrimInt:
      case Primitive::kPrimLong:
        // Sign-extend 32-bit int into bits 32 through 63 for
        // int-to-long and long-to-int conversions.
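        // (On MIPS64, SLL operates on the low 32 bits of the source and
        // sign-extends the 32-bit result into the 64-bit destination, so a
        // shift by zero is the canonical sign-extension idiom.)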
        __ Sll(dst, src, 0);
        break;

      default:
        LOG(FATAL) << "Unexpected type conversion from " << input_type
                   << " to " << result_type;
    }
  } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
    if (input_type != Primitive::kPrimLong) {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
      __ Mtc1(src, FTMP);
      if (result_type == Primitive::kPrimFloat) {
        __ Cvtsw(dst, FTMP);
      } else {
        __ Cvtdw(dst, FTMP);
      }
    } else {
      int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
                                                                    : QUICK_ENTRY_POINT(pL2d);
      codegen_->InvokeRuntime(entry_offset,
                              conversion,
                              conversion->GetDexPc(),
                              nullptr);
    }
  } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
    int32_t entry_offset;
    if (result_type != Primitive::kPrimLong) {
      entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2iz)
                                                           : QUICK_ENTRY_POINT(pD2iz);
    } else {
      entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
                                                           : QUICK_ENTRY_POINT(pD2l);
    }
    codegen_->InvokeRuntime(entry_offset,
                            conversion,
                            conversion->GetDexPc(),
                            nullptr);
  } else if (Primitive::IsFloatingPointType(result_type) &&
             Primitive::IsFloatingPointType(input_type)) {
    FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
    FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
    if (result_type == Primitive::kPrimFloat) {
      __ Cvtsd(dst, src);
    } else {
      __ Cvtds(dst, src);
    }
  } else {
    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
               << " to " << result_type;
  }
}

void LocationsBuilderMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS64::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitFakeString(HFakeString* instruction) {
  DCHECK(codegen_->IsBaseline());
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
}

void InstructionCodeGeneratorMIPS64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
  DCHECK(codegen_->IsBaseline());
  // Will be generated at use site.
}

}  // namespace mips64
}  // namespace art