/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_mips64.h"

#include "art_method.h"
#include "code_generator_utils.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_mips64.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "offsets.h"
#include "thread.h"
#include "utils/mips64/assembler_mips64.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"

namespace art {
namespace mips64 {

static constexpr int kCurrentMethodStackOffset = 0;
static constexpr GpuRegister kMethodRegisterArgument = A0;

// We need extra temporary/scratch registers (in addition to AT) in some cases.
static constexpr FpuRegister FTMP = F8;

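// Returns the location in which the MIPS64 calling convention returns a value
// of the given type: integral and reference results in V0, floating-point
// results in F0, nothing for void.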
Location Mips64ReturnLocation(Primitive::Type return_type) {
  switch (return_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      return Location::RegisterLocation(V0);

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      return Location::FpuRegisterLocation(F0);

    case Primitive::kPrimVoid:
      return Location();
  }
  UNREACHABLE();
}

Location InvokeDexCallingConventionVisitorMIPS64::GetReturnLocation(Primitive::Type type) const {
  return Mips64ReturnLocation(type);
}

Location InvokeDexCallingConventionVisitorMIPS64::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}

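// Note: in the MIPS64 N64 calling convention, GPR and FPR argument slots
// advance in lockstep; every argument consumes a slot in both register files,
// which is why a floating-point argument also bumps gp_index_ below and an
// integral argument also bumps float_index_.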
Location InvokeDexCallingConventionVisitorMIPS64::GetNextLocation(Primitive::Type type) {
  Location next_location;
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unexpected parameter type " << type;
  }

  if (Primitive::IsFloatingPointType(type) &&
      (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
    next_location = Location::FpuRegisterLocation(
        calling_convention.GetFpuRegisterAt(float_index_++));
    gp_index_++;
  } else if (!Primitive::IsFloatingPointType(type) &&
             (gp_index_ < calling_convention.GetNumberOfRegisters())) {
    next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index_++));
    float_index_++;
  } else {
    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
    next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                                 : Location::StackSlot(stack_offset);
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;

  // TODO: review

  // TODO: shouldn't we use a whole machine word per argument on the stack?
  // Implicit 4-byte method pointer (and such) will cause misalignment.

  return next_location;
}

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
  return Mips64ReturnLocation(type);
}

#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()

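// Slow paths are out-of-line code sequences emitted after the main method
// body. The fast path branches to the slow path's entry label for the
// uncommon case, and slow paths that can resume branch back through their
// exit label.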
class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(0),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimInt,
                               locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimInt);
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }

 private:
  HBoundsCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
};

class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
};

class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  LoadClassSlowPathMIPS64(HLoadClass* cls,
                          HInstruction* at,
                          uint32_t dex_pc,
                          bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                            : QUICK_ENTRY_POINT(pInitializeType);
    mips64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = at_->GetType();
      mips64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS64"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
};

class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit LoadStringSlowPathMIPS64(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
    Primitive::Type type = instruction_->GetType();
    mips64_codegen->MoveLocation(locations->Out(),
                                 calling_convention.GetReturnLocation(type),
                                 type);

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
};

class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : instruction_(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }

 private:
  HNullCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
};

class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(mips64_codegen->GetLabelOf(successor_));
    }
  }

  Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS64);
};

class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit TypeCheckSlowPathMIPS64(HInstruction* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0) : locations->Out();
    uint32_t dex_pc = instruction_->GetDexPc();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimNot,
                               object_class,
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
                                    instruction_,
                                    dex_pc,
                                    this);
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial,
                           uint32_t,
                           const mirror::Class*,
                           const mirror::Class*>();
    } else {
      DCHECK(instruction_->IsCheckCast());
      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this);
      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; }

 private:
  HInstruction* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
};

class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit DeoptimizationSlowPathMIPS64(HInstruction* instruction)
      : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
};

CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
                                         const Mips64InstructionSetFeatures& isa_features,
                                         const CompilerOptions& compiler_options,
                                         OptimizingCompilerStats* stats)
    : CodeGenerator(graph,
                    kNumberOfGpuRegisters,
                    kNumberOfFpuRegisters,
                    0,  // kNumberOfRegisterPairs
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options,
                    stats),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      isa_features_(isa_features) {
  // Save RA (containing the return address) to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(RA));
}

#undef __
#define __ down_cast<Mips64Assembler*>(GetAssembler())->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()

void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
  CodeGenerator::Finalize(allocator);
}

Mips64Assembler* ParallelMoveResolverMIPS64::GetAssembler() const {
  return codegen_->GetAssembler();
}

void ParallelMoveResolverMIPS64::EmitMove(size_t index) {
  MoveOperands* move = moves_[index];
  codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
}

void ParallelMoveResolverMIPS64::EmitSwap(size_t index) {
  MoveOperands* move = moves_[index];
  codegen_->SwapLocations(move->GetDestination(), move->GetSource(), move->GetType());
}

void ParallelMoveResolverMIPS64::RestoreScratch(int reg) {
  // Pop reg
  __ Ld(GpuRegister(reg), SP, 0);
  __ DecreaseFrameSize(kMips64WordSize);
}

void ParallelMoveResolverMIPS64::SpillScratch(int reg) {
  // Push reg
  __ IncreaseFrameSize(kMips64WordSize);
  __ Sd(GpuRegister(reg), SP, 0);
}

void ParallelMoveResolverMIPS64::Exchange(int index1, int index2, bool double_slot) {
  LoadOperandType load_type = double_slot ? kLoadDoubleword : kLoadWord;
  StoreOperandType store_type = double_slot ? kStoreDoubleword : kStoreWord;
  // Allocate a scratch register other than TMP, if available.
  // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
  // automatically unspilled when the scratch scope object is destroyed).
  ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
  // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
  int stack_offset = ensure_scratch.IsSpilled() ? kMips64WordSize : 0;
  __ LoadFromOffset(load_type,
                    GpuRegister(ensure_scratch.GetRegister()),
                    SP,
                    index1 + stack_offset);
  __ LoadFromOffset(load_type,
                    TMP,
                    SP,
                    index2 + stack_offset);
  __ StoreToOffset(store_type,
                   GpuRegister(ensure_scratch.GetRegister()),
                   SP,
                   index2 + stack_offset);
  __ StoreToOffset(store_type, TMP, SP, index1 + stack_offset);
}

static dwarf::Reg DWARFReg(GpuRegister reg) {
  return dwarf::Reg::Mips64Core(static_cast<int>(reg));
}

// TODO: mapping of floating-point registers to DWARF

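// Frame layout (high to low addresses): spilled core callee-saves, then
// spilled FPU callee-saves, then the rest of the frame, with the current
// ArtMethod* stored at SP + kCurrentMethodStackOffset (i.e. SP + 0).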
void CodeGeneratorMIPS64::GenerateFrameEntry() {
  __ Bind(&frame_entry_label_);

  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod();

  if (do_overflow_check) {
    __ LoadFromOffset(kLoadWord,
                      ZERO,
                      SP,
                      -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64)));
    RecordPcInfo(nullptr, 0);
  }

  // TODO: anything related to T9/GP/GOT/PIC/.so's?

  if (HasEmptyFrame()) {
    return;
  }

  // Make sure the frame size isn't unreasonably large. Per the various APIs
  // it looks like it should always be less than 2GB in size, which allows
  // us to use 32-bit signed offsets from the stack pointer.
  if (GetFrameSize() > 0x7FFFFFFF)
    LOG(FATAL) << "Stack frame larger than 2GB";

  // Spill callee-saved registers.
  // Note that their cumulative size is small and they can be indexed using
  // 16-bit offsets.

  // TODO: increment/decrement SP in one step instead of two or remove this comment.

  uint32_t ofs = FrameEntrySpillSize();
  __ IncreaseFrameSize(ofs);

  for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
    GpuRegister reg = kCoreCalleeSaves[i];
    if (allocated_registers_.ContainsCoreRegister(reg)) {
      ofs -= kMips64WordSize;
      __ Sd(reg, SP, ofs);
      __ cfi().RelOffset(DWARFReg(reg), ofs);
    }
  }

  for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
    FpuRegister reg = kFpuCalleeSaves[i];
    if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
      ofs -= kMips64WordSize;
      __ Sdc1(reg, SP, ofs);
      // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
    }
  }

  // Allocate the rest of the frame and store the current method pointer
  // at its end.

  __ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());

  static_assert(IsInt<16>(kCurrentMethodStackOffset),
                "kCurrentMethodStackOffset must fit into int16_t");
  __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
}

void CodeGeneratorMIPS64::GenerateFrameExit() {
  __ cfi().RememberState();

  // TODO: anything related to T9/GP/GOT/PIC/.so's?

  if (!HasEmptyFrame()) {
    // Deallocate the rest of the frame.

    __ DecreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());

    // Restore callee-saved registers.
    // Note that their cumulative size is small and they can be indexed using
    // 16-bit offsets.

    // TODO: increment/decrement SP in one step instead of two or remove this comment.

    uint32_t ofs = 0;

    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
      FpuRegister reg = kFpuCalleeSaves[i];
      if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
        __ Ldc1(reg, SP, ofs);
        ofs += kMips64WordSize;
        // TODO: __ cfi().Restore(DWARFReg(reg));
      }
    }

    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
      GpuRegister reg = kCoreCalleeSaves[i];
      if (allocated_registers_.ContainsCoreRegister(reg)) {
        __ Ld(reg, SP, ofs);
        ofs += kMips64WordSize;
        __ cfi().Restore(DWARFReg(reg));
      }
    }

    DCHECK_EQ(ofs, FrameEntrySpillSize());
    __ DecreaseFrameSize(ofs);
  }

  __ Jr(RA);

  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}

void CodeGeneratorMIPS64::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

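// Moves a value between two arbitrary locations (GPR, FPR, stack slot or
// constant), using dst_type to choose between 32-bit and 64-bit transfers.
// AT and TMP serve as staging registers where no direct move exists.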
void CodeGeneratorMIPS64::MoveLocation(Location destination,
                                       Location source,
                                       Primitive::Type dst_type) {
  if (source.Equals(destination)) {
    return;
  }

  // A valid move can always be inferred from the destination and source
  // locations. When moving from and to a register, the argument type can be
  // used to generate 32bit instead of 64bit moves.
  bool unspecified_type = (dst_type == Primitive::kPrimVoid);
  DCHECK_EQ(unspecified_type, false);

  if (destination.IsRegister() || destination.IsFpuRegister()) {
    if (unspecified_type) {
      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
      if (source.IsStackSlot() ||
          (src_cst != nullptr && (src_cst->IsIntConstant()
                                  || src_cst->IsFloatConstant()
                                  || src_cst->IsNullConstant()))) {
        // For stack slots and 32bit constants, a 32bit type is appropriate.
        dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
      } else {
        // If the source is a double stack slot or a 64bit constant, a 64bit
        // type is appropriate. Else the source is a register, and since the
        // type has not been specified, we choose a 64bit type to force a 64bit
        // move.
        dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
      }
    }
    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) ||
           (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type)));
    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
      // Move to GPR/FPR from stack
      LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
      if (Primitive::IsFloatingPointType(dst_type)) {
        __ LoadFpuFromOffset(load_type,
                             destination.AsFpuRegister<FpuRegister>(),
                             SP,
                             source.GetStackIndex());
      } else {
        // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
        __ LoadFromOffset(load_type,
                          destination.AsRegister<GpuRegister>(),
                          SP,
                          source.GetStackIndex());
      }
    } else if (source.IsConstant()) {
      // Move to GPR/FPR from constant
      GpuRegister gpr = AT;
      if (!Primitive::IsFloatingPointType(dst_type)) {
        gpr = destination.AsRegister<GpuRegister>();
      }
      if (dst_type == Primitive::kPrimInt || dst_type == Primitive::kPrimFloat) {
        __ LoadConst32(gpr, GetInt32ValueOf(source.GetConstant()->AsConstant()));
      } else {
        __ LoadConst64(gpr, GetInt64ValueOf(source.GetConstant()->AsConstant()));
      }
      if (dst_type == Primitive::kPrimFloat) {
        __ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
      } else if (dst_type == Primitive::kPrimDouble) {
        __ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
      }
    } else if (source.IsRegister()) {
      if (destination.IsRegister()) {
        // Move to GPR from GPR
        __ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
      } else {
        DCHECK(destination.IsFpuRegister());
        if (Primitive::Is64BitType(dst_type)) {
          __ Dmtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
        } else {
          __ Mtc1(source.AsRegister<GpuRegister>(), destination.AsFpuRegister<FpuRegister>());
        }
      }
    } else if (source.IsFpuRegister()) {
      if (destination.IsFpuRegister()) {
        // Move to FPR from FPR
        if (dst_type == Primitive::kPrimFloat) {
          __ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
        } else {
          DCHECK_EQ(dst_type, Primitive::kPrimDouble);
          __ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
        }
      } else {
        DCHECK(destination.IsRegister());
        if (Primitive::Is64BitType(dst_type)) {
          __ Dmfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
        } else {
          __ Mfc1(destination.AsRegister<GpuRegister>(), source.AsFpuRegister<FpuRegister>());
        }
      }
    }
  } else {  // The destination is not a register. It must be a stack slot.
    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
          dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
          dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) &&
             (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type)));
      // Move to stack from GPR/FPR
      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
      if (source.IsRegister()) {
        __ StoreToOffset(store_type,
                         source.AsRegister<GpuRegister>(),
                         SP,
                         destination.GetStackIndex());
      } else {
        __ StoreFpuToOffset(store_type,
                            source.AsFpuRegister<FpuRegister>(),
                            SP,
                            destination.GetStackIndex());
      }
    } else if (source.IsConstant()) {
      // Move to stack from constant
      HConstant* src_cst = source.GetConstant();
      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
      if (destination.IsStackSlot()) {
        __ LoadConst32(TMP, GetInt32ValueOf(src_cst->AsConstant()));
      } else {
        __ LoadConst64(TMP, GetInt64ValueOf(src_cst->AsConstant()));
      }
      __ StoreToOffset(store_type, TMP, SP, destination.GetStackIndex());
    } else {
      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
      DCHECK_EQ(source.IsDoubleStackSlot(), destination.IsDoubleStackSlot());
      // Move to stack from stack
      if (destination.IsStackSlot()) {
        __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
        __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
      } else {
        __ LoadFromOffset(kLoadDoubleword, TMP, SP, source.GetStackIndex());
        __ StoreToOffset(kStoreDoubleword, TMP, SP, destination.GetStackIndex());
      }
    }
  }
}

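// Exchanges the contents of two locations: GPR pairs go through TMP, FPR
// pairs through TMP and AT, register/stack pairs stage the stack value in
// TMP, and stack/stack pairs are delegated to the parallel move resolver.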
void CodeGeneratorMIPS64::SwapLocations(Location loc1,
                                        Location loc2,
                                        Primitive::Type type ATTRIBUTE_UNUSED) {
  DCHECK(!loc1.IsConstant());
  DCHECK(!loc2.IsConstant());

  if (loc1.Equals(loc2)) {
    return;
  }

  bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
  bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
  bool is_fp_reg1 = loc1.IsFpuRegister();
  bool is_fp_reg2 = loc2.IsFpuRegister();

  if (loc2.IsRegister() && loc1.IsRegister()) {
    // Swap 2 GPRs
    GpuRegister r1 = loc1.AsRegister<GpuRegister>();
    GpuRegister r2 = loc2.AsRegister<GpuRegister>();
    __ Move(TMP, r2);
    __ Move(r2, r1);
    __ Move(r1, TMP);
  } else if (is_fp_reg2 && is_fp_reg1) {
    // Swap 2 FPRs
    FpuRegister r1 = loc1.AsFpuRegister<FpuRegister>();
    FpuRegister r2 = loc2.AsFpuRegister<FpuRegister>();
    // TODO: Can MOV.S/MOV.D be used here to save one instruction?
    // Need to distinguish float from double, right?
    __ Dmfc1(TMP, r2);
    __ Dmfc1(AT, r1);
    __ Dmtc1(TMP, r1);
    __ Dmtc1(AT, r2);
  } else if (is_slot1 != is_slot2) {
    // Swap GPR/FPR and stack slot
    Location reg_loc = is_slot1 ? loc2 : loc1;
    Location mem_loc = is_slot1 ? loc1 : loc2;
    LoadOperandType load_type = mem_loc.IsStackSlot() ? kLoadWord : kLoadDoubleword;
    StoreOperandType store_type = mem_loc.IsStackSlot() ? kStoreWord : kStoreDoubleword;
    // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
    __ LoadFromOffset(load_type, TMP, SP, mem_loc.GetStackIndex());
    if (reg_loc.IsFpuRegister()) {
      __ StoreFpuToOffset(store_type,
                          reg_loc.AsFpuRegister<FpuRegister>(),
                          SP,
                          mem_loc.GetStackIndex());
      // TODO: review this MTC1/DMTC1 move
      if (mem_loc.IsStackSlot()) {
        __ Mtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
      } else {
        DCHECK(mem_loc.IsDoubleStackSlot());
        __ Dmtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
      }
    } else {
      __ StoreToOffset(store_type, reg_loc.AsRegister<GpuRegister>(), SP, mem_loc.GetStackIndex());
      __ Move(reg_loc.AsRegister<GpuRegister>(), TMP);
    }
  } else if (is_slot1 && is_slot2) {
    move_resolver_.Exchange(loc1.GetStackIndex(),
                            loc2.GetStackIndex(),
                            loc1.IsDoubleStackSlot());
  } else {
    LOG(FATAL) << "Unimplemented swap between locations " << loc1 << " and " << loc2;
  }
}

void CodeGeneratorMIPS64::Move(HInstruction* instruction,
                               Location location,
                               HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type type = instruction->GetType();
  DCHECK_NE(type, Primitive::kPrimVoid);

  if (instruction->IsCurrentMethod()) {
    MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset), type);
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  } else if (instruction->IsIntConstant()
             || instruction->IsLongConstant()
             || instruction->IsNullConstant()) {
    if (location.IsRegister()) {
      // Move to GPR from constant
      GpuRegister dst = location.AsRegister<GpuRegister>();
      if (instruction->IsNullConstant() || instruction->IsIntConstant()) {
        __ LoadConst32(dst, GetInt32ValueOf(instruction->AsConstant()));
      } else {
        __ LoadConst64(dst, instruction->AsLongConstant()->GetValue());
      }
    } else {
      DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
      // Move to stack from constant
      if (location.IsStackSlot()) {
        __ LoadConst32(TMP, GetInt32ValueOf(instruction->AsConstant()));
        __ StoreToOffset(kStoreWord, TMP, SP, location.GetStackIndex());
      } else {
        __ LoadConst64(TMP, instruction->AsLongConstant()->GetValue());
        __ StoreToOffset(kStoreDoubleword, TMP, SP, location.GetStackIndex());
      }
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    MoveLocation(location, temp_location, type);
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    if (Primitive::Is64BitType(type)) {
      MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
    } else {
      MoveLocation(location, Location::StackSlot(stack_slot), type);
    }
  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    MoveLocation(location, locations->Out(), type);
  }
}

void CodeGeneratorMIPS64::MoveConstant(Location location, int32_t value) {
  DCHECK(location.IsRegister());
  __ LoadConst32(location.AsRegister<GpuRegister>(), value);
}

void CodeGeneratorMIPS64::AddLocationAsTemp(Location location, LocationSummary* locations) {
  if (location.IsRegister()) {
    locations->AddTemp(location);
  } else {
    UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
  }
}

Location CodeGeneratorMIPS64::GetStackLocation(HLoadLocal* load) const {
  Primitive::Type type = load->GetType();

  switch (type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << type;
  }

  LOG(FATAL) << "Unreachable";
  return Location::NoLocation();
}

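// Write barrier: dirties the card covering `object` after a reference store,
// computing card_table_base + (object >> kCardShift) and storing the low byte
// of the card table base there (ART biases the card table base so this byte
// value marks the card dirty). Skipped when the stored value is null.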
void CodeGeneratorMIPS64::MarkGCCard(GpuRegister object, GpuRegister value) {
  Label done;
  GpuRegister card = AT;
  GpuRegister temp = TMP;
  __ Beqzc(value, &done);
  __ LoadFromOffset(kLoadDoubleword,
                    card,
                    TR,
                    Thread::CardTableOffset<kMips64WordSize>().Int32Value());
  __ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
  __ Daddu(temp, card, temp);
  __ Sb(card, temp, 0);
  __ Bind(&done);
}

void CodeGeneratorMIPS64::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const {
  // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
  blocked_core_registers_[ZERO] = true;
  blocked_core_registers_[K0] = true;
  blocked_core_registers_[K1] = true;
  blocked_core_registers_[GP] = true;
  blocked_core_registers_[SP] = true;
  blocked_core_registers_[RA] = true;

  // AT and TMP(T8) are used as temporary/scratch registers
  // (similar to how AT is used by MIPS assemblers).
  blocked_core_registers_[AT] = true;
  blocked_core_registers_[TMP] = true;
  blocked_fpu_registers_[FTMP] = true;

  // Reserve suspend and thread registers.
  blocked_core_registers_[S0] = true;
  blocked_core_registers_[TR] = true;

  // Reserve T9 for function calls
  blocked_core_registers_[T9] = true;

  // TODO: review; anything else?

  // TODO: make these two for's conditional on is_baseline once
  // all the issues with register saving/restoring are sorted out.
  for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
    blocked_core_registers_[kCoreCalleeSaves[i]] = true;
  }

  for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
    blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
  }
}

Location CodeGeneratorMIPS64::AllocateFreeRegister(Primitive::Type type) const {
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type)) {
    size_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFpuRegisters);
    return Location::FpuRegisterLocation(reg);
  } else {
    size_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfGpuRegisters);
    return Location::RegisterLocation(reg);
  }
}

size_t CodeGeneratorMIPS64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreToOffset(kStoreDoubleword, GpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFromOffset(kLoadDoubleword, GpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreFpuToOffset(kStoreDoubleword, FpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFpuFromOffset(kLoadDoubleword, FpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

void CodeGeneratorMIPS64::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << GpuRegister(reg);
}

void CodeGeneratorMIPS64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << FpuRegister(reg);
}

void CodeGeneratorMIPS64::InvokeRuntime(QuickEntrypointEnum entrypoint,
                                        HInstruction* instruction,
                                        uint32_t dex_pc,
                                        SlowPathCode* slow_path) {
  InvokeRuntime(GetThreadOffset<kMips64WordSize>(entrypoint).Int32Value(),
                instruction,
                dex_pc,
                slow_path);
}

void CodeGeneratorMIPS64::InvokeRuntime(int32_t entry_point_offset,
                                        HInstruction* instruction,
                                        uint32_t dex_pc,
                                        SlowPathCode* slow_path) {
  ValidateInvokeRuntime(instruction, slow_path);
  // TODO: anything related to T9/GP/GOT/PIC/.so's?
  __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
  __ Jalr(T9);
  RecordPcInfo(instruction, dex_pc, slow_path);
}

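// Compares the class status word against kStatusInitialized and enters the
// slow path if the class is not yet initialized.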
void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
                                                                      GpuRegister class_reg) {
  __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
  __ LoadConst32(AT, mirror::Class::kStatusInitialized);
  __ Bltc(TMP, AT, slow_path->GetEntryLabel());
  // TODO: barrier needed?
  __ Bind(slow_path->GetExitLabel());
}

void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
  __ Sync(0);  // only stype 0 is supported
}

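// Polls the thread-flags halfword in the Thread object and transfers control
// to the suspend-check slow path when a suspend request is pending; otherwise
// execution falls through (or branches to `successor`).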
void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                          HBasicBlock* successor) {
  SuspendCheckSlowPathMIPS64* slow_path =
      new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS64(instruction, successor);
  codegen_->AddSlowPath(slow_path);

  __ LoadFromOffset(kLoadUnsignedHalfword,
                    TMP,
                    TR,
                    Thread::ThreadFlagsOffset<kMips64WordSize>().Int32Value());
  if (successor == nullptr) {
    __ Bnezc(TMP, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ Beqzc(TMP, codegen_->GetLabelOf(successor));
    __ B(slow_path->GetEntryLabel());
    // slow_path will return to GetLabelOf(successor).
  }
}

InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph,
                                                               CodeGeneratorMIPS64* codegen)
      : HGraphVisitor(graph),
        assembler_(codegen->GetAssembler()),
        codegen_(codegen) {}

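// Integral add/sub/and/or/xor can take an immediate right-hand side when the
// constant fits the instruction encoding: a 16-bit unsigned immediate for
// andi/ori/xori, a 16-bit signed immediate for add, and the negated constant
// for sub (emitted as an add of -imm).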
void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
  DCHECK_EQ(instruction->InputCount(), 2U);
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Primitive::Type type = instruction->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      HInstruction* right = instruction->InputAt(1);
      bool can_use_imm = false;
      if (right->IsConstant()) {
        int64_t imm = CodeGenerator::GetInt64ValueOf(right->AsConstant());
        if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
          can_use_imm = IsUint<16>(imm);
        } else if (instruction->IsAdd()) {
          can_use_imm = IsInt<16>(imm);
        } else {
          DCHECK(instruction->IsSub());
          can_use_imm = IsInt<16>(-imm);
        }
      }
      if (can_use_imm)
        locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
      else
        locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
  }
}

void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      Location rhs_location = locations->InAt(1);

      GpuRegister rhs_reg = ZERO;
      int64_t rhs_imm = 0;
      bool use_imm = rhs_location.IsConstant();
      if (use_imm) {
        rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
      } else {
        rhs_reg = rhs_location.AsRegister<GpuRegister>();
      }

      if (instruction->IsAnd()) {
        if (use_imm)
          __ Andi(dst, lhs, rhs_imm);
        else
          __ And(dst, lhs, rhs_reg);
      } else if (instruction->IsOr()) {
        if (use_imm)
          __ Ori(dst, lhs, rhs_imm);
        else
          __ Or(dst, lhs, rhs_reg);
      } else if (instruction->IsXor()) {
        if (use_imm)
          __ Xori(dst, lhs, rhs_imm);
        else
          __ Xor(dst, lhs, rhs_reg);
      } else if (instruction->IsAdd()) {
        if (type == Primitive::kPrimInt) {
          if (use_imm)
            __ Addiu(dst, lhs, rhs_imm);
          else
            __ Addu(dst, lhs, rhs_reg);
        } else {
          if (use_imm)
            __ Daddiu(dst, lhs, rhs_imm);
          else
            __ Daddu(dst, lhs, rhs_reg);
        }
      } else {
        DCHECK(instruction->IsSub());
        if (type == Primitive::kPrimInt) {
          if (use_imm)
            __ Addiu(dst, lhs, -rhs_imm);
          else
            __ Subu(dst, lhs, rhs_reg);
        } else {
          if (use_imm)
            __ Daddiu(dst, lhs, -rhs_imm);
          else
            __ Dsubu(dst, lhs, rhs_reg);
        }
      }
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (instruction->IsAdd()) {
        if (type == Primitive::kPrimFloat)
          __ AddS(dst, lhs, rhs);
        else
          __ AddD(dst, lhs, rhs);
      } else if (instruction->IsSub()) {
        if (type == Primitive::kPrimFloat)
          __ SubS(dst, lhs, rhs);
        else
          __ SubD(dst, lhs, rhs);
      } else {
        LOG(FATAL) << "Unexpected floating-point binary operation";
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected binary operation type " << type;
  }
}

void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift type " << type;
  }
}

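// MIPS64 encodes immediate shift amounts in 5 bits, so 64-bit immediate
// shifts use two instruction forms: Dsll/Dsra/Dsrl for amounts below 32 and
// Dsll32/Dsra32/Dsrl32 for amounts of 32 or more (which shift by
// shift_value - 32).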
void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
  LocationSummary* locations = instr->GetLocations();
  Primitive::Type type = instr->GetType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      Location rhs_location = locations->InAt(1);

      GpuRegister rhs_reg = ZERO;
      int64_t rhs_imm = 0;
      bool use_imm = rhs_location.IsConstant();
      if (use_imm) {
        rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
      } else {
        rhs_reg = rhs_location.AsRegister<GpuRegister>();
      }

      if (use_imm) {
        uint32_t shift_value = (type == Primitive::kPrimInt)
            ? static_cast<uint32_t>(rhs_imm & kMaxIntShiftValue)
            : static_cast<uint32_t>(rhs_imm & kMaxLongShiftValue);

        if (type == Primitive::kPrimInt) {
          if (instr->IsShl()) {
            __ Sll(dst, lhs, shift_value);
          } else if (instr->IsShr()) {
            __ Sra(dst, lhs, shift_value);
          } else {
            __ Srl(dst, lhs, shift_value);
          }
        } else {
          if (shift_value < 32) {
            if (instr->IsShl()) {
              __ Dsll(dst, lhs, shift_value);
            } else if (instr->IsShr()) {
              __ Dsra(dst, lhs, shift_value);
            } else {
              __ Dsrl(dst, lhs, shift_value);
            }
          } else {
            shift_value -= 32;
            if (instr->IsShl()) {
              __ Dsll32(dst, lhs, shift_value);
            } else if (instr->IsShr()) {
              __ Dsra32(dst, lhs, shift_value);
            } else {
              __ Dsrl32(dst, lhs, shift_value);
            }
          }
        }
      } else {
        if (type == Primitive::kPrimInt) {
          if (instr->IsShl()) {
            __ Sllv(dst, lhs, rhs_reg);
          } else if (instr->IsShr()) {
            __ Srav(dst, lhs, rhs_reg);
          } else {
            __ Srlv(dst, lhs, rhs_reg);
          }
        } else {
          if (instr->IsShl()) {
            __ Dsllv(dst, lhs, rhs_reg);
          } else if (instr->IsShr()) {
            __ Dsrav(dst, lhs, rhs_reg);
          } else {
            __ Dsrlv(dst, lhs, rhs_reg);
          }
        }
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift operation type " << type;
  }
}

void LocationsBuilderMIPS64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

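// Each array load below computes the element address as
//   obj + data_offset + (index << TIMES_n),
// with n matching the element size; for constant indices the entire offset
// folds into the load instruction's immediate field.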
1317void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
1318 LocationSummary* locations = instruction->GetLocations();
1319 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1320 Location index = locations->InAt(1);
1321 Primitive::Type type = instruction->GetType();
1322
1323 switch (type) {
1324 case Primitive::kPrimBoolean: {
1325 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
1326 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1327 if (index.IsConstant()) {
1328 size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
        __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
        __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      LoadOperandType load_type = (type == Primitive::kPrimNot) ? kLoadUnsignedWord : kLoadWord;
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(load_type, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(load_type, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFromOffset(kLoadDoubleword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFpuFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
        __ Daddu(TMP, obj, TMP);
        __ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}
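
// Addressing sketch (illustrative note, not part of the original file): an
// array element at a register index lives at obj + (index << scale) +
// data_offset. For an int[] load (scale TIMES_4) the non-constant path above
// is equivalent to (index_reg is a hypothetical name for the index register):
//
//   __ Dsll(TMP, index_reg, TIMES_4);                      // TMP = index * sizeof(int32_t)
//   __ Daddu(TMP, obj, TMP);                               // TMP = obj + index * 4
//   __ LoadFromOffset(kLoadWord, out, TMP, data_offset);   // out = obj->data[index]
//
// With a constant index the shift and add fold into a single immediate
// displacement, which is why the IsConstant() path issues just one load.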

void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  __ LoadFromOffset(kLoadWord, out, obj, offset);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) {
  bool needs_runtime_call = instruction->NeedsTypeCheck();
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction,
      needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
  if (needs_runtime_call) {
    InvokeRuntimeCallingConvention calling_convention;
    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
    if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
      locations->SetInAt(2, Location::RequiresFpuRegister());
    } else {
      locations->SetInAt(2, Location::RequiresRegister());
    }
  }
}

void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  Location index = locations->InAt(1);
  Primitive::Type value_type = instruction->GetComponentType();
  bool needs_runtime_call = locations->WillCall();
  bool needs_write_barrier =
      CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());

  switch (value_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ StoreToOffset(kStoreByte, value, obj, offset);
      } else {
        __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
        __ StoreToOffset(kStoreByte, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort:
    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ StoreToOffset(kStoreHalfword, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
        __ Daddu(TMP, obj, TMP);
        __ StoreToOffset(kStoreHalfword, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      if (!needs_runtime_call) {
        uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
        GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
        if (index.IsConstant()) {
          size_t offset =
              (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
          __ StoreToOffset(kStoreWord, value, obj, offset);
        } else {
          DCHECK(index.IsRegister()) << index;
          __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
          __ Daddu(TMP, obj, TMP);
          __ StoreToOffset(kStoreWord, value, TMP, data_offset);
        }
        codegen_->MaybeRecordImplicitNullCheck(instruction);
        if (needs_write_barrier) {
          DCHECK_EQ(value_type, Primitive::kPrimNot);
          codegen_->MarkGCCard(obj, value);
        }
      } else {
        DCHECK_EQ(value_type, Primitive::kPrimNot);
        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                                instruction,
                                instruction->GetDexPc(),
                                nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreToOffset(kStoreDoubleword, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      DCHECK(locations->InAt(2).IsFpuRegister());
      FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreFpuToOffset(kStoreWord, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
        __ Daddu(TMP, obj, TMP);
        __ StoreFpuToOffset(kStoreWord, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      DCHECK(locations->InAt(2).IsFpuRegister());
      FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreFpuToOffset(kStoreDoubleword, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ StoreFpuToOffset(kStoreDoubleword, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }

  // Implicit null checks for int and object stores were already recorded
  // inside the switch above; record one here for the remaining types.
  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}
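
// Write-barrier sketch (illustrative, not part of the original file): storing
// a reference into an array must mark the card covering the array so a later
// GC rescans it. Conceptually, MarkGCCard(obj, value) performs (names here
// are hypothetical):
//
//   if (value != null) {
//     card_table[obj >> kCardShift] = kCardDirty;
//   }
//
// The runtime-call path (pAputObject) is taken instead when the store needs a
// dynamic component-type check that could not be eliminated at compile time.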

void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  BoundsCheckSlowPathMIPS64* slow_path =
      new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);

  GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister length = locations->InAt(1).AsRegister<GpuRegister>();

  // length is limited by the maximum positive signed 32-bit integer.
  // Unsigned comparison of length and index checks for index < 0
  // and for length <= index simultaneously.
  // MIPS R6 requires lhs != rhs for compact branches.
  if (index == length) {
    __ B(slow_path->GetEntryLabel());
  } else {
    __ Bgeuc(index, length, slow_path->GetEntryLabel());
  }
}
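
// Bounds-check sketch (illustrative): because an array length is always
// non-negative and fits in a signed 32-bit integer, one unsigned comparison
// folds both signed checks together. In C terms:
//
//   if (static_cast<uint32_t>(index) >= static_cast<uint32_t>(length)) {
//     // slow path: throw ArrayIndexOutOfBoundsException
//   }
//
// A negative index reinterprets as a huge unsigned value and so also fails
// the unsigned test; this is why the single Bgeuc above covers both
// index < 0 and index >= length.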

void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction,
      LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  // Note that TypeCheckSlowPathMIPS64 uses this register too.
  locations->AddTemp(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
  GpuRegister obj_cls = locations->GetTemp(0).AsRegister<GpuRegister>();

  SlowPathCodeMIPS64* slow_path =
      new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);

  // TODO: avoid this check if we know obj is not null.
  __ Beqzc(obj, slow_path->GetExitLabel());
  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadUnsignedWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
  __ Bnec(obj_cls, cls, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  if (check->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
  // We assume the class is not null.
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
      check->GetLoadClass(),
      check,
      check->GetDexPc(),
      true);
  codegen_->AddSlowPath(slow_path);
  GenerateClassInitializationCheck(slow_path,
                                   check->GetLocations()->InAt(0).AsRegister<GpuRegister>());
}

void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) {
  Primitive::Type in_type = compare->InputAt(0)->GetType();

  LocationSummary::CallKind call_kind = Primitive::IsFloatingPointType(in_type)
      ? LocationSummary::kCall
      : LocationSummary::kNoCall;

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, call_kind);

  switch (in_type) {
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
      locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type in_type = instruction->InputAt(0)->GetType();

  // 0 if: left == right
  // 1 if: left > right
  // -1 if: left < right
  switch (in_type) {
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
      // TODO: more efficient (direct) comparison with a constant.
      __ Slt(TMP, lhs, rhs);
      __ Slt(dst, rhs, lhs);
      __ Subu(dst, dst, TMP);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      int32_t entry_point_offset;
      if (in_type == Primitive::kPrimFloat) {
        entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgFloat)
                                                     : QUICK_ENTRY_POINT(pCmplFloat);
      } else {
        entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgDouble)
                                                     : QUICK_ENTRY_POINT(pCmplDouble);
      }
      codegen_->InvokeRuntime(entry_point_offset, instruction, instruction->GetDexPc(), nullptr);
      break;
    }

    default:
      LOG(FATAL) << "Unimplemented compare type " << in_type;
  }
}
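
// Compare lowering sketch (illustrative): with only set-on-less-than
// available, the three-way long compare above computes
//
//   TMP = (lhs < rhs);   // Slt
//   dst = (rhs < lhs);   // Slt
//   dst = dst - TMP;     // Subu: -1, 0, or +1
//
// i.e. dst = sign(lhs - rhs), branch-free.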

void LocationsBuilderMIPS64::VisitCondition(HCondition* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (instruction->NeedsMaterialization()) {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorMIPS64::VisitCondition(HCondition* instruction) {
  if (!instruction->NeedsMaterialization()) {
    return;
  }

  // TODO: generalize to long.
  DCHECK_NE(instruction->InputAt(0)->GetType(), Primitive::kPrimLong);

  LocationSummary* locations = instruction->GetLocations();

  GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
  GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
  Location rhs_location = locations->InAt(1);

  GpuRegister rhs_reg = ZERO;
  int64_t rhs_imm = 0;
  bool use_imm = rhs_location.IsConstant();
  if (use_imm) {
    rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
  } else {
    rhs_reg = rhs_location.AsRegister<GpuRegister>();
  }

  IfCondition if_cond = instruction->GetCondition();

  switch (if_cond) {
    case kCondEQ:
    case kCondNE:
      if (use_imm && IsUint<16>(rhs_imm)) {
        __ Xori(dst, lhs, rhs_imm);
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Xor(dst, lhs, rhs_reg);
      }
      if (if_cond == kCondEQ) {
        __ Sltiu(dst, dst, 1);
      } else {
        __ Sltu(dst, ZERO, dst);
      }
      break;

    case kCondLT:
    case kCondGE:
      if (use_imm && IsInt<16>(rhs_imm)) {
        __ Slti(dst, lhs, rhs_imm);
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Slt(dst, lhs, rhs_reg);
      }
      if (if_cond == kCondGE) {
        // Simulate lhs >= rhs via !(lhs < rhs) since there's
        // only the slt instruction but no sge.
        __ Xori(dst, dst, 1);
      }
      break;

    case kCondLE:
    case kCondGT:
      if (use_imm && IsInt<16>(rhs_imm + 1)) {
        // Simulate lhs <= rhs via lhs < rhs + 1.
        __ Slti(dst, lhs, rhs_imm + 1);
        if (if_cond == kCondGT) {
          // Simulate lhs > rhs via !(lhs <= rhs) since there's
          // only the slti instruction but no sgti.
          __ Xori(dst, dst, 1);
        }
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Slt(dst, rhs_reg, lhs);
        if (if_cond == kCondLE) {
          // Simulate lhs <= rhs via !(rhs < lhs) since there's
          // only the slt instruction but no sle.
          __ Xori(dst, dst, 1);
        }
      }
      break;

    case kCondB:
    case kCondAE:
      if (use_imm && 0 <= rhs_imm && rhs_imm <= 0x7fff) {
        __ Sltiu(dst, lhs, rhs_imm);
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Sltu(dst, lhs, rhs_reg);
      }
      if (if_cond == kCondAE) {
        // Simulate lhs >= rhs via !(lhs < rhs) since there's
        // only the sltu instruction but no sgeu.
        __ Xori(dst, dst, 1);
      }
      break;

    case kCondBE:
    case kCondA:
      if (use_imm && 0 <= rhs_imm && rhs_imm <= 0x7ffe) {
        // Simulate lhs <= rhs via lhs < rhs + 1.
        __ Sltiu(dst, lhs, rhs_imm + 1);
        if (if_cond == kCondA) {
          // Simulate lhs > rhs via !(lhs <= rhs) since there's
          // only the sltiu instruction but no sgtiu.
          __ Xori(dst, dst, 1);
        }
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Sltu(dst, rhs_reg, lhs);
        if (if_cond == kCondBE) {
          // Simulate lhs <= rhs via !(rhs < lhs) since there's
          // only the sltu instruction but no sleu.
          __ Xori(dst, dst, 1);
        }
      }
      break;
  }
}
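
// Materialization sketch (illustrative): the identities used above, written
// as C expressions over 32-bit operands:
//
//   eq: dst = (lhs ^ rhs) < 1u;   // xor, then sltiu dst, dst, 1
//   ne: dst = 0u < (lhs ^ rhs);   // xor, then sltu dst, zero, dst
//   ge: dst = !(lhs < rhs);       // slt, then xori dst, dst, 1
//   le: dst = (lhs < imm + 1);    // slti with imm + 1, if imm + 1 fits
//
// Only slt/sltu (and their immediate forms) exist, so every other relation
// is derived by swapping operands, bumping the immediate by one, or negating
// the result with xori 1.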

void InstructionCodeGeneratorMIPS64::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  Primitive::Type type = instruction->GetResultType();

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
  int64_t imm = Int64FromConstant(second.GetConstant());
  DCHECK(imm == 1 || imm == -1);

  if (instruction->IsRem()) {
    __ Move(out, ZERO);
  } else {
    if (imm == -1) {
      if (type == Primitive::kPrimInt) {
        __ Subu(out, ZERO, dividend);
      } else {
        DCHECK_EQ(type, Primitive::kPrimLong);
        __ Dsubu(out, ZERO, dividend);
      }
    } else if (out != dividend) {
      __ Move(out, dividend);
    }
  }
}

void InstructionCodeGeneratorMIPS64::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  Primitive::Type type = instruction->GetResultType();

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
  int64_t imm = Int64FromConstant(second.GetConstant());
  uint64_t abs_imm = static_cast<uint64_t>(std::abs(imm));
  DCHECK(IsPowerOfTwo(abs_imm));
  int ctz_imm = CTZ(abs_imm);

  if (instruction->IsDiv()) {
    if (type == Primitive::kPrimInt) {
      if (ctz_imm == 1) {
        // Fast path for division by +/-2, which is very common.
        __ Srl(TMP, dividend, 31);
      } else {
        __ Sra(TMP, dividend, 31);
        __ Srl(TMP, TMP, 32 - ctz_imm);
      }
      // TMP now holds the rounding bias: abs_imm - 1 for negative dividends,
      // 0 otherwise, so the arithmetic shift below rounds toward zero.
      __ Addu(out, dividend, TMP);
      __ Sra(out, out, ctz_imm);
      if (imm < 0) {
        __ Subu(out, ZERO, out);
      }
    } else {
      DCHECK_EQ(type, Primitive::kPrimLong);
      if (ctz_imm == 1) {
        // Fast path for division by +/-2, which is very common.
        __ Dsrl32(TMP, dividend, 31);
      } else {
        __ Dsra32(TMP, dividend, 31);
        if (ctz_imm > 32) {
          __ Dsrl(TMP, TMP, 64 - ctz_imm);
        } else {
          __ Dsrl32(TMP, TMP, 32 - ctz_imm);
        }
      }
      __ Daddu(out, dividend, TMP);
      if (ctz_imm < 32) {
        __ Dsra(out, out, ctz_imm);
      } else {
        __ Dsra32(out, out, ctz_imm - 32);
      }
      if (imm < 0) {
        __ Dsubu(out, ZERO, out);
      }
    }
  } else {
    if (type == Primitive::kPrimInt) {
      if (ctz_imm == 1) {
        // Fast path for modulo +/-2, which is very common.
        __ Sra(TMP, dividend, 31);
        __ Subu(out, dividend, TMP);
        __ Andi(out, out, 1);
        __ Addu(out, out, TMP);
      } else {
        __ Sra(TMP, dividend, 31);
        __ Srl(TMP, TMP, 32 - ctz_imm);
        __ Addu(out, dividend, TMP);
        if (IsUint<16>(abs_imm - 1)) {
          __ Andi(out, out, abs_imm - 1);
        } else {
          __ Sll(out, out, 32 - ctz_imm);
          __ Srl(out, out, 32 - ctz_imm);
        }
        __ Subu(out, out, TMP);
      }
    } else {
      DCHECK_EQ(type, Primitive::kPrimLong);
      if (ctz_imm == 1) {
        // Fast path for modulo +/-2, which is very common.
        __ Dsra32(TMP, dividend, 31);
        __ Dsubu(out, dividend, TMP);
        __ Andi(out, out, 1);
        __ Daddu(out, out, TMP);
      } else {
        __ Dsra32(TMP, dividend, 31);
        if (ctz_imm > 32) {
          __ Dsrl(TMP, TMP, 64 - ctz_imm);
        } else {
          __ Dsrl32(TMP, TMP, 32 - ctz_imm);
        }
        __ Daddu(out, dividend, TMP);
        if (IsUint<16>(abs_imm - 1)) {
          __ Andi(out, out, abs_imm - 1);
        } else {
          if (ctz_imm > 32) {
            __ Dsll(out, out, 64 - ctz_imm);
            __ Dsrl(out, out, 64 - ctz_imm);
          } else {
            __ Dsll32(out, out, 32 - ctz_imm);
            __ Dsrl32(out, out, 32 - ctz_imm);
          }
        }
        __ Dsubu(out, out, TMP);
      }
    }
  }
}
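
// Power-of-two sketch (illustrative): for a signed division by 2^k the
// sequence above implements the standard round-toward-zero correction,
// equivalent to:
//
//   int32_t bias = (dividend >> 31) & ((1 << k) - 1);  // 2^k - 1 if negative, else 0
//   int32_t quot = (dividend + bias) >> k;             // arithmetic shift
//   if (imm < 0) quot = -quot;
//
// A plain arithmetic shift alone would round toward negative infinity, which
// is wrong for Java's '/' on negative dividends (e.g. -7 / 2 must be -3,
// not -4).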

void InstructionCodeGeneratorMIPS64::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());

  LocationSummary* locations = instruction->GetLocations();
  Location second = locations->InAt(1);
  DCHECK(second.IsConstant());

  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
  int64_t imm = Int64FromConstant(second.GetConstant());

  Primitive::Type type = instruction->GetResultType();
  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong) << type;

  int64_t magic;
  int shift;
  CalculateMagicAndShiftForDivRem(imm,
                                  (type == Primitive::kPrimLong),
                                  &magic,
                                  &shift);

  if (type == Primitive::kPrimInt) {
    // TMP = high 32 bits of dividend * magic.
    __ LoadConst32(TMP, magic);
    __ MuhR6(TMP, dividend, TMP);

    // Correct for the sign of the magic constant.
    if (imm > 0 && magic < 0) {
      __ Addu(TMP, TMP, dividend);
    } else if (imm < 0 && magic > 0) {
      __ Subu(TMP, TMP, dividend);
    }

    if (shift != 0) {
      __ Sra(TMP, TMP, shift);
    }

    if (instruction->IsDiv()) {
      // Add 1 if the quotient is negative, to round toward zero.
      __ Sra(out, TMP, 31);
      __ Subu(out, TMP, out);
    } else {
      // out = dividend - quotient * imm.
      __ Sra(AT, TMP, 31);
      __ Subu(AT, TMP, AT);
      __ LoadConst32(TMP, imm);
      __ MulR6(TMP, AT, TMP);
      __ Subu(out, dividend, TMP);
    }
  } else {
    // Same scheme with 64-bit arithmetic.
    __ LoadConst64(TMP, magic);
    __ Dmuh(TMP, dividend, TMP);

    if (imm > 0 && magic < 0) {
      __ Daddu(TMP, TMP, dividend);
    } else if (imm < 0 && magic > 0) {
      __ Dsubu(TMP, TMP, dividend);
    }

    if (shift >= 32) {
      __ Dsra32(TMP, TMP, shift - 32);
    } else if (shift > 0) {
      __ Dsra(TMP, TMP, shift);
    }

    if (instruction->IsDiv()) {
      __ Dsra32(out, TMP, 31);
      __ Dsubu(out, TMP, out);
    } else {
      __ Dsra32(AT, TMP, 31);
      __ Dsubu(AT, TMP, AT);
      __ LoadConst64(TMP, imm);
      __ Dmul(TMP, AT, TMP);
      __ Dsubu(out, dividend, TMP);
    }
  }
}
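
// Magic-number sketch (illustrative, in the style of Hacker's Delight): a
// division by a compile-time constant becomes a high multiply plus shifts.
// For 32-bit division by 7, for example, the values produced by
// CalculateMagicAndShiftForDivRem would be magic = 0x92492493 and shift = 2
// (these constants are an assumption here; the authoritative computation
// lives in code_generator_utils.cc), giving:
//
//   int32_t q = (int32_t)(((int64_t)n * (int32_t)0x92492493) >> 32);  // high half
//   q += n;                   // magic < 0 and imm > 0: add the dividend back
//   q >>= 2;                  // shift
//   q += (uint32_t)q >> 31;   // add 1 if q is negative (round toward zero)
//
// which yields q == n / 7 for every int32_t n, with no division instruction.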

void InstructionCodeGeneratorMIPS64::GenerateDivRemIntegral(HBinaryOperation* instruction) {
  DCHECK(instruction->IsDiv() || instruction->IsRem());
  Primitive::Type type = instruction->GetResultType();
  DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong) << type;

  LocationSummary* locations = instruction->GetLocations();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  Location second = locations->InAt(1);

  if (second.IsConstant()) {
    int64_t imm = Int64FromConstant(second.GetConstant());
    if (imm == 0) {
      // Do not generate anything. DivZeroCheck would prevent any code from being executed.
    } else if (imm == 1 || imm == -1) {
      DivRemOneOrMinusOne(instruction);
    } else if (IsPowerOfTwo(std::abs(imm))) {
      DivRemByPowerOfTwo(instruction);
    } else {
      DCHECK(imm <= -2 || imm >= 2);
      GenerateDivRemWithAnyConstant(instruction);
    }
  } else {
    GpuRegister dividend = locations->InAt(0).AsRegister<GpuRegister>();
    GpuRegister divisor = second.AsRegister<GpuRegister>();
    if (instruction->IsDiv()) {
      if (type == Primitive::kPrimInt)
        __ DivR6(out, dividend, divisor);
      else
        __ Ddiv(out, dividend, divisor);
    } else {
      if (type == Primitive::kPrimInt)
        __ ModR6(out, dividend, divisor);
      else
        __ Dmod(out, dividend, divisor);
    }
  }
}

void LocationsBuilderMIPS64::VisitDiv(HDiv* div) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
  switch (div->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}

void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      GenerateDivRemIntegral(instruction);
      break;
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (type == Primitive::kPrimFloat)
        __ DivS(dst, lhs, rhs);
      else
        __ DivD(dst, lhs, rhs);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected div type " << type;
  }
}

void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  SlowPathCodeMIPS64* slow_path =
      new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);
  Location value = instruction->GetLocations()->InAt(0);

  Primitive::Type type = instruction->GetType();

  if ((type == Primitive::kPrimBoolean) || !Primitive::IsIntegralType(type)) {
    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
    return;
  }

  if (value.IsConstant()) {
    int64_t divisor = codegen_->GetInt64ValueOf(value.GetConstant()->AsConstant());
    if (divisor == 0) {
      __ B(slow_path->GetEntryLabel());
    } else {
      // A division by a non-zero constant is valid. We don't need to perform
      // any check, so simply fall through.
    }
  } else {
    __ Beqzc(value.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
  }
}

void LocationsBuilderMIPS64::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderMIPS64::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}

void LocationsBuilderMIPS64::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void InstructionCodeGeneratorMIPS64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
  DCHECK(!successor->IsExitBlock());
  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();
  HLoopInformation* info = block->GetLoopInformation();

  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }
  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  if (!codegen_->GoesToNextBlock(block, successor)) {
    __ B(codegen_->GetLabelOf(successor));
  }
}

void LocationsBuilderMIPS64::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitGoto(HGoto* got) {
  HandleGoto(got, got->GetSuccessor());
}

void LocationsBuilderMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
  try_boundary->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
  if (!successor->IsExitBlock()) {
    HandleGoto(try_boundary, successor);
  }
}

void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruction,
                                                           Label* true_target,
                                                           Label* false_target,
                                                           Label* always_true_target) {
  HInstruction* cond = instruction->InputAt(0);
  HCondition* condition = cond->AsCondition();

  if (cond->IsIntConstant()) {
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ B(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
    }
  } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
    // The condition instruction has been materialized, compare the output to 0.
    Location cond_val = instruction->GetLocations()->InAt(0);
    DCHECK(cond_val.IsRegister());
    __ Bnezc(cond_val.AsRegister<GpuRegister>(), true_target);
  } else {
    // The condition instruction has not been materialized, use its inputs as
    // the comparison and its condition as the branch condition.
    GpuRegister lhs = condition->GetLocations()->InAt(0).AsRegister<GpuRegister>();
    Location rhs_location = condition->GetLocations()->InAt(1);
    GpuRegister rhs_reg = ZERO;
    int32_t rhs_imm = 0;
    bool use_imm = rhs_location.IsConstant();
    if (use_imm) {
      rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
    } else {
      rhs_reg = rhs_location.AsRegister<GpuRegister>();
    }

    IfCondition if_cond = condition->GetCondition();
    if (use_imm && rhs_imm == 0) {
      switch (if_cond) {
        case kCondEQ:
          __ Beqzc(lhs, true_target);
          break;
        case kCondNE:
          __ Bnezc(lhs, true_target);
          break;
        case kCondLT:
          __ Bltzc(lhs, true_target);
          break;
        case kCondGE:
          __ Bgezc(lhs, true_target);
          break;
        case kCondLE:
          __ Blezc(lhs, true_target);
          break;
        case kCondGT:
          __ Bgtzc(lhs, true_target);
          break;
        case kCondB:
          break;  // always false
        case kCondBE:
          __ Beqzc(lhs, true_target);  // <= 0 if zero
          break;
        case kCondA:
          __ Bnezc(lhs, true_target);  // > 0 if non-zero
          break;
        case kCondAE:
          __ B(true_target);  // always true
          break;
      }
    } else {
      if (use_imm) {
        rhs_reg = TMP;
        __ LoadConst32(rhs_reg, rhs_imm);
      }
      // It looks like we can get here with lhs == rhs. Should that be possible at all?
      // MIPS R6 requires lhs != rhs for compact branches.
      if (lhs == rhs_reg) {
        DCHECK(!use_imm);
        switch (if_cond) {
          case kCondEQ:
          case kCondGE:
          case kCondLE:
          case kCondBE:
          case kCondAE:
            // if lhs == rhs for a positive condition, then it is a branch
            __ B(true_target);
            break;
          case kCondNE:
          case kCondLT:
          case kCondGT:
          case kCondB:
          case kCondA:
            // if lhs == rhs for a negative condition, then it is a NOP
            break;
        }
      } else {
        switch (if_cond) {
          case kCondEQ:
            __ Beqc(lhs, rhs_reg, true_target);
            break;
          case kCondNE:
            __ Bnec(lhs, rhs_reg, true_target);
            break;
          case kCondLT:
            __ Bltc(lhs, rhs_reg, true_target);
            break;
          case kCondGE:
            __ Bgec(lhs, rhs_reg, true_target);
            break;
          case kCondLE:
            __ Bgec(rhs_reg, lhs, true_target);
            break;
          case kCondGT:
            __ Bltc(rhs_reg, lhs, true_target);
            break;
          case kCondB:
            __ Bltuc(lhs, rhs_reg, true_target);
            break;
          case kCondAE:
            __ Bgeuc(lhs, rhs_reg, true_target);
            break;
          case kCondBE:
            __ Bgeuc(rhs_reg, lhs, true_target);
            break;
          case kCondA:
            __ Bltuc(rhs_reg, lhs, true_target);
            break;
        }
      }
    }
  }
  if (false_target != nullptr) {
    __ B(false_target);
  }
}
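
// Branch-selection sketch (illustrative): R6 compact branches encode two
// registers and require rs != rt, and only the lt/ge (and their unsigned)
// forms exist, so the remaining conditions are obtained by swapping operands:
//
//   lhs <= rhs   =>   Bgec(rhs, lhs, target);   // branch if rhs >= lhs
//   lhs >  rhs   =>   Bltc(rhs, lhs, target);   // branch if rhs <  lhs
//
// The rs == rt case is resolved at compile time instead: either an
// unconditional B (EQ/GE/LE/BE/AE always hold) or no instruction at all
// (NE/LT/GT/B/A never hold).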

void LocationsBuilderMIPS64::VisitIf(HIf* if_instr) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
  HInstruction* cond = if_instr->InputAt(0);
  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
  Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
  Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
  Label* always_true_target = true_target;
  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
                                if_instr->IfTrueSuccessor())) {
    always_true_target = nullptr;
  }
  if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
                                if_instr->IfFalseSuccessor())) {
    false_target = nullptr;
  }
  GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
}

void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
  LocationSummary* locations = new (GetGraph()->GetArena())
      LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
  HInstruction* cond = deoptimize->InputAt(0);
  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
      DeoptimizationSlowPathMIPS64(deoptimize);
  codegen_->AddSlowPath(slow_path);
  Label* slow_path_entry = slow_path->GetEntryLabel();
  GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
}

void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
                                            const FieldInfo& field_info ATTRIBUTE_UNUSED) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister());
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
                                                    const FieldInfo& field_info) {
  Primitive::Type type = field_info.GetFieldType();
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  LoadOperandType load_type = kLoadUnsignedByte;
  switch (type) {
    case Primitive::kPrimBoolean:
      load_type = kLoadUnsignedByte;
      break;
    case Primitive::kPrimByte:
      load_type = kLoadSignedByte;
      break;
    case Primitive::kPrimShort:
      load_type = kLoadSignedHalfword;
      break;
    case Primitive::kPrimChar:
      load_type = kLoadUnsignedHalfword;
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      load_type = kLoadWord;
      break;
    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      load_type = kLoadDoubleword;
      break;
    case Primitive::kPrimNot:
      load_type = kLoadUnsignedWord;
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
      UNREACHABLE();
  }
  if (!Primitive::IsFloatingPointType(type)) {
    DCHECK(locations->Out().IsRegister());
    GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
    __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
  } else {
    DCHECK(locations->Out().IsFpuRegister());
    FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
    __ LoadFpuFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
  }

  codegen_->MaybeRecordImplicitNullCheck(instruction);
  // TODO: memory barrier?
}

void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
                                            const FieldInfo& field_info ATTRIBUTE_UNUSED) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
    locations->SetInAt(1, Location::RequiresFpuRegister());
  } else {
    locations->SetInAt(1, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
                                                    const FieldInfo& field_info) {
  Primitive::Type type = field_info.GetFieldType();
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  StoreOperandType store_type = kStoreByte;
  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
      store_type = kStoreByte;
      break;
    case Primitive::kPrimShort:
    case Primitive::kPrimChar:
      store_type = kStoreHalfword;
      break;
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
    case Primitive::kPrimNot:
      store_type = kStoreWord;
      break;
    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      store_type = kStoreDoubleword;
      break;
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << type;
      UNREACHABLE();
  }
  if (!Primitive::IsFloatingPointType(type)) {
    DCHECK(locations->InAt(1).IsRegister());
    GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
    __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
  } else {
    DCHECK(locations->InAt(1).IsFpuRegister());
    FpuRegister src = locations->InAt(1).AsFpuRegister<FpuRegister>();
    __ StoreFpuToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
  }

  codegen_->MaybeRecordImplicitNullCheck(instruction);
  // TODO: memory barriers?
  if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
    DCHECK(locations->InAt(1).IsRegister());
    GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
    codegen_->MarkGCCard(obj, src);
  }
}
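
// Field-access sketch (illustrative): a field get or set is a single memory
// operation at a compile-time-constant offset, with the operand width chosen
// from the field type. For an instance int field, for example:
//
//   __ LoadFromOffset(kLoadWord, dst, obj, field_info.GetFieldOffset().Uint32Value());
//
// Note that kPrimNot uses kLoadUnsignedWord: heap references are 32 bits wide
// in ART, so they are zero-extended when loaded into a 64-bit register.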

void LocationsBuilderMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary::CallKind call_kind =
      instruction->IsExactCheck() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  // The output does overlap inputs.
  // Note that TypeCheckSlowPathMIPS64 uses this register too.
  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}

void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();

  Label done;

  // Return 0 if `obj` is null.
  // TODO: Avoid this check if we know `obj` is not null.
  __ Move(out, ZERO);
  __ Beqzc(obj, &done);

  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadUnsignedWord, out, obj, mirror::Object::ClassOffset().Int32Value());
  if (instruction->IsExactCheck()) {
    // Classes must be equal for the instanceof to succeed.
    __ Xor(out, out, cls);
    __ Sltiu(out, out, 1);
  } else {
    // If the classes are not equal, we go into a slow path.
    DCHECK(locations->OnlyCallsOnSlowPath());
    SlowPathCodeMIPS64* slow_path =
        new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
    codegen_->AddSlowPath(slow_path);
    __ Bnec(out, cls, slow_path->GetEntryLabel());
    __ LoadConst32(out, 1);
    __ Bind(slow_path->GetExitLabel());
  }

  __ Bind(&done);
}

void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderMIPS64::VisitNullConstant(HNullConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderMIPS64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
  // The trampoline uses the same calling convention as dex calling conventions,
  // except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
  // the method_idx.
  HandleInvoke(invoke);
}

void InstructionCodeGeneratorMIPS64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
  codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
}

void LocationsBuilderMIPS64::HandleInvoke(HInvoke* invoke) {
  InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
}

void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
  // The register T0 is required to be used for the hidden argument in
  // art_quick_imt_conflict_trampoline, so add the hidden argument.
  invoke->GetLocations()->AddTemp(Location::RegisterLocation(T0));
}

void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
      invoke->GetImtIndex() % mirror::Class::kImtSize, kMips64PointerSize).Uint32Value();
  Location receiver = invoke->GetLocations()->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);

  // Set the hidden argument.
  __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<GpuRegister>(),
                 invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadUnsignedWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadUnsignedWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
  }
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetImtEntryAt(method_offset);
  __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
  // T9 = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
  // T9();
  __ Jalr(T9);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}
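
// Interface-dispatch sketch (illustrative): the sequence above walks
//
//   temp = receiver->klass_;                           // load class
//   temp = temp->embedded_imt_[imt_index % kImtSize];  // IMT slot
//   T9   = temp->entry_point_from_quick_compiled_code_;
//   jalr T9;                                           // method_idx in T0
//
// The hidden dex method index in T0 lets art_quick_imt_conflict_trampoline
// disambiguate the call when several interface methods hash to the same IMT
// slot (field names here paraphrase the mirror::Class/ArtMethod layout).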
2727
2728void LocationsBuilderMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
Chris Larsen3039e382015-08-26 07:54:08 -07002729 IntrinsicLocationsBuilderMIPS64 intrinsic(codegen_);
2730 if (intrinsic.TryDispatch(invoke)) {
2731 return;
2732 }
2733
Alexey Frunze4dda3372015-06-01 18:31:49 -07002734 HandleInvoke(invoke);
2735}
2736
2737void LocationsBuilderMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2738 // When we do not run baseline, explicit clinit checks triggered by static
2739 // invokes must have been pruned by art::PrepareForRegisterAllocation.
2740 DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2741
Chris Larsen3039e382015-08-26 07:54:08 -07002742 IntrinsicLocationsBuilderMIPS64 intrinsic(codegen_);
2743 if (intrinsic.TryDispatch(invoke)) {
2744 return;
2745 }
2746
Alexey Frunze4dda3372015-06-01 18:31:49 -07002747 HandleInvoke(invoke);
2748
2749 // While SetupBlockedRegisters() blocks registers S2-S8 due to their
2750 // clobbering somewhere else, reduce further register pressure by avoiding
2751 // allocation of a register for the current method pointer like on x86 baseline.
2752 // TODO: remove this once all the issues with register saving/restoring are
2753 // sorted out.
2754 LocationSummary* locations = invoke->GetLocations();
2755 Location location = locations->InAt(invoke->GetCurrentMethodInputIndex());
2756 if (location.IsUnallocated() && location.GetPolicy() == Location::kRequiresRegister) {
2757 locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::NoLocation());
2758 }
2759}
2760
Chris Larsen3039e382015-08-26 07:54:08 -07002761static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
Alexey Frunze4dda3372015-06-01 18:31:49 -07002762 if (invoke->GetLocations()->Intrinsified()) {
Chris Larsen3039e382015-08-26 07:54:08 -07002763 IntrinsicCodeGeneratorMIPS64 intrinsic(codegen);
2764 intrinsic.Dispatch(invoke);
Alexey Frunze4dda3372015-06-01 18:31:49 -07002765 return true;
2766 }
2767 return false;
2768}
2769
Vladimir Markodc151b22015-10-15 18:02:30 +01002770HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS64::GetSupportedInvokeStaticOrDirectDispatch(
2771 const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
2772 MethodReference target_method ATTRIBUTE_UNUSED) {
2773 switch (desired_dispatch_info.method_load_kind) {
2774 case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
2775 case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
2776 // TODO: Implement these types. For the moment, we fall back to kDexCacheViaMethod.
2777 return HInvokeStaticOrDirect::DispatchInfo {
2778 HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
2779 HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
2780 0u,
2781 0u
2782 };
2783 default:
2784 break;
2785 }
2786 switch (desired_dispatch_info.code_ptr_location) {
2787 case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
2788 case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
2789 // TODO: Implement these types. For the moment, we fall back to kCallArtMethod.
2790 return HInvokeStaticOrDirect::DispatchInfo {
2791 desired_dispatch_info.method_load_kind,
2792 HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
2793 desired_dispatch_info.method_load_data,
2794 0u
2795 };
2796 default:
2797 return desired_dispatch_info;
2798 }
2799}
2800
Alexey Frunze4dda3372015-06-01 18:31:49 -07002801void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
2802 // All registers are assumed to be correctly set up per the calling convention.
2803
Vladimir Marko58155012015-08-19 12:49:41 +00002804 Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
2805 switch (invoke->GetMethodLoadKind()) {
2806 case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
2807 // temp = thread->string_init_entrypoint
2808 __ LoadFromOffset(kLoadDoubleword,
2809 temp.AsRegister<GpuRegister>(),
2810 TR,
2811 invoke->GetStringInitOffset());
2812 break;
2813 case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
2814 callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
2815 break;
2816 case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
2817 __ LoadConst64(temp.AsRegister<GpuRegister>(), invoke->GetMethodAddress());
2818 break;
2819 case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
Vladimir Marko58155012015-08-19 12:49:41 +00002820 case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
Vladimir Markodc151b22015-10-15 18:02:30 +01002821 // TODO: Implement these types.
2822 // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
2823 LOG(FATAL) << "Unsupported";
2824 UNREACHABLE();
Vladimir Marko58155012015-08-19 12:49:41 +00002825 case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
2826 Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
2827 GpuRegister reg = temp.AsRegister<GpuRegister>();
2828 GpuRegister method_reg;
2829 if (current_method.IsRegister()) {
2830 method_reg = current_method.AsRegister<GpuRegister>();
2831 } else {
2832 // TODO: use the appropriate DCHECK() here if possible.
2833 // DCHECK(invoke->GetLocations()->Intrinsified());
2834 DCHECK(!current_method.IsValid());
2835 method_reg = reg;
2836 __ Ld(reg, SP, kCurrentMethodStackOffset);
2837 }
Alexey Frunze4dda3372015-06-01 18:31:49 -07002838
Vladimir Marko58155012015-08-19 12:49:41 +00002839 // temp = temp->dex_cache_resolved_methods_;
Vladimir Marko05792b92015-08-03 11:56:49 +01002840 __ LoadFromOffset(kLoadDoubleword,
Vladimir Marko58155012015-08-19 12:49:41 +00002841 reg,
2842 method_reg,
Vladimir Marko05792b92015-08-03 11:56:49 +01002843 ArtMethod::DexCacheResolvedMethodsOffset(kMips64PointerSize).Int32Value());
Vladimir Marko58155012015-08-19 12:49:41 +00002844 // temp = temp[index_in_cache]
2845 uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
2846 __ LoadFromOffset(kLoadDoubleword,
2847 reg,
2848 reg,
2849 CodeGenerator::GetCachePointerOffset(index_in_cache));
2850 break;
Alexey Frunze4dda3372015-06-01 18:31:49 -07002851 }
Alexey Frunze4dda3372015-06-01 18:31:49 -07002852 }
2853
Vladimir Marko58155012015-08-19 12:49:41 +00002854 switch (invoke->GetCodePtrLocation()) {
2855 case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
      __ Jalr(&frame_entry_label_, T9);
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
      // T9 = invoke->GetDirectCodePtr();
      __ LoadConst64(T9, invoke->GetDirectCodePtr());
      // T9()
      __ Jalr(T9);
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
      // TODO: Implement these types.
      // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
      LOG(FATAL) << "Unsupported";
      UNREACHABLE();
    case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
      // T9 = callee_method->entry_point_from_quick_compiled_code_;
      __ LoadFromOffset(kLoadDoubleword,
                        T9,
                        callee_method.AsRegister<GpuRegister>(),
                        ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                            kMips64WordSize).Int32Value());
      // T9()
      __ Jalr(T9);
      break;
  }
  DCHECK(!IsLeafMethod());
}

void InstructionCodeGeneratorMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  LocationSummary* locations = invoke->GetLocations();
  codegen_->GenerateStaticOrDirectCall(invoke,
                                       locations->HasTemps()
                                           ? locations->GetTemp(0)
                                           : Location::NoLocation());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void CodeGeneratorMIPS64::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_location) {
  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  GpuRegister temp = temp_location.AsRegister<GpuRegister>();
  size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
      invoke->GetVTableIndex(), kMips64PointerSize).SizeValue();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);

  // temp = object->GetClass();
  DCHECK(receiver.IsRegister());
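  // Heap references are 32 bits wide, hence the unsigned 32-bit load of the class pointer.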
  __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
  MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
  // T9 = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
  // T9();
  __ Jalr(T9);
}

void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
  InvokeRuntimeCallingConvention calling_convention;
  CodeGenerator::CreateLoadClassLocationSummary(
      cls,
      Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
      Location::RegisterLocation(A0));
}

void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
  LocationSummary* locations = cls->GetLocations();
  if (cls->NeedsAccessCheck()) {
    codegen_->MoveConstant(locations->GetTemp(0), cls->GetTypeIndex());
    codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
                            cls,
                            cls->GetDexPc(),
                            nullptr);
    return;
  }

  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
  if (cls->IsReferrersClass()) {
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
                      ArtMethod::DeclaringClassOffset().Int32Value());
  } else {
    DCHECK(cls->CanCallRuntime());
    __ LoadFromOffset(kLoadDoubleword, out, current_method,
                      ArtMethod::DexCacheResolvedTypesOffset(kMips64PointerSize).Int32Value());
    __ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
    // TODO: We will need a read barrier here.
    SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
        cls,
        cls,
        cls->GetDexPc(),
        cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
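    // A null entry means the type is not yet resolved; resolve it on the slow path.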
    __ Beqzc(out, slow_path->GetEntryLabel());
    if (cls->MustGenerateClinitCheck()) {
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}

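// Returns the offset of the pending exception field in the Thread's TLS area.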
static int32_t GetExceptionTlsOffset() {
  return Thread::ExceptionOffset<kMips64WordSize>().Int32Value();
}

void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) {
  GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>();
  __ LoadFromOffset(kLoadUnsignedWord, out, TR, GetExceptionTlsOffset());
}

void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) {
  new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
}

void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
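  // The exception field holds a 32-bit heap reference, so a word store of ZERO clears it.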
  __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
}

void LocationsBuilderMIPS64::VisitLoadLocal(HLoadLocal* load) {
  load->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
  // Nothing to do, this is driven by the code generator.
}

void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = load->GetLocations();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
  __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
                    ArtMethod::DeclaringClassOffset().Int32Value());
  __ LoadFromOffset(kLoadDoubleword, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
  __ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
  // TODO: We will need a read barrier here.
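  // A null entry means the string is not yet resolved; resolve it on the slow path.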
  __ Beqzc(out, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderMIPS64::VisitLocal(HLocal* local) {
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitLocal(HLocal* local) {
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}

void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
  codegen_->InvokeRuntime(instruction->IsEnter()
                              ? QUICK_ENTRY_POINT(pLockObject)
                              : QUICK_ENTRY_POINT(pUnlockObject),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
}

void LocationsBuilderMIPS64::VisitMul(HMul* mul) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

void InstructionCodeGeneratorMIPS64::VisitMul(HMul* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
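      // MUL (R6) produces the low 32 bits of the product; DMUL produces the low 64 bits.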
      if (type == Primitive::kPrimInt)
        __ MulR6(dst, lhs, rhs);
      else
        __ Dmul(dst, lhs, rhs);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (type == Primitive::kPrimFloat)
        __ MulS(dst, lhs, rhs);
      else
        __ MulD(dst, lhs, rhs);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected mul type " << type;
  }
}

void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

void InstructionCodeGeneratorMIPS64::VisitNeg(HNeg* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
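      // MIPS has no dedicated GPR negate instruction; subtract from ZERO instead.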
      if (type == Primitive::kPrimInt)
        __ Subu(dst, ZERO, src);
      else
        __ Dsubu(dst, ZERO, src);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
      if (type == Primitive::kPrimFloat)
        __ NegS(dst, src);
      else
        __ NegD(dst, src);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected neg type " << type;
  }
}

void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
}

void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  // Move a uint16_t value to a register.
  __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
}

void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}

void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  // Move a uint16_t value to a register.
  __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
}

void LocationsBuilderMIPS64::VisitNot(HNot* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorMIPS64::VisitNot(HNot* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
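      // nor(src, ZERO) computes ~src and works for both 32- and 64-bit operands.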
      __ Nor(dst, src, ZERO);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
  }
}

void LocationsBuilderMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
  LocationSummary* locations = instruction->GetLocations();
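  // Booleans are materialized as 0 or 1, so XOR-ing with 1 negates the value.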
  __ Xori(locations->Out().AsRegister<GpuRegister>(),
          locations->InAt(0).AsRegister<GpuRegister>(),
          1);
}

void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

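  // A load from the object's address faults if the object is null; the loaded
  // value is discarded by writing it to ZERO.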
  __ Lw(ZERO, obj.AsRegister<GpuRegister>(), 0);
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}

void InstructionCodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);

  Location obj = instruction->GetLocations()->InAt(0);

  __ Beqzc(obj.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
}

void InstructionCodeGeneratorMIPS64::VisitNullCheck(HNullCheck* instruction) {
  if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
    GenerateImplicitNullCheck(instruction);
  } else {
    GenerateExplicitNullCheck(instruction);
  }
}

void LocationsBuilderMIPS64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instruction) {
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}

void LocationsBuilderMIPS64::VisitParameterValue(HParameterValue* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
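  // Stack-passed parameters live in the caller's frame; rebase their offsets
  // past this method's frame.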
  if (location.IsStackSlot()) {
    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  } else if (location.IsDoubleStackSlot()) {
    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  }
  locations->SetOut(location);
}

void InstructionCodeGeneratorMIPS64::VisitParameterValue(HParameterValue* instruction
                                                         ATTRIBUTE_UNUSED) {
  // Nothing to do, the parameter is already at its location.
}

void LocationsBuilderMIPS64::VisitCurrentMethod(HCurrentMethod* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}

void InstructionCodeGeneratorMIPS64::VisitCurrentMethod(HCurrentMethod* instruction
                                                        ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}

void LocationsBuilderMIPS64::VisitPhi(HPhi* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    locations->SetInAt(i, Location::Any());
  }
  locations->SetOut(Location::Any());
}

void InstructionCodeGeneratorMIPS64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS64::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();
  LocationSummary::CallKind call_kind =
      Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
      locations->SetOut(calling_convention.GetReturnLocation(type));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitRem(HRem* instruction) {
  Primitive::Type type = instruction->GetType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      GenerateDivRemIntegral(instruction);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
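      // There is no FPU remainder instruction; call the fmodf/fmod runtime entry points.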
      int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
                                                             : QUICK_ENTRY_POINT(pFmod);
      codegen_->InvokeRuntime(entry_offset, instruction, instruction->GetDexPc(), nullptr);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void LocationsBuilderMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  memory_barrier->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}

void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
  Primitive::Type return_type = ret->InputAt(0)->GetType();
  locations->SetInAt(0, Mips64ReturnLocation(return_type));
}

void InstructionCodeGeneratorMIPS64::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS64::VisitReturnVoid(HReturnVoid* ret) {
  ret->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorMIPS64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderMIPS64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorMIPS64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderMIPS64::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
  Primitive::Type field_type = store->InputAt(1)->GetType();
  switch (field_type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unimplemented local type " << field_type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
}

void LocationsBuilderMIPS64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldGet(
    HUnresolvedInstanceFieldGet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}

void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldGet(
    HUnresolvedInstanceFieldGet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS64::VisitUnresolvedInstanceFieldSet(
    HUnresolvedInstanceFieldSet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}

void InstructionCodeGeneratorMIPS64::VisitUnresolvedInstanceFieldSet(
    HUnresolvedInstanceFieldSet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldGet(
    HUnresolvedStaticFieldGet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}

void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldGet(
    HUnresolvedStaticFieldGet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS64::VisitUnresolvedStaticFieldSet(
    HUnresolvedStaticFieldSet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->CreateUnresolvedFieldLocationSummary(
      instruction, instruction->GetFieldType(), calling_convention);
}

void InstructionCodeGeneratorMIPS64::VisitUnresolvedStaticFieldSet(
    HUnresolvedStaticFieldSet* instruction) {
  FieldAccessCallingConventionMIPS64 calling_convention;
  codegen_->GenerateUnresolvedFieldAccess(instruction,
                                          instruction->GetFieldType(),
                                          instruction->GetFieldIndex(),
                                          instruction->GetDexPc(),
                                          calling_convention);
}

void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}

void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}

void LocationsBuilderMIPS64::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
  // Nothing to do, this is driven by the code generator.
}

void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorMIPS64::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}

void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type input_type = conversion->GetInputType();
  Primitive::Type result_type = conversion->GetResultType();
  DCHECK_NE(input_type, result_type);

  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
  }

  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  if ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
      (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type))) {
    call_kind = LocationSummary::kCall;
  }

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);

  if (call_kind == LocationSummary::kNoCall) {
    if (Primitive::IsFloatingPointType(input_type)) {
      locations->SetInAt(0, Location::RequiresFpuRegister());
    } else {
      locations->SetInAt(0, Location::RequiresRegister());
    }

    if (Primitive::IsFloatingPointType(result_type)) {
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
    } else {
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
    }
  } else {
    InvokeRuntimeCallingConvention calling_convention;

    if (Primitive::IsFloatingPointType(input_type)) {
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
    } else {
      locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
    }

    locations->SetOut(calling_convention.GetReturnLocation(result_type));
  }
}

void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();

  DCHECK_NE(input_type, result_type);

  if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
    GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
    GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();

    switch (result_type) {
      case Primitive::kPrimChar:
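        // Zero-extend the low 16 bits; char is unsigned.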
        __ Andi(dst, src, 0xFFFF);
        break;
      case Primitive::kPrimByte:
        // long is never converted into types narrower than int directly,
        // so SEB and SEH can be used without ever causing unpredictable results
        // on 64-bit inputs.
        DCHECK(input_type != Primitive::kPrimLong);
        __ Seb(dst, src);
        break;
      case Primitive::kPrimShort:
        // long is never converted into types narrower than int directly,
        // so SEB and SEH can be used without ever causing unpredictable results
        // on 64-bit inputs.
        DCHECK(input_type != Primitive::kPrimLong);
        __ Seh(dst, src);
        break;
      case Primitive::kPrimInt:
      case Primitive::kPrimLong:
        // Sign-extend 32-bit int into bits 32 through 63 for
        // int-to-long and long-to-int conversions.
        __ Sll(dst, src, 0);
        break;

      default:
        LOG(FATAL) << "Unexpected type conversion from " << input_type
                   << " to " << result_type;
    }
  } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
    if (input_type != Primitive::kPrimLong) {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
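      // Move the 32-bit integer into an FPU register and convert it in place.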
      __ Mtc1(src, FTMP);
      if (result_type == Primitive::kPrimFloat) {
        __ Cvtsw(dst, FTMP);
      } else {
        __ Cvtdw(dst, FTMP);
      }
    } else {
      int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
                                                                    : QUICK_ENTRY_POINT(pL2d);
      codegen_->InvokeRuntime(entry_offset,
                              conversion,
                              conversion->GetDexPc(),
                              nullptr);
    }
  } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
    int32_t entry_offset;
    if (result_type != Primitive::kPrimLong) {
      entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2iz)
                                                           : QUICK_ENTRY_POINT(pD2iz);
    } else {
      entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
                                                           : QUICK_ENTRY_POINT(pD2l);
    }
    codegen_->InvokeRuntime(entry_offset,
                            conversion,
                            conversion->GetDexPc(),
                            nullptr);
  } else if (Primitive::IsFloatingPointType(result_type) &&
             Primitive::IsFloatingPointType(input_type)) {
    FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
    FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
    if (result_type == Primitive::kPrimFloat) {
      __ Cvtsd(dst, src);
    } else {
      __ Cvtds(dst, src);
    }
  } else {
    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
               << " to " << result_type;
  }
}

void LocationsBuilderMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS64::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitBelow(HBelow* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitBelow(HBelow* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitBelowOrEqual(HBelowOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitBelowOrEqual(HBelowOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitAbove(HAbove* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitAbove(HAbove* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitAboveOrEqual(HAboveOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitFakeString(HFakeString* instruction) {
  DCHECK(codegen_->IsBaseline());
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
}

void InstructionCodeGeneratorMIPS64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
  DCHECK(codegen_->IsBaseline());
  // Will be generated at use site.
}

// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
  int32_t lower_bound = switch_instr->GetStartValue();
  int32_t num_entries = switch_instr->GetNumEntries();
  LocationSummary* locations = switch_instr->GetLocations();
  GpuRegister value_reg = locations->InAt(0).AsRegister<GpuRegister>();
  HBasicBlock* default_block = switch_instr->GetDefaultBlock();

  // Create a series of compare/jumps.
  const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
  for (int32_t i = 0; i < num_entries; i++) {
    int32_t case_value = lower_bound + i;
    Label* succ = codegen_->GetLabelOf(successors[i]);
    if (case_value == 0) {
      __ Beqzc(value_reg, succ);
    } else {
      __ LoadConst32(TMP, case_value);
      __ Beqc(value_reg, TMP, succ);
    }
  }

  // And the default for any other value.
  if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
    __ B(codegen_->GetLabelOf(default_block));
  }
}

}  // namespace mips64
}  // namespace art