/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_mips64.h"

#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "intrinsics_mips64.h"
#include "art_method.h"
#include "code_generator_utils.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "offsets.h"
#include "thread.h"
#include "utils/mips64/assembler_mips64.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"

namespace art {
namespace mips64 {

static constexpr int kCurrentMethodStackOffset = 0;
static constexpr GpuRegister kMethodRegisterArgument = A0;

// We need extra temporary/scratch registers (in addition to AT) in some cases.
static constexpr FpuRegister FTMP = F8;

// ART Thread Register.
static constexpr GpuRegister TR = S1;

Location Mips64ReturnLocation(Primitive::Type return_type) {
  switch (return_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      return Location::RegisterLocation(V0);

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      return Location::FpuRegisterLocation(F0);

    case Primitive::kPrimVoid:
      return Location();
  }
  UNREACHABLE();
}

Location InvokeDexCallingConventionVisitorMIPS64::GetReturnLocation(Primitive::Type type) const {
  return Mips64ReturnLocation(type);
}

Location InvokeDexCallingConventionVisitorMIPS64::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}

Location InvokeDexCallingConventionVisitorMIPS64::GetNextLocation(Primitive::Type type) {
  Location next_location;
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unexpected parameter type " << type;
  }

  if (Primitive::IsFloatingPointType(type) &&
      (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
    next_location = Location::FpuRegisterLocation(
        calling_convention.GetFpuRegisterAt(float_index_++));
    gp_index_++;
  } else if (!Primitive::IsFloatingPointType(type) &&
             (gp_index_ < calling_convention.GetNumberOfRegisters())) {
    next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index_++));
    float_index_++;
  } else {
    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
    next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                                 : Location::StackSlot(stack_offset);
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;

  // TODO: review

  // TODO: shouldn't we use a whole machine word per argument on the stack?
  // Implicit 4-byte method pointer (and such) will cause misalignment.

  return next_location;
}

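// Note on the index bookkeeping above: the GPR and FPU argument indices
// advance in lockstep (assigning a float argument also bumps gp_index_, and
// vice versa), so an argument's register slot is determined purely by its
// position. For a (long, float, int) signature, for instance, this hands out
// the GPR at index 0, the FPU register at index 1, and the GPR at index 2,
// falling back to stack slots only once an index runs past the register count.
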
Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
  return Mips64ReturnLocation(type);
}

#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()

class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(0),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimInt,
                               locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimInt);
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }

 private:
  HBoundsCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
};

class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
};

class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  LoadClassSlowPathMIPS64(HLoadClass* cls,
                          HInstruction* at,
                          uint32_t dex_pc,
                          bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                            : QUICK_ENTRY_POINT(pInitializeType);
    mips64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = at_->GetType();
      mips64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS64"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
};

class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit LoadStringSlowPathMIPS64(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
    Primitive::Type type = instruction_->GetType();
    mips64_codegen->MoveLocation(locations->Out(),
                                 calling_convention.GetReturnLocation(type),
                                 type);

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
};

class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : instruction_(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    if (instruction_->CanThrowIntoCatchBlock()) {
      // Live registers will be restored in the catch block if caught.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
    }
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }

 private:
  HNullCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
};

class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(mips64_codegen->GetLabelOf(successor_));
    }
  }

  Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS64);
};

class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit TypeCheckSlowPathMIPS64(HInstruction* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
                                                        : locations->Out();
    uint32_t dex_pc = instruction_->GetDexPc();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimNot,
                               object_class,
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
                                    instruction_,
                                    dex_pc,
                                    this);
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial,
                           uint32_t,
                           const mirror::Class*,
                           const mirror::Class*>();
    } else {
      DCHECK(instruction_->IsCheckCast());
      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this);
      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; }

 private:
  HInstruction* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
};

class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit DeoptimizationSlowPathMIPS64(HInstruction* instruction)
      : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
};

CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
                                         const Mips64InstructionSetFeatures& isa_features,
                                         const CompilerOptions& compiler_options,
                                         OptimizingCompilerStats* stats)
    : CodeGenerator(graph,
                    kNumberOfGpuRegisters,
                    kNumberOfFpuRegisters,
                    0,  // kNumberOfRegisterPairs
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options,
                    stats),
      block_labels_(nullptr),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      isa_features_(isa_features) {
  // Save RA (containing the return address) to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(RA));
}

#undef __
#define __ down_cast<Mips64Assembler*>(GetAssembler())->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()

void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
  CodeGenerator::Finalize(allocator);
}

Mips64Assembler* ParallelMoveResolverMIPS64::GetAssembler() const {
  return codegen_->GetAssembler();
}

void ParallelMoveResolverMIPS64::EmitMove(size_t index) {
  DCHECK_LT(index, moves_.size());
  MoveOperands* move = moves_[index];
  codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
}

void ParallelMoveResolverMIPS64::EmitSwap(size_t index) {
  DCHECK_LT(index, moves_.size());
  MoveOperands* move = moves_[index];
  codegen_->SwapLocations(move->GetDestination(), move->GetSource(), move->GetType());
}

void ParallelMoveResolverMIPS64::RestoreScratch(int reg) {
  // Pop reg
  __ Ld(GpuRegister(reg), SP, 0);
  __ DecreaseFrameSize(kMips64WordSize);
}

void ParallelMoveResolverMIPS64::SpillScratch(int reg) {
  // Push reg
  __ IncreaseFrameSize(kMips64WordSize);
  __ Sd(GpuRegister(reg), SP, 0);
}

void ParallelMoveResolverMIPS64::Exchange(int index1, int index2, bool double_slot) {
  LoadOperandType load_type = double_slot ? kLoadDoubleword : kLoadWord;
  StoreOperandType store_type = double_slot ? kStoreDoubleword : kStoreWord;
  // Allocate a scratch register other than TMP, if available.
  // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
  // automatically unspilled when the scratch scope object is destroyed).
  ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
  // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
  int stack_offset = ensure_scratch.IsSpilled() ? kMips64WordSize : 0;
  __ LoadFromOffset(load_type,
                    GpuRegister(ensure_scratch.GetRegister()),
                    SP,
                    index1 + stack_offset);
  __ LoadFromOffset(load_type,
                    TMP,
                    SP,
                    index2 + stack_offset);
  __ StoreToOffset(store_type,
                   GpuRegister(ensure_scratch.GetRegister()),
                   SP,
                   index2 + stack_offset);
  __ StoreToOffset(store_type, TMP, SP, index1 + stack_offset);
}

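// Note on the offset adjustment above: SpillScratch pushes V0 by moving SP
// down one word, so while the spill is outstanding every SP-relative slot is
// one word (kMips64WordSize) further away. A swap of the slots at offsets 16
// and 24, for example, is then performed at SP + 24 and SP + 32.
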
static dwarf::Reg DWARFReg(GpuRegister reg) {
  return dwarf::Reg::Mips64Core(static_cast<int>(reg));
}

// TODO: mapping of floating-point registers to DWARF.

void CodeGeneratorMIPS64::GenerateFrameEntry() {
  __ Bind(&frame_entry_label_);

  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod();

  if (do_overflow_check) {
    __ LoadFromOffset(kLoadWord,
                      ZERO,
                      SP,
                      -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64)));
    RecordPcInfo(nullptr, 0);
  }

  // TODO: anything related to T9/GP/GOT/PIC/.so's?

  if (HasEmptyFrame()) {
    return;
  }

  // Make sure the frame size isn't unreasonably large. Per the various APIs
  // it looks like it should always be less than 2GB in size, which allows
  // us to use 32-bit signed offsets from the stack pointer.
  if (GetFrameSize() > 0x7FFFFFFF)
    LOG(FATAL) << "Stack frame larger than 2GB";

  // Spill callee-saved registers.
  // Note that their cumulative size is small and they can be indexed using
  // 16-bit offsets.

  // TODO: increment/decrement SP in one step instead of two or remove this comment.

  uint32_t ofs = FrameEntrySpillSize();
  __ IncreaseFrameSize(ofs);

  for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
    GpuRegister reg = kCoreCalleeSaves[i];
    if (allocated_registers_.ContainsCoreRegister(reg)) {
      ofs -= kMips64WordSize;
      __ Sd(reg, SP, ofs);
      __ cfi().RelOffset(DWARFReg(reg), ofs);
    }
  }

  for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
    FpuRegister reg = kFpuCalleeSaves[i];
    if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
      ofs -= kMips64WordSize;
      __ Sdc1(reg, SP, ofs);
      // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
    }
  }

  // Allocate the rest of the frame and store the current method pointer
  // at its end.

  __ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());

  static_assert(IsInt<16>(kCurrentMethodStackOffset),
                "kCurrentMethodStackOffset must fit into int16_t");
  __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
}

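// The frame this builds, from higher to lower addresses: the allocated core
// callee-saves (stored first, so they sit at the top of the spill area), then
// the allocated FPU callee-saves, then the remaining
// GetFrameSize() - FrameEntrySpillSize() bytes of spill slots and outgoing
// arguments, with the current ArtMethod* stored at SP + 0.
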
void CodeGeneratorMIPS64::GenerateFrameExit() {
  __ cfi().RememberState();

  // TODO: anything related to T9/GP/GOT/PIC/.so's?

  if (!HasEmptyFrame()) {
    // Deallocate the rest of the frame.

    __ DecreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());

    // Restore callee-saved registers.
    // Note that their cumulative size is small and they can be indexed using
    // 16-bit offsets.

    // TODO: increment/decrement SP in one step instead of two or remove this comment.

    uint32_t ofs = 0;

    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
      FpuRegister reg = kFpuCalleeSaves[i];
      if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
        __ Ldc1(reg, SP, ofs);
        ofs += kMips64WordSize;
        // TODO: __ cfi().Restore(DWARFReg(reg));
      }
    }

    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
      GpuRegister reg = kCoreCalleeSaves[i];
      if (allocated_registers_.ContainsCoreRegister(reg)) {
        __ Ld(reg, SP, ofs);
        ofs += kMips64WordSize;
        __ cfi().Restore(DWARFReg(reg));
      }
    }

    DCHECK_EQ(ofs, FrameEntrySpillSize());
    __ DecreaseFrameSize(ofs);
  }

  __ Jr(RA);

  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}

void CodeGeneratorMIPS64::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

void CodeGeneratorMIPS64::MoveLocation(Location destination,
                                       Location source,
                                       Primitive::Type type) {
  if (source.Equals(destination)) {
    return;
  }

  // A valid move can always be inferred from the destination and source
  // locations. When moving from and to a register, the argument type can be
  // used to generate 32bit instead of 64bit moves.
  bool unspecified_type = (type == Primitive::kPrimVoid);
  DCHECK_EQ(unspecified_type, false);

  if (destination.IsRegister() || destination.IsFpuRegister()) {
    if (unspecified_type) {
      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
      if (source.IsStackSlot() ||
          (src_cst != nullptr && (src_cst->IsIntConstant()
                                  || src_cst->IsFloatConstant()
                                  || src_cst->IsNullConstant()))) {
        // For stack slots and 32bit constants, a 32bit type is appropriate.
        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
      } else {
        // If the source is a double stack slot or a 64bit constant, a 64bit
        // type is appropriate. Else the source is a register, and since the
        // type has not been specified, we choose a 64bit type to force a 64bit
        // move.
        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
      }
    }
    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
           (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
      // Move to GPR/FPR from stack
      LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
      if (Primitive::IsFloatingPointType(type)) {
        __ LoadFpuFromOffset(load_type,
                             destination.AsFpuRegister<FpuRegister>(),
                             SP,
                             source.GetStackIndex());
      } else {
        // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
        __ LoadFromOffset(load_type,
                          destination.AsRegister<GpuRegister>(),
                          SP,
                          source.GetStackIndex());
      }
    } else if (source.IsConstant()) {
      // Move to GPR/FPR from constant
      GpuRegister gpr = AT;
      if (!Primitive::IsFloatingPointType(type)) {
        gpr = destination.AsRegister<GpuRegister>();
      }
      if (type == Primitive::kPrimInt || type == Primitive::kPrimFloat) {
        __ LoadConst32(gpr, GetInt32ValueOf(source.GetConstant()->AsConstant()));
      } else {
        __ LoadConst64(gpr, GetInt64ValueOf(source.GetConstant()->AsConstant()));
      }
      if (type == Primitive::kPrimFloat) {
        __ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
      } else if (type == Primitive::kPrimDouble) {
        __ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
      }
    } else {
      if (destination.IsRegister()) {
        // Move to GPR from GPR
        __ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
      } else {
        // Move to FPR from FPR
        if (type == Primitive::kPrimFloat) {
          __ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
        } else {
          DCHECK_EQ(type, Primitive::kPrimDouble);
          __ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
        }
      }
    }
  } else {  // The destination is not a register. It must be a stack slot.
    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
          type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
             (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
      // Move to stack from GPR/FPR
      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
      if (source.IsRegister()) {
        __ StoreToOffset(store_type,
                         source.AsRegister<GpuRegister>(),
                         SP,
                         destination.GetStackIndex());
      } else {
        __ StoreFpuToOffset(store_type,
                            source.AsFpuRegister<FpuRegister>(),
                            SP,
                            destination.GetStackIndex());
      }
    } else if (source.IsConstant()) {
      // Move to stack from constant
      HConstant* src_cst = source.GetConstant();
      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
      if (destination.IsStackSlot()) {
        __ LoadConst32(TMP, GetInt32ValueOf(src_cst->AsConstant()));
      } else {
        __ LoadConst64(TMP, GetInt64ValueOf(src_cst->AsConstant()));
      }
      __ StoreToOffset(store_type, TMP, SP, destination.GetStackIndex());
    } else {
      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
      DCHECK_EQ(source.IsDoubleStackSlot(), destination.IsDoubleStackSlot());
      // Move to stack from stack
      if (destination.IsStackSlot()) {
        __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
        __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
      } else {
        __ LoadFromOffset(kLoadDoubleword, TMP, SP, source.GetStackIndex());
        __ StoreToOffset(kStoreDoubleword, TMP, SP, destination.GetStackIndex());
      }
    }
  }
}

void CodeGeneratorMIPS64::SwapLocations(Location loc1,
                                        Location loc2,
                                        Primitive::Type type ATTRIBUTE_UNUSED) {
  DCHECK(!loc1.IsConstant());
  DCHECK(!loc2.IsConstant());

  if (loc1.Equals(loc2)) {
    return;
  }

  bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
  bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
  bool is_fp_reg1 = loc1.IsFpuRegister();
  bool is_fp_reg2 = loc2.IsFpuRegister();

  if (loc2.IsRegister() && loc1.IsRegister()) {
    // Swap 2 GPRs
    GpuRegister r1 = loc1.AsRegister<GpuRegister>();
    GpuRegister r2 = loc2.AsRegister<GpuRegister>();
    __ Move(TMP, r2);
    __ Move(r2, r1);
    __ Move(r1, TMP);
  } else if (is_fp_reg2 && is_fp_reg1) {
    // Swap 2 FPRs
    FpuRegister r1 = loc1.AsFpuRegister<FpuRegister>();
    FpuRegister r2 = loc2.AsFpuRegister<FpuRegister>();
    // TODO: Can MOV.S/MOV.D be used here to save one instruction?
    // Need to distinguish float from double, right?
    __ Dmfc1(TMP, r2);
    __ Dmfc1(AT, r1);
    __ Dmtc1(TMP, r1);
    __ Dmtc1(AT, r2);
  } else if (is_slot1 != is_slot2) {
    // Swap GPR/FPR and stack slot
    Location reg_loc = is_slot1 ? loc2 : loc1;
    Location mem_loc = is_slot1 ? loc1 : loc2;
    LoadOperandType load_type = mem_loc.IsStackSlot() ? kLoadWord : kLoadDoubleword;
    StoreOperandType store_type = mem_loc.IsStackSlot() ? kStoreWord : kStoreDoubleword;
    // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
    __ LoadFromOffset(load_type, TMP, SP, mem_loc.GetStackIndex());
    if (reg_loc.IsFpuRegister()) {
      __ StoreFpuToOffset(store_type,
                          reg_loc.AsFpuRegister<FpuRegister>(),
                          SP,
                          mem_loc.GetStackIndex());
      // TODO: review this MTC1/DMTC1 move
      if (mem_loc.IsStackSlot()) {
        __ Mtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
      } else {
        DCHECK(mem_loc.IsDoubleStackSlot());
        __ Dmtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
      }
    } else {
      __ StoreToOffset(store_type, reg_loc.AsRegister<GpuRegister>(), SP, mem_loc.GetStackIndex());
      __ Move(reg_loc.AsRegister<GpuRegister>(), TMP);
    }
  } else if (is_slot1 && is_slot2) {
    move_resolver_.Exchange(loc1.GetStackIndex(),
                            loc2.GetStackIndex(),
                            loc1.IsDoubleStackSlot());
  } else {
    LOG(FATAL) << "Unimplemented swap between locations " << loc1 << " and " << loc2;
  }
}

void CodeGeneratorMIPS64::Move(HInstruction* instruction,
                               Location location,
                               HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type type = instruction->GetType();
  DCHECK_NE(type, Primitive::kPrimVoid);

  if (instruction->IsCurrentMethod()) {
    MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset), type);
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  } else if (instruction->IsIntConstant()
             || instruction->IsLongConstant()
             || instruction->IsNullConstant()) {
    if (location.IsRegister()) {
      // Move to GPR from constant
      GpuRegister dst = location.AsRegister<GpuRegister>();
      if (instruction->IsNullConstant() || instruction->IsIntConstant()) {
        __ LoadConst32(dst, GetInt32ValueOf(instruction->AsConstant()));
      } else {
        __ LoadConst64(dst, instruction->AsLongConstant()->GetValue());
      }
    } else {
      DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
      // Move to stack from constant
      if (location.IsStackSlot()) {
        __ LoadConst32(TMP, GetInt32ValueOf(instruction->AsConstant()));
        __ StoreToOffset(kStoreWord, TMP, SP, location.GetStackIndex());
      } else {
        __ LoadConst64(TMP, instruction->AsLongConstant()->GetValue());
        __ StoreToOffset(kStoreDoubleword, TMP, SP, location.GetStackIndex());
      }
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    MoveLocation(location, temp_location, type);
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    if (Primitive::Is64BitType(type)) {
      MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
    } else {
      MoveLocation(location, Location::StackSlot(stack_slot), type);
    }
  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    MoveLocation(location, locations->Out(), type);
  }
}

void CodeGeneratorMIPS64::MoveConstant(Location location, int32_t value) {
  DCHECK(location.IsRegister());
  __ LoadConst32(location.AsRegister<GpuRegister>(), value);
}

Location CodeGeneratorMIPS64::GetStackLocation(HLoadLocal* load) const {
  Primitive::Type type = load->GetType();

  switch (type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << type;
  }

  LOG(FATAL) << "Unreachable";
  return Location::NoLocation();
}

void CodeGeneratorMIPS64::MarkGCCard(GpuRegister object, GpuRegister value) {
  Label done;
  GpuRegister card = AT;
  GpuRegister temp = TMP;
  __ Beqzc(value, &done);
  __ LoadFromOffset(kLoadDoubleword,
                    card,
                    TR,
                    Thread::CardTableOffset<kMips64WordSize>().Int32Value());
  __ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
  __ Daddu(temp, card, temp);
  __ Sb(card, temp, 0);
  __ Bind(&done);
}

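// How the marking above works: the doubleword loaded from the thread is the
// card table's biased begin pointer, so the card byte for `object` lives at
// biased_begin + (object >> kCardShift). This relies on the card table
// biasing its begin pointer (see gc/accounting/card_table.h) so that the low
// byte of that address equals the dirty-card value, which lets the Sb of
// `card` itself dirty the card without materializing a separate constant.
// Null stores skip the marking entirely via the initial Beqzc.
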
void CodeGeneratorMIPS64::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const {
  // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
  blocked_core_registers_[ZERO] = true;
  blocked_core_registers_[K0] = true;
  blocked_core_registers_[K1] = true;
  blocked_core_registers_[GP] = true;
  blocked_core_registers_[SP] = true;
  blocked_core_registers_[RA] = true;

  // AT and TMP(T8) are used as temporary/scratch registers
  // (similar to how AT is used by MIPS assemblers).
  blocked_core_registers_[AT] = true;
  blocked_core_registers_[TMP] = true;
  blocked_fpu_registers_[FTMP] = true;

  // Reserve suspend and thread registers.
  blocked_core_registers_[S0] = true;
  blocked_core_registers_[TR] = true;

  // Reserve T9 for function calls.
  blocked_core_registers_[T9] = true;

  // TODO: review; anything else?

  // TODO: make these two for's conditional on is_baseline once
  // all the issues with register saving/restoring are sorted out.
  for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
    blocked_core_registers_[kCoreCalleeSaves[i]] = true;
  }

  for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
    blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
  }
}

Location CodeGeneratorMIPS64::AllocateFreeRegister(Primitive::Type type) const {
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type)) {
    size_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFpuRegisters);
    return Location::FpuRegisterLocation(reg);
  } else {
    size_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfGpuRegisters);
    return Location::RegisterLocation(reg);
  }
}

size_t CodeGeneratorMIPS64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreToOffset(kStoreDoubleword, GpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFromOffset(kLoadDoubleword, GpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreFpuToOffset(kStoreDoubleword, FpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFpuFromOffset(kLoadDoubleword, FpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

void CodeGeneratorMIPS64::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << GpuRegister(reg);
}

void CodeGeneratorMIPS64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << FpuRegister(reg);
}

void CodeGeneratorMIPS64::InvokeRuntime(QuickEntrypointEnum entrypoint,
                                        HInstruction* instruction,
                                        uint32_t dex_pc,
                                        SlowPathCode* slow_path) {
  InvokeRuntime(GetThreadOffset<kMips64WordSize>(entrypoint).Int32Value(),
                instruction,
                dex_pc,
                slow_path);
}

void CodeGeneratorMIPS64::InvokeRuntime(int32_t entry_point_offset,
                                        HInstruction* instruction,
                                        uint32_t dex_pc,
                                        SlowPathCode* slow_path) {
  ValidateInvokeRuntime(instruction, slow_path);
  // TODO: anything related to T9/GP/GOT/PIC/.so's?
  __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
  __ Jalr(T9);
  RecordPcInfo(instruction, dex_pc, slow_path);
}

void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
                                                                      GpuRegister class_reg) {
  __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
  __ LoadConst32(AT, mirror::Class::kStatusInitialized);
  __ Bltc(TMP, AT, slow_path->GetEntryLabel());
  // TODO: barrier needed?
  __ Bind(slow_path->GetExitLabel());
}

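// The single Bltc above relies on the ordering of mirror::Class status
// values: every status below kStatusInitialized (including the error
// statuses, which are negative) takes the slow path, so one signed compare
// covers both "not yet initialized" and "erroneous" classes.
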
void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
  __ Sync(0);  // only stype 0 is supported
}

void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                          HBasicBlock* successor) {
  SuspendCheckSlowPathMIPS64* slow_path =
      new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS64(instruction, successor);
  codegen_->AddSlowPath(slow_path);

  __ LoadFromOffset(kLoadUnsignedHalfword,
                    TMP,
                    TR,
                    Thread::ThreadFlagsOffset<kMips64WordSize>().Int32Value());
  if (successor == nullptr) {
    __ Bnezc(TMP, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ Beqzc(TMP, codegen_->GetLabelOf(successor));
    __ B(slow_path->GetEntryLabel());
    // slow_path will return to GetLabelOf(successor).
  }
}

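// The thread flags halfword is zero when nothing is pending, so one
// branch-on-nonzero suffices here: any pending suspend or checkpoint request
// diverts to the slow path, which calls pTestSuspend and resumes at either
// the return label or the successor block.
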
InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph,
                                                               CodeGeneratorMIPS64* codegen)
    : HGraphVisitor(graph),
      assembler_(codegen->GetAssembler()),
      codegen_(codegen) {}

void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
  DCHECK_EQ(instruction->InputCount(), 2U);
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Primitive::Type type = instruction->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      HInstruction* right = instruction->InputAt(1);
      bool can_use_imm = false;
      if (right->IsConstant()) {
        int64_t imm = CodeGenerator::GetInt64ValueOf(right->AsConstant());
        if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
          can_use_imm = IsUint<16>(imm);
        } else if (instruction->IsAdd()) {
          can_use_imm = IsInt<16>(imm);
        } else {
          DCHECK(instruction->IsSub());
          can_use_imm = IsInt<16>(-imm);
        }
      }
      if (can_use_imm)
        locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
      else
        locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
  }
}

void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      Location rhs_location = locations->InAt(1);

      GpuRegister rhs_reg = ZERO;
      int64_t rhs_imm = 0;
      bool use_imm = rhs_location.IsConstant();
      if (use_imm) {
        rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
      } else {
        rhs_reg = rhs_location.AsRegister<GpuRegister>();
      }

      if (instruction->IsAnd()) {
        if (use_imm)
          __ Andi(dst, lhs, rhs_imm);
        else
          __ And(dst, lhs, rhs_reg);
      } else if (instruction->IsOr()) {
        if (use_imm)
          __ Ori(dst, lhs, rhs_imm);
        else
          __ Or(dst, lhs, rhs_reg);
      } else if (instruction->IsXor()) {
        if (use_imm)
          __ Xori(dst, lhs, rhs_imm);
        else
          __ Xor(dst, lhs, rhs_reg);
      } else if (instruction->IsAdd()) {
        if (type == Primitive::kPrimInt) {
          if (use_imm)
            __ Addiu(dst, lhs, rhs_imm);
          else
            __ Addu(dst, lhs, rhs_reg);
        } else {
          if (use_imm)
            __ Daddiu(dst, lhs, rhs_imm);
          else
            __ Daddu(dst, lhs, rhs_reg);
        }
      } else {
        DCHECK(instruction->IsSub());
        if (type == Primitive::kPrimInt) {
          if (use_imm)
            __ Addiu(dst, lhs, -rhs_imm);
          else
            __ Subu(dst, lhs, rhs_reg);
        } else {
          if (use_imm)
            __ Daddiu(dst, lhs, -rhs_imm);
          else
            __ Dsubu(dst, lhs, rhs_reg);
        }
      }
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (instruction->IsAdd()) {
        if (type == Primitive::kPrimFloat)
          __ AddS(dst, lhs, rhs);
        else
          __ AddD(dst, lhs, rhs);
      } else if (instruction->IsSub()) {
        if (type == Primitive::kPrimFloat)
          __ SubS(dst, lhs, rhs);
        else
          __ SubD(dst, lhs, rhs);
      } else {
        LOG(FATAL) << "Unexpected floating-point binary operation";
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected binary operation type " << type;
  }
}

void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift type " << type;
  }
}

void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
  LocationSummary* locations = instr->GetLocations();
  Primitive::Type type = instr->GetType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      Location rhs_location = locations->InAt(1);

      GpuRegister rhs_reg = ZERO;
      int64_t rhs_imm = 0;
      bool use_imm = rhs_location.IsConstant();
      if (use_imm) {
        rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
      } else {
        rhs_reg = rhs_location.AsRegister<GpuRegister>();
      }

      if (use_imm) {
        uint32_t shift_value = (type == Primitive::kPrimInt)
            ? static_cast<uint32_t>(rhs_imm & kMaxIntShiftValue)
            : static_cast<uint32_t>(rhs_imm & kMaxLongShiftValue);

        if (type == Primitive::kPrimInt) {
          if (instr->IsShl()) {
            __ Sll(dst, lhs, shift_value);
          } else if (instr->IsShr()) {
            __ Sra(dst, lhs, shift_value);
          } else {
            __ Srl(dst, lhs, shift_value);
          }
        } else {
          if (shift_value < 32) {
            if (instr->IsShl()) {
              __ Dsll(dst, lhs, shift_value);
            } else if (instr->IsShr()) {
              __ Dsra(dst, lhs, shift_value);
            } else {
              __ Dsrl(dst, lhs, shift_value);
            }
          } else {
            shift_value -= 32;
            if (instr->IsShl()) {
              __ Dsll32(dst, lhs, shift_value);
            } else if (instr->IsShr()) {
              __ Dsra32(dst, lhs, shift_value);
            } else {
              __ Dsrl32(dst, lhs, shift_value);
            }
          }
        }
      } else {
        if (type == Primitive::kPrimInt) {
          if (instr->IsShl()) {
            __ Sllv(dst, lhs, rhs_reg);
          } else if (instr->IsShr()) {
            __ Srav(dst, lhs, rhs_reg);
          } else {
            __ Srlv(dst, lhs, rhs_reg);
          }
        } else {
          if (instr->IsShl()) {
            __ Dsllv(dst, lhs, rhs_reg);
          } else if (instr->IsShr()) {
            __ Dsrav(dst, lhs, rhs_reg);
          } else {
            __ Dsrlv(dst, lhs, rhs_reg);
          }
        }
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift operation type " << type;
  }
}

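// The under/over-32 split above mirrors the MIPS64 encoding: the immediate
// shift instructions encode only a 5-bit amount, so Dsll/Dsra/Dsrl cover
// distances 0-31 while the *32 variants shift by (encoded amount + 32). A
// constant long shift left by 40, for example, is emitted as
// Dsll32 dst, lhs, 8.
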
void LocationsBuilderMIPS64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  Location index = locations->InAt(1);
  Primitive::Type type = instruction->GetType();

  switch (type) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
        __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
        __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      LoadOperandType load_type = (type == Primitive::kPrimNot) ? kLoadUnsignedWord : kLoadWord;
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(load_type, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(load_type, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFromOffset(kLoadDoubleword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFpuFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
        __ Daddu(TMP, obj, TMP);
        __ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

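// Element addressing in the cases above: a constant index folds into the load
// as obj + (index << log2(element_size)) + data_offset; an int[] access at
// index 5, for example, loads from offset (5 << TIMES_4) + data_offset
// directly. A register index instead needs a Dsll to scale it and a Daddu to
// add the array base (just the Daddu for byte-sized elements) before the
// fixed-offset load.
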
1434void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
1435 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1436 locations->SetInAt(0, Location::RequiresRegister());
1437 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1438}
1439
1440void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction) {
1441 LocationSummary* locations = instruction->GetLocations();
1442 uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
1443 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1444 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1445 __ LoadFromOffset(kLoadWord, out, obj, offset);
1446 codegen_->MaybeRecordImplicitNullCheck(instruction);
1447}
1448
1449void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) {
David Brazdilbb3d5052015-09-21 18:39:16 +01001450 bool needs_runtime_call = instruction->NeedsTypeCheck();
Alexey Frunze4dda3372015-06-01 18:31:49 -07001451 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
1452 instruction,
David Brazdilbb3d5052015-09-21 18:39:16 +01001453 needs_runtime_call ? LocationSummary::kCall : LocationSummary::kNoCall);
1454 if (needs_runtime_call) {
Alexey Frunze4dda3372015-06-01 18:31:49 -07001455 InvokeRuntimeCallingConvention calling_convention;
1456 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
1457 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
1458 locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
1459 } else {
1460 locations->SetInAt(0, Location::RequiresRegister());
1461 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1462 if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
1463 locations->SetInAt(2, Location::RequiresFpuRegister());
1464 } else {
1465 locations->SetInAt(2, Location::RequiresRegister());
1466 }
1467 }
1468}
1469
1470void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
1471 LocationSummary* locations = instruction->GetLocations();
1472 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1473 Location index = locations->InAt(1);
1474 Primitive::Type value_type = instruction->GetComponentType();
1475 bool needs_runtime_call = locations->WillCall();
1476 bool needs_write_barrier =
1477 CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
1478
1479 switch (value_type) {
1480 case Primitive::kPrimBoolean:
1481 case Primitive::kPrimByte: {
1482 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
1483 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1484 if (index.IsConstant()) {
1485 size_t offset =
1486 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
1487 __ StoreToOffset(kStoreByte, value, obj, offset);
1488 } else {
1489 __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
1490 __ StoreToOffset(kStoreByte, value, TMP, data_offset);
1491 }
1492 break;
1493 }
1494
1495 case Primitive::kPrimShort:
1496 case Primitive::kPrimChar: {
1497 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
1498 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1499 if (index.IsConstant()) {
1500 size_t offset =
1501 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
1502 __ StoreToOffset(kStoreHalfword, value, obj, offset);
1503 } else {
1504 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
1505 __ Daddu(TMP, obj, TMP);
1506 __ StoreToOffset(kStoreHalfword, value, TMP, data_offset);
1507 }
1508 break;
1509 }
1510
1511 case Primitive::kPrimInt:
1512 case Primitive::kPrimNot: {
1513 if (!needs_runtime_call) {
1514 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
1515 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1516 if (index.IsConstant()) {
1517 size_t offset =
1518 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
1519 __ StoreToOffset(kStoreWord, value, obj, offset);
1520 } else {
1521 DCHECK(index.IsRegister()) << index;
1522 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
1523 __ Daddu(TMP, obj, TMP);
1524 __ StoreToOffset(kStoreWord, value, TMP, data_offset);
1525 }
1526 codegen_->MaybeRecordImplicitNullCheck(instruction);
1527 if (needs_write_barrier) {
1528 DCHECK_EQ(value_type, Primitive::kPrimNot);
1529 codegen_->MarkGCCard(obj, value);
1530 }
1531 } else {
1532 DCHECK_EQ(value_type, Primitive::kPrimNot);
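        // The runtime entrypoint performs the array store type check and the
        // write barrier that the inline path above handles with MarkGCCard.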
        codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
                                instruction,
                                instruction->GetDexPc(),
                                nullptr);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreToOffset(kStoreDoubleword, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
      DCHECK(locations->InAt(2).IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ StoreFpuToOffset(kStoreWord, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
        __ Daddu(TMP, obj, TMP);
        __ StoreFpuToOffset(kStoreWord, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
      DCHECK(locations->InAt(2).IsFpuRegister());
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ StoreFpuToOffset(kStoreDoubleword, value, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ StoreFpuToOffset(kStoreDoubleword, value, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }

  // Ints and objects are handled in the switch.
  if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
    codegen_->MaybeRecordImplicitNullCheck(instruction);
  }
}

void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  BoundsCheckSlowPathMIPS64* slow_path =
      new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);

  GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister length = locations->InAt(1).AsRegister<GpuRegister>();

  // length is limited by the maximum positive signed 32-bit integer.
  // Unsigned comparison of length and index checks for index < 0
  // and for length <= index simultaneously.
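  // For example, index = -1 reads as 0xFFFFFFFF when compared unsigned, which
  // exceeds any valid length, so the single Bgeuc below covers both failures.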
  // Mips R6 requires lhs != rhs for compact branches.
  if (index == length) {
    __ B(slow_path->GetEntryLabel());
  } else {
    __ Bgeuc(index, length, slow_path->GetEntryLabel());
  }
}

void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction,
      LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  // Note that TypeCheckSlowPathMIPS64 uses this register too.
  locations->AddTemp(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
  GpuRegister obj_cls = locations->GetTemp(0).AsRegister<GpuRegister>();

  SlowPathCodeMIPS64* slow_path =
      new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);

  // TODO: avoid this check if we know obj is not null.
  __ Beqzc(obj, slow_path->GetExitLabel());
  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadUnsignedWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
  __ Bnec(obj_cls, cls, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  if (check->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
  // We assume the class is not null.
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
      check->GetLoadClass(),
      check,
      check->GetDexPc(),
      true);
  codegen_->AddSlowPath(slow_path);
  GenerateClassInitializationCheck(slow_path,
                                   check->GetLocations()->InAt(0).AsRegister<GpuRegister>());
}

void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) {
  Primitive::Type in_type = compare->InputAt(0)->GetType();

  LocationSummary::CallKind call_kind = Primitive::IsFloatingPointType(in_type)
      ? LocationSummary::kCall
      : LocationSummary::kNoCall;

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, call_kind);

  switch (in_type) {
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
      locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected type for compare operation " << in_type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type in_type = instruction->InputAt(0)->GetType();

  // 0 if: left == right
  // 1 if: left > right
  // -1 if: left < right
  switch (in_type) {
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
      // TODO: more efficient (direct) comparison with a constant
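      // TMP = (lhs < rhs), dst = (rhs < lhs); dst - TMP is then -1, 0 or 1.
      // For example, lhs = 2, rhs = 5 gives TMP = 1, dst = 0, result -1.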
      __ Slt(TMP, lhs, rhs);
      __ Slt(dst, rhs, lhs);
      __ Subu(dst, dst, TMP);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      int32_t entry_point_offset;
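      // The gt bias decides how an unordered comparison (a NaN operand) is
      // resolved: the pCmpg* entrypoints return 1, the pCmpl* entrypoints -1.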
      if (in_type == Primitive::kPrimFloat) {
        entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgFloat)
                                                     : QUICK_ENTRY_POINT(pCmplFloat);
      } else {
        entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgDouble)
                                                     : QUICK_ENTRY_POINT(pCmplDouble);
      }
      codegen_->InvokeRuntime(entry_point_offset, instruction, instruction->GetDexPc(), nullptr);
      break;
    }

    default:
      LOG(FATAL) << "Unimplemented compare type " << in_type;
  }
}

void LocationsBuilderMIPS64::VisitCondition(HCondition* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (instruction->NeedsMaterialization()) {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

void InstructionCodeGeneratorMIPS64::VisitCondition(HCondition* instruction) {
  if (!instruction->NeedsMaterialization()) {
    return;
  }

  LocationSummary* locations = instruction->GetLocations();

  GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
  GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
  Location rhs_location = locations->InAt(1);

  GpuRegister rhs_reg = ZERO;
  int64_t rhs_imm = 0;
  bool use_imm = rhs_location.IsConstant();
  if (use_imm) {
    rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
  } else {
    rhs_reg = rhs_location.AsRegister<GpuRegister>();
  }

  IfCondition if_cond = instruction->GetCondition();

  switch (if_cond) {
    case kCondEQ:
    case kCondNE:
      if (use_imm && IsUint<16>(rhs_imm)) {
        __ Xori(dst, lhs, rhs_imm);
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Xor(dst, lhs, rhs_reg);
      }
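      // dst is now zero iff lhs == rhs: Sltiu maps 0 to 1 (and everything
      // else to 0), while Sltu against ZERO maps any non-zero value to 1.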
      if (if_cond == kCondEQ) {
        __ Sltiu(dst, dst, 1);
      } else {
        __ Sltu(dst, ZERO, dst);
      }
      break;

    case kCondLT:
    case kCondGE:
      if (use_imm && IsInt<16>(rhs_imm)) {
        __ Slti(dst, lhs, rhs_imm);
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Slt(dst, lhs, rhs_reg);
      }
      if (if_cond == kCondGE) {
        // Simulate lhs >= rhs via !(lhs < rhs) since there's
        // only the slt instruction but no sge.
        __ Xori(dst, dst, 1);
      }
      break;

    case kCondLE:
    case kCondGT:
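      // Note: testing IsInt<16>(rhs_imm + 1) rather than IsInt<16>(rhs_imm)
      // keeps the incremented immediate within slti's 16-bit signed field.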
      if (use_imm && IsInt<16>(rhs_imm + 1)) {
        // Simulate lhs <= rhs via lhs < rhs + 1.
        __ Slti(dst, lhs, rhs_imm + 1);
        if (if_cond == kCondGT) {
          // Simulate lhs > rhs via !(lhs <= rhs) since there's
          // only the slti instruction but no sgti.
          __ Xori(dst, dst, 1);
        }
      } else {
        if (use_imm) {
          rhs_reg = TMP;
          __ LoadConst32(rhs_reg, rhs_imm);
        }
        __ Slt(dst, rhs_reg, lhs);
        if (if_cond == kCondLE) {
          // Simulate lhs <= rhs via !(rhs < lhs) since there's
          // only the slt instruction but no sle.
          __ Xori(dst, dst, 1);
        }
      }
      break;
  }
}

void LocationsBuilderMIPS64::VisitDiv(HDiv* div) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
  switch (div->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected div type " << div->GetResultType();
  }
}

void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
      if (type == Primitive::kPrimInt)
        __ DivR6(dst, lhs, rhs);
      else
        __ Ddiv(dst, lhs, rhs);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (type == Primitive::kPrimFloat)
        __ DivS(dst, lhs, rhs);
      else
        __ DivD(dst, lhs, rhs);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected div type " << type;
  }
}

void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
  SlowPathCodeMIPS64* slow_path =
      new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);
  Location value = instruction->GetLocations()->InAt(0);

  Primitive::Type type = instruction->GetType();

  if ((type == Primitive::kPrimBoolean) || !Primitive::IsIntegralType(type)) {
    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
    return;
  }

  if (value.IsConstant()) {
    int64_t divisor = codegen_->GetInt64ValueOf(value.GetConstant()->AsConstant());
    if (divisor == 0) {
      __ B(slow_path->GetEntryLabel());
    } else {
      // A division by a non-zero constant is valid. We don't need to perform
      // any check, so simply fall through.
    }
  } else {
    __ Beqzc(value.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
  }
}

void LocationsBuilderMIPS64::VisitDoubleConstant(HDoubleConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderMIPS64::VisitExit(HExit* exit) {
  exit->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
}

void LocationsBuilderMIPS64::VisitFloatConstant(HFloatConstant* constant) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void InstructionCodeGeneratorMIPS64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
  DCHECK(!successor->IsExitBlock());
  HBasicBlock* block = got->GetBlock();
  HInstruction* previous = got->GetPrevious();
  HLoopInformation* info = block->GetLoopInformation();

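  // For the back edge of a loop with a suspend check, generate the suspend
  // check before the branch so the loop polls for thread suspension.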
  if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
    codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
    GenerateSuspendCheck(info->GetSuspendCheck(), successor);
    return;
  }
  if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
    GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
  }
  if (!codegen_->GoesToNextBlock(block, successor)) {
    __ B(codegen_->GetLabelOf(successor));
  }
}

void LocationsBuilderMIPS64::VisitGoto(HGoto* got) {
  got->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitGoto(HGoto* got) {
  HandleGoto(got, got->GetSuccessor());
}

void LocationsBuilderMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
  try_boundary->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
  HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
  if (!successor->IsExitBlock()) {
    HandleGoto(try_boundary, successor);
  }
}

void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruction,
                                                           Label* true_target,
                                                           Label* false_target,
                                                           Label* always_true_target) {
  HInstruction* cond = instruction->InputAt(0);
  HCondition* condition = cond->AsCondition();

  if (cond->IsIntConstant()) {
    int32_t cond_value = cond->AsIntConstant()->GetValue();
    if (cond_value == 1) {
      if (always_true_target != nullptr) {
        __ B(always_true_target);
      }
      return;
    } else {
      DCHECK_EQ(cond_value, 0);
    }
  } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
    // The condition instruction has been materialized, compare the output to 0.
    Location cond_val = instruction->GetLocations()->InAt(0);
    DCHECK(cond_val.IsRegister());
    __ Bnezc(cond_val.AsRegister<GpuRegister>(), true_target);
  } else {
    // The condition instruction has not been materialized, use its inputs as
    // the comparison and its condition as the branch condition.
    GpuRegister lhs = condition->GetLocations()->InAt(0).AsRegister<GpuRegister>();
    Location rhs_location = condition->GetLocations()->InAt(1);
    GpuRegister rhs_reg = ZERO;
    int32_t rhs_imm = 0;
    bool use_imm = rhs_location.IsConstant();
    if (use_imm) {
      rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
    } else {
      rhs_reg = rhs_location.AsRegister<GpuRegister>();
    }

    IfCondition if_cond = condition->GetCondition();
    if (use_imm && rhs_imm == 0) {
      switch (if_cond) {
        case kCondEQ:
          __ Beqzc(lhs, true_target);
          break;
        case kCondNE:
          __ Bnezc(lhs, true_target);
          break;
        case kCondLT:
          __ Bltzc(lhs, true_target);
          break;
        case kCondGE:
          __ Bgezc(lhs, true_target);
          break;
        case kCondLE:
          __ Blezc(lhs, true_target);
          break;
        case kCondGT:
          __ Bgtzc(lhs, true_target);
          break;
      }
    } else {
      if (use_imm) {
        rhs_reg = TMP;
        __ LoadConst32(rhs_reg, rhs_imm);
      }
      // It looks like we can get here with lhs == rhs. Should that be possible at all?
      // Mips R6 requires lhs != rhs for compact branches.
      if (lhs == rhs_reg) {
        DCHECK(!use_imm);
        switch (if_cond) {
          case kCondEQ:
          case kCondGE:
          case kCondLE:
            // If lhs == rhs, a condition that allows equality always holds:
            // branch unconditionally.
            __ B(true_target);
            break;
          case kCondNE:
          case kCondLT:
          case kCondGT:
            // If lhs == rhs, a strict condition never holds: emit nothing.
            break;
        }
      } else {
        switch (if_cond) {
          case kCondEQ:
            __ Beqc(lhs, rhs_reg, true_target);
            break;
          case kCondNE:
            __ Bnec(lhs, rhs_reg, true_target);
            break;
          case kCondLT:
            __ Bltc(lhs, rhs_reg, true_target);
            break;
          case kCondGE:
            __ Bgec(lhs, rhs_reg, true_target);
            break;
          case kCondLE:
            __ Bgec(rhs_reg, lhs, true_target);
            break;
          case kCondGT:
            __ Bltc(rhs_reg, lhs, true_target);
            break;
        }
      }
    }
  }
  if (false_target != nullptr) {
    __ B(false_target);
  }
}

void LocationsBuilderMIPS64::VisitIf(HIf* if_instr) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
  HInstruction* cond = if_instr->InputAt(0);
  if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
    locations->SetInAt(0, Location::RequiresRegister());
  }
}

void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
  Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
  Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
  Label* always_true_target = true_target;
2119 Label* always_true_target = true_target;
2120 if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
2121 if_instr->IfTrueSuccessor())) {
2122 always_true_target = nullptr;
2123 }
2124 if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
2125 if_instr->IfFalseSuccessor())) {
2126 false_target = nullptr;
2127 }
2128 GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
2129}
2130
2131void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
2132 LocationSummary* locations = new (GetGraph()->GetArena())
2133 LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
2134 HInstruction* cond = deoptimize->InputAt(0);
2135 DCHECK(cond->IsCondition());
2136 if (cond->AsCondition()->NeedsMaterialization()) {
2137 locations->SetInAt(0, Location::RequiresRegister());
2138 }
2139}
2140
2141void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
2142 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
2143 DeoptimizationSlowPathMIPS64(deoptimize);
2144 codegen_->AddSlowPath(slow_path);
2145 Label* slow_path_entry = slow_path->GetEntryLabel();
2146 GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
2147}
2148
2149void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
2150 const FieldInfo& field_info ATTRIBUTE_UNUSED) {
2151 LocationSummary* locations =
2152 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2153 locations->SetInAt(0, Location::RequiresRegister());
2154 if (Primitive::IsFloatingPointType(instruction->GetType())) {
2155 locations->SetOut(Location::RequiresFpuRegister());
2156 } else {
2157 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2158 }
2159}
2160
2161void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
2162 const FieldInfo& field_info) {
2163 Primitive::Type type = field_info.GetFieldType();
2164 LocationSummary* locations = instruction->GetLocations();
2165 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2166 LoadOperandType load_type = kLoadUnsignedByte;
2167 switch (type) {
2168 case Primitive::kPrimBoolean:
2169 load_type = kLoadUnsignedByte;
2170 break;
2171 case Primitive::kPrimByte:
2172 load_type = kLoadSignedByte;
2173 break;
2174 case Primitive::kPrimShort:
2175 load_type = kLoadSignedHalfword;
2176 break;
2177 case Primitive::kPrimChar:
2178 load_type = kLoadUnsignedHalfword;
2179 break;
2180 case Primitive::kPrimInt:
2181 case Primitive::kPrimFloat:
2182 load_type = kLoadWord;
2183 break;
2184 case Primitive::kPrimLong:
2185 case Primitive::kPrimDouble:
2186 load_type = kLoadDoubleword;
2187 break;
2188 case Primitive::kPrimNot:
2189 load_type = kLoadUnsignedWord;
2190 break;
2191 case Primitive::kPrimVoid:
2192 LOG(FATAL) << "Unreachable type " << type;
2193 UNREACHABLE();
2194 }
2195 if (!Primitive::IsFloatingPointType(type)) {
2196 DCHECK(locations->Out().IsRegister());
2197 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2198 __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
2199 } else {
2200 DCHECK(locations->Out().IsFpuRegister());
2201 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
2202 __ LoadFpuFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
2203 }
2204
2205 codegen_->MaybeRecordImplicitNullCheck(instruction);
2206 // TODO: memory barrier?
2207}
2208
2209void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
2210 const FieldInfo& field_info ATTRIBUTE_UNUSED) {
2211 LocationSummary* locations =
2212 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2213 locations->SetInAt(0, Location::RequiresRegister());
2214 if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
2215 locations->SetInAt(1, Location::RequiresFpuRegister());
2216 } else {
2217 locations->SetInAt(1, Location::RequiresRegister());
2218 }
2219}
2220
2221void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
2222 const FieldInfo& field_info) {
2223 Primitive::Type type = field_info.GetFieldType();
2224 LocationSummary* locations = instruction->GetLocations();
2225 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2226 StoreOperandType store_type = kStoreByte;
2227 switch (type) {
2228 case Primitive::kPrimBoolean:
2229 case Primitive::kPrimByte:
2230 store_type = kStoreByte;
2231 break;
2232 case Primitive::kPrimShort:
2233 case Primitive::kPrimChar:
2234 store_type = kStoreHalfword;
2235 break;
2236 case Primitive::kPrimInt:
2237 case Primitive::kPrimFloat:
2238 case Primitive::kPrimNot:
2239 store_type = kStoreWord;
2240 break;
2241 case Primitive::kPrimLong:
2242 case Primitive::kPrimDouble:
2243 store_type = kStoreDoubleword;
2244 break;
2245 case Primitive::kPrimVoid:
2246 LOG(FATAL) << "Unreachable type " << type;
2247 UNREACHABLE();
2248 }
2249 if (!Primitive::IsFloatingPointType(type)) {
2250 DCHECK(locations->InAt(1).IsRegister());
2251 GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
2252 __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
2253 } else {
2254 DCHECK(locations->InAt(1).IsFpuRegister());
2255 FpuRegister src = locations->InAt(1).AsFpuRegister<FpuRegister>();
2256 __ StoreFpuToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
2257 }
2258
2259 codegen_->MaybeRecordImplicitNullCheck(instruction);
2260 // TODO: memory barriers?
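  // Storing a reference into a heap object must mark the GC card covering
  // `obj` so that the concurrent garbage collector rescans the object.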
  if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
    DCHECK(locations->InAt(1).IsRegister());
    GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
    codegen_->MarkGCCard(obj, src);
  }
}

void LocationsBuilderMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary::CallKind call_kind =
      instruction->IsExactCheck() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  // The output overlaps the inputs.
  // Note that TypeCheckSlowPathMIPS64 uses this register too.
  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}

void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();

  Label done;

  // Return 0 if `obj` is null.
  // TODO: Avoid this check if we know `obj` is not null.
  __ Move(out, ZERO);
  __ Beqzc(obj, &done);

  // Compare the class of `obj` with `cls`.
  __ LoadFromOffset(kLoadUnsignedWord, out, obj, mirror::Object::ClassOffset().Int32Value());
  if (instruction->IsExactCheck()) {
    // Classes must be equal for the instanceof to succeed.
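    // out ^ cls is zero only when the class pointers match; Sltiu then maps
    // zero to 1 and everything else to 0, yielding the boolean result.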
    __ Xor(out, out, cls);
    __ Sltiu(out, out, 1);
  } else {
    // If the classes are not equal, we go into a slow path.
    DCHECK(locations->OnlyCallsOnSlowPath());
    SlowPathCodeMIPS64* slow_path =
        new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
    codegen_->AddSlowPath(slow_path);
    __ Bnec(out, cls, slow_path->GetEntryLabel());
    __ LoadConst32(out, 1);
    __ Bind(slow_path->GetExitLabel());
  }

  __ Bind(&done);
}

void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderMIPS64::VisitNullConstant(HNullConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderMIPS64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
  // The trampoline uses the same calling convention as dex calling conventions,
  // except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
  // the method_idx.
  HandleInvoke(invoke);
}

void InstructionCodeGeneratorMIPS64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
  codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
}

void LocationsBuilderMIPS64::HandleInvoke(HInvoke* invoke) {
  InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
  CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
}

void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
  HandleInvoke(invoke);
  // The register T0 is required to be used for the hidden argument in
  // art_quick_imt_conflict_trampoline, so add the hidden argument.
  invoke->GetLocations()->AddTemp(Location::RegisterLocation(T0));
}

void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
  // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
  GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
  uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
      invoke->GetImtIndex() % mirror::Class::kImtSize, kMips64PointerSize).Uint32Value();
  Location receiver = invoke->GetLocations()->InAt(0);
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);

  // Set the hidden argument.
  __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<GpuRegister>(),
                 invoke->GetDexMethodIndex());

  // temp = object->GetClass();
  if (receiver.IsStackSlot()) {
    __ LoadFromOffset(kLoadUnsignedWord, temp, SP, receiver.GetStackIndex());
    __ LoadFromOffset(kLoadUnsignedWord, temp, temp, class_offset);
  } else {
    __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
  }
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetImtEntryAt(method_offset);
  __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
  // T9 = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
  // T9();
  __ Jalr(T9);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void LocationsBuilderMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  IntrinsicLocationsBuilderMIPS64 intrinsic(codegen_);
  if (intrinsic.TryDispatch(invoke)) {
    return;
  }

  HandleInvoke(invoke);
}

void LocationsBuilderMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  IntrinsicLocationsBuilderMIPS64 intrinsic(codegen_);
  if (intrinsic.TryDispatch(invoke)) {
    return;
  }

  HandleInvoke(invoke);

  // While SetupBlockedRegisters() blocks registers S2-S8 due to their
  // clobbering somewhere else, reduce further register pressure by avoiding
  // allocation of a register for the current method pointer like on x86 baseline.
  // TODO: remove this once all the issues with register saving/restoring are
  // sorted out.
  LocationSummary* locations = invoke->GetLocations();
  Location location = locations->InAt(invoke->GetCurrentMethodInputIndex());
  if (location.IsUnallocated() && location.GetPolicy() == Location::kRequiresRegister) {
    locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::NoLocation());
  }
}

static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
  if (invoke->GetLocations()->Intrinsified()) {
    IntrinsicCodeGeneratorMIPS64 intrinsic(codegen);
    intrinsic.Dispatch(invoke);
    return true;
  }
  return false;
}

void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
  // All registers are assumed to be correctly set up per the calling convention.

  Location callee_method = temp;  // For all kinds except kRecursive, callee will be in temp.
  switch (invoke->GetMethodLoadKind()) {
    case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
      // temp = thread->string_init_entrypoint
      __ LoadFromOffset(kLoadDoubleword,
                        temp.AsRegister<GpuRegister>(),
                        TR,
                        invoke->GetStringInitOffset());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
      callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
      __ LoadConst64(temp.AsRegister<GpuRegister>(), invoke->GetMethodAddress());
      break;
    case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
      // TODO: Implement this type. (Needs literal support.) At the moment, the
      // CompilerDriver will not direct the backend to use this type for MIPS.
      LOG(FATAL) << "Unsupported!";
      UNREACHABLE();
    case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
      // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
      FALLTHROUGH_INTENDED;
    case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
      Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
      GpuRegister reg = temp.AsRegister<GpuRegister>();
      GpuRegister method_reg;
      if (current_method.IsRegister()) {
        method_reg = current_method.AsRegister<GpuRegister>();
      } else {
        // TODO: use the appropriate DCHECK() here if possible.
        // DCHECK(invoke->GetLocations()->Intrinsified());
        DCHECK(!current_method.IsValid());
        method_reg = reg;
        __ Ld(reg, SP, kCurrentMethodStackOffset);
      }

      // temp = temp->dex_cache_resolved_methods_;
      __ LoadFromOffset(kLoadDoubleword,
                        reg,
                        method_reg,
                        ArtMethod::DexCacheResolvedMethodsOffset(kMips64PointerSize).Int32Value());
      // temp = temp[index_in_cache]
      uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
      __ LoadFromOffset(kLoadDoubleword,
                        reg,
                        reg,
                        CodeGenerator::GetCachePointerOffset(index_in_cache));
      break;
    }
  }

  switch (invoke->GetCodePtrLocation()) {
    case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
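      // A recursive call branches straight back to this method's own frame
      // entry, so no callee method pointer needs to be loaded here.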
      __ Jalr(&frame_entry_label_, T9);
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
      // T9 = invoke->GetDirectCodePtr();
      __ LoadConst64(T9, invoke->GetDirectCodePtr());
      // T9()
      __ Jalr(T9);
      break;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
      // TODO: Implement kCallPCRelative. For the moment, we fall back to kMethodCode.
      FALLTHROUGH_INTENDED;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
      // TODO: Implement kDirectCodeFixup. For the moment, we fall back to kMethodCode.
      FALLTHROUGH_INTENDED;
    case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
      // T9 = callee_method->entry_point_from_quick_compiled_code_;
      __ LoadFromOffset(kLoadDoubleword,
                        T9,
                        callee_method.AsRegister<GpuRegister>(),
                        ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                            kMips64WordSize).Int32Value());
      // T9()
      __ Jalr(T9);
      break;
  }
  DCHECK(!IsLeafMethod());
}

void InstructionCodeGeneratorMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
  // When we do not run baseline, explicit clinit checks triggered by static
  // invokes must have been pruned by art::PrepareForRegisterAllocation.
  DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());

  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  LocationSummary* locations = invoke->GetLocations();
  codegen_->GenerateStaticOrDirectCall(invoke,
                                       locations->HasTemps()
                                           ? locations->GetTemp(0)
                                           : Location::NoLocation());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
  if (TryGenerateIntrinsicCode(invoke, codegen_)) {
    return;
  }

  LocationSummary* locations = invoke->GetLocations();
  Location receiver = locations->InAt(0);
  GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
  size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
      invoke->GetVTableIndex(), kMips64PointerSize).SizeValue();
  uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
  Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);

  // temp = object->GetClass();
  DCHECK(receiver.IsRegister());
  __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  // temp = temp->GetMethodAt(method_offset);
  __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
  // T9 = temp->GetEntryPoint();
  __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
  // T9();
  __ Jalr(T9);
  DCHECK(!codegen_->IsLeafMethod());
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
  LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
                                                              : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
  LocationSummary* locations = cls->GetLocations();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
  if (cls->IsReferrersClass()) {
    DCHECK(!cls->CanCallRuntime());
    DCHECK(!cls->MustGenerateClinitCheck());
    __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
                      ArtMethod::DeclaringClassOffset().Int32Value());
  } else {
    DCHECK(cls->CanCallRuntime());
    __ LoadFromOffset(kLoadDoubleword, out, current_method,
                      ArtMethod::DexCacheResolvedTypesOffset(kMips64PointerSize).Int32Value());
    __ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
    // TODO: We will need a read barrier here.
    SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
        cls,
        cls,
        cls->GetDexPc(),
        cls->MustGenerateClinitCheck());
    codegen_->AddSlowPath(slow_path);
    __ Beqzc(out, slow_path->GetEntryLabel());
    if (cls->MustGenerateClinitCheck()) {
      GenerateClassInitializationCheck(slow_path, out);
    } else {
      __ Bind(slow_path->GetExitLabel());
    }
  }
}

static int32_t GetExceptionTlsOffset() {
  return Thread::ExceptionOffset<kMips64WordSize>().Int32Value();
}

void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) {
  GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>();
  __ LoadFromOffset(kLoadUnsignedWord, out, TR, GetExceptionTlsOffset());
}

void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) {
  new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
}

void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
  __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
}

void LocationsBuilderMIPS64::VisitLoadLocal(HLoadLocal* load) {
  load->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
  // Nothing to do, this is driven by the code generator.
}

void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
  codegen_->AddSlowPath(slow_path);

  LocationSummary* locations = load->GetLocations();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
  __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
                    ArtMethod::DeclaringClassOffset().Int32Value());
  __ LoadFromOffset(kLoadDoubleword, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
  __ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
  // TODO: We will need a read barrier here.
  __ Beqzc(out, slow_path->GetEntryLabel());
  __ Bind(slow_path->GetExitLabel());
}

void LocationsBuilderMIPS64::VisitLocal(HLocal* local) {
  local->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitLocal(HLocal* local) {
  DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
}

void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
  locations->SetOut(Location::ConstantLocation(constant));
}

void InstructionCodeGeneratorMIPS64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
  // Will be generated at use site.
}

void LocationsBuilderMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
  codegen_->InvokeRuntime(instruction->IsEnter()
                              ? QUICK_ENTRY_POINT(pLockObject)
                              : QUICK_ENTRY_POINT(pUnlockObject),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
}

void LocationsBuilderMIPS64::VisitMul(HMul* mul) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
  switch (mul->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
  }
}

void InstructionCodeGeneratorMIPS64::VisitMul(HMul* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
      if (type == Primitive::kPrimInt)
        __ MulR6(dst, lhs, rhs);
      else
        __ Dmul(dst, lhs, rhs);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (type == Primitive::kPrimFloat)
        __ MulS(dst, lhs, rhs);
      else
        __ MulD(dst, lhs, rhs);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected mul type " << type;
  }
}

void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
  switch (neg->GetResultType()) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
  }
}

void InstructionCodeGeneratorMIPS64::VisitNeg(HNeg* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
      if (type == Primitive::kPrimInt)
        __ Subu(dst, ZERO, src);
      else
        __ Dsubu(dst, ZERO, src);
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
      if (type == Primitive::kPrimFloat)
        __ NegS(dst, src);
      else
        __ NegD(dst, src);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected neg type " << type;
  }
}

void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
}

void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  // Move a uint16_t value to a register.
  __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
  codegen_->InvokeRuntime(instruction->GetEntrypoint(),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
}

void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
}

void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) {
  LocationSummary* locations = instruction->GetLocations();
2835 // Move an uint16_t value to a register.
2836 __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
Calin Juravle175dc732015-08-25 15:42:32 +01002837 codegen_->InvokeRuntime(instruction->GetEntrypoint(),
2838 instruction,
2839 instruction->GetDexPc(),
2840 nullptr);
Alexey Frunze4dda3372015-06-01 18:31:49 -07002841 CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
2842}
2843
2844void LocationsBuilderMIPS64::VisitNot(HNot* instruction) {
2845 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2846 locations->SetInAt(0, Location::RequiresRegister());
2847 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2848}
2849
2850void InstructionCodeGeneratorMIPS64::VisitNot(HNot* instruction) {
2851 Primitive::Type type = instruction->GetType();
2852 LocationSummary* locations = instruction->GetLocations();
2853
2854 switch (type) {
2855 case Primitive::kPrimInt:
2856 case Primitive::kPrimLong: {
2857 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2858 GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
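      // nor dst, src, $zero computes ~(src | 0), i.e. the bitwise not of src.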
      __ Nor(dst, src, ZERO);
      break;
    }

    default:
      LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
  }
}

void LocationsBuilderMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
  LocationSummary* locations = instruction->GetLocations();
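  // Booleans are materialized as 0 or 1, so flipping bit 0 negates the value.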
  __ Xori(locations->Out().AsRegister<GpuRegister>(),
          locations->InAt(0).AsRegister<GpuRegister>(),
          1);
}

void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
  LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
      ? LocationSummary::kCallOnSlowPath
      : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
  locations->SetInAt(0, Location::RequiresRegister());
  if (instruction->HasUses()) {
    locations->SetOut(Location::SameAsFirstInput());
  }
}

void InstructionCodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
  if (codegen_->CanMoveNullCheckToUser(instruction)) {
    return;
  }
  Location obj = instruction->GetLocations()->InAt(0);

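  // Implicit null check: load from the object into the ZERO register (the
  // result is discarded). If the reference is null, the resulting SIGSEGV is
  // handled by the runtime, which throws the NullPointerException at the dex
  // PC recorded below.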
  __ Lw(ZERO, obj.AsRegister<GpuRegister>(), 0);
  codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
}

void InstructionCodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
  SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS64(instruction);
  codegen_->AddSlowPath(slow_path);

  Location obj = instruction->GetLocations()->InAt(0);

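  // Branch to the throwing slow path if the reference is null.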
  __ Beqzc(obj.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
}

void InstructionCodeGeneratorMIPS64::VisitNullCheck(HNullCheck* instruction) {
  if (codegen_->IsImplicitNullCheckAllowed(instruction)) {
    GenerateImplicitNullCheck(instruction);
  } else {
    GenerateExplicitNullCheck(instruction);
  }
}

void LocationsBuilderMIPS64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitOr(HOr* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instruction) {
  codegen_->GetMoveResolver()->EmitNativeCode(instruction);
}

void LocationsBuilderMIPS64::VisitParameterValue(HParameterValue* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
  if (location.IsStackSlot()) {
    location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  } else if (location.IsDoubleStackSlot()) {
    location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
  }
  locations->SetOut(location);
}

void InstructionCodeGeneratorMIPS64::VisitParameterValue(HParameterValue* instruction
                                                         ATTRIBUTE_UNUSED) {
  // Nothing to do, the parameter is already at its location.
}

void LocationsBuilderMIPS64::VisitCurrentMethod(HCurrentMethod* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
}

void InstructionCodeGeneratorMIPS64::VisitCurrentMethod(HCurrentMethod* instruction
                                                        ATTRIBUTE_UNUSED) {
  // Nothing to do, the method is already at its location.
}

void LocationsBuilderMIPS64::VisitPhi(HPhi* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
    locations->SetInAt(i, Location::Any());
  }
  locations->SetOut(Location::Any());
}

void InstructionCodeGeneratorMIPS64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
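  // Phis are resolved into moves by the register allocator, so no code is
  // generated for them here.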
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS64::VisitRem(HRem* rem) {
  Primitive::Type type = rem->GetResultType();
  LocationSummary::CallKind call_kind =
      Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong:
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      InvokeRuntimeCallingConvention calling_convention;
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
      locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
      locations->SetOut(calling_convention.GetReturnLocation(type));
      break;
    }

    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitRem(HRem* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
      if (type == Primitive::kPrimInt)
        __ ModR6(dst, lhs, rhs);
      else
        __ Dmod(dst, lhs, rhs);
      break;
    }

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
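      // The remainder is computed by the fmodf/fmod runtime entry points; the
      // locations builder fixed the inputs to the FPU argument registers.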
      int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
                                                             : QUICK_ENTRY_POINT(pFmod);
      codegen_->InvokeRuntime(entry_offset, instruction, instruction->GetDexPc(), nullptr);
      break;
    }
    default:
      LOG(FATAL) << "Unexpected rem type " << type;
  }
}

void LocationsBuilderMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  memory_barrier->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
  GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
}

void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
  Primitive::Type return_type = ret->InputAt(0)->GetType();
  locations->SetInAt(0, Mips64ReturnLocation(return_type));
}

void InstructionCodeGeneratorMIPS64::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS64::VisitReturnVoid(HReturnVoid* ret) {
  ret->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
  codegen_->GenerateFrameExit();
}

void LocationsBuilderMIPS64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void InstructionCodeGeneratorMIPS64::VisitShl(HShl* shl) {
  HandleShift(shl);
}

void LocationsBuilderMIPS64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void InstructionCodeGeneratorMIPS64::VisitShr(HShr* shr) {
  HandleShift(shr);
}

void LocationsBuilderMIPS64::VisitStoreLocal(HStoreLocal* store) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
  Primitive::Type field_type = store->InputAt(1)->GetType();
  switch (field_type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
      break;

    default:
      LOG(FATAL) << "Unimplemented local type " << field_type;
  }
}

void InstructionCodeGeneratorMIPS64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
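  // Nothing to do: the store is handled by the location constraints set above.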
}

void LocationsBuilderMIPS64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitSub(HSub* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
  HandleFieldGet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void InstructionCodeGeneratorMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
  HandleFieldSet(instruction, instruction->GetFieldInfo());
}

void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
  new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
}

void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
  HBasicBlock* block = instruction->GetBlock();
  if (block->GetLoopInformation() != nullptr) {
    DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
    // The back edge will generate the suspend check.
    return;
  }
  if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
    // The goto will generate the suspend check.
    return;
  }
  GenerateSuspendCheck(instruction, nullptr);
}

void LocationsBuilderMIPS64::VisitTemporary(HTemporary* temp) {
  temp->SetLocations(nullptr);
}

void InstructionCodeGeneratorMIPS64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
  // Nothing to do, this is driven by the code generator.
}

void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
}

void InstructionCodeGeneratorMIPS64::VisitThrow(HThrow* instruction) {
  codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
                          instruction,
                          instruction->GetDexPc(),
                          nullptr);
  CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
}

void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
  Primitive::Type input_type = conversion->GetInputType();
  Primitive::Type result_type = conversion->GetResultType();
  DCHECK_NE(input_type, result_type);

  if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
      (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
    LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
  }

  LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
  if ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
      (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type))) {
    call_kind = LocationSummary::kCall;
  }

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);

  if (call_kind == LocationSummary::kNoCall) {
    if (Primitive::IsFloatingPointType(input_type)) {
      locations->SetInAt(0, Location::RequiresFpuRegister());
    } else {
      locations->SetInAt(0, Location::RequiresRegister());
    }

    if (Primitive::IsFloatingPointType(result_type)) {
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
    } else {
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
    }
  } else {
    InvokeRuntimeCallingConvention calling_convention;

    if (Primitive::IsFloatingPointType(input_type)) {
      locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
    } else {
      locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
    }

    locations->SetOut(calling_convention.GetReturnLocation(result_type));
  }
}

void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
  LocationSummary* locations = conversion->GetLocations();
  Primitive::Type result_type = conversion->GetResultType();
  Primitive::Type input_type = conversion->GetInputType();

  DCHECK_NE(input_type, result_type);

  if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
    GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
    GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();

    switch (result_type) {
      case Primitive::kPrimChar:
        __ Andi(dst, src, 0xFFFF);
        break;
      case Primitive::kPrimByte:
        // long is never converted into types narrower than int directly,
        // so SEB and SEH can be used without ever causing unpredictable results
        // on 64-bit inputs.
        DCHECK_NE(input_type, Primitive::kPrimLong);
        __ Seb(dst, src);
        break;
      case Primitive::kPrimShort:
        // long is never converted into types narrower than int directly,
        // so SEB and SEH can be used without ever causing unpredictable results
        // on 64-bit inputs.
        DCHECK_NE(input_type, Primitive::kPrimLong);
        __ Seh(dst, src);
        break;
      case Primitive::kPrimInt:
      case Primitive::kPrimLong:
        // Sign-extend the 32-bit int into bits 32 through 63 for
        // int-to-long and long-to-int conversions.
        __ Sll(dst, src, 0);
        break;

      default:
        LOG(FATAL) << "Unexpected type conversion from " << input_type
                   << " to " << result_type;
    }
  } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
    if (input_type != Primitive::kPrimLong) {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
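      // Move the 32-bit integer to the FPU scratch register FTMP, then
      // convert from there into the destination register.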
      __ Mtc1(src, FTMP);
      if (result_type == Primitive::kPrimFloat) {
        __ Cvtsw(dst, FTMP);
      } else {
        __ Cvtdw(dst, FTMP);
      }
    } else {
      int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
                                                                    : QUICK_ENTRY_POINT(pL2d);
      codegen_->InvokeRuntime(entry_offset,
                              conversion,
                              conversion->GetDexPc(),
                              nullptr);
    }
  } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
    CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
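    // The "z" entry points truncate toward zero, as required by the Java
    // semantics of floating-point-to-integer conversion.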
    int32_t entry_offset;
    if (result_type != Primitive::kPrimLong) {
      entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2iz)
                                                           : QUICK_ENTRY_POINT(pD2iz);
    } else {
      entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
                                                           : QUICK_ENTRY_POINT(pD2l);
    }
    codegen_->InvokeRuntime(entry_offset,
                            conversion,
                            conversion->GetDexPc(),
                            nullptr);
  } else if (Primitive::IsFloatingPointType(result_type) &&
             Primitive::IsFloatingPointType(input_type)) {
    FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
    FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
    if (result_type == Primitive::kPrimFloat) {
      __ Cvtsd(dst, src);
    } else {
      __ Cvtds(dst, src);
    }
  } else {
    LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
               << " to " << result_type;
  }
}

void LocationsBuilderMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void InstructionCodeGeneratorMIPS64::VisitUShr(HUShr* ushr) {
  HandleShift(ushr);
}

void LocationsBuilderMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitXor(HXor* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void InstructionCodeGeneratorMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
  // Nothing to do, this should be removed during prepare for register allocator.
  LOG(FATAL) << "Unreachable";
}

void LocationsBuilderMIPS64::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitEqual(HEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitNotEqual(HNotEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThan(HLessThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThan(HGreaterThan* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
  VisitCondition(comp);
}

void LocationsBuilderMIPS64::VisitFakeString(HFakeString* instruction) {
  DCHECK(codegen_->IsBaseline());
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
}

void InstructionCodeGeneratorMIPS64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
  DCHECK(codegen_->IsBaseline());
  // Will be generated at use site.
}

// Simple implementation of packed switch - generate cascaded compare/jumps.
void LocationsBuilderMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
}

void InstructionCodeGeneratorMIPS64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
  int32_t lower_bound = switch_instr->GetStartValue();
  int32_t num_entries = switch_instr->GetNumEntries();
  LocationSummary* locations = switch_instr->GetLocations();
  GpuRegister value_reg = locations->InAt(0).AsRegister<GpuRegister>();
  HBasicBlock* default_block = switch_instr->GetDefaultBlock();

  // Create a series of compare/jumps.
  const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
  for (int32_t i = 0; i < num_entries; i++) {
    int32_t case_value = lower_bound + i;
    Label* succ = codegen_->GetLabelOf(successors.at(i));
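    // Beqzc compares directly against the zero register, so the zero case
    // needs no constant materialized in TMP.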
    if (case_value == 0) {
      __ Beqzc(value_reg, succ);
    } else {
      __ LoadConst32(TMP, case_value);
      __ Beqc(value_reg, TMP, succ);
    }
  }

  // And the default for any other value.
  if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
    __ B(codegen_->GetLabelOf(default_block));
  }
}

}  // namespace mips64
}  // namespace art