1/*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "code_generator_mips64.h"
18
19#include "entrypoints/quick/quick_entrypoints.h"
20#include "entrypoints/quick/quick_entrypoints_enum.h"
21#include "gc/accounting/card_table.h"
22#include "intrinsics.h"
23#include "art_method.h"
24#include "mirror/array-inl.h"
25#include "mirror/class-inl.h"
26#include "offsets.h"
27#include "thread.h"
28#include "utils/mips64/assembler_mips64.h"
29#include "utils/assembler.h"
30#include "utils/stack_checks.h"
31
32namespace art {
33namespace mips64 {
34
35static constexpr int kCurrentMethodStackOffset = 0;
36static constexpr GpuRegister kMethodRegisterArgument = A0;
37
38// We need extra temporary/scratch registers (in addition to AT) in some cases.
39static constexpr GpuRegister TMP = T8;
40static constexpr FpuRegister FTMP = F8;
41
42// ART Thread Register.
43static constexpr GpuRegister TR = S1;
44
45Location Mips64ReturnLocation(Primitive::Type return_type) {
46 switch (return_type) {
47 case Primitive::kPrimBoolean:
48 case Primitive::kPrimByte:
49 case Primitive::kPrimChar:
50 case Primitive::kPrimShort:
51 case Primitive::kPrimInt:
52 case Primitive::kPrimNot:
53 case Primitive::kPrimLong:
54 return Location::RegisterLocation(V0);
55
56 case Primitive::kPrimFloat:
57 case Primitive::kPrimDouble:
58 return Location::FpuRegisterLocation(F0);
59
60 case Primitive::kPrimVoid:
61 return Location();
62 }
63 UNREACHABLE();
64}
65
66Location InvokeDexCallingConventionVisitorMIPS64::GetReturnLocation(Primitive::Type type) const {
67 return Mips64ReturnLocation(type);
68}
69
70Location InvokeDexCallingConventionVisitorMIPS64::GetMethodLocation() const {
71 return Location::RegisterLocation(kMethodRegisterArgument);
72}
73
74Location InvokeDexCallingConventionVisitorMIPS64::GetNextLocation(Primitive::Type type) {
75 Location next_location;
76 if (type == Primitive::kPrimVoid) {
77 LOG(FATAL) << "Unexpected parameter type " << type;
78 }
79
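  // Note: the MIPS64 N64 calling convention assigns GP and FP argument registers in
  // lockstep, i.e. every argument consumes a slot in both sequences. That is why the
  // branches below advance gp_index_ and float_index_ together.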
80 if (Primitive::IsFloatingPointType(type) &&
81 (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
82 next_location = Location::FpuRegisterLocation(
83 calling_convention.GetFpuRegisterAt(float_index_++));
84 gp_index_++;
85 } else if (!Primitive::IsFloatingPointType(type) &&
86 (gp_index_ < calling_convention.GetNumberOfRegisters())) {
87 next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index_++));
88 float_index_++;
89 } else {
90 size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
91 next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
92 : Location::StackSlot(stack_offset);
93 }
94
95 // Space on the stack is reserved for all arguments.
96 stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
97
98 // TODO: review
99
100 // TODO: shouldn't we use a whole machine word per argument on the stack?
101 // Implicit 4-byte method pointer (and such) will cause misalignment.
102
103 return next_location;
104}
105
106Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
107 return Mips64ReturnLocation(type);
108}
109
110#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()->
111#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()
112
113class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
114 public:
115 BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction,
116 Location index_location,
117 Location length_location)
118 : instruction_(instruction),
119 index_location_(index_location),
120 length_location_(length_location) {}
121
122 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
123 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
124 __ Bind(GetEntryLabel());
125 // We're moving two locations to locations that could overlap, so we need a parallel
126 // move resolver.
127 InvokeRuntimeCallingConvention calling_convention;
128 codegen->EmitParallelMoves(index_location_,
129 Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
130 Primitive::kPrimInt,
131 length_location_,
132 Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
133 Primitive::kPrimInt);
134 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
135 instruction_,
136 instruction_->GetDexPc(),
137 this);
138 CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
139 }
140
141  bool IsFatal() const OVERRIDE { return true; }
142
143  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }
144
145 private:
146 HBoundsCheck* const instruction_;
147 const Location index_location_;
148 const Location length_location_;
149
150 DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
151};
152
153class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
154 public:
155 explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction) : instruction_(instruction) {}
156
157 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
158 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
159 __ Bind(GetEntryLabel());
160 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
161 instruction_,
162 instruction_->GetDexPc(),
163 this);
164 CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
165 }
166
167  bool IsFatal() const OVERRIDE { return true; }
168
169  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }
170
171 private:
172 HDivZeroCheck* const instruction_;
173 DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
174};
175
176class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
177 public:
178 LoadClassSlowPathMIPS64(HLoadClass* cls,
179 HInstruction* at,
180 uint32_t dex_pc,
181 bool do_clinit)
182 : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
183 DCHECK(at->IsLoadClass() || at->IsClinitCheck());
184 }
185
186 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
187 LocationSummary* locations = at_->GetLocations();
188 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
189
190 __ Bind(GetEntryLabel());
191 SaveLiveRegisters(codegen, locations);
192
193 InvokeRuntimeCallingConvention calling_convention;
194 __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
195 int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
196 : QUICK_ENTRY_POINT(pInitializeType);
197 mips64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
198 if (do_clinit_) {
199 CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
200 } else {
201 CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
202 }
203
204 // Move the class to the desired location.
205 Location out = locations->Out();
206 if (out.IsValid()) {
207 DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
208 Primitive::Type type = at_->GetType();
209 mips64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
210 }
211
212 RestoreLiveRegisters(codegen, locations);
213 __ B(GetExitLabel());
214 }
215
216  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS64"; }
217
218 private:
219 // The class this slow path will load.
220 HLoadClass* const cls_;
221
222 // The instruction where this slow path is happening.
223 // (Might be the load class or an initialization check).
224 HInstruction* const at_;
225
226 // The dex PC of `at_`.
227 const uint32_t dex_pc_;
228
229 // Whether to initialize the class.
230 const bool do_clinit_;
231
232 DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
233};
234
235class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
236 public:
237 explicit LoadStringSlowPathMIPS64(HLoadString* instruction) : instruction_(instruction) {}
238
239 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
240 LocationSummary* locations = instruction_->GetLocations();
241 DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
242 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
243
244 __ Bind(GetEntryLabel());
245 SaveLiveRegisters(codegen, locations);
246
247 InvokeRuntimeCallingConvention calling_convention;
248 __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
249 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
250 instruction_,
251 instruction_->GetDexPc(),
252 this);
253 CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
254 Primitive::Type type = instruction_->GetType();
255 mips64_codegen->MoveLocation(locations->Out(),
256 calling_convention.GetReturnLocation(type),
257 type);
258
259 RestoreLiveRegisters(codegen, locations);
260 __ B(GetExitLabel());
261 }
262
263  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }
264
265 private:
266 HLoadString* const instruction_;
267
268 DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
269};
270
271class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
272 public:
273 explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : instruction_(instr) {}
274
275 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
276 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
277 __ Bind(GetEntryLabel());
278 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
279 instruction_,
280 instruction_->GetDexPc(),
281 this);
282 CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
283 }
284
285  bool IsFatal() const OVERRIDE { return true; }
286
287  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }
288
289 private:
290 HNullCheck* const instruction_;
291
292 DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
293};
294
295class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
296 public:
297 explicit SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction,
298 HBasicBlock* successor)
299 : instruction_(instruction), successor_(successor) {}
300
301 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
302 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
303 __ Bind(GetEntryLabel());
304 SaveLiveRegisters(codegen, instruction_->GetLocations());
305 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
306 instruction_,
307 instruction_->GetDexPc(),
308 this);
309 CheckEntrypointTypes<kQuickTestSuspend, void, void>();
310 RestoreLiveRegisters(codegen, instruction_->GetLocations());
311 if (successor_ == nullptr) {
312 __ B(GetReturnLabel());
313 } else {
314 __ B(mips64_codegen->GetLabelOf(successor_));
315 }
316 }
317
318 Label* GetReturnLabel() {
319 DCHECK(successor_ == nullptr);
320 return &return_label_;
321 }
322
323  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }
324
325 private:
326 HSuspendCheck* const instruction_;
327 // If not null, the block to branch to after the suspend check.
328 HBasicBlock* const successor_;
329
330 // If `successor_` is null, the label to branch to after the suspend check.
331 Label return_label_;
332
333 DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS64);
334};
335
336class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
337 public:
338 TypeCheckSlowPathMIPS64(HInstruction* instruction,
339 Location class_to_check,
340 Location object_class,
341 uint32_t dex_pc)
342 : instruction_(instruction),
343 class_to_check_(class_to_check),
344 object_class_(object_class),
345 dex_pc_(dex_pc) {}
346
347 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
348 LocationSummary* locations = instruction_->GetLocations();
349 DCHECK(instruction_->IsCheckCast()
350 || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
351 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
352
353 __ Bind(GetEntryLabel());
354 SaveLiveRegisters(codegen, locations);
355
356 // We're moving two locations to locations that could overlap, so we need a parallel
357 // move resolver.
358 InvokeRuntimeCallingConvention calling_convention;
359 codegen->EmitParallelMoves(class_to_check_,
360 Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
361 Primitive::kPrimNot,
362 object_class_,
363 Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
364 Primitive::kPrimNot);
365
366 if (instruction_->IsInstanceOf()) {
367 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
368 instruction_,
369 dex_pc_,
370 this);
371 Primitive::Type ret_type = instruction_->GetType();
372 Location ret_loc = calling_convention.GetReturnLocation(ret_type);
373 mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
374 CheckEntrypointTypes<kQuickInstanceofNonTrivial,
375 uint32_t,
376 const mirror::Class*,
377 const mirror::Class*>();
378 } else {
379 DCHECK(instruction_->IsCheckCast());
380 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc_, this);
381 CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
382 }
383
384 RestoreLiveRegisters(codegen, locations);
385 __ B(GetExitLabel());
386 }
387
388  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; }
389
390 private:
391 HInstruction* const instruction_;
392 const Location class_to_check_;
393 const Location object_class_;
394 uint32_t dex_pc_;
395
396 DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
397};
398
399class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
400 public:
401 explicit DeoptimizationSlowPathMIPS64(HInstruction* instruction)
402 : instruction_(instruction) {}
403
404 void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
405 __ Bind(GetEntryLabel());
406 SaveLiveRegisters(codegen, instruction_->GetLocations());
407 DCHECK(instruction_->IsDeoptimize());
408 HDeoptimize* deoptimize = instruction_->AsDeoptimize();
409 uint32_t dex_pc = deoptimize->GetDexPc();
410 CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
411 mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
412 }
413
414  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }
415
416 private:
417 HInstruction* const instruction_;
418 DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
419};
420
421CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
422 const Mips64InstructionSetFeatures& isa_features,
423 const CompilerOptions& compiler_options)
424 : CodeGenerator(graph,
425 kNumberOfGpuRegisters,
426 kNumberOfFpuRegisters,
427 0, // kNumberOfRegisterPairs
428 ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
429 arraysize(kCoreCalleeSaves)),
430 ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
431 arraysize(kFpuCalleeSaves)),
432 compiler_options),
433 block_labels_(graph->GetArena(), 0),
434 location_builder_(graph, this),
435 instruction_visitor_(graph, this),
436 move_resolver_(graph->GetArena(), this),
437 isa_features_(isa_features) {
438 // Save RA (containing the return address) to mimic Quick.
439 AddAllocatedRegister(Location::RegisterLocation(RA));
440}
441
442#undef __
443#define __ down_cast<Mips64Assembler*>(GetAssembler())->
444#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()
445
446void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
447 CodeGenerator::Finalize(allocator);
448}
449
450Mips64Assembler* ParallelMoveResolverMIPS64::GetAssembler() const {
451 return codegen_->GetAssembler();
452}
453
454void ParallelMoveResolverMIPS64::EmitMove(size_t index) {
455 MoveOperands* move = moves_.Get(index);
456 codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
457}
458
459void ParallelMoveResolverMIPS64::EmitSwap(size_t index) {
460 MoveOperands* move = moves_.Get(index);
461 codegen_->SwapLocations(move->GetDestination(), move->GetSource(), move->GetType());
462}
463
464void ParallelMoveResolverMIPS64::RestoreScratch(int reg) {
465 // Pop reg
466 __ Ld(GpuRegister(reg), SP, 0);
467 __ DecreaseFrameSize(kMips64WordSize);
468}
469
470void ParallelMoveResolverMIPS64::SpillScratch(int reg) {
471 // Push reg
472 __ IncreaseFrameSize(kMips64WordSize);
473 __ Sd(GpuRegister(reg), SP, 0);
474}
475
476void ParallelMoveResolverMIPS64::Exchange(int index1, int index2, bool double_slot) {
477 LoadOperandType load_type = double_slot ? kLoadDoubleword : kLoadWord;
478 StoreOperandType store_type = double_slot ? kStoreDoubleword : kStoreWord;
479 // Allocate a scratch register other than TMP, if available.
480 // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
481 // automatically unspilled when the scratch scope object is destroyed).
482 ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
483 // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
484 int stack_offset = ensure_scratch.IsSpilled() ? kMips64WordSize : 0;
485 __ LoadFromOffset(load_type,
486 GpuRegister(ensure_scratch.GetRegister()),
487 SP,
488 index1 + stack_offset);
489 __ LoadFromOffset(load_type,
490 TMP,
491 SP,
492 index2 + stack_offset);
493 __ StoreToOffset(store_type,
494 GpuRegister(ensure_scratch.GetRegister()),
495 SP,
496 index2 + stack_offset);
497 __ StoreToOffset(store_type, TMP, SP, index1 + stack_offset);
498}
499
500static dwarf::Reg DWARFReg(GpuRegister reg) {
501 return dwarf::Reg::Mips64Core(static_cast<int>(reg));
502}
503
504// TODO: mapping of floating-point registers to DWARF
505
506void CodeGeneratorMIPS64::GenerateFrameEntry() {
507 __ Bind(&frame_entry_label_);
508
509 bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod();
510
511 if (do_overflow_check) {
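    // Implicit stack-overflow check: probe the address GetStackOverflowReservedBytes
    // below SP with a load into ZERO. If that address falls inside the stack guard
    // region the load faults, and the PC recorded below lets the runtime report the
    // overflow at this method entry.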
512 __ LoadFromOffset(kLoadWord,
513 ZERO,
514 SP,
515 -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64)));
516 RecordPcInfo(nullptr, 0);
517 }
518
519 // TODO: anything related to T9/GP/GOT/PIC/.so's?
520
521 if (HasEmptyFrame()) {
522 return;
523 }
524
525 // Make sure the frame size isn't unreasonably large. Per the various APIs
526 // it looks like it should always be less than 2GB in size, which allows
 527  // us to use 32-bit signed offsets from the stack pointer.
528 if (GetFrameSize() > 0x7FFFFFFF)
529 LOG(FATAL) << "Stack frame larger than 2GB";
530
531 // Spill callee-saved registers.
532 // Note that their cumulative size is small and they can be indexed using
533 // 16-bit offsets.
534
535 // TODO: increment/decrement SP in one step instead of two or remove this comment.
536
537 uint32_t ofs = FrameEntrySpillSize();
538 __ IncreaseFrameSize(ofs);
539
540 for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
541 GpuRegister reg = kCoreCalleeSaves[i];
542 if (allocated_registers_.ContainsCoreRegister(reg)) {
543 ofs -= kMips64WordSize;
544 __ Sd(reg, SP, ofs);
545 __ cfi().RelOffset(DWARFReg(reg), ofs);
546 }
547 }
548
549 for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
550 FpuRegister reg = kFpuCalleeSaves[i];
551 if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
552 ofs -= kMips64WordSize;
553 __ Sdc1(reg, SP, ofs);
554 // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
555 }
556 }
557
558 // Allocate the rest of the frame and store the current method pointer
559 // at its end.
560
561 __ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
562
563 static_assert(IsInt<16>(kCurrentMethodStackOffset),
564 "kCurrentMethodStackOffset must fit into int16_t");
565 __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
566}
567
568void CodeGeneratorMIPS64::GenerateFrameExit() {
569 __ cfi().RememberState();
570
571 // TODO: anything related to T9/GP/GOT/PIC/.so's?
572
573 if (!HasEmptyFrame()) {
574 // Deallocate the rest of the frame.
575
576 __ DecreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());
577
578 // Restore callee-saved registers.
579 // Note that their cumulative size is small and they can be indexed using
580 // 16-bit offsets.
581
582 // TODO: increment/decrement SP in one step instead of two or remove this comment.
583
584 uint32_t ofs = 0;
585
586 for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
587 FpuRegister reg = kFpuCalleeSaves[i];
588 if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
589 __ Ldc1(reg, SP, ofs);
590 ofs += kMips64WordSize;
591 // TODO: __ cfi().Restore(DWARFReg(reg));
592 }
593 }
594
595 for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
596 GpuRegister reg = kCoreCalleeSaves[i];
597 if (allocated_registers_.ContainsCoreRegister(reg)) {
598 __ Ld(reg, SP, ofs);
599 ofs += kMips64WordSize;
600 __ cfi().Restore(DWARFReg(reg));
601 }
602 }
603
604 DCHECK_EQ(ofs, FrameEntrySpillSize());
605 __ DecreaseFrameSize(ofs);
606 }
607
608 __ Jr(RA);
609
610 __ cfi().RestoreState();
611 __ cfi().DefCFAOffset(GetFrameSize());
612}
613
614void CodeGeneratorMIPS64::Bind(HBasicBlock* block) {
615 __ Bind(GetLabelOf(block));
616}
617
618void CodeGeneratorMIPS64::MoveLocation(Location destination,
619 Location source,
620 Primitive::Type type) {
621 if (source.Equals(destination)) {
622 return;
623 }
624
625 // A valid move can always be inferred from the destination and source
626 // locations. When moving from and to a register, the argument type can be
627 // used to generate 32bit instead of 64bit moves.
628 bool unspecified_type = (type == Primitive::kPrimVoid);
629 DCHECK_EQ(unspecified_type, false);
630
631 if (destination.IsRegister() || destination.IsFpuRegister()) {
632 if (unspecified_type) {
633 HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
634 if (source.IsStackSlot() ||
635 (src_cst != nullptr && (src_cst->IsIntConstant()
636 || src_cst->IsFloatConstant()
637 || src_cst->IsNullConstant()))) {
 638        // For stack slots and 32bit constants, a 32bit type is appropriate.
639 type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
640 } else {
641 // If the source is a double stack slot or a 64bit constant, a 64bit
642 // type is appropriate. Else the source is a register, and since the
 643        // type has not been specified, we choose a 64bit type to force a 64bit
644 // move.
645 type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
646 }
647 }
648 DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
649 (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
650 if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
651 // Move to GPR/FPR from stack
652 LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
653 if (Primitive::IsFloatingPointType(type)) {
654 __ LoadFpuFromOffset(load_type,
655 destination.AsFpuRegister<FpuRegister>(),
656 SP,
657 source.GetStackIndex());
658 } else {
659 // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
660 __ LoadFromOffset(load_type,
661 destination.AsRegister<GpuRegister>(),
662 SP,
663 source.GetStackIndex());
664 }
665 } else if (source.IsConstant()) {
666 // Move to GPR/FPR from constant
667 GpuRegister gpr = AT;
668 if (!Primitive::IsFloatingPointType(type)) {
669 gpr = destination.AsRegister<GpuRegister>();
670 }
671 if (type == Primitive::kPrimInt || type == Primitive::kPrimFloat) {
672 __ LoadConst32(gpr, GetInt32ValueOf(source.GetConstant()->AsConstant()));
673 } else {
674 __ LoadConst64(gpr, GetInt64ValueOf(source.GetConstant()->AsConstant()));
675 }
676 if (type == Primitive::kPrimFloat) {
677 __ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
678 } else if (type == Primitive::kPrimDouble) {
679 __ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
680 }
681 } else {
682 if (destination.IsRegister()) {
683 // Move to GPR from GPR
684 __ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
685 } else {
686 // Move to FPR from FPR
687 if (type == Primitive::kPrimFloat) {
688 __ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
689 } else {
690 DCHECK_EQ(type, Primitive::kPrimDouble);
691 __ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
692 }
693 }
694 }
695 } else { // The destination is not a register. It must be a stack slot.
696 DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
697 if (source.IsRegister() || source.IsFpuRegister()) {
698 if (unspecified_type) {
699 if (source.IsRegister()) {
700 type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
701 } else {
702 type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
703 }
704 }
705 DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
706 (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
707 // Move to stack from GPR/FPR
708 StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
709 if (source.IsRegister()) {
710 __ StoreToOffset(store_type,
711 source.AsRegister<GpuRegister>(),
712 SP,
713 destination.GetStackIndex());
714 } else {
715 __ StoreFpuToOffset(store_type,
716 source.AsFpuRegister<FpuRegister>(),
717 SP,
718 destination.GetStackIndex());
719 }
720 } else if (source.IsConstant()) {
721 // Move to stack from constant
722 HConstant* src_cst = source.GetConstant();
723 StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
724 if (destination.IsStackSlot()) {
725 __ LoadConst32(TMP, GetInt32ValueOf(src_cst->AsConstant()));
726 } else {
727 __ LoadConst64(TMP, GetInt64ValueOf(src_cst->AsConstant()));
728 }
729 __ StoreToOffset(store_type, TMP, SP, destination.GetStackIndex());
730 } else {
731 DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
732 DCHECK_EQ(source.IsDoubleStackSlot(), destination.IsDoubleStackSlot());
733 // Move to stack from stack
734 if (destination.IsStackSlot()) {
735 __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
736 __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
737 } else {
738 __ LoadFromOffset(kLoadDoubleword, TMP, SP, source.GetStackIndex());
739 __ StoreToOffset(kStoreDoubleword, TMP, SP, destination.GetStackIndex());
740 }
741 }
742 }
743}
744
745void CodeGeneratorMIPS64::SwapLocations(Location loc1,
746 Location loc2,
747 Primitive::Type type ATTRIBUTE_UNUSED) {
748 DCHECK(!loc1.IsConstant());
749 DCHECK(!loc2.IsConstant());
750
751 if (loc1.Equals(loc2)) {
752 return;
753 }
754
755 bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
756 bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
757 bool is_fp_reg1 = loc1.IsFpuRegister();
758 bool is_fp_reg2 = loc2.IsFpuRegister();
759
760 if (loc2.IsRegister() && loc1.IsRegister()) {
761 // Swap 2 GPRs
762 GpuRegister r1 = loc1.AsRegister<GpuRegister>();
763 GpuRegister r2 = loc2.AsRegister<GpuRegister>();
764 __ Move(TMP, r2);
765 __ Move(r2, r1);
766 __ Move(r1, TMP);
767 } else if (is_fp_reg2 && is_fp_reg1) {
768 // Swap 2 FPRs
769 FpuRegister r1 = loc1.AsFpuRegister<FpuRegister>();
770 FpuRegister r2 = loc2.AsFpuRegister<FpuRegister>();
771 // TODO: Can MOV.S/MOV.D be used here to save one instruction?
772 // Need to distinguish float from double, right?
773 __ Dmfc1(TMP, r2);
774 __ Dmfc1(AT, r1);
775 __ Dmtc1(TMP, r1);
776 __ Dmtc1(AT, r2);
777 } else if (is_slot1 != is_slot2) {
778 // Swap GPR/FPR and stack slot
779 Location reg_loc = is_slot1 ? loc2 : loc1;
780 Location mem_loc = is_slot1 ? loc1 : loc2;
781 LoadOperandType load_type = mem_loc.IsStackSlot() ? kLoadWord : kLoadDoubleword;
782 StoreOperandType store_type = mem_loc.IsStackSlot() ? kStoreWord : kStoreDoubleword;
783 // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
784 __ LoadFromOffset(load_type, TMP, SP, mem_loc.GetStackIndex());
785 if (reg_loc.IsFpuRegister()) {
786 __ StoreFpuToOffset(store_type,
787 reg_loc.AsFpuRegister<FpuRegister>(),
788 SP,
789 mem_loc.GetStackIndex());
790 // TODO: review this MTC1/DMTC1 move
791 if (mem_loc.IsStackSlot()) {
792 __ Mtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
793 } else {
794 DCHECK(mem_loc.IsDoubleStackSlot());
795 __ Dmtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
796 }
797 } else {
798 __ StoreToOffset(store_type, reg_loc.AsRegister<GpuRegister>(), SP, mem_loc.GetStackIndex());
799 __ Move(reg_loc.AsRegister<GpuRegister>(), TMP);
800 }
801 } else if (is_slot1 && is_slot2) {
802 move_resolver_.Exchange(loc1.GetStackIndex(),
803 loc2.GetStackIndex(),
804 loc1.IsDoubleStackSlot());
805 } else {
806 LOG(FATAL) << "Unimplemented swap between locations " << loc1 << " and " << loc2;
807 }
808}
809
810void CodeGeneratorMIPS64::Move(HInstruction* instruction,
811 Location location,
812 HInstruction* move_for) {
813 LocationSummary* locations = instruction->GetLocations();
814 Primitive::Type type = instruction->GetType();
815 DCHECK_NE(type, Primitive::kPrimVoid);
816
817 if (instruction->IsCurrentMethod()) {
818 MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset), type);
819 } else if (locations != nullptr && locations->Out().Equals(location)) {
820 return;
821 } else if (instruction->IsIntConstant()
822 || instruction->IsLongConstant()
823 || instruction->IsNullConstant()) {
824 if (location.IsRegister()) {
825 // Move to GPR from constant
826 GpuRegister dst = location.AsRegister<GpuRegister>();
827 if (instruction->IsNullConstant() || instruction->IsIntConstant()) {
828 __ LoadConst32(dst, GetInt32ValueOf(instruction->AsConstant()));
829 } else {
830 __ LoadConst64(dst, instruction->AsLongConstant()->GetValue());
831 }
832 } else {
833 DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
834 // Move to stack from constant
835 if (location.IsStackSlot()) {
836 __ LoadConst32(TMP, GetInt32ValueOf(instruction->AsConstant()));
837 __ StoreToOffset(kStoreWord, TMP, SP, location.GetStackIndex());
838 } else {
839 __ LoadConst64(TMP, instruction->AsLongConstant()->GetValue());
840 __ StoreToOffset(kStoreDoubleword, TMP, SP, location.GetStackIndex());
841 }
842 }
843 } else if (instruction->IsTemporary()) {
844 Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
845 MoveLocation(location, temp_location, type);
846 } else if (instruction->IsLoadLocal()) {
847 uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
848 if (Primitive::Is64BitType(type)) {
849 MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
850 } else {
851 MoveLocation(location, Location::StackSlot(stack_slot), type);
852 }
853 } else {
854 DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
855 MoveLocation(location, locations->Out(), type);
856 }
857}
858
859Location CodeGeneratorMIPS64::GetStackLocation(HLoadLocal* load) const {
860 Primitive::Type type = load->GetType();
861
862 switch (type) {
863 case Primitive::kPrimNot:
864 case Primitive::kPrimInt:
865 case Primitive::kPrimFloat:
866 return Location::StackSlot(GetStackSlot(load->GetLocal()));
867
868 case Primitive::kPrimLong:
869 case Primitive::kPrimDouble:
870 return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));
871
872 case Primitive::kPrimBoolean:
873 case Primitive::kPrimByte:
874 case Primitive::kPrimChar:
875 case Primitive::kPrimShort:
876 case Primitive::kPrimVoid:
877 LOG(FATAL) << "Unexpected type " << type;
878 }
879
880 LOG(FATAL) << "Unreachable";
881 return Location::NoLocation();
882}
883
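// Write barrier: if `value` is non-null, mark the card covering `object` as dirty so
// the GC will revisit `object`. The card address is (biased card table base loaded from
// the Thread object) + (object >> kCardShift); the low byte of the biased base is reused
// as the non-zero value stored to dirty the card.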
884void CodeGeneratorMIPS64::MarkGCCard(GpuRegister object, GpuRegister value) {
885 Label done;
886 GpuRegister card = AT;
887 GpuRegister temp = TMP;
888 __ Beqzc(value, &done);
889 __ LoadFromOffset(kLoadDoubleword,
890 card,
891 TR,
892 Thread::CardTableOffset<kMips64WordSize>().Int32Value());
893 __ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
894 __ Daddu(temp, card, temp);
895 __ Sb(card, temp, 0);
896 __ Bind(&done);
897}
898
899void CodeGeneratorMIPS64::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const {
900 // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
901 blocked_core_registers_[ZERO] = true;
902 blocked_core_registers_[K0] = true;
903 blocked_core_registers_[K1] = true;
904 blocked_core_registers_[GP] = true;
905 blocked_core_registers_[SP] = true;
906 blocked_core_registers_[RA] = true;
907
908 // AT and TMP(T8) are used as temporary/scratch registers
909 // (similar to how AT is used by MIPS assemblers).
910 blocked_core_registers_[AT] = true;
911 blocked_core_registers_[TMP] = true;
912 blocked_fpu_registers_[FTMP] = true;
913
914 // Reserve suspend and thread registers.
915 blocked_core_registers_[S0] = true;
916 blocked_core_registers_[TR] = true;
917
918 // Reserve T9 for function calls
919 blocked_core_registers_[T9] = true;
920
921 // TODO: review; anything else?
922
923 // TODO: make these two for's conditional on is_baseline once
924 // all the issues with register saving/restoring are sorted out.
925 for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
926 blocked_core_registers_[kCoreCalleeSaves[i]] = true;
927 }
928
929 for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
930 blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
931 }
932}
933
934Location CodeGeneratorMIPS64::AllocateFreeRegister(Primitive::Type type) const {
935 if (type == Primitive::kPrimVoid) {
936 LOG(FATAL) << "Unreachable type " << type;
937 }
938
939 if (Primitive::IsFloatingPointType(type)) {
940 size_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFpuRegisters);
941 return Location::FpuRegisterLocation(reg);
942 } else {
943 size_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfGpuRegisters);
944 return Location::RegisterLocation(reg);
945 }
946}
947
948size_t CodeGeneratorMIPS64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
949 __ StoreToOffset(kStoreDoubleword, GpuRegister(reg_id), SP, stack_index);
950 return kMips64WordSize;
951}
952
953size_t CodeGeneratorMIPS64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
954 __ LoadFromOffset(kLoadDoubleword, GpuRegister(reg_id), SP, stack_index);
955 return kMips64WordSize;
956}
957
958size_t CodeGeneratorMIPS64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
959 __ StoreFpuToOffset(kStoreDoubleword, FpuRegister(reg_id), SP, stack_index);
960 return kMips64WordSize;
961}
962
963size_t CodeGeneratorMIPS64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
964 __ LoadFpuFromOffset(kLoadDoubleword, FpuRegister(reg_id), SP, stack_index);
965 return kMips64WordSize;
966}
967
968void CodeGeneratorMIPS64::DumpCoreRegister(std::ostream& stream, int reg) const {
969 stream << Mips64ManagedRegister::FromGpuRegister(GpuRegister(reg));
970}
971
972void CodeGeneratorMIPS64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
973 stream << Mips64ManagedRegister::FromFpuRegister(FpuRegister(reg));
974}
975
976void CodeGeneratorMIPS64::InvokeRuntime(int32_t entry_point_offset,
977 HInstruction* instruction,
978 uint32_t dex_pc,
979 SlowPathCode* slow_path) {
980  ValidateInvokeRuntime(instruction, slow_path);
981  // TODO: anything related to T9/GP/GOT/PIC/.so's?
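  // Runtime calls go through a Thread-local entrypoint table: load the target address
  // from the Thread object (TR) at entry_point_offset and call it via T9, the register
  // MIPS calling conventions expect to hold the callee address.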
982 __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
983 __ Jalr(T9);
984 RecordPcInfo(instruction, dex_pc, slow_path);
985}
986
987void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
988 GpuRegister class_reg) {
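  // Compare the class status word against kStatusInitialized; any smaller status means
  // the class still needs initialization, so branch to the slow path to run it.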
989 __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
990 __ LoadConst32(AT, mirror::Class::kStatusInitialized);
991 __ Bltc(TMP, AT, slow_path->GetEntryLabel());
992 // TODO: barrier needed?
993 __ Bind(slow_path->GetExitLabel());
994}
995
996void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
997 __ Sync(0); // only stype 0 is supported
998}
999
1000void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
1001 HBasicBlock* successor) {
1002 SuspendCheckSlowPathMIPS64* slow_path =
1003 new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS64(instruction, successor);
1004 codegen_->AddSlowPath(slow_path);
1005
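  // The thread's flags halfword is non-zero when a suspend or checkpoint request is
  // pending; any non-zero value diverts execution to the slow path (pTestSuspend).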
1006 __ LoadFromOffset(kLoadUnsignedHalfword,
1007 TMP,
1008 TR,
1009 Thread::ThreadFlagsOffset<kMips64WordSize>().Int32Value());
1010 if (successor == nullptr) {
1011 __ Bnezc(TMP, slow_path->GetEntryLabel());
1012 __ Bind(slow_path->GetReturnLabel());
1013 } else {
1014 __ Beqzc(TMP, codegen_->GetLabelOf(successor));
1015 __ B(slow_path->GetEntryLabel());
1016 // slow_path will return to GetLabelOf(successor).
1017 }
1018}
1019
1020InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph,
1021 CodeGeneratorMIPS64* codegen)
1022 : HGraphVisitor(graph),
1023 assembler_(codegen->GetAssembler()),
1024 codegen_(codegen) {}
1025
1026void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
1027 DCHECK_EQ(instruction->InputCount(), 2U);
1028 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1029 Primitive::Type type = instruction->GetResultType();
1030 switch (type) {
1031 case Primitive::kPrimInt:
1032 case Primitive::kPrimLong: {
1033 locations->SetInAt(0, Location::RequiresRegister());
1034 HInstruction* right = instruction->InputAt(1);
1035 bool can_use_imm = false;
1036 if (right->IsConstant()) {
1037 int64_t imm = CodeGenerator::GetInt64ValueOf(right->AsConstant());
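        // ANDI/ORI/XORI take a zero-extended 16-bit immediate, while ADDIU/DADDIU take a
        // sign-extended 16-bit immediate; a subtraction can use an immediate only when the
        // negated value fits that signed range (it is emitted as an add of -imm).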
1038 if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
1039 can_use_imm = IsUint<16>(imm);
1040 } else if (instruction->IsAdd()) {
1041 can_use_imm = IsInt<16>(imm);
1042 } else {
1043 DCHECK(instruction->IsSub());
1044 can_use_imm = IsInt<16>(-imm);
1045 }
1046 }
1047 if (can_use_imm)
1048 locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
1049 else
1050 locations->SetInAt(1, Location::RequiresRegister());
1051 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1052 }
1053 break;
1054
1055 case Primitive::kPrimFloat:
1056 case Primitive::kPrimDouble:
1057 locations->SetInAt(0, Location::RequiresFpuRegister());
1058 locations->SetInAt(1, Location::RequiresFpuRegister());
1059 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1060 break;
1061
1062 default:
1063 LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
1064 }
1065}
1066
1067void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
1068 Primitive::Type type = instruction->GetType();
1069 LocationSummary* locations = instruction->GetLocations();
1070
1071 switch (type) {
1072 case Primitive::kPrimInt:
1073 case Primitive::kPrimLong: {
1074 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1075 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1076 Location rhs_location = locations->InAt(1);
1077
1078 GpuRegister rhs_reg = ZERO;
1079 int64_t rhs_imm = 0;
1080 bool use_imm = rhs_location.IsConstant();
1081 if (use_imm) {
1082 rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
1083 } else {
1084 rhs_reg = rhs_location.AsRegister<GpuRegister>();
1085 }
1086
1087 if (instruction->IsAnd()) {
1088 if (use_imm)
1089 __ Andi(dst, lhs, rhs_imm);
1090 else
1091 __ And(dst, lhs, rhs_reg);
1092 } else if (instruction->IsOr()) {
1093 if (use_imm)
1094 __ Ori(dst, lhs, rhs_imm);
1095 else
1096 __ Or(dst, lhs, rhs_reg);
1097 } else if (instruction->IsXor()) {
1098 if (use_imm)
1099 __ Xori(dst, lhs, rhs_imm);
1100 else
1101 __ Xor(dst, lhs, rhs_reg);
1102 } else if (instruction->IsAdd()) {
1103 if (type == Primitive::kPrimInt) {
1104 if (use_imm)
1105 __ Addiu(dst, lhs, rhs_imm);
1106 else
1107 __ Addu(dst, lhs, rhs_reg);
1108 } else {
1109 if (use_imm)
1110 __ Daddiu(dst, lhs, rhs_imm);
1111 else
1112 __ Daddu(dst, lhs, rhs_reg);
1113 }
1114 } else {
1115 DCHECK(instruction->IsSub());
1116 if (type == Primitive::kPrimInt) {
1117 if (use_imm)
1118 __ Addiu(dst, lhs, -rhs_imm);
1119 else
1120 __ Subu(dst, lhs, rhs_reg);
1121 } else {
1122 if (use_imm)
1123 __ Daddiu(dst, lhs, -rhs_imm);
1124 else
1125 __ Dsubu(dst, lhs, rhs_reg);
1126 }
1127 }
1128 break;
1129 }
1130 case Primitive::kPrimFloat:
1131 case Primitive::kPrimDouble: {
1132 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
1133 FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
1134 FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
1135 if (instruction->IsAdd()) {
1136 if (type == Primitive::kPrimFloat)
1137 __ AddS(dst, lhs, rhs);
1138 else
1139 __ AddD(dst, lhs, rhs);
1140 } else if (instruction->IsSub()) {
1141 if (type == Primitive::kPrimFloat)
1142 __ SubS(dst, lhs, rhs);
1143 else
1144 __ SubD(dst, lhs, rhs);
1145 } else {
1146 LOG(FATAL) << "Unexpected floating-point binary operation";
1147 }
1148 break;
1149 }
1150 default:
1151 LOG(FATAL) << "Unexpected binary operation type " << type;
1152 }
1153}
1154
1155void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
1156 DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
1157
1158 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
1159 Primitive::Type type = instr->GetResultType();
1160 switch (type) {
1161 case Primitive::kPrimInt:
1162 case Primitive::kPrimLong: {
1163 locations->SetInAt(0, Location::RequiresRegister());
1164 locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
1165 locations->SetOut(Location::RequiresRegister());
1166 break;
1167 }
1168 default:
1169 LOG(FATAL) << "Unexpected shift type " << type;
1170 }
1171}
1172
1173void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
1174 DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
1175 LocationSummary* locations = instr->GetLocations();
1176 Primitive::Type type = instr->GetType();
1177
1178 switch (type) {
1179 case Primitive::kPrimInt:
1180 case Primitive::kPrimLong: {
1181 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1182 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1183 Location rhs_location = locations->InAt(1);
1184
1185 GpuRegister rhs_reg = ZERO;
1186 int64_t rhs_imm = 0;
1187 bool use_imm = rhs_location.IsConstant();
1188 if (use_imm) {
1189 rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
1190 } else {
1191 rhs_reg = rhs_location.AsRegister<GpuRegister>();
1192 }
1193
1194 if (use_imm) {
1195 uint32_t shift_value = (type == Primitive::kPrimInt)
1196 ? static_cast<uint32_t>(rhs_imm & kMaxIntShiftValue)
1197 : static_cast<uint32_t>(rhs_imm & kMaxLongShiftValue);
1198
1199 if (type == Primitive::kPrimInt) {
1200 if (instr->IsShl()) {
1201 __ Sll(dst, lhs, shift_value);
1202 } else if (instr->IsShr()) {
1203 __ Sra(dst, lhs, shift_value);
1204 } else {
1205 __ Srl(dst, lhs, shift_value);
1206 }
1207 } else {
1208 if (shift_value < 32) {
1209 if (instr->IsShl()) {
1210 __ Dsll(dst, lhs, shift_value);
1211 } else if (instr->IsShr()) {
1212 __ Dsra(dst, lhs, shift_value);
1213 } else {
1214 __ Dsrl(dst, lhs, shift_value);
1215 }
1216 } else {
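            // Shift amounts of 32..63 use the *32 instruction forms (DSLL32/DSRA32/DSRL32),
            // which shift by (immediate + 32), hence the adjustment below.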
1217 shift_value -= 32;
1218 if (instr->IsShl()) {
1219 __ Dsll32(dst, lhs, shift_value);
1220 } else if (instr->IsShr()) {
1221 __ Dsra32(dst, lhs, shift_value);
1222 } else {
1223 __ Dsrl32(dst, lhs, shift_value);
1224 }
1225 }
1226 }
1227 } else {
1228 if (type == Primitive::kPrimInt) {
1229 if (instr->IsShl()) {
1230 __ Sllv(dst, lhs, rhs_reg);
1231 } else if (instr->IsShr()) {
1232 __ Srav(dst, lhs, rhs_reg);
1233 } else {
1234 __ Srlv(dst, lhs, rhs_reg);
1235 }
1236 } else {
1237 if (instr->IsShl()) {
1238 __ Dsllv(dst, lhs, rhs_reg);
1239 } else if (instr->IsShr()) {
1240 __ Dsrav(dst, lhs, rhs_reg);
1241 } else {
1242 __ Dsrlv(dst, lhs, rhs_reg);
1243 }
1244 }
1245 }
1246 break;
1247 }
1248 default:
1249 LOG(FATAL) << "Unexpected shift operation type " << type;
1250 }
1251}
1252
1253void LocationsBuilderMIPS64::VisitAdd(HAdd* instruction) {
1254 HandleBinaryOp(instruction);
1255}
1256
1257void InstructionCodeGeneratorMIPS64::VisitAdd(HAdd* instruction) {
1258 HandleBinaryOp(instruction);
1259}
1260
1261void LocationsBuilderMIPS64::VisitAnd(HAnd* instruction) {
1262 HandleBinaryOp(instruction);
1263}
1264
1265void InstructionCodeGeneratorMIPS64::VisitAnd(HAnd* instruction) {
1266 HandleBinaryOp(instruction);
1267}
1268
1269void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) {
1270 LocationSummary* locations =
1271 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1272 locations->SetInAt(0, Location::RequiresRegister());
1273 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1274 if (Primitive::IsFloatingPointType(instruction->GetType())) {
1275 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1276 } else {
1277 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1278 }
1279}
1280
1281void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
1282 LocationSummary* locations = instruction->GetLocations();
1283 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1284 Location index = locations->InAt(1);
1285 Primitive::Type type = instruction->GetType();
1286
1287 switch (type) {
1288 case Primitive::kPrimBoolean: {
1289 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
1290 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1291 if (index.IsConstant()) {
1292 size_t offset =
1293 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
1294 __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
1295 } else {
1296 __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
1297 __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
1298 }
1299 break;
1300 }
1301
1302 case Primitive::kPrimByte: {
1303 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
1304 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1305 if (index.IsConstant()) {
1306 size_t offset =
1307 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
1308 __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
1309 } else {
1310 __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
1311 __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset);
1312 }
1313 break;
1314 }
1315
1316 case Primitive::kPrimShort: {
1317 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
1318 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1319 if (index.IsConstant()) {
1320 size_t offset =
1321 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
1322 __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
1323 } else {
1324 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
1325 __ Daddu(TMP, obj, TMP);
1326 __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset);
1327 }
1328 break;
1329 }
1330
1331 case Primitive::kPrimChar: {
1332 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
1333 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1334 if (index.IsConstant()) {
1335 size_t offset =
1336 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
1337 __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
1338 } else {
1339 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
1340 __ Daddu(TMP, obj, TMP);
1341 __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
1342 }
1343 break;
1344 }
1345
1346 case Primitive::kPrimInt:
1347 case Primitive::kPrimNot: {
1348 DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
1349 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
1350 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1351 LoadOperandType load_type = (type == Primitive::kPrimNot) ? kLoadUnsignedWord : kLoadWord;
1352 if (index.IsConstant()) {
1353 size_t offset =
1354 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
1355 __ LoadFromOffset(load_type, out, obj, offset);
1356 } else {
1357 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
1358 __ Daddu(TMP, obj, TMP);
1359 __ LoadFromOffset(load_type, out, TMP, data_offset);
1360 }
1361 break;
1362 }
1363
1364 case Primitive::kPrimLong: {
1365 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
1366 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1367 if (index.IsConstant()) {
1368 size_t offset =
1369 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
1370 __ LoadFromOffset(kLoadDoubleword, out, obj, offset);
1371 } else {
1372 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
1373 __ Daddu(TMP, obj, TMP);
1374 __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
1375 }
1376 break;
1377 }
1378
1379 case Primitive::kPrimFloat: {
1380 uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
1381 FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
1382 if (index.IsConstant()) {
1383 size_t offset =
1384 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
1385 __ LoadFpuFromOffset(kLoadWord, out, obj, offset);
1386 } else {
1387 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
1388 __ Daddu(TMP, obj, TMP);
1389 __ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset);
1390 }
1391 break;
1392 }
1393
1394 case Primitive::kPrimDouble: {
1395 uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
1396 FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
1397 if (index.IsConstant()) {
1398 size_t offset =
1399 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
1400 __ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset);
1401 } else {
1402 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
1403 __ Daddu(TMP, obj, TMP);
1404 __ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset);
1405 }
1406 break;
1407 }
1408
1409 case Primitive::kPrimVoid:
1410 LOG(FATAL) << "Unreachable type " << instruction->GetType();
1411 UNREACHABLE();
1412 }
1413 codegen_->MaybeRecordImplicitNullCheck(instruction);
1414}
1415
1416void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
1417 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1418 locations->SetInAt(0, Location::RequiresRegister());
1419 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1420}
1421
1422void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction) {
1423 LocationSummary* locations = instruction->GetLocations();
1424 uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
1425 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1426 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
1427 __ LoadFromOffset(kLoadWord, out, obj, offset);
1428 codegen_->MaybeRecordImplicitNullCheck(instruction);
1429}
1430
1431void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) {
1432 Primitive::Type value_type = instruction->GetComponentType();
1433 bool is_object = value_type == Primitive::kPrimNot;
1434 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
1435 instruction,
1436 is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
1437 if (is_object) {
1438 InvokeRuntimeCallingConvention calling_convention;
1439 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
1440 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
1441 locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
1442 } else {
1443 locations->SetInAt(0, Location::RequiresRegister());
1444 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1445 if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
1446 locations->SetInAt(2, Location::RequiresFpuRegister());
1447 } else {
1448 locations->SetInAt(2, Location::RequiresRegister());
1449 }
1450 }
1451}
1452
1453void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
1454 LocationSummary* locations = instruction->GetLocations();
1455 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1456 Location index = locations->InAt(1);
1457 Primitive::Type value_type = instruction->GetComponentType();
1458 bool needs_runtime_call = locations->WillCall();
1459 bool needs_write_barrier =
1460 CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
1461
1462 switch (value_type) {
1463 case Primitive::kPrimBoolean:
1464 case Primitive::kPrimByte: {
1465 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
1466 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1467 if (index.IsConstant()) {
1468 size_t offset =
1469 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
1470 __ StoreToOffset(kStoreByte, value, obj, offset);
1471 } else {
1472 __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
1473 __ StoreToOffset(kStoreByte, value, TMP, data_offset);
1474 }
1475 break;
1476 }
1477
1478 case Primitive::kPrimShort:
1479 case Primitive::kPrimChar: {
1480 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
1481 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1482 if (index.IsConstant()) {
1483 size_t offset =
1484 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
1485 __ StoreToOffset(kStoreHalfword, value, obj, offset);
1486 } else {
1487 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
1488 __ Daddu(TMP, obj, TMP);
1489 __ StoreToOffset(kStoreHalfword, value, TMP, data_offset);
1490 }
1491 break;
1492 }
1493
1494 case Primitive::kPrimInt:
1495 case Primitive::kPrimNot: {
1496 if (!needs_runtime_call) {
1497 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
1498 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1499 if (index.IsConstant()) {
1500 size_t offset =
1501 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
1502 __ StoreToOffset(kStoreWord, value, obj, offset);
1503 } else {
1504 DCHECK(index.IsRegister()) << index;
1505 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
1506 __ Daddu(TMP, obj, TMP);
1507 __ StoreToOffset(kStoreWord, value, TMP, data_offset);
1508 }
1509 codegen_->MaybeRecordImplicitNullCheck(instruction);
1510 if (needs_write_barrier) {
1511 DCHECK_EQ(value_type, Primitive::kPrimNot);
1512 codegen_->MarkGCCard(obj, value);
1513 }
1514 } else {
1515 DCHECK_EQ(value_type, Primitive::kPrimNot);
1516 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
1517 instruction,
1518 instruction->GetDexPc(),
1519 nullptr);
1520 }
1521 break;
1522 }
1523
1524 case Primitive::kPrimLong: {
1525 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
1526 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1527 if (index.IsConstant()) {
1528 size_t offset =
1529 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
1530 __ StoreToOffset(kStoreDoubleword, value, obj, offset);
1531 } else {
1532 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
1533 __ Daddu(TMP, obj, TMP);
1534 __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset);
1535 }
1536 break;
1537 }
1538
1539 case Primitive::kPrimFloat: {
1540 uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
1541 FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
1542 DCHECK(locations->InAt(2).IsFpuRegister());
1543 if (index.IsConstant()) {
1544 size_t offset =
1545 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
1546 __ StoreFpuToOffset(kStoreWord, value, obj, offset);
1547 } else {
1548 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
1549 __ Daddu(TMP, obj, TMP);
1550 __ StoreFpuToOffset(kStoreWord, value, TMP, data_offset);
1551 }
1552 break;
1553 }
1554
1555 case Primitive::kPrimDouble: {
1556 uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
1557 FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
1558 DCHECK(locations->InAt(2).IsFpuRegister());
1559 if (index.IsConstant()) {
1560 size_t offset =
1561 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
1562 __ StoreFpuToOffset(kStoreDoubleword, value, obj, offset);
1563 } else {
1564 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
1565 __ Daddu(TMP, obj, TMP);
1566 __ StoreFpuToOffset(kStoreDoubleword, value, TMP, data_offset);
1567 }
1568 break;
1569 }
1570
1571 case Primitive::kPrimVoid:
1572      LOG(FATAL) << "Unreachable type " << value_type;
1573 UNREACHABLE();
1574 }
1575
1576  // For ints and objects, MaybeRecordImplicitNullCheck was already called in the switch above.
1577 if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
1578 codegen_->MaybeRecordImplicitNullCheck(instruction);
1579 }
1580}
1581
1582void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
1583 LocationSummary* locations =
1584 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1585 locations->SetInAt(0, Location::RequiresRegister());
1586 locations->SetInAt(1, Location::RequiresRegister());
1587 if (instruction->HasUses()) {
1588 locations->SetOut(Location::SameAsFirstInput());
1589 }
1590}
1591
1592void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
1593 LocationSummary* locations = instruction->GetLocations();
1594 BoundsCheckSlowPathMIPS64* slow_path = new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(
1595 instruction,
1596 locations->InAt(0),
1597 locations->InAt(1));
1598 codegen_->AddSlowPath(slow_path);
1599
1600 GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
1601 GpuRegister length = locations->InAt(1).AsRegister<GpuRegister>();
1602
1603 // length is limited by the maximum positive signed 32-bit integer.
1604 // Unsigned comparison of length and index checks for index < 0
1605 // and for length <= index simultaneously.
1606 // Mips R6 requires lhs != rhs for compact branches.
1607 if (index == length) {
1608 __ B(slow_path->GetEntryLabel());
1609 } else {
1610 __ Bgeuc(index, length, slow_path->GetEntryLabel());
1611 }
1612}
1613
1614void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
1615 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
1616 instruction,
1617 LocationSummary::kCallOnSlowPath);
1618 locations->SetInAt(0, Location::RequiresRegister());
1619 locations->SetInAt(1, Location::RequiresRegister());
1620 locations->AddTemp(Location::RequiresRegister());
1621}
1622
1623void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
1624 LocationSummary* locations = instruction->GetLocations();
1625 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1626 GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
1627 GpuRegister obj_cls = locations->GetTemp(0).AsRegister<GpuRegister>();
1628
1629 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(
1630 instruction,
1631 locations->InAt(1),
1632 Location::RegisterLocation(obj_cls),
1633 instruction->GetDexPc());
1634 codegen_->AddSlowPath(slow_path);
1635
1636 // TODO: avoid this check if we know obj is not null.
1637 __ Beqzc(obj, slow_path->GetExitLabel());
1638 // Compare the class of `obj` with `cls`.
1639 __ LoadFromOffset(kLoadUnsignedWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
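  // Only an exact class match passes this fast path; any other case (e.g. `obj` being an
  // instance of a subclass of `cls`) is resolved by the slow path's runtime call.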
1640 __ Bnec(obj_cls, cls, slow_path->GetEntryLabel());
1641 __ Bind(slow_path->GetExitLabel());
1642}
1643
1644void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
1645 LocationSummary* locations =
1646 new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
1647 locations->SetInAt(0, Location::RequiresRegister());
1648 if (check->HasUses()) {
1649 locations->SetOut(Location::SameAsFirstInput());
1650 }
1651}
1652
1653void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
1654 // We assume the class is not null.
1655 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
1656 check->GetLoadClass(),
1657 check,
1658 check->GetDexPc(),
1659 true);
1660 codegen_->AddSlowPath(slow_path);
1661 GenerateClassInitializationCheck(slow_path,
1662 check->GetLocations()->InAt(0).AsRegister<GpuRegister>());
1663}
1664
1665void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) {
1666 Primitive::Type in_type = compare->InputAt(0)->GetType();
1667
1668 LocationSummary::CallKind call_kind = Primitive::IsFloatingPointType(in_type)
1669 ? LocationSummary::kCall
1670 : LocationSummary::kNoCall;
1671
1672 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, call_kind);
1673
1674 switch (in_type) {
1675 case Primitive::kPrimLong:
1676 locations->SetInAt(0, Location::RequiresRegister());
1677 locations->SetInAt(1, Location::RequiresRegister());
1678 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1679 break;
1680
1681 case Primitive::kPrimFloat:
1682 case Primitive::kPrimDouble: {
1683 InvokeRuntimeCallingConvention calling_convention;
1684 locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
1685 locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
1686 locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
1687 break;
1688 }
1689
1690 default:
1691 LOG(FATAL) << "Unexpected type for compare operation " << in_type;
1692 }
1693}
1694
1695void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) {
1696 LocationSummary* locations = instruction->GetLocations();
1697 Primitive::Type in_type = instruction->InputAt(0)->GetType();
1698
1699 // 0 if: left == right
1700 // 1 if: left > right
1701 // -1 if: left < right
1702 switch (in_type) {
1703 case Primitive::kPrimLong: {
1704 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1705 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1706 GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
1707 // TODO: more efficient (direct) comparison with a constant
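      // TMP = (lhs < rhs) ? 1 : 0 and dst = (lhs > rhs) ? 1 : 0, so dst - TMP is -1, 0 or +1.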
1708 __ Slt(TMP, lhs, rhs);
1709 __ Slt(dst, rhs, lhs);
1710 __ Subu(dst, dst, TMP);
1711 break;
1712 }
1713
1714 case Primitive::kPrimFloat:
1715 case Primitive::kPrimDouble: {
1716 int32_t entry_point_offset;
1717 if (in_type == Primitive::kPrimFloat) {
1718 entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgFloat)
1719 : QUICK_ENTRY_POINT(pCmplFloat);
1720 } else {
1721 entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgDouble)
1722 : QUICK_ENTRY_POINT(pCmplDouble);
1723 }
1724 codegen_->InvokeRuntime(entry_point_offset, instruction, instruction->GetDexPc(), nullptr);
1725 break;
1726 }
1727
1728 default:
1729 LOG(FATAL) << "Unimplemented compare type " << in_type;
1730 }
1731}
1732
1733void LocationsBuilderMIPS64::VisitCondition(HCondition* instruction) {
1734 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1735 locations->SetInAt(0, Location::RequiresRegister());
1736 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1737 if (instruction->NeedsMaterialization()) {
1738 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1739 }
1740}
1741
1742void InstructionCodeGeneratorMIPS64::VisitCondition(HCondition* instruction) {
1743 if (!instruction->NeedsMaterialization()) {
1744 return;
1745 }
1746
1747 LocationSummary* locations = instruction->GetLocations();
1748
1749 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1750 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1751 Location rhs_location = locations->InAt(1);
1752
1753 GpuRegister rhs_reg = ZERO;
1754 int64_t rhs_imm = 0;
1755 bool use_imm = rhs_location.IsConstant();
1756 if (use_imm) {
1757 rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
1758 } else {
1759 rhs_reg = rhs_location.AsRegister<GpuRegister>();
1760 }
1761
1762 IfCondition if_cond = instruction->GetCondition();
1763
1764 switch (if_cond) {
1765 case kCondEQ:
1766 case kCondNE:
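      // lhs ^ rhs is zero exactly when lhs == rhs; the xor result is reduced to 0/1 below.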
1767 if (use_imm && IsUint<16>(rhs_imm)) {
1768 __ Xori(dst, lhs, rhs_imm);
1769 } else {
1770 if (use_imm) {
1771 rhs_reg = TMP;
1772 __ LoadConst32(rhs_reg, rhs_imm);
1773 }
1774 __ Xor(dst, lhs, rhs_reg);
1775 }
1776 if (if_cond == kCondEQ) {
1777 __ Sltiu(dst, dst, 1);
1778 } else {
1779 __ Sltu(dst, ZERO, dst);
1780 }
1781 break;
1782
1783 case kCondLT:
1784 case kCondGE:
1785 if (use_imm && IsInt<16>(rhs_imm)) {
1786 __ Slti(dst, lhs, rhs_imm);
1787 } else {
1788 if (use_imm) {
1789 rhs_reg = TMP;
1790 __ LoadConst32(rhs_reg, rhs_imm);
1791 }
1792 __ Slt(dst, lhs, rhs_reg);
1793 }
1794 if (if_cond == kCondGE) {
1795 // Simulate lhs >= rhs via !(lhs < rhs) since there's
1796 // only the slt instruction but no sge.
1797 __ Xori(dst, dst, 1);
1798 }
1799 break;
1800
1801 case kCondLE:
1802 case kCondGT:
1803 if (use_imm && IsInt<16>(rhs_imm + 1)) {
1804 // Simulate lhs <= rhs via lhs < rhs + 1.
1805 __ Slti(dst, lhs, rhs_imm + 1);
1806 if (if_cond == kCondGT) {
1807 // Simulate lhs > rhs via !(lhs <= rhs) since there's
1808 // only the slti instruction but no sgti.
1809 __ Xori(dst, dst, 1);
1810 }
1811 } else {
1812 if (use_imm) {
1813 rhs_reg = TMP;
1814 __ LoadConst32(rhs_reg, rhs_imm);
1815 }
1816 __ Slt(dst, rhs_reg, lhs);
1817 if (if_cond == kCondLE) {
1818 // Simulate lhs <= rhs via !(rhs < lhs) since there's
1819 // only the slt instruction but no sle.
1820 __ Xori(dst, dst, 1);
1821 }
1822 }
1823 break;
1824 }
1825}
1826
1827void LocationsBuilderMIPS64::VisitDiv(HDiv* div) {
1828 LocationSummary* locations =
1829 new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
1830 switch (div->GetResultType()) {
1831 case Primitive::kPrimInt:
1832 case Primitive::kPrimLong:
1833 locations->SetInAt(0, Location::RequiresRegister());
1834 locations->SetInAt(1, Location::RequiresRegister());
1835 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1836 break;
1837
1838 case Primitive::kPrimFloat:
1839 case Primitive::kPrimDouble:
1840 locations->SetInAt(0, Location::RequiresFpuRegister());
1841 locations->SetInAt(1, Location::RequiresFpuRegister());
1842 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1843 break;
1844
1845 default:
1846 LOG(FATAL) << "Unexpected div type " << div->GetResultType();
1847 }
1848}
1849
1850void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) {
1851 Primitive::Type type = instruction->GetType();
1852 LocationSummary* locations = instruction->GetLocations();
1853
1854 switch (type) {
1855 case Primitive::kPrimInt:
1856 case Primitive::kPrimLong: {
1857 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1858 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1859 GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
1860 if (type == Primitive::kPrimInt)
1861 __ DivR6(dst, lhs, rhs);
1862 else
1863 __ Ddiv(dst, lhs, rhs);
1864 break;
1865 }
1866 case Primitive::kPrimFloat:
1867 case Primitive::kPrimDouble: {
1868 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
1869 FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
1870 FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
1871 if (type == Primitive::kPrimFloat)
1872 __ DivS(dst, lhs, rhs);
1873 else
1874 __ DivD(dst, lhs, rhs);
1875 break;
1876 }
1877 default:
1878 LOG(FATAL) << "Unexpected div type " << type;
1879 }
1880}
1881
1882void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
1883 LocationSummary* locations =
1884 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1885 locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
1886 if (instruction->HasUses()) {
1887 locations->SetOut(Location::SameAsFirstInput());
1888 }
1889}
1890
1891void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
1892 SlowPathCodeMIPS64* slow_path =
1893 new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS64(instruction);
1894 codegen_->AddSlowPath(slow_path);
1895 Location value = instruction->GetLocations()->InAt(0);
1896
1897 Primitive::Type type = instruction->GetType();
1898
1899  if ((type == Primitive::kPrimBoolean) || !Primitive::IsIntegralType(type)) {
1900    LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
1901    return;
1902  }
1903
1904 if (value.IsConstant()) {
1905 int64_t divisor = codegen_->GetInt64ValueOf(value.GetConstant()->AsConstant());
1906 if (divisor == 0) {
1907 __ B(slow_path->GetEntryLabel());
1908 } else {
1909      // A division by a non-zero constant is valid. We don't need to perform
1910      // any check, so simply fall through.
1911 }
1912 } else {
1913 __ Beqzc(value.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
1914 }
1915}
1916
1917void LocationsBuilderMIPS64::VisitDoubleConstant(HDoubleConstant* constant) {
1918 LocationSummary* locations =
1919 new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1920 locations->SetOut(Location::ConstantLocation(constant));
1921}
1922
1923void InstructionCodeGeneratorMIPS64::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
1924 // Will be generated at use site.
1925}
1926
1927void LocationsBuilderMIPS64::VisitExit(HExit* exit) {
1928 exit->SetLocations(nullptr);
1929}
1930
1931void InstructionCodeGeneratorMIPS64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
1932}
1933
1934void LocationsBuilderMIPS64::VisitFloatConstant(HFloatConstant* constant) {
1935 LocationSummary* locations =
1936 new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1937 locations->SetOut(Location::ConstantLocation(constant));
1938}
1939
1940void InstructionCodeGeneratorMIPS64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
1941 // Will be generated at use site.
1942}
1943
1944 void InstructionCodeGeneratorMIPS64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
1945  DCHECK(!successor->IsExitBlock());
1946 HBasicBlock* block = got->GetBlock();
1947 HInstruction* previous = got->GetPrevious();
1948 HLoopInformation* info = block->GetLoopInformation();
1949
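  // At a loop back edge with a pending suspend check, emit the suspend check before
  // branching so the loop can be interrupted (e.g. for GC or debugging).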
1950 if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
1951 codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
1952 GenerateSuspendCheck(info->GetSuspendCheck(), successor);
1953 return;
1954 }
1955 if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
1956 GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
1957 }
1958 if (!codegen_->GoesToNextBlock(block, successor)) {
1959 __ B(codegen_->GetLabelOf(successor));
1960 }
1961}
1962
1963 void LocationsBuilderMIPS64::VisitGoto(HGoto* got) {
1964 got->SetLocations(nullptr);
1965}
1966
1967void InstructionCodeGeneratorMIPS64::VisitGoto(HGoto* got) {
1968 HandleGoto(got, got->GetSuccessor());
1969}
1970
1971void LocationsBuilderMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
1972 try_boundary->SetLocations(nullptr);
1973}
1974
1975void InstructionCodeGeneratorMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
1976 HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
1977 if (!successor->IsExitBlock()) {
1978 HandleGoto(try_boundary, successor);
1979 }
1980}
1981
1982 void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruction,
1983 Label* true_target,
1984 Label* false_target,
1985 Label* always_true_target) {
1986 HInstruction* cond = instruction->InputAt(0);
1987 HCondition* condition = cond->AsCondition();
1988
1989 if (cond->IsIntConstant()) {
1990 int32_t cond_value = cond->AsIntConstant()->GetValue();
1991 if (cond_value == 1) {
1992 if (always_true_target != nullptr) {
1993 __ B(always_true_target);
1994 }
1995 return;
1996 } else {
1997 DCHECK_EQ(cond_value, 0);
1998 }
1999 } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
2000 // The condition instruction has been materialized, compare the output to 0.
2001 Location cond_val = instruction->GetLocations()->InAt(0);
2002 DCHECK(cond_val.IsRegister());
2003 __ Bnezc(cond_val.AsRegister<GpuRegister>(), true_target);
2004 } else {
2005 // The condition instruction has not been materialized, use its inputs as
2006 // the comparison and its condition as the branch condition.
2007 GpuRegister lhs = condition->GetLocations()->InAt(0).AsRegister<GpuRegister>();
2008 Location rhs_location = condition->GetLocations()->InAt(1);
2009 GpuRegister rhs_reg = ZERO;
2010 int32_t rhs_imm = 0;
2011 bool use_imm = rhs_location.IsConstant();
2012 if (use_imm) {
2013 rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
2014 } else {
2015 rhs_reg = rhs_location.AsRegister<GpuRegister>();
2016 }
2017
2018 IfCondition if_cond = condition->GetCondition();
2019 if (use_imm && rhs_imm == 0) {
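      // rhs is zero: use the R6 compact branches that compare a register directly against zero.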
2020 switch (if_cond) {
2021 case kCondEQ:
2022 __ Beqzc(lhs, true_target);
2023 break;
2024 case kCondNE:
2025 __ Bnezc(lhs, true_target);
2026 break;
2027 case kCondLT:
2028 __ Bltzc(lhs, true_target);
2029 break;
2030 case kCondGE:
2031 __ Bgezc(lhs, true_target);
2032 break;
2033 case kCondLE:
2034 __ Blezc(lhs, true_target);
2035 break;
2036 case kCondGT:
2037 __ Bgtzc(lhs, true_target);
2038 break;
2039 }
2040 } else {
2041 if (use_imm) {
2042 rhs_reg = TMP;
2043 __ LoadConst32(rhs_reg, rhs_imm);
2044 }
2045 // It looks like we can get here with lhs == rhs. Should that be possible at all?
2046 // Mips R6 requires lhs != rhs for compact branches.
2047 if (lhs == rhs_reg) {
2048 DCHECK(!use_imm);
2049 switch (if_cond) {
2050 case kCondEQ:
2051 case kCondGE:
2052 case kCondLE:
2053            // If lhs == rhs, an always-true condition (EQ, GE, LE) becomes an unconditional branch.
2054 __ B(true_target);
2055 break;
2056 case kCondNE:
2057 case kCondLT:
2058 case kCondGT:
2059            // If lhs == rhs, an always-false condition (NE, LT, GT) becomes a no-op.
2060 break;
2061 }
2062 } else {
2063 switch (if_cond) {
2064 case kCondEQ:
2065 __ Beqc(lhs, rhs_reg, true_target);
2066 break;
2067 case kCondNE:
2068 __ Bnec(lhs, rhs_reg, true_target);
2069 break;
2070 case kCondLT:
2071 __ Bltc(lhs, rhs_reg, true_target);
2072 break;
2073 case kCondGE:
2074 __ Bgec(lhs, rhs_reg, true_target);
2075 break;
2076 case kCondLE:
2077 __ Bgec(rhs_reg, lhs, true_target);
2078 break;
2079 case kCondGT:
2080 __ Bltc(rhs_reg, lhs, true_target);
2081 break;
2082 }
2083 }
2084 }
2085 }
2086 if (false_target != nullptr) {
2087 __ B(false_target);
2088 }
2089}
2090
2091void LocationsBuilderMIPS64::VisitIf(HIf* if_instr) {
2092 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
2093 HInstruction* cond = if_instr->InputAt(0);
2094 if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
2095 locations->SetInAt(0, Location::RequiresRegister());
2096 }
2097}
2098
2099void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
2100 Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
2101 Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
2102 Label* always_true_target = true_target;
2103 if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
2104 if_instr->IfTrueSuccessor())) {
2105 always_true_target = nullptr;
2106 }
2107 if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
2108 if_instr->IfFalseSuccessor())) {
2109 false_target = nullptr;
2110 }
2111 GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
2112}
2113
2114void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
2115 LocationSummary* locations = new (GetGraph()->GetArena())
2116 LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
2117 HInstruction* cond = deoptimize->InputAt(0);
2118 DCHECK(cond->IsCondition());
2119 if (cond->AsCondition()->NeedsMaterialization()) {
2120 locations->SetInAt(0, Location::RequiresRegister());
2121 }
2122}
2123
2124void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
2125 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
2126 DeoptimizationSlowPathMIPS64(deoptimize);
2127 codegen_->AddSlowPath(slow_path);
2128 Label* slow_path_entry = slow_path->GetEntryLabel();
2129 GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
2130}
2131
2132void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
2133 const FieldInfo& field_info ATTRIBUTE_UNUSED) {
2134 LocationSummary* locations =
2135 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2136 locations->SetInAt(0, Location::RequiresRegister());
2137 if (Primitive::IsFloatingPointType(instruction->GetType())) {
2138 locations->SetOut(Location::RequiresFpuRegister());
2139 } else {
2140 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2141 }
2142}
2143
2144void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
2145 const FieldInfo& field_info) {
2146 Primitive::Type type = field_info.GetFieldType();
2147 LocationSummary* locations = instruction->GetLocations();
2148 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2149 LoadOperandType load_type = kLoadUnsignedByte;
2150 switch (type) {
2151 case Primitive::kPrimBoolean:
2152 load_type = kLoadUnsignedByte;
2153 break;
2154 case Primitive::kPrimByte:
2155 load_type = kLoadSignedByte;
2156 break;
2157 case Primitive::kPrimShort:
2158 load_type = kLoadSignedHalfword;
2159 break;
2160 case Primitive::kPrimChar:
2161 load_type = kLoadUnsignedHalfword;
2162 break;
2163 case Primitive::kPrimInt:
2164 case Primitive::kPrimFloat:
2165 load_type = kLoadWord;
2166 break;
2167 case Primitive::kPrimLong:
2168 case Primitive::kPrimDouble:
2169 load_type = kLoadDoubleword;
2170 break;
2171 case Primitive::kPrimNot:
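      // Heap references are 32-bit, so a reference field is loaded as an
      // unsigned word even on this 64-bit target.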
2172 load_type = kLoadUnsignedWord;
2173 break;
2174 case Primitive::kPrimVoid:
2175 LOG(FATAL) << "Unreachable type " << type;
2176 UNREACHABLE();
2177 }
2178 if (!Primitive::IsFloatingPointType(type)) {
2179 DCHECK(locations->Out().IsRegister());
2180 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2181 __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
2182 } else {
2183 DCHECK(locations->Out().IsFpuRegister());
2184 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
2185 __ LoadFpuFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
2186 }
2187
2188 codegen_->MaybeRecordImplicitNullCheck(instruction);
2189 // TODO: memory barrier?
2190}
2191
2192void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
2193 const FieldInfo& field_info ATTRIBUTE_UNUSED) {
2194 LocationSummary* locations =
2195 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2196 locations->SetInAt(0, Location::RequiresRegister());
2197 if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
2198 locations->SetInAt(1, Location::RequiresFpuRegister());
2199 } else {
2200 locations->SetInAt(1, Location::RequiresRegister());
2201 }
2202}
2203
2204void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
2205 const FieldInfo& field_info) {
2206 Primitive::Type type = field_info.GetFieldType();
2207 LocationSummary* locations = instruction->GetLocations();
2208 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2209 StoreOperandType store_type = kStoreByte;
2210 switch (type) {
2211 case Primitive::kPrimBoolean:
2212 case Primitive::kPrimByte:
2213 store_type = kStoreByte;
2214 break;
2215 case Primitive::kPrimShort:
2216 case Primitive::kPrimChar:
2217 store_type = kStoreHalfword;
2218 break;
2219 case Primitive::kPrimInt:
2220 case Primitive::kPrimFloat:
2221 case Primitive::kPrimNot:
2222 store_type = kStoreWord;
2223 break;
2224 case Primitive::kPrimLong:
2225 case Primitive::kPrimDouble:
2226 store_type = kStoreDoubleword;
2227 break;
2228 case Primitive::kPrimVoid:
2229 LOG(FATAL) << "Unreachable type " << type;
2230 UNREACHABLE();
2231 }
2232 if (!Primitive::IsFloatingPointType(type)) {
2233 DCHECK(locations->InAt(1).IsRegister());
2234 GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
2235 __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
2236 } else {
2237 DCHECK(locations->InAt(1).IsFpuRegister());
2238 FpuRegister src = locations->InAt(1).AsFpuRegister<FpuRegister>();
2239 __ StoreFpuToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
2240 }
2241
2242 codegen_->MaybeRecordImplicitNullCheck(instruction);
2243 // TODO: memory barriers?
2244 if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
2245 DCHECK(locations->InAt(1).IsRegister());
2246 GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
2247 codegen_->MarkGCCard(obj, src);
2248 }
2249}
2250
2251void LocationsBuilderMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
2252 HandleFieldGet(instruction, instruction->GetFieldInfo());
2253}
2254
2255void InstructionCodeGeneratorMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
2256 HandleFieldGet(instruction, instruction->GetFieldInfo());
2257}
2258
2259void LocationsBuilderMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
2260 HandleFieldSet(instruction, instruction->GetFieldInfo());
2261}
2262
2263void InstructionCodeGeneratorMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
2264 HandleFieldSet(instruction, instruction->GetFieldInfo());
2265}
2266
2267void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
2268 LocationSummary::CallKind call_kind =
2269 instruction->IsClassFinal() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
2270 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
2271 locations->SetInAt(0, Location::RequiresRegister());
2272 locations->SetInAt(1, Location::RequiresRegister());
2273  // The output overlaps the inputs, so it must not be allocated to the same register as either of them.
2274 locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2275}
2276
2277void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
2278 LocationSummary* locations = instruction->GetLocations();
2279 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2280 GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
2281 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
2282
2283 Label done;
2284
2285 // Return 0 if `obj` is null.
2286 // TODO: Avoid this check if we know `obj` is not null.
2287 __ Move(out, ZERO);
2288 __ Beqzc(obj, &done);
2289
2290 // Compare the class of `obj` with `cls`.
2291 __ LoadFromOffset(kLoadUnsignedWord, out, obj, mirror::Object::ClassOffset().Int32Value());
2292 if (instruction->IsClassFinal()) {
2293 // Classes must be equal for the instanceof to succeed.
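    // out = ((out ^ cls) == 0) ? 1 : 0: the xor clears `out` when the classes match,
    // and the unsigned compare with 1 turns that into a boolean.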
2294 __ Xor(out, out, cls);
2295 __ Sltiu(out, out, 1);
2296 } else {
2297 // If the classes are not equal, we go into a slow path.
2298 DCHECK(locations->OnlyCallsOnSlowPath());
2299 SlowPathCodeMIPS64* slow_path =
2300 new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction,
2301 locations->InAt(1),
2302 locations->Out(),
2303 instruction->GetDexPc());
2304 codegen_->AddSlowPath(slow_path);
2305 __ Bnec(out, cls, slow_path->GetEntryLabel());
2306 __ LoadConst32(out, 1);
2307 __ Bind(slow_path->GetExitLabel());
2308 }
2309
2310 __ Bind(&done);
2311}
2312
2313void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) {
2314 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2315 locations->SetOut(Location::ConstantLocation(constant));
2316}
2317
2318void InstructionCodeGeneratorMIPS64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
2319 // Will be generated at use site.
2320}
2321
2322void LocationsBuilderMIPS64::VisitNullConstant(HNullConstant* constant) {
2323 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2324 locations->SetOut(Location::ConstantLocation(constant));
2325}
2326
2327void InstructionCodeGeneratorMIPS64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
2328 // Will be generated at use site.
2329}
2330
2331void LocationsBuilderMIPS64::HandleInvoke(HInvoke* invoke) {
2332 InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
2333 CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
2334}
2335
2336void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
2337 HandleInvoke(invoke);
2338 // The register T0 is required to be used for the hidden argument in
2339 // art_quick_imt_conflict_trampoline, so add the hidden argument.
2340 invoke->GetLocations()->AddTemp(Location::RegisterLocation(T0));
2341}
2342
2343void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
2344 // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
2345 GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
2346 uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
2347 invoke->GetImtIndex() % mirror::Class::kImtSize, kMips64PointerSize).Uint32Value();
2348 Location receiver = invoke->GetLocations()->InAt(0);
2349 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2350 Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);
2351
2352 // Set the hidden argument.
2353 __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<GpuRegister>(),
2354 invoke->GetDexMethodIndex());
2355
2356 // temp = object->GetClass();
2357 if (receiver.IsStackSlot()) {
2358 __ LoadFromOffset(kLoadUnsignedWord, temp, SP, receiver.GetStackIndex());
2359 __ LoadFromOffset(kLoadUnsignedWord, temp, temp, class_offset);
2360 } else {
2361 __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
2362 }
2363 codegen_->MaybeRecordImplicitNullCheck(invoke);
2364 // temp = temp->GetImtEntryAt(method_offset);
2365 __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
2366 // T9 = temp->GetEntryPoint();
2367 __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
2368 // T9();
2369 __ Jalr(T9);
2370 DCHECK(!codegen_->IsLeafMethod());
2371 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2372}
2373
2374void LocationsBuilderMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
2375 // TODO intrinsic function
2376 HandleInvoke(invoke);
2377}
2378
2379void LocationsBuilderMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2380 // When we do not run baseline, explicit clinit checks triggered by static
2381 // invokes must have been pruned by art::PrepareForRegisterAllocation.
2382 DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2383
2384 // TODO - intrinsic function
2385 HandleInvoke(invoke);
2386
2387  // SetupBlockedRegisters() already blocks registers S2-S8 because they may be
2388  // clobbered elsewhere. Reduce register pressure further by not allocating a
2389  // register for the current method pointer, as the x86 baseline compiler does.
2390 // TODO: remove this once all the issues with register saving/restoring are
2391 // sorted out.
2392 LocationSummary* locations = invoke->GetLocations();
2393 Location location = locations->InAt(invoke->GetCurrentMethodInputIndex());
2394 if (location.IsUnallocated() && location.GetPolicy() == Location::kRequiresRegister) {
2395 locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::NoLocation());
2396 }
2397}
2398
2399static bool TryGenerateIntrinsicCode(HInvoke* invoke,
2400 CodeGeneratorMIPS64* codegen ATTRIBUTE_UNUSED) {
2401 if (invoke->GetLocations()->Intrinsified()) {
2402 // TODO - intrinsic function
2403 return true;
2404 }
2405 return false;
2406}
2407
2408void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
2409 // All registers are assumed to be correctly set up per the calling convention.
2410
2411 // TODO: Implement all kinds of calls:
2412 // 1) boot -> boot
2413 // 2) app -> boot
2414 // 3) app -> app
2415 //
2416 // Currently we implement the app -> app logic, which looks up in the resolve cache.
2417
2418 if (invoke->IsStringInit()) {
2419 GpuRegister reg = temp.AsRegister<GpuRegister>();
2420 // temp = thread->string_init_entrypoint
2421 __ LoadFromOffset(kLoadDoubleword,
2422 reg,
2423 TR,
2424 invoke->GetStringInitOffset());
2425 // T9 = temp->entry_point_from_quick_compiled_code_;
2426 __ LoadFromOffset(kLoadDoubleword,
2427 T9,
2428 reg,
2429 ArtMethod::EntryPointFromQuickCompiledCodeOffset(
2430 kMips64WordSize).Int32Value());
2431 // T9()
2432 __ Jalr(T9);
2433 } else if (invoke->IsRecursive()) {
2434 __ Jalr(&frame_entry_label_, T9);
2435 } else {
2436 Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
2437 GpuRegister reg = temp.AsRegister<GpuRegister>();
2438 GpuRegister method_reg;
2439 if (current_method.IsRegister()) {
2440 method_reg = current_method.AsRegister<GpuRegister>();
2441 } else {
2442 // TODO: use the appropriate DCHECK() here if possible.
2443 // DCHECK(invoke->GetLocations()->Intrinsified());
2444 DCHECK(!current_method.IsValid());
2445 method_reg = reg;
2446 __ Ld(reg, SP, kCurrentMethodStackOffset);
2447 }
2448
2449 // temp = temp->dex_cache_resolved_methods_;
2450 __ LoadFromOffset(kLoadUnsignedWord,
2451 reg,
2452 method_reg,
2453 ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
2454 // temp = temp[index_in_cache]
2455 __ LoadFromOffset(kLoadDoubleword,
2456 reg,
2457 reg,
2458 CodeGenerator::GetCachePointerOffset(invoke->GetDexMethodIndex()));
2459 // T9 = temp[offset_of_quick_compiled_code]
2460 __ LoadFromOffset(kLoadDoubleword,
2461 T9,
2462 reg,
2463 ArtMethod::EntryPointFromQuickCompiledCodeOffset(
2464 kMips64WordSize).Int32Value());
2465 // T9()
2466 __ Jalr(T9);
2467 }
2468
2469 DCHECK(!IsLeafMethod());
2470}
2471
2472void InstructionCodeGeneratorMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2473 // When we do not run baseline, explicit clinit checks triggered by static
2474 // invokes must have been pruned by art::PrepareForRegisterAllocation.
2475 DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2476
2477 if (TryGenerateIntrinsicCode(invoke, codegen_)) {
2478 return;
2479 }
2480
2481 LocationSummary* locations = invoke->GetLocations();
2482 codegen_->GenerateStaticOrDirectCall(invoke,
2483 locations->HasTemps()
2484 ? locations->GetTemp(0)
2485 : Location::NoLocation());
2486 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2487}
2488
2489void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
2490 // TODO: Try to generate intrinsics code.
2491 LocationSummary* locations = invoke->GetLocations();
2492 Location receiver = locations->InAt(0);
2493 GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
2494 size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
2495 invoke->GetVTableIndex(), kMips64PointerSize).SizeValue();
2496 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2497 Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);
2498
2499 // temp = object->GetClass();
2500 DCHECK(receiver.IsRegister());
2501 __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
2502 codegen_->MaybeRecordImplicitNullCheck(invoke);
2503 // temp = temp->GetMethodAt(method_offset);
2504 __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
2505 // T9 = temp->GetEntryPoint();
2506 __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
2507 // T9();
2508 __ Jalr(T9);
2509 DCHECK(!codegen_->IsLeafMethod());
2510 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2511}
2512
2513void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
2514 LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
2515 : LocationSummary::kNoCall;
2516 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
2517 locations->SetInAt(0, Location::RequiresRegister());
2518 locations->SetOut(Location::RequiresRegister());
2519}
2520
2521void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
2522 LocationSummary* locations = cls->GetLocations();
2523 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
2524 GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
2525 if (cls->IsReferrersClass()) {
2526 DCHECK(!cls->CanCallRuntime());
2527 DCHECK(!cls->MustGenerateClinitCheck());
2528 __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
2529 ArtMethod::DeclaringClassOffset().Int32Value());
2530 } else {
2531 DCHECK(cls->CanCallRuntime());
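    // Load the class from the current method's dex cache of resolved types; a null entry
    // means it is unresolved, and the slow path resolves (and, if required, initializes) it.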
2532 __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
2533 ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
2534 __ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
2535 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
2536 cls,
2537 cls,
2538 cls->GetDexPc(),
2539 cls->MustGenerateClinitCheck());
2540 codegen_->AddSlowPath(slow_path);
2541 __ Beqzc(out, slow_path->GetEntryLabel());
2542 if (cls->MustGenerateClinitCheck()) {
2543 GenerateClassInitializationCheck(slow_path, out);
2544 } else {
2545 __ Bind(slow_path->GetExitLabel());
2546 }
2547 }
2548}
2549
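// Byte offset of the Thread's pending-exception field, read relative to the ART thread register (TR).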
2550 static int32_t GetExceptionTlsOffset() {
2551 return Thread::ExceptionOffset<kMips64WordSize>().Int32Value();
2552}
2553
2554 void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
2555 LocationSummary* locations =
2556 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
2557 locations->SetOut(Location::RequiresRegister());
2558}
2559
2560void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) {
2561 GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>();
2562  __ LoadFromOffset(kLoadUnsignedWord, out, TR, GetExceptionTlsOffset());
2563}
2564
2565void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) {
2566 new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
2567}
2568
2569void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
2570 __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
2571 }
2572
2573void LocationsBuilderMIPS64::VisitLoadLocal(HLoadLocal* load) {
2574 load->SetLocations(nullptr);
2575}
2576
2577void InstructionCodeGeneratorMIPS64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
2578 // Nothing to do, this is driven by the code generator.
2579}
2580
2581void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
2582 LocationSummary* locations =
2583 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
2584 locations->SetInAt(0, Location::RequiresRegister());
2585 locations->SetOut(Location::RequiresRegister());
2586}
2587
2588void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
2589 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
2590 codegen_->AddSlowPath(slow_path);
2591
2592 LocationSummary* locations = load->GetLocations();
2593 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
2594 GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
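  // out = current_method->declaring_class_->dex_cache_strings_[string_index]; a null
  // result means the string is unresolved and is loaded on the slow path.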
2595 __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
2596 ArtMethod::DeclaringClassOffset().Int32Value());
2597 __ LoadFromOffset(kLoadUnsignedWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
2598 __ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
2599 __ Beqzc(out, slow_path->GetEntryLabel());
2600 __ Bind(slow_path->GetExitLabel());
2601}
2602
2603void LocationsBuilderMIPS64::VisitLocal(HLocal* local) {
2604 local->SetLocations(nullptr);
2605}
2606
2607void InstructionCodeGeneratorMIPS64::VisitLocal(HLocal* local) {
2608 DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
2609}
2610
2611void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) {
2612 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2613 locations->SetOut(Location::ConstantLocation(constant));
2614}
2615
2616void InstructionCodeGeneratorMIPS64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
2617 // Will be generated at use site.
2618}
2619
2620void LocationsBuilderMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
2621 LocationSummary* locations =
2622 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2623 InvokeRuntimeCallingConvention calling_convention;
2624 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2625}
2626
2627void InstructionCodeGeneratorMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
2628 codegen_->InvokeRuntime(instruction->IsEnter()
2629 ? QUICK_ENTRY_POINT(pLockObject)
2630 : QUICK_ENTRY_POINT(pUnlockObject),
2631 instruction,
2632 instruction->GetDexPc(),
2633 nullptr);
2634  if (instruction->IsEnter()) {
    CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
  } else {
    CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
  }
2635}
2636
2637void LocationsBuilderMIPS64::VisitMul(HMul* mul) {
2638 LocationSummary* locations =
2639 new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2640 switch (mul->GetResultType()) {
2641 case Primitive::kPrimInt:
2642 case Primitive::kPrimLong:
2643 locations->SetInAt(0, Location::RequiresRegister());
2644 locations->SetInAt(1, Location::RequiresRegister());
2645 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2646 break;
2647
2648 case Primitive::kPrimFloat:
2649 case Primitive::kPrimDouble:
2650 locations->SetInAt(0, Location::RequiresFpuRegister());
2651 locations->SetInAt(1, Location::RequiresFpuRegister());
2652 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2653 break;
2654
2655 default:
2656 LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2657 }
2658}
2659
2660void InstructionCodeGeneratorMIPS64::VisitMul(HMul* instruction) {
2661 Primitive::Type type = instruction->GetType();
2662 LocationSummary* locations = instruction->GetLocations();
2663
2664 switch (type) {
2665 case Primitive::kPrimInt:
2666 case Primitive::kPrimLong: {
2667 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2668 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
2669 GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
2670 if (type == Primitive::kPrimInt)
2671 __ MulR6(dst, lhs, rhs);
2672 else
2673 __ Dmul(dst, lhs, rhs);
2674 break;
2675 }
2676 case Primitive::kPrimFloat:
2677 case Primitive::kPrimDouble: {
2678 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
2679 FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
2680 FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
2681 if (type == Primitive::kPrimFloat)
2682 __ MulS(dst, lhs, rhs);
2683 else
2684 __ MulD(dst, lhs, rhs);
2685 break;
2686 }
2687 default:
2688 LOG(FATAL) << "Unexpected mul type " << type;
2689 }
2690}
2691
2692void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) {
2693 LocationSummary* locations =
2694 new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
2695 switch (neg->GetResultType()) {
2696 case Primitive::kPrimInt:
2697 case Primitive::kPrimLong:
2698 locations->SetInAt(0, Location::RequiresRegister());
2699 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2700 break;
2701
2702 case Primitive::kPrimFloat:
2703 case Primitive::kPrimDouble:
2704 locations->SetInAt(0, Location::RequiresFpuRegister());
2705 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2706 break;
2707
2708 default:
2709 LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
2710 }
2711}
2712
2713void InstructionCodeGeneratorMIPS64::VisitNeg(HNeg* instruction) {
2714 Primitive::Type type = instruction->GetType();
2715 LocationSummary* locations = instruction->GetLocations();
2716
2717 switch (type) {
2718 case Primitive::kPrimInt:
2719 case Primitive::kPrimLong: {
2720 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2721 GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
2722 if (type == Primitive::kPrimInt)
2723 __ Subu(dst, ZERO, src);
2724 else
2725 __ Dsubu(dst, ZERO, src);
2726 break;
2727 }
2728 case Primitive::kPrimFloat:
2729 case Primitive::kPrimDouble: {
2730 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
2731 FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
2732 if (type == Primitive::kPrimFloat)
2733 __ NegS(dst, src);
2734 else
2735 __ NegD(dst, src);
2736 break;
2737 }
2738 default:
2739 LOG(FATAL) << "Unexpected neg type " << type;
2740 }
2741}
2742
2743void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
2744 LocationSummary* locations =
2745 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2746 InvokeRuntimeCallingConvention calling_convention;
2747 locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2748 locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
2749 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2750 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
2751}
2752
2753void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
2754 LocationSummary* locations = instruction->GetLocations();
2755  // Move a uint16_t value (the type index) to a register.
2756 __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
2757 codegen_->InvokeRuntime(
2758 GetThreadOffset<kMips64WordSize>(instruction->GetEntrypoint()).Int32Value(),
2759 instruction,
2760 instruction->GetDexPc(),
2761 nullptr);
2762 CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
2763}
2764
2765void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
2766 LocationSummary* locations =
2767 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2768 InvokeRuntimeCallingConvention calling_convention;
2769 locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2770 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2771 locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
2772}
2773
2774void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) {
2775 LocationSummary* locations = instruction->GetLocations();
2776  // Move a uint16_t value (the type index) to a register.
2777 __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
2778 codegen_->InvokeRuntime(
2779 GetThreadOffset<kMips64WordSize>(instruction->GetEntrypoint()).Int32Value(),
2780 instruction,
2781 instruction->GetDexPc(),
2782 nullptr);
2783 CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
2784}
2785
2786void LocationsBuilderMIPS64::VisitNot(HNot* instruction) {
2787 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2788 locations->SetInAt(0, Location::RequiresRegister());
2789 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2790}
2791
2792void InstructionCodeGeneratorMIPS64::VisitNot(HNot* instruction) {
2793 Primitive::Type type = instruction->GetType();
2794 LocationSummary* locations = instruction->GetLocations();
2795
2796 switch (type) {
2797 case Primitive::kPrimInt:
2798 case Primitive::kPrimLong: {
2799 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2800 GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
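      // nor(src, ZERO) computes ~src; the same encoding covers both the int and long cases.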
2801 __ Nor(dst, src, ZERO);
2802 break;
2803 }
2804
2805 default:
2806 LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
2807 }
2808}
2809
2810void LocationsBuilderMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
2811 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2812 locations->SetInAt(0, Location::RequiresRegister());
2813 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2814}
2815
2816void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
2817 LocationSummary* locations = instruction->GetLocations();
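  // The input is 0 or 1, so xoring with 1 flips the boolean value.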
2818 __ Xori(locations->Out().AsRegister<GpuRegister>(),
2819 locations->InAt(0).AsRegister<GpuRegister>(),
2820 1);
2821}
2822
2823void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
2824 LocationSummary* locations =
2825 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2826 locations->SetInAt(0, Location::RequiresRegister());
2827 if (instruction->HasUses()) {
2828 locations->SetOut(Location::SameAsFirstInput());
2829 }
2830}
2831
2832void InstructionCodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
2833 if (codegen_->CanMoveNullCheckToUser(instruction)) {
2834 return;
2835 }
2836 Location obj = instruction->GetLocations()->InAt(0);
2837
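  // Load from the object into the zero register: the value is discarded, but a null `obj`
  // faults here, and the fault handler turns the resulting signal into a NullPointerException.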
2838 __ Lw(ZERO, obj.AsRegister<GpuRegister>(), 0);
2839 codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
2840}
2841
2842void InstructionCodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
2843 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS64(instruction);
2844 codegen_->AddSlowPath(slow_path);
2845
2846 Location obj = instruction->GetLocations()->InAt(0);
2847
2848 __ Beqzc(obj.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
2849}
2850
2851void InstructionCodeGeneratorMIPS64::VisitNullCheck(HNullCheck* instruction) {
2852 if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
2853 GenerateImplicitNullCheck(instruction);
2854 } else {
2855 GenerateExplicitNullCheck(instruction);
2856 }
2857}
2858
2859void LocationsBuilderMIPS64::VisitOr(HOr* instruction) {
2860 HandleBinaryOp(instruction);
2861}
2862
2863void InstructionCodeGeneratorMIPS64::VisitOr(HOr* instruction) {
2864 HandleBinaryOp(instruction);
2865}
2866
2867void LocationsBuilderMIPS64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
2868 LOG(FATAL) << "Unreachable";
2869}
2870
2871void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instruction) {
2872 codegen_->GetMoveResolver()->EmitNativeCode(instruction);
2873}
2874
2875void LocationsBuilderMIPS64::VisitParameterValue(HParameterValue* instruction) {
2876 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2877 Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
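  // Arguments passed on the stack live in the caller's frame; rebase their slots by this
  // method's frame size so they can be addressed relative to SP.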
2878 if (location.IsStackSlot()) {
2879 location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2880 } else if (location.IsDoubleStackSlot()) {
2881 location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2882 }
2883 locations->SetOut(location);
2884}
2885
2886void InstructionCodeGeneratorMIPS64::VisitParameterValue(HParameterValue* instruction
2887 ATTRIBUTE_UNUSED) {
2888 // Nothing to do, the parameter is already at its location.
2889}
2890
2891void LocationsBuilderMIPS64::VisitCurrentMethod(HCurrentMethod* instruction) {
2892 LocationSummary* locations =
2893 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2894 locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
2895}
2896
2897void InstructionCodeGeneratorMIPS64::VisitCurrentMethod(HCurrentMethod* instruction
2898 ATTRIBUTE_UNUSED) {
2899 // Nothing to do, the method is already at its location.
2900}
2901
2902void LocationsBuilderMIPS64::VisitPhi(HPhi* instruction) {
2903 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2904 for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
2905 locations->SetInAt(i, Location::Any());
2906 }
2907 locations->SetOut(Location::Any());
2908}
2909
2910void InstructionCodeGeneratorMIPS64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
2911 LOG(FATAL) << "Unreachable";
2912}
2913
2914void LocationsBuilderMIPS64::VisitRem(HRem* rem) {
2915 Primitive::Type type = rem->GetResultType();
2916 LocationSummary::CallKind call_kind =
2917 Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
2918 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
2919
2920 switch (type) {
2921 case Primitive::kPrimInt:
2922 case Primitive::kPrimLong:
2923 locations->SetInAt(0, Location::RequiresRegister());
2924 locations->SetInAt(1, Location::RequiresRegister());
2925 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2926 break;
2927
2928 case Primitive::kPrimFloat:
2929 case Primitive::kPrimDouble: {
2930 InvokeRuntimeCallingConvention calling_convention;
2931 locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
2932 locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
2933 locations->SetOut(calling_convention.GetReturnLocation(type));
2934 break;
2935 }
2936
2937 default:
2938 LOG(FATAL) << "Unexpected rem type " << type;
2939 }
2940}
2941
2942void InstructionCodeGeneratorMIPS64::VisitRem(HRem* instruction) {
2943 Primitive::Type type = instruction->GetType();
2944 LocationSummary* locations = instruction->GetLocations();
2945
2946 switch (type) {
2947 case Primitive::kPrimInt:
2948 case Primitive::kPrimLong: {
2949 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2950 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
2951 GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
2952 if (type == Primitive::kPrimInt)
2953 __ ModR6(dst, lhs, rhs);
2954 else
2955 __ Dmod(dst, lhs, rhs);
2956 break;
2957 }
2958
2959 case Primitive::kPrimFloat:
2960 case Primitive::kPrimDouble: {
2961 int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
2962 : QUICK_ENTRY_POINT(pFmod);
2963 codegen_->InvokeRuntime(entry_offset, instruction, instruction->GetDexPc(), nullptr);
2964 break;
2965 }
2966 default:
2967 LOG(FATAL) << "Unexpected rem type " << type;
2968 }
2969}
2970
2971void LocationsBuilderMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
2972 memory_barrier->SetLocations(nullptr);
2973}
2974
2975void InstructionCodeGeneratorMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
2976 GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
2977}
2978
2979void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) {
2980 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
2981 Primitive::Type return_type = ret->InputAt(0)->GetType();
2982 locations->SetInAt(0, Mips64ReturnLocation(return_type));
2983}
2984
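// The returned value was already constrained to the ABI return location by the
// locations builder, so only the frame teardown has to be emitted here.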
2985void InstructionCodeGeneratorMIPS64::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
2986 codegen_->GenerateFrameExit();
2987}
2988
2989void LocationsBuilderMIPS64::VisitReturnVoid(HReturnVoid* ret) {
2990 ret->SetLocations(nullptr);
2991}
2992
2993void InstructionCodeGeneratorMIPS64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
2994 codegen_->GenerateFrameExit();
2995}
2996
2997void LocationsBuilderMIPS64::VisitShl(HShl* shl) {
2998 HandleShift(shl);
2999}
3000
3001void InstructionCodeGeneratorMIPS64::VisitShl(HShl* shl) {
3002 HandleShift(shl);
3003}
3004
3005void LocationsBuilderMIPS64::VisitShr(HShr* shr) {
3006 HandleShift(shr);
3007}
3008
3009void InstructionCodeGeneratorMIPS64::VisitShr(HShr* shr) {
3010 HandleShift(shr);
3011}
3012
3013void LocationsBuilderMIPS64::VisitStoreLocal(HStoreLocal* store) {
3014 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
3015 Primitive::Type field_type = store->InputAt(1)->GetType();
3016 switch (field_type) {
3017 case Primitive::kPrimNot:
3018 case Primitive::kPrimBoolean:
3019 case Primitive::kPrimByte:
3020 case Primitive::kPrimChar:
3021 case Primitive::kPrimShort:
3022 case Primitive::kPrimInt:
3023 case Primitive::kPrimFloat:
3024 locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
3025 break;
3026
3027 case Primitive::kPrimLong:
3028 case Primitive::kPrimDouble:
3029 locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
3030 break;
3031
3032 default:
3033 LOG(FATAL) << "Unimplemented local type " << field_type;
3034 }
3035}
3036
3037void InstructionCodeGeneratorMIPS64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
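  // Nothing to do: the value being stored was constrained to the local's stack
  // slot by the locations builder, so it is already in place.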
3038}
3039
3040void LocationsBuilderMIPS64::VisitSub(HSub* instruction) {
3041 HandleBinaryOp(instruction);
3042}
3043
3044void InstructionCodeGeneratorMIPS64::VisitSub(HSub* instruction) {
3045 HandleBinaryOp(instruction);
3046}
3047
3048void LocationsBuilderMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
3049 HandleFieldGet(instruction, instruction->GetFieldInfo());
3050}
3051
3052void InstructionCodeGeneratorMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
3053 HandleFieldGet(instruction, instruction->GetFieldInfo());
3054}
3055
3056void LocationsBuilderMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
3057 HandleFieldSet(instruction, instruction->GetFieldInfo());
3058}
3059
3060void InstructionCodeGeneratorMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
3061 HandleFieldSet(instruction, instruction->GetFieldInfo());
3062}
3063
3064void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
3065 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
3066}
3067
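// Explicit suspend checks are skipped when another instruction already covers
// them: a check in a loop header is emitted on the back edge, and a check in the
// entry block is folded into the initial goto.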
3068void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
3069 HBasicBlock* block = instruction->GetBlock();
3070 if (block->GetLoopInformation() != nullptr) {
3071 DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
3072 // The back edge will generate the suspend check.
3073 return;
3074 }
3075 if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
3076 // The goto will generate the suspend check.
3077 return;
3078 }
3079 GenerateSuspendCheck(instruction, nullptr);
3080}
3081
3082void LocationsBuilderMIPS64::VisitTemporary(HTemporary* temp) {
3083 temp->SetLocations(nullptr);
3084}
3085
3086void InstructionCodeGeneratorMIPS64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
3087 // Nothing to do, this is driven by the code generator.
3088}
3089
3090void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) {
3091 LocationSummary* locations =
3092 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
3093 InvokeRuntimeCallingConvention calling_convention;
3094 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3095}
3096
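// The exception object is already in the first runtime argument register (see the
// locations above), so throwing reduces to a call to the pDeliverException
// entrypoint, which does not return.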
3097void InstructionCodeGeneratorMIPS64::VisitThrow(HThrow* instruction) {
3098 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
3099 instruction,
3100 instruction->GetDexPc(),
3101 nullptr);
3102 CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
3103}
3104
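// Most conversions are handled inline; only long-to-floating-point and
// floating-point-to-integral conversions go through runtime entrypoints, in which
// case the input and output must follow the runtime calling convention.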
3105void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
3106 Primitive::Type input_type = conversion->GetInputType();
3107 Primitive::Type result_type = conversion->GetResultType();
3108 DCHECK_NE(input_type, result_type);
3109
3110 if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
3111 (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
3112 LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
3113 }
3114
3115 LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
3116 if ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
3117 (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type))) {
3118 call_kind = LocationSummary::kCall;
3119 }
3120
3121 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
3122
3123 if (call_kind == LocationSummary::kNoCall) {
3124 if (Primitive::IsFloatingPointType(input_type)) {
3125 locations->SetInAt(0, Location::RequiresFpuRegister());
3126 } else {
3127 locations->SetInAt(0, Location::RequiresRegister());
3128 }
3129
3130 if (Primitive::IsFloatingPointType(result_type)) {
3131 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3132 } else {
3133 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3134 }
3135 } else {
3136 InvokeRuntimeCallingConvention calling_convention;
3137
3138 if (Primitive::IsFloatingPointType(input_type)) {
3139 locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
3140 } else {
3141 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3142 }
3143
3144 locations->SetOut(calling_convention.GetReturnLocation(result_type));
3145 }
3146}
3147
3148void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
3149 LocationSummary* locations = conversion->GetLocations();
3150 Primitive::Type result_type = conversion->GetResultType();
3151 Primitive::Type input_type = conversion->GetInputType();
3152
3153 DCHECK_NE(input_type, result_type);
3154
3155 if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
3156 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
3157 GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
3158
3159 switch (result_type) {
3160 case Primitive::kPrimChar:
3161 __ Andi(dst, src, 0xFFFF);
3162 break;
3163 case Primitive::kPrimByte:
3164 // long is never converted into types narrower than int directly,
3165 // so SEB and SEH can be used without ever causing unpredictable results
3166 // on 64-bit inputs
3167 DCHECK(input_type != Primitive::kPrimLong);
3168 __ Seb(dst, src);
3169 break;
3170 case Primitive::kPrimShort:
3171 // long is never converted into types narrower than int directly,
3172 // so SEB and SEH can be used without ever causing unpredictable results
3173 // on 64-bit inputs
3174 DCHECK(input_type != Primitive::kPrimLong);
3175 __ Seh(dst, src);
3176 break;
3177 case Primitive::kPrimInt:
3178 case Primitive::kPrimLong:
3179 // Sign-extend 32-bit int into bits 32 through 63 for
3180 // int-to-long and long-to-int conversions
3181 __ Sll(dst, src, 0);
3182 break;
3183
3184 default:
3185 LOG(FATAL) << "Unexpected type conversion from " << input_type
3186 << " to " << result_type;
3187 }
3188 } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
3189 if (input_type != Primitive::kPrimLong) {
3190 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
3191 GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
3192 __ Mtc1(src, FTMP);
3193 if (result_type == Primitive::kPrimFloat) {
3194 __ Cvtsw(dst, FTMP);
3195 } else {
3196 __ Cvtdw(dst, FTMP);
3197 }
3198 } else {
3199 int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
3200 : QUICK_ENTRY_POINT(pL2d);
3201 codegen_->InvokeRuntime(entry_offset,
3202 conversion,
3203 conversion->GetDexPc(),
3204 nullptr);
3205 }
3206 } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
3207 CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
3208 int32_t entry_offset;
3209 if (result_type != Primitive::kPrimLong) {
3210 entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2iz)
3211 : QUICK_ENTRY_POINT(pD2iz);
3212 } else {
3213 entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
3214 : QUICK_ENTRY_POINT(pD2l);
3215 }
3216 codegen_->InvokeRuntime(entry_offset,
3217 conversion,
3218 conversion->GetDexPc(),
3219 nullptr);
3220 } else if (Primitive::IsFloatingPointType(result_type) &&
3221 Primitive::IsFloatingPointType(input_type)) {
3222 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
3223 FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
3224 if (result_type == Primitive::kPrimFloat) {
3225 __ Cvtsd(dst, src);
3226 } else {
3227 __ Cvtds(dst, src);
3228 }
3229 } else {
3230 LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
3231 << " to " << result_type;
3232 }
3233}
3234
3235void LocationsBuilderMIPS64::VisitUShr(HUShr* ushr) {
3236 HandleShift(ushr);
3237}
3238
3239void InstructionCodeGeneratorMIPS64::VisitUShr(HUShr* ushr) {
3240 HandleShift(ushr);
3241}
3242
3243void LocationsBuilderMIPS64::VisitXor(HXor* instruction) {
3244 HandleBinaryOp(instruction);
3245}
3246
3247void InstructionCodeGeneratorMIPS64::VisitXor(HXor* instruction) {
3248 HandleBinaryOp(instruction);
3249}
3250
3251void LocationsBuilderMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
3252  // Nothing to do; this should have been removed by the PrepareForRegisterAllocation pass.
3253 LOG(FATAL) << "Unreachable";
3254}
3255
3256void InstructionCodeGeneratorMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
3257  // Nothing to do; this should have been removed by the PrepareForRegisterAllocation pass.
3258 LOG(FATAL) << "Unreachable";
3259}
3260
3261void LocationsBuilderMIPS64::VisitEqual(HEqual* comp) {
3262 VisitCondition(comp);
3263}
3264
3265void InstructionCodeGeneratorMIPS64::VisitEqual(HEqual* comp) {
3266 VisitCondition(comp);
3267}
3268
3269void LocationsBuilderMIPS64::VisitNotEqual(HNotEqual* comp) {
3270 VisitCondition(comp);
3271}
3272
3273void InstructionCodeGeneratorMIPS64::VisitNotEqual(HNotEqual* comp) {
3274 VisitCondition(comp);
3275}
3276
3277void LocationsBuilderMIPS64::VisitLessThan(HLessThan* comp) {
3278 VisitCondition(comp);
3279}
3280
3281void InstructionCodeGeneratorMIPS64::VisitLessThan(HLessThan* comp) {
3282 VisitCondition(comp);
3283}
3284
3285void LocationsBuilderMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
3286 VisitCondition(comp);
3287}
3288
3289void InstructionCodeGeneratorMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
3290 VisitCondition(comp);
3291}
3292
3293void LocationsBuilderMIPS64::VisitGreaterThan(HGreaterThan* comp) {
3294 VisitCondition(comp);
3295}
3296
3297void InstructionCodeGeneratorMIPS64::VisitGreaterThan(HGreaterThan* comp) {
3298 VisitCondition(comp);
3299}
3300
3301void LocationsBuilderMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
3302 VisitCondition(comp);
3303}
3304
3305void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
3306 VisitCondition(comp);
3307}
3308
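// HFakeString only exists for the baseline compiler; it is materialized as the
// null constant and produced at the use site, so no code is emitted here.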
3309void LocationsBuilderMIPS64::VisitFakeString(HFakeString* instruction) {
3310 DCHECK(codegen_->IsBaseline());
3311 LocationSummary* locations =
3312 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3313 locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
3314}
3315
3316void InstructionCodeGeneratorMIPS64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
3317 DCHECK(codegen_->IsBaseline());
3318 // Will be generated at use site.
3319}
3320
3321}  // namespace mips64
3322} // namespace art