/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_generator_mips64.h"

#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
#include "intrinsics.h"
#include "art_method.h"
#include "mirror/array-inl.h"
#include "mirror/class-inl.h"
#include "offsets.h"
#include "thread.h"
#include "utils/mips64/assembler_mips64.h"
#include "utils/assembler.h"
#include "utils/stack_checks.h"

namespace art {
namespace mips64 {

static constexpr int kCurrentMethodStackOffset = 0;
static constexpr GpuRegister kMethodRegisterArgument = A0;

// We need extra temporary/scratch registers (in addition to AT) in some cases.
static constexpr GpuRegister TMP = T8;
static constexpr FpuRegister FTMP = F8;

// ART Thread Register.
static constexpr GpuRegister TR = S1;

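// Integral, boolean, char and reference results are returned in V0; floating-point results in F0.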
Location Mips64ReturnLocation(Primitive::Type return_type) {
  switch (return_type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
    case Primitive::kPrimLong:
      return Location::RegisterLocation(V0);

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      return Location::FpuRegisterLocation(F0);

    case Primitive::kPrimVoid:
      return Location();
  }
  UNREACHABLE();
}

Location InvokeDexCallingConventionVisitorMIPS64::GetReturnLocation(Primitive::Type type) const {
  return Mips64ReturnLocation(type);
}

Location InvokeDexCallingConventionVisitorMIPS64::GetMethodLocation() const {
  return Location::RegisterLocation(kMethodRegisterArgument);
}

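// Picks the location of the next method argument: floating-point values go to FPU argument
// registers, everything else to GPR argument registers, and the overflow goes to the stack.
// Note that handing out a register of either kind advances both register indexes, so a given
// argument position is never reused across the two register banks.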
Location InvokeDexCallingConventionVisitorMIPS64::GetNextLocation(Primitive::Type type) {
  Location next_location;
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unexpected parameter type " << type;
  }

  if (Primitive::IsFloatingPointType(type) &&
      (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
    next_location = Location::FpuRegisterLocation(
        calling_convention.GetFpuRegisterAt(float_index_++));
    gp_index_++;
  } else if (!Primitive::IsFloatingPointType(type) &&
             (gp_index_ < calling_convention.GetNumberOfRegisters())) {
    next_location = Location::RegisterLocation(calling_convention.GetRegisterAt(gp_index_++));
    float_index_++;
  } else {
    size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
    next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
                                                 : Location::StackSlot(stack_offset);
  }

  // Space on the stack is reserved for all arguments.
  stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;

  // TODO: review

  // TODO: shouldn't we use a whole machine word per argument on the stack?
  // Implicit 4-byte method pointer (and such) will cause misalignment.

  return next_location;
}

Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) {
  return Mips64ReturnLocation(type);
}

#define __ down_cast<CodeGeneratorMIPS64*>(codegen)->GetAssembler()->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()

class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(0),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimInt,
                               locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimInt);
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowArrayBounds),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }

 private:
  HBoundsCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
};

class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowDivZero),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }

 private:
  HDivZeroCheck* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
};

class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  LoadClassSlowPathMIPS64(HLoadClass* cls,
                          HInstruction* at,
                          uint32_t dex_pc,
                          bool do_clinit)
      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
    DCHECK(at->IsLoadClass() || at->IsClinitCheck());
  }

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = at_->GetLocations();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadConst32(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
    int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
                                            : QUICK_ENTRY_POINT(pInitializeType);
    mips64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
    if (do_clinit_) {
      CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
    } else {
      CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
    }

    // Move the class to the desired location.
    Location out = locations->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      Primitive::Type type = at_->GetType();
      mips64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathMIPS64"; }

 private:
  // The class this slow path will load.
  HLoadClass* const cls_;

  // The instruction where this slow path is happening.
  // (Might be the load class or an initialization check).
  HInstruction* const at_;

  // The dex PC of `at_`.
  const uint32_t dex_pc_;

  // Whether to initialize the class.
  const bool do_clinit_;

  DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
};

class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit LoadStringSlowPathMIPS64(HLoadString* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    InvokeRuntimeCallingConvention calling_convention;
    __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
    Primitive::Type type = instruction_->GetType();
    mips64_codegen->MoveLocation(locations->Out(),
                                 calling_convention.GetReturnLocation(type),
                                 type);

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }

 private:
  HLoadString* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
};

class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : instruction_(instr) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pThrowNullPointer),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
  }

  bool IsFatal() const OVERRIDE { return true; }

  const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }

 private:
  HNullCheck* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
};

class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor)
      : instruction_(instruction), successor_(successor) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pTestSuspend),
                                  instruction_,
                                  instruction_->GetDexPc(),
                                  this);
    CheckEntrypointTypes<kQuickTestSuspend, void, void>();
    RestoreLiveRegisters(codegen, instruction_->GetLocations());
    if (successor_ == nullptr) {
      __ B(GetReturnLabel());
    } else {
      __ B(mips64_codegen->GetLabelOf(successor_));
    }
  }

  Label* GetReturnLabel() {
    DCHECK(successor_ == nullptr);
    return &return_label_;
  }

  const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }

 private:
  HSuspendCheck* const instruction_;
  // If not null, the block to branch to after the suspend check.
  HBasicBlock* const successor_;

  // If `successor_` is null, the label to branch to after the suspend check.
  Label return_label_;

  DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathMIPS64);
};

class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit TypeCheckSlowPathMIPS64(HInstruction* instruction) : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    LocationSummary* locations = instruction_->GetLocations();
    Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
                                                        : locations->Out();
    uint32_t dex_pc = instruction_->GetDexPc();
    DCHECK(instruction_->IsCheckCast()
           || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);

    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, locations);

    // We're moving two locations to locations that could overlap, so we need a parallel
    // move resolver.
    InvokeRuntimeCallingConvention calling_convention;
    codegen->EmitParallelMoves(locations->InAt(1),
                               Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
                               Primitive::kPrimNot,
                               object_class,
                               Location::RegisterLocation(calling_convention.GetRegisterAt(1)),
                               Primitive::kPrimNot);

    if (instruction_->IsInstanceOf()) {
      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pInstanceofNonTrivial),
                                    instruction_,
                                    dex_pc,
                                    this);
      Primitive::Type ret_type = instruction_->GetType();
      Location ret_loc = calling_convention.GetReturnLocation(ret_type);
      mips64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
      CheckEntrypointTypes<kQuickInstanceofNonTrivial,
                           uint32_t,
                           const mirror::Class*,
                           const mirror::Class*>();
    } else {
      DCHECK(instruction_->IsCheckCast());
      mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this);
      CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
    }

    RestoreLiveRegisters(codegen, locations);
    __ B(GetExitLabel());
  }

  const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; }

 private:
  HInstruction* const instruction_;

  DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
};

class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
 public:
  explicit DeoptimizationSlowPathMIPS64(HInstruction* instruction)
      : instruction_(instruction) {}

  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
    __ Bind(GetEntryLabel());
    SaveLiveRegisters(codegen, instruction_->GetLocations());
    DCHECK(instruction_->IsDeoptimize());
    HDeoptimize* deoptimize = instruction_->AsDeoptimize();
    uint32_t dex_pc = deoptimize->GetDexPc();
    CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
    mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize), instruction_, dex_pc, this);
  }

  const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }

 private:
  HInstruction* const instruction_;
  DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
};

CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
                                         const Mips64InstructionSetFeatures& isa_features,
                                         const CompilerOptions& compiler_options)
    : CodeGenerator(graph,
                    kNumberOfGpuRegisters,
                    kNumberOfFpuRegisters,
                    0,  // kNumberOfRegisterPairs
                    ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
                                        arraysize(kCoreCalleeSaves)),
                    ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
                                        arraysize(kFpuCalleeSaves)),
                    compiler_options),
      block_labels_(graph->GetArena(), 0),
      location_builder_(graph, this),
      instruction_visitor_(graph, this),
      move_resolver_(graph->GetArena(), this),
      isa_features_(isa_features) {
  // Save RA (containing the return address) to mimic Quick.
  AddAllocatedRegister(Location::RegisterLocation(RA));
}

#undef __
#define __ down_cast<Mips64Assembler*>(GetAssembler())->
#define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kMips64WordSize, x).Int32Value()

void CodeGeneratorMIPS64::Finalize(CodeAllocator* allocator) {
  CodeGenerator::Finalize(allocator);
}

Mips64Assembler* ParallelMoveResolverMIPS64::GetAssembler() const {
  return codegen_->GetAssembler();
}

void ParallelMoveResolverMIPS64::EmitMove(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->MoveLocation(move->GetDestination(), move->GetSource(), move->GetType());
}

void ParallelMoveResolverMIPS64::EmitSwap(size_t index) {
  MoveOperands* move = moves_.Get(index);
  codegen_->SwapLocations(move->GetDestination(), move->GetSource(), move->GetType());
}

void ParallelMoveResolverMIPS64::RestoreScratch(int reg) {
  // Pop reg
  __ Ld(GpuRegister(reg), SP, 0);
  __ DecreaseFrameSize(kMips64WordSize);
}

void ParallelMoveResolverMIPS64::SpillScratch(int reg) {
  // Push reg
  __ IncreaseFrameSize(kMips64WordSize);
  __ Sd(GpuRegister(reg), SP, 0);
}

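// Swaps two stack slots using TMP plus one more scratch register (spilling V0 when none is free).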
void ParallelMoveResolverMIPS64::Exchange(int index1, int index2, bool double_slot) {
  LoadOperandType load_type = double_slot ? kLoadDoubleword : kLoadWord;
  StoreOperandType store_type = double_slot ? kStoreDoubleword : kStoreWord;
  // Allocate a scratch register other than TMP, if available.
  // Else, spill V0 (arbitrary choice) and use it as a scratch register (it will be
  // automatically unspilled when the scratch scope object is destroyed).
  ScratchRegisterScope ensure_scratch(this, TMP, V0, codegen_->GetNumberOfCoreRegisters());
  // If V0 spills onto the stack, SP-relative offsets need to be adjusted.
  int stack_offset = ensure_scratch.IsSpilled() ? kMips64WordSize : 0;
  __ LoadFromOffset(load_type,
                    GpuRegister(ensure_scratch.GetRegister()),
                    SP,
                    index1 + stack_offset);
  __ LoadFromOffset(load_type,
                    TMP,
                    SP,
                    index2 + stack_offset);
  __ StoreToOffset(store_type,
                   GpuRegister(ensure_scratch.GetRegister()),
                   SP,
                   index2 + stack_offset);
  __ StoreToOffset(store_type, TMP, SP, index1 + stack_offset);
}

static dwarf::Reg DWARFReg(GpuRegister reg) {
  return dwarf::Reg::Mips64Core(static_cast<int>(reg));
}

// TODO: mapping of floating-point registers to DWARF

void CodeGeneratorMIPS64::GenerateFrameEntry() {
  __ Bind(&frame_entry_label_);

  bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kMips64) || !IsLeafMethod();

  if (do_overflow_check) {
    __ LoadFromOffset(kLoadWord,
                      ZERO,
                      SP,
                      -static_cast<int32_t>(GetStackOverflowReservedBytes(kMips64)));
    RecordPcInfo(nullptr, 0);
  }

  // TODO: anything related to T9/GP/GOT/PIC/.so's?

  if (HasEmptyFrame()) {
    return;
  }

  // Make sure the frame size isn't unreasonably large. Per the various APIs
  // it looks like it should always be less than 2GB in size, which allows
  // us to use 32-bit signed offsets from the stack pointer.
  if (GetFrameSize() > 0x7FFFFFFF)
    LOG(FATAL) << "Stack frame larger than 2GB";

  // Spill callee-saved registers.
  // Note that their cumulative size is small and they can be indexed using
  // 16-bit offsets.

  // TODO: increment/decrement SP in one step instead of two or remove this comment.

  uint32_t ofs = FrameEntrySpillSize();
  __ IncreaseFrameSize(ofs);

  for (int i = arraysize(kCoreCalleeSaves) - 1; i >= 0; --i) {
    GpuRegister reg = kCoreCalleeSaves[i];
    if (allocated_registers_.ContainsCoreRegister(reg)) {
      ofs -= kMips64WordSize;
      __ Sd(reg, SP, ofs);
      __ cfi().RelOffset(DWARFReg(reg), ofs);
    }
  }

  for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
    FpuRegister reg = kFpuCalleeSaves[i];
    if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
      ofs -= kMips64WordSize;
      __ Sdc1(reg, SP, ofs);
      // TODO: __ cfi().RelOffset(DWARFReg(reg), ofs);
    }
  }

  // Allocate the rest of the frame and store the current method pointer
  // at its end.

  __ IncreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());

  static_assert(IsInt<16>(kCurrentMethodStackOffset),
                "kCurrentMethodStackOffset must fit into int16_t");
  __ Sd(kMethodRegisterArgument, SP, kCurrentMethodStackOffset);
}

void CodeGeneratorMIPS64::GenerateFrameExit() {
  __ cfi().RememberState();

  // TODO: anything related to T9/GP/GOT/PIC/.so's?

  if (!HasEmptyFrame()) {
    // Deallocate the rest of the frame.

    __ DecreaseFrameSize(GetFrameSize() - FrameEntrySpillSize());

    // Restore callee-saved registers.
    // Note that their cumulative size is small and they can be indexed using
    // 16-bit offsets.

    // TODO: increment/decrement SP in one step instead of two or remove this comment.

    uint32_t ofs = 0;

    for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
      FpuRegister reg = kFpuCalleeSaves[i];
      if (allocated_registers_.ContainsFloatingPointRegister(reg)) {
        __ Ldc1(reg, SP, ofs);
        ofs += kMips64WordSize;
        // TODO: __ cfi().Restore(DWARFReg(reg));
      }
    }

    for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
      GpuRegister reg = kCoreCalleeSaves[i];
      if (allocated_registers_.ContainsCoreRegister(reg)) {
        __ Ld(reg, SP, ofs);
        ofs += kMips64WordSize;
        __ cfi().Restore(DWARFReg(reg));
      }
    }

    DCHECK_EQ(ofs, FrameEntrySpillSize());
    __ DecreaseFrameSize(ofs);
  }

  __ Jr(RA);

  __ cfi().RestoreState();
  __ cfi().DefCFAOffset(GetFrameSize());
}

void CodeGeneratorMIPS64::Bind(HBasicBlock* block) {
  __ Bind(GetLabelOf(block));
}

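// Emits a move between two arbitrary locations (GPR, FPU register, stack slot or constant),
// using `type` to decide between 32-bit and 64-bit loads and stores.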
void CodeGeneratorMIPS64::MoveLocation(Location destination,
                                       Location source,
                                       Primitive::Type type) {
  if (source.Equals(destination)) {
    return;
  }

  // A valid move can always be inferred from the destination and source
  // locations. When moving from and to a register, the argument type can be
  // used to generate 32bit instead of 64bit moves.
  bool unspecified_type = (type == Primitive::kPrimVoid);
  DCHECK_EQ(unspecified_type, false);

  if (destination.IsRegister() || destination.IsFpuRegister()) {
    if (unspecified_type) {
      HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
      if (source.IsStackSlot() ||
          (src_cst != nullptr && (src_cst->IsIntConstant()
                                  || src_cst->IsFloatConstant()
                                  || src_cst->IsNullConstant()))) {
        // For stack slots and 32bit constants, a 64bit type is appropriate.
        type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
      } else {
        // If the source is a double stack slot or a 64bit constant, a 64bit
        // type is appropriate. Else the source is a register, and since the
        // type has not been specified, we chose a 64bit type to force a 64bit
        // move.
        type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
      }
    }
    DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(type)) ||
           (destination.IsRegister() && !Primitive::IsFloatingPointType(type)));
    if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
      // Move to GPR/FPR from stack
      LoadOperandType load_type = source.IsStackSlot() ? kLoadWord : kLoadDoubleword;
      if (Primitive::IsFloatingPointType(type)) {
        __ LoadFpuFromOffset(load_type,
                             destination.AsFpuRegister<FpuRegister>(),
                             SP,
                             source.GetStackIndex());
      } else {
        // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
        __ LoadFromOffset(load_type,
                          destination.AsRegister<GpuRegister>(),
                          SP,
                          source.GetStackIndex());
      }
    } else if (source.IsConstant()) {
      // Move to GPR/FPR from constant
      GpuRegister gpr = AT;
      if (!Primitive::IsFloatingPointType(type)) {
        gpr = destination.AsRegister<GpuRegister>();
      }
      if (type == Primitive::kPrimInt || type == Primitive::kPrimFloat) {
        __ LoadConst32(gpr, GetInt32ValueOf(source.GetConstant()->AsConstant()));
      } else {
        __ LoadConst64(gpr, GetInt64ValueOf(source.GetConstant()->AsConstant()));
      }
      if (type == Primitive::kPrimFloat) {
        __ Mtc1(gpr, destination.AsFpuRegister<FpuRegister>());
      } else if (type == Primitive::kPrimDouble) {
        __ Dmtc1(gpr, destination.AsFpuRegister<FpuRegister>());
      }
    } else {
      if (destination.IsRegister()) {
        // Move to GPR from GPR
        __ Move(destination.AsRegister<GpuRegister>(), source.AsRegister<GpuRegister>());
      } else {
        // Move to FPR from FPR
        if (type == Primitive::kPrimFloat) {
          __ MovS(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
        } else {
          DCHECK_EQ(type, Primitive::kPrimDouble);
          __ MovD(destination.AsFpuRegister<FpuRegister>(), source.AsFpuRegister<FpuRegister>());
        }
      }
    }
  } else {  // The destination is not a register. It must be a stack slot.
    DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
    if (source.IsRegister() || source.IsFpuRegister()) {
      if (unspecified_type) {
        if (source.IsRegister()) {
          type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
        } else {
          type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
        }
      }
      DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(type)) &&
             (source.IsFpuRegister() == Primitive::IsFloatingPointType(type)));
      // Move to stack from GPR/FPR
      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
      if (source.IsRegister()) {
        __ StoreToOffset(store_type,
                         source.AsRegister<GpuRegister>(),
                         SP,
                         destination.GetStackIndex());
      } else {
        __ StoreFpuToOffset(store_type,
                            source.AsFpuRegister<FpuRegister>(),
                            SP,
                            destination.GetStackIndex());
      }
    } else if (source.IsConstant()) {
      // Move to stack from constant
      HConstant* src_cst = source.GetConstant();
      StoreOperandType store_type = destination.IsStackSlot() ? kStoreWord : kStoreDoubleword;
      if (destination.IsStackSlot()) {
        __ LoadConst32(TMP, GetInt32ValueOf(src_cst->AsConstant()));
      } else {
        __ LoadConst64(TMP, GetInt64ValueOf(src_cst->AsConstant()));
      }
      __ StoreToOffset(store_type, TMP, SP, destination.GetStackIndex());
    } else {
      DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
      DCHECK_EQ(source.IsDoubleStackSlot(), destination.IsDoubleStackSlot());
      // Move to stack from stack
      if (destination.IsStackSlot()) {
        __ LoadFromOffset(kLoadWord, TMP, SP, source.GetStackIndex());
        __ StoreToOffset(kStoreWord, TMP, SP, destination.GetStackIndex());
      } else {
        __ LoadFromOffset(kLoadDoubleword, TMP, SP, source.GetStackIndex());
        __ StoreToOffset(kStoreDoubleword, TMP, SP, destination.GetStackIndex());
      }
    }
  }
}

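// Swaps the contents of two locations. Register<->register and register<->stack swaps are
// emitted inline through TMP/AT; stack<->stack swaps are delegated to the parallel move
// resolver's Exchange().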
void CodeGeneratorMIPS64::SwapLocations(Location loc1,
                                        Location loc2,
                                        Primitive::Type type ATTRIBUTE_UNUSED) {
  DCHECK(!loc1.IsConstant());
  DCHECK(!loc2.IsConstant());

  if (loc1.Equals(loc2)) {
    return;
  }

  bool is_slot1 = loc1.IsStackSlot() || loc1.IsDoubleStackSlot();
  bool is_slot2 = loc2.IsStackSlot() || loc2.IsDoubleStackSlot();
  bool is_fp_reg1 = loc1.IsFpuRegister();
  bool is_fp_reg2 = loc2.IsFpuRegister();

  if (loc2.IsRegister() && loc1.IsRegister()) {
    // Swap 2 GPRs
    GpuRegister r1 = loc1.AsRegister<GpuRegister>();
    GpuRegister r2 = loc2.AsRegister<GpuRegister>();
    __ Move(TMP, r2);
    __ Move(r2, r1);
    __ Move(r1, TMP);
  } else if (is_fp_reg2 && is_fp_reg1) {
    // Swap 2 FPRs
    FpuRegister r1 = loc1.AsFpuRegister<FpuRegister>();
    FpuRegister r2 = loc2.AsFpuRegister<FpuRegister>();
    // TODO: Can MOV.S/MOV.D be used here to save one instruction?
    // Need to distinguish float from double, right?
    __ Dmfc1(TMP, r2);
    __ Dmfc1(AT, r1);
    __ Dmtc1(TMP, r1);
    __ Dmtc1(AT, r2);
  } else if (is_slot1 != is_slot2) {
    // Swap GPR/FPR and stack slot
    Location reg_loc = is_slot1 ? loc2 : loc1;
    Location mem_loc = is_slot1 ? loc1 : loc2;
    LoadOperandType load_type = mem_loc.IsStackSlot() ? kLoadWord : kLoadDoubleword;
    StoreOperandType store_type = mem_loc.IsStackSlot() ? kStoreWord : kStoreDoubleword;
    // TODO: use load_type = kLoadUnsignedWord when type == Primitive::kPrimNot.
    __ LoadFromOffset(load_type, TMP, SP, mem_loc.GetStackIndex());
    if (reg_loc.IsFpuRegister()) {
      __ StoreFpuToOffset(store_type,
                          reg_loc.AsFpuRegister<FpuRegister>(),
                          SP,
                          mem_loc.GetStackIndex());
      // TODO: review this MTC1/DMTC1 move
      if (mem_loc.IsStackSlot()) {
        __ Mtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
      } else {
        DCHECK(mem_loc.IsDoubleStackSlot());
        __ Dmtc1(TMP, reg_loc.AsFpuRegister<FpuRegister>());
      }
    } else {
      __ StoreToOffset(store_type, reg_loc.AsRegister<GpuRegister>(), SP, mem_loc.GetStackIndex());
      __ Move(reg_loc.AsRegister<GpuRegister>(), TMP);
    }
  } else if (is_slot1 && is_slot2) {
    move_resolver_.Exchange(loc1.GetStackIndex(),
                            loc2.GetStackIndex(),
                            loc1.IsDoubleStackSlot());
  } else {
    LOG(FATAL) << "Unimplemented swap between locations " << loc1 << " and " << loc2;
  }
}

void CodeGeneratorMIPS64::Move(HInstruction* instruction,
                               Location location,
                               HInstruction* move_for) {
  LocationSummary* locations = instruction->GetLocations();
  Primitive::Type type = instruction->GetType();
  DCHECK_NE(type, Primitive::kPrimVoid);

  if (instruction->IsCurrentMethod()) {
    MoveLocation(location, Location::DoubleStackSlot(kCurrentMethodStackOffset), type);
  } else if (locations != nullptr && locations->Out().Equals(location)) {
    return;
  } else if (instruction->IsIntConstant()
             || instruction->IsLongConstant()
             || instruction->IsNullConstant()) {
    if (location.IsRegister()) {
      // Move to GPR from constant
      GpuRegister dst = location.AsRegister<GpuRegister>();
      if (instruction->IsNullConstant() || instruction->IsIntConstant()) {
        __ LoadConst32(dst, GetInt32ValueOf(instruction->AsConstant()));
      } else {
        __ LoadConst64(dst, instruction->AsLongConstant()->GetValue());
      }
    } else {
      DCHECK(location.IsStackSlot() || location.IsDoubleStackSlot());
      // Move to stack from constant
      if (location.IsStackSlot()) {
        __ LoadConst32(TMP, GetInt32ValueOf(instruction->AsConstant()));
        __ StoreToOffset(kStoreWord, TMP, SP, location.GetStackIndex());
      } else {
        __ LoadConst64(TMP, instruction->AsLongConstant()->GetValue());
        __ StoreToOffset(kStoreDoubleword, TMP, SP, location.GetStackIndex());
      }
    }
  } else if (instruction->IsTemporary()) {
    Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
    MoveLocation(location, temp_location, type);
  } else if (instruction->IsLoadLocal()) {
    uint32_t stack_slot = GetStackSlot(instruction->AsLoadLocal()->GetLocal());
    if (Primitive::Is64BitType(type)) {
      MoveLocation(location, Location::DoubleStackSlot(stack_slot), type);
    } else {
      MoveLocation(location, Location::StackSlot(stack_slot), type);
    }
  } else {
    DCHECK((instruction->GetNext() == move_for) || instruction->GetNext()->IsTemporary());
    MoveLocation(location, locations->Out(), type);
  }
}

Location CodeGeneratorMIPS64::GetStackLocation(HLoadLocal* load) const {
  Primitive::Type type = load->GetType();

  switch (type) {
    case Primitive::kPrimNot:
    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      return Location::StackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      return Location::DoubleStackSlot(GetStackSlot(load->GetLocal()));

    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected type " << type;
  }

  LOG(FATAL) << "Unreachable";
  return Location::NoLocation();
}

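// Marks the GC card for `object` after reference `value` has been stored into it. A null
// `value` skips the marking; otherwise the card address is the thread-local card table base
// plus the object address shifted right by the card size, and the card is dirtied by storing
// the low byte of the card table base at that address.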
void CodeGeneratorMIPS64::MarkGCCard(GpuRegister object, GpuRegister value) {
  Label done;
  GpuRegister card = AT;
  GpuRegister temp = TMP;
  __ Beqzc(value, &done);
  __ LoadFromOffset(kLoadDoubleword,
                    card,
                    TR,
                    Thread::CardTableOffset<kMips64WordSize>().Int32Value());
  __ Dsrl(temp, object, gc::accounting::CardTable::kCardShift);
  __ Daddu(temp, card, temp);
  __ Sb(card, temp, 0);
  __ Bind(&done);
}

void CodeGeneratorMIPS64::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const {
  // ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
  blocked_core_registers_[ZERO] = true;
  blocked_core_registers_[K0] = true;
  blocked_core_registers_[K1] = true;
  blocked_core_registers_[GP] = true;
  blocked_core_registers_[SP] = true;
  blocked_core_registers_[RA] = true;

  // AT and TMP(T8) are used as temporary/scratch registers
  // (similar to how AT is used by MIPS assemblers).
  blocked_core_registers_[AT] = true;
  blocked_core_registers_[TMP] = true;
  blocked_fpu_registers_[FTMP] = true;

  // Reserve suspend and thread registers.
  blocked_core_registers_[S0] = true;
  blocked_core_registers_[TR] = true;

  // Reserve T9 for function calls
  blocked_core_registers_[T9] = true;

  // TODO: review; anything else?

  // TODO: make these two for's conditional on is_baseline once
  // all the issues with register saving/restoring are sorted out.
  for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
    blocked_core_registers_[kCoreCalleeSaves[i]] = true;
  }

  for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
    blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
  }
}

Location CodeGeneratorMIPS64::AllocateFreeRegister(Primitive::Type type) const {
  if (type == Primitive::kPrimVoid) {
    LOG(FATAL) << "Unreachable type " << type;
  }

  if (Primitive::IsFloatingPointType(type)) {
    size_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFpuRegisters);
    return Location::FpuRegisterLocation(reg);
  } else {
    size_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfGpuRegisters);
    return Location::RegisterLocation(reg);
  }
}

size_t CodeGeneratorMIPS64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreToOffset(kStoreDoubleword, GpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFromOffset(kLoadDoubleword, GpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ StoreFpuToOffset(kStoreDoubleword, FpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

size_t CodeGeneratorMIPS64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  __ LoadFpuFromOffset(kLoadDoubleword, FpuRegister(reg_id), SP, stack_index);
  return kMips64WordSize;
}

void CodeGeneratorMIPS64::DumpCoreRegister(std::ostream& stream, int reg) const {
  stream << Mips64ManagedRegister::FromGpuRegister(GpuRegister(reg));
}

void CodeGeneratorMIPS64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
  stream << Mips64ManagedRegister::FromFpuRegister(FpuRegister(reg));
}

void CodeGeneratorMIPS64::InvokeRuntime(int32_t entry_point_offset,
                                        HInstruction* instruction,
                                        uint32_t dex_pc,
                                        SlowPathCode* slow_path) {
  ValidateInvokeRuntime(instruction, slow_path);
  // TODO: anything related to T9/GP/GOT/PIC/.so's?
  __ LoadFromOffset(kLoadDoubleword, T9, TR, entry_point_offset);
  __ Jalr(T9);
  RecordPcInfo(instruction, dex_pc, slow_path);
}

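// Loads the class status and branches to the slow path if it is below kStatusInitialized.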
void InstructionCodeGeneratorMIPS64::GenerateClassInitializationCheck(SlowPathCodeMIPS64* slow_path,
                                                                      GpuRegister class_reg) {
  __ LoadFromOffset(kLoadWord, TMP, class_reg, mirror::Class::StatusOffset().Int32Value());
  __ LoadConst32(AT, mirror::Class::kStatusInitialized);
  __ Bltc(TMP, AT, slow_path->GetEntryLabel());
  // TODO: barrier needed?
  __ Bind(slow_path->GetExitLabel());
}

void InstructionCodeGeneratorMIPS64::GenerateMemoryBarrier(MemBarrierKind kind ATTRIBUTE_UNUSED) {
  __ Sync(0);  // only stype 0 is supported
}

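// Loads the thread flags and branches to the suspend-check slow path if any of them is set.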
void InstructionCodeGeneratorMIPS64::GenerateSuspendCheck(HSuspendCheck* instruction,
                                                          HBasicBlock* successor) {
  SuspendCheckSlowPathMIPS64* slow_path =
      new (GetGraph()->GetArena()) SuspendCheckSlowPathMIPS64(instruction, successor);
  codegen_->AddSlowPath(slow_path);

  __ LoadFromOffset(kLoadUnsignedHalfword,
                    TMP,
                    TR,
                    Thread::ThreadFlagsOffset<kMips64WordSize>().Int32Value());
  if (successor == nullptr) {
    __ Bnezc(TMP, slow_path->GetEntryLabel());
    __ Bind(slow_path->GetReturnLabel());
  } else {
    __ Beqzc(TMP, codegen_->GetLabelOf(successor));
    __ B(slow_path->GetEntryLabel());
    // slow_path will return to GetLabelOf(successor).
  }
}

InstructionCodeGeneratorMIPS64::InstructionCodeGeneratorMIPS64(HGraph* graph,
                                                               CodeGeneratorMIPS64* codegen)
      : HGraphVisitor(graph),
        assembler_(codegen->GetAssembler()),
        codegen_(codegen) {}

void LocationsBuilderMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
  DCHECK_EQ(instruction->InputCount(), 2U);
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  Primitive::Type type = instruction->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      HInstruction* right = instruction->InputAt(1);
      bool can_use_imm = false;
      if (right->IsConstant()) {
        int64_t imm = CodeGenerator::GetInt64ValueOf(right->AsConstant());
        if (instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()) {
          can_use_imm = IsUint<16>(imm);
        } else if (instruction->IsAdd()) {
          can_use_imm = IsInt<16>(imm);
        } else {
          DCHECK(instruction->IsSub());
          can_use_imm = IsInt<16>(-imm);
        }
      }
      if (can_use_imm)
        locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
      else
        locations->SetInAt(1, Location::RequiresRegister());
      locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
      }
      break;

    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble:
      locations->SetInAt(0, Location::RequiresFpuRegister());
      locations->SetInAt(1, Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
      break;

    default:
      LOG(FATAL) << "Unexpected " << instruction->DebugName() << " type " << type;
  }
}

void InstructionCodeGeneratorMIPS64::HandleBinaryOp(HBinaryOperation* instruction) {
  Primitive::Type type = instruction->GetType();
  LocationSummary* locations = instruction->GetLocations();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      Location rhs_location = locations->InAt(1);

      GpuRegister rhs_reg = ZERO;
      int64_t rhs_imm = 0;
      bool use_imm = rhs_location.IsConstant();
      if (use_imm) {
        rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
      } else {
        rhs_reg = rhs_location.AsRegister<GpuRegister>();
      }

      if (instruction->IsAnd()) {
        if (use_imm)
          __ Andi(dst, lhs, rhs_imm);
        else
          __ And(dst, lhs, rhs_reg);
      } else if (instruction->IsOr()) {
        if (use_imm)
          __ Ori(dst, lhs, rhs_imm);
        else
          __ Or(dst, lhs, rhs_reg);
      } else if (instruction->IsXor()) {
        if (use_imm)
          __ Xori(dst, lhs, rhs_imm);
        else
          __ Xor(dst, lhs, rhs_reg);
      } else if (instruction->IsAdd()) {
        if (type == Primitive::kPrimInt) {
          if (use_imm)
            __ Addiu(dst, lhs, rhs_imm);
          else
            __ Addu(dst, lhs, rhs_reg);
        } else {
          if (use_imm)
            __ Daddiu(dst, lhs, rhs_imm);
          else
            __ Daddu(dst, lhs, rhs_reg);
        }
      } else {
        DCHECK(instruction->IsSub());
        if (type == Primitive::kPrimInt) {
          if (use_imm)
            __ Addiu(dst, lhs, -rhs_imm);
          else
            __ Subu(dst, lhs, rhs_reg);
        } else {
          if (use_imm)
            __ Daddiu(dst, lhs, -rhs_imm);
          else
            __ Dsubu(dst, lhs, rhs_reg);
        }
      }
      break;
    }
    case Primitive::kPrimFloat:
    case Primitive::kPrimDouble: {
      FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
      FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
      FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
      if (instruction->IsAdd()) {
        if (type == Primitive::kPrimFloat)
          __ AddS(dst, lhs, rhs);
        else
          __ AddD(dst, lhs, rhs);
      } else if (instruction->IsSub()) {
        if (type == Primitive::kPrimFloat)
          __ SubS(dst, lhs, rhs);
        else
          __ SubD(dst, lhs, rhs);
      } else {
        LOG(FATAL) << "Unexpected floating-point binary operation";
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected binary operation type " << type;
  }
}

void LocationsBuilderMIPS64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());

  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
  Primitive::Type type = instr->GetResultType();
  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      locations->SetInAt(0, Location::RequiresRegister());
      locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
      locations->SetOut(Location::RequiresRegister());
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift type " << type;
  }
}

void InstructionCodeGeneratorMIPS64::HandleShift(HBinaryOperation* instr) {
  DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
  LocationSummary* locations = instr->GetLocations();
  Primitive::Type type = instr->GetType();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimLong: {
      GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
      GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
      Location rhs_location = locations->InAt(1);

      GpuRegister rhs_reg = ZERO;
      int64_t rhs_imm = 0;
      bool use_imm = rhs_location.IsConstant();
      if (use_imm) {
        rhs_imm = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant());
      } else {
        rhs_reg = rhs_location.AsRegister<GpuRegister>();
      }

      if (use_imm) {
        uint32_t shift_value = (type == Primitive::kPrimInt)
            ? static_cast<uint32_t>(rhs_imm & kMaxIntShiftValue)
            : static_cast<uint32_t>(rhs_imm & kMaxLongShiftValue);

        if (type == Primitive::kPrimInt) {
          if (instr->IsShl()) {
            __ Sll(dst, lhs, shift_value);
          } else if (instr->IsShr()) {
            __ Sra(dst, lhs, shift_value);
          } else {
            __ Srl(dst, lhs, shift_value);
          }
        } else {
          if (shift_value < 32) {
            if (instr->IsShl()) {
              __ Dsll(dst, lhs, shift_value);
            } else if (instr->IsShr()) {
              __ Dsra(dst, lhs, shift_value);
            } else {
              __ Dsrl(dst, lhs, shift_value);
            }
          } else {
            shift_value -= 32;
            if (instr->IsShl()) {
              __ Dsll32(dst, lhs, shift_value);
            } else if (instr->IsShr()) {
              __ Dsra32(dst, lhs, shift_value);
            } else {
              __ Dsrl32(dst, lhs, shift_value);
            }
          }
        }
      } else {
        if (type == Primitive::kPrimInt) {
          if (instr->IsShl()) {
            __ Sllv(dst, lhs, rhs_reg);
          } else if (instr->IsShr()) {
            __ Srav(dst, lhs, rhs_reg);
          } else {
            __ Srlv(dst, lhs, rhs_reg);
          }
        } else {
          if (instr->IsShl()) {
            __ Dsllv(dst, lhs, rhs_reg);
          } else if (instr->IsShr()) {
            __ Dsrav(dst, lhs, rhs_reg);
          } else {
            __ Dsrlv(dst, lhs, rhs_reg);
          }
        }
      }
      break;
    }
    default:
      LOG(FATAL) << "Unexpected shift operation type " << type;
  }
}

void LocationsBuilderMIPS64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitAdd(HAdd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void InstructionCodeGeneratorMIPS64::VisitAnd(HAnd* instruction) {
  HandleBinaryOp(instruction);
}

void LocationsBuilderMIPS64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations =
      new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
  if (Primitive::IsFloatingPointType(instruction->GetType())) {
    locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
  } else {
    locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
  }
}

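// Loads an array element. When the index is a constant the element is addressed directly off
// `obj`; otherwise the scaled index is added to `obj` in TMP and the load goes through TMP.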
void InstructionCodeGeneratorMIPS64::VisitArrayGet(HArrayGet* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  Location index = locations->InAt(1);
  Primitive::Type type = instruction->GetType();

  switch (type) {
    case Primitive::kPrimBoolean: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
      } else {
        __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
        __ LoadFromOffset(kLoadUnsignedByte, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimByte: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
        __ LoadFromOffset(kLoadSignedByte, out, obj, offset);
      } else {
        __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
        __ LoadFromOffset(kLoadSignedByte, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimShort: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadSignedHalfword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimChar: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
        __ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadUnsignedHalfword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      LoadOperandType load_type = (type == Primitive::kPrimNot) ? kLoadUnsignedWord : kLoadWord;
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFromOffset(load_type, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(load_type, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimLong: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
      GpuRegister out = locations->Out().AsRegister<GpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFromOffset(kLoadDoubleword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ LoadFromOffset(kLoadDoubleword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimFloat: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
      FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
        __ LoadFpuFromOffset(kLoadWord, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
        __ Daddu(TMP, obj, TMP);
        __ LoadFpuFromOffset(kLoadWord, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimDouble: {
      uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
      FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
      if (index.IsConstant()) {
        size_t offset =
            (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
        __ LoadFpuFromOffset(kLoadDoubleword, out, obj, offset);
      } else {
        __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
        __ Daddu(TMP, obj, TMP);
        __ LoadFpuFromOffset(kLoadDoubleword, out, TMP, data_offset);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unreachable type " << instruction->GetType();
      UNREACHABLE();
  }
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderMIPS64::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}

void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
  GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
  GpuRegister out = locations->Out().AsRegister<GpuRegister>();
  __ LoadFromOffset(kLoadWord, out, obj, offset);
  codegen_->MaybeRecordImplicitNullCheck(instruction);
}

void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) {
  Primitive::Type value_type = instruction->GetComponentType();
  bool is_object = value_type == Primitive::kPrimNot;
  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
      instruction,
      is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
  if (is_object) {
    InvokeRuntimeCallingConvention calling_convention;
    locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
    locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
    locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  } else {
    locations->SetInAt(0, Location::RequiresRegister());
    locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
    if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) {
      locations->SetInAt(2, Location::RequiresFpuRegister());
    } else {
      locations->SetInAt(2, Location::RequiresRegister());
    }
  }
}

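// Stores an array element. Reference stores are marked as calls by the locations builder above,
// so `needs_runtime_call` routes them to the pAputObject runtime entry point; other element
// types are stored directly at `obj` plus the (possibly scaled) index.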
1439void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) {
1440 LocationSummary* locations = instruction->GetLocations();
1441 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1442 Location index = locations->InAt(1);
1443 Primitive::Type value_type = instruction->GetComponentType();
1444 bool needs_runtime_call = locations->WillCall();
1445 bool needs_write_barrier =
1446 CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
1447
1448 switch (value_type) {
1449 case Primitive::kPrimBoolean:
1450 case Primitive::kPrimByte: {
1451 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
1452 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1453 if (index.IsConstant()) {
1454 size_t offset =
1455 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
1456 __ StoreToOffset(kStoreByte, value, obj, offset);
1457 } else {
1458 __ Daddu(TMP, obj, index.AsRegister<GpuRegister>());
1459 __ StoreToOffset(kStoreByte, value, TMP, data_offset);
1460 }
1461 break;
1462 }
1463
1464 case Primitive::kPrimShort:
1465 case Primitive::kPrimChar: {
1466 uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
1467 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1468 if (index.IsConstant()) {
1469 size_t offset =
1470 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
1471 __ StoreToOffset(kStoreHalfword, value, obj, offset);
1472 } else {
1473 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2);
1474 __ Daddu(TMP, obj, TMP);
1475 __ StoreToOffset(kStoreHalfword, value, TMP, data_offset);
1476 }
1477 break;
1478 }
1479
1480 case Primitive::kPrimInt:
1481 case Primitive::kPrimNot: {
1482 if (!needs_runtime_call) {
1483 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
1484 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1485 if (index.IsConstant()) {
1486 size_t offset =
1487 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
1488 __ StoreToOffset(kStoreWord, value, obj, offset);
1489 } else {
1490 DCHECK(index.IsRegister()) << index;
1491 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
1492 __ Daddu(TMP, obj, TMP);
1493 __ StoreToOffset(kStoreWord, value, TMP, data_offset);
1494 }
1495 codegen_->MaybeRecordImplicitNullCheck(instruction);
1496 if (needs_write_barrier) {
1497 DCHECK_EQ(value_type, Primitive::kPrimNot);
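// Dirty the card covering `obj` so the GC rescans it for the reference just stored into the array.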
1498 codegen_->MarkGCCard(obj, value);
1499 }
1500 } else {
1501 DCHECK_EQ(value_type, Primitive::kPrimNot);
1502 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
1503 instruction,
1504 instruction->GetDexPc(),
1505 nullptr);
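// The pAputObject runtime entry point performs the component-type (ArrayStoreException)
// check and the card marking itself, so no explicit write barrier is emitted on this path.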
1506 }
1507 break;
1508 }
1509
1510 case Primitive::kPrimLong: {
1511 uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
1512 GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>();
1513 if (index.IsConstant()) {
1514 size_t offset =
1515 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
1516 __ StoreToOffset(kStoreDoubleword, value, obj, offset);
1517 } else {
1518 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
1519 __ Daddu(TMP, obj, TMP);
1520 __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset);
1521 }
1522 break;
1523 }
1524
1525 case Primitive::kPrimFloat: {
1526 uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value();
1527 FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
1528 DCHECK(locations->InAt(2).IsFpuRegister());
1529 if (index.IsConstant()) {
1530 size_t offset =
1531 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
1532 __ StoreFpuToOffset(kStoreWord, value, obj, offset);
1533 } else {
1534 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4);
1535 __ Daddu(TMP, obj, TMP);
1536 __ StoreFpuToOffset(kStoreWord, value, TMP, data_offset);
1537 }
1538 break;
1539 }
1540
1541 case Primitive::kPrimDouble: {
1542 uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value();
1543 FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>();
1544 DCHECK(locations->InAt(2).IsFpuRegister());
1545 if (index.IsConstant()) {
1546 size_t offset =
1547 (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
1548 __ StoreFpuToOffset(kStoreDoubleword, value, obj, offset);
1549 } else {
1550 __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8);
1551 __ Daddu(TMP, obj, TMP);
1552 __ StoreFpuToOffset(kStoreDoubleword, value, TMP, data_offset);
1553 }
1554 break;
1555 }
1556
1557 case Primitive::kPrimVoid:
1558 LOG(FATAL) << "Unreachable type " << instruction->GetType();
1559 UNREACHABLE();
1560 }
1561
1562 // For ints and objects, the implicit null check was already recorded in the switch above.
1563 if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) {
1564 codegen_->MaybeRecordImplicitNullCheck(instruction);
1565 }
1566}
1567
1568void LocationsBuilderMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
1569 LocationSummary* locations =
1570 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1571 locations->SetInAt(0, Location::RequiresRegister());
1572 locations->SetInAt(1, Location::RequiresRegister());
1573 if (instruction->HasUses()) {
1574 locations->SetOut(Location::SameAsFirstInput());
1575 }
1576}
1577
1578void InstructionCodeGeneratorMIPS64::VisitBoundsCheck(HBoundsCheck* instruction) {
1579 LocationSummary* locations = instruction->GetLocations();
1580 BoundsCheckSlowPathMIPS64* slow_path =
1581 new (GetGraph()->GetArena()) BoundsCheckSlowPathMIPS64(instruction);
1582 codegen_->AddSlowPath(slow_path);
1583
1584 GpuRegister index = locations->InAt(0).AsRegister<GpuRegister>();
1585 GpuRegister length = locations->InAt(1).AsRegister<GpuRegister>();
1586
1587 // length is limited by the maximum positive signed 32-bit integer.
1588 // Unsigned comparison of length and index checks for index < 0
1589 // and for length <= index simultaneously.
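// For example, with length == 5: index == -1 compares as a huge unsigned value and
// index == 7 is >= 5, so a single unsigned branch catches both out-of-range cases.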
1590 // Mips R6 requires lhs != rhs for compact branches.
1591 if (index == length) {
1592 __ B(slow_path->GetEntryLabel());
1593 } else {
1594 __ Bgeuc(index, length, slow_path->GetEntryLabel());
1595 }
1596}
1597
1598void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
1599 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
1600 instruction,
1601 LocationSummary::kCallOnSlowPath);
1602 locations->SetInAt(0, Location::RequiresRegister());
1603 locations->SetInAt(1, Location::RequiresRegister());
1604 // Note that TypeCheckSlowPathMIPS64 uses this register too.
1605 locations->AddTemp(Location::RequiresRegister());
1606}
1607
1608void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
1609 LocationSummary* locations = instruction->GetLocations();
1610 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
1611 GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
1612 GpuRegister obj_cls = locations->GetTemp(0).AsRegister<GpuRegister>();
1613
1614 SlowPathCodeMIPS64* slow_path =
1615 new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
1616 codegen_->AddSlowPath(slow_path);
1617
1618 // TODO: avoid this check if we know obj is not null.
1619 __ Beqzc(obj, slow_path->GetExitLabel());
1620 // Compare the class of `obj` with `cls`.
1621 __ LoadFromOffset(kLoadUnsignedWord, obj_cls, obj, mirror::Object::ClassOffset().Int32Value());
1622 __ Bnec(obj_cls, cls, slow_path->GetEntryLabel());
1623 __ Bind(slow_path->GetExitLabel());
1624}
1625
1626void LocationsBuilderMIPS64::VisitClinitCheck(HClinitCheck* check) {
1627 LocationSummary* locations =
1628 new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
1629 locations->SetInAt(0, Location::RequiresRegister());
1630 if (check->HasUses()) {
1631 locations->SetOut(Location::SameAsFirstInput());
1632 }
1633}
1634
1635void InstructionCodeGeneratorMIPS64::VisitClinitCheck(HClinitCheck* check) {
1636 // We assume the class is not null.
1637 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
1638 check->GetLoadClass(),
1639 check,
1640 check->GetDexPc(),
1641 true);
1642 codegen_->AddSlowPath(slow_path);
1643 GenerateClassInitializationCheck(slow_path,
1644 check->GetLocations()->InAt(0).AsRegister<GpuRegister>());
1645}
1646
1647void LocationsBuilderMIPS64::VisitCompare(HCompare* compare) {
1648 Primitive::Type in_type = compare->InputAt(0)->GetType();
1649
1650 LocationSummary::CallKind call_kind = Primitive::IsFloatingPointType(in_type)
1651 ? LocationSummary::kCall
1652 : LocationSummary::kNoCall;
1653
1654 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(compare, call_kind);
1655
1656 switch (in_type) {
1657 case Primitive::kPrimLong:
1658 locations->SetInAt(0, Location::RequiresRegister());
1659 locations->SetInAt(1, Location::RequiresRegister());
1660 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1661 break;
1662
1663 case Primitive::kPrimFloat:
1664 case Primitive::kPrimDouble: {
1665 InvokeRuntimeCallingConvention calling_convention;
1666 locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
1667 locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
1668 locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimInt));
1669 break;
1670 }
1671
1672 default:
1673 LOG(FATAL) << "Unexpected type for compare operation " << in_type;
1674 }
1675}
1676
1677void InstructionCodeGeneratorMIPS64::VisitCompare(HCompare* instruction) {
1678 LocationSummary* locations = instruction->GetLocations();
1679 Primitive::Type in_type = instruction->InputAt(0)->GetType();
1680
1681 // 0 if: left == right
1682 // 1 if: left > right
1683 // -1 if: left < right
1684 switch (in_type) {
1685 case Primitive::kPrimLong: {
1686 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1687 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1688 GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
1689 // TODO: more efficient (direct) comparison with a constant
1690 __ Slt(TMP, lhs, rhs);
1691 __ Slt(dst, rhs, lhs);
1692 __ Subu(dst, dst, TMP);
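// TMP = (lhs < rhs), dst = (lhs > rhs); dst - TMP then yields -1, 0 or +1.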
1693 break;
1694 }
1695
1696 case Primitive::kPrimFloat:
1697 case Primitive::kPrimDouble: {
1698 int32_t entry_point_offset;
1699 if (in_type == Primitive::kPrimFloat) {
1700 entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgFloat)
1701 : QUICK_ENTRY_POINT(pCmplFloat);
1702 } else {
1703 entry_point_offset = instruction->IsGtBias() ? QUICK_ENTRY_POINT(pCmpgDouble)
1704 : QUICK_ENTRY_POINT(pCmplDouble);
1705 }
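// The "g" entry points return +1 when either operand is NaN and the "l" entry points
// return -1, matching the fcmpg/fcmpl and dcmpg/dcmpl bytecode semantics.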
1706 codegen_->InvokeRuntime(entry_point_offset, instruction, instruction->GetDexPc(), nullptr);
1707 break;
1708 }
1709
1710 default:
1711 LOG(FATAL) << "Unimplemented compare type " << in_type;
1712 }
1713}
1714
1715void LocationsBuilderMIPS64::VisitCondition(HCondition* instruction) {
1716 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
1717 locations->SetInAt(0, Location::RequiresRegister());
1718 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
1719 if (instruction->NeedsMaterialization()) {
1720 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1721 }
1722}
1723
1724void InstructionCodeGeneratorMIPS64::VisitCondition(HCondition* instruction) {
1725 if (!instruction->NeedsMaterialization()) {
1726 return;
1727 }
1728
1729 LocationSummary* locations = instruction->GetLocations();
1730
1731 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1732 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1733 Location rhs_location = locations->InAt(1);
1734
1735 GpuRegister rhs_reg = ZERO;
1736 int64_t rhs_imm = 0;
1737 bool use_imm = rhs_location.IsConstant();
1738 if (use_imm) {
1739 rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
1740 } else {
1741 rhs_reg = rhs_location.AsRegister<GpuRegister>();
1742 }
1743
1744 IfCondition if_cond = instruction->GetCondition();
1745
1746 switch (if_cond) {
1747 case kCondEQ:
1748 case kCondNE:
1749 if (use_imm && IsUint<16>(rhs_imm)) {
1750 __ Xori(dst, lhs, rhs_imm);
1751 } else {
1752 if (use_imm) {
1753 rhs_reg = TMP;
1754 __ LoadConst32(rhs_reg, rhs_imm);
1755 }
1756 __ Xor(dst, lhs, rhs_reg);
1757 }
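// dst is now zero iff lhs == rhs; normalize it to a 0/1 boolean below.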
1758 if (if_cond == kCondEQ) {
1759 __ Sltiu(dst, dst, 1);
1760 } else {
1761 __ Sltu(dst, ZERO, dst);
1762 }
1763 break;
1764
1765 case kCondLT:
1766 case kCondGE:
1767 if (use_imm && IsInt<16>(rhs_imm)) {
1768 __ Slti(dst, lhs, rhs_imm);
1769 } else {
1770 if (use_imm) {
1771 rhs_reg = TMP;
1772 __ LoadConst32(rhs_reg, rhs_imm);
1773 }
1774 __ Slt(dst, lhs, rhs_reg);
1775 }
1776 if (if_cond == kCondGE) {
1777 // Simulate lhs >= rhs via !(lhs < rhs) since there's
1778 // only the slt instruction but no sge.
1779 __ Xori(dst, dst, 1);
1780 }
1781 break;
1782
1783 case kCondLE:
1784 case kCondGT:
1785 if (use_imm && IsInt<16>(rhs_imm + 1)) {
1786 // Simulate lhs <= rhs via lhs < rhs + 1.
1787 __ Slti(dst, lhs, rhs_imm + 1);
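// e.g. lhs <= 7 is emitted as slti dst, lhs, 8.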
1788 if (if_cond == kCondGT) {
1789 // Simulate lhs > rhs via !(lhs <= rhs) since there's
1790 // only the slti instruction but no sgti.
1791 __ Xori(dst, dst, 1);
1792 }
1793 } else {
1794 if (use_imm) {
1795 rhs_reg = TMP;
1796 __ LoadConst32(rhs_reg, rhs_imm);
1797 }
1798 __ Slt(dst, rhs_reg, lhs);
1799 if (if_cond == kCondLE) {
1800 // Simulate lhs <= rhs via !(rhs < lhs) since there's
1801 // only the slt instruction but no sle.
1802 __ Xori(dst, dst, 1);
1803 }
1804 }
1805 break;
1806 }
1807}
1808
1809void LocationsBuilderMIPS64::VisitDiv(HDiv* div) {
1810 LocationSummary* locations =
1811 new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
1812 switch (div->GetResultType()) {
1813 case Primitive::kPrimInt:
1814 case Primitive::kPrimLong:
1815 locations->SetInAt(0, Location::RequiresRegister());
1816 locations->SetInAt(1, Location::RequiresRegister());
1817 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1818 break;
1819
1820 case Primitive::kPrimFloat:
1821 case Primitive::kPrimDouble:
1822 locations->SetInAt(0, Location::RequiresFpuRegister());
1823 locations->SetInAt(1, Location::RequiresFpuRegister());
1824 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1825 break;
1826
1827 default:
1828 LOG(FATAL) << "Unexpected div type " << div->GetResultType();
1829 }
1830}
1831
1832void InstructionCodeGeneratorMIPS64::VisitDiv(HDiv* instruction) {
1833 Primitive::Type type = instruction->GetType();
1834 LocationSummary* locations = instruction->GetLocations();
1835
1836 switch (type) {
1837 case Primitive::kPrimInt:
1838 case Primitive::kPrimLong: {
1839 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
1840 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
1841 GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
1842 if (type == Primitive::kPrimInt)
1843 __ DivR6(dst, lhs, rhs);
1844 else
1845 __ Ddiv(dst, lhs, rhs);
1846 break;
1847 }
1848 case Primitive::kPrimFloat:
1849 case Primitive::kPrimDouble: {
1850 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
1851 FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
1852 FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
1853 if (type == Primitive::kPrimFloat)
1854 __ DivS(dst, lhs, rhs);
1855 else
1856 __ DivD(dst, lhs, rhs);
1857 break;
1858 }
1859 default:
1860 LOG(FATAL) << "Unexpected div type " << type;
1861 }
1862}
1863
1864void LocationsBuilderMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
1865 LocationSummary* locations =
1866 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1867 locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
1868 if (instruction->HasUses()) {
1869 locations->SetOut(Location::SameAsFirstInput());
1870 }
1871}
1872
1873void InstructionCodeGeneratorMIPS64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
1874 SlowPathCodeMIPS64* slow_path =
1875 new (GetGraph()->GetArena()) DivZeroCheckSlowPathMIPS64(instruction);
1876 codegen_->AddSlowPath(slow_path);
1877 Location value = instruction->GetLocations()->InAt(0);
1878
1879 Primitive::Type type = instruction->GetType();
1880
1881 if ((type == Primitive::kPrimBoolean) || !Primitive::IsIntegralType(type)) {
1882 LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
1883 return;
1884 }
1885
1886 if (value.IsConstant()) {
1887 int64_t divisor = codegen_->GetInt64ValueOf(value.GetConstant()->AsConstant());
1888 if (divisor == 0) {
1889 __ B(slow_path->GetEntryLabel());
1890 } else {
1891 // A division by a non-zero constant is valid. We don't need to perform
1892 // any check, so simply fall through.
1893 }
1894 } else {
1895 __ Beqzc(value.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
1896 }
1897}
1898
1899void LocationsBuilderMIPS64::VisitDoubleConstant(HDoubleConstant* constant) {
1900 LocationSummary* locations =
1901 new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1902 locations->SetOut(Location::ConstantLocation(constant));
1903}
1904
1905void InstructionCodeGeneratorMIPS64::VisitDoubleConstant(HDoubleConstant* cst ATTRIBUTE_UNUSED) {
1906 // Will be generated at use site.
1907}
1908
1909void LocationsBuilderMIPS64::VisitExit(HExit* exit) {
1910 exit->SetLocations(nullptr);
1911}
1912
1913void InstructionCodeGeneratorMIPS64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
1914}
1915
1916void LocationsBuilderMIPS64::VisitFloatConstant(HFloatConstant* constant) {
1917 LocationSummary* locations =
1918 new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
1919 locations->SetOut(Location::ConstantLocation(constant));
1920}
1921
1922void InstructionCodeGeneratorMIPS64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
1923 // Will be generated at use site.
1924}
1925
1926void InstructionCodeGeneratorMIPS64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
1927 DCHECK(!successor->IsExitBlock());
1928 HBasicBlock* block = got->GetBlock();
1929 HInstruction* previous = got->GetPrevious();
1930 HLoopInformation* info = block->GetLoopInformation();
1931
1932 if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
1933 codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
1934 GenerateSuspendCheck(info->GetSuspendCheck(), successor);
1935 return;
1936 }
1937 if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
1938 GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
1939 }
1940 if (!codegen_->GoesToNextBlock(block, successor)) {
1941 __ B(codegen_->GetLabelOf(successor));
1942 }
1943}
1944
1945void LocationsBuilderMIPS64::VisitGoto(HGoto* got) {
1946 got->SetLocations(nullptr);
1947}
1948
1949void InstructionCodeGeneratorMIPS64::VisitGoto(HGoto* got) {
1950 HandleGoto(got, got->GetSuccessor());
1951}
1952
1953void LocationsBuilderMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
1954 try_boundary->SetLocations(nullptr);
1955}
1956
1957void InstructionCodeGeneratorMIPS64::VisitTryBoundary(HTryBoundary* try_boundary) {
1958 HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
1959 if (!successor->IsExitBlock()) {
1960 HandleGoto(try_boundary, successor);
1961 }
1962}
1963
1964void InstructionCodeGeneratorMIPS64::GenerateTestAndBranch(HInstruction* instruction,
1965 Label* true_target,
1966 Label* false_target,
1967 Label* always_true_target) {
1968 HInstruction* cond = instruction->InputAt(0);
1969 HCondition* condition = cond->AsCondition();
1970
1971 if (cond->IsIntConstant()) {
1972 int32_t cond_value = cond->AsIntConstant()->GetValue();
1973 if (cond_value == 1) {
1974 if (always_true_target != nullptr) {
1975 __ B(always_true_target);
1976 }
1977 return;
1978 } else {
1979 DCHECK_EQ(cond_value, 0);
1980 }
1981 } else if (!cond->IsCondition() || condition->NeedsMaterialization()) {
1982 // The condition instruction has been materialized, compare the output to 0.
1983 Location cond_val = instruction->GetLocations()->InAt(0);
1984 DCHECK(cond_val.IsRegister());
1985 __ Bnezc(cond_val.AsRegister<GpuRegister>(), true_target);
1986 } else {
1987 // The condition instruction has not been materialized, use its inputs as
1988 // the comparison and its condition as the branch condition.
1989 GpuRegister lhs = condition->GetLocations()->InAt(0).AsRegister<GpuRegister>();
1990 Location rhs_location = condition->GetLocations()->InAt(1);
1991 GpuRegister rhs_reg = ZERO;
1992 int32_t rhs_imm = 0;
1993 bool use_imm = rhs_location.IsConstant();
1994 if (use_imm) {
1995 rhs_imm = CodeGenerator::GetInt32ValueOf(rhs_location.GetConstant());
1996 } else {
1997 rhs_reg = rhs_location.AsRegister<GpuRegister>();
1998 }
1999
2000 IfCondition if_cond = condition->GetCondition();
2001 if (use_imm && rhs_imm == 0) {
2002 switch (if_cond) {
2003 case kCondEQ:
2004 __ Beqzc(lhs, true_target);
2005 break;
2006 case kCondNE:
2007 __ Bnezc(lhs, true_target);
2008 break;
2009 case kCondLT:
2010 __ Bltzc(lhs, true_target);
2011 break;
2012 case kCondGE:
2013 __ Bgezc(lhs, true_target);
2014 break;
2015 case kCondLE:
2016 __ Blezc(lhs, true_target);
2017 break;
2018 case kCondGT:
2019 __ Bgtzc(lhs, true_target);
2020 break;
2021 }
2022 } else {
2023 if (use_imm) {
2024 rhs_reg = TMP;
2025 __ LoadConst32(rhs_reg, rhs_imm);
2026 }
2027 // It looks like we can get here with lhs == rhs. Should that be possible at all?
2028 // Mips R6 requires lhs != rhs for compact branches.
2029 if (lhs == rhs_reg) {
2030 DCHECK(!use_imm);
2031 switch (if_cond) {
2032 case kCondEQ:
2033 case kCondGE:
2034 case kCondLE:
2035 // If lhs == rhs, an EQ/GE/LE condition is always true, so branch unconditionally.
2036 __ B(true_target);
2037 break;
2038 case kCondNE:
2039 case kCondLT:
2040 case kCondGT:
2041 // If lhs == rhs, an NE/LT/GT condition is always false, so emit nothing.
2042 break;
2043 }
2044 } else {
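// R6 has two-register compact branches only for EQ/NE/LT/GE (and their unsigned
// forms); LE and GT are emitted below by swapping the operands of Bgec/Bltc.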
2045 switch (if_cond) {
2046 case kCondEQ:
2047 __ Beqc(lhs, rhs_reg, true_target);
2048 break;
2049 case kCondNE:
2050 __ Bnec(lhs, rhs_reg, true_target);
2051 break;
2052 case kCondLT:
2053 __ Bltc(lhs, rhs_reg, true_target);
2054 break;
2055 case kCondGE:
2056 __ Bgec(lhs, rhs_reg, true_target);
2057 break;
2058 case kCondLE:
2059 __ Bgec(rhs_reg, lhs, true_target);
2060 break;
2061 case kCondGT:
2062 __ Bltc(rhs_reg, lhs, true_target);
2063 break;
2064 }
2065 }
2066 }
2067 }
2068 if (false_target != nullptr) {
2069 __ B(false_target);
2070 }
2071}
2072
2073void LocationsBuilderMIPS64::VisitIf(HIf* if_instr) {
2074 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
2075 HInstruction* cond = if_instr->InputAt(0);
2076 if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
2077 locations->SetInAt(0, Location::RequiresRegister());
2078 }
2079}
2080
2081void InstructionCodeGeneratorMIPS64::VisitIf(HIf* if_instr) {
2082 Label* true_target = codegen_->GetLabelOf(if_instr->IfTrueSuccessor());
2083 Label* false_target = codegen_->GetLabelOf(if_instr->IfFalseSuccessor());
2084 Label* always_true_target = true_target;
2085 if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
2086 if_instr->IfTrueSuccessor())) {
2087 always_true_target = nullptr;
2088 }
2089 if (codegen_->GoesToNextBlock(if_instr->GetBlock(),
2090 if_instr->IfFalseSuccessor())) {
2091 false_target = nullptr;
2092 }
2093 GenerateTestAndBranch(if_instr, true_target, false_target, always_true_target);
2094}
2095
2096void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
2097 LocationSummary* locations = new (GetGraph()->GetArena())
2098 LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
2099 HInstruction* cond = deoptimize->InputAt(0);
2100 DCHECK(cond->IsCondition());
2101 if (cond->AsCondition()->NeedsMaterialization()) {
2102 locations->SetInAt(0, Location::RequiresRegister());
2103 }
2104}
2105
2106void InstructionCodeGeneratorMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
2107 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena())
2108 DeoptimizationSlowPathMIPS64(deoptimize);
2109 codegen_->AddSlowPath(slow_path);
2110 Label* slow_path_entry = slow_path->GetEntryLabel();
2111 GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
2112}
2113
2114void LocationsBuilderMIPS64::HandleFieldGet(HInstruction* instruction,
2115 const FieldInfo& field_info ATTRIBUTE_UNUSED) {
2116 LocationSummary* locations =
2117 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2118 locations->SetInAt(0, Location::RequiresRegister());
2119 if (Primitive::IsFloatingPointType(instruction->GetType())) {
2120 locations->SetOut(Location::RequiresFpuRegister());
2121 } else {
2122 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2123 }
2124}
2125
2126void InstructionCodeGeneratorMIPS64::HandleFieldGet(HInstruction* instruction,
2127 const FieldInfo& field_info) {
2128 Primitive::Type type = field_info.GetFieldType();
2129 LocationSummary* locations = instruction->GetLocations();
2130 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2131 LoadOperandType load_type = kLoadUnsignedByte;
2132 switch (type) {
2133 case Primitive::kPrimBoolean:
2134 load_type = kLoadUnsignedByte;
2135 break;
2136 case Primitive::kPrimByte:
2137 load_type = kLoadSignedByte;
2138 break;
2139 case Primitive::kPrimShort:
2140 load_type = kLoadSignedHalfword;
2141 break;
2142 case Primitive::kPrimChar:
2143 load_type = kLoadUnsignedHalfword;
2144 break;
2145 case Primitive::kPrimInt:
2146 case Primitive::kPrimFloat:
2147 load_type = kLoadWord;
2148 break;
2149 case Primitive::kPrimLong:
2150 case Primitive::kPrimDouble:
2151 load_type = kLoadDoubleword;
2152 break;
2153 case Primitive::kPrimNot:
2154 load_type = kLoadUnsignedWord;
2155 break;
2156 case Primitive::kPrimVoid:
2157 LOG(FATAL) << "Unreachable type " << type;
2158 UNREACHABLE();
2159 }
2160 if (!Primitive::IsFloatingPointType(type)) {
2161 DCHECK(locations->Out().IsRegister());
2162 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2163 __ LoadFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
2164 } else {
2165 DCHECK(locations->Out().IsFpuRegister());
2166 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
2167 __ LoadFpuFromOffset(load_type, dst, obj, field_info.GetFieldOffset().Uint32Value());
2168 }
2169
2170 codegen_->MaybeRecordImplicitNullCheck(instruction);
2171 // TODO: memory barrier?
2172}
2173
2174void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction,
2175 const FieldInfo& field_info ATTRIBUTE_UNUSED) {
2176 LocationSummary* locations =
2177 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2178 locations->SetInAt(0, Location::RequiresRegister());
2179 if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
2180 locations->SetInAt(1, Location::RequiresFpuRegister());
2181 } else {
2182 locations->SetInAt(1, Location::RequiresRegister());
2183 }
2184}
2185
2186void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction,
2187 const FieldInfo& field_info) {
2188 Primitive::Type type = field_info.GetFieldType();
2189 LocationSummary* locations = instruction->GetLocations();
2190 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2191 StoreOperandType store_type = kStoreByte;
2192 switch (type) {
2193 case Primitive::kPrimBoolean:
2194 case Primitive::kPrimByte:
2195 store_type = kStoreByte;
2196 break;
2197 case Primitive::kPrimShort:
2198 case Primitive::kPrimChar:
2199 store_type = kStoreHalfword;
2200 break;
2201 case Primitive::kPrimInt:
2202 case Primitive::kPrimFloat:
2203 case Primitive::kPrimNot:
2204 store_type = kStoreWord;
2205 break;
2206 case Primitive::kPrimLong:
2207 case Primitive::kPrimDouble:
2208 store_type = kStoreDoubleword;
2209 break;
2210 case Primitive::kPrimVoid:
2211 LOG(FATAL) << "Unreachable type " << type;
2212 UNREACHABLE();
2213 }
2214 if (!Primitive::IsFloatingPointType(type)) {
2215 DCHECK(locations->InAt(1).IsRegister());
2216 GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
2217 __ StoreToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
2218 } else {
2219 DCHECK(locations->InAt(1).IsFpuRegister());
2220 FpuRegister src = locations->InAt(1).AsFpuRegister<FpuRegister>();
2221 __ StoreFpuToOffset(store_type, src, obj, field_info.GetFieldOffset().Uint32Value());
2222 }
2223
2224 codegen_->MaybeRecordImplicitNullCheck(instruction);
2225 // TODO: memory barriers?
2226 if (CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1))) {
2227 DCHECK(locations->InAt(1).IsRegister());
2228 GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>();
2229 codegen_->MarkGCCard(obj, src);
2230 }
2231}
2232
2233void LocationsBuilderMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
2234 HandleFieldGet(instruction, instruction->GetFieldInfo());
2235}
2236
2237void InstructionCodeGeneratorMIPS64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
2238 HandleFieldGet(instruction, instruction->GetFieldInfo());
2239}
2240
2241void LocationsBuilderMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
2242 HandleFieldSet(instruction, instruction->GetFieldInfo());
2243}
2244
2245void InstructionCodeGeneratorMIPS64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
2246 HandleFieldSet(instruction, instruction->GetFieldInfo());
2247}
2248
2249void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
2250 LocationSummary::CallKind call_kind =
2251 instruction->IsClassFinal() ? LocationSummary::kNoCall : LocationSummary::kCallOnSlowPath;
2252 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
2253 locations->SetInAt(0, Location::RequiresRegister());
2254 locations->SetInAt(1, Location::RequiresRegister());
2255 // The output overlaps the inputs.
2256 // Note that TypeCheckSlowPathMIPS64 uses this register too.
2257 locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
2258}
2259
2260void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
2261 LocationSummary* locations = instruction->GetLocations();
2262 GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>();
2263 GpuRegister cls = locations->InAt(1).AsRegister<GpuRegister>();
2264 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
2265
2266 Label done;
2267
2268 // Return 0 if `obj` is null.
2269 // TODO: Avoid this check if we know `obj` is not null.
2270 __ Move(out, ZERO);
2271 __ Beqzc(obj, &done);
2272
2273 // Compare the class of `obj` with `cls`.
2274 __ LoadFromOffset(kLoadUnsignedWord, out, obj, mirror::Object::ClassOffset().Int32Value());
2275 if (instruction->IsClassFinal()) {
2276 // Classes must be equal for the instanceof to succeed.
2277 __ Xor(out, out, cls);
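// out is now zero iff the classes are equal; Sltiu converts that to a 0/1 result.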
2278 __ Sltiu(out, out, 1);
2279 } else {
2280 // If the classes are not equal, we go into a slow path.
2281 DCHECK(locations->OnlyCallsOnSlowPath());
2282 SlowPathCodeMIPS64* slow_path =
2283 new (GetGraph()->GetArena()) TypeCheckSlowPathMIPS64(instruction);
2284 codegen_->AddSlowPath(slow_path);
2285 __ Bnec(out, cls, slow_path->GetEntryLabel());
2286 __ LoadConst32(out, 1);
2287 __ Bind(slow_path->GetExitLabel());
2288 }
2289
2290 __ Bind(&done);
2291}
2292
2293void LocationsBuilderMIPS64::VisitIntConstant(HIntConstant* constant) {
2294 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2295 locations->SetOut(Location::ConstantLocation(constant));
2296}
2297
2298void InstructionCodeGeneratorMIPS64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
2299 // Will be generated at use site.
2300}
2301
2302void LocationsBuilderMIPS64::VisitNullConstant(HNullConstant* constant) {
2303 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2304 locations->SetOut(Location::ConstantLocation(constant));
2305}
2306
2307void InstructionCodeGeneratorMIPS64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
2308 // Will be generated at use site.
2309}
2310
2311void LocationsBuilderMIPS64::HandleInvoke(HInvoke* invoke) {
2312 InvokeDexCallingConventionVisitorMIPS64 calling_convention_visitor;
2313 CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
2314}
2315
2316void LocationsBuilderMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
2317 HandleInvoke(invoke);
2318 // The register T0 is required to be used for the hidden argument in
2319 // art_quick_imt_conflict_trampoline, so add the hidden argument.
2320 invoke->GetLocations()->AddTemp(Location::RegisterLocation(T0));
2321}
2322
2323void InstructionCodeGeneratorMIPS64::VisitInvokeInterface(HInvokeInterface* invoke) {
2324 // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
2325 GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
2326 uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
2327 invoke->GetImtIndex() % mirror::Class::kImtSize, kMips64PointerSize).Uint32Value();
2328 Location receiver = invoke->GetLocations()->InAt(0);
2329 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2330 Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);
2331
2332 // Set the hidden argument.
2333 __ LoadConst32(invoke->GetLocations()->GetTemp(1).AsRegister<GpuRegister>(),
2334 invoke->GetDexMethodIndex());
2335
2336 // temp = object->GetClass();
2337 if (receiver.IsStackSlot()) {
2338 __ LoadFromOffset(kLoadUnsignedWord, temp, SP, receiver.GetStackIndex());
2339 __ LoadFromOffset(kLoadUnsignedWord, temp, temp, class_offset);
2340 } else {
2341 __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
2342 }
2343 codegen_->MaybeRecordImplicitNullCheck(invoke);
2344 // temp = temp->GetImtEntryAt(method_offset);
2345 __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
2346 // T9 = temp->GetEntryPoint();
2347 __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
2348 // T9();
2349 __ Jalr(T9);
2350 DCHECK(!codegen_->IsLeafMethod());
2351 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2352}
2353
2354void LocationsBuilderMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
2355 // TODO intrinsic function
2356 HandleInvoke(invoke);
2357}
2358
2359void LocationsBuilderMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2360 // When we do not run baseline, explicit clinit checks triggered by static
2361 // invokes must have been pruned by art::PrepareForRegisterAllocation.
2362 DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2363
2364 // TODO - intrinsic function
2365 HandleInvoke(invoke);
2366
2367 // While SetupBlockedRegisters() blocks registers S2-S8 due to their
2368 // clobbering somewhere else, reduce further register pressure by avoiding
2369 // allocation of a register for the current method pointer like on x86 baseline.
2370 // TODO: remove this once all the issues with register saving/restoring are
2371 // sorted out.
2372 LocationSummary* locations = invoke->GetLocations();
2373 Location location = locations->InAt(invoke->GetCurrentMethodInputIndex());
2374 if (location.IsUnallocated() && location.GetPolicy() == Location::kRequiresRegister) {
2375 locations->SetInAt(invoke->GetCurrentMethodInputIndex(), Location::NoLocation());
2376 }
2377}
2378
2379static bool TryGenerateIntrinsicCode(HInvoke* invoke,
2380 CodeGeneratorMIPS64* codegen ATTRIBUTE_UNUSED) {
2381 if (invoke->GetLocations()->Intrinsified()) {
2382 // TODO - intrinsic function
2383 return true;
2384 }
2385 return false;
2386}
2387
2388void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
2389 // All registers are assumed to be correctly set up per the calling convention.
2390
2391 Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
2392 switch (invoke->GetMethodLoadKind()) {
2393 case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
2394 // temp = thread->string_init_entrypoint
2395 __ LoadFromOffset(kLoadDoubleword,
2396 temp.AsRegister<GpuRegister>(),
2397 TR,
2398 invoke->GetStringInitOffset());
2399 break;
2400 case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
2401 callee_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
2402 break;
2403 case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
2404 __ LoadConst64(temp.AsRegister<GpuRegister>(), invoke->GetMethodAddress());
2405 break;
2406 case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
2407 // TODO: Implement this type. (Needs literal support.) At the moment, the
2408 // CompilerDriver will not direct the backend to use this type for MIPS.
2409 LOG(FATAL) << "Unsupported!";
2410 UNREACHABLE();
2411 case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
2412 // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
2413 FALLTHROUGH_INTENDED;
2414 case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
2415 Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
2416 GpuRegister reg = temp.AsRegister<GpuRegister>();
2417 GpuRegister method_reg;
2418 if (current_method.IsRegister()) {
2419 method_reg = current_method.AsRegister<GpuRegister>();
2420 } else {
2421 // TODO: use the appropriate DCHECK() here if possible.
2422 // DCHECK(invoke->GetLocations()->Intrinsified());
2423 DCHECK(!current_method.IsValid());
2424 method_reg = reg;
2425 __ Ld(reg, SP, kCurrentMethodStackOffset);
2426 }
2427
2428 // temp = temp->dex_cache_resolved_methods_;
2429 __ LoadFromOffset(kLoadDoubleword,
2430 reg,
2431 method_reg,
2432 ArtMethod::DexCacheResolvedMethodsOffset(kMips64PointerSize).Int32Value());
2433 // temp = temp[index_in_cache]
2434 uint32_t index_in_cache = invoke->GetTargetMethod().dex_method_index;
2435 __ LoadFromOffset(kLoadDoubleword,
2436 reg,
2437 reg,
2438 CodeGenerator::GetCachePointerOffset(index_in_cache));
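// reg now holds the resolved ArtMethod* fetched from the declaring method's
// dex cache at index_in_cache.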
2439 break;
2440 }
2441 }
2442
2443 switch (invoke->GetCodePtrLocation()) {
2444 case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
2445 __ Jalr(&frame_entry_label_, T9);
2446 break;
2447 case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
2448 // T9 = invoke->GetDirectCodePtr();
2449 __ LoadConst64(T9, invoke->GetDirectCodePtr());
2450 // T9()
2451 __ Jalr(T9);
2452 break;
2453 case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
2454 // TODO: Implement kCallPCRelative. For the moment, we fall back to kMethodCode.
2455 FALLTHROUGH_INTENDED;
2456 case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
2457 // TODO: Implement kDirectCodeFixup. For the moment, we fall back to kMethodCode.
2458 FALLTHROUGH_INTENDED;
2459 case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
2460 // T9 = callee_method->entry_point_from_quick_compiled_code_;
2461 __ LoadFromOffset(kLoadDoubleword,
2462 T9,
2463 callee_method.AsRegister<GpuRegister>(),
2464 ArtMethod::EntryPointFromQuickCompiledCodeOffset(
2465 kMips64WordSize).Int32Value());
2466 // T9()
2467 __ Jalr(T9);
2468 break;
2469 }
2470 DCHECK(!IsLeafMethod());
2471}
2472
2473void InstructionCodeGeneratorMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
2474 // When we do not run baseline, explicit clinit checks triggered by static
2475 // invokes must have been pruned by art::PrepareForRegisterAllocation.
2476 DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
2477
2478 if (TryGenerateIntrinsicCode(invoke, codegen_)) {
2479 return;
2480 }
2481
2482 LocationSummary* locations = invoke->GetLocations();
2483 codegen_->GenerateStaticOrDirectCall(invoke,
2484 locations->HasTemps()
2485 ? locations->GetTemp(0)
2486 : Location::NoLocation());
2487 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2488}
2489
2490void InstructionCodeGeneratorMIPS64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
2491 // TODO: Try to generate intrinsics code.
2492 LocationSummary* locations = invoke->GetLocations();
2493 Location receiver = locations->InAt(0);
2494 GpuRegister temp = invoke->GetLocations()->GetTemp(0).AsRegister<GpuRegister>();
2495 size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
2496 invoke->GetVTableIndex(), kMips64PointerSize).SizeValue();
2497 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2498 Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kMips64WordSize);
2499
2500 // temp = object->GetClass();
2501 DCHECK(receiver.IsRegister());
2502 __ LoadFromOffset(kLoadUnsignedWord, temp, receiver.AsRegister<GpuRegister>(), class_offset);
2503 codegen_->MaybeRecordImplicitNullCheck(invoke);
2504 // temp = temp->GetMethodAt(method_offset);
2505 __ LoadFromOffset(kLoadDoubleword, temp, temp, method_offset);
2506 // T9 = temp->GetEntryPoint();
2507 __ LoadFromOffset(kLoadDoubleword, T9, temp, entry_point.Int32Value());
2508 // T9();
2509 __ Jalr(T9);
2510 DCHECK(!codegen_->IsLeafMethod());
2511 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
2512}
2513
2514void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
2515 LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
2516 : LocationSummary::kNoCall;
2517 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
2518 locations->SetInAt(0, Location::RequiresRegister());
2519 locations->SetOut(Location::RequiresRegister());
2520}
2521
2522void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) {
2523 LocationSummary* locations = cls->GetLocations();
2524 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
2525 GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
2526 if (cls->IsReferrersClass()) {
2527 DCHECK(!cls->CanCallRuntime());
2528 DCHECK(!cls->MustGenerateClinitCheck());
2529 __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
2530 ArtMethod::DeclaringClassOffset().Int32Value());
2531 } else {
2532 DCHECK(cls->CanCallRuntime());
2533 __ LoadFromOffset(kLoadDoubleword, out, current_method,
2534 ArtMethod::DexCacheResolvedTypesOffset(kMips64PointerSize).Int32Value());
2535 __ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
2536 // TODO: We will need a read barrier here.
2537 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathMIPS64(
2538 cls,
2539 cls,
2540 cls->GetDexPc(),
2541 cls->MustGenerateClinitCheck());
2542 codegen_->AddSlowPath(slow_path);
2543 __ Beqzc(out, slow_path->GetEntryLabel());
2544 if (cls->MustGenerateClinitCheck()) {
2545 GenerateClassInitializationCheck(slow_path, out);
2546 } else {
2547 __ Bind(slow_path->GetExitLabel());
2548 }
2549 }
2550}
2551
2552static int32_t GetExceptionTlsOffset() {
2553 return Thread::ExceptionOffset<kMips64WordSize>().Int32Value();
2554}
2555
2556void LocationsBuilderMIPS64::VisitLoadException(HLoadException* load) {
2557 LocationSummary* locations =
2558 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
2559 locations->SetOut(Location::RequiresRegister());
2560}
2561
2562void InstructionCodeGeneratorMIPS64::VisitLoadException(HLoadException* load) {
2563 GpuRegister out = load->GetLocations()->Out().AsRegister<GpuRegister>();
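// The pending exception is held in the Thread object as a 32-bit reference,
// hence the zero-extending 32-bit load from TR.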
2564 __ LoadFromOffset(kLoadUnsignedWord, out, TR, GetExceptionTlsOffset());
2565}
2566
2567void LocationsBuilderMIPS64::VisitClearException(HClearException* clear) {
2568 new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
2569}
2570
2571void InstructionCodeGeneratorMIPS64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
2572 __ StoreToOffset(kStoreWord, ZERO, TR, GetExceptionTlsOffset());
2573}
2574
2575void LocationsBuilderMIPS64::VisitLoadLocal(HLoadLocal* load) {
2576 load->SetLocations(nullptr);
2577}
2578
2579void InstructionCodeGeneratorMIPS64::VisitLoadLocal(HLoadLocal* load ATTRIBUTE_UNUSED) {
2580 // Nothing to do, this is driven by the code generator.
2581}
2582
2583void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
2584 LocationSummary* locations =
2585 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
2586 locations->SetInAt(0, Location::RequiresRegister());
2587 locations->SetOut(Location::RequiresRegister());
2588}
2589
2590void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) {
2591 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathMIPS64(load);
2592 codegen_->AddSlowPath(slow_path);
2593
2594 LocationSummary* locations = load->GetLocations();
2595 GpuRegister out = locations->Out().AsRegister<GpuRegister>();
2596 GpuRegister current_method = locations->InAt(0).AsRegister<GpuRegister>();
2597 __ LoadFromOffset(kLoadUnsignedWord, out, current_method,
2598 ArtMethod::DeclaringClassOffset().Int32Value());
2599 __ LoadFromOffset(kLoadDoubleword, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
2600 __ LoadFromOffset(kLoadUnsignedWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
2601 // TODO: We will need a read barrier here.
2602 __ Beqzc(out, slow_path->GetEntryLabel());
2603 __ Bind(slow_path->GetExitLabel());
2604}
2605
2606void LocationsBuilderMIPS64::VisitLocal(HLocal* local) {
2607 local->SetLocations(nullptr);
2608}
2609
2610void InstructionCodeGeneratorMIPS64::VisitLocal(HLocal* local) {
2611 DCHECK_EQ(local->GetBlock(), GetGraph()->GetEntryBlock());
2612}
2613
2614void LocationsBuilderMIPS64::VisitLongConstant(HLongConstant* constant) {
2615 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
2616 locations->SetOut(Location::ConstantLocation(constant));
2617}
2618
2619void InstructionCodeGeneratorMIPS64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
2620 // Will be generated at use site.
2621}
2622
2623void LocationsBuilderMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
2624 LocationSummary* locations =
2625 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2626 InvokeRuntimeCallingConvention calling_convention;
2627 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2628}
2629
2630void InstructionCodeGeneratorMIPS64::VisitMonitorOperation(HMonitorOperation* instruction) {
2631 codegen_->InvokeRuntime(instruction->IsEnter()
2632 ? QUICK_ENTRY_POINT(pLockObject)
2633 : QUICK_ENTRY_POINT(pUnlockObject),
2634 instruction,
2635 instruction->GetDexPc(),
2636 nullptr);
2637 CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
2638}
2639
2640void LocationsBuilderMIPS64::VisitMul(HMul* mul) {
2641 LocationSummary* locations =
2642 new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
2643 switch (mul->GetResultType()) {
2644 case Primitive::kPrimInt:
2645 case Primitive::kPrimLong:
2646 locations->SetInAt(0, Location::RequiresRegister());
2647 locations->SetInAt(1, Location::RequiresRegister());
2648 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2649 break;
2650
2651 case Primitive::kPrimFloat:
2652 case Primitive::kPrimDouble:
2653 locations->SetInAt(0, Location::RequiresFpuRegister());
2654 locations->SetInAt(1, Location::RequiresFpuRegister());
2655 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2656 break;
2657
2658 default:
2659 LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
2660 }
2661}
2662
2663void InstructionCodeGeneratorMIPS64::VisitMul(HMul* instruction) {
2664 Primitive::Type type = instruction->GetType();
2665 LocationSummary* locations = instruction->GetLocations();
2666
2667 switch (type) {
2668 case Primitive::kPrimInt:
2669 case Primitive::kPrimLong: {
2670 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2671 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
2672 GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
2673 if (type == Primitive::kPrimInt)
2674 __ MulR6(dst, lhs, rhs);
2675 else
2676 __ Dmul(dst, lhs, rhs);
2677 break;
2678 }
2679 case Primitive::kPrimFloat:
2680 case Primitive::kPrimDouble: {
2681 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
2682 FpuRegister lhs = locations->InAt(0).AsFpuRegister<FpuRegister>();
2683 FpuRegister rhs = locations->InAt(1).AsFpuRegister<FpuRegister>();
2684 if (type == Primitive::kPrimFloat)
2685 __ MulS(dst, lhs, rhs);
2686 else
2687 __ MulD(dst, lhs, rhs);
2688 break;
2689 }
2690 default:
2691 LOG(FATAL) << "Unexpected mul type " << type;
2692 }
2693}
2694
2695void LocationsBuilderMIPS64::VisitNeg(HNeg* neg) {
2696 LocationSummary* locations =
2697 new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
2698 switch (neg->GetResultType()) {
2699 case Primitive::kPrimInt:
2700 case Primitive::kPrimLong:
2701 locations->SetInAt(0, Location::RequiresRegister());
2702 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2703 break;
2704
2705 case Primitive::kPrimFloat:
2706 case Primitive::kPrimDouble:
2707 locations->SetInAt(0, Location::RequiresFpuRegister());
2708 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2709 break;
2710
2711 default:
2712 LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
2713 }
2714}
2715
2716void InstructionCodeGeneratorMIPS64::VisitNeg(HNeg* instruction) {
2717 Primitive::Type type = instruction->GetType();
2718 LocationSummary* locations = instruction->GetLocations();
2719
2720 switch (type) {
2721 case Primitive::kPrimInt:
2722 case Primitive::kPrimLong: {
2723 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2724 GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
2725 if (type == Primitive::kPrimInt)
2726 __ Subu(dst, ZERO, src);
2727 else
2728 __ Dsubu(dst, ZERO, src);
2729 break;
2730 }
2731 case Primitive::kPrimFloat:
2732 case Primitive::kPrimDouble: {
2733 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
2734 FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
2735 if (type == Primitive::kPrimFloat)
2736 __ NegS(dst, src);
2737 else
2738 __ NegD(dst, src);
2739 break;
2740 }
2741 default:
2742 LOG(FATAL) << "Unexpected neg type " << type;
2743 }
2744}
2745
2746void LocationsBuilderMIPS64::VisitNewArray(HNewArray* instruction) {
2747 LocationSummary* locations =
2748 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2749 InvokeRuntimeCallingConvention calling_convention;
2750 locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2751 locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
2752 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2753 locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
2754}
2755
2756void InstructionCodeGeneratorMIPS64::VisitNewArray(HNewArray* instruction) {
2757 LocationSummary* locations = instruction->GetLocations();
2758 // Move a uint16_t value to a register.
2759 __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
2760 codegen_->InvokeRuntime(
2761 GetThreadOffset<kMips64WordSize>(instruction->GetEntrypoint()).Int32Value(),
2762 instruction,
2763 instruction->GetDexPc(),
2764 nullptr);
2765 CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
2766}
2767
2768void LocationsBuilderMIPS64::VisitNewInstance(HNewInstance* instruction) {
2769 LocationSummary* locations =
2770 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
2771 InvokeRuntimeCallingConvention calling_convention;
2772 locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
2773 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
2774 locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
2775}
2776
2777void InstructionCodeGeneratorMIPS64::VisitNewInstance(HNewInstance* instruction) {
2778 LocationSummary* locations = instruction->GetLocations();
2779 // Move a uint16_t value to a register.
2780 __ LoadConst32(locations->GetTemp(0).AsRegister<GpuRegister>(), instruction->GetTypeIndex());
2781 codegen_->InvokeRuntime(
2782 GetThreadOffset<kMips64WordSize>(instruction->GetEntrypoint()).Int32Value(),
2783 instruction,
2784 instruction->GetDexPc(),
2785 nullptr);
2786 CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
2787}
2788
2789void LocationsBuilderMIPS64::VisitNot(HNot* instruction) {
2790 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2791 locations->SetInAt(0, Location::RequiresRegister());
2792 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2793}
2794
2795void InstructionCodeGeneratorMIPS64::VisitNot(HNot* instruction) {
2796 Primitive::Type type = instruction->GetType();
2797 LocationSummary* locations = instruction->GetLocations();
2798
2799 switch (type) {
2800 case Primitive::kPrimInt:
2801 case Primitive::kPrimLong: {
2802 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2803 GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
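// nor dst, src, zero computes ~(src | 0), i.e. the bitwise complement of src.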
2804 __ Nor(dst, src, ZERO);
2805 break;
2806 }
2807
2808 default:
2809 LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
2810 }
2811}
2812
2813void LocationsBuilderMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
2814 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2815 locations->SetInAt(0, Location::RequiresRegister());
2816 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2817}
2818
2819void InstructionCodeGeneratorMIPS64::VisitBooleanNot(HBooleanNot* instruction) {
2820 LocationSummary* locations = instruction->GetLocations();
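// The input is a boolean in {0, 1}, so xori with 1 flips it.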
2821 __ Xori(locations->Out().AsRegister<GpuRegister>(),
2822 locations->InAt(0).AsRegister<GpuRegister>(),
2823 1);
2824}
2825
2826void LocationsBuilderMIPS64::VisitNullCheck(HNullCheck* instruction) {
2827 LocationSummary* locations =
2828 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2829 locations->SetInAt(0, Location::RequiresRegister());
2830 if (instruction->HasUses()) {
2831 locations->SetOut(Location::SameAsFirstInput());
2832 }
2833}
2834
2835void InstructionCodeGeneratorMIPS64::GenerateImplicitNullCheck(HNullCheck* instruction) {
2836 if (codegen_->CanMoveNullCheckToUser(instruction)) {
2837 return;
2838 }
2839 Location obj = instruction->GetLocations()->InAt(0);
2840
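// Loading into the zero register still faults when obj is null; the fault handler
// uses the PC info recorded below to raise the NullPointerException.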
2841 __ Lw(ZERO, obj.AsRegister<GpuRegister>(), 0);
2842 codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
2843}
2844
2845void InstructionCodeGeneratorMIPS64::GenerateExplicitNullCheck(HNullCheck* instruction) {
2846 SlowPathCodeMIPS64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathMIPS64(instruction);
2847 codegen_->AddSlowPath(slow_path);
2848
2849 Location obj = instruction->GetLocations()->InAt(0);
2850
2851 __ Beqzc(obj.AsRegister<GpuRegister>(), slow_path->GetEntryLabel());
2852}
2853
2854void InstructionCodeGeneratorMIPS64::VisitNullCheck(HNullCheck* instruction) {
2855 if (codegen_->GetCompilerOptions().GetImplicitNullChecks()) {
2856 GenerateImplicitNullCheck(instruction);
2857 } else {
2858 GenerateExplicitNullCheck(instruction);
2859 }
2860}
2861
2862void LocationsBuilderMIPS64::VisitOr(HOr* instruction) {
2863 HandleBinaryOp(instruction);
2864}
2865
2866void InstructionCodeGeneratorMIPS64::VisitOr(HOr* instruction) {
2867 HandleBinaryOp(instruction);
2868}
2869
2870void LocationsBuilderMIPS64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
2871 LOG(FATAL) << "Unreachable";
2872}
2873
2874void InstructionCodeGeneratorMIPS64::VisitParallelMove(HParallelMove* instruction) {
2875 codegen_->GetMoveResolver()->EmitNativeCode(instruction);
2876}
2877
2878void LocationsBuilderMIPS64::VisitParameterValue(HParameterValue* instruction) {
2879 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2880 Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
2881 if (location.IsStackSlot()) {
2882 location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2883 } else if (location.IsDoubleStackSlot()) {
2884 location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
2885 }
2886 locations->SetOut(location);
2887}
2888
2889void InstructionCodeGeneratorMIPS64::VisitParameterValue(HParameterValue* instruction
2890 ATTRIBUTE_UNUSED) {
2891 // Nothing to do, the parameter is already at its location.
2892}
2893
2894void LocationsBuilderMIPS64::VisitCurrentMethod(HCurrentMethod* instruction) {
2895 LocationSummary* locations =
2896 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
2897 locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
2898}
2899
2900void InstructionCodeGeneratorMIPS64::VisitCurrentMethod(HCurrentMethod* instruction
2901 ATTRIBUTE_UNUSED) {
2902 // Nothing to do, the method is already at its location.
2903}
2904
2905void LocationsBuilderMIPS64::VisitPhi(HPhi* instruction) {
2906 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2907 for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
2908 locations->SetInAt(i, Location::Any());
2909 }
2910 locations->SetOut(Location::Any());
2911}
2912
2913void InstructionCodeGeneratorMIPS64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
2914 LOG(FATAL) << "Unreachable";
2915}
2916
2917void LocationsBuilderMIPS64::VisitRem(HRem* rem) {
2918 Primitive::Type type = rem->GetResultType();
2919 LocationSummary::CallKind call_kind =
2920 Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
2921 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
2922
2923 switch (type) {
2924 case Primitive::kPrimInt:
2925 case Primitive::kPrimLong:
2926 locations->SetInAt(0, Location::RequiresRegister());
2927 locations->SetInAt(1, Location::RequiresRegister());
2928 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2929 break;
2930
2931 case Primitive::kPrimFloat:
2932 case Primitive::kPrimDouble: {
2933 InvokeRuntimeCallingConvention calling_convention;
2934 locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
2935 locations->SetInAt(1, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(1)));
2936 locations->SetOut(calling_convention.GetReturnLocation(type));
2937 break;
2938 }
2939
2940 default:
2941 LOG(FATAL) << "Unexpected rem type " << type;
2942 }
2943}
2944
2945void InstructionCodeGeneratorMIPS64::VisitRem(HRem* instruction) {
2946 Primitive::Type type = instruction->GetType();
2947 LocationSummary* locations = instruction->GetLocations();
2948
2949 switch (type) {
2950 case Primitive::kPrimInt:
2951 case Primitive::kPrimLong: {
2952 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
2953 GpuRegister lhs = locations->InAt(0).AsRegister<GpuRegister>();
2954 GpuRegister rhs = locations->InAt(1).AsRegister<GpuRegister>();
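      // MIPS64R6 has hardware remainder instructions: MOD for 32-bit and DMOD for 64-bit operands.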
2955 if (type == Primitive::kPrimInt)
2956 __ ModR6(dst, lhs, rhs);
2957 else
2958 __ Dmod(dst, lhs, rhs);
2959 break;
2960 }
2961
2962 case Primitive::kPrimFloat:
2963 case Primitive::kPrimDouble: {
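      // There is no FPU remainder instruction, so fall back to the fmodf/fmod runtime entry points.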
2964 int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
2965 : QUICK_ENTRY_POINT(pFmod);
2966 codegen_->InvokeRuntime(entry_offset, instruction, instruction->GetDexPc(), nullptr);
2967 break;
2968 }
2969 default:
2970 LOG(FATAL) << "Unexpected rem type " << type;
2971 }
2972}
2973
2974void LocationsBuilderMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
2975 memory_barrier->SetLocations(nullptr);
2976}
2977
2978void InstructionCodeGeneratorMIPS64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
2979 GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
2980}
2981
2982void LocationsBuilderMIPS64::VisitReturn(HReturn* ret) {
2983 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(ret);
2984 Primitive::Type return_type = ret->InputAt(0)->GetType();
2985 locations->SetInAt(0, Mips64ReturnLocation(return_type));
2986}
2987
2988void InstructionCodeGeneratorMIPS64::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
2989 codegen_->GenerateFrameExit();
2990}
2991
2992void LocationsBuilderMIPS64::VisitReturnVoid(HReturnVoid* ret) {
2993 ret->SetLocations(nullptr);
2994}
2995
2996void InstructionCodeGeneratorMIPS64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNUSED) {
2997 codegen_->GenerateFrameExit();
2998}
2999
3000void LocationsBuilderMIPS64::VisitShl(HShl* shl) {
3001 HandleShift(shl);
3002}
3003
3004void InstructionCodeGeneratorMIPS64::VisitShl(HShl* shl) {
3005 HandleShift(shl);
3006}
3007
3008void LocationsBuilderMIPS64::VisitShr(HShr* shr) {
3009 HandleShift(shr);
3010}
3011
3012void InstructionCodeGeneratorMIPS64::VisitShr(HShr* shr) {
3013 HandleShift(shr);
3014}
3015
3016void LocationsBuilderMIPS64::VisitStoreLocal(HStoreLocal* store) {
3017 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(store);
3018 Primitive::Type field_type = store->InputAt(1)->GetType();
3019 switch (field_type) {
3020 case Primitive::kPrimNot:
3021 case Primitive::kPrimBoolean:
3022 case Primitive::kPrimByte:
3023 case Primitive::kPrimChar:
3024 case Primitive::kPrimShort:
3025 case Primitive::kPrimInt:
3026 case Primitive::kPrimFloat:
3027 locations->SetInAt(1, Location::StackSlot(codegen_->GetStackSlot(store->GetLocal())));
3028 break;
3029
3030 case Primitive::kPrimLong:
3031 case Primitive::kPrimDouble:
3032 locations->SetInAt(1, Location::DoubleStackSlot(codegen_->GetStackSlot(store->GetLocal())));
3033 break;
3034
3035 default:
3036 LOG(FATAL) << "Unimplemented local type " << field_type;
3037 }
3038}
3039
3040void InstructionCodeGeneratorMIPS64::VisitStoreLocal(HStoreLocal* store ATTRIBUTE_UNUSED) {
3041}
3042
3043void LocationsBuilderMIPS64::VisitSub(HSub* instruction) {
3044 HandleBinaryOp(instruction);
3045}
3046
3047void InstructionCodeGeneratorMIPS64::VisitSub(HSub* instruction) {
3048 HandleBinaryOp(instruction);
3049}
3050
3051void LocationsBuilderMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
3052 HandleFieldGet(instruction, instruction->GetFieldInfo());
3053}
3054
3055void InstructionCodeGeneratorMIPS64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
3056 HandleFieldGet(instruction, instruction->GetFieldInfo());
3057}
3058
3059void LocationsBuilderMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
3060 HandleFieldSet(instruction, instruction->GetFieldInfo());
3061}
3062
3063void InstructionCodeGeneratorMIPS64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
3064 HandleFieldSet(instruction, instruction->GetFieldInfo());
3065}
3066
3067void LocationsBuilderMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
3068 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
3069}
3070
3071void InstructionCodeGeneratorMIPS64::VisitSuspendCheck(HSuspendCheck* instruction) {
3072 HBasicBlock* block = instruction->GetBlock();
3073 if (block->GetLoopInformation() != nullptr) {
3074 DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
3075 // The back edge will generate the suspend check.
3076 return;
3077 }
3078 if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
3079 // The goto will generate the suspend check.
3080 return;
3081 }
3082 GenerateSuspendCheck(instruction, nullptr);
3083}
3084
3085void LocationsBuilderMIPS64::VisitTemporary(HTemporary* temp) {
3086 temp->SetLocations(nullptr);
3087}
3088
3089void InstructionCodeGeneratorMIPS64::VisitTemporary(HTemporary* temp ATTRIBUTE_UNUSED) {
3090 // Nothing to do, this is driven by the code generator.
3091}
3092
3093void LocationsBuilderMIPS64::VisitThrow(HThrow* instruction) {
3094 LocationSummary* locations =
3095 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
3096 InvokeRuntimeCallingConvention calling_convention;
3097 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3098}
3099
3100void InstructionCodeGeneratorMIPS64::VisitThrow(HThrow* instruction) {
3101 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pDeliverException),
3102 instruction,
3103 instruction->GetDexPc(),
3104 nullptr);
3105 CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
3106}
3107
3108void LocationsBuilderMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
3109 Primitive::Type input_type = conversion->GetInputType();
3110 Primitive::Type result_type = conversion->GetResultType();
3111 DCHECK_NE(input_type, result_type);
3112
3113 if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
3114 (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
3115 LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
3116 }
3117
3118 LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
3119 if ((Primitive::IsFloatingPointType(result_type) && input_type == Primitive::kPrimLong) ||
3120 (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type))) {
3121 call_kind = LocationSummary::kCall;
3122 }
3123
3124 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(conversion, call_kind);
3125
3126 if (call_kind == LocationSummary::kNoCall) {
3127 if (Primitive::IsFloatingPointType(input_type)) {
3128 locations->SetInAt(0, Location::RequiresFpuRegister());
3129 } else {
3130 locations->SetInAt(0, Location::RequiresRegister());
3131 }
3132
3133 if (Primitive::IsFloatingPointType(result_type)) {
3134 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
3135 } else {
3136 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
3137 }
3138 } else {
3139 InvokeRuntimeCallingConvention calling_convention;
3140
3141 if (Primitive::IsFloatingPointType(input_type)) {
3142 locations->SetInAt(0, Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(0)));
3143 } else {
3144 locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
3145 }
3146
3147 locations->SetOut(calling_convention.GetReturnLocation(result_type));
3148 }
3149}
3150
3151void InstructionCodeGeneratorMIPS64::VisitTypeConversion(HTypeConversion* conversion) {
3152 LocationSummary* locations = conversion->GetLocations();
3153 Primitive::Type result_type = conversion->GetResultType();
3154 Primitive::Type input_type = conversion->GetInputType();
3155
3156 DCHECK_NE(input_type, result_type);
3157
3158 if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
3159 GpuRegister dst = locations->Out().AsRegister<GpuRegister>();
3160 GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
3161
3162 switch (result_type) {
3163 case Primitive::kPrimChar:
3164 __ Andi(dst, src, 0xFFFF);
3165 break;
3166 case Primitive::kPrimByte:
3167 // long is never converted into types narrower than int directly,
3168 // so SEB and SEH can be used without ever causing unpredictable results
3169 // on 64-bit inputs
3170 DCHECK(input_type != Primitive::kPrimLong);
3171 __ Seb(dst, src);
3172 break;
3173 case Primitive::kPrimShort:
3174 // long is never converted into types narrower than int directly,
3175 // so SEB and SEH can be used without ever causing unpredictable results
3176 // on 64-bit inputs
3177 DCHECK(input_type != Primitive::kPrimLong);
3178 __ Seh(dst, src);
3179 break;
3180 case Primitive::kPrimInt:
3181 case Primitive::kPrimLong:
3182 // Sign-extend 32-bit int into bits 32 through 63 for
3183 // int-to-long and long-to-int conversions
3184 __ Sll(dst, src, 0);
3185 break;
3186
3187 default:
3188 LOG(FATAL) << "Unexpected type conversion from " << input_type
3189 << " to " << result_type;
3190 }
3191 } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
3192 if (input_type != Primitive::kPrimLong) {
3193 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
3194 GpuRegister src = locations->InAt(0).AsRegister<GpuRegister>();
3195 __ Mtc1(src, FTMP);
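      // Move the 32-bit integer into the FPU scratch register FTMP, then convert it to
      // float or double.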
3196 if (result_type == Primitive::kPrimFloat) {
3197 __ Cvtsw(dst, FTMP);
3198 } else {
3199 __ Cvtdw(dst, FTMP);
3200 }
3201 } else {
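      // 64-bit integer sources are converted through the pL2f/pL2d runtime entry points.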
3202 int32_t entry_offset = (result_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pL2f)
3203 : QUICK_ENTRY_POINT(pL2d);
3204 codegen_->InvokeRuntime(entry_offset,
3205 conversion,
3206 conversion->GetDexPc(),
3207 nullptr);
3208 }
3209 } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
3210 CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
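    // These conversions go through runtime entry points, which implement the Java rules
    // for NaN and out-of-range inputs.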
3211 int32_t entry_offset;
3212 if (result_type != Primitive::kPrimLong) {
3213 entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2iz)
3214 : QUICK_ENTRY_POINT(pD2iz);
3215 } else {
3216 entry_offset = (input_type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pF2l)
3217 : QUICK_ENTRY_POINT(pD2l);
3218 }
3219 codegen_->InvokeRuntime(entry_offset,
3220 conversion,
3221 conversion->GetDexPc(),
3222 nullptr);
3223 } else if (Primitive::IsFloatingPointType(result_type) &&
3224 Primitive::IsFloatingPointType(input_type)) {
3225 FpuRegister dst = locations->Out().AsFpuRegister<FpuRegister>();
3226 FpuRegister src = locations->InAt(0).AsFpuRegister<FpuRegister>();
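    // cvt.s.d narrows double to float; cvt.d.s widens float to double.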
3227 if (result_type == Primitive::kPrimFloat) {
3228 __ Cvtsd(dst, src);
3229 } else {
3230 __ Cvtds(dst, src);
3231 }
3232 } else {
3233 LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
3234 << " to " << result_type;
3235 }
3236}
3237
3238void LocationsBuilderMIPS64::VisitUShr(HUShr* ushr) {
3239 HandleShift(ushr);
3240}
3241
3242void InstructionCodeGeneratorMIPS64::VisitUShr(HUShr* ushr) {
3243 HandleShift(ushr);
3244}
3245
3246void LocationsBuilderMIPS64::VisitXor(HXor* instruction) {
3247 HandleBinaryOp(instruction);
3248}
3249
3250void InstructionCodeGeneratorMIPS64::VisitXor(HXor* instruction) {
3251 HandleBinaryOp(instruction);
3252}
3253
3254void LocationsBuilderMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
3255 // Nothing to do, this should be removed during prepare for register allocator.
3256 LOG(FATAL) << "Unreachable";
3257}
3258
3259void InstructionCodeGeneratorMIPS64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
3260 // Nothing to do, this should be removed during prepare for register allocator.
3261 LOG(FATAL) << "Unreachable";
3262}
3263
3264void LocationsBuilderMIPS64::VisitEqual(HEqual* comp) {
3265 VisitCondition(comp);
3266}
3267
3268void InstructionCodeGeneratorMIPS64::VisitEqual(HEqual* comp) {
3269 VisitCondition(comp);
3270}
3271
3272void LocationsBuilderMIPS64::VisitNotEqual(HNotEqual* comp) {
3273 VisitCondition(comp);
3274}
3275
3276void InstructionCodeGeneratorMIPS64::VisitNotEqual(HNotEqual* comp) {
3277 VisitCondition(comp);
3278}
3279
3280void LocationsBuilderMIPS64::VisitLessThan(HLessThan* comp) {
3281 VisitCondition(comp);
3282}
3283
3284void InstructionCodeGeneratorMIPS64::VisitLessThan(HLessThan* comp) {
3285 VisitCondition(comp);
3286}
3287
3288void LocationsBuilderMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
3289 VisitCondition(comp);
3290}
3291
3292void InstructionCodeGeneratorMIPS64::VisitLessThanOrEqual(HLessThanOrEqual* comp) {
3293 VisitCondition(comp);
3294}
3295
3296void LocationsBuilderMIPS64::VisitGreaterThan(HGreaterThan* comp) {
3297 VisitCondition(comp);
3298}
3299
3300void InstructionCodeGeneratorMIPS64::VisitGreaterThan(HGreaterThan* comp) {
3301 VisitCondition(comp);
3302}
3303
3304void LocationsBuilderMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
3305 VisitCondition(comp);
3306}
3307
3308void InstructionCodeGeneratorMIPS64::VisitGreaterThanOrEqual(HGreaterThanOrEqual* comp) {
3309 VisitCondition(comp);
3310}
3311
3312void LocationsBuilderMIPS64::VisitFakeString(HFakeString* instruction) {
3313 DCHECK(codegen_->IsBaseline());
3314 LocationSummary* locations =
3315 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
3316 locations->SetOut(Location::ConstantLocation(GetGraph()->GetNullConstant()));
3317}
3318
3319void InstructionCodeGeneratorMIPS64::VisitFakeString(HFakeString* instruction ATTRIBUTE_UNUSED) {
3320 DCHECK(codegen_->IsBaseline());
3321 // Will be generated at use site.
3322}
3323
3324}  // namespace mips64
3325} // namespace art