/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_

#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
#include "base/arena_containers.h"
#include "base/arena_object.h"
#include "base/bit_field.h"
#include "compiled_method.h"
#include "driver/compiler_options.h"
#include "globals.h"
#include "graph_visualizer.h"
#include "locations.h"
#include "memory_region.h"
#include "nodes.h"
#include "optimizing_compiler_stats.h"
#include "stack_map_stream.h"
#include "utils/label.h"

namespace art {

// Binary encoding of 2^32 for type double.
static int64_t constexpr k2Pow32EncodingForDouble = INT64_C(0x41F0000000000000);
// Binary encoding of 2^31 for type double.
static int64_t constexpr k2Pow31EncodingForDouble = INT64_C(0x41E0000000000000);
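// These are simply the IEEE-754 bit patterns of the corresponding doubles:
// 2^32 has sign 0, biased exponent 1023 + 32 = 1055 = 0x41F and a zero
// mantissa, hence 0x41F0000000000000; likewise 1023 + 31 = 0x41E for 2^31.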

// Minimum value for a primitive integer.
static int32_t constexpr kPrimIntMin = 0x80000000;
// Minimum value for a primitive long.
static int64_t constexpr kPrimLongMin = INT64_C(0x8000000000000000);

// Maximum value for a primitive integer.
static int32_t constexpr kPrimIntMax = 0x7fffffff;
// Maximum value for a primitive long.
static int64_t constexpr kPrimLongMax = INT64_C(0x7fffffffffffffff);

class Assembler;
class CodeGenerator;
class CompilerDriver;
class LinkerPatch;
class ParallelMoveResolver;

class CodeAllocator {
 public:
  CodeAllocator() {}
  virtual ~CodeAllocator() {}

  virtual uint8_t* Allocate(size_t size) = 0;

 private:
  DISALLOW_COPY_AND_ASSIGN(CodeAllocator);
};
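
// A hedged illustration (not part of this header): a minimal CodeAllocator
// backed by a growable buffer. A driver hands such an allocator to
// CodeGenerator::Compile() and then takes ownership of the emitted bytes.
//
//   class BufferCodeAllocator FINAL : public CodeAllocator {
//    public:
//     uint8_t* Allocate(size_t size) OVERRIDE {
//       memory_.resize(size);
//       return memory_.data();
//     }
//     const std::vector<uint8_t>& GetMemory() const { return memory_; }
//
//    private:
//     std::vector<uint8_t> memory_;
//   };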

class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
 public:
  explicit SlowPathCode(HInstruction* instruction) : instruction_(instruction) {
    for (size_t i = 0; i < kMaximumNumberOfExpectedRegisters; ++i) {
      saved_core_stack_offsets_[i] = kRegisterNotSaved;
      saved_fpu_stack_offsets_[i] = kRegisterNotSaved;
    }
  }

  virtual ~SlowPathCode() {}

  virtual void EmitNativeCode(CodeGenerator* codegen) = 0;

  virtual void SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);
  virtual void RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations);

  bool IsCoreRegisterSaved(int reg) const {
    return saved_core_stack_offsets_[reg] != kRegisterNotSaved;
  }

  bool IsFpuRegisterSaved(int reg) const {
    return saved_fpu_stack_offsets_[reg] != kRegisterNotSaved;
  }

  uint32_t GetStackOffsetOfCoreRegister(int reg) const {
    return saved_core_stack_offsets_[reg];
  }

  uint32_t GetStackOffsetOfFpuRegister(int reg) const {
    return saved_fpu_stack_offsets_[reg];
  }

  virtual bool IsFatal() const { return false; }

  virtual const char* GetDescription() const = 0;

  Label* GetEntryLabel() { return &entry_label_; }
  Label* GetExitLabel() { return &exit_label_; }

  HInstruction* GetInstruction() const {
    return instruction_;
  }

  uint32_t GetDexPc() const {
    return instruction_ != nullptr ? instruction_->GetDexPc() : kNoDexPc;
  }

 protected:
  static constexpr size_t kMaximumNumberOfExpectedRegisters = 32;
  static constexpr uint32_t kRegisterNotSaved = -1;
  // The instruction where this slow path is happening.
  HInstruction* instruction_;
  uint32_t saved_core_stack_offsets_[kMaximumNumberOfExpectedRegisters];
  uint32_t saved_fpu_stack_offsets_[kMaximumNumberOfExpectedRegisters];

 private:
  Label entry_label_;
  Label exit_label_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};
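
// A hedged sketch of a concrete slow path (the real subclasses live in the
// per-architecture code generators). The fast path branches to
// GetEntryLabel(); EmitNativeCode() typically saves the live registers, calls
// a runtime entrypoint, records the PC, restores, and jumps back:
//
//   class ExampleSlowPath FINAL : public SlowPathCode {
//    public:
//     explicit ExampleSlowPath(HInstruction* at) : SlowPathCode(at) {}
//
//     void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
//       __ Bind(GetEntryLabel());
//       SaveLiveRegisters(codegen, instruction_->GetLocations());
//       // ... invoke the runtime and record the safepoint ...
//       RestoreLiveRegisters(codegen, instruction_->GetLocations());
//       __ jmp(GetExitLabel());
//     }
//
//     const char* GetDescription() const OVERRIDE { return "ExampleSlowPath"; }
//   };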

class InvokeDexCallingConventionVisitor {
 public:
  virtual Location GetNextLocation(Primitive::Type type) = 0;
  virtual Location GetReturnLocation(Primitive::Type type) const = 0;
  virtual Location GetMethodLocation() const = 0;

 protected:
  InvokeDexCallingConventionVisitor() {}
  virtual ~InvokeDexCallingConventionVisitor() {}

  // The current index for core registers.
  uint32_t gp_index_ = 0u;
  // The current index for floating-point registers.
  uint32_t float_index_ = 0u;
  // The current stack index.
  uint32_t stack_index_ = 0u;

 private:
  DISALLOW_COPY_AND_ASSIGN(InvokeDexCallingConventionVisitor);
};
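
// A hedged, simplified sketch of how a backend typically implements
// GetNextLocation(): the indices above advance as parameters are assigned,
// and once the argument registers run out the parameter goes on the stack.
// The register-table names below are illustrative, not this header's API.
//
//   Location ExampleVisitor::GetNextLocation(Primitive::Type type) {
//     if (!Primitive::IsFloatingPointType(type) &&
//         gp_index_ < kNumberOfArgumentRegisters) {
//       return Location::RegisterLocation(kArgumentRegisters[gp_index_++]);
//     }
//     // ... analogous float_index_ handling for FP types ...
//     return Location::StackSlot(kVRegSize * stack_index_++);  // simplified
//   }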

class FieldAccessCallingConvention {
 public:
  virtual Location GetObjectLocation() const = 0;
  virtual Location GetFieldIndexLocation() const = 0;
  virtual Location GetReturnLocation(Primitive::Type type) const = 0;
  virtual Location GetSetValueLocation(Primitive::Type type, bool is_instance) const = 0;
  virtual Location GetFpuLocation(Primitive::Type type) const = 0;
  virtual ~FieldAccessCallingConvention() {}

 protected:
  FieldAccessCallingConvention() {}

 private:
  DISALLOW_COPY_AND_ASSIGN(FieldAccessCallingConvention);
};

class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
 public:
  // Compiles the graph to executable instructions.
  void Compile(CodeAllocator* allocator);
  static std::unique_ptr<CodeGenerator> Create(HGraph* graph,
                                               InstructionSet instruction_set,
                                               const InstructionSetFeatures& isa_features,
                                               const CompilerOptions& compiler_options,
                                               OptimizingCompilerStats* stats = nullptr);
  virtual ~CodeGenerator() {}

  // Get the graph. This is the outermost graph, never the graph of a method being inlined.
  HGraph* GetGraph() const { return graph_; }

  HBasicBlock* GetNextBlockToEmit() const;
  HBasicBlock* FirstNonEmptyBlock(HBasicBlock* block) const;
  bool GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const;

  size_t GetStackSlotOfParameter(HParameterValue* parameter) const {
    // Note that this follows the current calling convention.
    return GetFrameSize()
        + InstructionSetPointerSize(GetInstructionSet())  // Art method
        + parameter->GetIndex() * kVRegSize;
  }

  virtual void Initialize() = 0;
  virtual void Finalize(CodeAllocator* allocator);
  virtual void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches);
  virtual void GenerateFrameEntry() = 0;
  virtual void GenerateFrameExit() = 0;
  virtual void Bind(HBasicBlock* block) = 0;
  virtual void MoveConstant(Location destination, int32_t value) = 0;
  virtual void MoveLocation(Location dst, Location src, Primitive::Type dst_type) = 0;
  virtual void AddLocationAsTemp(Location location, LocationSummary* locations) = 0;

  virtual Assembler* GetAssembler() = 0;
  virtual const Assembler& GetAssembler() const = 0;
  virtual size_t GetWordSize() const = 0;
  virtual size_t GetFloatingPointSpillSlotSize() const = 0;
  virtual uintptr_t GetAddressOf(HBasicBlock* block) const = 0;
  void InitializeCodeGeneration(size_t number_of_spill_slots,
                                size_t maximum_number_of_live_core_registers,
                                size_t maximum_number_of_live_fpu_registers,
                                size_t number_of_out_slots,
                                const ArenaVector<HBasicBlock*>& block_order);

  uint32_t GetFrameSize() const { return frame_size_; }
  void SetFrameSize(uint32_t size) { frame_size_ = size; }
  uint32_t GetCoreSpillMask() const { return core_spill_mask_; }
  uint32_t GetFpuSpillMask() const { return fpu_spill_mask_; }

  size_t GetNumberOfCoreRegisters() const { return number_of_core_registers_; }
  size_t GetNumberOfFloatingPointRegisters() const { return number_of_fpu_registers_; }
  virtual void SetupBlockedRegisters() const = 0;

  virtual void ComputeSpillMask() {
    core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
    DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
    fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
  }

  static uint32_t ComputeRegisterMask(const int* registers, size_t length) {
    uint32_t mask = 0;
    for (size_t i = 0, e = length; i < e; ++i) {
      mask |= (1 << registers[i]);
    }
    return mask;
  }
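  // For example, with registers = {0, 1, 5} and length = 3, the returned mask
  // is (1 << 0) | (1 << 1) | (1 << 5) = 0b100011 = 0x23.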

  virtual void DumpCoreRegister(std::ostream& stream, int reg) const = 0;
  virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const = 0;
  virtual InstructionSet GetInstructionSet() const = 0;

  const CompilerOptions& GetCompilerOptions() const { return compiler_options_; }

  void MaybeRecordStat(MethodCompilationStat compilation_stat, size_t count = 1) const;

  // Saves the register in the stack. Returns the size taken on stack.
  virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) = 0;
  // Restores the register from the stack. Returns the size taken on stack.
  virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) = 0;

  virtual size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) = 0;
  virtual size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) = 0;

  virtual bool NeedsTwoRegisters(Primitive::Type type) const = 0;
  // Returns whether we should split long moves in parallel moves.
  virtual bool ShouldSplitLongMoves() const { return false; }

  size_t GetNumberOfCoreCalleeSaveRegisters() const {
    return POPCOUNT(core_callee_save_mask_);
  }

  size_t GetNumberOfCoreCallerSaveRegisters() const {
    DCHECK_GE(GetNumberOfCoreRegisters(), GetNumberOfCoreCalleeSaveRegisters());
    return GetNumberOfCoreRegisters() - GetNumberOfCoreCalleeSaveRegisters();
  }

  bool IsCoreCalleeSaveRegister(int reg) const {
    return (core_callee_save_mask_ & (1 << reg)) != 0;
  }

  bool IsFloatingPointCalleeSaveRegister(int reg) const {
    return (fpu_callee_save_mask_ & (1 << reg)) != 0;
  }

  // Record native to dex mapping for a suspend point. Required by runtime.
  void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc, SlowPathCode* slow_path = nullptr);
  // Check whether we have already recorded mapping at this PC.
  bool HasStackMapAtCurrentPc();
  // Record extra stack maps if we support native debugging.
  void MaybeRecordNativeDebugInfo(HInstruction* instruction,
                                  uint32_t dex_pc,
                                  SlowPathCode* slow_path = nullptr);

  bool CanMoveNullCheckToUser(HNullCheck* null_check);
  void MaybeRecordImplicitNullCheck(HInstruction* instruction);
  void GenerateNullCheck(HNullCheck* null_check);
  virtual void GenerateImplicitNullCheck(HNullCheck* null_check) = 0;
  virtual void GenerateExplicitNullCheck(HNullCheck* null_check) = 0;

  // Records a stack map which the runtime might use to set catch phi values
  // during exception delivery.
  // TODO: Replace with a catch-entering instruction that records the environment.
  void RecordCatchBlockInfo();

  // Returns true if implicit null checks are allowed in the compiler options
  // and if the null check is not inside a try block. We currently cannot do
  // implicit null checks in that case because we need the NullCheckSlowPath to
  // save live registers, which may be needed by the runtime to set catch phis.
  bool IsImplicitNullCheckAllowed(HNullCheck* null_check) const;

  void AddSlowPath(SlowPathCode* slow_path) {
    slow_paths_.push_back(slow_path);
  }

  void BuildStackMaps(MemoryRegion region, const DexFile::CodeItem& code_item);
  size_t ComputeStackMapsSize();

  bool IsLeafMethod() const {
    return is_leaf_;
  }

  void MarkNotLeaf() {
    is_leaf_ = false;
    requires_current_method_ = true;
  }

  void SetRequiresCurrentMethod() {
    requires_current_method_ = true;
  }

  bool RequiresCurrentMethod() const {
    return requires_current_method_;
  }

  // Clears the spill slots taken by loop phis in the `LocationSummary` of the
  // suspend check. This is called when the code generator generates code
  // for the suspend check at the back edge (instead of where the suspend check
  // is, which is the loop entry). At this point, the spill slots for the phis
  // have not been written to.
  void ClearSpillSlotsFromLoopPhisInStackMap(HSuspendCheck* suspend_check) const;

  bool* GetBlockedCoreRegisters() const { return blocked_core_registers_; }
  bool* GetBlockedFloatingPointRegisters() const { return blocked_fpu_registers_; }

  // Helper that returns the pointer offset of an index in an object array.
  // Note: this method assumes we always have the same pointer size, regardless
  // of the architecture.
  static size_t GetCacheOffset(uint32_t index);
  // Pointer variant for ArtMethod and ArtField arrays.
  size_t GetCachePointerOffset(uint32_t index);

  void EmitParallelMoves(Location from1,
                         Location to1,
                         Primitive::Type type1,
                         Location from2,
                         Location to2,
                         Primitive::Type type2);

  static bool StoreNeedsWriteBarrier(Primitive::Type type, HInstruction* value) {
    // Check that null value is not represented as an integer constant.
    DCHECK(type != Primitive::kPrimNot || !value->IsIntConstant());
    return type == Primitive::kPrimNot && !value->IsNullConstant();
  }

  void ValidateInvokeRuntime(HInstruction* instruction, SlowPathCode* slow_path);

  void AddAllocatedRegister(Location location) {
    allocated_registers_.Add(location);
  }

  bool HasAllocatedRegister(bool is_core, int reg) const {
    return is_core
        ? allocated_registers_.ContainsCoreRegister(reg)
        : allocated_registers_.ContainsFloatingPointRegister(reg);
  }

  void AllocateLocations(HInstruction* instruction);

  // Tells whether the stack frame of the compiled method is
  // considered "empty", that is either actually having a size of zero,
  // or just containing the saved return address register.
  bool HasEmptyFrame() const {
    return GetFrameSize() == (CallPushesPC() ? GetWordSize() : 0);
  }

  static int32_t GetInt32ValueOf(HConstant* constant) {
    if (constant->IsIntConstant()) {
      return constant->AsIntConstant()->GetValue();
    } else if (constant->IsNullConstant()) {
      return 0;
    } else {
      DCHECK(constant->IsFloatConstant());
      return bit_cast<int32_t, float>(constant->AsFloatConstant()->GetValue());
    }
  }

  static int64_t GetInt64ValueOf(HConstant* constant) {
    if (constant->IsIntConstant()) {
      return constant->AsIntConstant()->GetValue();
    } else if (constant->IsNullConstant()) {
      return 0;
    } else if (constant->IsFloatConstant()) {
      return bit_cast<int32_t, float>(constant->AsFloatConstant()->GetValue());
    } else if (constant->IsLongConstant()) {
      return constant->AsLongConstant()->GetValue();
    } else {
      DCHECK(constant->IsDoubleConstant());
      return bit_cast<int64_t, double>(constant->AsDoubleConstant()->GetValue());
    }
  }
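  // For example, on a float constant 1.0f these return the IEEE-754 bit
  // pattern bit_cast<int32_t, float>(1.0f) = 0x3f800000, and on a double
  // constant 1.0, bit_cast<int64_t, double>(1.0) = 0x3ff0000000000000.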

  size_t GetFirstRegisterSlotInSlowPath() const {
    return first_register_slot_in_slow_path_;
  }

  uint32_t FrameEntrySpillSize() const {
    return GetFpuSpillSize() + GetCoreSpillSize();
  }

  virtual ParallelMoveResolver* GetMoveResolver() = 0;

  static void CreateCommonInvokeLocationSummary(
      HInvoke* invoke, InvokeDexCallingConventionVisitor* visitor);

  void GenerateInvokeUnresolvedRuntimeCall(HInvokeUnresolved* invoke);

  void CreateUnresolvedFieldLocationSummary(
      HInstruction* field_access,
      Primitive::Type field_type,
      const FieldAccessCallingConvention& calling_convention);

  void GenerateUnresolvedFieldAccess(
      HInstruction* field_access,
      Primitive::Type field_type,
      uint32_t field_index,
      uint32_t dex_pc,
      const FieldAccessCallingConvention& calling_convention);

  // TODO: This overlaps a bit with MoveFromReturnRegister. Refactor for a better design.
  static void CreateLoadClassLocationSummary(HLoadClass* cls,
                                             Location runtime_type_index_location,
                                             Location runtime_return_location,
                                             bool code_generator_supports_read_barrier = false);

  static void CreateSystemArrayCopyLocationSummary(HInvoke* invoke);

  void SetDisassemblyInformation(DisassemblyInformation* info) { disasm_info_ = info; }
  DisassemblyInformation* GetDisassemblyInformation() const { return disasm_info_; }

  virtual void InvokeRuntime(QuickEntrypointEnum entrypoint,
                             HInstruction* instruction,
                             uint32_t dex_pc,
                             SlowPathCode* slow_path) = 0;

  // Check if the desired_string_load_kind is supported. If it is, return it,
  // otherwise return a fall-back kind that should be used instead.
  virtual HLoadString::LoadKind GetSupportedLoadStringKind(
      HLoadString::LoadKind desired_string_load_kind) = 0;

  // Check if the desired_dispatch_info is supported. If it is, return it,
  // otherwise return a fall-back info that should be used instead.
  virtual HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
      const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
      MethodReference target_method) = 0;

  // Generate a call to a static or direct method.
  virtual void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) = 0;
  // Generate a call to a virtual method.
  virtual void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) = 0;

  // Copy the result of a call into the given target.
  virtual void MoveFromReturnRegister(Location trg, Primitive::Type type) = 0;

  virtual void GenerateNop() = 0;

 protected:
  // Method patch info used for recording locations of required linker patches and
  // target methods. The target method can be used for various purposes, whether for
  // patching the address of the method or the code pointer or a PC-relative call.
  template <typename LabelType>
  struct MethodPatchInfo {
    explicit MethodPatchInfo(MethodReference m) : target_method(m), label() { }

    MethodReference target_method;
    LabelType label;
  };

  // String patch info used for recording locations of required linker patches and
  // target strings. The actual string address can be absolute or PC-relative.
  template <typename LabelType>
  struct StringPatchInfo {
    StringPatchInfo(const DexFile& df, uint32_t index)
        : dex_file(df), string_index(index), label() { }

    const DexFile& dex_file;
    uint32_t string_index;
    LabelType label;
  };

  CodeGenerator(HGraph* graph,
                size_t number_of_core_registers,
                size_t number_of_fpu_registers,
                size_t number_of_register_pairs,
                uint32_t core_callee_save_mask,
                uint32_t fpu_callee_save_mask,
                const CompilerOptions& compiler_options,
                OptimizingCompilerStats* stats)
      : frame_size_(0),
        core_spill_mask_(0),
        fpu_spill_mask_(0),
        first_register_slot_in_slow_path_(0),
        blocked_core_registers_(graph->GetArena()->AllocArray<bool>(number_of_core_registers,
                                                                    kArenaAllocCodeGenerator)),
        blocked_fpu_registers_(graph->GetArena()->AllocArray<bool>(number_of_fpu_registers,
                                                                   kArenaAllocCodeGenerator)),
        blocked_register_pairs_(graph->GetArena()->AllocArray<bool>(number_of_register_pairs,
                                                                    kArenaAllocCodeGenerator)),
        number_of_core_registers_(number_of_core_registers),
        number_of_fpu_registers_(number_of_fpu_registers),
        number_of_register_pairs_(number_of_register_pairs),
        core_callee_save_mask_(core_callee_save_mask),
        fpu_callee_save_mask_(fpu_callee_save_mask),
        stack_map_stream_(graph->GetArena()),
        block_order_(nullptr),
        disasm_info_(nullptr),
        stats_(stats),
        graph_(graph),
        compiler_options_(compiler_options),
        slow_paths_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
        current_slow_path_(nullptr),
        current_block_index_(0),
        is_leaf_(true),
        requires_current_method_(false) {
    slow_paths_.reserve(8);
  }

  virtual HGraphVisitor* GetLocationBuilder() = 0;
  virtual HGraphVisitor* GetInstructionVisitor() = 0;

  // Returns the location of the first spilled entry for floating point registers,
  // relative to the stack pointer.
  uint32_t GetFpuSpillStart() const {
    return GetFrameSize() - FrameEntrySpillSize();
  }

  uint32_t GetFpuSpillSize() const {
    return POPCOUNT(fpu_spill_mask_) * GetFloatingPointSpillSlotSize();
  }

  uint32_t GetCoreSpillSize() const {
    return POPCOUNT(core_spill_mask_) * GetWordSize();
  }

  bool HasAllocatedCalleeSaveRegisters() const {
    // We check the core registers against 1 because it always comprises the return PC.
    return (POPCOUNT(allocated_registers_.GetCoreRegisters() & core_callee_save_mask_) != 1)
        || (POPCOUNT(allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_) != 0);
  }

  bool CallPushesPC() const {
    InstructionSet instruction_set = GetInstructionSet();
    return instruction_set == kX86 || instruction_set == kX86_64;
  }

  // Arm64 has its own type for a label, so we need to templatize these methods
  // to share the logic.

  template <typename LabelType>
  LabelType* CommonInitializeLabels() {
    // We use raw array allocations instead of ArenaVector<> because Labels are
    // non-constructible and non-movable and as such cannot be held in a vector.
    size_t size = GetGraph()->GetBlocks().size();
    LabelType* labels = GetGraph()->GetArena()->AllocArray<LabelType>(size,
                                                                      kArenaAllocCodeGenerator);
    for (size_t i = 0; i != size; ++i) {
      new(labels + i) LabelType();
    }
    return labels;
  }

  template <typename LabelType>
  LabelType* CommonGetLabelOf(LabelType* raw_pointer_to_labels_array, HBasicBlock* block) const {
    block = FirstNonEmptyBlock(block);
    return raw_pointer_to_labels_array + block->GetBlockId();
  }

  SlowPathCode* GetCurrentSlowPath() {
    return current_slow_path_;
  }

  // Frame size required for this method.
  uint32_t frame_size_;
  uint32_t core_spill_mask_;
  uint32_t fpu_spill_mask_;
  uint32_t first_register_slot_in_slow_path_;

  // Registers that were allocated during linear scan.
  RegisterSet allocated_registers_;

  // Arrays used when doing register allocation to know which
  // registers we can allocate. `SetupBlockedRegisters` updates the
  // arrays.
  bool* const blocked_core_registers_;
  bool* const blocked_fpu_registers_;
  bool* const blocked_register_pairs_;
  size_t number_of_core_registers_;
  size_t number_of_fpu_registers_;
  size_t number_of_register_pairs_;
  const uint32_t core_callee_save_mask_;
  const uint32_t fpu_callee_save_mask_;

  StackMapStream stack_map_stream_;

  // The order to use for code generation.
  const ArenaVector<HBasicBlock*>* block_order_;

  DisassemblyInformation* disasm_info_;

 private:
  size_t GetStackOffsetOfSavedRegister(size_t index);
  void GenerateSlowPaths();
  void BlockIfInRegister(Location location, bool is_out = false) const;
  void EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path);

  OptimizingCompilerStats* stats_;

  HGraph* const graph_;
  const CompilerOptions& compiler_options_;

  ArenaVector<SlowPathCode*> slow_paths_;

  // The current slow-path that we're generating code for.
  SlowPathCode* current_slow_path_;

  // The current block index in `block_order_` of the block
  // we are generating code for.
  size_t current_block_index_;

  // Whether the method is a leaf method.
  bool is_leaf_;

  // Whether an instruction in the graph accesses the current method.
  bool requires_current_method_;

  friend class OptimizingCFITest;

  DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};

template <typename C, typename F>
class CallingConvention {
 public:
  CallingConvention(const C* registers,
                    size_t number_of_registers,
                    const F* fpu_registers,
                    size_t number_of_fpu_registers,
                    size_t pointer_size)
      : registers_(registers),
        number_of_registers_(number_of_registers),
        fpu_registers_(fpu_registers),
        number_of_fpu_registers_(number_of_fpu_registers),
        pointer_size_(pointer_size) {}

  size_t GetNumberOfRegisters() const { return number_of_registers_; }
  size_t GetNumberOfFpuRegisters() const { return number_of_fpu_registers_; }

  C GetRegisterAt(size_t index) const {
    DCHECK_LT(index, number_of_registers_);
    return registers_[index];
  }

  F GetFpuRegisterAt(size_t index) const {
    DCHECK_LT(index, number_of_fpu_registers_);
    return fpu_registers_[index];
  }

  size_t GetStackOffsetOf(size_t index) const {
    // We still reserve the space for parameters passed by registers.
    // Add space for the method pointer.
    return pointer_size_ + index * kVRegSize;
  }

 private:
  const C* registers_;
  const size_t number_of_registers_;
  const F* fpu_registers_;
  const size_t number_of_fpu_registers_;
  const size_t pointer_size_;

  DISALLOW_COPY_AND_ASSIGN(CallingConvention);
};
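
// For example, on a 64-bit target (pointer_size_ == 8) with kVRegSize == 4,
// GetStackOffsetOf(2) == 8 + 2 * 4 == 16: the third stack parameter lives 16
// bytes above the stack pointer, past the slot reserved for the method pointer.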

/**
 * A templated class SlowPathGenerator with a templated method NewSlowPath()
 * that can be used by any code generator to share equivalent slow-paths with
 * the objective of reducing generated code size.
 *
 * InstructionType:  instruction that requires SlowPathCodeType
 * SlowPathCodeType: subclass of SlowPathCode, with constructor SlowPathCodeType(InstructionType*)
 */
template <typename InstructionType>
class SlowPathGenerator {
  static_assert(std::is_base_of<HInstruction, InstructionType>::value,
                "InstructionType is not a subclass of art::HInstruction");

 public:
  SlowPathGenerator(HGraph* graph, CodeGenerator* codegen)
      : graph_(graph),
        codegen_(codegen),
        slow_path_map_(std::less<uint32_t>(), graph->GetArena()->Adapter(kArenaAllocSlowPaths)) {}

  // Creates and adds a new slow-path, if needed, or returns an existing one otherwise.
  // Templating the method (rather than the whole class) on the slow-path type enables
  // keeping this code at a generic, non-architecture-specific place.
  //
  // NOTE: This approach assumes each InstructionType only generates one SlowPathCodeType.
  //       To relax this requirement, we would need some RTTI on the stored slow-paths,
  //       or template the class as a whole on SlowPathType.
  template <typename SlowPathCodeType>
  SlowPathCodeType* NewSlowPath(InstructionType* instruction) {
    static_assert(std::is_base_of<SlowPathCode, SlowPathCodeType>::value,
                  "SlowPathCodeType is not a subclass of art::SlowPathCode");
    static_assert(std::is_constructible<SlowPathCodeType, InstructionType*>::value,
                  "SlowPathCodeType is not constructible from InstructionType*");
    // Iterate over potential candidates for sharing. Currently, only same-typed
    // slow-paths with exactly the same dex-pc are viable candidates.
    // TODO: pass dex-pc/slow-path-type to run-time to allow even more sharing?
    const uint32_t dex_pc = instruction->GetDexPc();
    auto iter = slow_path_map_.find(dex_pc);
    if (iter != slow_path_map_.end()) {
      auto candidates = iter->second;
      for (const auto& it : candidates) {
        InstructionType* other_instruction = it.first;
        SlowPathCodeType* other_slow_path = down_cast<SlowPathCodeType*>(it.second);
        // Determine if the instructions allow for slow-path sharing.
        if (HaveSameLiveRegisters(instruction, other_instruction) &&
            HaveSameStackMap(instruction, other_instruction)) {
          // Can share: reuse existing one.
          return other_slow_path;
        }
      }
    } else {
      // First time this dex-pc is seen.
      iter = slow_path_map_.Put(dex_pc, {{}, {graph_->GetArena()->Adapter(kArenaAllocSlowPaths)}});
    }
    // Cannot share: create and add new slow-path for this particular dex-pc.
    SlowPathCodeType* slow_path = new (graph_->GetArena()) SlowPathCodeType(instruction);
    iter->second.emplace_back(std::make_pair(instruction, slow_path));
    codegen_->AddSlowPath(slow_path);
    return slow_path;
  }

 private:
  // Tests if both instructions have same set of live physical registers. This ensures
  // the slow-path has exactly the same preamble on saving these registers to stack.
  bool HaveSameLiveRegisters(const InstructionType* i1, const InstructionType* i2) const {
    const uint32_t core_spill = ~codegen_->GetCoreSpillMask();
    const uint32_t fpu_spill = ~codegen_->GetFpuSpillMask();
    RegisterSet* live1 = i1->GetLocations()->GetLiveRegisters();
    RegisterSet* live2 = i2->GetLocations()->GetLiveRegisters();
    return (((live1->GetCoreRegisters() & core_spill) ==
             (live2->GetCoreRegisters() & core_spill)) &&
            ((live1->GetFloatingPointRegisters() & fpu_spill) ==
             (live2->GetFloatingPointRegisters() & fpu_spill)));
  }

  // Tests if both instructions have the same stack map. This ensures the interpreter
  // will find exactly the same dex-registers at the same entries.
  bool HaveSameStackMap(const InstructionType* i1, const InstructionType* i2) const {
    DCHECK(i1->HasEnvironment());
    DCHECK(i2->HasEnvironment());
    // We conservatively test if the two instructions find exactly the same instructions
    // and location in each dex-register. This guarantees they will have the same stack map.
    HEnvironment* e1 = i1->GetEnvironment();
    HEnvironment* e2 = i2->GetEnvironment();
    if (e1->GetParent() != e2->GetParent() || e1->Size() != e2->Size()) {
      return false;
    }
    for (size_t i = 0, sz = e1->Size(); i < sz; ++i) {
      if (e1->GetInstructionAt(i) != e2->GetInstructionAt(i) ||
          !e1->GetLocationAt(i).Equals(e2->GetLocationAt(i))) {
        return false;
      }
    }
    return true;
  }

  HGraph* const graph_;
  CodeGenerator* const codegen_;

  // Map from dex-pc to vector of already existing instruction/slow-path pairs.
  ArenaSafeMap<uint32_t, ArenaVector<std::pair<InstructionType*, SlowPathCode*>>> slow_path_map_;

  DISALLOW_COPY_AND_ASSIGN(SlowPathGenerator);
};
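
// A hedged usage sketch: instead of allocating a fresh SlowPathCode for every
// HDeoptimize, a backend's instruction visitor can route it through
// NewSlowPath() so that equivalent slow paths are shared. The backend names
// below are illustrative of the pattern, not guaranteed by this header.
//
//   void InstructionCodeGeneratorX86::VisitDeoptimize(HDeoptimize* deoptimize) {
//     SlowPathCode* slow_path =
//         deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathX86>(deoptimize);
//     GenerateTestAndBranch(deoptimize,
//                           /* condition_input_index */ 0,
//                           slow_path->GetEntryLabel(),
//                           /* false_target */ nullptr);
//   }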

class InstructionCodeGenerator : public HGraphVisitor {
 public:
  InstructionCodeGenerator(HGraph* graph, CodeGenerator* codegen)
      : HGraphVisitor(graph),
        deopt_slow_paths_(graph, codegen) {}

 protected:
  // Add slow-path generator for each instruction/slow-path combination that desires sharing.
  // TODO: under current regime, only deopt sharing makes sense; extend later.
  SlowPathGenerator<HDeoptimize> deopt_slow_paths_;
};

}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_