Merge "ARM: Change mem address mode for array accesses."
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index bd13d16..a679ac2 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -158,6 +158,10 @@
# Enable warning for unreachable break & return.
art_clang_cflags += -Wunreachable-code-break -Wunreachable-code-return
+# Bug: http://b/29823425 Disable -Wconstant-conversion and
+# -Wundefined-var-template for Clang update to r271374
+art_clang_cflags += -Wno-constant-conversion -Wno-undefined-var-template
+
# Enable missing-noreturn only on non-Mac. As lots of things are not implemented for Apple, it's
# a pain.
ifneq ($(HOST_OS),darwin)
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 51fddc9..f310565 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -67,6 +67,7 @@
optimizing/parallel_move_resolver.cc \
optimizing/prepare_for_register_allocation.cc \
optimizing/reference_type_propagation.cc \
+ optimizing/register_allocation_resolver.cc \
optimizing/register_allocator_linear_scan.cc \
optimizing/select_generator.cc \
optimizing/sharpening.cc \
diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc
new file mode 100644
index 0000000..8f5a2a8
--- /dev/null
+++ b/compiler/optimizing/register_allocation_resolver.cc
@@ -0,0 +1,653 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "register_allocation_resolver.h"
+
+#include "code_generator.h"
+#include "ssa_liveness_analysis.h"
+
+namespace art {
+
+RegisterAllocationResolver::RegisterAllocationResolver(ArenaAllocator* allocator,
+ CodeGenerator* codegen,
+ const SsaLivenessAnalysis& liveness)
+ : allocator_(allocator),
+ codegen_(codegen),
+ liveness_(liveness) {}
+
+void RegisterAllocationResolver::Resolve(size_t max_safepoint_live_core_regs,
+ size_t max_safepoint_live_fp_regs,
+ size_t reserved_out_slots,
+ size_t int_spill_slots,
+ size_t long_spill_slots,
+ size_t float_spill_slots,
+ size_t double_spill_slots,
+ size_t catch_phi_spill_slots,
+ const ArenaVector<LiveInterval*>& temp_intervals) {
+ size_t spill_slots = int_spill_slots
+ + long_spill_slots
+ + float_spill_slots
+ + double_spill_slots
+ + catch_phi_spill_slots;
+
+ // Computes frame size and spill mask.
+ codegen_->InitializeCodeGeneration(spill_slots,
+ max_safepoint_live_core_regs,
+ max_safepoint_live_fp_regs,
+ reserved_out_slots, // Includes slot(s) for the art method.
+ codegen_->GetGraph()->GetLinearOrder());
+
+ // Resolve outputs, including stack locations.
+ // TODO: Use pointers of Location inside LiveInterval to avoid doing another iteration.
+ for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
+ HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
+ LiveInterval* current = instruction->GetLiveInterval();
+ LocationSummary* locations = instruction->GetLocations();
+ Location location = locations->Out();
+ if (instruction->IsParameterValue()) {
+ // Now that we know the frame size, adjust the parameter's location.
+ if (location.IsStackSlot()) {
+ location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ current->SetSpillSlot(location.GetStackIndex());
+ locations->UpdateOut(location);
+ } else if (location.IsDoubleStackSlot()) {
+ location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
+ current->SetSpillSlot(location.GetStackIndex());
+ locations->UpdateOut(location);
+ } else if (current->HasSpillSlot()) {
+ current->SetSpillSlot(current->GetSpillSlot() + codegen_->GetFrameSize());
+ }
+ } else if (instruction->IsCurrentMethod()) {
+ // The current method is always at offset 0.
+ DCHECK(!current->HasSpillSlot() || (current->GetSpillSlot() == 0));
+ } else if (instruction->IsPhi() && instruction->AsPhi()->IsCatchPhi()) {
+ DCHECK(current->HasSpillSlot());
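+ // Catch phi slots sit above all other spill areas (see the layout comment
+ // below). Since `spill_slots` includes the catch phi area itself, subtracting
+ // `catch_phi_spill_slots` offsets the index past the int/long/float/double
+ // areas plus the reserved out slots.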
+ size_t slot = current->GetSpillSlot()
+ + spill_slots
+ + reserved_out_slots
+ - catch_phi_spill_slots;
+ current->SetSpillSlot(slot * kVRegSize);
+ } else if (current->HasSpillSlot()) {
+ // Adjust the stack slot, now that we know the number of slots for each type.
+ // The way this implementation lays out the stack is the following:
+ // [parameter slots ]
+ // [catch phi spill slots ]
+ // [double spill slots ]
+ // [long spill slots ]
+ // [float spill slots ]
+ // [int/ref values ]
+ // [maximum out values ] (number of arguments for calls)
+ // [art method ].
+ size_t slot = current->GetSpillSlot();
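+ // Each case below deliberately falls through: a double's slot index, for
+ // instance, is offset past the long, float, and int/ref areas, and every
+ // type is finally offset past the reserved out slots, matching the layout
+ // above.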
+ switch (current->GetType()) {
+ case Primitive::kPrimDouble:
+ slot += long_spill_slots;
+ FALLTHROUGH_INTENDED;
+ case Primitive::kPrimLong:
+ slot += float_spill_slots;
+ FALLTHROUGH_INTENDED;
+ case Primitive::kPrimFloat:
+ slot += int_spill_slots;
+ FALLTHROUGH_INTENDED;
+ case Primitive::kPrimNot:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimByte:
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimShort:
+ slot += reserved_out_slots;
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unexpected type for interval " << current->GetType();
+ }
+ current->SetSpillSlot(slot * kVRegSize);
+ }
+
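+ // Reconcile the allocator's decision with the location summary: propagate it
+ // into unallocated outputs (and the first input for kSameAsFirstInput), and
+ // otherwise check that the two agree.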
+ Location source = current->ToLocation();
+
+ if (location.IsUnallocated()) {
+ if (location.GetPolicy() == Location::kSameAsFirstInput) {
+ if (locations->InAt(0).IsUnallocated()) {
+ locations->SetInAt(0, source);
+ } else {
+ DCHECK(locations->InAt(0).Equals(source));
+ }
+ }
+ locations->UpdateOut(source);
+ } else {
+ DCHECK(source.Equals(location));
+ }
+ }
+
+ // Connect siblings and resolve inputs.
+ for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
+ HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
+ ConnectSiblings(instruction->GetLiveInterval(),
+ max_safepoint_live_core_regs + max_safepoint_live_fp_regs);
+ }
+
+ // Resolve non-linear control flow across branches. Order does not matter.
+ for (HLinearOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
+ HBasicBlock* block = it.Current();
+ if (block->IsCatchBlock() ||
+ (block->IsLoopHeader() && block->GetLoopInformation()->IsIrreducible())) {
+ // Instructions live at the top of catch blocks or irreducible loop headers
+ // were forced to spill.
+ if (kIsDebugBuild) {
+ BitVector* live = liveness_.GetLiveInSet(*block);
+ for (uint32_t idx : live->Indexes()) {
+ LiveInterval* interval = liveness_.GetInstructionFromSsaIndex(idx)->GetLiveInterval();
+ LiveInterval* sibling = interval->GetSiblingAt(block->GetLifetimeStart());
+ // `GetSiblingAt` returns the sibling that contains a position, but there could be
+ // a lifetime hole in it. `CoversSlow` returns whether the interval is live at that
+ // position.
+ if ((sibling != nullptr) && sibling->CoversSlow(block->GetLifetimeStart())) {
+ DCHECK(!sibling->HasRegister());
+ }
+ }
+ }
+ } else {
+ BitVector* live = liveness_.GetLiveInSet(*block);
+ for (uint32_t idx : live->Indexes()) {
+ LiveInterval* interval = liveness_.GetInstructionFromSsaIndex(idx)->GetLiveInterval();
+ for (HBasicBlock* predecessor : block->GetPredecessors()) {
+ ConnectSplitSiblings(interval, predecessor, block);
+ }
+ }
+ }
+ }
+
+ // Resolve phi inputs. Order does not matter.
+ for (HLinearOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
+ HBasicBlock* current = it.Current();
+ if (current->IsCatchBlock()) {
+ // Catch phi values are set at runtime by the exception delivery mechanism.
+ } else {
+ for (HInstructionIterator inst_it(current->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* phi = inst_it.Current();
+ for (size_t i = 0, e = current->GetPredecessors().size(); i < e; ++i) {
+ HBasicBlock* predecessor = current->GetPredecessors()[i];
+ DCHECK_EQ(predecessor->GetNormalSuccessors().size(), 1u);
+ HInstruction* input = phi->InputAt(i);
+ Location source = input->GetLiveInterval()->GetLocationAt(
+ predecessor->GetLifetimeEnd() - 1);
+ Location destination = phi->GetLiveInterval()->ToLocation();
+ InsertParallelMoveAtExitOf(predecessor, phi, source, destination);
+ }
+ }
+ }
+ }
+
+ // Resolve temp locations.
+ for (LiveInterval* temp : temp_intervals) {
+ if (temp->IsHighInterval()) {
+ // High intervals can be skipped; they are already handled by the low interval.
+ continue;
+ }
+ HInstruction* at = liveness_.GetTempUser(temp);
+ size_t temp_index = liveness_.GetTempIndex(temp);
+ LocationSummary* locations = at->GetLocations();
+ switch (temp->GetType()) {
+ case Primitive::kPrimInt:
+ locations->SetTempAt(temp_index, Location::RegisterLocation(temp->GetRegister()));
+ break;
+
+ case Primitive::kPrimDouble:
+ if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) {
+ Location location = Location::FpuRegisterPairLocation(
+ temp->GetRegister(), temp->GetHighInterval()->GetRegister());
+ locations->SetTempAt(temp_index, location);
+ } else {
+ locations->SetTempAt(temp_index, Location::FpuRegisterLocation(temp->GetRegister()));
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected type for temporary location "
+ << temp->GetType();
+ }
+ }
+}
+
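+// Connects all siblings of `interval`: updates the location of every use and
+// environment use they cover, inserts a move wherever two adjacent siblings
+// were allocated different locations, and records live registers and GC
+// reference bits at each safepoint.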
+void RegisterAllocationResolver::ConnectSiblings(LiveInterval* interval,
+ size_t max_safepoint_live_regs) {
+ LiveInterval* current = interval;
+ if (current->HasSpillSlot()
+ && current->HasRegister()
+ // Currently, the code generators unconditionally spill the current method.
+ && !interval->GetDefinedBy()->IsCurrentMethod()) {
+ // We spill eagerly, so move must be at definition.
+ InsertMoveAfter(interval->GetDefinedBy(),
+ interval->ToLocation(),
+ interval->NeedsTwoSpillSlots()
+ ? Location::DoubleStackSlot(interval->GetParent()->GetSpillSlot())
+ : Location::StackSlot(interval->GetParent()->GetSpillSlot()));
+ }
+ UsePosition* use = current->GetFirstUse();
+ UsePosition* env_use = current->GetFirstEnvironmentUse();
+
+ // Walk over all siblings, updating locations of use positions, and
+ // connecting them when they are adjacent.
+ do {
+ Location source = current->ToLocation();
+
+ // Walk over all uses covered by this interval, and update the location
+ // information.
+
+ LiveRange* range = current->GetFirstRange();
+ while (range != nullptr) {
+ while (use != nullptr && use->GetPosition() < range->GetStart()) {
+ DCHECK(use->IsSynthesized());
+ use = use->GetNext();
+ }
+ while (use != nullptr && use->GetPosition() <= range->GetEnd()) {
+ DCHECK(!use->GetIsEnvironment());
+ DCHECK(current->CoversSlow(use->GetPosition()) || (use->GetPosition() == range->GetEnd()));
+ if (!use->IsSynthesized()) {
+ LocationSummary* locations = use->GetUser()->GetLocations();
+ Location expected_location = locations->InAt(use->GetInputIndex());
+ // The expected (actual) location may be invalid in case the input is unused. Currently
+ // this only happens for intrinsics.
+ if (expected_location.IsValid()) {
+ if (expected_location.IsUnallocated()) {
+ locations->SetInAt(use->GetInputIndex(), source);
+ } else if (!expected_location.IsConstant()) {
+ AddInputMoveFor(interval->GetDefinedBy(), use->GetUser(), source, expected_location);
+ }
+ } else {
+ DCHECK(use->GetUser()->IsInvoke());
+ DCHECK(use->GetUser()->AsInvoke()->GetIntrinsic() != Intrinsics::kNone);
+ }
+ }
+ use = use->GetNext();
+ }
+
+ // Walk over the environment uses, and update their locations.
+ while (env_use != nullptr && env_use->GetPosition() < range->GetStart()) {
+ env_use = env_use->GetNext();
+ }
+
+ while (env_use != nullptr && env_use->GetPosition() <= range->GetEnd()) {
+ DCHECK(current->CoversSlow(env_use->GetPosition())
+ || (env_use->GetPosition() == range->GetEnd()));
+ HEnvironment* environment = env_use->GetEnvironment();
+ environment->SetLocationAt(env_use->GetInputIndex(), source);
+ env_use = env_use->GetNext();
+ }
+
+ range = range->GetNext();
+ }
+
+ // If the next interval starts just after this one, and has a register,
+ // insert a move.
+ LiveInterval* next_sibling = current->GetNextSibling();
+ if (next_sibling != nullptr
+ && next_sibling->HasRegister()
+ && current->GetEnd() == next_sibling->GetStart()) {
+ Location destination = next_sibling->ToLocation();
+ InsertParallelMoveAt(current->GetEnd(), interval->GetDefinedBy(), source, destination);
+ }
+
+ for (SafepointPosition* safepoint_position = current->GetFirstSafepoint();
+ safepoint_position != nullptr;
+ safepoint_position = safepoint_position->GetNext()) {
+ DCHECK(current->CoversSlow(safepoint_position->GetPosition()));
+
+ LocationSummary* locations = safepoint_position->GetLocations();
+ if ((current->GetType() == Primitive::kPrimNot) && current->GetParent()->HasSpillSlot()) {
+ DCHECK(interval->GetDefinedBy()->IsActualObject())
+ << interval->GetDefinedBy()->DebugName()
+ << "@" << safepoint_position->GetInstruction()->DebugName();
+ locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
+ }
+
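+ // Record where the live value sits at this safepoint: registers join the
+ // live-register set (and reference-holding registers also set their bit for
+ // the GC), while stack slots and constants need no extra bookkeeping here.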
+ switch (source.GetKind()) {
+ case Location::kRegister: {
+ locations->AddLiveRegister(source);
+ if (kIsDebugBuild && locations->OnlyCallsOnSlowPath()) {
+ DCHECK_LE(locations->GetNumberOfLiveRegisters(),
+ max_safepoint_live_regs);
+ }
+ if (current->GetType() == Primitive::kPrimNot) {
+ DCHECK(interval->GetDefinedBy()->IsActualObject())
+ << interval->GetDefinedBy()->DebugName()
+ << "@" << safepoint_position->GetInstruction()->DebugName();
+ locations->SetRegisterBit(source.reg());
+ }
+ break;
+ }
+ case Location::kFpuRegister: {
+ locations->AddLiveRegister(source);
+ break;
+ }
+
+ case Location::kRegisterPair:
+ case Location::kFpuRegisterPair: {
+ locations->AddLiveRegister(source.ToLow());
+ locations->AddLiveRegister(source.ToHigh());
+ break;
+ }
+ case Location::kStackSlot: // Fall-through
+ case Location::kDoubleStackSlot: // Fall-through
+ case Location::kConstant: {
+ // Nothing to do.
+ break;
+ }
+ default: {
+ LOG(FATAL) << "Unexpected location for object";
+ }
+ }
+ }
+ current = next_sibling;
+ } while (current != nullptr);
+
+ if (kIsDebugBuild) {
+ // Following uses can only be synthesized uses.
+ while (use != nullptr) {
+ DCHECK(use->IsSynthesized());
+ use = use->GetNext();
+ }
+ }
+}
+
+static bool IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(
+ HInstruction* instruction) {
+ return instruction->GetBlock()->GetGraph()->HasIrreducibleLoops() &&
+ (instruction->IsConstant() || instruction->IsCurrentMethod());
+}
+
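+// Connects the sibling live at the end of `from` with the sibling live at the
+// start of `to`: if the two were allocated different locations, a parallel
+// move is inserted at the exit of `from` (when it has a single normal
+// successor) or at the entry of `to` (which then has a single predecessor).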
+void RegisterAllocationResolver::ConnectSplitSiblings(LiveInterval* interval,
+ HBasicBlock* from,
+ HBasicBlock* to) const {
+ if (interval->GetNextSibling() == nullptr) {
+ // Nothing to connect. The whole range was allocated to the same location.
+ return;
+ }
+
+ // Find the intervals that cover `from` and `to`.
+ size_t destination_position = to->GetLifetimeStart();
+ size_t source_position = from->GetLifetimeEnd() - 1;
+ LiveInterval* destination = interval->GetSiblingAt(destination_position);
+ LiveInterval* source = interval->GetSiblingAt(source_position);
+
+ if (destination == source) {
+ // Interval was not split.
+ return;
+ }
+
+ LiveInterval* parent = interval->GetParent();
+ HInstruction* defined_by = parent->GetDefinedBy();
+ if (codegen_->GetGraph()->HasIrreducibleLoops() &&
+ (destination == nullptr || !destination->CoversSlow(destination_position))) {
+ // Our live_in fixed point calculation has found that the instruction is live
+ // in the `to` block because it will eventually enter an irreducible loop. Our
+ // live interval computation however does not compute a fixed point, and
+ // therefore will not have a location for that instruction for `to`.
+ // Because the instruction is a constant or the ArtMethod, we don't need to
+ // do anything: it will be materialized in the irreducible loop.
+ DCHECK(IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(defined_by))
+ << defined_by->DebugName() << ":" << defined_by->GetId()
+ << " " << from->GetBlockId() << " -> " << to->GetBlockId();
+ return;
+ }
+
+ if (!destination->HasRegister()) {
+ // Values are eagerly spilled. Spill slot already contains appropriate value.
+ return;
+ }
+
+ Location location_source;
+ // `GetSiblingAt` returns the interval whose start and end cover `position`,
+ // but does not check whether the interval is inactive at that position.
+ // The only situation where the interval is inactive at that position is in the
+ // presence of irreducible loops for constants and ArtMethod.
+ if (codegen_->GetGraph()->HasIrreducibleLoops() &&
+ (source == nullptr || !source->CoversSlow(source_position))) {
+ DCHECK(IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(defined_by));
+ if (defined_by->IsConstant()) {
+ location_source = defined_by->GetLocations()->Out();
+ } else {
+ DCHECK(defined_by->IsCurrentMethod());
+ location_source = parent->NeedsTwoSpillSlots()
+ ? Location::DoubleStackSlot(parent->GetSpillSlot())
+ : Location::StackSlot(parent->GetSpillSlot());
+ }
+ } else {
+ DCHECK(source != nullptr);
+ DCHECK(source->CoversSlow(source_position));
+ DCHECK(destination->CoversSlow(destination_position));
+ location_source = source->ToLocation();
+ }
+
+ // If `from` has only one successor, we can put the moves at its exit.
+ // Otherwise we need to put them at the entry of `to`.
+ if (from->GetNormalSuccessors().size() == 1) {
+ InsertParallelMoveAtExitOf(from,
+ defined_by,
+ location_source,
+ destination->ToLocation());
+ } else {
+ DCHECK_EQ(to->GetPredecessors().size(), 1u);
+ InsertParallelMoveAtEntryOf(to,
+ defined_by,
+ location_source,
+ destination->ToLocation());
+ }
+}
+
+static bool IsValidDestination(Location destination) {
+ return destination.IsRegister()
+ || destination.IsRegisterPair()
+ || destination.IsFpuRegister()
+ || destination.IsFpuRegisterPair()
+ || destination.IsStackSlot()
+ || destination.IsDoubleStackSlot();
+}
+
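+// Adds a `source` -> `destination` move to `move`, splitting a long move into
+// its two 32-bit halves when the codegen requires it; note that only the
+// low-half move keeps the associated `instruction`.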
+void RegisterAllocationResolver::AddMove(HParallelMove* move,
+ Location source,
+ Location destination,
+ HInstruction* instruction,
+ Primitive::Type type) const {
+ if (type == Primitive::kPrimLong
+ && codegen_->ShouldSplitLongMoves()
+ // The parallel move resolver knows how to deal with long constants.
+ && !source.IsConstant()) {
+ move->AddMove(source.ToLow(), destination.ToLow(), Primitive::kPrimInt, instruction);
+ move->AddMove(source.ToHigh(), destination.ToHigh(), Primitive::kPrimInt, nullptr);
+ } else {
+ move->AddMove(source, destination, type, instruction);
+ }
+}
+
+void RegisterAllocationResolver::AddInputMoveFor(HInstruction* input,
+ HInstruction* user,
+ Location source,
+ Location destination) const {
+ if (source.Equals(destination)) return;
+
+ DCHECK(!user->IsPhi());
+
+ HInstruction* previous = user->GetPrevious();
+ HParallelMove* move = nullptr;
+ if (previous == nullptr
+ || !previous->IsParallelMove()
+ || previous->GetLifetimePosition() < user->GetLifetimePosition()) {
+ move = new (allocator_) HParallelMove(allocator_);
+ move->SetLifetimePosition(user->GetLifetimePosition());
+ user->GetBlock()->InsertInstructionBefore(move, user);
+ } else {
+ move = previous->AsParallelMove();
+ }
+ DCHECK_EQ(move->GetLifetimePosition(), user->GetLifetimePosition());
+ AddMove(move, source, destination, nullptr, input->GetType());
+}
+
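+// Lifetime positions come in pairs: an even position denotes the start of an
+// instruction and the following odd position its end, which is why
+// `GetInstructionFromPosition` below is queried with `position / 2`.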
+static bool IsInstructionStart(size_t position) {
+ return (position & 1) == 0;
+}
+
+static bool IsInstructionEnd(size_t position) {
+ return (position & 1) == 1;
+}
+
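+// Inserts a move at lifetime `position`. Depending on the parity of `position`
+// and on whether it maps to an instruction (block boundaries do not), the move
+// is placed after that instruction, before it, or at the top of the block,
+// reusing an existing HParallelMove at the same position when there is one.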
+void RegisterAllocationResolver::InsertParallelMoveAt(size_t position,
+ HInstruction* instruction,
+ Location source,
+ Location destination) const {
+ DCHECK(IsValidDestination(destination)) << destination;
+ if (source.Equals(destination)) return;
+
+ HInstruction* at = liveness_.GetInstructionFromPosition(position / 2);
+ HParallelMove* move;
+ if (at == nullptr) {
+ if (IsInstructionStart(position)) {
+ // Block boundary: do nothing; the connection of split siblings will handle it.
+ return;
+ } else {
+ // Move must happen before the first instruction of the block.
+ at = liveness_.GetInstructionFromPosition((position + 1) / 2);
+ // Note that parallel moves may have already been inserted, so we explicitly
+ // ask for the first instruction of the block: `GetInstructionFromPosition` does
+ // not contain the `HParallelMove` instructions.
+ at = at->GetBlock()->GetFirstInstruction();
+
+ if (at->GetLifetimePosition() < position) {
+ // We may insert moves for split siblings and phi spills at the beginning of the block.
+ // Since this is a different lifetime position, we need to go to the next instruction.
+ DCHECK(at->IsParallelMove());
+ at = at->GetNext();
+ }
+
+ if (at->GetLifetimePosition() != position) {
+ DCHECK_GT(at->GetLifetimePosition(), position);
+ move = new (allocator_) HParallelMove(allocator_);
+ move->SetLifetimePosition(position);
+ at->GetBlock()->InsertInstructionBefore(move, at);
+ } else {
+ DCHECK(at->IsParallelMove());
+ move = at->AsParallelMove();
+ }
+ }
+ } else if (IsInstructionEnd(position)) {
+ // Move must happen after the instruction.
+ DCHECK(!at->IsControlFlow());
+ move = at->GetNext()->AsParallelMove();
+ // This is a parallel move for connecting siblings in the same block. We need
+ // to differentiate it from moves for connecting blocks, and from input moves.
+ if (move == nullptr || move->GetLifetimePosition() > position) {
+ move = new (allocator_) HParallelMove(allocator_);
+ move->SetLifetimePosition(position);
+ at->GetBlock()->InsertInstructionBefore(move, at->GetNext());
+ }
+ } else {
+ // Move must happen before the instruction.
+ HInstruction* previous = at->GetPrevious();
+ if (previous == nullptr
+ || !previous->IsParallelMove()
+ || previous->GetLifetimePosition() != position) {
+ // If the previous is a parallel move, then its position must be lower
+ // than the given `position`: it was added just after the non-parallel
+ // move instruction that precedes `instruction`.
+ DCHECK(previous == nullptr
+ || !previous->IsParallelMove()
+ || previous->GetLifetimePosition() < position);
+ move = new (allocator_) HParallelMove(allocator_);
+ move->SetLifetimePosition(position);
+ at->GetBlock()->InsertInstructionBefore(move, at);
+ } else {
+ move = previous->AsParallelMove();
+ }
+ }
+ DCHECK_EQ(move->GetLifetimePosition(), position);
+ AddMove(move, source, destination, instruction, instruction->GetType());
+}
+
+void RegisterAllocationResolver::InsertParallelMoveAtExitOf(HBasicBlock* block,
+ HInstruction* instruction,
+ Location source,
+ Location destination) const {
+ DCHECK(IsValidDestination(destination)) << destination;
+ if (source.Equals(destination)) return;
+
+ DCHECK_EQ(block->GetNormalSuccessors().size(), 1u);
+ HInstruction* last = block->GetLastInstruction();
+ // We insert moves at exit for phi predecessors and connecting blocks.
+ // A block ending with an if or a packed switch cannot branch to a block
+ // with phis because we do not allow critical edges. Nor can it connect
+ // a split interval between two blocks; the move has to happen in the successor.
+ DCHECK(!last->IsIf() && !last->IsPackedSwitch());
+ HInstruction* previous = last->GetPrevious();
+ HParallelMove* move;
+ // This is a parallel move for connecting blocks. We need to differentiate
+ // it from moves for connecting siblings in the same block, and from output moves.
+ size_t position = last->GetLifetimePosition();
+ if (previous == nullptr || !previous->IsParallelMove()
+ || previous->AsParallelMove()->GetLifetimePosition() != position) {
+ move = new (allocator_) HParallelMove(allocator_);
+ move->SetLifetimePosition(position);
+ block->InsertInstructionBefore(move, last);
+ } else {
+ move = previous->AsParallelMove();
+ }
+ AddMove(move, source, destination, instruction, instruction->GetType());
+}
+
+void RegisterAllocationResolver::InsertParallelMoveAtEntryOf(HBasicBlock* block,
+ HInstruction* instruction,
+ Location source,
+ Location destination) const {
+ DCHECK(IsValidDestination(destination)) << destination;
+ if (source.Equals(destination)) return;
+
+ HInstruction* first = block->GetFirstInstruction();
+ HParallelMove* move = first->AsParallelMove();
+ size_t position = block->GetLifetimeStart();
+ // This is a parallel move for connecting blocks. We need to differentiate
+ // it from moves for connecting siblings in the same block, and from input moves.
+ if (move == nullptr || move->GetLifetimePosition() != position) {
+ move = new (allocator_) HParallelMove(allocator_);
+ move->SetLifetimePosition(position);
+ block->InsertInstructionBefore(move, first);
+ }
+ AddMove(move, source, destination, instruction, instruction->GetType());
+}
+
+void RegisterAllocationResolver::InsertMoveAfter(HInstruction* instruction,
+ Location source,
+ Location destination) const {
+ DCHECK(IsValidDestination(destination)) << destination;
+ if (source.Equals(destination)) return;
+
+ if (instruction->IsPhi()) {
+ InsertParallelMoveAtEntryOf(instruction->GetBlock(), instruction, source, destination);
+ return;
+ }
+
+ size_t position = instruction->GetLifetimePosition() + 1;
+ HParallelMove* move = instruction->GetNext()->AsParallelMove();
+ // This is a parallel move for moving the output of an instruction. We need
+ // to differentiate it from input moves, from moves for connecting siblings in
+ // the same block, and from moves for connecting blocks.
+ if (move == nullptr || move->GetLifetimePosition() != position) {
+ move = new (allocator_) HParallelMove(allocator_);
+ move->SetLifetimePosition(position);
+ instruction->GetBlock()->InsertInstructionBefore(move, instruction->GetNext());
+ }
+ AddMove(move, source, destination, instruction, instruction->GetType());
+}
+
+} // namespace art
diff --git a/compiler/optimizing/register_allocation_resolver.h b/compiler/optimizing/register_allocation_resolver.h
new file mode 100644
index 0000000..16a8a87
--- /dev/null
+++ b/compiler/optimizing/register_allocation_resolver.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATION_RESOLVER_H_
+#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATION_RESOLVER_H_
+
+#include "base/arena_containers.h"
+#include "base/value_object.h"
+#include "primitive.h"
+
+namespace art {
+
+class ArenaAllocator;
+class CodeGenerator;
+class HBasicBlock;
+class HInstruction;
+class HParallelMove;
+class LiveInterval;
+class Location;
+class SsaLivenessAnalysis;
+
+/**
+ * Reconciles the locations assigned to live intervals with the location
+ * summary of each instruction, and inserts moves to resolve split intervals,
+ * nonlinear control flow, and phi inputs.
+ */
+class RegisterAllocationResolver : ValueObject {
+ public:
+ RegisterAllocationResolver(ArenaAllocator* allocator,
+ CodeGenerator* codegen,
+ const SsaLivenessAnalysis& liveness);
+
+ void Resolve(size_t max_safepoint_live_core_regs,
+ size_t max_safepoint_live_fp_regs,
+ size_t reserved_out_slots, // Includes slot(s) for the art method.
+ size_t int_spill_slots,
+ size_t long_spill_slots,
+ size_t float_spill_slots,
+ size_t double_spill_slots,
+ size_t catch_phi_spill_slots,
+ const ArenaVector<LiveInterval*>& temp_intervals);
+
+ private:
+ // Connect adjacent siblings within blocks, and resolve inputs along the way.
+ // Uses max_safepoint_live_regs to check that we did not underestimate the
+ // number of live registers at safepoints.
+ void ConnectSiblings(LiveInterval* interval, size_t max_safepoint_live_regs);
+
+ // Connect siblings between block entries and exits.
+ void ConnectSplitSiblings(LiveInterval* interval, HBasicBlock* from, HBasicBlock* to) const;
+
+ // Helper methods for inserting parallel moves in the graph.
+ void InsertParallelMoveAtExitOf(HBasicBlock* block,
+ HInstruction* instruction,
+ Location source,
+ Location destination) const;
+ void InsertParallelMoveAtEntryOf(HBasicBlock* block,
+ HInstruction* instruction,
+ Location source,
+ Location destination) const;
+ void InsertMoveAfter(HInstruction* instruction, Location source, Location destination) const;
+ void AddInputMoveFor(HInstruction* input,
+ HInstruction* user,
+ Location source,
+ Location destination) const;
+ void InsertParallelMoveAt(size_t position,
+ HInstruction* instruction,
+ Location source,
+ Location destination) const;
+ void AddMove(HParallelMove* move,
+ Location source,
+ Location destination,
+ HInstruction* instruction,
+ Primitive::Type type) const;
+
+ ArenaAllocator* const allocator_;
+ CodeGenerator* const codegen_;
+ const SsaLivenessAnalysis& liveness_;
+
+ DISALLOW_COPY_AND_ASSIGN(RegisterAllocationResolver);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATION_RESOLVER_H_
diff --git a/compiler/optimizing/register_allocator_linear_scan.cc b/compiler/optimizing/register_allocator_linear_scan.cc
index 90c845f..c1797b0 100644
--- a/compiler/optimizing/register_allocator_linear_scan.cc
+++ b/compiler/optimizing/register_allocator_linear_scan.cc
@@ -21,6 +21,7 @@
#include "base/bit_vector-inl.h"
#include "code_generator.h"
+#include "register_allocation_resolver.h"
#include "ssa_liveness_analysis.h"
namespace art {
@@ -102,7 +103,16 @@
void RegisterAllocator::AllocateRegisters() {
AllocateRegistersInternal();
- Resolve();
+ RegisterAllocationResolver(allocator_, codegen_, liveness_)
+ .Resolve(maximum_number_of_live_core_registers_,
+ maximum_number_of_live_fp_registers_,
+ reserved_out_slots_,
+ int_spill_slots_.size(),
+ long_spill_slots_.size(),
+ float_spill_slots_.size(),
+ double_spill_slots_.size(),
+ catch_phi_spill_slots_,
+ temp_intervals_);
if (kIsDebugBuild) {
processing_core_registers_ = true;
@@ -1380,15 +1390,6 @@
parent->SetSpillSlot(slot);
}
-static bool IsValidDestination(Location destination) {
- return destination.IsRegister()
- || destination.IsRegisterPair()
- || destination.IsFpuRegister()
- || destination.IsFpuRegisterPair()
- || destination.IsStackSlot()
- || destination.IsDoubleStackSlot();
-}
-
void RegisterAllocator::AllocateSpillSlotForCatchPhi(HPhi* phi) {
LiveInterval* interval = phi->GetLiveInterval();
@@ -1411,601 +1412,4 @@
}
}
-void RegisterAllocator::AddMove(HParallelMove* move,
- Location source,
- Location destination,
- HInstruction* instruction,
- Primitive::Type type) const {
- if (type == Primitive::kPrimLong
- && codegen_->ShouldSplitLongMoves()
- // The parallel move resolver knows how to deal with long constants.
- && !source.IsConstant()) {
- move->AddMove(source.ToLow(), destination.ToLow(), Primitive::kPrimInt, instruction);
- move->AddMove(source.ToHigh(), destination.ToHigh(), Primitive::kPrimInt, nullptr);
- } else {
- move->AddMove(source, destination, type, instruction);
- }
-}
-
-void RegisterAllocator::AddInputMoveFor(HInstruction* input,
- HInstruction* user,
- Location source,
- Location destination) const {
- if (source.Equals(destination)) return;
-
- DCHECK(!user->IsPhi());
-
- HInstruction* previous = user->GetPrevious();
- HParallelMove* move = nullptr;
- if (previous == nullptr
- || !previous->IsParallelMove()
- || previous->GetLifetimePosition() < user->GetLifetimePosition()) {
- move = new (allocator_) HParallelMove(allocator_);
- move->SetLifetimePosition(user->GetLifetimePosition());
- user->GetBlock()->InsertInstructionBefore(move, user);
- } else {
- move = previous->AsParallelMove();
- }
- DCHECK_EQ(move->GetLifetimePosition(), user->GetLifetimePosition());
- AddMove(move, source, destination, nullptr, input->GetType());
-}
-
-static bool IsInstructionStart(size_t position) {
- return (position & 1) == 0;
-}
-
-static bool IsInstructionEnd(size_t position) {
- return (position & 1) == 1;
-}
-
-void RegisterAllocator::InsertParallelMoveAt(size_t position,
- HInstruction* instruction,
- Location source,
- Location destination) const {
- DCHECK(IsValidDestination(destination)) << destination;
- if (source.Equals(destination)) return;
-
- HInstruction* at = liveness_.GetInstructionFromPosition(position / 2);
- HParallelMove* move;
- if (at == nullptr) {
- if (IsInstructionStart(position)) {
- // Block boundary, don't do anything the connection of split siblings will handle it.
- return;
- } else {
- // Move must happen before the first instruction of the block.
- at = liveness_.GetInstructionFromPosition((position + 1) / 2);
- // Note that parallel moves may have already been inserted, so we explicitly
- // ask for the first instruction of the block: `GetInstructionFromPosition` does
- // not contain the `HParallelMove` instructions.
- at = at->GetBlock()->GetFirstInstruction();
-
- if (at->GetLifetimePosition() < position) {
- // We may insert moves for split siblings and phi spills at the beginning of the block.
- // Since this is a different lifetime position, we need to go to the next instruction.
- DCHECK(at->IsParallelMove());
- at = at->GetNext();
- }
-
- if (at->GetLifetimePosition() != position) {
- DCHECK_GT(at->GetLifetimePosition(), position);
- move = new (allocator_) HParallelMove(allocator_);
- move->SetLifetimePosition(position);
- at->GetBlock()->InsertInstructionBefore(move, at);
- } else {
- DCHECK(at->IsParallelMove());
- move = at->AsParallelMove();
- }
- }
- } else if (IsInstructionEnd(position)) {
- // Move must happen after the instruction.
- DCHECK(!at->IsControlFlow());
- move = at->GetNext()->AsParallelMove();
- // This is a parallel move for connecting siblings in a same block. We need to
- // differentiate it with moves for connecting blocks, and input moves.
- if (move == nullptr || move->GetLifetimePosition() > position) {
- move = new (allocator_) HParallelMove(allocator_);
- move->SetLifetimePosition(position);
- at->GetBlock()->InsertInstructionBefore(move, at->GetNext());
- }
- } else {
- // Move must happen before the instruction.
- HInstruction* previous = at->GetPrevious();
- if (previous == nullptr
- || !previous->IsParallelMove()
- || previous->GetLifetimePosition() != position) {
- // If the previous is a parallel move, then its position must be lower
- // than the given `position`: it was added just after the non-parallel
- // move instruction that precedes `instruction`.
- DCHECK(previous == nullptr
- || !previous->IsParallelMove()
- || previous->GetLifetimePosition() < position);
- move = new (allocator_) HParallelMove(allocator_);
- move->SetLifetimePosition(position);
- at->GetBlock()->InsertInstructionBefore(move, at);
- } else {
- move = previous->AsParallelMove();
- }
- }
- DCHECK_EQ(move->GetLifetimePosition(), position);
- AddMove(move, source, destination, instruction, instruction->GetType());
-}
-
-void RegisterAllocator::InsertParallelMoveAtExitOf(HBasicBlock* block,
- HInstruction* instruction,
- Location source,
- Location destination) const {
- DCHECK(IsValidDestination(destination)) << destination;
- if (source.Equals(destination)) return;
-
- DCHECK_EQ(block->GetNormalSuccessors().size(), 1u);
- HInstruction* last = block->GetLastInstruction();
- // We insert moves at exit for phi predecessors and connecting blocks.
- // A block ending with an if or a packed switch cannot branch to a block
- // with phis because we do not allow critical edges. It can also not connect
- // a split interval between two blocks: the move has to happen in the successor.
- DCHECK(!last->IsIf() && !last->IsPackedSwitch());
- HInstruction* previous = last->GetPrevious();
- HParallelMove* move;
- // This is a parallel move for connecting blocks. We need to differentiate
- // it with moves for connecting siblings in a same block, and output moves.
- size_t position = last->GetLifetimePosition();
- if (previous == nullptr || !previous->IsParallelMove()
- || previous->AsParallelMove()->GetLifetimePosition() != position) {
- move = new (allocator_) HParallelMove(allocator_);
- move->SetLifetimePosition(position);
- block->InsertInstructionBefore(move, last);
- } else {
- move = previous->AsParallelMove();
- }
- AddMove(move, source, destination, instruction, instruction->GetType());
-}
-
-void RegisterAllocator::InsertParallelMoveAtEntryOf(HBasicBlock* block,
- HInstruction* instruction,
- Location source,
- Location destination) const {
- DCHECK(IsValidDestination(destination)) << destination;
- if (source.Equals(destination)) return;
-
- HInstruction* first = block->GetFirstInstruction();
- HParallelMove* move = first->AsParallelMove();
- size_t position = block->GetLifetimeStart();
- // This is a parallel move for connecting blocks. We need to differentiate
- // it with moves for connecting siblings in a same block, and input moves.
- if (move == nullptr || move->GetLifetimePosition() != position) {
- move = new (allocator_) HParallelMove(allocator_);
- move->SetLifetimePosition(position);
- block->InsertInstructionBefore(move, first);
- }
- AddMove(move, source, destination, instruction, instruction->GetType());
-}
-
-void RegisterAllocator::InsertMoveAfter(HInstruction* instruction,
- Location source,
- Location destination) const {
- DCHECK(IsValidDestination(destination)) << destination;
- if (source.Equals(destination)) return;
-
- if (instruction->IsPhi()) {
- InsertParallelMoveAtEntryOf(instruction->GetBlock(), instruction, source, destination);
- return;
- }
-
- size_t position = instruction->GetLifetimePosition() + 1;
- HParallelMove* move = instruction->GetNext()->AsParallelMove();
- // This is a parallel move for moving the output of an instruction. We need
- // to differentiate with input moves, moves for connecting siblings in a
- // and moves for connecting blocks.
- if (move == nullptr || move->GetLifetimePosition() != position) {
- move = new (allocator_) HParallelMove(allocator_);
- move->SetLifetimePosition(position);
- instruction->GetBlock()->InsertInstructionBefore(move, instruction->GetNext());
- }
- AddMove(move, source, destination, instruction, instruction->GetType());
-}
-
-void RegisterAllocator::ConnectSiblings(LiveInterval* interval) {
- LiveInterval* current = interval;
- if (current->HasSpillSlot()
- && current->HasRegister()
- // Currently, we spill unconditionnally the current method in the code generators.
- && !interval->GetDefinedBy()->IsCurrentMethod()) {
- // We spill eagerly, so move must be at definition.
- InsertMoveAfter(interval->GetDefinedBy(),
- interval->ToLocation(),
- interval->NeedsTwoSpillSlots()
- ? Location::DoubleStackSlot(interval->GetParent()->GetSpillSlot())
- : Location::StackSlot(interval->GetParent()->GetSpillSlot()));
- }
- UsePosition* use = current->GetFirstUse();
- UsePosition* env_use = current->GetFirstEnvironmentUse();
-
- // Walk over all siblings, updating locations of use positions, and
- // connecting them when they are adjacent.
- do {
- Location source = current->ToLocation();
-
- // Walk over all uses covered by this interval, and update the location
- // information.
-
- LiveRange* range = current->GetFirstRange();
- while (range != nullptr) {
- while (use != nullptr && use->GetPosition() < range->GetStart()) {
- DCHECK(use->IsSynthesized());
- use = use->GetNext();
- }
- while (use != nullptr && use->GetPosition() <= range->GetEnd()) {
- DCHECK(!use->GetIsEnvironment());
- DCHECK(current->CoversSlow(use->GetPosition()) || (use->GetPosition() == range->GetEnd()));
- if (!use->IsSynthesized()) {
- LocationSummary* locations = use->GetUser()->GetLocations();
- Location expected_location = locations->InAt(use->GetInputIndex());
- // The expected (actual) location may be invalid in case the input is unused. Currently
- // this only happens for intrinsics.
- if (expected_location.IsValid()) {
- if (expected_location.IsUnallocated()) {
- locations->SetInAt(use->GetInputIndex(), source);
- } else if (!expected_location.IsConstant()) {
- AddInputMoveFor(interval->GetDefinedBy(), use->GetUser(), source, expected_location);
- }
- } else {
- DCHECK(use->GetUser()->IsInvoke());
- DCHECK(use->GetUser()->AsInvoke()->GetIntrinsic() != Intrinsics::kNone);
- }
- }
- use = use->GetNext();
- }
-
- // Walk over the environment uses, and update their locations.
- while (env_use != nullptr && env_use->GetPosition() < range->GetStart()) {
- env_use = env_use->GetNext();
- }
-
- while (env_use != nullptr && env_use->GetPosition() <= range->GetEnd()) {
- DCHECK(current->CoversSlow(env_use->GetPosition())
- || (env_use->GetPosition() == range->GetEnd()));
- HEnvironment* environment = env_use->GetEnvironment();
- environment->SetLocationAt(env_use->GetInputIndex(), source);
- env_use = env_use->GetNext();
- }
-
- range = range->GetNext();
- }
-
- // If the next interval starts just after this one, and has a register,
- // insert a move.
- LiveInterval* next_sibling = current->GetNextSibling();
- if (next_sibling != nullptr
- && next_sibling->HasRegister()
- && current->GetEnd() == next_sibling->GetStart()) {
- Location destination = next_sibling->ToLocation();
- InsertParallelMoveAt(current->GetEnd(), interval->GetDefinedBy(), source, destination);
- }
-
- for (SafepointPosition* safepoint_position = current->GetFirstSafepoint();
- safepoint_position != nullptr;
- safepoint_position = safepoint_position->GetNext()) {
- DCHECK(current->CoversSlow(safepoint_position->GetPosition()));
-
- LocationSummary* locations = safepoint_position->GetLocations();
- if ((current->GetType() == Primitive::kPrimNot) && current->GetParent()->HasSpillSlot()) {
- DCHECK(interval->GetDefinedBy()->IsActualObject())
- << interval->GetDefinedBy()->DebugName()
- << "@" << safepoint_position->GetInstruction()->DebugName();
- locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
- }
-
- switch (source.GetKind()) {
- case Location::kRegister: {
- locations->AddLiveRegister(source);
- if (kIsDebugBuild && locations->OnlyCallsOnSlowPath()) {
- DCHECK_LE(locations->GetNumberOfLiveRegisters(),
- maximum_number_of_live_core_registers_ +
- maximum_number_of_live_fp_registers_);
- }
- if (current->GetType() == Primitive::kPrimNot) {
- DCHECK(interval->GetDefinedBy()->IsActualObject())
- << interval->GetDefinedBy()->DebugName()
- << "@" << safepoint_position->GetInstruction()->DebugName();
- locations->SetRegisterBit(source.reg());
- }
- break;
- }
- case Location::kFpuRegister: {
- locations->AddLiveRegister(source);
- break;
- }
-
- case Location::kRegisterPair:
- case Location::kFpuRegisterPair: {
- locations->AddLiveRegister(source.ToLow());
- locations->AddLiveRegister(source.ToHigh());
- break;
- }
- case Location::kStackSlot: // Fall-through
- case Location::kDoubleStackSlot: // Fall-through
- case Location::kConstant: {
- // Nothing to do.
- break;
- }
- default: {
- LOG(FATAL) << "Unexpected location for object";
- }
- }
- }
- current = next_sibling;
- } while (current != nullptr);
-
- if (kIsDebugBuild) {
- // Following uses can only be synthesized uses.
- while (use != nullptr) {
- DCHECK(use->IsSynthesized());
- use = use->GetNext();
- }
- }
-}
-
-static bool IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(
- HInstruction* instruction) {
- return instruction->GetBlock()->GetGraph()->HasIrreducibleLoops() &&
- (instruction->IsConstant() || instruction->IsCurrentMethod());
-}
-
-void RegisterAllocator::ConnectSplitSiblings(LiveInterval* interval,
- HBasicBlock* from,
- HBasicBlock* to) const {
- if (interval->GetNextSibling() == nullptr) {
- // Nothing to connect. The whole range was allocated to the same location.
- return;
- }
-
- // Find the intervals that cover `from` and `to`.
- size_t destination_position = to->GetLifetimeStart();
- size_t source_position = from->GetLifetimeEnd() - 1;
- LiveInterval* destination = interval->GetSiblingAt(destination_position);
- LiveInterval* source = interval->GetSiblingAt(source_position);
-
- if (destination == source) {
- // Interval was not split.
- return;
- }
-
- LiveInterval* parent = interval->GetParent();
- HInstruction* defined_by = parent->GetDefinedBy();
- if (codegen_->GetGraph()->HasIrreducibleLoops() &&
- (destination == nullptr || !destination->CoversSlow(destination_position))) {
- // Our live_in fixed point calculation has found that the instruction is live
- // in the `to` block because it will eventually enter an irreducible loop. Our
- // live interval computation however does not compute a fixed point, and
- // therefore will not have a location for that instruction for `to`.
- // Because the instruction is a constant or the ArtMethod, we don't need to
- // do anything: it will be materialized in the irreducible loop.
- DCHECK(IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(defined_by))
- << defined_by->DebugName() << ":" << defined_by->GetId()
- << " " << from->GetBlockId() << " -> " << to->GetBlockId();
- return;
- }
-
- if (!destination->HasRegister()) {
- // Values are eagerly spilled. Spill slot already contains appropriate value.
- return;
- }
-
- Location location_source;
- // `GetSiblingAt` returns the interval whose start and end cover `position`,
- // but does not check whether the interval is inactive at that position.
- // The only situation where the interval is inactive at that position is in the
- // presence of irreducible loops for constants and ArtMethod.
- if (codegen_->GetGraph()->HasIrreducibleLoops() &&
- (source == nullptr || !source->CoversSlow(source_position))) {
- DCHECK(IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(defined_by));
- if (defined_by->IsConstant()) {
- location_source = defined_by->GetLocations()->Out();
- } else {
- DCHECK(defined_by->IsCurrentMethod());
- location_source = parent->NeedsTwoSpillSlots()
- ? Location::DoubleStackSlot(parent->GetSpillSlot())
- : Location::StackSlot(parent->GetSpillSlot());
- }
- } else {
- DCHECK(source != nullptr);
- DCHECK(source->CoversSlow(source_position));
- DCHECK(destination->CoversSlow(destination_position));
- location_source = source->ToLocation();
- }
-
- // If `from` has only one successor, we can put the moves at the exit of it. Otherwise
- // we need to put the moves at the entry of `to`.
- if (from->GetNormalSuccessors().size() == 1) {
- InsertParallelMoveAtExitOf(from,
- defined_by,
- location_source,
- destination->ToLocation());
- } else {
- DCHECK_EQ(to->GetPredecessors().size(), 1u);
- InsertParallelMoveAtEntryOf(to,
- defined_by,
- location_source,
- destination->ToLocation());
- }
-}
-
-void RegisterAllocator::Resolve() {
- codegen_->InitializeCodeGeneration(GetNumberOfSpillSlots(),
- maximum_number_of_live_core_registers_,
- maximum_number_of_live_fp_registers_,
- reserved_out_slots_,
- codegen_->GetGraph()->GetLinearOrder());
-
- // Adjust the Out Location of instructions.
- // TODO: Use pointers of Location inside LiveInterval to avoid doing another iteration.
- for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
- HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
- LiveInterval* current = instruction->GetLiveInterval();
- LocationSummary* locations = instruction->GetLocations();
- Location location = locations->Out();
- if (instruction->IsParameterValue()) {
- // Now that we know the frame size, adjust the parameter's location.
- if (location.IsStackSlot()) {
- location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
- current->SetSpillSlot(location.GetStackIndex());
- locations->UpdateOut(location);
- } else if (location.IsDoubleStackSlot()) {
- location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
- current->SetSpillSlot(location.GetStackIndex());
- locations->UpdateOut(location);
- } else if (current->HasSpillSlot()) {
- current->SetSpillSlot(current->GetSpillSlot() + codegen_->GetFrameSize());
- }
- } else if (instruction->IsCurrentMethod()) {
- // The current method is always at offset 0.
- DCHECK(!current->HasSpillSlot() || (current->GetSpillSlot() == 0));
- } else if (instruction->IsPhi() && instruction->AsPhi()->IsCatchPhi()) {
- DCHECK(current->HasSpillSlot());
- size_t slot = current->GetSpillSlot()
- + GetNumberOfSpillSlots()
- + reserved_out_slots_
- - catch_phi_spill_slots_;
- current->SetSpillSlot(slot * kVRegSize);
- } else if (current->HasSpillSlot()) {
- // Adjust the stack slot, now that we know the number of them for each type.
- // The way this implementation lays out the stack is the following:
- // [parameter slots ]
- // [catch phi spill slots ]
- // [double spill slots ]
- // [long spill slots ]
- // [float spill slots ]
- // [int/ref values ]
- // [maximum out values ] (number of arguments for calls)
- // [art method ].
- size_t slot = current->GetSpillSlot();
- switch (current->GetType()) {
- case Primitive::kPrimDouble:
- slot += long_spill_slots_.size();
- FALLTHROUGH_INTENDED;
- case Primitive::kPrimLong:
- slot += float_spill_slots_.size();
- FALLTHROUGH_INTENDED;
- case Primitive::kPrimFloat:
- slot += int_spill_slots_.size();
- FALLTHROUGH_INTENDED;
- case Primitive::kPrimNot:
- case Primitive::kPrimInt:
- case Primitive::kPrimChar:
- case Primitive::kPrimByte:
- case Primitive::kPrimBoolean:
- case Primitive::kPrimShort:
- slot += reserved_out_slots_;
- break;
- case Primitive::kPrimVoid:
- LOG(FATAL) << "Unexpected type for interval " << current->GetType();
- }
- current->SetSpillSlot(slot * kVRegSize);
- }
-
- Location source = current->ToLocation();
-
- if (location.IsUnallocated()) {
- if (location.GetPolicy() == Location::kSameAsFirstInput) {
- if (locations->InAt(0).IsUnallocated()) {
- locations->SetInAt(0, source);
- } else {
- DCHECK(locations->InAt(0).Equals(source));
- }
- }
- locations->UpdateOut(source);
- } else {
- DCHECK(source.Equals(location));
- }
- }
-
- // Connect siblings.
- for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
- HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
- ConnectSiblings(instruction->GetLiveInterval());
- }
-
- // Resolve non-linear control flow across branches. Order does not matter.
- for (HLinearOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
- HBasicBlock* block = it.Current();
- if (block->IsCatchBlock() ||
- (block->IsLoopHeader() && block->GetLoopInformation()->IsIrreducible())) {
- // Instructions live at the top of catch blocks or irreducible loop header
- // were forced to spill.
- if (kIsDebugBuild) {
- BitVector* live = liveness_.GetLiveInSet(*block);
- for (uint32_t idx : live->Indexes()) {
- LiveInterval* interval = liveness_.GetInstructionFromSsaIndex(idx)->GetLiveInterval();
- LiveInterval* sibling = interval->GetSiblingAt(block->GetLifetimeStart());
- // `GetSiblingAt` returns the sibling that contains a position, but there could be
- // a lifetime hole in it. `CoversSlow` returns whether the interval is live at that
- // position.
- if ((sibling != nullptr) && sibling->CoversSlow(block->GetLifetimeStart())) {
- DCHECK(!sibling->HasRegister());
- }
- }
- }
- } else {
- BitVector* live = liveness_.GetLiveInSet(*block);
- for (uint32_t idx : live->Indexes()) {
- LiveInterval* interval = liveness_.GetInstructionFromSsaIndex(idx)->GetLiveInterval();
- for (HBasicBlock* predecessor : block->GetPredecessors()) {
- ConnectSplitSiblings(interval, predecessor, block);
- }
- }
- }
- }
-
- // Resolve phi inputs. Order does not matter.
- for (HLinearOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
- HBasicBlock* current = it.Current();
- if (current->IsCatchBlock()) {
- // Catch phi values are set at runtime by the exception delivery mechanism.
- } else {
- for (HInstructionIterator inst_it(current->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
- HInstruction* phi = inst_it.Current();
- for (size_t i = 0, e = current->GetPredecessors().size(); i < e; ++i) {
- HBasicBlock* predecessor = current->GetPredecessors()[i];
- DCHECK_EQ(predecessor->GetNormalSuccessors().size(), 1u);
- HInstruction* input = phi->InputAt(i);
- Location source = input->GetLiveInterval()->GetLocationAt(
- predecessor->GetLifetimeEnd() - 1);
- Location destination = phi->GetLiveInterval()->ToLocation();
- InsertParallelMoveAtExitOf(predecessor, phi, source, destination);
- }
- }
- }
- }
-
- // Assign temp locations.
- for (LiveInterval* temp : temp_intervals_) {
- if (temp->IsHighInterval()) {
- // High intervals can be skipped, they are already handled by the low interval.
- continue;
- }
- HInstruction* at = liveness_.GetTempUser(temp);
- size_t temp_index = liveness_.GetTempIndex(temp);
- LocationSummary* locations = at->GetLocations();
- switch (temp->GetType()) {
- case Primitive::kPrimInt:
- locations->SetTempAt(temp_index, Location::RegisterLocation(temp->GetRegister()));
- break;
-
- case Primitive::kPrimDouble:
- if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) {
- Location location = Location::FpuRegisterPairLocation(
- temp->GetRegister(), temp->GetHighInterval()->GetRegister());
- locations->SetTempAt(temp_index, location);
- } else {
- locations->SetTempAt(temp_index, Location::FpuRegisterLocation(temp->GetRegister()));
- }
- break;
-
- default:
- LOG(FATAL) << "Unexpected type for temporary location "
- << temp->GetType();
- }
- }
-}
-
} // namespace art
diff --git a/compiler/optimizing/register_allocator_linear_scan.h b/compiler/optimizing/register_allocator_linear_scan.h
index 1a1248b..f32a4db 100644
--- a/compiler/optimizing/register_allocator_linear_scan.h
+++ b/compiler/optimizing/register_allocator_linear_scan.h
@@ -84,7 +84,6 @@
void LinearScan();
bool TryAllocateFreeReg(LiveInterval* interval);
bool AllocateBlockedReg(LiveInterval* interval);
- void Resolve();
// Add `interval` in the given sorted list.
static void AddSorted(ArenaVector<LiveInterval*>* array, LiveInterval* interval);
@@ -112,37 +111,6 @@
// of lifetime positions and ascending vreg numbers for correctness.
void AllocateSpillSlotForCatchPhi(HPhi* phi);
- // Connect adjacent siblings within blocks.
- void ConnectSiblings(LiveInterval* interval);
-
- // Connect siblings between block entries and exits.
- void ConnectSplitSiblings(LiveInterval* interval, HBasicBlock* from, HBasicBlock* to) const;
-
- // Helper methods to insert parallel moves in the graph.
- void InsertParallelMoveAtExitOf(HBasicBlock* block,
- HInstruction* instruction,
- Location source,
- Location destination) const;
- void InsertParallelMoveAtEntryOf(HBasicBlock* block,
- HInstruction* instruction,
- Location source,
- Location destination) const;
- void InsertMoveAfter(HInstruction* instruction, Location source, Location destination) const;
- void AddInputMoveFor(HInstruction* input,
- HInstruction* user,
- Location source,
- Location destination) const;
- void InsertParallelMoveAt(size_t position,
- HInstruction* instruction,
- Location source,
- Location destination) const;
-
- void AddMove(HParallelMove* move,
- Location source,
- Location destination,
- HInstruction* instruction,
- Primitive::Type type) const;
-
// Helper methods.
void AllocateRegistersInternal();
void ProcessInstruction(HInstruction* instruction);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 8da1493..a6d62a9 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -121,6 +121,10 @@
static constexpr size_t kNativeAllocationHistogramBuckets = 16;
+// Extra amount added to the heap growth multiplier. Used to adjust the GC ergonomics for the
+// read barrier config.
+static constexpr double kExtraHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0;
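+// For example, a foreground heap growth multiplier of 2.0 effectively becomes
+// 3.0 under the read barrier config.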
+
static inline bool CareAboutPauseTimes() {
return Runtime::Current()->InJankPerceptibleProcessState();
}
@@ -220,7 +224,8 @@
min_free_(min_free),
max_free_(max_free),
target_utilization_(target_utilization),
- foreground_heap_growth_multiplier_(foreground_heap_growth_multiplier),
+ foreground_heap_growth_multiplier_(
+ foreground_heap_growth_multiplier + kExtraHeapGrowthMultiplier),
total_wait_time_(0),
verify_object_mode_(kVerifyObjectModeDisabled),
disable_moving_gc_count_(0),