Use ScopedArenaAllocator for register allocation.
Memory needed to compile the two most expensive methods for
aosp_angler-userdebug boot image:
BatteryStats.dumpCheckinLocked() : 25.1MiB -> 21.1MiB
BatteryStats.dumpLocked(): 49.6MiB -> 42.0MiB
This is because all the memory previously used by the Scheduler
is reused by the register allocator; the register allocator
has a higher peak usage of the ArenaStack than the Scheduler does.
And continue the "arena"->"allocator" renaming.
Test: m test-art-host-gtest
Test: testrunner.py --host
Bug: 64312607
Change-Id: Idfd79a9901552b5147ec0bf591cb38120de86b01
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 7e1ec70..dfc1633 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -146,8 +146,7 @@
environment->SetRawEnvAt(1, mul);
mul->AddEnvUseAt(div_check->GetEnvironment(), 1);
- ScopedArenaAllocator allocator(graph_->GetArenaStack());
- SchedulingGraph scheduling_graph(scheduler, &allocator);
+ SchedulingGraph scheduling_graph(scheduler, GetScopedAllocator());
// Instructions must be inserted in reverse order into the scheduling graph.
for (HInstruction* instr : ReverseRange(block_instructions)) {
scheduling_graph.AddNode(instr);
@@ -273,8 +272,7 @@
entry->AddInstruction(instr);
}
- ScopedArenaAllocator allocator(graph_->GetArenaStack());
- SchedulingGraph scheduling_graph(scheduler, &allocator);
+ SchedulingGraph scheduling_graph(scheduler, GetScopedAllocator());
HeapLocationCollector heap_location_collector(graph_);
heap_location_collector.VisitBasicBlock(entry);
heap_location_collector.BuildAliasingMatrix();
@@ -352,15 +350,13 @@
#if defined(ART_ENABLE_CODEGEN_arm64)
TEST_F(SchedulerTest, DependencyGraphAndSchedulerARM64) {
CriticalPathSchedulingNodeSelector critical_path_selector;
- ScopedArenaAllocator allocator(GetArenaStack());
- arm64::HSchedulerARM64 scheduler(&allocator, &critical_path_selector);
+ arm64::HSchedulerARM64 scheduler(GetScopedAllocator(), &critical_path_selector);
TestBuildDependencyGraphAndSchedule(&scheduler);
}
TEST_F(SchedulerTest, ArrayAccessAliasingARM64) {
CriticalPathSchedulingNodeSelector critical_path_selector;
- ScopedArenaAllocator allocator(GetArenaStack());
- arm64::HSchedulerARM64 scheduler(&allocator, &critical_path_selector);
+ arm64::HSchedulerARM64 scheduler(GetScopedAllocator(), &critical_path_selector);
TestDependencyGraphOnAliasingArrayAccesses(&scheduler);
}
#endif
@@ -369,16 +365,14 @@
TEST_F(SchedulerTest, DependencyGraphAndSchedulerARM) {
CriticalPathSchedulingNodeSelector critical_path_selector;
arm::SchedulingLatencyVisitorARM arm_latency_visitor(/*CodeGenerator*/ nullptr);
- ScopedArenaAllocator allocator(GetArenaStack());
- arm::HSchedulerARM scheduler(&allocator, &critical_path_selector, &arm_latency_visitor);
+ arm::HSchedulerARM scheduler(GetScopedAllocator(), &critical_path_selector, &arm_latency_visitor);
TestBuildDependencyGraphAndSchedule(&scheduler);
}
TEST_F(SchedulerTest, ArrayAccessAliasingARM) {
CriticalPathSchedulingNodeSelector critical_path_selector;
arm::SchedulingLatencyVisitorARM arm_latency_visitor(/*CodeGenerator*/ nullptr);
- ScopedArenaAllocator allocator(GetArenaStack());
- arm::HSchedulerARM scheduler(&allocator, &critical_path_selector, &arm_latency_visitor);
+ arm::HSchedulerARM scheduler(GetScopedAllocator(), &critical_path_selector, &arm_latency_visitor);
TestDependencyGraphOnAliasingArrayAccesses(&scheduler);
}
#endif