/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string>

#include "prepare_for_register_allocation.h"
#include "scheduler.h"

#ifdef ART_ENABLE_CODEGEN_arm64
#include "scheduler_arm64.h"
#endif

#ifdef ART_ENABLE_CODEGEN_arm
#include "scheduler_arm.h"
#endif

namespace art {

void SchedulingGraph::AddDependency(SchedulingNode* node,
                                    SchedulingNode* dependency,
                                    bool is_data_dependency) {
  if (node == nullptr || dependency == nullptr) {
    // A `nullptr` node indicates an instruction out of scheduling range (e.g.
    // in another block), so we do not need to add a dependency edge to the
    // graph.
    return;
  }

  if (is_data_dependency) {
    if (!HasImmediateDataDependency(node, dependency)) {
      node->AddDataPredecessor(dependency);
    }
  } else if (!HasImmediateOtherDependency(node, dependency)) {
    node->AddOtherPredecessor(dependency);
  }
}

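// Illustrative note: the `AddDataDependency` and `AddOtherDependency` helpers
// used below are thin wrappers around `AddDependency` (presumably defined in
// scheduler.h) that pick the edge kind. A hypothetical usage sketch:
//   AddDataDependency(user_node, def_node);        // User consumes def's value.
//   AddOtherDependency(later_node, earlier_node);  // Pure ordering constraint.
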
static bool MayHaveReorderingDependency(SideEffects node, SideEffects other) {
  // Read after write.
  if (node.MayDependOn(other)) {
    return true;
  }

  // Write after read.
  if (other.MayDependOn(node)) {
    return true;
  }

  // Memory write after write.
  if (node.DoesAnyWrite() && other.DoesAnyWrite()) {
    return true;
  }

  return false;
}

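// For example (illustrative only), given
//   i1: HInstanceFieldSet [obj, v]   // Writes a heap location.
//   i2: HInstanceFieldGet [obj]      // May read the location i1 wrote.
// MayHaveReorderingDependency(i2's effects, i1's effects) is true
// (read after write), so the scheduler must not move i2 above i1.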

// Check whether `node` depends on `other`, taking into account `SideEffect`
// information and `CanThrow` information.
static bool HasSideEffectDependency(const HInstruction* node, const HInstruction* other) {
  if (MayHaveReorderingDependency(node->GetSideEffects(), other->GetSideEffects())) {
    return true;
  }

  if (other->CanThrow() && node->GetSideEffects().DoesAnyWrite()) {
    return true;
  }

  if (other->GetSideEffects().DoesAnyWrite() && node->CanThrow()) {
    return true;
  }

  if (other->CanThrow() && node->CanThrow()) {
    return true;
  }

  // Check side-effect dependency between ArrayGet and BoundsCheck.
  if (node->IsArrayGet() && other->IsBoundsCheck() && node->InputAt(1) == other) {
    return true;
  }

  return false;
}
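
// The ArrayGet/BoundsCheck case above covers patterns such as (illustrative):
//   i1: HBoundsCheck [index, length]
//   i2: HArrayGet [array, i1]
// where the get's index input (InputAt(1)) is the bounds check itself, so i2
// is explicitly kept ordered after i1.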

void SchedulingGraph::AddDependencies(HInstruction* instruction, bool is_scheduling_barrier) {
  SchedulingNode* instruction_node = GetNode(instruction);

  // Define-use dependencies.
  for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
    AddDataDependency(GetNode(use.GetUser()), instruction_node);
  }

  // Scheduling barrier dependencies.
  DCHECK(!is_scheduling_barrier || contains_scheduling_barrier_);
  if (contains_scheduling_barrier_) {
    // A barrier depends on instructions after it, and instructions before the
    // barrier depend on it.
    for (HInstruction* other = instruction->GetNext(); other != nullptr; other = other->GetNext()) {
      SchedulingNode* other_node = GetNode(other);
      CHECK(other_node != nullptr)
          << other->DebugName()
          << " is in block " << other->GetBlock()->GetBlockId()
          << ", and expected in block " << instruction->GetBlock()->GetBlockId();
      bool other_is_barrier = other_node->IsSchedulingBarrier();
      if (is_scheduling_barrier || other_is_barrier) {
        AddOtherDependency(other_node, instruction_node);
      }
      if (other_is_barrier) {
        // This other scheduling barrier guarantees ordering of instructions after
        // it, so avoid creating additional useless dependencies in the graph.
        // For example, if we have
        //   instr_1
        //   barrier_2
        //   instr_3
        //   barrier_4
        //   instr_5
        // we only create the following non-data dependencies
        //   1 -> 2
        //   2 -> 3
        //   2 -> 4
        //   3 -> 4
        //   4 -> 5
        // and do not create
        //   1 -> 4
        //   2 -> 5
        // Note that in this example we could also avoid creating the dependency
        // `2 -> 4`. But if we remove `instr_3` that dependency is required to
        // order the barriers. So we generate it to avoid a special case.
        break;
      }
    }
  }

  // Side effect dependencies.
  if (!instruction->GetSideEffects().DoesNothing() || instruction->CanThrow()) {
    for (HInstruction* other = instruction->GetNext(); other != nullptr; other = other->GetNext()) {
      SchedulingNode* other_node = GetNode(other);
      if (other_node->IsSchedulingBarrier()) {
        // We have reached a scheduling barrier, so we can stop further
        // processing.
        DCHECK(HasImmediateOtherDependency(other_node, instruction_node));
        break;
      }
      if (HasSideEffectDependency(other, instruction)) {
        AddOtherDependency(other_node, instruction_node);
      }
    }
  }

  // Environment dependencies.
  // We do not need to process those if the instruction is a scheduling barrier,
  // since the barrier already has non-data dependencies on all following
  // instructions.
  if (!is_scheduling_barrier) {
    for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
      // Note that we could stop processing here if the environment holder is
      // beyond a scheduling barrier, but checking this would likely require
      // more work than simply iterating through environment uses.
      AddOtherDependency(GetNode(use.GetUser()->GetHolder()), instruction_node);
    }
  }
}

bool SchedulingGraph::HasImmediateDataDependency(const SchedulingNode* node,
                                                 const SchedulingNode* other) const {
  return ContainsElement(node->GetDataPredecessors(), other);
}

bool SchedulingGraph::HasImmediateDataDependency(const HInstruction* instruction,
                                                 const HInstruction* other_instruction) const {
  const SchedulingNode* node = GetNode(instruction);
  const SchedulingNode* other = GetNode(other_instruction);
  if (node == nullptr || other == nullptr) {
    // Both instructions must be in the current basic block for the
    // SchedulingGraph to find their corresponding SchedulingNodes and tell
    // whether there is a dependency. Otherwise there is no dependency from the
    // SchedulingGraph's perspective, for example when `instruction` and
    // `other_instruction` are in different basic blocks.
    return false;
  }
  return HasImmediateDataDependency(node, other);
}

bool SchedulingGraph::HasImmediateOtherDependency(const SchedulingNode* node,
                                                  const SchedulingNode* other) const {
  return ContainsElement(node->GetOtherPredecessors(), other);
}

bool SchedulingGraph::HasImmediateOtherDependency(const HInstruction* instruction,
                                                  const HInstruction* other_instruction) const {
  const SchedulingNode* node = GetNode(instruction);
  const SchedulingNode* other = GetNode(other_instruction);
  if (node == nullptr || other == nullptr) {
    // Both instructions must be in the current basic block for the
    // SchedulingGraph to find their corresponding SchedulingNodes and tell
    // whether there is a dependency. Otherwise there is no dependency from the
    // SchedulingGraph's perspective, for example when `instruction` and
    // `other_instruction` are in different basic blocks.
    return false;
  }
  return HasImmediateOtherDependency(node, other);
}

static const std::string InstructionTypeId(const HInstruction* instruction) {
  std::string id;
  Primitive::Type type = instruction->GetType();
  if (type == Primitive::kPrimNot) {
    id.append("l");
  } else {
    id.append(Primitive::Descriptor(instruction->GetType()));
  }
  // Use lower-case to be closer to the `HGraphVisualizer` output.
  id[0] = std::tolower(id[0]);
  id.append(std::to_string(instruction->GetId()));
  return id;
}
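
// For example, an HAdd of type Primitive::kPrimInt with id 20 yields "i20",
// while a reference-typed (kPrimNot) instruction with id 7 yields "l7".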

// Ideally we would reuse the graph visualizer code, but it is not available
// from here and it is not worth moving all that code only for our use.
static void DumpAsDotNode(std::ostream& output, const SchedulingNode* node) {
  const HInstruction* instruction = node->GetInstruction();
  // Use the instruction typed id as the node identifier.
  std::string instruction_id = InstructionTypeId(instruction);
  output << instruction_id << "[shape=record, label=\""
         << instruction_id << ' ' << instruction->DebugName() << " [";
  // List the instruction's inputs in its description. When visualizing the
  // graph this helps differentiate data inputs from other dependencies.
  const char* separator = "";
  for (const HInstruction* input : instruction->GetInputs()) {
    output << separator << InstructionTypeId(input);
    separator = ",";
  }
  output << "]";
  // Other properties of the node.
  output << "\\ninternal_latency: " << node->GetInternalLatency();
  output << "\\ncritical_path: " << node->GetCriticalPath();
  if (node->IsSchedulingBarrier()) {
    output << "\\n(barrier)";
  }
  output << "\"];\n";
  // We want program order to go from top to bottom in the graph output, so we
  // reverse the edges and specify `dir=back`.
  for (const SchedulingNode* predecessor : node->GetDataPredecessors()) {
    const HInstruction* predecessor_instruction = predecessor->GetInstruction();
    output << InstructionTypeId(predecessor_instruction) << ":s -> " << instruction_id << ":n "
           << "[label=\"" << predecessor->GetLatency() << "\",dir=back]\n";
  }
  for (const SchedulingNode* predecessor : node->GetOtherPredecessors()) {
    const HInstruction* predecessor_instruction = predecessor->GetInstruction();
    output << InstructionTypeId(predecessor_instruction) << ":s -> " << instruction_id << ":n "
           << "[dir=back,color=blue]\n";
  }
}

void SchedulingGraph::DumpAsDotGraph(const std::string& description,
                                     const ArenaVector<SchedulingNode*>& initial_candidates) {
  // TODO(xueliang): ideally we should move scheduling information into HInstruction; after that
  // we should move this dot graph dump feature to the visualizer, with a compiler option for it.
  std::ofstream output("scheduling_graphs.dot", std::ofstream::out | std::ofstream::app);
  // Description of this graph, as a comment.
  output << "// " << description << "\n";
  // Start the dot graph. Use an increasing index for easier differentiation.
  output << "digraph G {\n";
  for (const auto& entry : nodes_map_) {
    SchedulingNode* node = entry.second;
    DumpAsDotNode(output, node);
  }
  // Create a fake 'end_of_scheduling' node to help visualize critical paths.
  for (SchedulingNode* node : initial_candidates) {
    const HInstruction* instruction = node->GetInstruction();
    output << InstructionTypeId(instruction) << ":s -> end_of_scheduling:n "
           << "[label=\"" << node->GetLatency() << "\",dir=back]\n";
  }
  // End of the dot graph.
  output << "}\n";
  output.close();
}
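
// Usage note (a sketch, not part of the compiler): because graphs are appended
// to `scheduling_graphs.dot`, the file may accumulate several digraphs. With
// Graphviz, `dot -Tsvg -O scheduling_graphs.dot` renders one output file per
// graph in the dump.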

SchedulingNode* CriticalPathSchedulingNodeSelector::SelectMaterializedCondition(
    ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) const {
  // Schedule condition inputs that can be materialized immediately before their use.
  // In the following example, after we have scheduled the HSelect, we want the HLessThan to be
  // scheduled immediately, because it is a materialized condition and will be emitted right
  // before the HSelect in the codegen phase.
  //
  //   i20 HLessThan [...]               HLessThan    HAdd    HAdd
  //   i21 HAdd [...]          ===>          |          |       |
  //   i22 HAdd [...]                        +----------+-------+
  //   i23 HSelect [i21, i22, i20]                  HSelect

  if (prev_select_ == nullptr) {
    return nullptr;
  }

  const HInstruction* instruction = prev_select_->GetInstruction();
  const HCondition* condition = nullptr;
  DCHECK(instruction != nullptr);

  if (instruction->IsIf()) {
    condition = instruction->AsIf()->InputAt(0)->AsCondition();
  } else if (instruction->IsSelect()) {
    condition = instruction->AsSelect()->GetCondition()->AsCondition();
  }

  SchedulingNode* condition_node = (condition != nullptr) ? graph.GetNode(condition) : nullptr;

  if ((condition_node != nullptr) &&
      condition->HasOnlyOneNonEnvironmentUse() &&
      ContainsElement(*nodes, condition_node)) {
    DCHECK(!condition_node->HasUnscheduledSuccessors());
    // Remove the condition from the list of candidates and schedule it.
    RemoveElement(*nodes, condition_node);
    return condition_node;
  }

  return nullptr;
}

SchedulingNode* CriticalPathSchedulingNodeSelector::PopHighestPriorityNode(
    ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) {
  DCHECK(!nodes->empty());
  SchedulingNode* select_node = nullptr;

  // Optimize for the emit-before-use pattern of materialized conditions.
  select_node = SelectMaterializedCondition(nodes, graph);

  if (select_node == nullptr) {
    // Get the highest-priority node based on critical path information.
    select_node = (*nodes)[0];
    size_t select = 0;
    for (size_t i = 1, e = nodes->size(); i < e; i++) {
      SchedulingNode* check = (*nodes)[i];
      SchedulingNode* candidate = (*nodes)[select];
      select_node = GetHigherPrioritySchedulingNode(candidate, check);
      if (select_node == check) {
        select = i;
      }
    }
    DeleteNodeAtIndex(nodes, select);
  }

  prev_select_ = select_node;
  return select_node;
}

SchedulingNode* CriticalPathSchedulingNodeSelector::GetHigherPrioritySchedulingNode(
    SchedulingNode* candidate, SchedulingNode* check) const {
  uint32_t candidate_path = candidate->GetCriticalPath();
  uint32_t check_path = check->GetCriticalPath();
  // First look at the critical path.
  if (check_path != candidate_path) {
    return check_path < candidate_path ? check : candidate;
  }
  // If both critical paths are equal, schedule instructions with a higher latency
  // first in program order.
  return check->GetLatency() < candidate->GetLatency() ? check : candidate;
}
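
// For example (illustrative): with candidate->GetCriticalPath() == 8 and
// check->GetCriticalPath() == 5, `check` is returned. Because scheduling is
// done bottom-up, the returned node is placed later in program order.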

void HScheduler::Schedule(HGraph* graph) {
  for (HBasicBlock* block : graph->GetReversePostOrder()) {
    if (IsSchedulable(block)) {
      Schedule(block);
    }
  }
}

void HScheduler::Schedule(HBasicBlock* block) {
  ArenaVector<SchedulingNode*> scheduling_nodes(arena_->Adapter(kArenaAllocScheduler));

  // Build the scheduling graph.
  scheduling_graph_.Clear();
  for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    HInstruction* instruction = it.Current();
    CHECK_EQ(instruction->GetBlock(), block)
        << instruction->DebugName()
        << " is in block " << instruction->GetBlock()->GetBlockId()
        << ", and expected in block " << block->GetBlockId();
    SchedulingNode* node = scheduling_graph_.AddNode(instruction, IsSchedulingBarrier(instruction));
    CalculateLatency(node);
    scheduling_nodes.push_back(node);
  }

  if (scheduling_graph_.Size() <= 1) {
    scheduling_graph_.Clear();
    return;
  }

  cursor_ = block->GetLastInstruction();

  // Find the initial candidates for scheduling.
  candidates_.clear();
  for (SchedulingNode* node : scheduling_nodes) {
    if (!node->HasUnscheduledSuccessors()) {
      node->MaybeUpdateCriticalPath(node->GetLatency());
      candidates_.push_back(node);
    }
  }

  ArenaVector<SchedulingNode*> initial_candidates(arena_->Adapter(kArenaAllocScheduler));
  if (kDumpDotSchedulingGraphs) {
    // Remember the list of initial candidates for debug output purposes.
    initial_candidates.assign(candidates_.begin(), candidates_.end());
  }

  // Schedule all nodes.
  while (!candidates_.empty()) {
    Schedule(selector_->PopHighestPriorityNode(&candidates_, scheduling_graph_));
  }

  if (kDumpDotSchedulingGraphs) {
    // Dump the graph in `dot` format.
    HGraph* graph = block->GetGraph();
    std::stringstream description;
    description << graph->GetDexFile().PrettyMethod(graph->GetMethodIdx())
                << " B" << block->GetBlockId();
    scheduling_graph_.DumpAsDotGraph(description.str(), initial_candidates);
  }
}
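
// Informal sketch of the per-block pass above (derived from the code, for
// orientation only):
//   1. Walk the block backwards, adding a SchedulingNode per instruction and
//      computing its latency.
//   2. Seed `candidates_` with the nodes that have no unscheduled successors.
//   3. Repeatedly pop the highest-priority candidate and place it with
//      Schedule(SchedulingNode*), moving `cursor_` up through the block.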

void HScheduler::Schedule(SchedulingNode* scheduling_node) {
  // Check whether any of the node's predecessors will be valid candidates after
  // this node is scheduled.
  uint32_t path_to_node = scheduling_node->GetCriticalPath();
  for (SchedulingNode* predecessor : scheduling_node->GetDataPredecessors()) {
    predecessor->MaybeUpdateCriticalPath(
        path_to_node + predecessor->GetInternalLatency() + predecessor->GetLatency());
    predecessor->DecrementNumberOfUnscheduledSuccessors();
    if (!predecessor->HasUnscheduledSuccessors()) {
      candidates_.push_back(predecessor);
    }
  }
  for (SchedulingNode* predecessor : scheduling_node->GetOtherPredecessors()) {
    // Do not update the critical path.
    // The 'other' (non-data) dependencies usually do not represent a 'material'
    // dependency of nodes on others; they exist for program correctness, so we
    // do not use them to compute the critical path.
    predecessor->DecrementNumberOfUnscheduledSuccessors();
    if (!predecessor->HasUnscheduledSuccessors()) {
      candidates_.push_back(predecessor);
    }
  }

  Schedule(scheduling_node->GetInstruction());
}

// Move an instruction after the cursor instruction within a basic block.
static void MoveAfterInBlock(HInstruction* instruction, HInstruction* cursor) {
  DCHECK_EQ(instruction->GetBlock(), cursor->GetBlock());
  DCHECK_NE(cursor, cursor->GetBlock()->GetLastInstruction());
  DCHECK(!instruction->IsControlFlow());
  DCHECK(!cursor->IsControlFlow());
  instruction->MoveBefore(cursor->GetNext(), /* do_checks */ false);
}
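
// For example (illustrative): for a block containing `i1, i2, i3` with cursor
// `i1`, MoveAfterInBlock(i3, i1) rewrites the order to `i1, i3, i2`.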

void HScheduler::Schedule(HInstruction* instruction) {
  if (instruction == cursor_) {
    cursor_ = cursor_->GetPrevious();
  } else {
    MoveAfterInBlock(instruction, cursor_);
  }
}

bool HScheduler::IsSchedulable(const HInstruction* instruction) const {
  // We want to avoid exhaustively listing all instructions, so we first check
  // for instruction categories that we know are safe.
  if (instruction->IsControlFlow() ||
      instruction->IsConstant()) {
    return true;
  }
  // Currently all unary and binary operations are safe to schedule, so avoid
  // checking for each of them individually.
  // Since nothing prevents a new scheduling-unsafe HInstruction from subclassing
  // HUnaryOperation (or HBinaryOperation), check in debug mode that we have
  // the exhaustive lists here.
  if (instruction->IsUnaryOperation()) {
    DCHECK(instruction->IsBooleanNot() ||
           instruction->IsNot() ||
           instruction->IsNeg()) << "unexpected instruction " << instruction->DebugName();
    return true;
  }
  if (instruction->IsBinaryOperation()) {
    DCHECK(instruction->IsAdd() ||
           instruction->IsAnd() ||
           instruction->IsCompare() ||
           instruction->IsCondition() ||
           instruction->IsDiv() ||
           instruction->IsMul() ||
           instruction->IsOr() ||
           instruction->IsRem() ||
           instruction->IsRor() ||
           instruction->IsShl() ||
           instruction->IsShr() ||
           instruction->IsSub() ||
           instruction->IsUShr() ||
           instruction->IsXor()) << "unexpected instruction " << instruction->DebugName();
    return true;
  }
  // The scheduler should not see any of these.
  DCHECK(!instruction->IsParallelMove()) << "unexpected instruction " << instruction->DebugName();
  // List of instructions explicitly excluded:
  //   HClearException
  //   HClinitCheck
  //   HDeoptimize
  //   HLoadClass
  //   HLoadException
  //   HMemoryBarrier
  //   HMonitorOperation
  //   HNativeDebugInfo
  //   HThrow
  //   HTryBoundary
  // TODO: Some of the instructions above may be safe to schedule (maybe as
  // scheduling barriers).
  return instruction->IsArrayGet() ||
         instruction->IsArraySet() ||
         instruction->IsArrayLength() ||
         instruction->IsBoundType() ||
         instruction->IsBoundsCheck() ||
         instruction->IsCheckCast() ||
         instruction->IsClassTableGet() ||
         instruction->IsCurrentMethod() ||
         instruction->IsDivZeroCheck() ||
         instruction->IsInstanceFieldGet() ||
         instruction->IsInstanceFieldSet() ||
         instruction->IsInstanceOf() ||
         instruction->IsInvokeInterface() ||
         instruction->IsInvokeStaticOrDirect() ||
         instruction->IsInvokeUnresolved() ||
         instruction->IsInvokeVirtual() ||
         instruction->IsLoadString() ||
         instruction->IsNewArray() ||
         instruction->IsNewInstance() ||
         instruction->IsNullCheck() ||
         instruction->IsPackedSwitch() ||
         instruction->IsParameterValue() ||
         instruction->IsPhi() ||
         instruction->IsReturn() ||
         instruction->IsReturnVoid() ||
         instruction->IsSelect() ||
         instruction->IsStaticFieldGet() ||
         instruction->IsStaticFieldSet() ||
         instruction->IsSuspendCheck() ||
         instruction->IsTypeConversion() ||
         instruction->IsUnresolvedInstanceFieldGet() ||
         instruction->IsUnresolvedInstanceFieldSet() ||
         instruction->IsUnresolvedStaticFieldGet() ||
         instruction->IsUnresolvedStaticFieldSet();
}

bool HScheduler::IsSchedulable(const HBasicBlock* block) const {
  // We may only be interested in loop blocks.
  if (only_optimize_loop_blocks_ && !block->IsInLoop()) {
    return false;
  }
  if (block->GetTryCatchInformation() != nullptr) {
    // Do not schedule blocks that are part of try-catch, because the scheduler
    // cannot see whether the catch block relies on the instruction order in the
    // try block. In the following example, if we enabled the scheduler for the
    // try block, the MultiplyAccumulate could be scheduled before the
    // DivZeroCheck, which could result in an incorrect value in the catch block.
    //   try {
    //     a = a/b;    // DivZeroCheck
    //                 // Div
    //     c = c*d+e;  // MultiplyAccumulate
    //   } catch { System.out.print(c); }
    return false;
  }
  // Check whether all instructions in this block are schedulable.
  for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    if (!IsSchedulable(it.Current())) {
      return false;
    }
  }
  return true;
}

bool HScheduler::IsSchedulingBarrier(const HInstruction* instr) const {
  return instr->IsControlFlow() ||
         // Don't break the calling convention.
         instr->IsParameterValue() ||
         // Code generation of the goto relies on the SuspendCheck's position.
         instr->IsSuspendCheck();
}
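
// For example (illustrative): an HSuspendCheck at the end of a loop body is a
// barrier, since the code generated for the loop's goto relies on its position;
// no instruction may be reordered across it.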

void HInstructionScheduling::Run(bool only_optimize_loop_blocks,
                                 bool schedule_randomly) {
#if defined(ART_ENABLE_CODEGEN_arm64) || defined(ART_ENABLE_CODEGEN_arm)
  // Phase-local allocator that allocates scheduler-internal data structures such
  // as scheduling nodes, the internal nodes map, dependencies, etc.
  ArenaAllocator arena_allocator(graph_->GetArena()->GetArenaPool());
  CriticalPathSchedulingNodeSelector critical_path_selector;
  RandomSchedulingNodeSelector random_selector;
  SchedulingNodeSelector* selector = schedule_randomly
      ? static_cast<SchedulingNodeSelector*>(&random_selector)
      : static_cast<SchedulingNodeSelector*>(&critical_path_selector);
#else
  // Avoid compilation error when compiling for unsupported instruction set.
  UNUSED(only_optimize_loop_blocks);
  UNUSED(schedule_randomly);
#endif
  switch (instruction_set_) {
#ifdef ART_ENABLE_CODEGEN_arm64
    case kArm64: {
      arm64::HSchedulerARM64 scheduler(&arena_allocator, selector);
      scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
      scheduler.Schedule(graph_);
      break;
    }
#endif
#if defined(ART_ENABLE_CODEGEN_arm)
    case kThumb2:
    case kArm: {
      arm::SchedulingLatencyVisitorARM arm_latency_visitor(codegen_);
      arm::HSchedulerARM scheduler(&arena_allocator, selector, &arm_latency_visitor);
      scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
      scheduler.Schedule(graph_);
      break;
    }
#endif
    default:
      break;
  }
}

}  // namespace art