blob: ac0ccf5d0183f2a3b407dff9f1baa5aa11fcd722 [file] [log] [blame]
Alexandre Rames22aa54b2016-10-18 09:32:29 +01001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <string>
18
19#include "prepare_for_register_allocation.h"
20#include "scheduler.h"
21
22#ifdef ART_ENABLE_CODEGEN_arm64
23#include "scheduler_arm64.h"
24#endif
25
xueliang.zhongf7caf682017-03-01 16:07:02 +000026#ifdef ART_ENABLE_CODEGEN_arm
27#include "scheduler_arm.h"
28#endif
29
Alexandre Rames22aa54b2016-10-18 09:32:29 +010030namespace art {
31
32void SchedulingGraph::AddDependency(SchedulingNode* node,
33 SchedulingNode* dependency,
34 bool is_data_dependency) {
35 if (node == nullptr || dependency == nullptr) {
36 // A `nullptr` node indicates an instruction out of scheduling range (eg. in
37 // an other block), so we do not need to add a dependency edge to the graph.
38 return;
39 }
40
41 if (is_data_dependency) {
42 if (!HasImmediateDataDependency(node, dependency)) {
43 node->AddDataPredecessor(dependency);
44 }
45 } else if (!HasImmediateOtherDependency(node, dependency)) {
46 node->AddOtherPredecessor(dependency);
47 }
48}
49
50static bool MayHaveReorderingDependency(SideEffects node, SideEffects other) {
51 // Read after write.
52 if (node.MayDependOn(other)) {
53 return true;
54 }
55
56 // Write after read.
57 if (other.MayDependOn(node)) {
58 return true;
59 }
60
61 // Memory write after write.
62 if (node.DoesAnyWrite() && other.DoesAnyWrite()) {
63 return true;
64 }
65
66 return false;
67}
68
69
70// Check whether `node` depends on `other`, taking into account `SideEffect`
71// information and `CanThrow` information.
72static bool HasSideEffectDependency(const HInstruction* node, const HInstruction* other) {
73 if (MayHaveReorderingDependency(node->GetSideEffects(), other->GetSideEffects())) {
74 return true;
75 }
76
77 if (other->CanThrow() && node->GetSideEffects().DoesAnyWrite()) {
78 return true;
79 }
80
81 if (other->GetSideEffects().DoesAnyWrite() && node->CanThrow()) {
82 return true;
83 }
84
85 if (other->CanThrow() && node->CanThrow()) {
86 return true;
87 }
88
89 // Check side-effect dependency between ArrayGet and BoundsCheck.
90 if (node->IsArrayGet() && other->IsBoundsCheck() && node->InputAt(1) == other) {
91 return true;
92 }
93
94 return false;
95}
96
// Add all dependency edges for `instruction` to the graph: define-use (data)
// dependencies, scheduling-barrier ordering, side-effect ordering, and
// environment-use ordering.
void SchedulingGraph::AddDependencies(HInstruction* instruction, bool is_scheduling_barrier) {
  SchedulingNode* instruction_node = GetNode(instruction);

  // Define-use dependencies: every user of this instruction's value depends on it.
  for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
    AddDataDependency(GetNode(use.GetUser()), instruction_node);
  }

  // Scheduling barrier dependencies.
  DCHECK(!is_scheduling_barrier || contains_scheduling_barrier_);
  if (contains_scheduling_barrier_) {
    // A barrier depends on instructions after it. And instructions before the
    // barrier depend on it.
    for (HInstruction* other = instruction->GetNext(); other != nullptr; other = other->GetNext()) {
      SchedulingNode* other_node = GetNode(other);
      bool other_is_barrier = other_node->IsSchedulingBarrier();
      if (is_scheduling_barrier || other_is_barrier) {
        AddOtherDependency(other_node, instruction_node);
      }
      if (other_is_barrier) {
        // This other scheduling barrier guarantees ordering of instructions after
        // it, so avoid creating additional useless dependencies in the graph.
        // For example if we have
        //     instr_1
        //     barrier_2
        //     instr_3
        //     barrier_4
        //     instr_5
        // we only create the following non-data dependencies
        //     1 -> 2
        //     2 -> 3
        //     2 -> 4
        //     3 -> 4
        //     4 -> 5
        // and do not create
        //     1 -> 4
        //     2 -> 5
        // Note that in this example we could also avoid creating the dependency
        // `2 -> 4`. But if we remove `instr_3` that dependency is required to
        // order the barriers. So we generate it to avoid a special case.
        break;
      }
    }
  }

  // Side effect dependencies. Only instructions that have side effects or may
  // throw can constrain the placement of later instructions.
  if (!instruction->GetSideEffects().DoesNothing() || instruction->CanThrow()) {
    for (HInstruction* other = instruction->GetNext(); other != nullptr; other = other->GetNext()) {
      SchedulingNode* other_node = GetNode(other);
      if (other_node->IsSchedulingBarrier()) {
        // We have reached a scheduling barrier so we can stop further
        // processing. The barrier edge added above already orders us.
        DCHECK(HasImmediateOtherDependency(other_node, instruction_node));
        break;
      }
      if (HasSideEffectDependency(other, instruction)) {
        AddOtherDependency(other_node, instruction_node);
      }
    }
  }

  // Environment dependencies.
  // We do not need to process those if the instruction is a scheduling barrier,
  // since the barrier already has non-data dependencies on all following
  // instructions.
  if (!is_scheduling_barrier) {
    for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
      // Note that here we could stop processing if the environment holder is
      // across a scheduling barrier. But checking this would likely require
      // more work than simply iterating through environment uses.
      AddOtherDependency(GetNode(use.GetUser()->GetHolder()), instruction_node);
    }
  }
}
171
172bool SchedulingGraph::HasImmediateDataDependency(const SchedulingNode* node,
173 const SchedulingNode* other) const {
174 return ContainsElement(node->GetDataPredecessors(), other);
175}
176
177bool SchedulingGraph::HasImmediateDataDependency(const HInstruction* instruction,
178 const HInstruction* other_instruction) const {
179 const SchedulingNode* node = GetNode(instruction);
180 const SchedulingNode* other = GetNode(other_instruction);
181 if (node == nullptr || other == nullptr) {
182 // Both instructions must be in current basic block, i.e. the SchedulingGraph can see their
183 // corresponding SchedulingNode in the graph, and tell whether there is a dependency.
184 // Otherwise there is no dependency from SchedulingGraph's perspective, for example,
185 // instruction and other_instruction are in different basic blocks.
186 return false;
187 }
188 return HasImmediateDataDependency(node, other);
189}
190
191bool SchedulingGraph::HasImmediateOtherDependency(const SchedulingNode* node,
192 const SchedulingNode* other) const {
193 return ContainsElement(node->GetOtherPredecessors(), other);
194}
195
196bool SchedulingGraph::HasImmediateOtherDependency(const HInstruction* instruction,
197 const HInstruction* other_instruction) const {
198 const SchedulingNode* node = GetNode(instruction);
199 const SchedulingNode* other = GetNode(other_instruction);
200 if (node == nullptr || other == nullptr) {
201 // Both instructions must be in current basic block, i.e. the SchedulingGraph can see their
202 // corresponding SchedulingNode in the graph, and tell whether there is a dependency.
203 // Otherwise there is no dependency from SchedulingGraph's perspective, for example,
204 // instruction and other_instruction are in different basic blocks.
205 return false;
206 }
207 return HasImmediateOtherDependency(node, other);
208}
209
210static const std::string InstructionTypeId(const HInstruction* instruction) {
211 std::string id;
212 Primitive::Type type = instruction->GetType();
213 if (type == Primitive::kPrimNot) {
214 id.append("l");
215 } else {
216 id.append(Primitive::Descriptor(instruction->GetType()));
217 }
218 // Use lower-case to be closer to the `HGraphVisualizer` output.
219 id[0] = std::tolower(id[0]);
220 id.append(std::to_string(instruction->GetId()));
221 return id;
222}
223
224// Ideally we would reuse the graph visualizer code, but it is not available
225// from here and it is not worth moving all that code only for our use.
226static void DumpAsDotNode(std::ostream& output, const SchedulingNode* node) {
227 const HInstruction* instruction = node->GetInstruction();
228 // Use the instruction typed id as the node identifier.
229 std::string instruction_id = InstructionTypeId(instruction);
230 output << instruction_id << "[shape=record, label=\""
231 << instruction_id << ' ' << instruction->DebugName() << " [";
232 // List the instruction's inputs in its description. When visualizing the
233 // graph this helps differentiating data inputs from other dependencies.
234 const char* seperator = "";
235 for (const HInstruction* input : instruction->GetInputs()) {
236 output << seperator << InstructionTypeId(input);
237 seperator = ",";
238 }
239 output << "]";
240 // Other properties of the node.
241 output << "\\ninternal_latency: " << node->GetInternalLatency();
242 output << "\\ncritical_path: " << node->GetCriticalPath();
243 if (node->IsSchedulingBarrier()) {
244 output << "\\n(barrier)";
245 }
246 output << "\"];\n";
247 // We want program order to go from top to bottom in the graph output, so we
248 // reverse the edges and specify `dir=back`.
249 for (const SchedulingNode* predecessor : node->GetDataPredecessors()) {
250 const HInstruction* predecessor_instruction = predecessor->GetInstruction();
251 output << InstructionTypeId(predecessor_instruction) << ":s -> " << instruction_id << ":n "
252 << "[label=\"" << predecessor->GetLatency() << "\",dir=back]\n";
253 }
254 for (const SchedulingNode* predecessor : node->GetOtherPredecessors()) {
255 const HInstruction* predecessor_instruction = predecessor->GetInstruction();
256 output << InstructionTypeId(predecessor_instruction) << ":s -> " << instruction_id << ":n "
257 << "[dir=back,color=blue]\n";
258 }
259}
260
261void SchedulingGraph::DumpAsDotGraph(const std::string& description,
262 const ArenaVector<SchedulingNode*>& initial_candidates) {
263 // TODO(xueliang): ideally we should move scheduling information into HInstruction, after that
264 // we should move this dotty graph dump feature to visualizer, and have a compiler option for it.
265 std::ofstream output("scheduling_graphs.dot", std::ofstream::out | std::ofstream::app);
266 // Description of this graph, as a comment.
267 output << "// " << description << "\n";
268 // Start the dot graph. Use an increasing index for easier differentiation.
269 output << "digraph G {\n";
270 for (const auto& entry : nodes_map_) {
271 DumpAsDotNode(output, entry.second);
272 }
273 // Create a fake 'end_of_scheduling' node to help visualization of critical_paths.
274 for (auto node : initial_candidates) {
275 const HInstruction* instruction = node->GetInstruction();
276 output << InstructionTypeId(instruction) << ":s -> end_of_scheduling:n "
277 << "[label=\"" << node->GetLatency() << "\",dir=back]\n";
278 }
279 // End of the dot graph.
280 output << "}\n";
281 output.close();
282}
283
284SchedulingNode* CriticalPathSchedulingNodeSelector::SelectMaterializedCondition(
285 ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) const {
286 // Schedule condition inputs that can be materialized immediately before their use.
287 // In following example, after we've scheduled HSelect, we want LessThan to be scheduled
288 // immediately, because it is a materialized condition, and will be emitted right before HSelect
289 // in codegen phase.
290 //
291 // i20 HLessThan [...] HLessThan HAdd HAdd
292 // i21 HAdd [...] ===> | | |
293 // i22 HAdd [...] +----------+---------+
294 // i23 HSelect [i21, i22, i20] HSelect
295
296 if (prev_select_ == nullptr) {
297 return nullptr;
298 }
299
300 const HInstruction* instruction = prev_select_->GetInstruction();
301 const HCondition* condition = nullptr;
302 DCHECK(instruction != nullptr);
303
304 if (instruction->IsIf()) {
305 condition = instruction->AsIf()->InputAt(0)->AsCondition();
306 } else if (instruction->IsSelect()) {
307 condition = instruction->AsSelect()->GetCondition()->AsCondition();
308 }
309
310 SchedulingNode* condition_node = (condition != nullptr) ? graph.GetNode(condition) : nullptr;
311
312 if ((condition_node != nullptr) &&
313 condition->HasOnlyOneNonEnvironmentUse() &&
314 ContainsElement(*nodes, condition_node)) {
315 DCHECK(!condition_node->HasUnscheduledSuccessors());
316 // Remove the condition from the list of candidates and schedule it.
317 RemoveElement(*nodes, condition_node);
318 return condition_node;
319 }
320
321 return nullptr;
322}
323
324SchedulingNode* CriticalPathSchedulingNodeSelector::PopHighestPriorityNode(
325 ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) {
326 DCHECK(!nodes->empty());
327 SchedulingNode* select_node = nullptr;
328
329 // Optimize for materialized condition and its emit before use scenario.
330 select_node = SelectMaterializedCondition(nodes, graph);
331
332 if (select_node == nullptr) {
333 // Get highest priority node based on critical path information.
334 select_node = (*nodes)[0];
335 size_t select = 0;
336 for (size_t i = 1, e = nodes->size(); i < e; i++) {
337 SchedulingNode* check = (*nodes)[i];
338 SchedulingNode* candidate = (*nodes)[select];
339 select_node = GetHigherPrioritySchedulingNode(candidate, check);
340 if (select_node == check) {
341 select = i;
342 }
343 }
344 DeleteNodeAtIndex(nodes, select);
345 }
346
347 prev_select_ = select_node;
348 return select_node;
349}
350
351SchedulingNode* CriticalPathSchedulingNodeSelector::GetHigherPrioritySchedulingNode(
352 SchedulingNode* candidate, SchedulingNode* check) const {
353 uint32_t candidate_path = candidate->GetCriticalPath();
354 uint32_t check_path = check->GetCriticalPath();
355 // First look at the critical_path.
356 if (check_path != candidate_path) {
357 return check_path < candidate_path ? check : candidate;
358 }
359 // If both critical paths are equal, schedule instructions with a higher latency
360 // first in program order.
361 return check->GetLatency() < candidate->GetLatency() ? check : candidate;
362}
363
364void HScheduler::Schedule(HGraph* graph) {
365 for (HBasicBlock* block : graph->GetReversePostOrder()) {
366 if (IsSchedulable(block)) {
367 Schedule(block);
368 }
369 }
370}
371
// Schedule a single basic block: build the scheduling graph, then repeatedly
// pick the highest-priority candidate and move it into place.
void HScheduler::Schedule(HBasicBlock* block) {
  ArenaVector<SchedulingNode*> scheduling_nodes(arena_->Adapter(kArenaAllocScheduler));

  // Build the scheduling graph. Instructions are visited backwards
  // (presumably so users already have nodes when dependencies are recorded by
  // `AddNode` — see SchedulingGraph).
  scheduling_graph_.Clear();
  for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    HInstruction* instruction = it.Current();
    SchedulingNode* node = scheduling_graph_.AddNode(instruction, IsSchedulingBarrier(instruction));
    CalculateLatency(node);
    scheduling_nodes.push_back(node);
  }

  // Nothing to reorder in a block with at most one scheduled node.
  if (scheduling_graph_.Size() <= 1) {
    scheduling_graph_.Clear();
    return;
  }

  cursor_ = block->GetLastInstruction();

  // Find the initial candidates for scheduling: nodes with no unscheduled
  // successors. Seed their critical path with their own latency.
  candidates_.clear();
  for (SchedulingNode* node : scheduling_nodes) {
    if (!node->HasUnscheduledSuccessors()) {
      node->MaybeUpdateCriticalPath(node->GetLatency());
      candidates_.push_back(node);
    }
  }

  ArenaVector<SchedulingNode*> initial_candidates(arena_->Adapter(kArenaAllocScheduler));
  if (kDumpDotSchedulingGraphs) {
    // Remember the list of initial candidates for debug output purposes.
    initial_candidates.assign(candidates_.begin(), candidates_.end());
  }

  // Schedule all nodes. Scheduling a node may add its now-ready predecessors
  // to `candidates_`.
  while (!candidates_.empty()) {
    Schedule(selector_->PopHighestPriorityNode(&candidates_, scheduling_graph_));
  }

  if (kDumpDotSchedulingGraphs) {
    // Dump the graph in `dot` format.
    HGraph* graph = block->GetGraph();
    std::stringstream description;
    description << graph->GetDexFile().PrettyMethod(graph->GetMethodIdx())
        << " B" << block->GetBlockId();
    scheduling_graph_.DumpAsDotGraph(description.str(), initial_candidates);
  }
}
420
// Commit `scheduling_node`: update its predecessors' bookkeeping, promote any
// predecessor that became ready to a candidate, then move the instruction.
void HScheduler::Schedule(SchedulingNode* scheduling_node) {
  // Check whether any of the node's predecessors will be valid candidates after
  // this node is scheduled.
  uint32_t path_to_node = scheduling_node->GetCriticalPath();
  for (SchedulingNode* predecessor : scheduling_node->GetDataPredecessors()) {
    // Extend the predecessor's critical path through this node.
    predecessor->MaybeUpdateCriticalPath(
        path_to_node + predecessor->GetInternalLatency() + predecessor->GetLatency());
    predecessor->DecrementNumberOfUnscheduledSuccessors();
    if (!predecessor->HasUnscheduledSuccessors()) {
      candidates_.push_back(predecessor);
    }
  }
  for (SchedulingNode* predecessor : scheduling_node->GetOtherPredecessors()) {
    // Do not update the critical path.
    // The 'other' (so 'non-data') dependencies (usually) do not represent a
    // 'material' dependency of nodes on others. They exist for program
    // correctness. So we do not use them to compute the critical path.
    predecessor->DecrementNumberOfUnscheduledSuccessors();
    if (!predecessor->HasUnscheduledSuccessors()) {
      candidates_.push_back(predecessor);
    }
  }

  Schedule(scheduling_node->GetInstruction());
}
446
447// Move an instruction after cursor instruction inside one basic block.
448static void MoveAfterInBlock(HInstruction* instruction, HInstruction* cursor) {
449 DCHECK_EQ(instruction->GetBlock(), cursor->GetBlock());
450 DCHECK_NE(cursor, cursor->GetBlock()->GetLastInstruction());
451 DCHECK(!instruction->IsControlFlow());
452 DCHECK(!cursor->IsControlFlow());
453 instruction->MoveBefore(cursor->GetNext(), /* do_checks */ false);
454}
455
456void HScheduler::Schedule(HInstruction* instruction) {
457 if (instruction == cursor_) {
458 cursor_ = cursor_->GetPrevious();
459 } else {
460 MoveAfterInBlock(instruction, cursor_);
461 }
462}
463
// Whether the scheduler knows how to handle `instruction`. Blocks containing
// any non-schedulable instruction are skipped entirely (see the HBasicBlock
// overload).
bool HScheduler::IsSchedulable(const HInstruction* instruction) const {
  // We want to avoid exhaustively listing all instructions, so we first check
  // for instruction categories that we know are safe.
  if (instruction->IsControlFlow() ||
      instruction->IsConstant()) {
    return true;
  }
  // Currently all unary and binary operations are safe to schedule, so avoid
  // checking for each of them individually.
  // Since nothing prevents a new scheduling-unsafe HInstruction to subclass
  // HUnaryOperation (or HBinaryOperation), check in debug mode that we have
  // the exhaustive lists here.
  if (instruction->IsUnaryOperation()) {
    DCHECK(instruction->IsBooleanNot() ||
           instruction->IsNot() ||
           instruction->IsNeg()) << "unexpected instruction " << instruction->DebugName();
    return true;
  }
  if (instruction->IsBinaryOperation()) {
    DCHECK(instruction->IsAdd() ||
           instruction->IsAnd() ||
           instruction->IsCompare() ||
           instruction->IsCondition() ||
           instruction->IsDiv() ||
           instruction->IsMul() ||
           instruction->IsOr() ||
           instruction->IsRem() ||
           instruction->IsRor() ||
           instruction->IsShl() ||
           instruction->IsShr() ||
           instruction->IsSub() ||
           instruction->IsUShr() ||
           instruction->IsXor()) << "unexpected instruction " << instruction->DebugName();
    return true;
  }
  // The scheduler should not see any of these.
  DCHECK(!instruction->IsParallelMove()) << "unexpected instruction " << instruction->DebugName();
  // Fall back to an explicit allow-list of remaining schedulable instructions.
  // List of instructions explicitly excluded:
  //    HClearException
  //    HClinitCheck
  //    HDeoptimize
  //    HLoadClass
  //    HLoadException
  //    HMemoryBarrier
  //    HMonitorOperation
  //    HNativeDebugInfo
  //    HThrow
  //    HTryBoundary
  // TODO: Some of the instructions above may be safe to schedule (maybe as
  // scheduling barriers).
  return instruction->IsArrayGet() ||
      instruction->IsArraySet() ||
      instruction->IsArrayLength() ||
      instruction->IsBoundType() ||
      instruction->IsBoundsCheck() ||
      instruction->IsCheckCast() ||
      instruction->IsClassTableGet() ||
      instruction->IsCurrentMethod() ||
      instruction->IsDivZeroCheck() ||
      instruction->IsInstanceFieldGet() ||
      instruction->IsInstanceFieldSet() ||
      instruction->IsInstanceOf() ||
      instruction->IsInvokeInterface() ||
      instruction->IsInvokeStaticOrDirect() ||
      instruction->IsInvokeUnresolved() ||
      instruction->IsInvokeVirtual() ||
      instruction->IsLoadString() ||
      instruction->IsNewArray() ||
      instruction->IsNewInstance() ||
      instruction->IsNullCheck() ||
      instruction->IsPackedSwitch() ||
      instruction->IsParameterValue() ||
      instruction->IsPhi() ||
      instruction->IsReturn() ||
      instruction->IsReturnVoid() ||
      instruction->IsSelect() ||
      instruction->IsStaticFieldGet() ||
      instruction->IsStaticFieldSet() ||
      instruction->IsSuspendCheck() ||
      instruction->IsTypeConversion() ||
      instruction->IsUnresolvedInstanceFieldGet() ||
      instruction->IsUnresolvedInstanceFieldSet() ||
      instruction->IsUnresolvedStaticFieldGet() ||
      instruction->IsUnresolvedStaticFieldSet();
}
549
550bool HScheduler::IsSchedulable(const HBasicBlock* block) const {
551 // We may be only interested in loop blocks.
552 if (only_optimize_loop_blocks_ && !block->IsInLoop()) {
553 return false;
554 }
555 if (block->GetTryCatchInformation() != nullptr) {
556 // Do not schedule blocks that are part of try-catch.
557 // Because scheduler cannot see if catch block has assumptions on the instruction order in
558 // the try block. In following example, if we enable scheduler for the try block,
559 // MulitiplyAccumulate may be scheduled before DivZeroCheck,
560 // which can result in an incorrect value in the catch block.
561 // try {
562 // a = a/b; // DivZeroCheck
563 // // Div
564 // c = c*d+e; // MulitiplyAccumulate
565 // } catch {System.out.print(c); }
566 return false;
567 }
568 // Check whether all instructions in this block are schedulable.
569 for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
570 if (!IsSchedulable(it.Current())) {
571 return false;
572 }
573 }
574 return true;
575}
576
577bool HScheduler::IsSchedulingBarrier(const HInstruction* instr) const {
578 return instr->IsControlFlow() ||
579 // Don't break calling convention.
580 instr->IsParameterValue() ||
581 // Code generation of goto relies on SuspendCheck's position.
582 instr->IsSuspendCheck();
583}
584
// Entry point of the scheduling pass: pick a node selector (critical-path or
// random) and dispatch to the back-end scheduler matching `instruction_set_`.
// For unsupported instruction sets this is a no-op.
void HInstructionScheduling::Run(bool only_optimize_loop_blocks,
                                 bool schedule_randomly) {
#if defined(ART_ENABLE_CODEGEN_arm64) || defined(ART_ENABLE_CODEGEN_arm)
  // Phase-local allocator that allocates scheduler internal data structures like
  // scheduling nodes, internal nodes map, dependencies, etc.
  ArenaAllocator arena_allocator(graph_->GetArena()->GetArenaPool());
  CriticalPathSchedulingNodeSelector critical_path_selector;
  RandomSchedulingNodeSelector random_selector;
  // `schedule_randomly` is intended for fuzzing the scheduler; otherwise use
  // the critical-path heuristic.
  SchedulingNodeSelector* selector = schedule_randomly
      ? static_cast<SchedulingNodeSelector*>(&random_selector)
      : static_cast<SchedulingNodeSelector*>(&critical_path_selector);
#else
  // Avoid compilation error when compiling for unsupported instruction set.
  UNUSED(only_optimize_loop_blocks);
  UNUSED(schedule_randomly);
#endif
  switch (instruction_set_) {
#ifdef ART_ENABLE_CODEGEN_arm64
    case kArm64: {
      arm64::HSchedulerARM64 scheduler(&arena_allocator, selector);
      scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
      scheduler.Schedule(graph_);
      break;
    }
#endif
#if defined(ART_ENABLE_CODEGEN_arm)
    case kThumb2:
    case kArm: {
      // ARM uses a codegen-aware latency visitor in addition to the selector.
      arm::SchedulingLatencyVisitorARM arm_latency_visitor(codegen_);
      arm::HSchedulerARM scheduler(&arena_allocator, selector, &arm_latency_visitor);
      scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
      scheduler.Schedule(graph_);
      break;
    }
#endif
    default:
      // No scheduler for this instruction set; leave the graph untouched.
      break;
  }
}
624
625} // namespace art