/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "loop_analysis.h"

namespace art {

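// Walks every block of the loop and accumulates its basic properties into
// 'analysis_results': the number of loop exits, basic blocks and instructions,
// and whether the loop contains instructions that make scalar unrolling
// non-beneficial.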
void LoopAnalysis::CalculateLoopBasicProperties(HLoopInformation* loop_info,
                                                LoopAnalysisInfo* analysis_results) {
  for (HBlocksInLoopIterator block_it(*loop_info);
       !block_it.Done();
       block_it.Advance()) {
    HBasicBlock* block = block_it.Current();

    for (HBasicBlock* successor : block->GetSuccessors()) {
      if (!loop_info->Contains(*successor)) {
        analysis_results->exits_num_++;
      }
    }

    for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
      HInstruction* instruction = it.Current();
      if (MakesScalarUnrollingNonBeneficial(instruction)) {
        analysis_results->has_instructions_preventing_scalar_unrolling_ = true;
      }
      analysis_results->instr_num_++;
    }
    analysis_results->bb_num_++;
  }
}

class Arm64LoopHelper : public ArchDefaultLoopHelper {
 public:
  // Scalar loop unrolling parameters and heuristics.
  //
  // Maximum possible unrolling factor.
  static constexpr uint32_t kArm64ScalarMaxUnrollFactor = 2;
  // Loop's maximum instruction count. Loops with higher count will not be peeled/unrolled.
  static constexpr uint32_t kArm64ScalarHeuristicMaxBodySizeInstr = 40;
  // Loop's maximum basic block count. Loops with higher count will not be peeled/unrolled.
  static constexpr uint32_t kArm64ScalarHeuristicMaxBodySizeBlocks = 8;

  // SIMD loop unrolling parameters and heuristics.
  //
  // Maximum possible unrolling factor.
  static constexpr uint32_t kArm64SimdMaxUnrollFactor = 8;
  // Loop's maximum instruction count. Loops with higher count will not be unrolled.
  static constexpr uint32_t kArm64SimdHeuristicMaxBodySizeInstr = 50;

  bool IsLoopTooBigForScalarUnrolling(LoopAnalysisInfo* loop_analysis_info) const OVERRIDE {
    size_t instr_num = loop_analysis_info->GetNumberOfInstructions();
    size_t bb_num = loop_analysis_info->GetNumberOfBasicBlocks();
    return (instr_num >= kArm64ScalarHeuristicMaxBodySizeInstr ||
            bb_num >= kArm64ScalarHeuristicMaxBodySizeBlocks);
  }

  uint32_t GetScalarUnrollingFactor(HLoopInformation* loop_info ATTRIBUTE_UNUSED,
                                    uint64_t trip_count) const OVERRIDE {
    uint32_t desired_unrolling_factor = kArm64ScalarMaxUnrollFactor;
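    // Illustrative example (values chosen for this comment only): with the
    // desired factor of 2, a trip count of 7 is rejected below (7 % 2 != 0)
    // and kNoUnrollingFactor is returned, while a trip count of 8 unrolls by 2.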
    if (trip_count < desired_unrolling_factor || trip_count % desired_unrolling_factor != 0) {
      return kNoUnrollingFactor;
    }

    return desired_unrolling_factor;
  }

  uint32_t GetSIMDUnrollingFactor(HBasicBlock* block,
                                  int64_t trip_count,
                                  uint32_t max_peel,
                                  uint32_t vector_length) const OVERRIDE {
    // Don't unroll with insufficient iterations.
    // TODO: Unroll loops with unknown trip count.
    DCHECK_NE(vector_length, 0u);
    if (trip_count < (2 * vector_length + max_peel)) {
      return kNoUnrollingFactor;
    }
    // Don't unroll for large loop body size.
    uint32_t instruction_count = block->GetInstructions().CountSize();
    if (instruction_count >= kArm64SimdHeuristicMaxBodySizeInstr) {
      return kNoUnrollingFactor;
    }
    // Find a beneficial unroll factor with the following restrictions:
    //  - At least one iteration of the transformed loop should be executed.
    //  - The loop body shouldn't be "too big" (heuristic).
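    //
    // Illustrative example (hypothetical values): for instruction_count == 9,
    // trip_count == 100, max_peel == 4 and vector_length == 4:
    //   uf1 = 50 / 9        = 5   (limited by body size),
    //   uf2 = (100 - 4) / 4 = 24  (limited by available iterations),
    //   unroll_factor = TruncToPowerOfTwo(std::min({5, 24, 8})) = 4.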

    uint32_t uf1 = kArm64SimdHeuristicMaxBodySizeInstr / instruction_count;
    uint32_t uf2 = (trip_count - max_peel) / vector_length;
    uint32_t unroll_factor =
        TruncToPowerOfTwo(std::min({uf1, uf2, kArm64SimdMaxUnrollFactor}));
    DCHECK_GE(unroll_factor, 1u);
    return unroll_factor;
  }
};

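// Creates the loop helper for the given instruction set; ISAs without a
// specialized helper fall back to the generic ArchDefaultLoopHelper.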
ArchDefaultLoopHelper* ArchDefaultLoopHelper::Create(InstructionSet isa,
                                                     ArenaAllocator* allocator) {
  switch (isa) {
    case InstructionSet::kArm64: {
      return new (allocator) Arm64LoopHelper;
    }
    default: {
      return new (allocator) ArchDefaultLoopHelper;
    }
  }
}

}  // namespace art