/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_sinking.h"

#include "common_dominator.h"
#include "nodes.h"

namespace art {

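// Illustrative example (hypothetical Java source, not taken from an ART test)
// of the pattern this pass targets:
//
//   Point p = new Point();          // allocation
//   p.x = a;                        // store into a local allocation
//   if (errorCase) {
//     throw new AssertionError(p);  // the only use of `p`
//   }
//
// Both the allocation and the store are only needed on the throwing branch,
// which Run() below treats as uncommon, so they can be sunk into that branch
// and off the common path.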
void CodeSinking::Run() {
  HBasicBlock* exit = graph_->GetExitBlock();
  if (exit == nullptr) {
    // Infinite loop, just bail.
    return;
  }
  // TODO(ngeoffray): we do not profile branches yet, so use throw instructions
  // as an indicator of an uncommon branch.
  for (HBasicBlock* exit_predecessor : exit->GetPredecessors()) {
    if (exit_predecessor->GetLastInstruction()->IsThrow()) {
      SinkCodeToUncommonBranch(exit_predecessor);
    }
  }
}

static bool IsInterestingInstruction(HInstruction* instruction) {
  // Instructions from the entry block (for example constants) are never interesting to move.
  if (instruction->GetBlock() == instruction->GetBlock()->GetGraph()->GetEntryBlock()) {
    return false;
  }
  // We want to move moveable instructions that cannot throw, as well as
  // heap stores and allocations.

  // Volatile stores cannot be moved.
  if (instruction->IsInstanceFieldSet()) {
    if (instruction->AsInstanceFieldSet()->IsVolatile()) {
      return false;
    }
  }

  // Check allocations first, as they can throw, but it is safe to move them.
  if (instruction->IsNewInstance() || instruction->IsNewArray()) {
    return true;
  }

  // All other instructions that can throw cannot be moved.
  if (instruction->CanThrow()) {
    return false;
  }

  // We can only store into local allocations. Other heap references may
  // escape. Note that allocations can escape too, but we only move
  // allocations if their users can move too, or are in the list of
  // post dominated blocks.
  if (instruction->IsInstanceFieldSet()) {
    if (!instruction->InputAt(0)->IsNewInstance()) {
      return false;
    }
  }

  if (instruction->IsArraySet()) {
    if (!instruction->InputAt(0)->IsNewArray()) {
      return false;
    }
  }

  // Heap accesses cannot go past instructions that have memory side effects, which
  // we are not tracking here. Note that the load/store elimination optimization
  // runs before this optimization, and should have removed interesting ones.
  // In theory, we could handle loads of local allocations, but this is currently
  // hard to test, as LSE removes them.
  if (instruction->IsStaticFieldGet() ||
      instruction->IsInstanceFieldGet() ||
      instruction->IsArrayGet()) {
    return false;
  }

  if (instruction->IsInstanceFieldSet() ||
      instruction->IsArraySet() ||
      instruction->CanBeMoved()) {
    return true;
  }
  return false;
}

static void AddInstruction(HInstruction* instruction,
                           const ArenaBitVector& processed_instructions,
                           const ArenaBitVector& discard_blocks,
                           ArenaVector<HInstruction*>* worklist) {
  // Add the instruction to the work list if its block is not in the list of
  // blocks to discard, it hasn't already been processed, and it is of interest.
  if (!discard_blocks.IsBitSet(instruction->GetBlock()->GetBlockId()) &&
      !processed_instructions.IsBitSet(instruction->GetId()) &&
      IsInterestingInstruction(instruction)) {
    worklist->push_back(instruction);
  }
}

static void AddInputs(HInstruction* instruction,
                      const ArenaBitVector& processed_instructions,
                      const ArenaBitVector& discard_blocks,
                      ArenaVector<HInstruction*>* worklist) {
  for (HInstruction* input : instruction->GetInputs()) {
    AddInstruction(input, processed_instructions, discard_blocks, worklist);
  }
}

static void AddInputs(HBasicBlock* block,
                      const ArenaBitVector& processed_instructions,
                      const ArenaBitVector& discard_blocks,
                      ArenaVector<HInstruction*>* worklist) {
  for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
    AddInputs(it.Current(), processed_instructions, discard_blocks, worklist);
  }
  for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    AddInputs(it.Current(), processed_instructions, discard_blocks, worklist);
  }
}

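// Returns whether `user` is a store into the local allocation `instruction`
// that sits outside the post-dominated blocks. Such stores are filtered out
// when computing the ideal position of the allocation itself, as they are
// moved separately in step (3) of the sinking algorithm.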
static bool ShouldFilterUse(HInstruction* instruction,
                            HInstruction* user,
                            const ArenaBitVector& post_dominated) {
  if (instruction->IsNewInstance()) {
    return user->IsInstanceFieldSet() &&
           (user->InputAt(0) == instruction) &&
           !post_dominated.IsBitSet(user->GetBlock()->GetBlockId());
  } else if (instruction->IsNewArray()) {
    return user->IsArraySet() &&
           (user->InputAt(0) == instruction) &&
           !post_dominated.IsBitSet(user->GetBlock()->GetBlockId());
  }
  return false;
}


// Find the ideal position for moving `instruction`. If `filter` is true,
// we filter out stores into that instruction, which are processed
// first in step (3) of the sinking algorithm.
// This method is tailored to the sinking algorithm, unlike
// the generic HInstruction::MoveBeforeFirstUserAndOutOfLoops.
static HInstruction* FindIdealPosition(HInstruction* instruction,
                                       const ArenaBitVector& post_dominated,
                                       bool filter = false) {
  DCHECK(!instruction->IsPhi());  // Makes no sense for Phi.

  // Find the target block.
  CommonDominator finder(/* start_block */ nullptr);
  for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
    HInstruction* user = use.GetUser();
    if (!(filter && ShouldFilterUse(instruction, user, post_dominated))) {
      finder.Update(user->IsPhi()
          ? user->GetBlock()->GetPredecessors()[use.GetIndex()]
          : user->GetBlock());
    }
  }
  for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
    DCHECK(!use.GetUser()->GetHolder()->IsPhi());
    DCHECK(!filter || !ShouldFilterUse(instruction, use.GetUser()->GetHolder(), post_dominated));
    finder.Update(use.GetUser()->GetHolder()->GetBlock());
  }
  HBasicBlock* target_block = finder.Get();
  if (target_block == nullptr) {
    // No user we can go next to? Likely an LSE or DCE limitation.
    return nullptr;
  }

  // Move to the first dominator not in a loop, if we can.
  while (target_block->IsInLoop()) {
    if (!post_dominated.IsBitSet(target_block->GetDominator()->GetBlockId())) {
      break;
    }
    target_block = target_block->GetDominator();
    DCHECK(target_block != nullptr);
  }

  // Find insertion position. No need to filter anymore, as we have found a
  // target block.
  HInstruction* insert_pos = nullptr;
  for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
    if (use.GetUser()->GetBlock() == target_block &&
        (insert_pos == nullptr || use.GetUser()->StrictlyDominates(insert_pos))) {
      insert_pos = use.GetUser();
    }
  }
  for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
    HInstruction* user = use.GetUser()->GetHolder();
    if (user->GetBlock() == target_block &&
        (insert_pos == nullptr || user->StrictlyDominates(insert_pos))) {
      insert_pos = user;
    }
  }
  if (insert_pos == nullptr) {
    // No user in `target_block`, insert before the control flow instruction.
    insert_pos = target_block->GetLastInstruction();
    DCHECK(insert_pos->IsControlFlow());
    // Avoid splitting HCondition from HIf to prevent unnecessary materialization.
    if (insert_pos->IsIf()) {
      HInstruction* if_input = insert_pos->AsIf()->InputAt(0);
      if (if_input == insert_pos->GetPrevious()) {
        insert_pos = if_input;
      }
    }
  }
  DCHECK(!insert_pos->IsPhi());
  return insert_pos;
}

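// Sink movable instructions into the blocks post dominated by `end_block`, a
// block that ends with a throw and is therefore assumed uncommon. The
// algorithm proceeds in three steps, detailed below: (1) collect the blocks
// post dominated by `end_block`, (2) walk a worklist to find instructions
// whose uses all lie in those blocks, and (3) move each candidate as close to
// its first use as possible.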
void CodeSinking::SinkCodeToUncommonBranch(HBasicBlock* end_block) {
  // Local allocator to discard data structures created below at the end of
  // this optimization.
  ArenaAllocator allocator(graph_->GetArena()->GetArenaPool());

  size_t number_of_instructions = graph_->GetCurrentInstructionId();
  ArenaVector<HInstruction*> worklist(allocator.Adapter(kArenaAllocMisc));
  ArenaBitVector processed_instructions(&allocator, number_of_instructions, /* expandable */ false);
  ArenaBitVector post_dominated(&allocator, graph_->GetBlocks().size(), /* expandable */ false);
  ArenaBitVector instructions_that_can_move(
      &allocator, number_of_instructions, /* expandable */ false);
  ArenaVector<HInstruction*> move_in_order(allocator.Adapter(kArenaAllocMisc));

  // Step (1): Visit post order to get a subset of blocks post dominated by `end_block`.
  // TODO(ngeoffray): Getting the full set of post-dominated blocks should be done by
  // computing the post dominator tree, but that could be too time consuming. Also,
  // we should start the analysis from blocks dominated by an uncommon branch, but we
  // don't profile branches yet.
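  // In a post order traversal, every successor of a block (back edges aside)
  // appears before the block itself, so the single pass below is enough to
  // mark this subset.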
  bool found_block = false;
  for (HBasicBlock* block : graph_->GetPostOrder()) {
    if (block == end_block) {
      found_block = true;
      post_dominated.SetBit(block->GetBlockId());
    } else if (found_block) {
      bool is_post_dominated = true;
      if (block->GetSuccessors().empty()) {
        // We currently bail for loops.
        is_post_dominated = false;
      } else {
        for (HBasicBlock* successor : block->GetSuccessors()) {
          if (!post_dominated.IsBitSet(successor->GetBlockId())) {
            is_post_dominated = false;
            break;
          }
        }
      }
      if (is_post_dominated) {
        post_dominated.SetBit(block->GetBlockId());
      }
    }
  }

  // Now that we have found a subset of post-dominated blocks, add to the worklist all inputs
  // of instructions in these blocks that are not themselves in these blocks.
  // Also find the common dominator of the found post dominated blocks, to help filter
  // out unmovable uses in step (2).
  CommonDominator finder(end_block);
  for (size_t i = 0, e = graph_->GetBlocks().size(); i < e; ++i) {
    if (post_dominated.IsBitSet(i)) {
      finder.Update(graph_->GetBlocks()[i]);
      AddInputs(graph_->GetBlocks()[i], processed_instructions, post_dominated, &worklist);
    }
  }
  HBasicBlock* common_dominator = finder.Get();

  // Step (2): iterate over the worklist to find sinking candidates.
  while (!worklist.empty()) {
    HInstruction* instruction = worklist.back();
    if (processed_instructions.IsBitSet(instruction->GetId())) {
      // The instruction has already been processed, continue. This happens
      // when the instruction is the input/user of multiple instructions.
      worklist.pop_back();
      continue;
    }
    bool all_users_in_post_dominated_blocks = true;
    bool can_move = true;
    // Check users of the instruction.
    for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
      HInstruction* user = use.GetUser();
      if (!post_dominated.IsBitSet(user->GetBlock()->GetBlockId()) &&
          !instructions_that_can_move.IsBitSet(user->GetId())) {
        all_users_in_post_dominated_blocks = false;
        // If we've already processed this user, or the user cannot be moved, or
        // is not dominating the post dominated blocks, bail.
        // TODO(ngeoffray): The domination check is an approximation. We should
        // instead check if the dominated blocks post dominate the user's block,
        // but we do not have post dominance information here.
        if (processed_instructions.IsBitSet(user->GetId()) ||
            !IsInterestingInstruction(user) ||
            !user->GetBlock()->Dominates(common_dominator)) {
          can_move = false;
          break;
        }
      }
    }

    // Check environment users of the instruction. Some of these users require
    // the instruction not to move.
    if (all_users_in_post_dominated_blocks) {
      for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
        HEnvironment* environment = use.GetUser();
        HInstruction* user = environment->GetHolder();
        if (!post_dominated.IsBitSet(user->GetBlock()->GetBlockId())) {
          if (graph_->IsDebuggable() ||
              user->IsDeoptimize() ||
              user->CanThrowIntoCatchBlock() ||
              (user->IsSuspendCheck() && graph_->IsCompilingOsr())) {
            can_move = false;
            break;
          }
        }
      }
    }
    if (!can_move) {
      // Instruction cannot be moved, mark it as processed and remove it from the work
      // list.
      processed_instructions.SetBit(instruction->GetId());
      worklist.pop_back();
    } else if (all_users_in_post_dominated_blocks) {
      // Instruction is a candidate for being sunk. Mark it as such, remove it from the
      // work list, and add its inputs to the work list.
      instructions_that_can_move.SetBit(instruction->GetId());
      move_in_order.push_back(instruction);
      processed_instructions.SetBit(instruction->GetId());
      worklist.pop_back();
      AddInputs(instruction, processed_instructions, post_dominated, &worklist);
      // Drop the environment uses not in the list of post-dominated blocks. This is
      // to help step (3) of this optimization, when we start moving instructions
      // closer to their use.
      for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
        HEnvironment* environment = use.GetUser();
        HInstruction* user = environment->GetHolder();
        if (!post_dominated.IsBitSet(user->GetBlock()->GetBlockId())) {
          environment->RemoveAsUserOfInput(use.GetIndex());
          environment->SetRawEnvAt(use.GetIndex(), nullptr);
        }
      }
    } else {
      // The information we have on the users was not enough to decide whether the
      // instruction could be moved.
      // Add the users to the work list, and keep the instruction in the work list
      // to process it again once all users have been processed.
      for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
        AddInstruction(use.GetUser(), processed_instructions, post_dominated, &worklist);
      }
    }
  }

  // Make sure we process instructions in dominated order. This is required for heap
  // stores.
  std::sort(move_in_order.begin(), move_in_order.end(), [](HInstruction* a, HInstruction* b) {
    return b->StrictlyDominates(a);
  });
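  // Note: the comparator above orders dominated instructions first, so in
  // step (3) below stores are sunk before the allocation they write to, which
  // helps preserve the original relative order of the heap stores.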

  // Step (3): Try to move sinking candidates.
  for (HInstruction* instruction : move_in_order) {
    HInstruction* position = nullptr;
    if (instruction->IsArraySet() || instruction->IsInstanceFieldSet()) {
      if (!instructions_that_can_move.IsBitSet(instruction->InputAt(0)->GetId())) {
        // A store can trivially move, but it can safely do so only if the heap
        // location it stores to can also move.
        // TODO(ngeoffray): Handle allocation/store cycles by pruning these instructions
        // from the set and all their inputs.
        continue;
      }
      // Find the position of the instruction we're storing into, filtering out this
      // store and all other stores to that instruction.
      position = FindIdealPosition(instruction->InputAt(0), post_dominated, /* filter */ true);

      // The position needs to be dominated by the store, in order for the store to move there.
      if (position == nullptr || !instruction->GetBlock()->Dominates(position->GetBlock())) {
        continue;
      }
    } else {
      // Find the ideal position within the post dominated blocks.
      position = FindIdealPosition(instruction, post_dominated);
      if (position == nullptr) {
        continue;
      }
    }
    // Bail if we could not find a position in the post dominated blocks (for example,
    // if there are multiple users whose common dominator is not in the list of
    // post dominated blocks).
    if (!post_dominated.IsBitSet(position->GetBlock()->GetBlockId())) {
      continue;
    }
    MaybeRecordStat(MethodCompilationStat::kInstructionSunk);
    instruction->MoveBefore(position, /* ensure_safety */ false);
  }
}

}  // namespace art