/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "compiler_internals.h"
#include "local_value_numbering.h"
#include "dataflow_iterator-inl.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "utils/scoped_arena_containers.h"

namespace art {

static unsigned int Predecessors(BasicBlock* bb) {
  return bb->predecessors->Size();
}

/* Setup a constant value for opcodes that have the DF_SETS_CONST attribute */
void MIRGraph::SetConstant(int32_t ssa_reg, int value) {
  is_constant_v_->SetBit(ssa_reg);
  constant_values_[ssa_reg] = value;
}

void MIRGraph::SetConstantWide(int ssa_reg, int64_t value) {
  is_constant_v_->SetBit(ssa_reg);
  constant_values_[ssa_reg] = Low32Bits(value);
  constant_values_[ssa_reg + 1] = High32Bits(value);
}

void MIRGraph::DoConstantPropagation(BasicBlock* bb) {
  MIR* mir;

  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    // Skip the rest of the pass if the BB has a MIR without SSA representation.
    if (mir->ssa_rep == nullptr) {
      return;
    }

    uint64_t df_attributes = GetDataFlowAttributes(mir);

    MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;

    if (!(df_attributes & DF_HAS_DEFS)) continue;

    /* Handle instructions that set up constants directly */
    if (df_attributes & DF_SETS_CONST) {
      if (df_attributes & DF_DA) {
        int32_t vB = static_cast<int32_t>(d_insn->vB);
        switch (d_insn->opcode) {
          case Instruction::CONST_4:
          case Instruction::CONST_16:
          case Instruction::CONST:
            SetConstant(mir->ssa_rep->defs[0], vB);
            break;
          case Instruction::CONST_HIGH16:
            SetConstant(mir->ssa_rep->defs[0], vB << 16);
            break;
          case Instruction::CONST_WIDE_16:
          case Instruction::CONST_WIDE_32:
            SetConstantWide(mir->ssa_rep->defs[0], static_cast<int64_t>(vB));
            break;
          case Instruction::CONST_WIDE:
            SetConstantWide(mir->ssa_rep->defs[0], d_insn->vB_wide);
            break;
          case Instruction::CONST_WIDE_HIGH16:
            SetConstantWide(mir->ssa_rep->defs[0], static_cast<int64_t>(vB) << 48);
            break;
          default:
            break;
        }
      }
      /* Handle moves of registers that hold known constants */
    } else if (df_attributes & DF_IS_MOVE) {
      int i;

      for (i = 0; i < mir->ssa_rep->num_uses; i++) {
        if (!is_constant_v_->IsBitSet(mir->ssa_rep->uses[i])) break;
      }
      /* Move a register holding a constant to another register */
      if (i == mir->ssa_rep->num_uses) {
        SetConstant(mir->ssa_rep->defs[0], constant_values_[mir->ssa_rep->uses[0]]);
        if (df_attributes & DF_A_WIDE) {
          SetConstant(mir->ssa_rep->defs[1], constant_values_[mir->ssa_rep->uses[1]]);
        }
      }
    }
  }
  /* TODO: implement code to handle arithmetic operations */
}

/* Advance to next strictly dominated MIR node in an extended basic block */
MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
  BasicBlock* bb = *p_bb;
  if (mir != NULL) {
    mir = mir->next;
    if (mir == NULL) {
      bb = GetBasicBlock(bb->fall_through);
      if ((bb == NULL) || Predecessors(bb) != 1) {
        mir = NULL;
      } else {
        *p_bb = bb;
        mir = bb->first_mir_insn;
      }
    }
  }
  return mir;
}

/*
 * To be used at an invoke mir. If the logically next mir node represents
 * a move-result, return it. Else, return NULL. If a move-result exists,
 * it is required to immediately follow the invoke with no intervening
 * opcodes or incoming arcs. However, if the result of the invoke is not
 * used, a move-result may not be present.
 */
MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
  BasicBlock* tbb = bb;
  mir = AdvanceMIR(&tbb, mir);
  while (mir != NULL) {
    int opcode = mir->dalvikInsn.opcode;
    if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
        (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) ||
        (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
      break;
    }
    // Keep going if pseudo op, otherwise terminate.
    if (opcode < kNumPackedOpcodes) {
      mir = NULL;
    } else {
      mir = AdvanceMIR(&tbb, mir);
    }
  }
  return mir;
}

BasicBlock* MIRGraph::NextDominatedBlock(BasicBlock* bb) {
  if (bb->block_type == kDead) {
    return NULL;
  }
  DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
      || (bb->block_type == kExitBlock));
  BasicBlock* bb_taken = GetBasicBlock(bb->taken);
  BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
  if (((bb_fall_through == NULL) && (bb_taken != NULL)) &&
      ((bb_taken->block_type == kDalvikByteCode) || (bb_taken->block_type == kExitBlock))) {
    // Follow simple unconditional branches.
    bb = bb_taken;
  } else {
    // Follow simple fallthrough.
    bb = (bb_taken != NULL) ? NULL : bb_fall_through;
  }
  if (bb == NULL || (Predecessors(bb) != 1)) {
    return NULL;
  }
  DCHECK((bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock));
  return bb;
}

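// Find the Phi node in bb (if any) that has ssa_name as one of its operands.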
static MIR* FindPhi(BasicBlock* bb, int ssa_name) {
  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
      for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
        if (mir->ssa_rep->uses[i] == ssa_name) {
          return mir;
        }
      }
    }
  }
  return NULL;
}

static SelectInstructionKind SelectKind(MIR* mir) {
  switch (mir->dalvikInsn.opcode) {
    case Instruction::MOVE:
    case Instruction::MOVE_OBJECT:
    case Instruction::MOVE_16:
    case Instruction::MOVE_OBJECT_16:
    case Instruction::MOVE_FROM16:
    case Instruction::MOVE_OBJECT_FROM16:
      return kSelectMove;
    case Instruction::CONST:
    case Instruction::CONST_4:
    case Instruction::CONST_16:
      return kSelectConst;
    case Instruction::GOTO:
    case Instruction::GOTO_16:
    case Instruction::GOTO_32:
      return kSelectGoto;
    default:
      return kSelectNone;
  }
}

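// Map IF_EQZ..IF_LEZ to their condition codes. This relies on those opcodes being contiguous
// and ordered to match the table below; the COMPILE_ASSERTs that follow verify the mapping.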
static constexpr ConditionCode kIfCcZConditionCodes[] = {
    kCondEq, kCondNe, kCondLt, kCondGe, kCondGt, kCondLe
};

COMPILE_ASSERT(arraysize(kIfCcZConditionCodes) == Instruction::IF_LEZ - Instruction::IF_EQZ + 1,
               if_ccz_ccodes_size1);

static constexpr bool IsInstructionIfCcZ(Instruction::Code opcode) {
  return Instruction::IF_EQZ <= opcode && opcode <= Instruction::IF_LEZ;
}

static constexpr ConditionCode ConditionCodeForIfCcZ(Instruction::Code opcode) {
  return kIfCcZConditionCodes[opcode - Instruction::IF_EQZ];
}

COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_EQZ) == kCondEq, check_if_eqz_ccode);
COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_NEZ) == kCondNe, check_if_nez_ccode);
COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_LTZ) == kCondLt, check_if_ltz_ccode);
COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_GEZ) == kCondGe, check_if_gez_ccode);
COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_GTZ) == kCondGt, check_if_gtz_ccode);
COMPILE_ASSERT(ConditionCodeForIfCcZ(Instruction::IF_LEZ) == kCondLe, check_if_lez_ccode);

int MIRGraph::GetSSAUseCount(int s_reg) {
  return raw_use_counts_.Get(s_reg);
}

size_t MIRGraph::GetNumAvailableNonSpecialCompilerTemps() {
  if (num_non_special_compiler_temps_ >= max_available_non_special_compiler_temps_) {
    return 0;
  } else {
    return max_available_non_special_compiler_temps_ - num_non_special_compiler_temps_;
  }
}

// FIXME - will probably need to revisit all uses of this, as type not defined.
static const RegLocation temp_loc = {kLocCompilerTemp,
                                     0, 1 /*defined*/, 0, 0, 0, 0, 0, 1 /*home*/,
                                     RegStorage(), INVALID_SREG, INVALID_SREG};

CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide) {
  // There is a limit to the number of non-special temps so check to make sure it wasn't exceeded.
  if (ct_type == kCompilerTempVR) {
    size_t available_temps = GetNumAvailableNonSpecialCompilerTemps();
    if (available_temps <= 0 || (available_temps <= 1 && wide)) {
      return 0;
    }
  }

  CompilerTemp *compiler_temp = static_cast<CompilerTemp *>(arena_->Alloc(sizeof(CompilerTemp),
                                                                          kArenaAllocRegAlloc));

  // Create the type of temp requested. Special temps need special handling because
  // they have a specific virtual register assignment.
  if (ct_type == kCompilerTempSpecialMethodPtr) {
    DCHECK_EQ(wide, false);
    compiler_temp->v_reg = static_cast<int>(kVRegMethodPtrBaseReg);
    compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);

    // The MIR graph keeps track of the sreg for method pointer specially, so record that now.
    method_sreg_ = compiler_temp->s_reg_low;
  } else {
    DCHECK_EQ(ct_type, kCompilerTempVR);

    // The new non-special compiler temp must receive a unique v_reg with a negative value.
    compiler_temp->v_reg = static_cast<int>(kVRegNonSpecialTempBaseReg) -
        num_non_special_compiler_temps_;
    compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
    num_non_special_compiler_temps_++;

    if (wide) {
      // Create a new CompilerTemp for the high part.
      CompilerTemp *compiler_temp_high =
          static_cast<CompilerTemp *>(arena_->Alloc(sizeof(CompilerTemp), kArenaAllocRegAlloc));
      compiler_temp_high->v_reg = compiler_temp->v_reg;
      compiler_temp_high->s_reg_low = compiler_temp->s_reg_low;
      compiler_temps_.Insert(compiler_temp_high);

      // Ensure that the two registers are consecutive. Since the virtual registers used for temps
      // grow in a negative fashion, we need the smaller to refer to the low part. Thus, we
      // redefine the v_reg and s_reg_low.
      compiler_temp->v_reg--;
      int ssa_reg_high = compiler_temp->s_reg_low;
      compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
      int ssa_reg_low = compiler_temp->s_reg_low;

      // If needed initialize the register location for the high part.
      // The low part is handled later in this method on a common path.
      if (reg_location_ != nullptr) {
        reg_location_[ssa_reg_high] = temp_loc;
        reg_location_[ssa_reg_high].high_word = 1;
        reg_location_[ssa_reg_high].s_reg_low = ssa_reg_low;
        reg_location_[ssa_reg_high].wide = true;
      }

      num_non_special_compiler_temps_++;
    }
  }

  // Have we already allocated the register locations?
  if (reg_location_ != nullptr) {
    int ssa_reg_low = compiler_temp->s_reg_low;
    reg_location_[ssa_reg_low] = temp_loc;
    reg_location_[ssa_reg_low].s_reg_low = ssa_reg_low;
    reg_location_[ssa_reg_low].wide = wide;
  }

  compiler_temps_.Insert(compiler_temp);
  return compiler_temp;
}

/* Do some MIR-level extended basic block optimizations */
bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
  if (bb->block_type == kDead) {
    return true;
  }
  bool use_lvn = bb->use_lvn;
  std::unique_ptr<ScopedArenaAllocator> allocator;
  std::unique_ptr<LocalValueNumbering> local_valnum;
  if (use_lvn) {
    allocator.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
    local_valnum.reset(new (allocator.get()) LocalValueNumbering(cu_, allocator.get()));
  }
  while (bb != NULL) {
    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
      // TUNING: use the returned value number for CSE.
      if (use_lvn) {
        local_valnum->GetValueNumber(mir);
      }
      // Look for interesting opcodes, skip otherwise.
      Instruction::Code opcode = mir->dalvikInsn.opcode;
      switch (opcode) {
        case Instruction::CMPL_FLOAT:
        case Instruction::CMPL_DOUBLE:
        case Instruction::CMPG_FLOAT:
        case Instruction::CMPG_DOUBLE:
        case Instruction::CMP_LONG:
          if ((cu_->disable_opt & (1 << kBranchFusing)) != 0) {
            // Bitcode doesn't allow this optimization.
            break;
          }
          if (mir->next != NULL) {
            MIR* mir_next = mir->next;
            // Make sure result of cmp is used by next insn and nowhere else.
            if (IsInstructionIfCcZ(mir->next->dalvikInsn.opcode) &&
                (mir->ssa_rep->defs[0] == mir_next->ssa_rep->uses[0]) &&
                (GetSSAUseCount(mir->ssa_rep->defs[0]) == 1)) {
              mir_next->meta.ccode = ConditionCodeForIfCcZ(mir_next->dalvikInsn.opcode);
              switch (opcode) {
                case Instruction::CMPL_FLOAT:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmplFloat);
                  break;
                case Instruction::CMPL_DOUBLE:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmplDouble);
                  break;
                case Instruction::CMPG_FLOAT:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmpgFloat);
                  break;
                case Instruction::CMPG_DOUBLE:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmpgDouble);
                  break;
                case Instruction::CMP_LONG:
                  mir_next->dalvikInsn.opcode =
                      static_cast<Instruction::Code>(kMirOpFusedCmpLong);
                  break;
                default: LOG(ERROR) << "Unexpected opcode: " << opcode;
              }
              mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
              mir_next->ssa_rep->num_uses = mir->ssa_rep->num_uses;
              mir_next->ssa_rep->uses = mir->ssa_rep->uses;
              mir_next->ssa_rep->fp_use = mir->ssa_rep->fp_use;
              mir_next->ssa_rep->num_defs = 0;
              mir->ssa_rep->num_uses = 0;
              mir->ssa_rep->num_defs = 0;
            }
          }
          break;
        case Instruction::GOTO:
        case Instruction::GOTO_16:
        case Instruction::GOTO_32:
        case Instruction::IF_EQ:
        case Instruction::IF_NE:
        case Instruction::IF_LT:
        case Instruction::IF_GE:
        case Instruction::IF_GT:
        case Instruction::IF_LE:
        case Instruction::IF_EQZ:
        case Instruction::IF_NEZ:
        case Instruction::IF_LTZ:
        case Instruction::IF_GEZ:
        case Instruction::IF_GTZ:
        case Instruction::IF_LEZ:
          // If we've got a backwards branch to return, no need to suspend check.
          if ((IsBackedge(bb, bb->taken) && GetBasicBlock(bb->taken)->dominates_return) ||
              (IsBackedge(bb, bb->fall_through) &&
               GetBasicBlock(bb->fall_through)->dominates_return)) {
            mir->optimization_flags |= MIR_IGNORE_SUSPEND_CHECK;
            if (cu_->verbose) {
              LOG(INFO) << "Suppressed suspend check on branch to return at 0x" << std::hex
                        << mir->offset;
            }
          }
          break;
        default:
          break;
      }
      // Is this the select pattern?
      // TODO: flesh out support for Mips. NOTE: llvm's select op doesn't quite work here.
      // TUNING: expand to support IF_xx compare & branches.
      if (!cu_->compiler->IsPortable() &&
          (cu_->instruction_set == kThumb2 || cu_->instruction_set == kX86 ||
           cu_->instruction_set == kX86_64) &&
          IsInstructionIfCcZ(mir->dalvikInsn.opcode)) {
        BasicBlock* ft = GetBasicBlock(bb->fall_through);
        DCHECK(ft != NULL);
        BasicBlock* ft_ft = GetBasicBlock(ft->fall_through);
        BasicBlock* ft_tk = GetBasicBlock(ft->taken);

        BasicBlock* tk = GetBasicBlock(bb->taken);
        DCHECK(tk != NULL);
        BasicBlock* tk_ft = GetBasicBlock(tk->fall_through);
        BasicBlock* tk_tk = GetBasicBlock(tk->taken);

        /*
         * In the select pattern, the taken edge goes to a block that unconditionally
         * transfers to the rejoin block and the fall_through edge goes to a block that
         * unconditionally falls through to the rejoin block.
         */
        if ((tk_ft == NULL) && (ft_tk == NULL) && (tk_tk == ft_ft) &&
            (Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
          /*
           * Okay - we have the basic diamond shape. At the very least, we can eliminate the
           * suspend check on the taken-taken branch back to the join point.
           */
          if (SelectKind(tk->last_mir_insn) == kSelectGoto) {
            tk->last_mir_insn->optimization_flags |= (MIR_IGNORE_SUSPEND_CHECK);
          }
          // Are the block bodies something we can handle?
          if ((ft->first_mir_insn == ft->last_mir_insn) &&
              (tk->first_mir_insn != tk->last_mir_insn) &&
              (tk->first_mir_insn->next == tk->last_mir_insn) &&
              ((SelectKind(ft->first_mir_insn) == kSelectMove) ||
               (SelectKind(ft->first_mir_insn) == kSelectConst)) &&
              (SelectKind(ft->first_mir_insn) == SelectKind(tk->first_mir_insn)) &&
              (SelectKind(tk->last_mir_insn) == kSelectGoto)) {
            // Almost there. Are the instructions targeting the same vreg?
            MIR* if_true = tk->first_mir_insn;
            MIR* if_false = ft->first_mir_insn;
            // It's possible that the target of the select isn't used - skip those (rare) cases.
            MIR* phi = FindPhi(tk_tk, if_true->ssa_rep->defs[0]);
            if ((phi != NULL) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
              /*
               * We'll convert the IF_EQZ/IF_NEZ to a SELECT. We need to find the
               * Phi node in the merge block and delete it (while using the SSA name
               * of the merge as the target of the SELECT). Delete both taken and
               * fallthrough blocks, and set fallthrough to merge block.
               * NOTE: not updating other dataflow info (no longer used at this point).
               * If this changes, need to update i_dom, etc. here (and in CombineBlocks).
               */
              mir->meta.ccode = ConditionCodeForIfCcZ(mir->dalvikInsn.opcode);
              mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpSelect);
              bool const_form = (SelectKind(if_true) == kSelectConst);
              if ((SelectKind(if_true) == kSelectMove)) {
                if (IsConst(if_true->ssa_rep->uses[0]) &&
                    IsConst(if_false->ssa_rep->uses[0])) {
                  const_form = true;
                  if_true->dalvikInsn.vB = ConstantValue(if_true->ssa_rep->uses[0]);
                  if_false->dalvikInsn.vB = ConstantValue(if_false->ssa_rep->uses[0]);
                }
              }
              if (const_form) {
                /*
                 * TODO: If both constants are the same value, then instead of generating
                 * a select, we should simply generate a const bytecode. This should be
                 * considered after inlining which can lead to CFG of this form.
                 */
                // "true" set val in vB
                mir->dalvikInsn.vB = if_true->dalvikInsn.vB;
                // "false" set val in vC
                mir->dalvikInsn.vC = if_false->dalvikInsn.vB;
              } else {
                DCHECK_EQ(SelectKind(if_true), kSelectMove);
                DCHECK_EQ(SelectKind(if_false), kSelectMove);
                int* src_ssa =
                    static_cast<int*>(arena_->Alloc(sizeof(int) * 3, kArenaAllocDFInfo));
                src_ssa[0] = mir->ssa_rep->uses[0];
                src_ssa[1] = if_true->ssa_rep->uses[0];
                src_ssa[2] = if_false->ssa_rep->uses[0];
                mir->ssa_rep->uses = src_ssa;
                mir->ssa_rep->num_uses = 3;
              }
              mir->ssa_rep->num_defs = 1;
              mir->ssa_rep->defs =
                  static_cast<int*>(arena_->Alloc(sizeof(int) * 1, kArenaAllocDFInfo));
              mir->ssa_rep->fp_def =
                  static_cast<bool*>(arena_->Alloc(sizeof(bool) * 1, kArenaAllocDFInfo));
              mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
              // Match type of uses to def.
              mir->ssa_rep->fp_use =
                  static_cast<bool*>(arena_->Alloc(sizeof(bool) * mir->ssa_rep->num_uses,
                                                   kArenaAllocDFInfo));
              for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
                mir->ssa_rep->fp_use[i] = mir->ssa_rep->fp_def[0];
              }
              /*
               * There is usually a Phi node in the join block for our two cases. If the
               * Phi node only contains our two cases as input, we will use the result
               * SSA name of the Phi node as our select result and delete the Phi. If
               * the Phi node has more than two operands, we will arbitrarily use the SSA
               * name of the "true" path, delete the SSA name of the "false" path from the
               * Phi node (and fix up the incoming arc list).
               */
              if (phi->ssa_rep->num_uses == 2) {
                mir->ssa_rep->defs[0] = phi->ssa_rep->defs[0];
                phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
              } else {
                int dead_def = if_false->ssa_rep->defs[0];
                int live_def = if_true->ssa_rep->defs[0];
                mir->ssa_rep->defs[0] = live_def;
                BasicBlockId* incoming = phi->meta.phi_incoming;
                for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
                  if (phi->ssa_rep->uses[i] == live_def) {
                    incoming[i] = bb->id;
                  }
                }
                for (int i = 0; i < phi->ssa_rep->num_uses; i++) {
                  if (phi->ssa_rep->uses[i] == dead_def) {
                    int last_slot = phi->ssa_rep->num_uses - 1;
                    phi->ssa_rep->uses[i] = phi->ssa_rep->uses[last_slot];
                    incoming[i] = incoming[last_slot];
                  }
                }
              }
              phi->ssa_rep->num_uses--;
              bb->taken = NullBasicBlockId;
              tk->block_type = kDead;
              for (MIR* tmir = ft->first_mir_insn; tmir != NULL; tmir = tmir->next) {
                tmir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
              }
            }
          }
        }
      }
    }
    bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) : NULL;
  }
  if (use_lvn && UNLIKELY(!local_valnum->Good())) {
    LOG(WARNING) << "LVN overflow in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  }

  return true;
}

/* Collect stats on number of checks removed */
void MIRGraph::CountChecks(struct BasicBlock* bb) {
  if (bb->data_flow_info != NULL) {
    for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
      if (mir->ssa_rep == NULL) {
        continue;
      }
      uint64_t df_attributes = GetDataFlowAttributes(mir);
      if (df_attributes & DF_HAS_NULL_CHKS) {
        checkstats_->null_checks++;
        if (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) {
          checkstats_->null_checks_eliminated++;
        }
      }
      if (df_attributes & DF_HAS_RANGE_CHKS) {
        checkstats_->range_checks++;
        if (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) {
          checkstats_->range_checks_eliminated++;
        }
      }
    }
  }
}

/* Try to make common case the fallthrough path */
bool MIRGraph::LayoutBlocks(BasicBlock* bb) {
  // TODO: For now, just looking for direct throws. Consider generalizing for profile feedback.
  if (!bb->explicit_throw) {
    return false;
  }
  BasicBlock* walker = bb;
  while (true) {
    // Check termination conditions.
    if ((walker->block_type == kEntryBlock) || (Predecessors(walker) != 1)) {
      break;
    }
    BasicBlock* prev = GetBasicBlock(walker->predecessors->Get(0));
    if (prev->conditional_branch) {
      if (GetBasicBlock(prev->fall_through) == walker) {
        // Already done - return.
        break;
      }
      DCHECK_EQ(walker, GetBasicBlock(prev->taken));
      // Got one. Flip it and exit.
      Instruction::Code opcode = prev->last_mir_insn->dalvikInsn.opcode;
      switch (opcode) {
        case Instruction::IF_EQ: opcode = Instruction::IF_NE; break;
        case Instruction::IF_NE: opcode = Instruction::IF_EQ; break;
        case Instruction::IF_LT: opcode = Instruction::IF_GE; break;
        case Instruction::IF_GE: opcode = Instruction::IF_LT; break;
        case Instruction::IF_GT: opcode = Instruction::IF_LE; break;
        case Instruction::IF_LE: opcode = Instruction::IF_GT; break;
        case Instruction::IF_EQZ: opcode = Instruction::IF_NEZ; break;
        case Instruction::IF_NEZ: opcode = Instruction::IF_EQZ; break;
        case Instruction::IF_LTZ: opcode = Instruction::IF_GEZ; break;
        case Instruction::IF_GEZ: opcode = Instruction::IF_LTZ; break;
        case Instruction::IF_GTZ: opcode = Instruction::IF_LEZ; break;
        case Instruction::IF_LEZ: opcode = Instruction::IF_GTZ; break;
        default: LOG(FATAL) << "Unexpected opcode " << opcode;
      }
      prev->last_mir_insn->dalvikInsn.opcode = opcode;
      BasicBlockId t_bb = prev->taken;
      prev->taken = prev->fall_through;
      prev->fall_through = t_bb;
      break;
    }
    walker = prev;
  }
  return false;
}

/* Combine any basic blocks terminated by instructions that we now know can't throw */
void MIRGraph::CombineBlocks(struct BasicBlock* bb) {
  // Loop here to allow combining a sequence of blocks.
  while (true) {
    // Check termination conditions.
    if ((bb->first_mir_insn == NULL)
        || (bb->data_flow_info == NULL)
        || (bb->block_type == kExceptionHandling)
        || (bb->block_type == kExitBlock)
        || (bb->block_type == kDead)
        || (bb->taken == NullBasicBlockId)
        || (GetBasicBlock(bb->taken)->block_type != kExceptionHandling)
        || (bb->successor_block_list_type != kNotUsed)
        || (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) != kMirOpCheck)) {
      break;
    }

    // Test the kMirOpCheck instruction.
    MIR* mir = bb->last_mir_insn;
    // Grab the attributes from the paired opcode.
    MIR* throw_insn = mir->meta.throw_insn;
    uint64_t df_attributes = GetDataFlowAttributes(throw_insn);
    bool can_combine = true;
    if (df_attributes & DF_HAS_NULL_CHKS) {
      can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0);
    }
    if (df_attributes & DF_HAS_RANGE_CHKS) {
      can_combine &= ((throw_insn->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0);
    }
    if (!can_combine) {
      break;
    }
    // OK - got one. Combine.
    BasicBlock* bb_next = GetBasicBlock(bb->fall_through);
    DCHECK(!bb_next->catch_entry);
    DCHECK_EQ(Predecessors(bb_next), 1U);
    // Overwrite the kMirOpCheck insn with the paired opcode.
    DCHECK_EQ(bb_next->first_mir_insn, throw_insn);
    *bb->last_mir_insn = *throw_insn;
    // Use the successor info from the next block.
    bb->successor_block_list_type = bb_next->successor_block_list_type;
    bb->successor_blocks = bb_next->successor_blocks;
    // Use the ending block linkage from the next block.
    bb->fall_through = bb_next->fall_through;
    GetBasicBlock(bb->taken)->block_type = kDead;  // Kill the unused exception block.
    bb->taken = bb_next->taken;
    // Include the rest of the instructions.
    bb->last_mir_insn = bb_next->last_mir_insn;
    /*
     * If the lower half of the pair of blocks to combine contained a return, move the flag
     * to the newly combined block.
     */
    bb->terminated_by_return = bb_next->terminated_by_return;

    /*
     * NOTE: we aren't updating all dataflow info here. Should either make sure this pass
     * happens after uses of i_dominated, dom_frontier or update the dataflow info here.
     */

    // Kill bb_next and remap its now-dead id to the parent.
    bb_next->block_type = kDead;
    block_id_map_.Overwrite(bb_next->id, bb->id);

    // Now, loop back and see if we can keep going.
  }
}

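// Allocate the scratch bit vector ("ssa_regs_to_check") used by the iterative null check
// elimination walk below; it is released again in EliminateNullChecksAndInferTypesEnd().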
void MIRGraph::EliminateNullChecksAndInferTypesStart() {
  if ((cu_->disable_opt & (1 << kNullCheckElimination)) == 0) {
    if (kIsDebugBuild) {
      AllNodesIterator iter(this);
      for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
        CHECK(bb->data_flow_info == nullptr || bb->data_flow_info->ending_check_v == nullptr);
      }
    }

    DCHECK(temp_scoped_alloc_.get() == nullptr);
    temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
    temp_bit_vector_size_ = GetNumSSARegs();
    temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
        temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapTempSSARegisterV);
  }
}

/*
 * Eliminate unnecessary null checks for a basic block. Also, while we're doing
 * an iterative walk go ahead and perform type and size inference.
 */
bool MIRGraph::EliminateNullChecksAndInferTypes(BasicBlock* bb) {
  if (bb->data_flow_info == NULL) return false;
  bool infer_changed = false;
  bool do_nce = ((cu_->disable_opt & (1 << kNullCheckElimination)) == 0);

  ArenaBitVector* ssa_regs_to_check = temp_bit_vector_;
  if (do_nce) {
    /*
     * Set initial state. Be conservative with catch
     * blocks and start with no assumptions about null check
     * status (except for "this").
     */
    if ((bb->block_type == kEntryBlock) | bb->catch_entry) {
      ssa_regs_to_check->ClearAllBits();
      // Assume all ins are objects.
      for (uint16_t in_reg = cu_->num_dalvik_registers - cu_->num_ins;
           in_reg < cu_->num_dalvik_registers; in_reg++) {
        ssa_regs_to_check->SetBit(in_reg);
      }
      if ((cu_->access_flags & kAccStatic) == 0) {
        // If non-static method, mark "this" as non-null.
        int this_reg = cu_->num_dalvik_registers - cu_->num_ins;
        ssa_regs_to_check->ClearBit(this_reg);
      }
    } else if (bb->predecessors->Size() == 1) {
      BasicBlock* pred_bb = GetBasicBlock(bb->predecessors->Get(0));
      // pred_bb must have already been processed at least once.
      DCHECK(pred_bb->data_flow_info->ending_check_v != nullptr);
      ssa_regs_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
      if (pred_bb->block_type == kDalvikByteCode) {
        // Check to see if predecessor had an explicit null-check.
        MIR* last_insn = pred_bb->last_mir_insn;
        if (last_insn != nullptr) {
          Instruction::Code last_opcode = last_insn->dalvikInsn.opcode;
          if (last_opcode == Instruction::IF_EQZ) {
            if (pred_bb->fall_through == bb->id) {
              // This block is on the fall-through edge of an IF_EQZ, so the tested
              // register (the IF_EQZ's vA) is known to be non-null here.
              ssa_regs_to_check->ClearBit(last_insn->ssa_rep->uses[0]);
            }
          } else if (last_opcode == Instruction::IF_NEZ) {
            if (pred_bb->taken == bb->id) {
              // This block is the taken target of an IF_NEZ, so the tested register
              // (the IF_NEZ's vA) is known to be non-null here.
              ssa_regs_to_check->ClearBit(last_insn->ssa_rep->uses[0]);
            }
          }
        }
      }
    } else {
      // Starting state is the union of all incoming arcs.
      GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
      BasicBlock* pred_bb = GetBasicBlock(iter.Next());
      CHECK(pred_bb != NULL);
      while (pred_bb->data_flow_info->ending_check_v == nullptr) {
        pred_bb = GetBasicBlock(iter.Next());
        // At least one predecessor must have been processed before this bb.
        DCHECK(pred_bb != nullptr);
        DCHECK(pred_bb->data_flow_info != nullptr);
      }
      ssa_regs_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
      while (true) {
        pred_bb = GetBasicBlock(iter.Next());
        if (!pred_bb) break;
        DCHECK(pred_bb->data_flow_info != nullptr);
        if (pred_bb->data_flow_info->ending_check_v == nullptr) {
          continue;
        }
        ssa_regs_to_check->Union(pred_bb->data_flow_info->ending_check_v);
      }
    }
    // At this point, ssa_regs_to_check shows which sregs have an object definition with
    // no intervening uses.
  }

  // Walk through the instructions in the block, updating as necessary.
  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    if (mir->ssa_rep == NULL) {
      continue;
    }

    // Propagate type info.
    infer_changed = InferTypeAndSize(bb, mir, infer_changed);
    if (!do_nce) {
      continue;
    }

    uint64_t df_attributes = GetDataFlowAttributes(mir);

    // Might need a null check?
    if (df_attributes & DF_HAS_NULL_CHKS) {
      int src_idx;
      if (df_attributes & DF_NULL_CHK_1) {
        src_idx = 1;
      } else if (df_attributes & DF_NULL_CHK_2) {
        src_idx = 2;
      } else {
        src_idx = 0;
      }
      int src_sreg = mir->ssa_rep->uses[src_idx];
      if (!ssa_regs_to_check->IsBitSet(src_sreg)) {
        // Eliminate the null check.
        mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
      } else {
        // Do the null check.
        mir->optimization_flags &= ~MIR_IGNORE_NULL_CHECK;
        // Mark s_reg as null-checked.
        ssa_regs_to_check->ClearBit(src_sreg);
      }
    }

    if ((df_attributes & DF_A_WIDE) ||
        (df_attributes & (DF_REF_A | DF_SETS_CONST | DF_NULL_TRANSFER)) == 0) {
      continue;
    }

    /*
     * First, mark all object definitions as requiring null check.
     * Note: we can't tell if a CONST definition might be used as an object, so treat
     * them all as object definitions.
     */
    if (((df_attributes & (DF_DA | DF_REF_A)) == (DF_DA | DF_REF_A)) ||
        (df_attributes & DF_SETS_CONST)) {
      ssa_regs_to_check->SetBit(mir->ssa_rep->defs[0]);
    }

    // Now, remove mark from all object definitions we know are non-null.
    if (df_attributes & DF_NON_NULL_DST) {
      // Mark target of NEW* as non-null.
      ssa_regs_to_check->ClearBit(mir->ssa_rep->defs[0]);
    }

    // Mark non-null returns from invoke-style NEW*.
    if (df_attributes & DF_NON_NULL_RET) {
      MIR* next_mir = mir->next;
      // Next should be a MOVE_RESULT_OBJECT.
      if (next_mir &&
          next_mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
        // Mark as null checked.
        ssa_regs_to_check->ClearBit(next_mir->ssa_rep->defs[0]);
      } else {
        if (next_mir) {
          LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode;
        } else if (bb->fall_through != NullBasicBlockId) {
          // Look in the next basic block.
          struct BasicBlock* next_bb = GetBasicBlock(bb->fall_through);
          for (MIR* tmir = next_bb->first_mir_insn; tmir != NULL;
               tmir = tmir->next) {
            if (static_cast<int>(tmir->dalvikInsn.opcode) >= static_cast<int>(kMirOpFirst)) {
              continue;
            }
            // First non-pseudo should be MOVE_RESULT_OBJECT.
            if (tmir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
              // Mark as null checked.
              ssa_regs_to_check->ClearBit(tmir->ssa_rep->defs[0]);
            } else {
              LOG(WARNING) << "Unexpected op after new: " << tmir->dalvikInsn.opcode;
            }
            break;
          }
        }
      }
    }

    /*
     * Propagate nullcheck state on register copies (including
     * Phi pseudo copies). For the latter, nullcheck state is
     * the "or" of all the Phi's operands.
     */
    if (df_attributes & (DF_NULL_TRANSFER_0 | DF_NULL_TRANSFER_N)) {
      int tgt_sreg = mir->ssa_rep->defs[0];
      int operands = (df_attributes & DF_NULL_TRANSFER_0) ? 1 :
          mir->ssa_rep->num_uses;
      bool needs_null_check = false;
      for (int i = 0; i < operands; i++) {
        needs_null_check |= ssa_regs_to_check->IsBitSet(mir->ssa_rep->uses[i]);
      }
      if (needs_null_check) {
        ssa_regs_to_check->SetBit(tgt_sreg);
      } else {
        ssa_regs_to_check->ClearBit(tgt_sreg);
      }
    }
  }

  // Did anything change?
  bool nce_changed = false;
  if (do_nce) {
    if (bb->data_flow_info->ending_check_v == nullptr) {
      DCHECK(temp_scoped_alloc_.get() != nullptr);
      bb->data_flow_info->ending_check_v = new (temp_scoped_alloc_.get()) ArenaBitVector(
          temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapNullCheck);
      nce_changed = ssa_regs_to_check->GetHighestBitSet() != -1;
      bb->data_flow_info->ending_check_v->Copy(ssa_regs_to_check);
    } else if (!ssa_regs_to_check->SameBitsSet(bb->data_flow_info->ending_check_v)) {
      nce_changed = true;
      bb->data_flow_info->ending_check_v->Copy(ssa_regs_to_check);
    }
  }
  return infer_changed | nce_changed;
}
922
Vladimir Markobfea9c22014-01-17 17:49:33 +0000923void MIRGraph::EliminateNullChecksAndInferTypesEnd() {
924 if ((cu_->disable_opt & (1 << kNullCheckElimination)) == 0) {
925 // Clean up temporaries.
926 temp_bit_vector_size_ = 0u;
927 temp_bit_vector_ = nullptr;
928 AllNodesIterator iter(this);
929 for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
930 if (bb->data_flow_info != nullptr) {
931 bb->data_flow_info->ending_check_v = nullptr;
932 }
933 }
934 DCHECK(temp_scoped_alloc_.get() != nullptr);
935 temp_scoped_alloc_.reset();
936 }
937}
938
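// Gate for class init check elimination: returns true only if the method has SGET/SPUT
// instructions whose classes may still need initialization checks, and builds the
// insn-offset -> class index mapping consumed by EliminateClassInitChecks() below.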
bool MIRGraph::EliminateClassInitChecksGate() {
  if ((cu_->disable_opt & (1 << kClassInitCheckElimination)) != 0 ||
      !cu_->mir_graph->HasStaticFieldAccess()) {
    return false;
  }

  if (kIsDebugBuild) {
    AllNodesIterator iter(this);
    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
      CHECK(bb->data_flow_info == nullptr || bb->data_flow_info->ending_check_v == nullptr);
    }
  }

  DCHECK(temp_scoped_alloc_.get() == nullptr);
  temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));

  // Each insn we use here has at least 2 code units, offset/2 will be a unique index.
  const size_t end = (cu_->code_item->insns_size_in_code_units_ + 1u) / 2u;
  temp_insn_data_ = static_cast<uint16_t*>(
      temp_scoped_alloc_->Alloc(end * sizeof(*temp_insn_data_), kArenaAllocGrowableArray));

  uint32_t unique_class_count = 0u;
  {
    // Get unique_class_count and store indexes in temp_insn_data_ using a map on a nested
    // ScopedArenaAllocator.

    // Embed the map value in the entry to save space.
    struct MapEntry {
      // Map key: the class identified by the declaring dex file and type index.
      const DexFile* declaring_dex_file;
      uint16_t declaring_class_idx;
      // Map value: index into bit vectors of classes requiring initialization checks.
      uint16_t index;
    };
    struct MapEntryComparator {
      bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
        if (lhs.declaring_class_idx != rhs.declaring_class_idx) {
          return lhs.declaring_class_idx < rhs.declaring_class_idx;
        }
        return lhs.declaring_dex_file < rhs.declaring_dex_file;
      }
    };

    ScopedArenaAllocator allocator(&cu_->arena_stack);
    ScopedArenaSet<MapEntry, MapEntryComparator> class_to_index_map(MapEntryComparator(),
                                                                    allocator.Adapter());

    // First, find all SGET/SPUTs that may need class initialization checks, record INVOKE_STATICs.
    AllNodesIterator iter(this);
    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
      for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
        DCHECK(bb->data_flow_info != nullptr);
        if (mir->dalvikInsn.opcode >= Instruction::SGET &&
            mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
          const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(mir);
          uint16_t index = 0xffffu;
          if (field_info.IsResolved() && !field_info.IsInitialized()) {
            DCHECK_LT(class_to_index_map.size(), 0xffffu);
            MapEntry entry = {
                field_info.DeclaringDexFile(),
                field_info.DeclaringClassIndex(),
                static_cast<uint16_t>(class_to_index_map.size())
            };
            index = class_to_index_map.insert(entry).first->index;
          }
          // Using offset/2 for index into temp_insn_data_.
          temp_insn_data_[mir->offset / 2u] = index;
        }
      }
    }
    unique_class_count = static_cast<uint32_t>(class_to_index_map.size());
  }

  if (unique_class_count == 0u) {
    // All SGET/SPUTs refer to initialized classes. Nothing to do.
    temp_insn_data_ = nullptr;
    temp_scoped_alloc_.reset();
    return false;
  }

  temp_bit_vector_size_ = unique_class_count;
  temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
      temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
  DCHECK_GT(temp_bit_vector_size_, 0u);
  return true;
}

/*
 * Eliminate unnecessary class initialization checks for a basic block.
 */
bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
  DCHECK_EQ((cu_->disable_opt & (1 << kClassInitCheckElimination)), 0u);
  if (bb->data_flow_info == NULL) {
    return false;
  }

  /*
   * Set initial state. Be conservative with catch
   * blocks and start with no assumptions about class init check status.
   */
  ArenaBitVector* classes_to_check = temp_bit_vector_;
  DCHECK(classes_to_check != nullptr);
  if ((bb->block_type == kEntryBlock) | bb->catch_entry) {
    classes_to_check->SetInitialBits(temp_bit_vector_size_);
  } else if (bb->predecessors->Size() == 1) {
    BasicBlock* pred_bb = GetBasicBlock(bb->predecessors->Get(0));
    // pred_bb must have already been processed at least once.
    DCHECK(pred_bb != nullptr);
    DCHECK(pred_bb->data_flow_info != nullptr);
    DCHECK(pred_bb->data_flow_info->ending_check_v != nullptr);
    classes_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
  } else {
    // Starting state is the union of all incoming arcs.
    GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
    BasicBlock* pred_bb = GetBasicBlock(iter.Next());
    DCHECK(pred_bb != NULL);
    DCHECK(pred_bb->data_flow_info != NULL);
    while (pred_bb->data_flow_info->ending_check_v == nullptr) {
      pred_bb = GetBasicBlock(iter.Next());
      // At least one predecessor must have been processed before this bb.
      DCHECK(pred_bb != nullptr);
      DCHECK(pred_bb->data_flow_info != nullptr);
    }
    classes_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
    while (true) {
      pred_bb = GetBasicBlock(iter.Next());
      if (!pred_bb) break;
      DCHECK(pred_bb->data_flow_info != nullptr);
      if (pred_bb->data_flow_info->ending_check_v == nullptr) {
        continue;
      }
      classes_to_check->Union(pred_bb->data_flow_info->ending_check_v);
    }
  }
  // At this point, classes_to_check shows which classes need clinit checks.

  // Walk through the instructions in the block, updating as necessary.
  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
    if (mir->dalvikInsn.opcode >= Instruction::SGET &&
        mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
      uint16_t index = temp_insn_data_[mir->offset / 2u];
      if (index != 0xffffu) {
        if (mir->dalvikInsn.opcode >= Instruction::SGET &&
            mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
          if (!classes_to_check->IsBitSet(index)) {
            // Eliminate the class init check.
            mir->optimization_flags |= MIR_IGNORE_CLINIT_CHECK;
          } else {
            // Do the class init check.
            mir->optimization_flags &= ~MIR_IGNORE_CLINIT_CHECK;
          }
        }
        // Mark the class as initialized.
        classes_to_check->ClearBit(index);
      }
    }
  }

  // Did anything change?
  bool changed = false;
  if (bb->data_flow_info->ending_check_v == nullptr) {
    DCHECK(temp_scoped_alloc_.get() != nullptr);
    DCHECK(bb->data_flow_info != nullptr);
    bb->data_flow_info->ending_check_v = new (temp_scoped_alloc_.get()) ArenaBitVector(
        temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
    changed = classes_to_check->GetHighestBitSet() != -1;
    bb->data_flow_info->ending_check_v->Copy(classes_to_check);
  } else if (!classes_to_check->Equal(bb->data_flow_info->ending_check_v)) {
    changed = true;
    bb->data_flow_info->ending_check_v->Copy(classes_to_check);
  }
  return changed;
}

void MIRGraph::EliminateClassInitChecksEnd() {
  // Clean up temporaries.
  temp_bit_vector_size_ = 0u;
  temp_bit_vector_ = nullptr;
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    if (bb->data_flow_info != nullptr) {
      bb->data_flow_info->ending_check_v = nullptr;
    }
  }

  DCHECK(temp_insn_data_ != nullptr);
  temp_insn_data_ = nullptr;
  DCHECK(temp_scoped_alloc_.get() != nullptr);
  temp_scoped_alloc_.reset();
}

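// Resolve and cache the field lowering info for an IGET/IPUT being inlined from a getter/setter
// invoke. Results are keyed by the invoke's method_lowering_info index in temp_insn_data_, so
// repeated inlines of the same target reuse the cached entry.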
void MIRGraph::ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput) {
  uint32_t method_index = invoke->meta.method_lowering_info;
  if (temp_bit_vector_->IsBitSet(method_index)) {
    iget_or_iput->meta.ifield_lowering_info = temp_insn_data_[method_index];
    DCHECK_EQ(field_idx, GetIFieldLoweringInfo(iget_or_iput).FieldIndex());
    return;
  }

  const MirMethodLoweringInfo& method_info = GetMethodLoweringInfo(invoke);
  MethodReference target = method_info.GetTargetMethod();
  DexCompilationUnit inlined_unit(
      cu_, cu_->class_loader, cu_->class_linker, *target.dex_file,
      nullptr /* code_item not used */, 0u /* class_def_idx not used */, target.dex_method_index,
      0u /* access_flags not used */, nullptr /* verified_method not used */);
  MirIFieldLoweringInfo inlined_field_info(field_idx);
  MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, &inlined_unit, &inlined_field_info, 1u);
  DCHECK(inlined_field_info.IsResolved());

  uint32_t field_info_index = ifield_lowering_infos_.Size();
  ifield_lowering_infos_.Insert(inlined_field_info);
  temp_bit_vector_->SetBit(method_index);
  temp_insn_data_[method_index] = field_info_index;
  iget_or_iput->meta.ifield_lowering_info = field_info_index;
}

bool MIRGraph::InlineCallsGate() {
  if ((cu_->disable_opt & (1 << kSuppressMethodInlining)) != 0 ||
      method_lowering_infos_.Size() == 0u) {
    return false;
  }
  if (cu_->compiler_driver->GetMethodInlinerMap() == nullptr) {
    // This isn't the Quick compiler.
    return false;
  }
  return true;
}

void MIRGraph::InlineCallsStart() {
  // Prepare for inlining getters/setters. Since we're inlining at most 1 IGET/IPUT from
  // each INVOKE, we can index the data by the MIR::meta::method_lowering_info index.

  DCHECK(temp_scoped_alloc_.get() == nullptr);
  temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
  temp_bit_vector_size_ = method_lowering_infos_.Size();
  temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
      temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapMisc);
  temp_bit_vector_->ClearAllBits();
  temp_insn_data_ = static_cast<uint16_t*>(temp_scoped_alloc_->Alloc(
      temp_bit_vector_size_ * sizeof(*temp_insn_data_), kArenaAllocGrowableArray));
}

void MIRGraph::InlineCalls(BasicBlock* bb) {
  if (bb->block_type != kDalvikByteCode) {
    return;
  }
  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    if (!(Instruction::FlagsOf(mir->dalvikInsn.opcode) & Instruction::kInvoke)) {
      continue;
    }
    const MirMethodLoweringInfo& method_info = GetMethodLoweringInfo(mir);
    if (!method_info.FastPath()) {
      continue;
    }
    InvokeType sharp_type = method_info.GetSharpType();
    if ((sharp_type != kDirect) &&
        (sharp_type != kStatic || method_info.NeedsClassInitialization())) {
      continue;
    }
    DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
    MethodReference target = method_info.GetTargetMethod();
    if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(target.dex_file)
            ->GenInline(this, bb, mir, target.dex_method_index)) {
      if (cu_->verbose) {
        LOG(INFO) << "In \"" << PrettyMethod(cu_->method_idx, *cu_->dex_file)
                  << "\" @0x" << std::hex << mir->offset
                  << " inlined " << method_info.GetInvokeType() << " (" << sharp_type << ") call to \""
                  << PrettyMethod(target.dex_method_index, *target.dex_file) << "\"";
      }
    }
  }
}

void MIRGraph::InlineCallsEnd() {
  DCHECK(temp_insn_data_ != nullptr);
  temp_insn_data_ = nullptr;
  DCHECK(temp_bit_vector_ != nullptr);
  temp_bit_vector_ = nullptr;
  DCHECK(temp_scoped_alloc_.get() != nullptr);
  temp_scoped_alloc_.reset();
}

void MIRGraph::DumpCheckStats() {
  Checkstats* stats =
      static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), kArenaAllocDFInfo));
  checkstats_ = stats;
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
    CountChecks(bb);
  }
  if (stats->null_checks > 0) {
    float eliminated = static_cast<float>(stats->null_checks_eliminated);
    float checks = static_cast<float>(stats->null_checks);
    LOG(INFO) << "Null Checks: " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
              << stats->null_checks_eliminated << " of " << stats->null_checks << " -> "
              << (eliminated / checks) * 100.0 << "%";
  }
  if (stats->range_checks > 0) {
    float eliminated = static_cast<float>(stats->range_checks_eliminated);
    float checks = static_cast<float>(stats->range_checks);
    LOG(INFO) << "Range Checks: " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
              << stats->range_checks_eliminated << " of " << stats->range_checks << " -> "
              << (eliminated / checks) * 100.0 << "%";
  }
}

bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb) {
  if (bb->visited) return false;
  if (!((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
      || (bb->block_type == kExitBlock))) {
    // Ignore special blocks.
    bb->visited = true;
    return false;
  }
  // Must be head of extended basic block.
  BasicBlock* start_bb = bb;
  extended_basic_blocks_.push_back(bb->id);
  bool terminated_by_return = false;
  bool do_local_value_numbering = false;
  // Visit blocks strictly dominated by this head.
  while (bb != NULL) {
    bb->visited = true;
    terminated_by_return |= bb->terminated_by_return;
    do_local_value_numbering |= bb->use_lvn;
    bb = NextDominatedBlock(bb);
  }
  if (terminated_by_return || do_local_value_numbering) {
    // Do lvn for all blocks in this extended set.
    bb = start_bb;
    while (bb != NULL) {
      bb->use_lvn = do_local_value_numbering;
      bb->dominates_return = terminated_by_return;
      bb = NextDominatedBlock(bb);
    }
  }
  return false;  // Not iterative - return value will be ignored.
}

void MIRGraph::BasicBlockOptimization() {
  if ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) {
    ClearAllVisitedFlags();
    PreOrderDfsIterator iter2(this);
    for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
      BuildExtendedBBList(bb);
    }
    // Perform extended basic block optimizations.
    for (unsigned int i = 0; i < extended_basic_blocks_.size(); i++) {
      BasicBlockOpt(GetBasicBlock(extended_basic_blocks_[i]));
    }
  } else {
    PreOrderDfsIterator iter(this);
    for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
      BasicBlockOpt(bb);
    }
  }
}

}  // namespace art