/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>
#include "compiler_internals.h"
#include "dataflow_iterator-inl.h"
#include "dex_instruction.h"
#include "dex_instruction-inl.h"
#include "dex/verified_method.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "driver/compiler_options.h"
#include "UniquePtr.h"

namespace art {

// Instruction characteristics used to statically identify computation-intensive methods.
const uint32_t MIRGraph::analysis_attributes_[kMirOpLast] = {
  // 00 NOP
  AN_NONE,

  // 01 MOVE vA, vB
  AN_MOVE,

  // 02 MOVE_FROM16 vAA, vBBBB
  AN_MOVE,

  // 03 MOVE_16 vAAAA, vBBBB
  AN_MOVE,

  // 04 MOVE_WIDE vA, vB
  AN_MOVE,

  // 05 MOVE_WIDE_FROM16 vAA, vBBBB
  AN_MOVE,

  // 06 MOVE_WIDE_16 vAAAA, vBBBB
  AN_MOVE,

  // 07 MOVE_OBJECT vA, vB
  AN_MOVE,

  // 08 MOVE_OBJECT_FROM16 vAA, vBBBB
  AN_MOVE,

  // 09 MOVE_OBJECT_16 vAAAA, vBBBB
  AN_MOVE,

  // 0A MOVE_RESULT vAA
  AN_MOVE,

  // 0B MOVE_RESULT_WIDE vAA
  AN_MOVE,

  // 0C MOVE_RESULT_OBJECT vAA
  AN_MOVE,

  // 0D MOVE_EXCEPTION vAA
  AN_MOVE,

  // 0E RETURN_VOID
  AN_BRANCH,

  // 0F RETURN vAA
  AN_BRANCH,

  // 10 RETURN_WIDE vAA
  AN_BRANCH,

  // 11 RETURN_OBJECT vAA
  AN_BRANCH,

  // 12 CONST_4 vA, #+B
  AN_SIMPLECONST,

  // 13 CONST_16 vAA, #+BBBB
  AN_SIMPLECONST,

  // 14 CONST vAA, #+BBBBBBBB
  AN_SIMPLECONST,

  // 15 CONST_HIGH16 vAA, #+BBBB0000
  AN_SIMPLECONST,

  // 16 CONST_WIDE_16 vAA, #+BBBB
  AN_SIMPLECONST,

  // 17 CONST_WIDE_32 vAA, #+BBBBBBBB
  AN_SIMPLECONST,

  // 18 CONST_WIDE vAA, #+BBBBBBBBBBBBBBBB
  AN_SIMPLECONST,

  // 19 CONST_WIDE_HIGH16 vAA, #+BBBB000000000000
  AN_SIMPLECONST,

  // 1A CONST_STRING vAA, string@BBBB
  AN_NONE,

  // 1B CONST_STRING_JUMBO vAA, string@BBBBBBBB
  AN_NONE,

  // 1C CONST_CLASS vAA, type@BBBB
  AN_NONE,

  // 1D MONITOR_ENTER vAA
  AN_NONE,

  // 1E MONITOR_EXIT vAA
  AN_NONE,

  // 1F CHECK_CAST vAA, type@BBBB
  AN_NONE,

  // 20 INSTANCE_OF vA, vB, type@CCCC
  AN_NONE,

  // 21 ARRAY_LENGTH vA, vB
  AN_ARRAYOP,

  // 22 NEW_INSTANCE vAA, type@BBBB
  AN_HEAVYWEIGHT,

  // 23 NEW_ARRAY vA, vB, type@CCCC
  AN_HEAVYWEIGHT,

  // 24 FILLED_NEW_ARRAY {vD, vE, vF, vG, vA}
  AN_HEAVYWEIGHT,

  // 25 FILLED_NEW_ARRAY_RANGE {vCCCC .. vNNNN}, type@BBBB
  AN_HEAVYWEIGHT,

  // 26 FILL_ARRAY_DATA vAA, +BBBBBBBB
  AN_NONE,

  // 27 THROW vAA
  AN_HEAVYWEIGHT | AN_BRANCH,

  // 28 GOTO
  AN_BRANCH,

  // 29 GOTO_16
  AN_BRANCH,

  // 2A GOTO_32
  AN_BRANCH,

  // 2B PACKED_SWITCH vAA, +BBBBBBBB
  AN_SWITCH,

  // 2C SPARSE_SWITCH vAA, +BBBBBBBB
  AN_SWITCH,

  // 2D CMPL_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // 2E CMPG_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // 2F CMPL_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // 30 CMPG_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // 31 CMP_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 32 IF_EQ vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 33 IF_NE vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 34 IF_LT vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 35 IF_GE vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 36 IF_GT vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 37 IF_LE vA, vB, +CCCC
  AN_MATH | AN_BRANCH | AN_INT,

  // 38 IF_EQZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 39 IF_NEZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3A IF_LTZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3B IF_GEZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3C IF_GTZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3D IF_LEZ vAA, +BBBB
  AN_MATH | AN_BRANCH | AN_INT,

  // 3E UNUSED_3E
  AN_NONE,

  // 3F UNUSED_3F
  AN_NONE,

  // 40 UNUSED_40
  AN_NONE,

  // 41 UNUSED_41
  AN_NONE,

  // 42 UNUSED_42
  AN_NONE,

  // 43 UNUSED_43
  AN_NONE,

  // 44 AGET vAA, vBB, vCC
  AN_ARRAYOP,

  // 45 AGET_WIDE vAA, vBB, vCC
  AN_ARRAYOP,

  // 46 AGET_OBJECT vAA, vBB, vCC
  AN_ARRAYOP,

  // 47 AGET_BOOLEAN vAA, vBB, vCC
  AN_ARRAYOP,

  // 48 AGET_BYTE vAA, vBB, vCC
  AN_ARRAYOP,

  // 49 AGET_CHAR vAA, vBB, vCC
  AN_ARRAYOP,

  // 4A AGET_SHORT vAA, vBB, vCC
  AN_ARRAYOP,

  // 4B APUT vAA, vBB, vCC
  AN_ARRAYOP,

  // 4C APUT_WIDE vAA, vBB, vCC
  AN_ARRAYOP,

  // 4D APUT_OBJECT vAA, vBB, vCC
  AN_ARRAYOP,

  // 4E APUT_BOOLEAN vAA, vBB, vCC
  AN_ARRAYOP,

  // 4F APUT_BYTE vAA, vBB, vCC
  AN_ARRAYOP,

  // 50 APUT_CHAR vAA, vBB, vCC
  AN_ARRAYOP,

  // 51 APUT_SHORT vAA, vBB, vCC
  AN_ARRAYOP,

  // 52 IGET vA, vB, field@CCCC
  AN_NONE,

  // 53 IGET_WIDE vA, vB, field@CCCC
  AN_NONE,

  // 54 IGET_OBJECT vA, vB, field@CCCC
  AN_NONE,

  // 55 IGET_BOOLEAN vA, vB, field@CCCC
  AN_NONE,

  // 56 IGET_BYTE vA, vB, field@CCCC
  AN_NONE,

  // 57 IGET_CHAR vA, vB, field@CCCC
  AN_NONE,

  // 58 IGET_SHORT vA, vB, field@CCCC
  AN_NONE,

  // 59 IPUT vA, vB, field@CCCC
  AN_NONE,

  // 5A IPUT_WIDE vA, vB, field@CCCC
  AN_NONE,

  // 5B IPUT_OBJECT vA, vB, field@CCCC
  AN_NONE,

  // 5C IPUT_BOOLEAN vA, vB, field@CCCC
  AN_NONE,

  // 5D IPUT_BYTE vA, vB, field@CCCC
  AN_NONE,

  // 5E IPUT_CHAR vA, vB, field@CCCC
  AN_NONE,

  // 5F IPUT_SHORT vA, vB, field@CCCC
  AN_NONE,

  // 60 SGET vAA, field@BBBB
  AN_NONE,

  // 61 SGET_WIDE vAA, field@BBBB
  AN_NONE,

  // 62 SGET_OBJECT vAA, field@BBBB
  AN_NONE,

  // 63 SGET_BOOLEAN vAA, field@BBBB
  AN_NONE,

  // 64 SGET_BYTE vAA, field@BBBB
  AN_NONE,

  // 65 SGET_CHAR vAA, field@BBBB
  AN_NONE,

  // 66 SGET_SHORT vAA, field@BBBB
  AN_NONE,

  // 67 SPUT vAA, field@BBBB
  AN_NONE,

  // 68 SPUT_WIDE vAA, field@BBBB
  AN_NONE,

  // 69 SPUT_OBJECT vAA, field@BBBB
  AN_NONE,

  // 6A SPUT_BOOLEAN vAA, field@BBBB
  AN_NONE,

  // 6B SPUT_BYTE vAA, field@BBBB
  AN_NONE,

  // 6C SPUT_CHAR vAA, field@BBBB
  AN_NONE,

  // 6D SPUT_SHORT vAA, field@BBBB
  AN_NONE,

  // 6E INVOKE_VIRTUAL {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 6F INVOKE_SUPER {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 70 INVOKE_DIRECT {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 71 INVOKE_STATIC {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 72 INVOKE_INTERFACE {vD, vE, vF, vG, vA}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 73 UNUSED_73
  AN_NONE,

  // 74 INVOKE_VIRTUAL_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 75 INVOKE_SUPER_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 76 INVOKE_DIRECT_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 77 INVOKE_STATIC_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 78 INVOKE_INTERFACE_RANGE {vCCCC .. vNNNN}
  AN_INVOKE | AN_HEAVYWEIGHT,

  // 79 UNUSED_79
  AN_NONE,

  // 7A UNUSED_7A
  AN_NONE,

  // 7B NEG_INT vA, vB
  AN_MATH | AN_INT,

  // 7C NOT_INT vA, vB
  AN_MATH | AN_INT,

  // 7D NEG_LONG vA, vB
  AN_MATH | AN_LONG,

  // 7E NOT_LONG vA, vB
  AN_MATH | AN_LONG,

  // 7F NEG_FLOAT vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // 80 NEG_DOUBLE vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // 81 INT_TO_LONG vA, vB
  AN_MATH | AN_INT | AN_LONG,

  // 82 INT_TO_FLOAT vA, vB
  AN_MATH | AN_FP | AN_INT | AN_SINGLE,

  // 83 INT_TO_DOUBLE vA, vB
  AN_MATH | AN_FP | AN_INT | AN_DOUBLE,

  // 84 LONG_TO_INT vA, vB
  AN_MATH | AN_INT | AN_LONG,

  // 85 LONG_TO_FLOAT vA, vB
  AN_MATH | AN_FP | AN_LONG | AN_SINGLE,

  // 86 LONG_TO_DOUBLE vA, vB
  AN_MATH | AN_FP | AN_LONG | AN_DOUBLE,

  // 87 FLOAT_TO_INT vA, vB
  AN_MATH | AN_FP | AN_INT | AN_SINGLE,

  // 88 FLOAT_TO_LONG vA, vB
  AN_MATH | AN_FP | AN_LONG | AN_SINGLE,

  // 89 FLOAT_TO_DOUBLE vA, vB
  AN_MATH | AN_FP | AN_SINGLE | AN_DOUBLE,

  // 8A DOUBLE_TO_INT vA, vB
  AN_MATH | AN_FP | AN_INT | AN_DOUBLE,

  // 8B DOUBLE_TO_LONG vA, vB
  AN_MATH | AN_FP | AN_LONG | AN_DOUBLE,

  // 8C DOUBLE_TO_FLOAT vA, vB
  AN_MATH | AN_FP | AN_SINGLE | AN_DOUBLE,

  // 8D INT_TO_BYTE vA, vB
  AN_MATH | AN_INT,

  // 8E INT_TO_CHAR vA, vB
  AN_MATH | AN_INT,

  // 8F INT_TO_SHORT vA, vB
  AN_MATH | AN_INT,

  // 90 ADD_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 91 SUB_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 92 MUL_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 93 DIV_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 94 REM_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 95 AND_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 96 OR_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 97 XOR_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 98 SHL_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 99 SHR_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 9A USHR_INT vAA, vBB, vCC
  AN_MATH | AN_INT,

  // 9B ADD_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 9C SUB_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 9D MUL_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 9E DIV_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // 9F REM_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A0 AND_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A1 OR_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A2 XOR_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A3 SHL_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A4 SHR_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A5 USHR_LONG vAA, vBB, vCC
  AN_MATH | AN_LONG,

  // A6 ADD_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // A7 SUB_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // A8 MUL_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // A9 DIV_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // AA REM_FLOAT vAA, vBB, vCC
  AN_MATH | AN_FP | AN_SINGLE,

  // AB ADD_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // AC SUB_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // AD MUL_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // AE DIV_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // AF REM_DOUBLE vAA, vBB, vCC
  AN_MATH | AN_FP | AN_DOUBLE,

  // B0 ADD_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B1 SUB_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B2 MUL_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B3 DIV_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B4 REM_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B5 AND_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B6 OR_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B7 XOR_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B8 SHL_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // B9 SHR_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // BA USHR_INT_2ADDR vA, vB
  AN_MATH | AN_INT,

  // BB ADD_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // BC SUB_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // BD MUL_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // BE DIV_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // BF REM_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C0 AND_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C1 OR_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C2 XOR_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C3 SHL_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C4 SHR_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C5 USHR_LONG_2ADDR vA, vB
  AN_MATH | AN_LONG,

  // C6 ADD_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // C7 SUB_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // C8 MUL_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // C9 DIV_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // CA REM_FLOAT_2ADDR vA, vB
  AN_MATH | AN_FP | AN_SINGLE,

  // CB ADD_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // CC SUB_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // CD MUL_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // CE DIV_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // CF REM_DOUBLE_2ADDR vA, vB
  AN_MATH | AN_FP | AN_DOUBLE,

  // D0 ADD_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D1 RSUB_INT vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D2 MUL_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D3 DIV_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D4 REM_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D5 AND_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D6 OR_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D7 XOR_INT_LIT16 vA, vB, #+CCCC
  AN_MATH | AN_INT,

  // D8 ADD_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // D9 RSUB_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DA MUL_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DB DIV_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DC REM_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DD AND_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DE OR_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // DF XOR_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // E0 SHL_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // E1 SHR_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // E2 USHR_INT_LIT8 vAA, vBB, #+CC
  AN_MATH | AN_INT,

  // E3 IGET_VOLATILE
  AN_NONE,

  // E4 IPUT_VOLATILE
  AN_NONE,

  // E5 SGET_VOLATILE
  AN_NONE,

  // E6 SPUT_VOLATILE
  AN_NONE,

  // E7 IGET_OBJECT_VOLATILE
  AN_NONE,

  // E8 IGET_WIDE_VOLATILE
  AN_NONE,

  // E9 IPUT_WIDE_VOLATILE
  AN_NONE,

  // EA SGET_WIDE_VOLATILE
  AN_NONE,

  // EB SPUT_WIDE_VOLATILE
  AN_NONE,

  // EC BREAKPOINT
  AN_NONE,

  // ED THROW_VERIFICATION_ERROR
  AN_HEAVYWEIGHT | AN_BRANCH,

  // EE EXECUTE_INLINE
  AN_NONE,

  // EF EXECUTE_INLINE_RANGE
  AN_NONE,

  // F0 INVOKE_OBJECT_INIT_RANGE
  AN_INVOKE | AN_HEAVYWEIGHT,

  // F1 RETURN_VOID_BARRIER
  AN_BRANCH,

  // F2 IGET_QUICK
  AN_NONE,

  // F3 IGET_WIDE_QUICK
  AN_NONE,

  // F4 IGET_OBJECT_QUICK
  AN_NONE,

  // F5 IPUT_QUICK
  AN_NONE,

  // F6 IPUT_WIDE_QUICK
  AN_NONE,

  // F7 IPUT_OBJECT_QUICK
  AN_NONE,

  // F8 INVOKE_VIRTUAL_QUICK
  AN_INVOKE | AN_HEAVYWEIGHT,

  // F9 INVOKE_VIRTUAL_QUICK_RANGE
  AN_INVOKE | AN_HEAVYWEIGHT,

  // FA INVOKE_SUPER_QUICK
  AN_INVOKE | AN_HEAVYWEIGHT,

  // FB INVOKE_SUPER_QUICK_RANGE
  AN_INVOKE | AN_HEAVYWEIGHT,

  // FC IPUT_OBJECT_VOLATILE
  AN_NONE,

  // FD SGET_OBJECT_VOLATILE
  AN_NONE,

  // FE SPUT_OBJECT_VOLATILE
  AN_NONE,

  // FF UNUSED_FF
  AN_NONE,

  // Beginning of extended MIR opcodes
  // 100 MIR_PHI
  AN_NONE,

  // 101 MIR_COPY
  AN_NONE,

  // 102 MIR_FUSED_CMPL_FLOAT
  AN_NONE,

  // 103 MIR_FUSED_CMPG_FLOAT
  AN_NONE,

  // 104 MIR_FUSED_CMPL_DOUBLE
  AN_NONE,

  // 105 MIR_FUSED_CMPG_DOUBLE
  AN_NONE,

  // 106 MIR_FUSED_CMP_LONG
  AN_NONE,

  // 107 MIR_NOP
  AN_NONE,

  // 108 MIR_NULL_CHECK
  AN_NONE,

  // 109 MIR_RANGE_CHECK
  AN_NONE,

  // 10A MIR_DIV_ZERO_CHECK
  AN_NONE,

  // 10B MIR_CHECK
  AN_NONE,

  // 10C MIR_CHECKPART2
  AN_NONE,

  // 10D MIR_SELECT
  AN_NONE,
};

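// Per-method statistics gathered by AnalyzeBlock() and consumed by ComputeSkipCompilation().
// The op counts are scaled by a rough loop factor, so they are heuristic weights rather than
// exact instruction counts.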
struct MethodStats {
  int dex_instructions;
  int math_ops;
  int fp_ops;
  int array_ops;
  int branch_ops;
  int heavyweight_ops;
  bool has_computational_loop;
  bool has_switch;
  float math_ratio;
  float fp_ratio;
  float array_ratio;
  float branch_ratio;
  float heavyweight_ratio;
};

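// Accumulate *stats for the extended basic block starting at bb: fall-through successors are
// scanned as a single unit until an explicit branch or return, and counts are multiplied by a
// crude loop scale factor when a simple loop back edge to bb is detected.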
void MIRGraph::AnalyzeBlock(BasicBlock* bb, MethodStats* stats) {
  if (bb->visited || (bb->block_type != kDalvikByteCode)) {
    return;
  }
  bool computational_block = true;
  bool has_math = false;
  /*
   * For the purposes of this scan, we want to treat the set of basic blocks broken
   * by an exception edge as a single basic block. We'll scan forward along the fallthrough
   * edges until we reach an explicit branch or return.
   */
  BasicBlock* ending_bb = bb;
  if (ending_bb->last_mir_insn != NULL) {
    uint32_t ending_flags = analysis_attributes_[ending_bb->last_mir_insn->dalvikInsn.opcode];
    while ((ending_flags & AN_BRANCH) == 0) {
      ending_bb = GetBasicBlock(ending_bb->fall_through);
      ending_flags = analysis_attributes_[ending_bb->last_mir_insn->dalvikInsn.opcode];
    }
  }
  /*
   * Ideally, we'd weight the operations by loop nesting level, but to do so we'd
   * first need to do some expensive loop detection - and the point of this is to make
   * an informed guess before investing in computation. However, we can cheaply detect
   * many simple loop forms without having to do full dataflow analysis.
   */
  int loop_scale_factor = 1;
  // Simple for and while loops
  if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->fall_through == NullBasicBlockId)) {
    if ((GetBasicBlock(ending_bb->taken)->taken == bb->id) ||
        (GetBasicBlock(ending_bb->taken)->fall_through == bb->id)) {
      loop_scale_factor = 25;
    }
  }
  // Simple do-while loop
  if ((ending_bb->taken != NullBasicBlockId) && (ending_bb->taken == bb->id)) {
    loop_scale_factor = 25;
  }

  BasicBlock* tbb = bb;
  bool done = false;
  while (!done) {
    tbb->visited = true;
    for (MIR* mir = tbb->first_mir_insn; mir != NULL; mir = mir->next) {
      if (static_cast<uint32_t>(mir->dalvikInsn.opcode) >= kMirOpFirst) {
        // Skip any MIR pseudo-op.
        continue;
      }
      uint32_t flags = analysis_attributes_[mir->dalvikInsn.opcode];
      stats->dex_instructions += loop_scale_factor;
      if ((flags & AN_BRANCH) == 0) {
        computational_block &= ((flags & AN_COMPUTATIONAL) != 0);
      } else {
        stats->branch_ops += loop_scale_factor;
      }
      if ((flags & AN_MATH) != 0) {
        stats->math_ops += loop_scale_factor;
        has_math = true;
      }
      if ((flags & AN_FP) != 0) {
        stats->fp_ops += loop_scale_factor;
      }
      if ((flags & AN_ARRAYOP) != 0) {
        stats->array_ops += loop_scale_factor;
      }
      if ((flags & AN_HEAVYWEIGHT) != 0) {
        stats->heavyweight_ops += loop_scale_factor;
      }
      if ((flags & AN_SWITCH) != 0) {
        stats->has_switch = true;
      }
    }
    if (tbb == ending_bb) {
      done = true;
    } else {
      tbb = GetBasicBlock(tbb->fall_through);
    }
  }
  if (has_math && computational_block && (loop_scale_factor > 1)) {
    stats->has_computational_loop = true;
  }
}

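// Turn the raw counts in *stats into ratios and decide whether the method looks worth
// compiling. Returns true to skip compilation; the thresholds below are empirical tuning
// values, not hard guarantees.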
bool MIRGraph::ComputeSkipCompilation(MethodStats* stats, bool skip_default) {
  float count = stats->dex_instructions;
  stats->math_ratio = stats->math_ops / count;
  stats->fp_ratio = stats->fp_ops / count;
  stats->branch_ratio = stats->branch_ops / count;
  stats->array_ratio = stats->array_ops / count;
  stats->heavyweight_ratio = stats->heavyweight_ops / count;

  if (cu_->enable_debug & (1 << kDebugShowFilterStats)) {
    LOG(INFO) << "STATS " << stats->dex_instructions << ", math:"
              << stats->math_ratio << ", fp:"
              << stats->fp_ratio << ", br:"
              << stats->branch_ratio << ", hw:"
              << stats->heavyweight_ratio << ", arr:"
              << stats->array_ratio << ", hot:"
              << stats->has_computational_loop << ", "
              << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  }

  // Computation-intensive?
  if (stats->has_computational_loop && (stats->heavyweight_ratio < 0.04)) {
    return false;
  }

  // Complex, logic-intensive?
  if (cu_->compiler_driver->GetCompilerOptions().IsSmallMethod(GetNumDalvikInsns()) &&
      stats->branch_ratio > 0.3) {
    return false;
  }

  // Significant floating point?
  if (stats->fp_ratio > 0.05) {
    return false;
  }

  // Significant generic math?
  if (stats->math_ratio > 0.3) {
    return false;
  }

  // If array-intensive, compiling is probably worthwhile.
  if (stats->array_ratio > 0.1) {
    return false;
  }

  // Switch operations benefit greatly from compilation, so go ahead and spend the cycles.
  if (stats->has_switch) {
    return false;
  }

  // If significant in size and high proportion of expensive operations, skip.
  if (cu_->compiler_driver->GetCompilerOptions().IsSmallMethod(GetNumDalvikInsns()) &&
      (stats->heavyweight_ratio > 0.3)) {
    return true;
  }

  return skip_default;
}

/*
 * We will eventually want this to be a bit more sophisticated and happen at verification time.
 */
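// Decide whether to skip compiling the current method, based on the active compiler filter,
// the method size thresholds, and (for borderline sizes) the statistics gathered by
// AnalyzeBlock(). Returns true to skip compilation of this method.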
bool MIRGraph::SkipCompilation() {
  const CompilerOptions& compiler_options = cu_->compiler_driver->GetCompilerOptions();
  CompilerOptions::CompilerFilter compiler_filter = compiler_options.GetCompilerFilter();
  if (compiler_filter == CompilerOptions::kEverything) {
    return false;
  }

  // Contains a pattern we don't want to compile?
  if (punt_to_interpreter_) {
    return true;
  }

  if (!compiler_options.IsCompilationEnabled() || compiler_filter == CompilerOptions::kProfiled) {
    return true;
  }

  // Set up compilation cutoffs based on current filter mode.
  size_t small_cutoff = 0;
  size_t default_cutoff = 0;
  switch (compiler_filter) {
    case CompilerOptions::kBalanced:
      small_cutoff = compiler_options.GetSmallMethodThreshold();
      default_cutoff = compiler_options.GetLargeMethodThreshold();
      break;
    case CompilerOptions::kSpace:
      small_cutoff = compiler_options.GetTinyMethodThreshold();
      default_cutoff = compiler_options.GetSmallMethodThreshold();
      break;
    case CompilerOptions::kSpeed:
      small_cutoff = compiler_options.GetHugeMethodThreshold();
      default_cutoff = compiler_options.GetHugeMethodThreshold();
      break;
    default:
      LOG(FATAL) << "Unexpected compiler_filter_: " << compiler_filter;
  }

  // If size < cutoff, assume we'll compile - but allow removal.
  bool skip_compilation = (GetNumDalvikInsns() >= default_cutoff);

  /*
   * Filter 1: Huge methods are likely to be machine generated, but some aren't.
   * If huge, assume we won't compile, but allow further analysis to turn it back on.
   */
  if (compiler_options.IsHugeMethod(GetNumDalvikInsns())) {
    skip_compilation = true;
    // If we've got a huge number of basic blocks, don't bother with further analysis.
    if (static_cast<size_t>(num_blocks_) > (compiler_options.GetHugeMethodThreshold() / 2)) {
      return true;
    }
  } else if (compiler_options.IsLargeMethod(GetNumDalvikInsns()) &&
      /* If it's large and contains no branches, it's likely to be machine-generated initialization. */
      (GetBranchCount() == 0)) {
    return true;
  } else if (compiler_filter == CompilerOptions::kSpeed) {
    // If not huge, compile.
    return false;
  }

  // Filter 2: Skip class initializers.
  if (((cu_->access_flags & kAccConstructor) != 0) && ((cu_->access_flags & kAccStatic) != 0)) {
    return true;
  }

  // Filter 3: If this method is a special pattern, go ahead and emit the canned pattern.
  if (cu_->compiler_driver->GetMethodInlinerMap() != nullptr &&
      cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
          ->IsSpecial(cu_->method_idx)) {
    return false;
  }

  // Filter 4: If small, just compile.
  if (GetNumDalvikInsns() < small_cutoff) {
    return false;
  }

  // Analyze graph for:
  //  o floating point computation
  //  o basic blocks contained in loop with heavy arithmetic.
  //  o proportion of conditional branches.

  MethodStats stats;
  memset(&stats, 0, sizeof(stats));

  ClearAllVisitedFlags();
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
    AnalyzeBlock(bb, &stats);
  }

  return ComputeSkipCompilation(&stats, skip_compilation);
}

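// Scan all IGET/IPUT/SGET/SPUT instructions once and give each MIR a compact index into
// ifield_lowering_infos_/sfield_lowering_infos_, so that each unique field index is
// resolved only once rather than once per instruction.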
void MIRGraph::DoCacheFieldLoweringInfo() {
  // All IGET/IPUT/SGET/SPUT instructions take 2 code units and there must also be a RETURN.
  const uint32_t max_refs = (current_code_item_->insns_size_in_code_units_ - 1u) / 2u;
  ScopedArenaAllocator allocator(&cu_->arena_stack);
  uint16_t* field_idxs =
      reinterpret_cast<uint16_t*>(allocator.Alloc(max_refs * sizeof(uint16_t), kArenaAllocMisc));

  // Find IGET/IPUT/SGET/SPUT insns, store IGET/IPUT fields at the beginning, SGET/SPUT at the end.
  size_t ifield_pos = 0u;
  size_t sfield_pos = max_refs;
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    if (bb->block_type != kDalvikByteCode) {
      continue;
    }
    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
      if (mir->dalvikInsn.opcode >= Instruction::IGET &&
          mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
        const Instruction* insn = Instruction::At(current_code_item_->insns_ + mir->offset);
        // Get the field index and try to find it among existing indexes. If found, it's usually
        // among the last few added, so we'll start the search from ifield_pos/sfield_pos. Though
        // this is a linear search, it actually performs much better than a map-based approach.
        if (mir->dalvikInsn.opcode <= Instruction::IPUT_SHORT) {
          uint16_t field_idx = insn->VRegC_22c();
          size_t i = ifield_pos;
          while (i != 0u && field_idxs[i - 1] != field_idx) {
            --i;
          }
          if (i != 0u) {
            mir->meta.ifield_lowering_info = i - 1;
          } else {
            mir->meta.ifield_lowering_info = ifield_pos;
            field_idxs[ifield_pos++] = field_idx;
          }
        } else {
          uint16_t field_idx = insn->VRegB_21c();
          size_t i = sfield_pos;
          while (i != max_refs && field_idxs[i] != field_idx) {
            ++i;
          }
          if (i != max_refs) {
            mir->meta.sfield_lowering_info = max_refs - i - 1u;
          } else {
            mir->meta.sfield_lowering_info = max_refs - sfield_pos;
            field_idxs[--sfield_pos] = field_idx;
          }
        }
        DCHECK_LE(ifield_pos, sfield_pos);
      }
    }
  }

  if (ifield_pos != 0u) {
    // Resolve instance field infos.
    DCHECK_EQ(ifield_lowering_infos_.Size(), 0u);
    ifield_lowering_infos_.Resize(ifield_pos);
    for (size_t pos = 0u; pos != ifield_pos; ++pos) {
      ifield_lowering_infos_.Insert(MirIFieldLoweringInfo(field_idxs[pos]));
    }
    MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
                                   ifield_lowering_infos_.GetRawStorage(), ifield_pos);
  }

  if (sfield_pos != max_refs) {
    // Resolve static field infos.
    DCHECK_EQ(sfield_lowering_infos_.Size(), 0u);
    sfield_lowering_infos_.Resize(max_refs - sfield_pos);
    for (size_t pos = max_refs; pos != sfield_pos;) {
      --pos;
      sfield_lowering_infos_.Insert(MirSFieldLoweringInfo(field_idxs[pos]));
    }
    MirSFieldLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
                                   sfield_lowering_infos_.GetRawStorage(), max_refs - sfield_pos);
  }
}

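// Analogous to DoCacheFieldLoweringInfo(), but for INVOKEs: every unique combination of
// target method index, invoke type, and devirtualization target gets one
// MirMethodLoweringInfo entry shared by all MIRs that reference it.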
void MIRGraph::DoCacheMethodLoweringInfo() {
  static constexpr uint16_t invoke_types[] = { kVirtual, kSuper, kDirect, kStatic, kInterface };

  // Embed the map value in the entry to avoid extra padding in 64-bit builds.
  struct MapEntry {
    // Map key: target_method_idx, invoke_type, devirt_target. Ordered to avoid padding.
    const MethodReference* devirt_target;
    uint16_t target_method_idx;
    uint16_t invoke_type;
    // Map value.
    uint32_t lowering_info_index;
  };

  // Sort INVOKEs by method index, then by opcode, then by devirtualization target.
  struct MapEntryComparator {
    bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
      if (lhs.target_method_idx != rhs.target_method_idx) {
        return lhs.target_method_idx < rhs.target_method_idx;
      }
      if (lhs.invoke_type != rhs.invoke_type) {
        return lhs.invoke_type < rhs.invoke_type;
      }
      if (lhs.devirt_target != rhs.devirt_target) {
        if (lhs.devirt_target == nullptr) {
          return true;
        }
        if (rhs.devirt_target == nullptr) {
          return false;
        }
        return devirt_cmp(*lhs.devirt_target, *rhs.devirt_target);
      }
      return false;
    }
    MethodReferenceComparator devirt_cmp;
  };

  // Map invoke key (see MapEntry) to lowering info index.
  typedef std::set<MapEntry, MapEntryComparator, ScopedArenaAllocatorAdapter<MapEntry> > InvokeMap;

  ScopedArenaAllocator allocator(&cu_->arena_stack);

  // All INVOKE instructions take 3 code units and there must also be a RETURN.
  uint32_t max_refs = (current_code_item_->insns_size_in_code_units_ - 1u) / 3u;

  // The invoke_map and sequential entries are essentially equivalent to Boost.MultiIndex's
  // multi_index_container with one ordered index and one sequential index.
  InvokeMap invoke_map(MapEntryComparator(), allocator.Adapter());
  const MapEntry** sequential_entries = reinterpret_cast<const MapEntry**>(
      allocator.Alloc(max_refs * sizeof(sequential_entries[0]), kArenaAllocMisc));

  // Find INVOKE insns and their devirtualization targets.
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    if (bb->block_type != kDalvikByteCode) {
      continue;
    }
    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
      if (mir->dalvikInsn.opcode >= Instruction::INVOKE_VIRTUAL &&
          mir->dalvikInsn.opcode <= Instruction::INVOKE_INTERFACE_RANGE &&
          mir->dalvikInsn.opcode != Instruction::RETURN_VOID_BARRIER) {
        // Decode target method index and invoke type.
        const Instruction* insn = Instruction::At(current_code_item_->insns_ + mir->offset);
        uint16_t target_method_idx;
        uint16_t invoke_type_idx;
        if (mir->dalvikInsn.opcode <= Instruction::INVOKE_INTERFACE) {
          target_method_idx = insn->VRegB_35c();
          invoke_type_idx = mir->dalvikInsn.opcode - Instruction::INVOKE_VIRTUAL;
        } else {
          target_method_idx = insn->VRegB_3rc();
          invoke_type_idx = mir->dalvikInsn.opcode - Instruction::INVOKE_VIRTUAL_RANGE;
        }

        // Find devirtualization target.
        // TODO: The devirt map is ordered by the dex pc here. Is there a way to get INVOKEs
        // ordered by dex pc as well? That would allow us to keep an iterator to devirt targets
        // and increment it as needed instead of making O(log n) lookups.
        const VerifiedMethod* verified_method = GetCurrentDexCompilationUnit()->GetVerifiedMethod();
        const MethodReference* devirt_target = verified_method->GetDevirtTarget(mir->offset);

        // Try to insert a new entry. If the insertion fails, we will have found an old one.
        MapEntry entry = {
            devirt_target,
            target_method_idx,
            invoke_types[invoke_type_idx],
            static_cast<uint32_t>(invoke_map.size())
        };
        auto it = invoke_map.insert(entry).first;  // Iterator to either the old or the new entry.
        mir->meta.method_lowering_info = it->lowering_info_index;
        // If we didn't actually insert, this will just overwrite an existing value with the same.
        sequential_entries[it->lowering_info_index] = &*it;
      }
    }
  }

  if (invoke_map.empty()) {
    return;
  }

  // Prepare unique method infos, set method info indexes for their MIRs.
  DCHECK_EQ(method_lowering_infos_.Size(), 0u);
  const size_t count = invoke_map.size();
  method_lowering_infos_.Resize(count);
  for (size_t pos = 0u; pos != count; ++pos) {
    const MapEntry* entry = sequential_entries[pos];
    MirMethodLoweringInfo method_info(entry->target_method_idx,
                                      static_cast<InvokeType>(entry->invoke_type));
    if (entry->devirt_target != nullptr) {
      method_info.SetDevirtualizationTarget(*entry->devirt_target);
    }
    method_lowering_infos_.Insert(method_info);
  }
  MirMethodLoweringInfo::Resolve(cu_->compiler_driver, GetCurrentDexCompilationUnit(),
                                 method_lowering_infos_.GetRawStorage(), count);
}

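// Name-based variant that simply delegates the skip decision to the CompilerDriver for the
// given method name.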
bool MIRGraph::SkipCompilation(const std::string& methodname) {
  return cu_->compiler_driver->SkipCompilation(methodname);
}

}  // namespace art