/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Thumb2 ISA. */

#include "codegen_arm.h"

#include "arm_lir.h"
#include "base/logging.h"
#include "dex/mir_graph.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/accounting/card_table.h"
#include "mirror/art_method.h"
#include "mirror/object_array-inl.h"
#include "utils.h"
#include "utils/dex_cache_arrays_layout-inl.h"

namespace art {

/*
 * The sparse table in the literal pool is an array of <key, displacement>
 * pairs. For each entry, we load the key/displacement pair with a single
 * ldmia. This means that the register number of the temp we use for the key
 * must be lower than the reg for the displacement.
 *
 * The test loop will look something like:
 *
 *   adr   r_base, <table>
 *   ldr   r_val, [rARM_SP, v_reg_off]
 *   mov   r_idx, #table_size
 * lp:
 *   ldmia r_base!, {r_key, r_disp}
 *   sub   r_idx, #1
 *   cmp   r_val, r_key
 *   ifeq
 *   add   rARM_PC, r_disp   ; This is the branch from which we compute displacement
 *   cbnz  r_idx, lp
 */
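/*
 * For reference, the incoming data from mir_graph_->GetTable() is the standard
 * Dex sparse-switch payload (sketch only; see the Dalvik bytecode format spec;
 * the assembler later converts it to the <key, displacement> pairs above):
 *
 *   ushort ident;          // 0x0200 for sparse-switch payloads
 *   ushort size;           // number of entries - hence table[1] below
 *   int    keys[size];     // case keys, sorted ascending
 *   int    targets[size];  // relative branch targets
 */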
void ArmMir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
  // Add the table to the list - we'll process it later.
  SwitchTable* tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->switch_mir = mir;
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint32_t size = table[1];
  switch_tables_.push_back(tab_rec);

  // Get the switch value.
  rl_src = LoadValue(rl_src, kCoreReg);
  RegStorage r_base = AllocTemp();
  // Allocate key and disp temps.
  RegStorage r_key = AllocTemp();
  RegStorage r_disp = AllocTemp();
  // Make sure r_key's register number is less than r_disp's number for ldmia.
  if (r_key.GetReg() > r_disp.GetReg()) {
    RegStorage tmp = r_disp;
    r_disp = r_key;
    r_key = tmp;
  }
  // Materialize a pointer to the switch table.
  NewLIR3(kThumb2Adr, r_base.GetReg(), 0, WrapPointer(tab_rec));
  // Set up r_idx.
  RegStorage r_idx = AllocTemp();
  LoadConstant(r_idx, size);
  // Establish loop branch target.
  LIR* target = NewLIR0(kPseudoTargetLabel);
  // Load next key/disp pair.
  NewLIR2(kThumb2LdmiaWB, r_base.GetReg(), (1 << r_key.GetRegNum()) | (1 << r_disp.GetRegNum()));
  OpRegReg(kOpCmp, r_key, rl_src.reg);
  // Go if match. NOTE: No instruction set switch here - must stay Thumb2.
  LIR* it = OpIT(kCondEq, "");
  LIR* switch_branch = NewLIR1(kThumb2AddPCR, r_disp.GetReg());
  OpEndIT(it);
  tab_rec->anchor = switch_branch;
  // Needs to use setflags encoding here.
  OpRegRegImm(kOpSub, r_idx, r_idx, 1);  // For value == 1, this should set flags.
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  OpCondBranch(kCondNe, target);
}

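/*
 * The packed table is a simple array of displacements indexed by
 * (key - low_key). The generated sequence looks roughly like this
 * (register names are illustrative, not the actual allocation):
 *
 *   adr   r_table, <table>
 *   sub   r_key, r_val, #low_key     ; omitted when low_key == 0
 *   cmp   r_key, #(size - 1)
 *   bhi   branch_over                ; unsigned check covers < 0 and >= size
 *   ldr   r_disp, [r_table, r_key, lsl #2]
 *   add   rARM_PC, r_disp            ; the anchor branch
 * branch_over:
 */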
void ArmMir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
  // Add the table to the list - we'll process it later.
  SwitchTable* tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->switch_mir = mir;
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint32_t size = table[1];
  switch_tables_.push_back(tab_rec);

  // Get the switch value.
  rl_src = LoadValue(rl_src, kCoreReg);
  RegStorage table_base = AllocTemp();
  // Materialize a pointer to the switch table.
  NewLIR3(kThumb2Adr, table_base.GetReg(), 0, WrapPointer(tab_rec));
  int low_key = s4FromSwitchData(&table[2]);
  RegStorage key_reg;
  // Remove the bias, if necessary.
  if (low_key == 0) {
    key_reg = rl_src.reg;
  } else {
    key_reg = AllocTemp();
    OpRegRegImm(kOpSub, key_reg, rl_src.reg, low_key);
  }
  // Bounds check - if < 0 or >= size, continue following switch.
  OpRegImm(kOpCmp, key_reg, size - 1);
  LIR* branch_over = OpCondBranch(kCondHi, nullptr);

  // Load the displacement from the switch table.
  RegStorage disp_reg = AllocTemp();
  LoadBaseIndexed(table_base, key_reg, disp_reg, 2, k32);

  // ... and go! NOTE: No instruction set switch here - must stay Thumb2.
  LIR* switch_branch = NewLIR1(kThumb2AddPCR, disp_reg.GetReg());
  tab_rec->anchor = switch_branch;

  /* branch_over target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
}

/*
 * Handle unlocked -> thin locked transition inline or else call out to quick entrypoint. For more
 * details, see monitor.cc.
 */
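/*
 * A rough sketch of the fast path emitted below (registers fixed by
 * LockCallTemps(): r0 = obj, r1 = lock word, r2 = thread id, r3 = scratch):
 *
 *   ldr    r2, [rSELF, #thin_lock_id_offset]
 *   ldrex  r1, [r0, #monitor_offset]       ; current lock word
 *   and    r3, r1, #~rb_state_mask         ; ignore read barrier bits
 *   cmp    r3, #0
 *   bne    slow_path                       ; not unlocked -> runtime call
 *   orr    r2, r2, r1                      ; keep rb bits in the new word
 *   strex  r1, r2, [r0, #monitor_offset]
 *   cmp    r1, #0
 *   bne    slow_path                       ; strex failed -> runtime call
 *   dmb                                    ; load-any barrier on success
 */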
void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  // FIXME: need separate LoadValues for object references.
  LoadValueDirectFixed(rl_src, rs_r0);  // Get obj.
  LockCallTemps();  // Prepare for explicit register usage.
  constexpr bool kArchVariantHasGoodBranchPredictor = false;  // TODO: true if cortex-A15.
  if (kArchVariantHasGoodBranchPredictor) {
    LIR* null_check_branch = nullptr;
    if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
      null_check_branch = nullptr;  // No null check.
    } else {
      // A failed null-check is handled by the slow path to reduce exception-related metadata.
      if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
        null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
      }
    }
    Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
    NewLIR3(kThumb2Ldrex, rs_r1.GetReg(), rs_r0.GetReg(),
            mirror::Object::MonitorOffset().Int32Value() >> 2);
    MarkPossibleNullPointerException(opt_flags);
    // Zero out the read barrier bits.
    OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
    LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, nullptr);
    // r1 is zero except for the rb bits here. Copy the read barrier bits into r2.
    OpRegRegReg(kOpOr, rs_r2, rs_r2, rs_r1);
    NewLIR4(kThumb2Strex, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
            mirror::Object::MonitorOffset().Int32Value() >> 2);
    LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, nullptr);

    LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
    not_unlocked_branch->target = slow_path_target;
    if (null_check_branch != nullptr) {
      null_check_branch->target = slow_path_target;
    }
    // TODO: move to a slow path.
    // Go expensive route - artLockObjectFromCode(obj);
    LoadWordDisp(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pLockObject).Int32Value(), rs_rARM_LR);
    ClobberCallerSave();
    LIR* call_inst = OpReg(kOpBlx, rs_rARM_LR);
    MarkSafepointPC(call_inst);

    LIR* success_target = NewLIR0(kPseudoTargetLabel);
    lock_success_branch->target = success_target;
    GenMemBarrier(kLoadAny);
  } else {
    // Explicit null-check as slow-path is entered using an IT.
    GenNullCheck(rs_r0, opt_flags);
    Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
    NewLIR3(kThumb2Ldrex, rs_r1.GetReg(), rs_r0.GetReg(),
            mirror::Object::MonitorOffset().Int32Value() >> 2);
    MarkPossibleNullPointerException(opt_flags);
    // Zero out the read barrier bits.
    OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
    // r1 will be zero except for the rb bits if the following
    // cmp-and-branch branches to eq, where r2 will be used. Copy the
    // read barrier bits into r2.
    OpRegRegReg(kOpOr, rs_r2, rs_r2, rs_r1);
    OpRegImm(kOpCmp, rs_r3, 0);

    LIR* it = OpIT(kCondEq, "");
    NewLIR4(kThumb2Strex/*eq*/, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
            mirror::Object::MonitorOffset().Int32Value() >> 2);
    OpEndIT(it);
    OpRegImm(kOpCmp, rs_r1, 0);
    it = OpIT(kCondNe, "T");
    // Go expensive route - artLockObjectFromCode(self, obj);
    LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pLockObject).Int32Value(),
                       rs_rARM_LR);
    ClobberCallerSave();
    LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
    OpEndIT(it);
    MarkSafepointPC(call_inst);
    GenMemBarrier(kLoadAny);
  }
}

/*
 * Handle thin locked -> unlocked transition inline or else call out to quick entrypoint. For more
 * details, see monitor.cc. Note the non-read-barrier code below doesn't need ldrex/strex, as the
 * thread holds the lock and can only give away ownership if it is suspended; with read barriers
 * enabled, atomic ldrex/strex are used instead.
 */
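/*
 * Fast-path sketch for the non-read-barrier case below (r0 = obj,
 * r1 = lock word, r2 = thread id, r3 = scratch; illustrative only):
 *
 *   ldr   r2, [rSELF, #thin_lock_id_offset]
 *   ldr   r1, [r0, #monitor_offset]   ; plain load - we hold the lock
 *   and   r3, r1, #~rb_state_mask     ; lock word without rb bits
 *   and   r1, r1, #rb_state_mask      ; rb bits only
 *   cmp   r3, r2                      ; thin-locked exactly once by us?
 *   bne   slow_path
 *   dmb                               ; any-store barrier before release
 *   str   r1, [r0, #monitor_offset]   ; store unlocked word, rb bits preserved
 */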
void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  LoadValueDirectFixed(rl_src, rs_r0);  // Get obj.
  LockCallTemps();  // Prepare for explicit register usage.
  LIR* null_check_branch = nullptr;
  Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
  constexpr bool kArchVariantHasGoodBranchPredictor = false;  // TODO: true if cortex-A15.
  if (kArchVariantHasGoodBranchPredictor) {
    if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
      null_check_branch = nullptr;  // No null check.
    } else {
      // A failed null-check is handled by the slow path to reduce exception-related metadata.
      if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
        null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, nullptr);
      }
    }
    if (!kUseReadBarrier) {
      Load32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);  // Get lock.
    } else {
      NewLIR3(kThumb2Ldrex, rs_r1.GetReg(), rs_r0.GetReg(),
              mirror::Object::MonitorOffset().Int32Value() >> 2);
    }
    MarkPossibleNullPointerException(opt_flags);
    // Zero out the read barrier bits.
    OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
    // Zero out all but the read barrier bits.
    OpRegRegImm(kOpAnd, rs_r1, rs_r1, LockWord::kReadBarrierStateMaskShifted);
    LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, nullptr);
    GenMemBarrier(kAnyStore);
    LIR* unlock_success_branch;
    if (!kUseReadBarrier) {
      Store32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
      unlock_success_branch = OpUnconditionalBranch(nullptr);
    } else {
      NewLIR4(kThumb2Strex, rs_r2.GetReg(), rs_r1.GetReg(), rs_r0.GetReg(),
              mirror::Object::MonitorOffset().Int32Value() >> 2);
      unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, nullptr);
    }
    LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
    slow_unlock_branch->target = slow_path_target;
    if (null_check_branch != nullptr) {
      null_check_branch->target = slow_path_target;
    }
    // TODO: move to a slow path.
    // Go expensive route - artUnlockObjectFromCode(obj);
    LoadWordDisp(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject).Int32Value(), rs_rARM_LR);
    ClobberCallerSave();
    LIR* call_inst = OpReg(kOpBlx, rs_rARM_LR);
    MarkSafepointPC(call_inst);

    LIR* success_target = NewLIR0(kPseudoTargetLabel);
    unlock_success_branch->target = success_target;
  } else {
    // Explicit null-check as slow-path is entered using an IT.
    GenNullCheck(rs_r0, opt_flags);
    if (!kUseReadBarrier) {
      Load32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);  // Get lock.
    } else {
      // If we use read barriers, we need to use atomic instructions.
      NewLIR3(kThumb2Ldrex, rs_r1.GetReg(), rs_r0.GetReg(),
              mirror::Object::MonitorOffset().Int32Value() >> 2);
    }
    MarkPossibleNullPointerException(opt_flags);
    Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
    // Zero out the read barrier bits.
    OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
    // Zero out all but the read barrier bits.
    OpRegRegImm(kOpAnd, rs_r1, rs_r1, LockWord::kReadBarrierStateMaskShifted);
    // Is lock unheld on lock or held by us (==thread_id) on unlock?
    OpRegReg(kOpCmp, rs_r3, rs_r2);
    if (!kUseReadBarrier) {
      LIR* it = OpIT(kCondEq, "EE");
      if (GenMemBarrier(kAnyStore)) {
        UpdateIT(it, "TEE");
      }
      Store32Disp/*eq*/(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
      // Go expensive route - UnlockObjectFromCode(obj);
      LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject).Int32Value(),
                         rs_rARM_LR);
      ClobberCallerSave();
      LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
      OpEndIT(it);
      MarkSafepointPC(call_inst);
    } else {
      // If we use read barriers, we need to use atomic instructions.
      LIR* it = OpIT(kCondEq, "");
      if (GenMemBarrier(kAnyStore)) {
        UpdateIT(it, "T");
      }
      NewLIR4/*eq*/(kThumb2Strex, rs_r2.GetReg(), rs_r1.GetReg(), rs_r0.GetReg(),
                    mirror::Object::MonitorOffset().Int32Value() >> 2);
      OpEndIT(it);
      // Since we know r2 wasn't zero before the above IT instruction,
      // if r2 is zero here, we know r3 was equal to r2 and the strex
      // succeeded (we're done). Otherwise (either r3 wasn't equal to r2
      // or the strex failed), call the entrypoint.
      OpRegImm(kOpCmp, rs_r2, 0);
      LIR* it2 = OpIT(kCondNe, "T");
      // Go expensive route - UnlockObjectFromCode(obj);
      LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject).Int32Value(),
                         rs_rARM_LR);
      ClobberCallerSave();
      LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
      OpEndIT(it2);
      MarkSafepointPC(call_inst);
    }
  }
}

void ArmMir2Lir::GenMoveException(RegLocation rl_dest) {
  int ex_offset = Thread::ExceptionOffset<4>().Int32Value();
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
  RegStorage reset_reg = AllocTempRef();
  LoadRefDisp(rs_rARM_SELF, ex_offset, rl_result.reg, kNotVolatile);
  LoadConstant(reset_reg, 0);
  StoreRefDisp(rs_rARM_SELF, ex_offset, reset_reg, kNotVolatile);
  FreeTemp(reset_reg);
  StoreValue(rl_dest, rl_result);
}
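/*
 * The emitted code is simply (sketch):
 *
 *   ldr  r_result, [rSELF, #exception_offset]   ; fetch pending exception
 *   mov  r_reset, #0
 *   str  r_reset, [rSELF, #exception_offset]    ; and clear it
 */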

void ArmMir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
  RegStorage reg_card_base = AllocTemp();
  RegStorage reg_card_no = AllocTemp();
  LoadWordDisp(rs_rARM_SELF, Thread::CardTableOffset<4>().Int32Value(), reg_card_base);
  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
  FreeTemp(reg_card_base);
  FreeTemp(reg_card_no);
}
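/*
 * Card-marking sketch. Note the common trick: the card table base register
 * doubles as the "dirty" byte value stored into the card, so no extra
 * constant load is needed:
 *
 *   ldr   r_base, [rSELF, #card_table_offset]
 *   lsr   r_no, tgt_addr, #kCardShift
 *   strb  r_base, [r_base, r_no]   ; card[addr >> kCardShift] = base & 0xff
 */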

static dwarf::Reg DwarfCoreReg(int num) {
  return dwarf::Reg::ArmCore(num);
}

static dwarf::Reg DwarfFpReg(int num) {
  return dwarf::Reg::ArmFp(num);
}

void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
  DCHECK_EQ(cfi_.GetCurrentCFAOffset(), 0);  // Empty stack.
  int spill_count = num_core_spills_ + num_fp_spills_;
  /*
   * On entry, r0, r1, r2 & r3 are live. Let the register allocation
   * mechanism know so it doesn't try to use any of them when
   * expanding the frame or flushing. This leaves the utility
   * code with a single temp: r12. This should be enough.
   */
  LockTemp(rs_r0);
  LockTemp(rs_r1);
  LockTemp(rs_r2);
  LockTemp(rs_r3);

  /*
   * We can safely skip the stack overflow check if we're
   * a leaf *and* our frame size < fudge factor.
   */
  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, kArm);
  const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm);
  bool large_frame = (static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes);
  bool generate_explicit_stack_overflow_check = large_frame ||
      !cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks();
  if (!skip_overflow_check) {
    if (generate_explicit_stack_overflow_check) {
      if (!large_frame) {
        /* Load stack limit */
        LockTemp(rs_r12);
        Load32Disp(rs_rARM_SELF, Thread::StackEndOffset<4>().Int32Value(), rs_r12);
      }
    } else {
      // Implicit stack overflow check.
      // Generate a load from [sp, #-overflowsize]. If this is in the stack
      // redzone we will get a segmentation fault.
      //
      // Caveat coder: if someone changes the kStackOverflowReservedBytes value
      // we need to make sure that it's loadable in an immediate field of
      // a sub instruction. Otherwise we will get a temp allocation and the
      // code size will increase.
      //
      // This is done before the callee-save instructions to avoid any possibility
      // of these overflowing. It uses r12, which is never saved as a callee save.
      OpRegRegImm(kOpSub, rs_r12, rs_rARM_SP, GetStackOverflowReservedBytes(kArm));
      Load32Disp(rs_r12, 0, rs_r12);
      MarkPossibleStackOverflowException();
    }
  }
  /* Spill core callee saves */
  if (core_spill_mask_ != 0u) {
    if ((core_spill_mask_ & ~(0xffu | (1u << rs_rARM_LR.GetRegNum()))) == 0u) {
      // Spilling only low regs and/or LR, use 16-bit PUSH.
      constexpr int lr_bit_shift = rs_rARM_LR.GetRegNum() - 8;
      NewLIR1(kThumbPush,
              (core_spill_mask_ & ~(1u << rs_rARM_LR.GetRegNum())) |
              ((core_spill_mask_ & (1u << rs_rARM_LR.GetRegNum())) >> lr_bit_shift));
    } else if (IsPowerOfTwo(core_spill_mask_)) {
      // kThumb2Push cannot be used to spill a single register.
      NewLIR1(kThumb2Push1, CTZ(core_spill_mask_));
    } else {
      NewLIR1(kThumb2Push, core_spill_mask_);
    }
    cfi_.AdjustCFAOffset(num_core_spills_ * kArmPointerSize);
    cfi_.RelOffsetForMany(DwarfCoreReg(0), 0, core_spill_mask_, kArmPointerSize);
  }
  /* Need to spill any FP regs? */
  if (num_fp_spills_ != 0u) {
    /*
     * NOTE: fp spills are a little different from core spills in that
     * they are pushed as a contiguous block. When promoting from
     * the fp set, we must allocate all singles from s16..highest-promoted.
     */
    NewLIR1(kThumb2VPushCS, num_fp_spills_);
    cfi_.AdjustCFAOffset(num_fp_spills_ * kArmPointerSize);
    cfi_.RelOffsetForMany(DwarfFpReg(0), 0, fp_spill_mask_, kArmPointerSize);
  }

  const int spill_size = spill_count * 4;
  const int frame_size_without_spills = frame_size_ - spill_size;
  if (!skip_overflow_check) {
    if (generate_explicit_stack_overflow_check) {
      class StackOverflowSlowPath : public LIRSlowPath {
       public:
        StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, bool restore_lr, size_t sp_displace)
            : LIRSlowPath(m2l, branch), restore_lr_(restore_lr),
              sp_displace_(sp_displace) {
        }
        void Compile() OVERRIDE {
          m2l_->ResetRegPool();
          m2l_->ResetDefTracking();
          GenerateTargetLabel(kPseudoThrowTarget);
          if (restore_lr_) {
            m2l_->LoadWordDisp(rs_rARM_SP, sp_displace_ - 4, rs_rARM_LR);
          }
          m2l_->OpRegImm(kOpAdd, rs_rARM_SP, sp_displace_);
          m2l_->cfi().AdjustCFAOffset(-sp_displace_);
          m2l_->ClobberCallerSave();
          ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowStackOverflow);
          // Load the entrypoint directly into the pc instead of doing a load + branch. Assumes
          // codegen and target are in thumb2 mode.
          // NOTE: native pointer.
          m2l_->LoadWordDisp(rs_rARM_SELF, func_offset.Int32Value(), rs_rARM_PC);
          m2l_->cfi().AdjustCFAOffset(sp_displace_);
        }

       private:
        const bool restore_lr_;
        const size_t sp_displace_;
      };
      if (large_frame) {
        // Note: may need a temp reg, and we only have r12 free at this point.
        OpRegRegImm(kOpSub, rs_rARM_LR, rs_rARM_SP, frame_size_without_spills);
        Load32Disp(rs_rARM_SELF, Thread::StackEndOffset<4>().Int32Value(), rs_r12);
        LIR* branch = OpCmpBranch(kCondUlt, rs_rARM_LR, rs_r12, nullptr);
        // Need to restore LR since we used it as a temp.
        AddSlowPath(new (arena_) StackOverflowSlowPath(this, branch, true, spill_size));
        OpRegCopy(rs_rARM_SP, rs_rARM_LR);  // Establish stack.
        cfi_.AdjustCFAOffset(frame_size_without_spills);
      } else {
        /*
         * If the frame is small enough we are guaranteed to have enough space that remains to
         * handle signals on the user stack. However, we may not have any free temp
         * registers at this point, so we'll temporarily add LR to the temp pool.
         */
        DCHECK(!GetRegInfo(rs_rARM_LR)->IsTemp());
        MarkTemp(rs_rARM_LR);
        FreeTemp(rs_rARM_LR);
        OpRegRegImm(kOpSub, rs_rARM_SP, rs_rARM_SP, frame_size_without_spills);
        cfi_.AdjustCFAOffset(frame_size_without_spills);
        Clobber(rs_rARM_LR);
        UnmarkTemp(rs_rARM_LR);
        LIR* branch = OpCmpBranch(kCondUlt, rs_rARM_SP, rs_r12, nullptr);
        AddSlowPath(new (arena_) StackOverflowSlowPath(this, branch, false, frame_size_));
      }
    } else {
      // Implicit stack overflow check has already been done. Just make room on the
      // stack for the frame now.
      OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills);
      cfi_.AdjustCFAOffset(frame_size_without_spills);
    }
  } else {
    OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills);
    cfi_.AdjustCFAOffset(frame_size_without_spills);
  }

  FlushIns(ArgLocs, rl_method);

  // We can promote a PC-relative reference to dex cache arrays to a register
  // if it's used at least twice. Without investigating where we should lazily
  // load the reference, we conveniently load it after flushing inputs.
  if (dex_cache_arrays_base_reg_.Valid()) {
    OpPcRelDexCacheArrayAddr(cu_->dex_file, dex_cache_arrays_min_offset_,
                             dex_cache_arrays_base_reg_);
  }

  FreeTemp(rs_r0);
  FreeTemp(rs_r1);
  FreeTemp(rs_r2);
  FreeTemp(rs_r3);
  FreeTemp(rs_r12);
}
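/*
 * For orientation, a typical prologue produced above for a small frame with
 * an explicit overflow check looks roughly like this (spill masks and the
 * exact check vary with promotion and frame size):
 *
 *   ldr   r12, [rSELF, #stack_end_offset]  ; stack limit
 *   push  {r5-r8, r10, r11, lr}            ; core callee saves
 *   vpush {s16-s31}                        ; fp callee saves, if any
 *   sub   sp, sp, #frame_remainder
 *   cmp   sp, r12
 *   blo   throw_stack_overflow             ; via StackOverflowSlowPath
 */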

void ArmMir2Lir::GenExitSequence() {
  cfi_.RememberState();
  int spill_count = num_core_spills_ + num_fp_spills_;

  /*
   * In the exit path, r0/r1 are live - make sure they aren't
   * allocated by the register utilities as temps.
   */
  LockTemp(rs_r0);
  LockTemp(rs_r1);

  int adjust = frame_size_ - (spill_count * kArmPointerSize);
  OpRegImm(kOpAdd, rs_rARM_SP, adjust);
  cfi_.AdjustCFAOffset(-adjust);
  /* Need to restore any FP callee saves? */
  if (num_fp_spills_) {
    NewLIR1(kThumb2VPopCS, num_fp_spills_);
    cfi_.AdjustCFAOffset(-num_fp_spills_ * kArmPointerSize);
    cfi_.RestoreMany(DwarfFpReg(0), fp_spill_mask_);
  }
  bool unspill_LR_to_PC = (core_spill_mask_ & (1 << rs_rARM_LR.GetRegNum())) != 0;
  if (unspill_LR_to_PC) {
    core_spill_mask_ &= ~(1 << rs_rARM_LR.GetRegNum());
    core_spill_mask_ |= (1 << rs_rARM_PC.GetRegNum());
  }
  if (core_spill_mask_ != 0u) {
    if ((core_spill_mask_ & ~(0xffu | (1u << rs_rARM_PC.GetRegNum()))) == 0u) {
      // Unspilling only low regs and/or PC, use 16-bit POP.
      constexpr int pc_bit_shift = rs_rARM_PC.GetRegNum() - 8;
      NewLIR1(kThumbPop,
              (core_spill_mask_ & ~(1u << rs_rARM_PC.GetRegNum())) |
              ((core_spill_mask_ & (1u << rs_rARM_PC.GetRegNum())) >> pc_bit_shift));
    } else if (IsPowerOfTwo(core_spill_mask_)) {
      // kThumb2Pop cannot be used to unspill a single register.
      NewLIR1(kThumb2Pop1, CTZ(core_spill_mask_));
    } else {
      NewLIR1(kThumb2Pop, core_spill_mask_);
    }
    // If we pop to PC, there is no further epilogue code.
    if (!unspill_LR_to_PC) {
      cfi_.AdjustCFAOffset(-num_core_spills_ * kArmPointerSize);
      cfi_.RestoreMany(DwarfCoreReg(0), core_spill_mask_);
      DCHECK_EQ(cfi_.GetCurrentCFAOffset(), 0);  // Empty stack.
    }
  }
  if (!unspill_LR_to_PC) {
    /* We didn't pop to rARM_PC, so must do a bx rARM_LR */
    NewLIR1(kThumbBx, rs_rARM_LR.GetReg());
  }
  // The CFI should be restored for any code that follows the exit block.
  cfi_.RestoreState();
  cfi_.DefCFAOffset(frame_size_);
}
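/*
 * The matching epilogue is roughly:
 *
 *   add   sp, sp, #frame_remainder
 *   vpop  {s16-s31}                 ; if fp regs were spilled
 *   pop   {r5-r8, r10, r11, pc}     ; return via pc when LR was spilled,
 *                                   ; otherwise pop {...} followed by bx lr
 */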

void ArmMir2Lir::GenSpecialExitSequence() {
  NewLIR1(kThumbBx, rs_rARM_LR.GetReg());
}

void ArmMir2Lir::GenSpecialEntryForSuspend() {
  // Keep 16-byte stack alignment - push r0, i.e. ArtMethod*, r5, r6, lr.
  DCHECK(!IsTemp(rs_r5));
  DCHECK(!IsTemp(rs_r6));
  core_spill_mask_ =
      (1u << rs_r5.GetRegNum()) | (1u << rs_r6.GetRegNum()) | (1u << rs_rARM_LR.GetRegNum());
  num_core_spills_ = 3u;
  fp_spill_mask_ = 0u;
  num_fp_spills_ = 0u;
  frame_size_ = 16u;
  core_vmap_table_.clear();
  fp_vmap_table_.clear();
  NewLIR1(kThumbPush, (1u << rs_r0.GetRegNum()) |                 // ArtMethod*
          (core_spill_mask_ & ~(1u << rs_rARM_LR.GetRegNum())) |  // Spills other than LR.
          (1u << 8));                                             // LR encoded for 16-bit push.
  cfi_.AdjustCFAOffset(frame_size_);
  // Do not generate CFI for scratch register r0.
  cfi_.RelOffsetForMany(DwarfCoreReg(0), 4, core_spill_mask_, kArmPointerSize);
}

void ArmMir2Lir::GenSpecialExitForSuspend() {
  // Pop the frame. (ArtMethod* no longer needed but restore it anyway.)
  NewLIR1(kThumb2Pop, (1u << rs_r0.GetRegNum()) | core_spill_mask_);  // 32-bit because of LR.
  cfi_.AdjustCFAOffset(-frame_size_);
  cfi_.RestoreMany(DwarfCoreReg(0), core_spill_mask_);
}
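/*
 * The suspend entry/exit pair above produces a fixed 16-byte frame, e.g.:
 *
 *   push  {r0, r5, r6, lr}   ; ArtMethod*, two callee saves, return address
 *   ...                      ; suspend-check runtime call
 *   pop   {r0, r5, r6, lr}   ; 32-bit POP encoding because LR (not PC) is popped
 */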

static bool ArmUseRelativeCall(CompilationUnit* cu, const MethodReference& target_method) {
  // Emit relative calls only within a dex file due to the limited range (about +/-16 MiB)
  // of the Thumb2 BL insn.
  return cu->dex_file == target_method.dex_file;
}

/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 */
int ArmMir2Lir::ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED,
                                  int state, const MethodReference& target_method,
                                  uint32_t unused_idx ATTRIBUTE_UNUSED,
                                  uintptr_t direct_code, uintptr_t direct_method,
                                  InvokeType type) {
  ArmMir2Lir* cg = static_cast<ArmMir2Lir*>(cu->cg.get());
  if (direct_code != 0 && direct_method != 0) {
    switch (state) {
      case 0:  // Get the current Method* [sets kArg0]
        if (direct_code != static_cast<uintptr_t>(-1)) {
          cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
        } else if (ArmUseRelativeCall(cu, target_method)) {
          // Defer to linker patch.
        } else {
          cg->LoadCodeAddress(target_method, type, kInvokeTgt);
        }
        if (direct_method != static_cast<uintptr_t>(-1)) {
          cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
        } else {
          cg->LoadMethodAddress(target_method, type, kArg0);
        }
        break;
      default:
        return -1;
    }
  } else {
    bool use_pc_rel = cg->CanUseOpPcRelDexCacheArrayLoad();
    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
    switch (state) {
      case 0:  // Get the current Method* [sets kArg0]
        // TUNING: we can save a reg copy if Method* has been promoted.
        if (!use_pc_rel) {
          cg->LoadCurrMethodDirect(arg0_ref);
          break;
        }
        ++state;
        FALLTHROUGH_INTENDED;
      case 1:  // Get method->dex_cache_resolved_methods_
        if (!use_pc_rel) {
          cg->LoadRefDisp(arg0_ref,
                          mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
                          arg0_ref,
                          kNotVolatile);
        }
        // Set up direct code if known.
        if (direct_code != 0) {
          if (direct_code != static_cast<uintptr_t>(-1)) {
            cg->LoadConstant(cg->TargetPtrReg(kInvokeTgt), direct_code);
          } else if (ArmUseRelativeCall(cu, target_method)) {
            // Defer to linker patch.
          } else {
            CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
            cg->LoadCodeAddress(target_method, type, kInvokeTgt);
          }
        }
        if (!use_pc_rel || direct_code != 0) {
          break;
        }
        ++state;
        FALLTHROUGH_INTENDED;
      case 2:  // Grab target method*
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        if (!use_pc_rel) {
          cg->LoadRefDisp(arg0_ref,
                          mirror::ObjectArray<mirror::Object>::OffsetOfElement(
                              target_method.dex_method_index).Int32Value(),
                          arg0_ref,
                          kNotVolatile);
        } else {
          size_t offset = cg->dex_cache_arrays_layout_.MethodOffset(target_method.dex_method_index);
          cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, arg0_ref);
        }
        break;
      case 3:  // Grab the code from the method*
        if (direct_code == 0) {
          // kInvokeTgt := arg0_ref->entrypoint
          cg->LoadWordDisp(arg0_ref,
                           mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
                               kArmPointerSize).Int32Value(), cg->TargetPtrReg(kInvokeTgt));
        }
        break;
      default:
        return -1;
    }
  }
  return state + 1;
}
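/*
 * ArmNextSDCallInsn() is driven one state at a time by the shared invoke
 * lowering code; a rough sketch of the calling protocol (illustrative,
 * not the exact caller):
 *
 *   int state = 0;
 *   while (state >= 0) {
 *     state = next_call_insn(cu, info, state, target_method, ...);
 *     // ... interleaved with argument setup ...
 *   }
 *
 * Each invocation emits at most one step of the Method* and code-pointer
 * load sequence and returns the next state; -1 signals that the sequence
 * is exhausted.
 */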

NextCallInsn ArmMir2Lir::GetNextSDCallInsn() {
  return ArmNextSDCallInsn;
}

LIR* ArmMir2Lir::CallWithLinkerFixup(const MethodReference& target_method, InvokeType type) {
  // For ARM, just generate a relative BL instruction that will be filled in at 'link time'.
  // If the target turns out to be too far, the linker will generate a thunk for dispatch.
  int target_method_idx = target_method.dex_method_index;
  const DexFile* target_dex_file = target_method.dex_file;

  // Generate the call instruction and save index, dex_file, and type.
  // NOTE: Method deduplication takes linker patches into account, so we can just pass 0
  // as a placeholder for the offset.
  LIR* call = RawLIR(current_dalvik_offset_, kThumb2Bl, 0,
                     target_method_idx, WrapPointer(target_dex_file), type);
  AppendLIR(call);
  call_method_insns_.push_back(call);
  return call;
}

LIR* ArmMir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info) {
  LIR* call_insn;
  if (method_info.FastPath() && ArmUseRelativeCall(cu_, method_info.GetTargetMethod()) &&
      (method_info.GetSharpType() == kDirect || method_info.GetSharpType() == kStatic) &&
      method_info.DirectCode() == static_cast<uintptr_t>(-1)) {
    call_insn = CallWithLinkerFixup(method_info.GetTargetMethod(), method_info.GetSharpType());
  } else {
    call_insn = OpReg(kOpBlx, TargetPtrReg(kInvokeTgt));
  }
  return call_insn;
}

}  // namespace art