/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the X86 ISA */

#include "codegen_x86.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "gc/accounting/card_table.h"
#include "mirror/art_method.h"
#include "mirror/object_array-inl.h"
#include "x86_lir.h"

namespace art {

/*
 * The sparse table in the literal pool is an array of <key,displacement>
 * pairs.
 */
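/*
 * Layout of a Dex sparse-switch payload, as consumed below (per the Dalvik
 * bytecode format; targets are relative to the address of the switch opcode):
 *   ushort ident = 0x0200;
 *   ushort size;           // entry count, read from table[1]
 *   int    keys[size];     // sorted keys, starting at &table[2]
 *   int    targets[size];  // branch displacements, immediately after the keys
 */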
void X86Mir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
  if (cu_->verbose) {
    DumpSparseSwitchTable(table);
  }
  int entries = table[1];
  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
  const int32_t* targets = &keys[entries];
  rl_src = LoadValue(rl_src, kCoreReg);
  for (int i = 0; i < entries; i++) {
    int key = keys[i];
    BasicBlock* case_block =
        mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
    OpCmpImmBranch(kCondEq, rl_src.reg, key, &block_label_list_[case_block->id]);
  }
}

/*
 * Code pattern will look something like:
 *
 * mov r_val, ..
 * call 0
 * pop r_start_of_method
 * sub r_start_of_method, ..
 * mov r_key_reg, r_val
 * sub r_key_reg, low_key
 * cmp r_key_reg, size-1  ; bound check
 * ja done
 * mov r_disp, [r_start_of_method + r_key_reg * 4 + table_offset]
 * add r_start_of_method, r_disp
 * jmp r_start_of_method
 * done:
 */
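/*
 * Layout of the Dex packed-switch payload backing this pattern (per the
 * Dalvik bytecode format):
 *   ushort ident = 0x0100;
 *   ushort size;           // entry count, read from table[1]
 *   int    first_key;      // the low_key below, read via s4FromSwitchData()
 *   int    targets[size];  // displacements relative to the switch opcode
 */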
void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
  if (cu_->verbose) {
    DumpPackedSwitchTable(table);
  }
  // Add the table to the list - we'll process it later.
  SwitchTable* tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  int size = table[1];
  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
                                                      kArenaAllocLIR));
  switch_tables_.push_back(tab_rec);

  // Get the switch value.
  rl_src = LoadValue(rl_src, kCoreReg);
  // NewLIR0(kX86Bkpt);

  // Materialize a pointer to the switch table.
  RegStorage start_of_method_reg;
  if (base_of_code_ != nullptr) {
    // We can use the saved value.
    RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
    if (rl_method.wide) {
      rl_method = LoadValueWide(rl_method, kCoreReg);
    } else {
      rl_method = LoadValue(rl_method, kCoreReg);
    }
    start_of_method_reg = rl_method.reg;
    store_method_addr_used_ = true;
  } else {
    start_of_method_reg = AllocTempRef();
    NewLIR1(kX86StartOfMethod, start_of_method_reg.GetReg());
  }
  DCHECK_EQ(start_of_method_reg.Is64Bit(), cu_->target64);
  int low_key = s4FromSwitchData(&table[2]);
  RegStorage keyReg;
  // Remove the bias, if necessary.
  if (low_key == 0) {
    keyReg = rl_src.reg;
  } else {
    keyReg = AllocTemp();
    OpRegRegImm(kOpSub, keyReg, rl_src.reg, low_key);
  }
  // Bounds check - if < 0 or >= size, continue following switch.
  OpRegImm(kOpCmp, keyReg, size - 1);
  LIR* branch_over = OpCondBranch(kCondHi, nullptr);

  // Load the displacement from the switch table.
  RegStorage disp_reg = AllocTemp();
  NewLIR5(kX86PcRelLoadRA, disp_reg.GetReg(), start_of_method_reg.GetReg(), keyReg.GetReg(),
          2, WrapPointer(tab_rec));
  // Add displacement to start of method.
  OpRegReg(kOpAdd, start_of_method_reg, cu_->target64 ? As64BitReg(disp_reg) : disp_reg);
  // ..and go!
  LIR* switch_branch = NewLIR1(kX86JmpR, start_of_method_reg.GetReg());
  tab_rec->anchor = switch_branch;

  /* branch_over target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
}

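// Loads the pending exception out of Thread::exception_ (addressed via the
// thread segment register by the kX86Mov*RT forms) and then zeroes the field,
// so the exception is consumed exactly once.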
void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
  int ex_offset = cu_->target64 ?
      Thread::ExceptionOffset<8>().Int32Value() :
      Thread::ExceptionOffset<4>().Int32Value();
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
  NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
  NewLIR2(cu_->target64 ? kX86Mov64TI : kX86Mov32TI, ex_offset, 0);
  StoreValue(rl_dest, rl_result);
}

/*
 * Mark garbage collection card. Skip if the value we're storing is null.
 */
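// Addressing sketch, per the ART card table design: the card index is
// tgt_addr >> CardTable::kCardShift, and the byte stored is the low byte of
// the biased card-table base itself, which the runtime arranges to equal the
// "dirty" marker. Reusing the base register as the store value avoids
// materializing a separate constant.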
void X86Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
  DCHECK_EQ(tgt_addr_reg.Is64Bit(), cu_->target64);
  DCHECK_EQ(val_reg.Is64Bit(), cu_->target64);
  RegStorage reg_card_base = AllocTempRef();
  RegStorage reg_card_no = AllocTempRef();
  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, nullptr);
  int ct_offset = cu_->target64 ?
      Thread::CardTableOffset<8>().Int32Value() :
      Thread::CardTableOffset<4>().Int32Value();
  NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, reg_card_base.GetReg(), ct_offset);
  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
  FreeTemp(reg_card_base);
  FreeTemp(reg_card_no);
}

void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live. Let the register
   * allocation mechanism know so it doesn't try to use any of them when
   * expanding the frame or flushing. This leaves the utility
   * code with no spare temps.
   */
  LockTemp(rs_rX86_ARG0);
  LockTemp(rs_rX86_ARG1);
  LockTemp(rs_rX86_ARG2);

  /*
   * We can safely skip the stack overflow check if we're
   * a leaf *and* our frame size < fudge factor.
   */
  InstructionSet isa = cu_->target64 ? kX86_64 : kX86;
  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, isa);

  // If we're doing an implicit stack overflow check, perform the load immediately
  // before the stack pointer is decremented and anything is saved.
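  // Sketch of why this works, assuming the implicit-checks design: the test
  // below reads from [esp - overflow], which lands in the protected region
  // below the stack limit if the frame cannot fit, and the runtime's fault
  // handler turns the resulting signal into a StackOverflowError at this PC.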
  if (!skip_overflow_check &&
      cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
    // Implicit stack overflow check.
    // test eax,[esp + -overflow]
    int overflow = GetStackOverflowReservedBytes(isa);
    NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rX86_SP.GetReg(), -overflow);
    MarkPossibleStackOverflowException();
  }

  /* Build frame, return address already on stack */
  stack_decrement_ = OpRegImm(kOpSub, rs_rX86_SP, frame_size_ -
                              GetInstructionSetPointerSize(cu_->instruction_set));

  NewLIR0(kPseudoMethodEntry);
  /* Spill core callee saves */
  SpillCoreRegs();
  SpillFPRegs();
  if (!skip_overflow_check) {
    class StackOverflowSlowPath : public LIRSlowPath {
     public:
      StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
          : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr), sp_displace_(sp_displace) {
      }
      void Compile() OVERRIDE {
        m2l_->ResetRegPool();
        m2l_->ResetDefTracking();
        GenerateTargetLabel(kPseudoThrowTarget);
        m2l_->OpRegImm(kOpAdd, rs_rX86_SP, sp_displace_);
        m2l_->ClobberCallerSave();
        // The stack overflow throw entrypoint does not return, so there is no
        // safepoint PC to mark and no link-style call to make.
        m2l_->CallHelper(RegStorage::InvalidReg(), kQuickThrowStackOverflow,
                         false /* MarkSafepointPC */, false /* UseLink */);
      }

     private:
      const size_t sp_displace_;
    };
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
      // TODO: for large frames we should do something like:
      // spill ebp
      // lea ebp, [esp + frame_size]
      // cmp ebp, fs:[stack_end_]
      // jcc stack_overflow_exception
      // mov esp, ebp
      // in case a signal comes in that's not using an alternate signal stack and the large frame
      // may have moved us outside of the reserved area at the end of the stack.
      // cmp rs_rX86_SP, fs:[stack_end_]; jcc throw_slowpath
      if (cu_->target64) {
        OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<8>());
      } else {
        OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<4>());
      }
      LIR* branch = OpCondBranch(kCondUlt, nullptr);
      AddSlowPath(
          new(arena_)StackOverflowSlowPath(this, branch,
                                           frame_size_ -
                                           GetInstructionSetPointerSize(cu_->instruction_set)));
    }
  }

  FlushIns(ArgLocs, rl_method);

  if (base_of_code_ != nullptr) {
    RegStorage method_start = TargetPtrReg(kArg0);
    // We have been asked to save the address of the method start for later use.
    setup_method_address_[0] = NewLIR1(kX86StartOfMethod, method_start.GetReg());
    int displacement = SRegOffset(base_of_code_->s_reg_low);
    // Native pointer - must be natural word size.
    setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, method_start,
                                             cu_->target64 ? k64 : k32, kNotVolatile);
  }

  FreeTemp(rs_rX86_ARG0);
  FreeTemp(rs_rX86_ARG1);
  FreeTemp(rs_rX86_ARG2);
}

void X86Mir2Lir::GenExitSequence() {
  /*
   * In the exit path, rX86_RET0/rX86_RET1 are live - make sure they aren't
   * allocated by the register utilities as temps.
   */
  LockTemp(rs_rX86_RET0);
  LockTemp(rs_rX86_RET1);

  NewLIR0(kPseudoMethodExit);
  UnSpillCoreRegs();
  UnSpillFPRegs();
  /* Remove frame except for return address */
  stack_increment_ = OpRegImm(kOpAdd, rs_rX86_SP,
                              frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set));
  NewLIR0(kX86Ret);
}

void X86Mir2Lir::GenSpecialExitSequence() {
  NewLIR0(kX86Ret);
}

void X86Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return;
  }
  // Implicit null pointer check.
  // test eax,[arg1+0]
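  // A faulting read stands in for an explicit null compare (assuming the
  // implicit-checks design): if reg is null the test traps, and the runtime's
  // fault handler converts the signal into a NullPointerException at this PC.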
  NewLIR3(kX86Test32RM, rs_rAX.GetReg(), reg.GetReg(), 0);
  MarkPossibleNullPointerException(opt_flags);
}

/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 */
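// Protocol sketch, as implied by the return values below: the invoke lowering
// calls this repeatedly with state = 0, 1, 2, ..., emitting at most one step
// per call so the loads can be interleaved with argument setup; returning -1
// signals that the sequence is complete.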
static int X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                             int state, const MethodReference& target_method,
                             uint32_t,
                             uintptr_t direct_code, uintptr_t direct_method,
                             InvokeType type) {
  UNUSED(info, direct_code);
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  if (direct_method != 0) {
    switch (state) {
      case 0:  // Get the current Method* [sets kArg0]
        if (direct_method != static_cast<uintptr_t>(-1)) {
          cg->LoadConstant(cg->TargetReg(kArg0, kRef), direct_method);
        } else {
          cg->LoadMethodAddress(target_method, type, kArg0);
        }
        break;
      default:
        return -1;
    }
  } else {
    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
    switch (state) {
      case 0:  // Get the current Method* [sets kArg0]
        // TUNING: we can save a reg copy if Method* has been promoted.
        cg->LoadCurrMethodDirect(arg0_ref);
        break;
      case 1:  // Get method->dex_cache_resolved_methods_
        cg->LoadRefDisp(arg0_ref,
                        mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
                        arg0_ref,
                        kNotVolatile);
        break;
      case 2:  // Grab target method*
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        cg->LoadRefDisp(arg0_ref,
                        mirror::ObjectArray<mirror::Object>::OffsetOfElement(
                            target_method.dex_method_index).Int32Value(),
                        arg0_ref,
                        kNotVolatile);
        break;
      default:
        return -1;
    }
  }
  return state + 1;
}

NextCallInsn X86Mir2Lir::GetNextSDCallInsn() {
  return X86NextSDCallInsn;
}

}  // namespace art