/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the X86 ISA */

#include "codegen_x86.h"

#include "base/logging.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "gc/accounting/card_table.h"
#include "mirror/art_method.h"
#include "mirror/object_array-inl.h"
#include "utils/dex_cache_arrays_layout-inl.h"
#include "x86_lir.h"

namespace art {

/*
 * The sparse table in the literal pool is an array of <key,displacement>
 * pairs.
 */
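// On x86 there is no separate "large" sparse-switch expansion: we simply defer
// to GenSmallSparseSwitch, which (in the shared Mir2Lir code) emits a chain of
// compare-and-branch instructions over the <key,displacement> pairs.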
void X86Mir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
  GenSmallSparseSwitch(mir, table_offset, rl_src);
}

/*
 * The 32-bit code pattern will look something like:
 *
 *   mov   r_val, ..
 *   call  0                    ; get the anchor PC
 *   pop   r_pc
 *   mov   r_key_reg, r_val
 *   sub   r_key_reg, low_key
 *   cmp   r_key_reg, size-1    ; bounds check
 *   ja    done
 *   mov   r_disp, [r_pc + r_key_reg * 4 + table_offset]
 *   add   r_disp, r_pc
 *   jmp   r_disp
 * done:
 *
 * On x86-64 the table address is materialized with a RIP-relative lea instead
 * of the call/pop idiom, the 32-bit entry is loaded with a sign-extending
 * movsxd, and the jump goes through the resulting absolute address.
 */
void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
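  // Dex packed-switch payload layout, viewed as uint16_t words: [0] ident
  // (0x0100), [1] case count, [2..3] the first (lowest) key, followed by the
  // 32-bit branch targets. Hence table[1] below is the size and
  // s4FromSwitchData(&table[2]) recovers the low key.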
  // Add the table to the list - we'll process it later
  SwitchTable* tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->switch_mir = mir;
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  int size = table[1];
  switch_tables_.push_back(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);

  int low_key = s4FromSwitchData(&table[2]);
  RegStorage keyReg;
  // Remove the bias, if necessary
  if (low_key == 0) {
    keyReg = rl_src.reg;
  } else {
    keyReg = AllocTemp();
    OpRegRegImm(kOpSub, keyReg, rl_src.reg, low_key);
  }

  // Bounds check - if < 0 or >= size continue following switch
  OpRegImm(kOpCmp, keyReg, size - 1);
  LIR* branch_over = OpCondBranch(kCondHi, nullptr);

  RegStorage addr_for_jump;
  if (cu_->target64) {
    RegStorage table_base = AllocTempWide();
    // Load the address of the table into table_base.
    LIR* lea = RawLIR(current_dalvik_offset_, kX86Lea64RM, table_base.GetReg(), kRIPReg,
                      256, 0, WrapPointer(tab_rec));
    lea->flags.fixup = kFixupSwitchTable;
    AppendLIR(lea);

    // Load the offset from the table out of the table.
    addr_for_jump = AllocTempWide();
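    // movsxd addr_for_jump, [table_base + keyReg * 4]: the scale argument of 2
    // below means 2^2 = 4 bytes per entry. Each entry is presumably a
    // sign-extended 32-bit displacement relative to the start of the table,
    // which is why it is added to table_base afterwards.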
    NewLIR5(kX86MovsxdRA, addr_for_jump.GetReg(), table_base.GetReg(), keyReg.GetReg(), 2, 0);

    // Add the offset from the table to the table base.
    OpRegReg(kOpAdd, addr_for_jump, table_base);
    tab_rec->anchor = nullptr;  // Unused for x86-64.
  } else {
    // Get the PC to a register and get the anchor.
    LIR* anchor;
    RegStorage r_pc = GetPcAndAnchor(&anchor);

    // Load the displacement from the switch table.
    addr_for_jump = AllocTemp();
    NewLIR5(kX86PcRelLoadRA, addr_for_jump.GetReg(), r_pc.GetReg(), keyReg.GetReg(),
            2, WrapPointer(tab_rec));
    // Add displacement and r_pc to get the address.
    OpRegReg(kOpAdd, addr_for_jump, r_pc);
    tab_rec->anchor = anchor;
  }

  // ..and go!
  NewLIR1(kX86JmpR, addr_for_jump.GetReg());

  /* branch_over target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
}

void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
  int ex_offset = cu_->target64 ?
      Thread::ExceptionOffset<8>().Int32Value() :
      Thread::ExceptionOffset<4>().Int32Value();
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
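  // Thread-relative moves: load the pending exception out of the Thread's
  // exception_ field (addressed through the thread segment register, fs on
  // x86 and gs on x86-64), then store zero back to clear it.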
  NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
  NewLIR2(cu_->target64 ? kX86Mov64TI : kX86Mov32TI, ex_offset, 0);
  StoreValue(rl_dest, rl_result);
}

void X86Mir2Lir::UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) {
  DCHECK_EQ(tgt_addr_reg.Is64Bit(), cu_->target64);
  RegStorage reg_card_base = AllocTempRef();
  RegStorage reg_card_no = AllocTempRef();
  int ct_offset = cu_->target64 ?
      Thread::CardTableOffset<8>().Int32Value() :
      Thread::CardTableOffset<4>().Int32Value();
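  // Write-barrier card mark: card_table_base[tgt_addr >> kCardShift] = dirty.
  // The byte stored is the low byte of reg_card_base itself; the biased card
  // table base is presumably arranged so that its low byte equals the
  // dirty-card value, which saves loading a separate constant.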
  NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, reg_card_base.GetReg(), ct_offset);
  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
  FreeTemp(reg_card_base);
  FreeTemp(reg_card_no);
}

static dwarf::Reg DwarfCoreReg(bool is_x86_64, int num) {
  return is_x86_64 ? dwarf::Reg::X86_64Core(num) : dwarf::Reg::X86Core(num);
}

void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live.  Let the register
   * allocation mechanism know so it doesn't try to use any of them when
   * expanding the frame or flushing.  This leaves the utility
   * code with no spare temps.
   */
  const RegStorage arg0 = TargetReg32(kArg0);
  const RegStorage arg1 = TargetReg32(kArg1);
  const RegStorage arg2 = TargetReg32(kArg2);
  LockTemp(arg0);
  LockTemp(arg1);
  LockTemp(arg2);

  /*
   * We can safely skip the stack overflow check if we're
   * a leaf *and* our frame size < fudge factor.
   */
  const InstructionSet isa = cu_->target64 ? kX86_64 : kX86;
  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, isa);
  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;

  // If we're doing an implicit stack overflow check, perform the load immediately
  // before the stack pointer is decremented and anything is saved.
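  // The implicit check is a dummy load from [esp - reserved_bytes]: if the
  // stack cannot grow that far the load faults, and ART's SIGSEGV fault
  // handler is expected to turn the fault at this marked PC into a
  // StackOverflowError rather than crashing the process.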
  if (!skip_overflow_check &&
      cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
    // Implicit stack overflow check.
    // test eax,[esp + -overflow]
    int overflow = GetStackOverflowReservedBytes(isa);
    NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rSP.GetReg(), -overflow);
    MarkPossibleStackOverflowException();
  }

  /* Build frame, return address already on stack */
  cfi_.SetCurrentCFAOffset(GetInstructionSetPointerSize(cu_->instruction_set));
  OpRegImm(kOpSub, rs_rSP, frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set));
  cfi_.DefCFAOffset(frame_size_);

  /* Spill core callee saves */
  SpillCoreRegs();
  SpillFPRegs();
  if (!skip_overflow_check) {
    class StackOverflowSlowPath : public LIRSlowPath {
     public:
      StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
          : LIRSlowPath(m2l, branch), sp_displace_(sp_displace) {
      }
      void Compile() OVERRIDE {
        m2l_->ResetRegPool();
        m2l_->ResetDefTracking();
        GenerateTargetLabel(kPseudoThrowTarget);
        const RegStorage local_rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
        m2l_->OpRegImm(kOpAdd, local_rs_rSP, sp_displace_);
        m2l_->cfi().AdjustCFAOffset(-sp_displace_);
        m2l_->ClobberCallerSave();
        // The frame was popped above; the stack-overflow throw entrypoint does not return.
        m2l_->CallHelper(RegStorage::InvalidReg(), kQuickThrowStackOverflow,
                         false /* MarkSafepointPC */, false /* UseLink */);
        m2l_->cfi().AdjustCFAOffset(sp_displace_);
      }

     private:
      const size_t sp_displace_;
    };
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
      // TODO: for large frames we should do something like:
      //   spill ebp
      //   lea ebp, [esp + frame_size]
      //   cmp ebp, fs:[stack_end_]
      //   jcc stack_overflow_exception
      //   mov esp, ebp
      // in case a signal comes in that's not using an alternate signal stack and the large frame
      // may have moved us outside of the reserved area at the end of the stack.
      // cmp rs_rX86_SP, fs:[stack_end_]; jcc throw_slowpath
      if (cu_->target64) {
        OpRegThreadMem(kOpCmp, rs_rX86_SP_64, Thread::StackEndOffset<8>());
      } else {
        OpRegThreadMem(kOpCmp, rs_rX86_SP_32, Thread::StackEndOffset<4>());
      }
      LIR* branch = OpCondBranch(kCondUlt, nullptr);
      AddSlowPath(
          new(arena_)StackOverflowSlowPath(this, branch,
                                           frame_size_ -
                                           GetInstructionSetPointerSize(cu_->instruction_set)));
    }
  }

  FlushIns(ArgLocs, rl_method);

  // We can promote the PC of an anchor for PC-relative addressing to a register
  // if it's used at least twice. Rather than working out the ideal point to
  // lazily load it, we simply load it here, after flushing the inputs.
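  // There is no RIP-relative addressing on 32-bit x86, so OpLoadPc presumably
  // materializes the anchor PC with the usual call/pop idiom; the promoted
  // register then serves as the base for later PC-relative loads.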
  if (pc_rel_base_reg_.Valid()) {
    DCHECK(!cu_->target64);
    setup_pc_rel_base_reg_ = OpLoadPc(pc_rel_base_reg_);
  }

  FreeTemp(arg0);
  FreeTemp(arg1);
  FreeTemp(arg2);
}

void X86Mir2Lir::GenExitSequence() {
  cfi_.RememberState();
  /*
   * In the exit path, rX86_RET0/rX86_RET1 are live - make sure they aren't
   * allocated by the register utilities as temps.
   */
  LockTemp(rs_rX86_RET0);
  LockTemp(rs_rX86_RET1);

  UnSpillCoreRegs();
  UnSpillFPRegs();
  /* Remove frame except for return address */
  const RegStorage rs_rSP = cu_->target64 ? rs_rX86_SP_64 : rs_rX86_SP_32;
  int adjust = frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set);
  OpRegImm(kOpAdd, rs_rSP, adjust);
  cfi_.AdjustCFAOffset(-adjust);
  // There is only the return PC on the stack now.
  NewLIR0(kX86Ret);
  // The CFI should be restored for any code that follows the exit block.
  cfi_.RestoreState();
  cfi_.DefCFAOffset(frame_size_);
}

void X86Mir2Lir::GenSpecialExitSequence() {
  NewLIR0(kX86Ret);
}

void X86Mir2Lir::GenSpecialEntryForSuspend() {
  // Keep 16-byte stack alignment: the return address is already pushed, so
  //   - for 32-bit, push EAX (i.e. ArtMethod*), ESI and EDI,
  //   - for 64-bit, push RAX (i.e. ArtMethod*).
  const int kRegSize = cu_->target64 ? 8 : 4;
  cfi_.SetCurrentCFAOffset(kRegSize);  // Return address.
  if (!cu_->target64) {
    DCHECK(!IsTemp(rs_rSI));
    DCHECK(!IsTemp(rs_rDI));
    core_spill_mask_ =
        (1u << rs_rDI.GetRegNum()) | (1u << rs_rSI.GetRegNum()) | (1u << rs_rRET.GetRegNum());
    num_core_spills_ = 3u;
  } else {
    core_spill_mask_ = (1u << rs_rRET.GetRegNum());
    num_core_spills_ = 1u;
  }
  fp_spill_mask_ = 0u;
  num_fp_spills_ = 0u;
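  // Total frame: 32-bit ret(4) + EDI(4) + ESI(4) + EAX(4) = 16 bytes;
  // 64-bit ret(8) + RAX(8) = 16 bytes.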
  frame_size_ = 16u;
  core_vmap_table_.clear();
  fp_vmap_table_.clear();
  if (!cu_->target64) {
    NewLIR1(kX86Push32R, rs_rDI.GetReg());
    cfi_.AdjustCFAOffset(kRegSize);
    cfi_.RelOffset(DwarfCoreReg(cu_->target64, rs_rDI.GetRegNum()), 0);
    NewLIR1(kX86Push32R, rs_rSI.GetReg());
    cfi_.AdjustCFAOffset(kRegSize);
    cfi_.RelOffset(DwarfCoreReg(cu_->target64, rs_rSI.GetRegNum()), 0);
  }
  NewLIR1(kX86Push32R, TargetReg(kArg0, kRef).GetReg());  // ArtMethod*
  cfi_.AdjustCFAOffset(kRegSize);
  // Do not generate CFI for scratch register.
}

void X86Mir2Lir::GenSpecialExitForSuspend() {
  const int kRegSize = cu_->target64 ? 8 : 4;
  // Pop the frame. (ArtMethod* no longer needed but restore it anyway.)
  NewLIR1(kX86Pop32R, TargetReg(kArg0, kRef).GetReg());  // ArtMethod*
  cfi_.AdjustCFAOffset(-kRegSize);
  if (!cu_->target64) {
    NewLIR1(kX86Pop32R, rs_rSI.GetReg());
    cfi_.AdjustCFAOffset(-kRegSize);
    cfi_.Restore(DwarfCoreReg(cu_->target64, rs_rSI.GetRegNum()));
    NewLIR1(kX86Pop32R, rs_rDI.GetReg());
    cfi_.AdjustCFAOffset(-kRegSize);
    cfi_.Restore(DwarfCoreReg(cu_->target64, rs_rDI.GetRegNum()));
  }
}

void X86Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return;
  }
  // Implicit null pointer check.
  // test eax,[arg1+0]
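  // If `reg` is null the load below faults; ART's fault handler recognizes the
  // PC recorded by MarkPossibleNullPointerException and is expected to raise a
  // NullPointerException instead of crashing.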
  NewLIR3(kX86Test32RM, rs_rAX.GetReg(), reg.GetReg(), 0);
  MarkPossibleNullPointerException(opt_flags);
}

/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 */
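// Called repeatedly by the invoke lowering code with an increasing `state`,
// interleaved with argument set-up; each call emits at most one step of the
// Method*-loading sequence and returns the next state, or -1 once there is
// nothing left to emit.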
int X86Mir2Lir::X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                                  int state, const MethodReference& target_method,
                                  uint32_t,
                                  uintptr_t direct_code ATTRIBUTE_UNUSED, uintptr_t direct_method,
                                  InvokeType type) {
  X86Mir2Lir* cg = static_cast<X86Mir2Lir*>(cu->cg.get());
  if (info->string_init_offset != 0) {
    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
    switch (state) {
    case 0: {  // Grab target method* from thread pointer
      cg->NewLIR2(kX86Mov32RT, arg0_ref.GetReg(), info->string_init_offset);
      break;
    }
    default:
      return -1;
    }
  } else if (direct_method != 0) {
    switch (state) {
    case 0:  // Load the target Method* as a constant [sets kArg0]
      if (direct_method != static_cast<uintptr_t>(-1)) {
        auto target_reg = cg->TargetReg(kArg0, kRef);
        if (target_reg.Is64Bit()) {
          cg->LoadConstantWide(target_reg, direct_method);
        } else {
          cg->LoadConstant(target_reg, direct_method);
        }
      } else {
        cg->LoadMethodAddress(target_method, type, kArg0);
      }
      break;
    default:
      return -1;
    }
  } else if (cg->CanUseOpPcRelDexCacheArrayLoad()) {
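    // The ArtMethod* is loaded straight out of the dex cache arrays with a
    // PC-relative load, so the current method never needs to be materialized.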
    switch (state) {
      case 0: {
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        size_t offset = cg->dex_cache_arrays_layout_.MethodOffset(target_method.dex_method_index);
        cg->OpPcRelDexCacheArrayLoad(cu->dex_file, offset, cg->TargetReg(kArg0, kRef));
        break;
      }
      default:
        return -1;
    }
  } else {
    RegStorage arg0_ref = cg->TargetReg(kArg0, kRef);
    switch (state) {
    case 0:  // Get the current Method* [sets kArg0]
      // TUNING: we can save a reg copy if Method* has been promoted.
      cg->LoadCurrMethodDirect(arg0_ref);
      break;
    case 1:  // Get method->dex_cache_resolved_methods_
      cg->LoadRefDisp(arg0_ref,
                      mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(),
                      arg0_ref,
                      kNotVolatile);
      break;
    case 2:  // Grab target method*
      CHECK_EQ(cu->dex_file, target_method.dex_file);
      cg->LoadRefDisp(arg0_ref,
                      mirror::ObjectArray<mirror::Object>::OffsetOfElement(
                          target_method.dex_method_index).Int32Value(),
                      arg0_ref,
                      kNotVolatile);
      break;
    default:
      return -1;
    }
  }
  return state + 1;
}

NextCallInsn X86Mir2Lir::GetNextSDCallInsn() {
  return X86NextSDCallInsn;
}

}  // namespace art