/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the X86 ISA */

#include "codegen_x86.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "gc/accounting/card_table.h"
#include "x86_lir.h"

namespace art {

/*
 * The sparse table in the literal pool is an array of <key,displacement>
 * pairs.
 */
void X86Mir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
  if (cu_->verbose) {
    DumpSparseSwitchTable(table);
  }
  int entries = table[1];
  const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
  const int32_t* targets = &keys[entries];
  rl_src = LoadValue(rl_src, kCoreReg);
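  // Emit one compare-and-branch per table entry: roughly "cmp <switch value>, key_i; je case_i",
  // falling through to the default successor when no key matches.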
  for (int i = 0; i < entries; i++) {
    int key = keys[i];
    BasicBlock* case_block =
        mir_graph_->FindBlock(current_dalvik_offset_ + targets[i]);
    OpCmpImmBranch(kCondEq, rl_src.reg, key, &block_label_list_[case_block->id]);
  }
}

/*
 * Code pattern will look something like:
 *
 * mov r_val, ..
 * call 0
 * pop r_start_of_method
 * sub r_start_of_method, ..
 * mov r_key_reg, r_val
 * sub r_key_reg, low_key
 * cmp r_key_reg, size-1 ; bound check
 * ja done
 * mov r_disp, [r_start_of_method + r_key_reg * 4 + table_offset]
 * add r_start_of_method, r_disp
 * jmp r_start_of_method
 * done:
 */
void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
  if (cu_->verbose) {
    DumpPackedSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable* tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  int size = table[1];
  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
                                                      kArenaAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);
  // NewLIR0(kX86Bkpt);

  // Materialize a pointer to the switch table
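  // On 32-bit x86 there is no PC-relative addressing, so the method's start
  // address (the "call 0 / pop" sequence in the pattern above) is used as the
  // base register when indexing into the embedded switch table.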
  RegStorage start_of_method_reg;
  if (base_of_code_ != nullptr) {
    // We can use the saved value.
    RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
    if (rl_method.wide) {
      rl_method = LoadValueWide(rl_method, kCoreReg);
    } else {
      rl_method = LoadValue(rl_method, kCoreReg);
    }
    start_of_method_reg = rl_method.reg;
    store_method_addr_used_ = true;
  } else {
    start_of_method_reg = AllocTempRef();
    NewLIR1(kX86StartOfMethod, start_of_method_reg.GetReg());
  }
  DCHECK_EQ(start_of_method_reg.Is64Bit(), cu_->target64);
  int low_key = s4FromSwitchData(&table[2]);
  RegStorage keyReg;
  // Remove the bias, if necessary
  if (low_key == 0) {
    keyReg = rl_src.reg;
  } else {
    keyReg = AllocTemp();
    OpRegRegImm(kOpSub, keyReg, rl_src.reg, low_key);
  }
  // Bounds check - if < 0 or >= size continue following switch
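  // The compare below is unsigned (kCondHi / ja), so a biased key that went
  // negative wraps to a large unsigned value and is caught by the same branch
  // as keys >= size.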
  OpRegImm(kOpCmp, keyReg, size - 1);
  LIR* branch_over = OpCondBranch(kCondHi, NULL);

  // Load the displacement from the switch table
  RegStorage disp_reg = AllocTemp();
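  // Indexed load of the 32-bit displacement for the selected case, the
  // "[r_start_of_method + r_key_reg * 4 + table_offset]" step of the pattern
  // above; the table's offset is resolved later from the WrapPointer(tab_rec) operand.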
  NewLIR5(kX86PcRelLoadRA, disp_reg.GetReg(), start_of_method_reg.GetReg(), keyReg.GetReg(),
          2, WrapPointer(tab_rec));
  // Add displacement to start of method
  OpRegReg(kOpAdd, start_of_method_reg, cu_->target64 ? As64BitReg(disp_reg) : disp_reg);
  // ..and go!
  LIR* switch_branch = NewLIR1(kX86JmpR, start_of_method_reg.GetReg());
  tab_rec->anchor = switch_branch;

  /* branch_over target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
}

/*
 * Array data table format:
 *  ushort ident = 0x0300   magic value
 *  ushort width            width of each element in the table
 *  uint   size             number of elements in the table
 *  ubyte  data[size*width] table of data values (may contain a single-byte
 *                          padding at the end)
 *
 * Total size is 4+(width * size + 1)/2 16-bit code units.
 */
void X86Mir2Lir::GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
  // Add the table to the list - we'll process it later
  FillArrayData* tab_rec =
      static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint16_t width = tab_rec->table[1];
  uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
  tab_rec->size = (size * width) + 8;
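  // The recorded size covers the 8-byte table header (ident, width, element
  // count) in addition to the size*width bytes of element data.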

  fill_array_data_.Insert(tab_rec);

  // Making a call - use explicit registers
  FlushAllRegs();   /* Everything to home location */
  RegStorage array_ptr = TargetReg(kArg0, kRef);
  RegStorage payload = TargetPtrReg(kArg1);
  RegStorage method_start = TargetPtrReg(kArg2);

  LoadValueDirectFixed(rl_src, array_ptr);
  // Materialize a pointer to the fill data image
  if (base_of_code_ != nullptr) {
    // We can use the saved value.
    RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
    if (rl_method.wide) {
      LoadValueDirectWide(rl_method, method_start);
    } else {
      LoadValueDirect(rl_method, method_start);
    }
    store_method_addr_used_ = true;
  } else {
    NewLIR1(kX86StartOfMethod, method_start.GetReg());
  }
  NewLIR2(kX86PcRelAdr, payload.GetReg(), WrapPointer(tab_rec));
  OpRegReg(kOpAdd, payload, method_start);
  CallRuntimeHelperRegReg(kQuickHandleFillArrayData, array_ptr, payload, true);
}

void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
  int ex_offset = cu_->target64 ?
      Thread::ExceptionOffset<8>().Int32Value() :
      Thread::ExceptionOffset<4>().Int32Value();
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
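  // Read the pending exception out of the Thread's TLS slot into the result
  // register, then store zero back to clear it.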
  NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
  NewLIR2(cu_->target64 ? kX86Mov64TI : kX86Mov32TI, ex_offset, 0);
  StoreValue(rl_dest, rl_result);
}

/*
 * Mark garbage collection card. Skip if the value we're storing is null.
 */
void X86Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
  DCHECK_EQ(tgt_addr_reg.Is64Bit(), cu_->target64);
  DCHECK_EQ(val_reg.Is64Bit(), cu_->target64);
  RegStorage reg_card_base = AllocTempRef();
  RegStorage reg_card_no = AllocTempRef();
  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
  int ct_offset = cu_->target64 ?
      Thread::CardTableOffset<8>().Int32Value() :
      Thread::CardTableOffset<4>().Int32Value();
  NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, reg_card_base.GetReg(), ct_offset);
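  // Compute the card index (target address >> kCardShift) and dirty the card
  // by storing a byte at card_table_base + index.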
  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
  FreeTemp(reg_card_base);
  FreeTemp(reg_card_no);
}

void X86Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live. Let the register
   * allocation mechanism know so it doesn't try to use any of them when
   * expanding the frame or flushing. This leaves the utility
   * code with no spare temps.
   */
  LockTemp(rs_rX86_ARG0);
  LockTemp(rs_rX86_ARG1);
  LockTemp(rs_rX86_ARG2);

  /*
   * We can safely skip the stack overflow check if we're
   * a leaf *and* our frame size < fudge factor.
   */
  InstructionSet isa = cu_->target64 ? kX86_64 : kX86;
  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, isa);

  // If we are doing an implicit stack overflow check, perform the load immediately
  // before the stack pointer is decremented and anything is saved.
  if (!skip_overflow_check &&
      cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
    // Implicit stack overflow check.
    // test eax,[esp + -overflow]
    int overflow = GetStackOverflowReservedBytes(isa);
    NewLIR3(kX86Test32RM, rs_rAX.GetReg(), rs_rX86_SP.GetReg(), -overflow);
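    // If [esp - overflow] falls in the protected guard region below the
    // stack, this load faults and the fault handler raises the stack
    // overflow exception before any of the frame has been set up.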
    MarkPossibleStackOverflowException();
  }

  /* Build frame, return address already on stack */
  stack_decrement_ = OpRegImm(kOpSub, rs_rX86_SP, frame_size_ -
                              GetInstructionSetPointerSize(cu_->instruction_set));

  NewLIR0(kPseudoMethodEntry);
  /* Spill core callee saves */
  SpillCoreRegs();
  SpillFPRegs();
  if (!skip_overflow_check) {
    class StackOverflowSlowPath : public LIRSlowPath {
     public:
      StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace)
          : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr), sp_displace_(sp_displace) {
      }
      void Compile() OVERRIDE {
        m2l_->ResetRegPool();
        m2l_->ResetDefTracking();
        GenerateTargetLabel(kPseudoThrowTarget);
        m2l_->OpRegImm(kOpAdd, rs_rX86_SP, sp_displace_);
        m2l_->ClobberCallerSave();
        // The frame was already removed above (sp_displace_ added back to the
        // stack pointer), so call the throw entrypoint without marking a
        // safepoint PC or using a link register.
        m2l_->CallHelper(RegStorage::InvalidReg(), kQuickThrowStackOverflow,
                         false /* MarkSafepointPC */, false /* UseLink */);
      }

     private:
      const size_t sp_displace_;
    };
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
      // TODO: for large frames we should do something like:
      // spill ebp
      // lea ebp, [esp + frame_size]
      // cmp ebp, fs:[stack_end_]
      // jcc stack_overflow_exception
      // mov esp, ebp
      // in case a signal comes in that's not using an alternate signal stack and the large frame
      // may have moved us outside of the reserved area at the end of the stack.
      // cmp rs_rX86_SP, fs:[stack_end_]; jcc throw_slowpath
      if (cu_->target64) {
        OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<8>());
      } else {
        OpRegThreadMem(kOpCmp, rs_rX86_SP, Thread::StackEndOffset<4>());
      }
      LIR* branch = OpCondBranch(kCondUlt, nullptr);
      AddSlowPath(
          new(arena_)StackOverflowSlowPath(this, branch,
                                           frame_size_ -
                                           GetInstructionSetPointerSize(cu_->instruction_set)));
    }
  }

  FlushIns(ArgLocs, rl_method);

  if (base_of_code_ != nullptr) {
    RegStorage method_start = TargetPtrReg(kArg0);
    // We have been asked to save the address of the method start for later use.
    setup_method_address_[0] = NewLIR1(kX86StartOfMethod, method_start.GetReg());
    int displacement = SRegOffset(base_of_code_->s_reg_low);
    // Native pointer - must be natural word size.
    setup_method_address_[1] = StoreBaseDisp(rs_rX86_SP, displacement, method_start,
                                             cu_->target64 ? k64 : k32, kNotVolatile);
  }

  FreeTemp(rs_rX86_ARG0);
  FreeTemp(rs_rX86_ARG1);
  FreeTemp(rs_rX86_ARG2);
}

void X86Mir2Lir::GenExitSequence() {
  /*
   * In the exit path, rX86_RET0/rX86_RET1 are live - make sure they aren't
   * allocated by the register utilities as temps.
   */
  LockTemp(rs_rX86_RET0);
  LockTemp(rs_rX86_RET1);

  NewLIR0(kPseudoMethodExit);
  UnSpillCoreRegs();
  UnSpillFPRegs();
  /* Remove frame except for return address */
  stack_increment_ = OpRegImm(kOpAdd, rs_rX86_SP,
                              frame_size_ - GetInstructionSetPointerSize(cu_->instruction_set));
  NewLIR0(kX86Ret);
}

void X86Mir2Lir::GenSpecialExitSequence() {
  NewLIR0(kX86Ret);
}

void X86Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return;
  }
  // Implicit null pointer check.
  // test eax,[arg1+0]
  NewLIR3(kX86Test32RM, rs_rAX.GetReg(), reg.GetReg(), 0);
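  // If reg is null this load faults, and the fault handler converts the
  // fault at this PC into a NullPointerException.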
  MarkPossibleNullPointerException(opt_flags);
}

}  // namespace art