/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Arm64 ISA. */

#include "arm64_lir.h"
#include "codegen_arm64.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "gc/accounting/card_table.h"
#include "entrypoints/quick/quick_entrypoints.h"

namespace art {

/*
 * The sparse table in the literal pool is an array of <key, displacement>
 * pairs. We load each entry's key and displacement together using ldp.
 * The test loop will look something like:
 *
 *   adr   r_base, <table>
 *   ldr   r_val, [rA64_SP, v_reg_off]
 *   mov   r_idx, #table_size
 * loop:
 *   cbz   r_idx, quit
 *   ldp   r_key, r_disp, [r_base], #8
 *   sub   r_idx, #1
 *   cmp   r_val, r_key
 *   b.ne  loop
 *   adr   r_base, #0        ; This is the instruction from which we compute displacements
 *   add   r_base, r_disp
 *   br    r_base
 * quit:
 */
void Arm64Mir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
                                   RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpSparseSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable* tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint32_t size = table[1];
  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);
  RegStorage r_base = AllocTempWide();
  // Allocate key and disp temps.
  RegStorage r_key = AllocTemp();
  RegStorage r_disp = AllocTemp();
  // Materialize a pointer to the switch table
  NewLIR3(kA64Adr2xd, r_base.GetReg(), 0, WrapPointer(tab_rec));
  // Set up r_idx
  RegStorage r_idx = AllocTemp();
  LoadConstant(r_idx, size);

  // Entry of loop.
  LIR* loop_entry = NewLIR0(kPseudoTargetLabel);
  LIR* branch_out = NewLIR2(kA64Cbz2rt, r_idx.GetReg(), 0);

  // Load next key/disp.
  NewLIR4(kA64LdpPost4rrXD, r_key.GetReg(), r_disp.GetReg(), r_base.GetReg(), 2);
  OpRegRegImm(kOpSub, r_idx, r_idx, 1);

  // Go to next case, if key does not match.
  OpRegReg(kOpCmp, r_key, rl_src.reg);
  OpCondBranch(kCondNe, loop_entry);

  // Key does match: branch to case label.
  LIR* switch_label = NewLIR3(kA64Adr2xd, r_base.GetReg(), 0, -1);
  tab_rec->anchor = switch_label;

  // Add displacement to base branch address and go!
  OpRegRegRegExtend(kOpAdd, r_base, r_base, As64BitReg(r_disp), kA64Sxtw, 0U);
  NewLIR1(kA64Br1x, r_base.GetReg());

  // Loop exit label.
  LIR* loop_exit = NewLIR0(kPseudoTargetLabel);
  branch_out->target = loop_exit;
}

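/*
 * Rough shape of the code GenPackedSwitch below emits (a sketch reconstructed
 * from the calls it makes; register names are illustrative only):
 *
 *   adr   x_base, <table>
 *   sub   w_key, w_val, #low_key     ; only emitted when low_key != 0
 *   cmp   w_key, #(size - 1)
 *   b.hi  done                       ; out of range: fall through the switch
 *   ldr   w_disp, [x_base, x_key, lsl #2]
 *   adr   x_branch, #0               ; anchor from which displacements are computed
 *   add   x_branch, x_branch, w_disp, sxtw
 *   br    x_branch
 * done:
 */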
void Arm64Mir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
                                   RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpPackedSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable* tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint32_t size = table[1];
  tab_rec->targets =
      static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);
  RegStorage table_base = AllocTempWide();
  // Materialize a pointer to the switch table
  NewLIR3(kA64Adr2xd, table_base.GetReg(), 0, WrapPointer(tab_rec));
  int low_key = s4FromSwitchData(&table[2]);
  RegStorage key_reg;
  // Remove the bias, if necessary
  if (low_key == 0) {
    key_reg = rl_src.reg;
  } else {
    key_reg = AllocTemp();
    OpRegRegImm(kOpSub, key_reg, rl_src.reg, low_key);
  }
  // Bounds check - if < 0 or >= size, continue following switch
  OpRegImm(kOpCmp, key_reg, size - 1);
  LIR* branch_over = OpCondBranch(kCondHi, NULL);

  // Load the displacement from the switch table
  RegStorage disp_reg = AllocTemp();
  LoadBaseIndexed(table_base, As64BitReg(key_reg), disp_reg, 2, k32);

  // Get base branch address.
  RegStorage branch_reg = AllocTempWide();
  LIR* switch_label = NewLIR3(kA64Adr2xd, branch_reg.GetReg(), 0, -1);
  tab_rec->anchor = switch_label;

  // Add displacement to base branch address and go!
  OpRegRegRegExtend(kOpAdd, branch_reg, branch_reg, As64BitReg(disp_reg), kA64Sxtw, 0U);
  NewLIR1(kA64Br1x, branch_reg.GetReg());

  // branch_over target here
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
}

/*
 * Array data table format:
 *  ushort ident = 0x0300   magic value
 *  ushort width            width of each element in the table
 *  uint   size             number of elements in the table
 *  ubyte  data[size*width] table of data values (may contain a single-byte
 *                          padding at the end)
 *
 * Total size is 4+(width * size + 1)/2 16-bit code units.
 */
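/*
 * For illustration (worked arithmetic, not part of the format spec): the table
 * for `new int[] {1, 2, 3}` has width = 4 and size = 3, so the payload is
 * 12 bytes of data and the total is 4 + (4 * 3 + 1) / 2 = 10 16-bit code units.
 */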
void Arm64Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  // Add the table to the list - we'll process it later
  FillArrayData* tab_rec =
      static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint16_t width = tab_rec->table[1];
  uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
  tab_rec->size = (size * width) + 8;

  fill_array_data_.Insert(tab_rec);

  // Making a call - use explicit registers
  FlushAllRegs();  /* Everything to home location */
  LoadValueDirectFixed(rl_src, rs_x0);
  LoadWordDisp(rs_xSELF, QUICK_ENTRYPOINT_OFFSET(8, pHandleFillArrayData).Int32Value(),
               rs_xLR);
  // Materialize a pointer to the fill data image
  NewLIR3(kA64Adr2xd, rx1, 0, WrapPointer(tab_rec));
  ClobberCallerSave();
  LIR* call_inst = OpReg(kOpBlx, rs_xLR);
  MarkSafepointPC(call_inst);
}

/*
 * Handle unlocked -> thin locked transition inline or else call out to quick entrypoint. For more
 * details see monitor.cc.
 */
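/*
 * Rough shape of the inline fast path emitted below (a sketch; register roles
 * follow the comments at the top of GenMonitorEnter):
 *
 *   cbz   x0, slow_path       ; explicit null check, when required
 *   ldr   w1, [xSELF, #thin_lock_id_offset]
 *   add   x2, x0, #monitor_offset
 *   ldxr  w3, [x2]            ; load-exclusive the lock word
 *   cbnz  w3, slow_path       ; not unlocked -> expensive route
 *   stxr  w3, w1, [x2]        ; try to install our thread id
 *   cbz   w3, done            ; stxr status 0 means the store succeeded
 * slow_path:
 *   ...call artLockObjectFromCode...
 * done:
 *   <load-any barrier>        ; GenMemBarrier(kLoadAny)
 */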
void Arm64Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  // x0/w0 = object
  // w1 = thin lock thread id
  // x2 = address of lock word
  // w3 = lock word / store failure
  // TUNING: How much performance do we gain by inlining this, given that we have already
  // flushed all registers?
  FlushAllRegs();
  LoadValueDirectFixed(rl_src, rs_x0);  // = TargetReg(kArg0, kRef)
  LockCallTemps();  // Prepare for explicit register usage
  LIR* null_check_branch = nullptr;
  if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
    null_check_branch = nullptr;  // No null check.
  } else {
    // If the null-check fails, it is handled by the slow path to reduce exception-related
    // meta-data.
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
    }
  }
  Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
  OpRegRegImm(kOpAdd, rs_x2, rs_x0, mirror::Object::MonitorOffset().Int32Value());
  NewLIR2(kA64Ldxr2rX, rw3, rx2);
  MarkPossibleNullPointerException(opt_flags);
  // Test the loaded lock word: zero means unlocked.
  LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w3, 0, NULL);
  NewLIR3(kA64Stxr3wrX, rw3, rw1, rx2);
  // stxr writes its status to w3: zero means the store succeeded.
  LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, NULL);

  LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
  not_unlocked_branch->target = slow_path_target;
  if (null_check_branch != nullptr) {
    null_check_branch->target = slow_path_target;
  }
  // TODO: move to a slow path.
  // Go expensive route - artLockObjectFromCode(obj);
  LoadWordDisp(rs_xSELF, QUICK_ENTRYPOINT_OFFSET(8, pLockObject).Int32Value(), rs_xLR);
  ClobberCallerSave();
  LIR* call_inst = OpReg(kOpBlx, rs_xLR);
  MarkSafepointPC(call_inst);

  LIR* success_target = NewLIR0(kPseudoTargetLabel);
  lock_success_branch->target = success_target;
  GenMemBarrier(kLoadAny);
}

/*
 * Handle thin locked -> unlocked transition inline or else call out to quick entrypoint. For more
 * details see monitor.cc. Note the code below doesn't use ldxr/stxr as the code holds the lock
 * and can only give away ownership if it is suspended.
 */
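/*
 * Rough shape of the inline fast path emitted below (a sketch): a plain
 * load/store pair suffices here because the owning thread cannot race with
 * itself while it holds the lock.
 *
 *   cbz   x0, slow_path       ; explicit null check, when required
 *   ldr   w1, [xSELF, #thin_lock_id_offset]
 *   ldr   w2, [x0, #monitor_offset]
 *   cmp   w1, w2
 *   b.ne  slow_path           ; fat lock, or we are not the owner
 *   <any-store barrier>       ; GenMemBarrier(kAnyStore)
 *   str   wzr, [x0, #monitor_offset]
 */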
void Arm64Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  // x0/w0 = object
  // w1 = thin lock thread id
  // w2 = lock word
  // TUNING: How much performance do we gain by inlining this, given that we have already
  // flushed all registers?
  FlushAllRegs();
  LoadValueDirectFixed(rl_src, rs_x0);  // Get obj
  LockCallTemps();  // Prepare for explicit register usage
  LIR* null_check_branch = nullptr;
  if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
    null_check_branch = nullptr;  // No null check.
  } else {
    // If the null-check fails, it is handled by the slow path to reduce exception-related
    // meta-data.
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
      null_check_branch = OpCmpImmBranch(kCondEq, rs_x0, 0, NULL);
    }
  }
  Load32Disp(rs_xSELF, Thread::ThinLockIdOffset<8>().Int32Value(), rs_w1);
  Load32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_w2);
  MarkPossibleNullPointerException(opt_flags);
  LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_w1, rs_w2, NULL);
  GenMemBarrier(kAnyStore);
  Store32Disp(rs_x0, mirror::Object::MonitorOffset().Int32Value(), rs_wzr);
  LIR* unlock_success_branch = OpUnconditionalBranch(NULL);

  LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
  slow_unlock_branch->target = slow_path_target;
  if (null_check_branch != nullptr) {
    null_check_branch->target = slow_path_target;
  }
  // TODO: move to a slow path.
  // Go expensive route - artUnlockObjectFromCode(obj);
  LoadWordDisp(rs_xSELF, QUICK_ENTRYPOINT_OFFSET(8, pUnlockObject).Int32Value(), rs_xLR);
  ClobberCallerSave();
  LIR* call_inst = OpReg(kOpBlx, rs_xLR);
  MarkSafepointPC(call_inst);

  LIR* success_target = NewLIR0(kPseudoTargetLabel);
  unlock_success_branch->target = success_target;
}

void Arm64Mir2Lir::GenMoveException(RegLocation rl_dest) {
  int ex_offset = Thread::ExceptionOffset<8>().Int32Value();
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
  LoadRefDisp(rs_xSELF, ex_offset, rl_result.reg, kNotVolatile);
  StoreRefDisp(rs_xSELF, ex_offset, rs_xzr, kNotVolatile);
  StoreValue(rl_dest, rl_result);
}

/*
 * Mark garbage collection card. Skip if the value we're storing is null.
 */
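/*
 * Rough shape of the sequence emitted below (a sketch): each card covers
 * 2^kCardShift bytes of heap, and a card is dirtied by storing the low byte of
 * the (biased) card table base, which is chosen to equal the dirty-card value.
 *
 *   cbz   w_val, done
 *   ldr   x_card_base, [xSELF, #card_table_offset]
 *   lsr   x_card_no, x_tgt_addr, #kCardShift
 *   strb  w_card_base, [x_card_base, x_card_no]
 * done:
 */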
void Arm64Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
  RegStorage reg_card_base = AllocTempWide();
  RegStorage reg_card_no = AllocTempWide();  // Needs to be wide as addr is ref=64b
  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
  LoadWordDisp(rs_xSELF, Thread::CardTableOffset<8>().Int32Value(), reg_card_base);
  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
  // TODO(Arm64): generate "strb wB, [xB, wC, uxtw]" rather than "strb wB, [xB, xC]"?
  StoreBaseIndexed(reg_card_base, reg_card_no, As32BitReg(reg_card_base),
                   0, kUnsignedByte);
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
  FreeTemp(reg_card_base);
  FreeTemp(reg_card_no);
}

void Arm64Mir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * On entry, x0 to x7 are live. Let the register allocation
   * mechanism know so it doesn't try to use any of them when
   * expanding the frame or flushing.
   * Reserve x8 & x9 for temporaries.
   */
  LockTemp(rs_x0);
  LockTemp(rs_x1);
  LockTemp(rs_x2);
  LockTemp(rs_x3);
  LockTemp(rs_x4);
  LockTemp(rs_x5);
  LockTemp(rs_x6);
  LockTemp(rs_x7);
  LockTemp(rs_x8);
  LockTemp(rs_x9);

  /*
   * We can safely skip the stack overflow check if we're
   * a leaf *and* our frame size < fudge factor.
   */
  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !IsLargeFrame(frame_size_, kArm64);

  NewLIR0(kPseudoMethodEntry);

  constexpr size_t kStackOverflowReservedUsableBytes = kArm64StackOverflowReservedBytes -
      Thread::kStackOverflowSignalReservedBytes;
  const bool large_frame = static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes;
  const int spill_count = num_core_spills_ + num_fp_spills_;
  const int spill_size = (spill_count * kArm64PointerSize + 15) & ~0xf;  // SP 16 byte alignment.
  const int frame_size_without_spills = frame_size_ - spill_size;
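  // For illustration (hypothetical numbers): with 5 core spills and no FP spills,
  // spill_count * 8 = 40 bytes, which the mask above rounds up to spill_size = 48
  // so that SP stays 16-byte aligned.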

  if (!skip_overflow_check) {
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
      if (!large_frame) {
        // Load stack limit
        LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_x9);
      }
    } else {
      // TODO(Arm64) Implement implicit checks.
      // Implicit stack overflow check.
      // Generate a load from [sp, #-framesize]. If this is in the stack
      // redzone we will get a segmentation fault.
      // Load32Disp(rs_wSP, -Thread::kStackOverflowReservedBytes, rs_wzr);
      // MarkPossibleStackOverflowException();
      LOG(FATAL) << "Implicit stack overflow checks not implemented.";
    }
  }

  if (frame_size_ > 0) {
    OpRegImm64(kOpSub, rs_sp, spill_size);
  }

  /* Need to spill any FP regs? */
  if (fp_spill_mask_) {
    int spill_offset = spill_size - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
    SpillFPRegs(rs_sp, spill_offset, fp_spill_mask_);
  }

  /* Spill core callee saves. */
  if (core_spill_mask_) {
    int spill_offset = spill_size - kArm64PointerSize * num_core_spills_;
    SpillCoreRegs(rs_sp, spill_offset, core_spill_mask_);
  }

  if (!skip_overflow_check) {
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks()) {
      class StackOverflowSlowPath : public LIRSlowPath {
       public:
        StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, size_t sp_displace) :
            LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr),
            sp_displace_(sp_displace) {
        }
        void Compile() OVERRIDE {
          m2l_->ResetRegPool();
          m2l_->ResetDefTracking();
          GenerateTargetLabel(kPseudoThrowTarget);
          // Unwinds stack.
          m2l_->OpRegImm(kOpAdd, rs_sp, sp_displace_);
          m2l_->ClobberCallerSave();
          ThreadOffset<8> func_offset = QUICK_ENTRYPOINT_OFFSET(8, pThrowStackOverflow);
          m2l_->LockTemp(rs_x8);
          m2l_->LoadWordDisp(rs_xSELF, func_offset.Int32Value(), rs_x8);
          m2l_->NewLIR1(kA64Br1x, rs_x8.GetReg());
          m2l_->FreeTemp(rs_x8);
        }

       private:
        const size_t sp_displace_;
      };

      if (large_frame) {
        // Compare the expected SP against the bottom of the stack.
        // Branch to the throw target if there is not enough room.
        OpRegRegImm(kOpSub, rs_x9, rs_sp, frame_size_without_spills);
        LoadWordDisp(rs_xSELF, Thread::StackEndOffset<8>().Int32Value(), rs_x8);
        LIR* branch = OpCmpBranch(kCondUlt, rs_x9, rs_x8, nullptr);
        AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, spill_size));
        OpRegCopy(rs_sp, rs_x9);  // Establish stack after checks.
      } else {
        /*
         * If the frame is small enough, the space that remains is guaranteed to be
         * large enough to handle signals on the user stack.
         * Establishes stack before checks.
         */
        OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size_without_spills);
        LIR* branch = OpCmpBranch(kCondUlt, rs_sp, rs_x9, nullptr);
        AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, frame_size_));
      }
    } else {
      OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
    }
  } else {
    OpRegImm(kOpSub, rs_sp, frame_size_without_spills);
  }

  FlushIns(ArgLocs, rl_method);

  FreeTemp(rs_x0);
  FreeTemp(rs_x1);
  FreeTemp(rs_x2);
  FreeTemp(rs_x3);
  FreeTemp(rs_x4);
  FreeTemp(rs_x5);
  FreeTemp(rs_x6);
  FreeTemp(rs_x7);
  FreeTemp(rs_x8);
  FreeTemp(rs_x9);
}

void Arm64Mir2Lir::GenExitSequence() {
  /*
   * In the exit path, x0/x1 are live - make sure they aren't
   * allocated by the register utilities as temps.
   */
  LockTemp(rs_x0);
  LockTemp(rs_x1);

  NewLIR0(kPseudoMethodExit);

  // Restore saves and drop stack frame.
  // 2 versions:
  //
  // 1. (Original): Try to address directly, then drop the whole frame.
  //    Limitation: the ldp immediate offset is a 7-bit signed (scaled) value.
  //    There should have been a DCHECK!
  //
  // 2. (New): Drop the non-save part. Then do similar to original, which is now guaranteed to
  //    be in range. Then drop the rest.
  //
  // TODO: In methods with few spills but a huge frame, it would be better to do non-immediate
  //       loads in variant 1.

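  // For illustration (hypothetical numbers): frame_size_ = 592 with 6 core spills and
  // no FP spills takes variant 2: drop = 592 - 8 * 6 = 544, which is already 16-byte
  // aligned. After "add sp, sp, #544" the saves sit at offset 0, and the final
  // adjustment is 592 - 544 = 48 bytes.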
  if (frame_size_ <= 504) {
    // "Magic" constant, 63 (max signed 7b) * 8. Do variant 1.
    // Could be tighter, as the last load is below frame_size_ offset.
    if (fp_spill_mask_) {
      int spill_offset = frame_size_ - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
      UnSpillFPRegs(rs_sp, spill_offset, fp_spill_mask_);
    }
    if (core_spill_mask_) {
      int spill_offset = frame_size_ - kArm64PointerSize * num_core_spills_;
      UnSpillCoreRegs(rs_sp, spill_offset, core_spill_mask_);
    }

    OpRegImm64(kOpAdd, rs_sp, frame_size_);
  } else {
    // Second variant. Drop the frame part.
    int drop = 0;
    // TODO: Always use the first formula, as num_fp_spills would be zero?
    if (fp_spill_mask_) {
      drop = frame_size_ - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
    } else {
      drop = frame_size_ - kArm64PointerSize * num_core_spills_;
    }

    // Drop needs to be 16B aligned, so that SP stays aligned.
    drop = RoundDown(drop, 16);

    OpRegImm64(kOpAdd, rs_sp, drop);

    if (fp_spill_mask_) {
      int offset = frame_size_ - drop - kArm64PointerSize * (num_fp_spills_ + num_core_spills_);
      UnSpillFPRegs(rs_sp, offset, fp_spill_mask_);
    }
    if (core_spill_mask_) {
      int offset = frame_size_ - drop - kArm64PointerSize * num_core_spills_;
      UnSpillCoreRegs(rs_sp, offset, core_spill_mask_);
    }

    OpRegImm64(kOpAdd, rs_sp, frame_size_ - drop);
  }

  // Finally return.
  NewLIR0(kA64Ret);
}

void Arm64Mir2Lir::GenSpecialExitSequence() {
  NewLIR0(kA64Ret);
}

}  // namespace art