blob: b25b7e6c7ab81d2709cfd8e7927bf45c04ab5274 [file] [log] [blame]
buzbeeefc63692012-11-14 16:31:52 -08001/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17/* This file contains codegen for the Mips ISA */
18
19#include "oat/runtime/oat_support_entrypoints.h"
buzbee1bc37c62012-11-20 13:35:41 -080020#include "mips_lir.h"
21#include "../codegen_util.h"
22#include "../ralloc_util.h"
buzbeeefc63692012-11-14 16:31:52 -080023
24namespace art {
25
buzbeefa57c472012-11-21 12:06:18 -080026void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
27 SpecialCaseHandler special_case)
buzbeeefc63692012-11-14 16:31:52 -080028{
29 // TODO
30}
31
32/*
33 * The lack of pc-relative loads on Mips presents somewhat of a challenge
34 * for our PIC switch table strategy. To materialize the current location
35 * we'll do a dummy JAL and reference our tables using r_RA as the
36 * base register. Note that r_RA will be used both as the base to
37 * locate the switch table data and as the reference base for the switch
38 * target offsets stored in the table. We'll use a special pseudo-instruction
39 * to represent the jal and trigger the construction of the
40 * switch table offsets (which will happen after final assembly and all
41 * labels are fixed).
42 *
43 * The test loop will look something like:
44 *
buzbeefa57c472012-11-21 12:06:18 -080045 * ori rEnd, r_ZERO, #table_size ; size in bytes
buzbeeefc63692012-11-14 16:31:52 -080046 * jal BaseLabel ; stores "return address" (BaseLabel) in r_RA
47 * nop ; opportunistically fill
48 * BaseLabel:
49 * addiu rBase, r_RA, <table> - <BaseLabel> ; table relative to BaseLabel
 50 * addu rEnd, rEnd, rBase ; end of table
buzbeefa57c472012-11-21 12:06:18 -080051 * lw r_val, [rSP, v_reg_off] ; Test Value
buzbeeefc63692012-11-14 16:31:52 -080052 * loop:
53 * beq rBase, rEnd, done
buzbeefa57c472012-11-21 12:06:18 -080054 * lw r_key, 0(rBase)
buzbeeefc63692012-11-14 16:31:52 -080055 * addu rBase, 8
buzbeefa57c472012-11-21 12:06:18 -080056 * bne r_val, r_key, loop
57 * lw r_disp, -4(rBase)
58 * addu r_RA, r_disp
buzbeeefc63692012-11-14 16:31:52 -080059 * jr r_RA
60 * done:
61 *
62 */
buzbeefa57c472012-11-21 12:06:18 -080063void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
64 RegLocation rl_src)
buzbeeefc63692012-11-14 16:31:52 -080065{
buzbeefa57c472012-11-21 12:06:18 -080066 const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
67 if (cu->verbose) {
buzbee52a77fc2012-11-20 19:50:46 -080068 DumpSparseSwitchTable(table);
buzbeeefc63692012-11-14 16:31:52 -080069 }
70 // Add the table to the list - we'll process it later
buzbeefa57c472012-11-21 12:06:18 -080071 SwitchTable *tab_rec =
72 static_cast<SwitchTable*>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
73 tab_rec->table = table;
74 tab_rec->vaddr = cu->current_dalvik_offset;
buzbeeefc63692012-11-14 16:31:52 -080075 int elements = table[1];
buzbeefa57c472012-11-21 12:06:18 -080076 tab_rec->targets =
77 static_cast<LIR**>(NewMem(cu, elements * sizeof(LIR*), true, kAllocLIR));
78 InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
buzbeeefc63692012-11-14 16:31:52 -080079
80 // The table is composed of 8-byte key/disp pairs
buzbeefa57c472012-11-21 12:06:18 -080081 int byte_size = elements * 8;
buzbeeefc63692012-11-14 16:31:52 -080082
buzbeefa57c472012-11-21 12:06:18 -080083 int size_hi = byte_size >> 16;
84 int size_lo = byte_size & 0xffff;
buzbeeefc63692012-11-14 16:31:52 -080085
buzbeefa57c472012-11-21 12:06:18 -080086 int rEnd = AllocTemp(cu);
87 if (size_hi) {
88 NewLIR2(cu, kMipsLui, rEnd, size_hi);
buzbeeefc63692012-11-14 16:31:52 -080089 }
90 // Must prevent code motion for the curr pc pair
buzbeefa57c472012-11-21 12:06:18 -080091 GenBarrier(cu); // Scheduling barrier
92 NewLIR0(cu, kMipsCurrPC); // Really a jal to .+8
buzbeeefc63692012-11-14 16:31:52 -080093 // Now, fill the branch delay slot
buzbeefa57c472012-11-21 12:06:18 -080094 if (size_hi) {
95 NewLIR3(cu, kMipsOri, rEnd, rEnd, size_lo);
buzbeeefc63692012-11-14 16:31:52 -080096 } else {
buzbeefa57c472012-11-21 12:06:18 -080097 NewLIR3(cu, kMipsOri, rEnd, r_ZERO, size_lo);
buzbeeefc63692012-11-14 16:31:52 -080098 }
buzbeefa57c472012-11-21 12:06:18 -080099 GenBarrier(cu); // Scheduling barrier
buzbeeefc63692012-11-14 16:31:52 -0800100
101 // Construct BaseLabel and set up table base register
buzbeefa57c472012-11-21 12:06:18 -0800102 LIR* base_label = NewLIR0(cu, kPseudoTargetLabel);
buzbeeefc63692012-11-14 16:31:52 -0800103 // Remember base label so offsets can be computed later
buzbeefa57c472012-11-21 12:06:18 -0800104 tab_rec->anchor = base_label;
105 int rBase = AllocTemp(cu);
106 NewLIR4(cu, kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
107 reinterpret_cast<uintptr_t>(tab_rec));
108 OpRegRegReg(cu, kOpAdd, rEnd, rEnd, rBase);
buzbeeefc63692012-11-14 16:31:52 -0800109
110 // Grab switch test value
buzbeefa57c472012-11-21 12:06:18 -0800111 rl_src = LoadValue(cu, rl_src, kCoreReg);
buzbeeefc63692012-11-14 16:31:52 -0800112
113 // Test loop
buzbeefa57c472012-11-21 12:06:18 -0800114 int r_key = AllocTemp(cu);
115 LIR* loop_label = NewLIR0(cu, kPseudoTargetLabel);
116 LIR* exit_branch = OpCmpBranch(cu , kCondEq, rBase, rEnd, NULL);
117 LoadWordDisp(cu, rBase, 0, r_key);
118 OpRegImm(cu, kOpAdd, rBase, 8);
119 OpCmpBranch(cu, kCondNe, rl_src.low_reg, r_key, loop_label);
120 int r_disp = AllocTemp(cu);
121 LoadWordDisp(cu, rBase, -4, r_disp);
122 OpRegRegReg(cu, kOpAdd, r_RA, r_RA, r_disp);
123 OpReg(cu, kOpBx, r_RA);
buzbeeefc63692012-11-14 16:31:52 -0800124
125 // Loop exit
buzbeefa57c472012-11-21 12:06:18 -0800126 LIR* exit_label = NewLIR0(cu, kPseudoTargetLabel);
127 exit_branch->target = exit_label;
buzbeeefc63692012-11-14 16:31:52 -0800128}
129
130/*
131 * Code pattern will look something like:
132 *
buzbeefa57c472012-11-21 12:06:18 -0800133 * lw r_val
buzbeeefc63692012-11-14 16:31:52 -0800134 * jal BaseLabel ; stores "return address" (BaseLabel) in r_RA
135 * nop ; opportunistically fill
buzbeefa57c472012-11-21 12:06:18 -0800136 * [subiu r_val, bias] ; Remove bias if low_val != 0
buzbeeefc63692012-11-14 16:31:52 -0800137 * bound check -> done
buzbeefa57c472012-11-21 12:06:18 -0800138 * lw r_disp, [r_RA, r_val]
139 * addu r_RA, r_disp
buzbeeefc63692012-11-14 16:31:52 -0800140 * jr r_RA
141 * done:
142 */
buzbeefa57c472012-11-21 12:06:18 -0800143void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
144 RegLocation rl_src)
buzbeeefc63692012-11-14 16:31:52 -0800145{
buzbeefa57c472012-11-21 12:06:18 -0800146 const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
147 if (cu->verbose) {
buzbee52a77fc2012-11-20 19:50:46 -0800148 DumpPackedSwitchTable(table);
buzbeeefc63692012-11-14 16:31:52 -0800149 }
150 // Add the table to the list - we'll process it later
buzbeefa57c472012-11-21 12:06:18 -0800151 SwitchTable *tab_rec =
152 static_cast<SwitchTable*>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
153 tab_rec->table = table;
154 tab_rec->vaddr = cu->current_dalvik_offset;
buzbeeefc63692012-11-14 16:31:52 -0800155 int size = table[1];
buzbeefa57c472012-11-21 12:06:18 -0800156 tab_rec->targets = static_cast<LIR**>(NewMem(cu, size * sizeof(LIR*), true, kAllocLIR));
157 InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));
buzbeeefc63692012-11-14 16:31:52 -0800158
159 // Get the switch value
buzbeefa57c472012-11-21 12:06:18 -0800160 rl_src = LoadValue(cu, rl_src, kCoreReg);
buzbeeefc63692012-11-14 16:31:52 -0800161
162 // Prepare the bias. If too big, handle 1st stage here
buzbeefa57c472012-11-21 12:06:18 -0800163 int low_key = s4FromSwitchData(&table[2]);
164 bool large_bias = false;
165 int r_key;
166 if (low_key == 0) {
167 r_key = rl_src.low_reg;
168 } else if ((low_key & 0xffff) != low_key) {
169 r_key = AllocTemp(cu);
170 LoadConstant(cu, r_key, low_key);
171 large_bias = true;
buzbeeefc63692012-11-14 16:31:52 -0800172 } else {
buzbeefa57c472012-11-21 12:06:18 -0800173 r_key = AllocTemp(cu);
buzbeeefc63692012-11-14 16:31:52 -0800174 }
175
176 // Must prevent code motion for the curr pc pair
buzbeefa57c472012-11-21 12:06:18 -0800177 GenBarrier(cu);
178 NewLIR0(cu, kMipsCurrPC); // Really a jal to .+8
buzbeeefc63692012-11-14 16:31:52 -0800179 // Now, fill the branch delay slot with bias strip
buzbeefa57c472012-11-21 12:06:18 -0800180 if (low_key == 0) {
181 NewLIR0(cu, kMipsNop);
buzbeeefc63692012-11-14 16:31:52 -0800182 } else {
buzbeefa57c472012-11-21 12:06:18 -0800183 if (large_bias) {
184 OpRegRegReg(cu, kOpSub, r_key, rl_src.low_reg, r_key);
buzbeeefc63692012-11-14 16:31:52 -0800185 } else {
buzbeefa57c472012-11-21 12:06:18 -0800186 OpRegRegImm(cu, kOpSub, r_key, rl_src.low_reg, low_key);
buzbeeefc63692012-11-14 16:31:52 -0800187 }
188 }
buzbeefa57c472012-11-21 12:06:18 -0800189 GenBarrier(cu); // Scheduling barrier
buzbeeefc63692012-11-14 16:31:52 -0800190
191 // Construct BaseLabel and set up table base register
buzbeefa57c472012-11-21 12:06:18 -0800192 LIR* base_label = NewLIR0(cu, kPseudoTargetLabel);
buzbeeefc63692012-11-14 16:31:52 -0800193 // Remember base label so offsets can be computed later
buzbeefa57c472012-11-21 12:06:18 -0800194 tab_rec->anchor = base_label;
buzbeeefc63692012-11-14 16:31:52 -0800195
196 // Bounds check - if < 0 or >= size continue following switch
buzbeefa57c472012-11-21 12:06:18 -0800197 LIR* branch_over = OpCmpImmBranch(cu, kCondHi, r_key, size-1, NULL);
buzbeeefc63692012-11-14 16:31:52 -0800198
199 // Materialize the table base pointer
buzbeefa57c472012-11-21 12:06:18 -0800200 int rBase = AllocTemp(cu);
201 NewLIR4(cu, kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
202 reinterpret_cast<uintptr_t>(tab_rec));
buzbeeefc63692012-11-14 16:31:52 -0800203
204 // Load the displacement from the switch table
buzbeefa57c472012-11-21 12:06:18 -0800205 int r_disp = AllocTemp(cu);
206 LoadBaseIndexed(cu, rBase, r_key, r_disp, 2, kWord);
buzbeeefc63692012-11-14 16:31:52 -0800207
208 // Add to r_AP and go
buzbeefa57c472012-11-21 12:06:18 -0800209 OpRegRegReg(cu, kOpAdd, r_RA, r_RA, r_disp);
210 OpReg(cu, kOpBx, r_RA);
buzbeeefc63692012-11-14 16:31:52 -0800211
buzbeefa57c472012-11-21 12:06:18 -0800212 /* branch_over target here */
213 LIR* target = NewLIR0(cu, kPseudoTargetLabel);
214 branch_over->target = target;
buzbeeefc63692012-11-14 16:31:52 -0800215}
216
217/*
218 * Array data table format:
219 * ushort ident = 0x0300 magic value
220 * ushort width width of each element in the table
221 * uint size number of elements in the table
222 * ubyte data[size*width] table of data values (may contain a single-byte
223 * padding at the end)
224 *
225 * Total size is 4+(width * size + 1)/2 16-bit code units.
226 */
buzbeefa57c472012-11-21 12:06:18 -0800227void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
228 RegLocation rl_src)
buzbeeefc63692012-11-14 16:31:52 -0800229{
buzbeefa57c472012-11-21 12:06:18 -0800230 const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
buzbeeefc63692012-11-14 16:31:52 -0800231 // Add the table to the list - we'll process it later
buzbeefa57c472012-11-21 12:06:18 -0800232 FillArrayData *tab_rec =
233 reinterpret_cast<FillArrayData*>(NewMem(cu, sizeof(FillArrayData), true, kAllocData));
234 tab_rec->table = table;
235 tab_rec->vaddr = cu->current_dalvik_offset;
236 uint16_t width = tab_rec->table[1];
237 uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
238 tab_rec->size = (size * width) + 8;
buzbeeefc63692012-11-14 16:31:52 -0800239
buzbeefa57c472012-11-21 12:06:18 -0800240 InsertGrowableList(cu, &cu->fill_array_data, reinterpret_cast<uintptr_t>(tab_rec));
buzbeeefc63692012-11-14 16:31:52 -0800241
242 // Making a call - use explicit registers
buzbeefa57c472012-11-21 12:06:18 -0800243 FlushAllRegs(cu); /* Everything to home location */
244 LockCallTemps(cu);
245 LoadValueDirectFixed(cu, rl_src, rMIPS_ARG0);
buzbeeefc63692012-11-14 16:31:52 -0800246
247 // Must prevent code motion for the curr pc pair
buzbeefa57c472012-11-21 12:06:18 -0800248 GenBarrier(cu);
249 NewLIR0(cu, kMipsCurrPC); // Really a jal to .+8
buzbeeefc63692012-11-14 16:31:52 -0800250 // Now, fill the branch delay slot with the helper load
buzbeefa57c472012-11-21 12:06:18 -0800251 int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode));
252 GenBarrier(cu); // Scheduling barrier
buzbeeefc63692012-11-14 16:31:52 -0800253
254 // Construct BaseLabel and set up table base register
buzbeefa57c472012-11-21 12:06:18 -0800255 LIR* base_label = NewLIR0(cu, kPseudoTargetLabel);
buzbeeefc63692012-11-14 16:31:52 -0800256
257 // Materialize a pointer to the fill data image
buzbeefa57c472012-11-21 12:06:18 -0800258 NewLIR4(cu, kMipsDelta, rMIPS_ARG1, 0, reinterpret_cast<uintptr_t>(base_label),
259 reinterpret_cast<uintptr_t>(tab_rec));
buzbeeefc63692012-11-14 16:31:52 -0800260
261 // And go...
buzbeefa57c472012-11-21 12:06:18 -0800262 ClobberCalleeSave(cu);
263 LIR* call_inst = OpReg(cu, kOpBlx, r_tgt); // ( array*, fill_data* )
264 MarkSafepointPC(cu, call_inst);
buzbeeefc63692012-11-14 16:31:52 -0800265}
266
267/*
268 * TODO: implement fast path to short-circuit thin-lock case
269 */
buzbeefa57c472012-11-21 12:06:18 -0800270void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
buzbeeefc63692012-11-14 16:31:52 -0800271{
buzbeefa57c472012-11-21 12:06:18 -0800272 FlushAllRegs(cu);
273 LoadValueDirectFixed(cu, rl_src, rMIPS_ARG0); // Get obj
274 LockCallTemps(cu); // Prepare for explicit register usage
275 GenNullCheck(cu, rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
buzbeeefc63692012-11-14 16:31:52 -0800276 // Go expensive route - artLockObjectFromCode(self, obj);
buzbeefa57c472012-11-21 12:06:18 -0800277 int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pLockObjectFromCode));
278 ClobberCalleeSave(cu);
279 LIR* call_inst = OpReg(cu, kOpBlx, r_tgt);
280 MarkSafepointPC(cu, call_inst);
buzbeeefc63692012-11-14 16:31:52 -0800281}
282
283/*
284 * TODO: implement fast path to short-circuit thin-lock case
285 */
buzbeefa57c472012-11-21 12:06:18 -0800286void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
buzbeeefc63692012-11-14 16:31:52 -0800287{
buzbeefa57c472012-11-21 12:06:18 -0800288 FlushAllRegs(cu);
289 LoadValueDirectFixed(cu, rl_src, rMIPS_ARG0); // Get obj
290 LockCallTemps(cu); // Prepare for explicit register usage
291 GenNullCheck(cu, rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
buzbeeefc63692012-11-14 16:31:52 -0800292 // Go expensive route - UnlockObjectFromCode(obj);
buzbeefa57c472012-11-21 12:06:18 -0800293 int r_tgt = LoadHelper(cu, ENTRYPOINT_OFFSET(pUnlockObjectFromCode));
294 ClobberCalleeSave(cu);
295 LIR* call_inst = OpReg(cu, kOpBlx, r_tgt);
296 MarkSafepointPC(cu, call_inst);
buzbeeefc63692012-11-14 16:31:52 -0800297}
298
299/*
300 * Mark garbage collection card. Skip if the value we're storing is null.
301 */
buzbeefa57c472012-11-21 12:06:18 -0800302void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
buzbeeefc63692012-11-14 16:31:52 -0800303{
buzbeefa57c472012-11-21 12:06:18 -0800304 int reg_card_base = AllocTemp(cu);
305 int reg_card_no = AllocTemp(cu);
306 LIR* branch_over = OpCmpImmBranch(cu, kCondEq, val_reg, 0, NULL);
307 LoadWordDisp(cu, rMIPS_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
308 OpRegRegImm(cu, kOpLsr, reg_card_no, tgt_addr_reg, CardTable::kCardShift);
309 StoreBaseIndexed(cu, reg_card_base, reg_card_no, reg_card_base, 0,
buzbeeefc63692012-11-14 16:31:52 -0800310 kUnsignedByte);
buzbeefa57c472012-11-21 12:06:18 -0800311 LIR* target = NewLIR0(cu, kPseudoTargetLabel);
312 branch_over->target = target;
313 FreeTemp(cu, reg_card_base);
314 FreeTemp(cu, reg_card_no);
buzbeeefc63692012-11-14 16:31:52 -0800315}
buzbeefa57c472012-11-21 12:06:18 -0800316void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
317 RegLocation rl_method)
buzbeeefc63692012-11-14 16:31:52 -0800318{
buzbeefa57c472012-11-21 12:06:18 -0800319 int spill_count = cu->num_core_spills + cu->num_fp_spills;
buzbeeefc63692012-11-14 16:31:52 -0800320 /*
321 * On entry, rMIPS_ARG0, rMIPS_ARG1, rMIPS_ARG2 & rMIPS_ARG3 are live. Let the register
322 * allocation mechanism know so it doesn't try to use any of them when
323 * expanding the frame or flushing. This leaves the utility
324 * code with a single temp: r12. This should be enough.
325 */
buzbeefa57c472012-11-21 12:06:18 -0800326 LockTemp(cu, rMIPS_ARG0);
327 LockTemp(cu, rMIPS_ARG1);
328 LockTemp(cu, rMIPS_ARG2);
329 LockTemp(cu, rMIPS_ARG3);
buzbeeefc63692012-11-14 16:31:52 -0800330
331 /*
332 * We can safely skip the stack overflow check if we're
333 * a leaf *and* our frame size < fudge factor.
334 */
buzbeefa57c472012-11-21 12:06:18 -0800335 bool skip_overflow_check = ((cu->attrs & METHOD_IS_LEAF) &&
336 (static_cast<size_t>(cu->frame_size) < Thread::kStackOverflowReservedBytes));
337 NewLIR0(cu, kPseudoMethodEntry);
338 int check_reg = AllocTemp(cu);
339 int new_sp = AllocTemp(cu);
340 if (!skip_overflow_check) {
buzbeeefc63692012-11-14 16:31:52 -0800341 /* Load stack limit */
buzbeefa57c472012-11-21 12:06:18 -0800342 LoadWordDisp(cu, rMIPS_SELF, Thread::StackEndOffset().Int32Value(), check_reg);
buzbeeefc63692012-11-14 16:31:52 -0800343 }
344 /* Spill core callee saves */
buzbeefa57c472012-11-21 12:06:18 -0800345 SpillCoreRegs(cu);
buzbeeefc63692012-11-14 16:31:52 -0800346 /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
buzbeefa57c472012-11-21 12:06:18 -0800347 DCHECK_EQ(cu->num_fp_spills, 0);
348 if (!skip_overflow_check) {
349 OpRegRegImm(cu, kOpSub, new_sp, rMIPS_SP, cu->frame_size - (spill_count * 4));
350 GenRegRegCheck(cu, kCondCc, new_sp, check_reg, kThrowStackOverflow);
351 OpRegCopy(cu, rMIPS_SP, new_sp); // Establish stack
buzbeeefc63692012-11-14 16:31:52 -0800352 } else {
buzbeefa57c472012-11-21 12:06:18 -0800353 OpRegImm(cu, kOpSub, rMIPS_SP, cu->frame_size - (spill_count * 4));
buzbeeefc63692012-11-14 16:31:52 -0800354 }
355
buzbeefa57c472012-11-21 12:06:18 -0800356 FlushIns(cu, ArgLocs, rl_method);
buzbeeefc63692012-11-14 16:31:52 -0800357
buzbeefa57c472012-11-21 12:06:18 -0800358 FreeTemp(cu, rMIPS_ARG0);
359 FreeTemp(cu, rMIPS_ARG1);
360 FreeTemp(cu, rMIPS_ARG2);
361 FreeTemp(cu, rMIPS_ARG3);
buzbeeefc63692012-11-14 16:31:52 -0800362}
363
buzbeefa57c472012-11-21 12:06:18 -0800364void GenExitSequence(CompilationUnit* cu)
buzbeeefc63692012-11-14 16:31:52 -0800365{
366 /*
367 * In the exit path, rMIPS_RET0/rMIPS_RET1 are live - make sure they aren't
368 * allocated by the register utilities as temps.
369 */
buzbeefa57c472012-11-21 12:06:18 -0800370 LockTemp(cu, rMIPS_RET0);
371 LockTemp(cu, rMIPS_RET1);
buzbeeefc63692012-11-14 16:31:52 -0800372
buzbeefa57c472012-11-21 12:06:18 -0800373 NewLIR0(cu, kPseudoMethodExit);
374 UnSpillCoreRegs(cu);
375 OpReg(cu, kOpBx, r_RA);
buzbeeefc63692012-11-14 16:31:52 -0800376}
377
378} // namespace art