/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex_file-inl.h"
#include "invoke_type.h"
#include "mirror/array.h"
#include "mirror/string.h"
#include "mir_to_lir-inl.h"
#include "oat/runtime/oat_support_entrypoints.h"
#include "x86/codegen_x86.h"

namespace art {

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets. Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * To save scheduling time, helper calls are broken into two parts: generation of
 * the helper target address, and the actual call to the helper. Because x86
 * has a memory call operation, part 1 is a NOP for x86. For other targets,
 * load arguments between the two parts.
 */
int Mir2Lir::CallHelperSetup(int helper_offset) {
  return (cu_->instruction_set == kX86) ? 0 : LoadHelper(helper_offset);
}

/* NOTE: if r_tgt is a temp, it will be freed following use */
LIR* Mir2Lir::CallHelper(int r_tgt, int helper_offset, bool safepoint_pc) {
  LIR* call_inst;
  if (cu_->instruction_set == kX86) {
    call_inst = OpThreadMem(kOpBlx, helper_offset);
  } else {
    call_inst = OpReg(kOpBlx, r_tgt);
    FreeTemp(r_tgt);
  }
  if (safepoint_pc) {
    MarkSafepointPC(call_inst);
  }
  return call_inst;
}
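
// Typical use of the two-part protocol above (illustrative sketch only): the
// target address is materialized first so non-x86 targets can hide its load
// latency behind the argument marshaling, e.g.:
//   int r_tgt = CallHelperSetup(helper_offset);  // no-op on x86
//   LoadConstant(TargetReg(kArg0), arg0);        // marshal arguments
//   ClobberCalleeSave();
//   CallHelper(r_tgt, helper_offset, safepoint_pc);
// The CallRuntimeHelper* wrappers below all follow this pattern.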

void Mir2Lir::CallRuntimeHelperImm(int helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperReg(int helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  OpRegCopy(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocation(int helper_offset, RegLocation arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg0.wide == 0) {
    LoadValueDirectFixed(arg0, TargetReg(kArg0));
  } else {
    LoadValueDirectWideFixed(arg0, TargetReg(kArg0), TargetReg(kArg1));
  }
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmImm(int helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadConstant(TargetReg(kArg0), arg0);
  LoadConstant(TargetReg(kArg1), arg1);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmRegLocation(int helper_offset, int arg0,
                                              RegLocation arg1, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg1.wide == 0) {
    LoadValueDirectFixed(arg1, TargetReg(kArg1));
  } else {
    LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
  }
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocationImm(int helper_offset, RegLocation arg0, int arg1,
                                              bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadValueDirectFixed(arg0, TargetReg(kArg0));
  LoadConstant(TargetReg(kArg1), arg1);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmReg(int helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  OpRegCopy(TargetReg(kArg1), arg1);
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegImm(int helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  OpRegCopy(TargetReg(kArg0), arg0);
  LoadConstant(TargetReg(kArg1), arg1);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethod(int helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(int helper_offset, RegLocation arg0,
                                                      RegLocation arg1, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg0.wide == 0) {
    LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
    if (arg1.wide == 0) {
      if (cu_->instruction_set == kMips) {
        LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
      } else {
        LoadValueDirectFixed(arg1, TargetReg(kArg1));
      }
    } else {
      if (cu_->instruction_set == kMips) {
        LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1),
                                 arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2));
      } else {
        LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
      }
    }
  } else {
    LoadValueDirectWideFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
                             arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
    if (arg1.wide == 0) {
      LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
    } else {
      LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2),
                               arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
    }
  }
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegReg(int helper_offset, int arg0, int arg1, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
  OpRegCopy(TargetReg(kArg0), arg0);
  OpRegCopy(TargetReg(kArg1), arg1);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegRegImm(int helper_offset, int arg0, int arg1,
                                         int arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
  OpRegCopy(TargetReg(kArg0), arg0);
  OpRegCopy(TargetReg(kArg1), arg1);
  LoadConstant(TargetReg(kArg2), arg2);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(int helper_offset,
                                                    int arg0, RegLocation arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadValueDirectFixed(arg2, TargetReg(kArg2));
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethodImm(int helper_offset, int arg0,
                                            int arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadConstant(TargetReg(kArg2), arg2);
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(int helper_offset,
                                                         int arg0, RegLocation arg1,
                                                         RegLocation arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadValueDirectFixed(arg1, TargetReg(kArg1));
  if (arg2.wide == 0) {
    LoadValueDirectFixed(arg2, TargetReg(kArg2));
  } else {
    LoadValueDirectWideFixed(arg2, TargetReg(kArg2), TargetReg(kArg3));
  }
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}

/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame. Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments
 * with one location record per word of argument.
 */
void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * Dummy up a RegLocation for the incoming Method*
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */
  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.low_reg = TargetReg(kArg0);
  rl_src.home = false;
  MarkLive(rl_src.low_reg, rl_src.s_reg_low);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush
  if (rl_method.location == kLocPhysReg) {
    StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0));
  }

  if (cu_->num_ins == 0) {
    return;
  }
  const int num_arg_regs = 3;
  static SpecialTargetRegister arg_regs[] = {kArg1, kArg2, kArg3};
  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
  /*
   * Copy incoming arguments to their proper home locations.
   * NOTE: an older version of dx had an issue in which
   * it would reuse static method argument registers.
   * This could result in the same Dalvik virtual register
   * being promoted to both core and fp regs. To account for this,
   * we only copy to the corresponding promoted physical register
   * if it matches the type of the SSA name for the incoming
   * argument. It is also possible that long and double arguments
   * end up half-promoted. In those cases, we must flush the promoted
   * half to memory as well.
   */
  for (int i = 0; i < cu_->num_ins; i++) {
    PromotionMap* v_map = &promotion_map_[start_vreg + i];
    if (i < num_arg_regs) {
      // If arriving in register
      bool need_flush = true;
      RegLocation* t_loc = &ArgLocs[i];
      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
        OpRegCopy(v_map->core_reg, TargetReg(arg_regs[i]));
        need_flush = false;
      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
        OpRegCopy(v_map->FpReg, TargetReg(arg_regs[i]));
        need_flush = false;
      } else {
        need_flush = true;
      }

      // For wide args, force flush if only half is promoted
      if (t_loc->wide) {
        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
        need_flush |= (p_map->core_location != v_map->core_location) ||
            (p_map->fp_location != v_map->fp_location);
      }
      if (need_flush) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                      TargetReg(arg_regs[i]), kWord);
      }
    } else {
      // If arriving in frame & promoted
      if (v_map->core_location == kLocPhysReg) {
        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i), v_map->core_reg);
      }
      if (v_map->fp_location == kLocPhysReg) {
        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i), v_map->FpReg);
      }
    }
  }
}
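
// Example of the half-promotion case above (illustrative): for a long held in
// an adjacent vreg pair where only one half was promoted, the two neighboring
// PromotionMap entries disagree on core_location/fp_location, so need_flush is
// forced and the incoming register halves are stored back to their frame
// slots, keeping the frame copy authoritative.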

/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 */
static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                          int state, const MethodReference& target_method,
                          uint32_t unused,
                          uintptr_t direct_code, uintptr_t direct_method,
                          InvokeType type) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  if (cu->instruction_set != kThumb2) {
    // Disable sharpening
    direct_code = 0;
    direct_method = 0;
  }
  if (direct_code != 0 && direct_method != 0) {
    switch (state) {
      case 0:  // Get the current Method* [sets kArg0]
        if (direct_code != static_cast<unsigned int>(-1)) {
          cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
        } else {
          CHECK_EQ(cu->dex_file, target_method.dex_file);
          LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
                                                 target_method.dex_method_index, 0);
          if (data_target == NULL) {
            data_target = cg->AddWordData(&cg->code_literal_list_,
                                          target_method.dex_method_index);
            data_target->operands[1] = type;
          }
          LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target);
          cg->AppendLIR(load_pc_rel);
          DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
        }
        if (direct_method != static_cast<unsigned int>(-1)) {
          cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
        } else {
          CHECK_EQ(cu->dex_file, target_method.dex_file);
          LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_,
                                                 target_method.dex_method_index, 0);
          if (data_target == NULL) {
            data_target = cg->AddWordData(&cg->method_literal_list_,
                                          target_method.dex_method_index);
            data_target->operands[1] = type;
          }
          LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target);
          cg->AppendLIR(load_pc_rel);
          DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
        }
        break;
      default:
        return -1;
    }
  } else {
    switch (state) {
      case 0:  // Get the current Method* [sets kArg0]
        // TUNING: we can save a reg copy if Method* has been promoted.
        cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
        break;
      case 1:  // Get method->dex_cache_resolved_methods_
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(),
                         cg->TargetReg(kArg0));
        // Set up direct code if known.
        if (direct_code != 0) {
          if (direct_code != static_cast<unsigned int>(-1)) {
            cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
          } else {
            CHECK_EQ(cu->dex_file, target_method.dex_file);
            LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
                                                   target_method.dex_method_index, 0);
            if (data_target == NULL) {
              data_target = cg->AddWordData(&cg->code_literal_list_,
                                            target_method.dex_method_index);
              data_target->operands[1] = type;
            }
            LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target);
            cg->AppendLIR(load_pc_rel);
            DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
          }
        }
        break;
      case 2:  // Grab target method*
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                         (target_method.dex_method_index * 4),
                         cg->TargetReg(kArg0));
        break;
      case 3:  // Grab the code from the method*
        if (cu->instruction_set != kX86) {
          if (direct_code == 0) {
            cg->LoadWordDisp(cg->TargetReg(kArg0),
                             mirror::AbstractMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
                             cg->TargetReg(kInvokeTgt));
          }
          break;
        }
        // Intentional fallthrough for x86
      default:
        return -1;
    }
  }
  return state + 1;
}
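
// Recap of the unsharpened state sequence above (illustrative):
//   0: kArg0 <- current Method*
//   1: kArg0 <- method->dex_cache_resolved_methods_
//   2: kArg0 <- resolved_methods[target_method.dex_method_index]
//   3: kInvokeTgt <- kArg0's code entry point (skipped on x86, which calls
//      through memory instead)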

/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in a virtual invoke sequence.
 * We can use kLr as a temp prior to target address loading
 * Note also that we'll load the first argument ("this") into
 * kArg1 here rather than the standard LoadArgRegs.
 */
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
                         int state, const MethodReference& target_method,
                         uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
                         InvokeType unused3) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  /*
   * This is the fast path in which the target virtual method is
   * fully resolved at compile time.
   */
  switch (state) {
    case 0: {  // Get "this" [set kArg1]
      RegLocation rl_arg = info->args[0];
      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
      break;
    }
    case 1:  // Is "this" null? [use kArg1]
      cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
      // get this->klass_ [use kArg1, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 2:  // Get this->klass_->vtable [use kInvokeTgt, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 3:  // Get target method [use kInvokeTgt, set kArg0]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) +
                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
                       cg->TargetReg(kArg0));
      break;
    case 4:  // Get the compiled code address [uses kArg0, sets kInvokeTgt]
      if (cu->instruction_set != kX86) {
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::AbstractMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
                         cg->TargetReg(kInvokeTgt));
        break;
      }
      // Intentional fallthrough for X86
    default:
      return -1;
  }
  return state + 1;
}
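
// Net effect of the states above (illustrative): kArg0 ends up holding
// this->klass_->vtable_[method_idx] and kInvokeTgt its code entry point,
// i.e. an explicit rendering of a vtable dispatch.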

/*
 * All invoke-interface calls bounce off of art_quick_invoke_interface_trampoline,
 * which will locate the target and continue on via a tail call.
 */
static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
                                 const MethodReference& target_method,
                                 uint32_t unused, uintptr_t unused2,
                                 uintptr_t direct_method, InvokeType unused4) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  if (cu->instruction_set != kThumb2) {
    // Disable sharpening
    direct_method = 0;
  }
  int trampoline = (cu->instruction_set == kX86) ? 0
      : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);

  if (direct_method != 0) {
    switch (state) {
      case 0:  // Load the trampoline target [sets kInvokeTgt].
        if (cu->instruction_set != kX86) {
          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
        }
        // Get the interface Method* [sets kArg0]
        if (direct_method != static_cast<unsigned int>(-1)) {
          cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
        } else {
          CHECK_EQ(cu->dex_file, target_method.dex_file);
          LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_,
                                                 target_method.dex_method_index, 0);
          if (data_target == NULL) {
            data_target = cg->AddWordData(&cg->method_literal_list_,
                                          target_method.dex_method_index);
            data_target->operands[1] = kInterface;
          }
          LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target);
          cg->AppendLIR(load_pc_rel);
          DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
        }
        break;
      default:
        return -1;
    }
  } else {
    switch (state) {
      case 0:
        // Get the current Method* [sets kArg0] - TUNING: remove copy of method if it is promoted.
        cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
        // Load the trampoline target [sets kInvokeTgt].
        if (cu->instruction_set != kX86) {
          cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
        }
        break;
      case 1:  // Get method->dex_cache_resolved_methods_ [set/use kArg0]
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(),
                         cg->TargetReg(kArg0));
        break;
      case 2:  // Grab target method* [set/use kArg0]
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                         (target_method.dex_method_index * 4),
                         cg->TargetReg(kArg0));
        break;
      default:
        return -1;
    }
  }
  return state + 1;
}

static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, int trampoline,
                            int state, const MethodReference& target_method,
                            uint32_t method_idx) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  /*
   * This handles the case in which the base method is not fully
   * resolved at compile time; we bail to a runtime helper.
   */
  if (state == 0) {
    if (cu->instruction_set != kX86) {
      // Load trampoline target
      cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
    }
    // Load kArg0 with method index
    CHECK_EQ(cu->dex_file, target_method.dex_file);
    cg->LoadConstant(cg->TargetReg(kArg0), target_method.dex_method_index);
    return 1;
  }
  return -1;
}

static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
                                int state,
                                const MethodReference& target_method,
                                uint32_t method_idx,
                                uintptr_t unused, uintptr_t unused2,
                                InvokeType unused3) {
  int trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                                const MethodReference& target_method,
                                uint32_t method_idx, uintptr_t unused,
                                uintptr_t unused2, InvokeType unused3) {
  int trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                               const MethodReference& target_method,
                               uint32_t method_idx, uintptr_t unused,
                               uintptr_t unused2, InvokeType unused3) {
  int trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                           const MethodReference& target_method,
                           uint32_t method_idx, uintptr_t unused,
                           uintptr_t unused2, InvokeType unused3) {
  int trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
                                                CallInfo* info, int state,
                                                const MethodReference& target_method,
                                                uint32_t unused,
                                                uintptr_t unused2, uintptr_t unused3,
                                                InvokeType unused4) {
  int trampoline = ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
  return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}

int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
                         NextCallInsn next_call_insn,
                         const MethodReference& target_method,
                         uint32_t vtable_idx, uintptr_t direct_code,
                         uintptr_t direct_method, InvokeType type, bool skip_this) {
  int last_arg_reg = TargetReg(kArg3);
  int next_reg = TargetReg(kArg1);
  int next_arg = 0;
  if (skip_this) {
    next_reg++;
    next_arg++;
  }
  for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
    RegLocation rl_arg = info->args[next_arg++];
    rl_arg = UpdateRawLoc(rl_arg);
    if (rl_arg.wide && (next_reg <= TargetReg(kArg2))) {
      LoadValueDirectWideFixed(rl_arg, next_reg, next_reg + 1);
      next_reg++;
      next_arg++;
    } else {
      if (rl_arg.wide) {
        rl_arg.wide = false;
        rl_arg.is_const = false;
      }
      LoadValueDirectFixed(rl_arg, next_reg);
    }
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
  }
  return call_state;
}
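
// Example (illustrative): for a static target with signature (int, long, int),
// the first int lands in kArg1, the long pair in kArg2/kArg3, and the trailing
// int is left for the frame stores performed by GenDalvikArgsNoRange/Range.
// With skip_this set, loading starts at kArg2 because "this" was already
// placed in kArg1 by the invoke sequence.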

/*
 * Load up to 5 arguments, the first three of which will be in
 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer. Note that this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
                                  int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
                                  const MethodReference& target_method,
                                  uint32_t vtable_idx, uintptr_t direct_code,
                                  uintptr_t direct_method, InvokeType type, bool skip_this) {
  RegLocation rl_arg;

  /* If no arguments, just return */
  if (info->num_arg_words == 0)
    return call_state;

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);

  DCHECK_LE(info->num_arg_words, 5);
  if (info->num_arg_words > 3) {
    int32_t next_use = 3;
    // Detect special case of wide arg spanning arg3/arg4
    RegLocation rl_use0 = info->args[0];
    RegLocation rl_use1 = info->args[1];
    RegLocation rl_use2 = info->args[2];
    if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) && rl_use2.wide) {
      int reg = -1;
      // Wide spans, we need the 2nd half of uses[2].
      rl_arg = UpdateLocWide(rl_use2);
      if (rl_arg.location == kLocPhysReg) {
        reg = rl_arg.high_reg;
      } else {
        // kArg2 & kArg3 can safely be used here
        reg = TargetReg(kArg3);
        LoadWordDisp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      StoreBaseDisp(TargetReg(kSp), (next_use + 1) * 4, reg, kWord);
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
      next_use++;
    }
    // Loop through the rest
    while (next_use < info->num_arg_words) {
      int low_reg;
      int high_reg = -1;
      rl_arg = info->args[next_use];
      rl_arg = UpdateRawLoc(rl_arg);
      if (rl_arg.location == kLocPhysReg) {
        low_reg = rl_arg.low_reg;
        high_reg = rl_arg.high_reg;
      } else {
        low_reg = TargetReg(kArg2);
        if (rl_arg.wide) {
          high_reg = TargetReg(kArg3);
          LoadValueDirectWideFixed(rl_arg, low_reg, high_reg);
        } else {
          LoadValueDirectFixed(rl_arg, low_reg);
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      int outs_offset = (next_use + 1) * 4;
      if (rl_arg.wide) {
        StoreBaseDispWide(TargetReg(kSp), outs_offset, low_reg, high_reg);
        next_use += 2;
      } else {
        StoreWordDisp(TargetReg(kSp), outs_offset, low_reg);
        next_use++;
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
  }

  call_state = LoadArgRegs(info, call_state, next_call_insn,
                           target_method, vtable_idx, direct_code, direct_method,
                           type, skip_this);

  if (pcrLabel) {
    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
  }
  return call_state;
}

/*
 * May have 0+ arguments (also used for jumbo). Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying. This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *
 */
int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                LIR** pcrLabel, NextCallInsn next_call_insn,
                                const MethodReference& target_method,
                                uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
                                InvokeType type, bool skip_this) {
  // If we can treat it as non-range (Jumbo ops will use range form)
  if (info->num_arg_words <= 5)
    return GenDalvikArgsNoRange(info, call_state, pcrLabel,
                                next_call_insn, target_method, vtable_idx,
                                direct_code, direct_method, type, skip_this);
  /*
   * First load the non-register arguments. Both forms expect all
   * of the source arguments to be in their home frame location, so
   * scan the s_reg names and flush any that have been promoted to
   * frame backing storage.
   */
  // Scan the rest of the args - if in phys_reg flush to memory
  for (int next_arg = 0; next_arg < info->num_arg_words;) {
    RegLocation loc = info->args[next_arg];
    if (loc.wide) {
      loc = UpdateLocWide(loc);
      if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
        StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                          loc.low_reg, loc.high_reg);
      }
      next_arg += 2;
    } else {
      loc = UpdateLoc(loc);
      if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                      loc.low_reg, kWord);
      }
      next_arg++;
    }
  }

  int start_offset = SRegOffset(info->args[3].s_reg_low);
  int outs_offset = 4 /* Method* */ + (3 * 4);
  if (cu_->instruction_set != kThumb2) {
    // Generate memcpy
    OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
    OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
    CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
                               TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
  } else {
    if (info->num_arg_words >= 20) {
      // Generate memcpy
      OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
      OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
      CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
                                 TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
    } else {
      // Use vldm/vstm pair using kArg3 as a temp
      int regs_left = std::min(info->num_arg_words - 3, 16);
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
      OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
      LIR* ld = OpVldm(TargetReg(kArg3), regs_left);
      // TUNING: loosen barrier
      ld->def_mask = ENCODE_ALL;
      SetMemRefType(ld, true /* is_load */, kDalvikReg);
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
      OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
      LIR* st = OpVstm(TargetReg(kArg3), regs_left);
      SetMemRefType(st, false /* is_load */, kDalvikReg);
      st->def_mask = ENCODE_ALL;
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
  }

  call_state = LoadArgRegs(info, call_state, next_call_insn,
                           target_method, vtable_idx, direct_code, direct_method,
                           type, skip_this);

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
  }
  return call_state;
}
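
// Out-arg layout assumed above (illustrative): the outgoing Method* occupies
// [sp, #0], so argument word n is stored at [sp, #(n + 1) * 4]; the vldm/vstm
// or memcpy paths bulk-move words 3 and up from their home frame slots into
// that outs area, while words 0-2 travel in kArg1-kArg3.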

RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
  RegLocation res;
  if (info->result.location == kLocInvalid) {
    res = GetReturn(false);
  } else {
    res = info->result;
  }
  return res;
}

RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
  RegLocation res;
  if (info->result.location == kLocInvalid) {
    res = GetReturnWide(false);
  } else {
    res = info->result;
  }
  return res;
}

bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Location of reference to data array
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within data array
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data within array_
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

  RegLocation rl_obj = info->args[0];
  RegLocation rl_idx = info->args[1];
  rl_obj = LoadValue(rl_obj, kCoreReg);
  rl_idx = LoadValue(rl_idx, kCoreReg);
  int reg_max;
  GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
  LIR* launch_pad = NULL;
  int reg_off = INVALID_REG;
  int reg_ptr = INVALID_REG;
  if (cu_->instruction_set != kX86) {
    reg_off = AllocTemp();
    reg_ptr = AllocTemp();
    if (range_check) {
      reg_max = AllocTemp();
      LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
    }
    LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
    LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
    if (range_check) {
      // Set up a launch pad to allow retry in case of bounds violation
      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
      intrinsic_launchpads_.Insert(launch_pad);
      OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
      FreeTemp(reg_max);
      OpCondBranch(kCondCs, launch_pad);
    }
  } else {
    if (range_check) {
      reg_max = AllocTemp();
      LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
      // Set up a launch pad to allow retry in case of bounds violation
      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
      intrinsic_launchpads_.Insert(launch_pad);
      OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
      FreeTemp(reg_max);
      OpCondBranch(kCondCc, launch_pad);
    }
    reg_off = AllocTemp();
    reg_ptr = AllocTemp();
    LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
    LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
  }
  OpRegImm(kOpAdd, reg_ptr, data_offset);
  OpRegReg(kOpAdd, reg_off, rl_idx.low_reg);
  FreeTemp(rl_obj.low_reg);
  FreeTemp(rl_idx.low_reg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  LoadBaseIndexed(reg_ptr, reg_off, rl_result.low_reg, 1, kUnsignedHalf);
  FreeTemp(reg_off);
  FreeTemp(reg_ptr);
  StoreValue(rl_dest, rl_result);
  if (range_check) {
    launch_pad->operands[2] = 0;  // no resumption
  }
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  return true;
}
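
// Address math above (illustrative): the loaded char is value_[offset_ + index],
// read as a halfword from reg_ptr + ((offset_ + index) << 1), with reg_ptr
// already advanced by data_offset and the <<1 scale supplied by
// LoadBaseIndexed(..., 1, kUnsignedHalf). The "launch pad" is an out-of-line
// retry point: a failed range check branches there and falls back to the
// non-inlined call described by 'info'.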

// Generates an inlined String.isEmpty or String.length.
bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // dst = src.length();
  RegLocation rl_obj = info->args[0];
  rl_obj = LoadValue(rl_obj, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
  LoadWordDisp(rl_obj.low_reg, mirror::String::CountOffset().Int32Value(), rl_result.low_reg);
  if (is_empty) {
    // dst = (dst == 0);
    if (cu_->instruction_set == kThumb2) {
      int t_reg = AllocTemp();
      OpRegReg(kOpNeg, t_reg, rl_result.low_reg);
      OpRegRegReg(kOpAdc, rl_result.low_reg, rl_result.low_reg, t_reg);
    } else {
      DCHECK_EQ(cu_->instruction_set, kX86);
      OpRegImm(kOpSub, rl_result.low_reg, 1);
      OpRegImm(kOpLsr, rl_result.low_reg, 31);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
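
// The branchless isEmpty lowerings above (illustrative): on Thumb2, assuming
// kOpNeg lowers to a flag-setting RSBS, t = -count sets carry only when
// count == 0, so adc computes count + (-count) + carry = carry, i.e. 1 iff
// the string is empty. On x86, counts are non-negative, so the logical shift
// (count - 1) >> 31 is likewise 1 iff count == 0.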

bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int sign_reg = AllocTemp();
  // abs(x): y = x >> 31 (sign mask); result = (x + y) ^ y.
  OpRegRegImm(kOpAsr, sign_reg, rl_src.low_reg, 31);
  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
  OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
  StoreValue(rl_dest, rl_result);
  return true;
}
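
// Worked example of the branchless abs above (illustrative): for x = -5,
// y = x >> 31 = 0xFFFFFFFF, x + y = -6, and -6 ^ -1 = 5; for x >= 0, y = 0
// and the add/xor are no-ops. The wide version below applies the same
// identity to the register pair, propagating the carry with adc.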

bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  if (cu_->instruction_set == kThumb2) {
    RegLocation rl_src = info->args[0];
    rl_src = LoadValueWide(rl_src, kCoreReg);
    RegLocation rl_dest = InlineTargetWide(info);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    int sign_reg = AllocTemp();
    // abs(x): y = x >> 31 (sign mask); result = (x + y) ^ y.
    OpRegRegImm(kOpAsr, sign_reg, rl_src.high_reg, 31);
    OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
    OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
    StoreValueWide(rl_dest, rl_result);
    return true;
  } else {
    DCHECK_EQ(cu_->instruction_set, kX86);
    // Reuse source registers to avoid running out of temps
    RegLocation rl_src = info->args[0];
    rl_src = LoadValueWide(rl_src, kCoreReg);
    RegLocation rl_dest = InlineTargetWide(info);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegCopyWide(rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg);
    FreeTemp(rl_src.low_reg);
    FreeTemp(rl_src.high_reg);
    int sign_reg = AllocTemp();
    // abs(x): y = x >> 31 (sign mask); result = (x + y) ^ y.
    OpRegRegImm(kOpAsr, sign_reg, rl_result.high_reg, 31);
    OpRegReg(kOpAdd, rl_result.low_reg, sign_reg);
    OpRegReg(kOpAdc, rl_result.high_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
    StoreValueWide(rl_dest, rl_result);
    return true;
  }
}

bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_src);
  return true;
}

bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  RegLocation rl_dest = InlineTargetWide(info);
  StoreValueWide(rl_dest, rl_src);
  return true;
}

/*
 * Fast String.indexOf(I) & (II). Tests for simple case of char <= 0xFFFF;
 * otherwise bails to standard library code.
 */
bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  ClobberCalleeSave();
  LockCallTemps();  // Using fixed registers
  int reg_ptr = TargetReg(kArg0);
  int reg_char = TargetReg(kArg1);
  int reg_start = TargetReg(kArg2);

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start = info->args[2];
  LoadValueDirectFixed(rl_obj, reg_ptr);
  LoadValueDirectFixed(rl_char, reg_char);
  if (zero_based) {
    LoadConstant(reg_start, 0);
  } else {
    LoadValueDirectFixed(rl_start, reg_start);
  }
  int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(ENTRYPOINT_OFFSET(pIndexOf)) : 0;
  GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
  intrinsic_launchpads_.Insert(launch_pad);
  OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad);
  // NOTE: not a safepoint
  if (cu_->instruction_set != kX86) {
    OpReg(kOpBlx, r_tgt);
  } else {
    OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pIndexOf));
  }
  LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
  launch_pad->operands[2] = reinterpret_cast<uintptr_t>(resume_tgt);
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}

/* Fast String.compareTo(Ljava/lang/String;)I. */
bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  ClobberCalleeSave();
  LockCallTemps();  // Using fixed registers
  int reg_this = TargetReg(kArg0);
  int reg_cmp = TargetReg(kArg1);

  RegLocation rl_this = info->args[0];
  RegLocation rl_cmp = info->args[1];
  LoadValueDirectFixed(rl_this, reg_this);
  LoadValueDirectFixed(rl_cmp, reg_cmp);
  int r_tgt = (cu_->instruction_set != kX86) ?
      LoadHelper(ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
  GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
  // TUNING: check if rl_cmp.s_reg_low is already null checked
  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
  intrinsic_launchpads_.Insert(launch_pad);
  OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
  // NOTE: not a safepoint
  if (cu_->instruction_set != kX86) {
    OpReg(kOpBlx, r_tgt);
  } else {
    OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pStringCompareTo));
  }
  launch_pad->operands[2] = 0;  // No return possible
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}

bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int offset = Thread::PeerOffset().Int32Value();
  if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
    LoadWordDisp(TargetReg(kSelf), offset, rl_result.low_reg);
  } else {
    CHECK(cu_->instruction_set == kX86);
    reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset);
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
                                  bool is_long, bool is_volatile) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_dest = InlineTarget(info);  // result reg
  if (is_volatile) {
    GenMemBarrier(kLoadLoad);
  }
  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_long) {
    OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
    LoadBaseDispWide(rl_object.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
    StoreValueWide(rl_dest, rl_result);
  } else {
    LoadBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_result.low_reg, 0, kWord);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}

bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
                                  bool is_object, bool is_volatile, bool is_ordered) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  if (cu_->instruction_set == kX86 && is_object) {
    // TODO: fix X86, it exhausts registers for card marking.
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_src_value = info->args[4];  // value to store
  if (is_volatile || is_ordered) {
    GenMemBarrier(kStoreStore);
  }
  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_value;
  if (is_long) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
    OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
    StoreBaseDispWide(rl_object.low_reg, 0, rl_value.low_reg, rl_value.high_reg);
  } else {
    rl_value = LoadValue(rl_src_value, kCoreReg);
    StoreBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_value.low_reg, 0, kWord);
  }
  if (is_volatile) {
    GenMemBarrier(kStoreLoad);
  }
  if (is_object) {
    MarkGCCard(rl_value.low_reg, rl_object.low_reg);
  }
  return true;
}
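
// Barrier placement above (illustrative): the StoreStore barrier before the
// store orders it after earlier writes, which covers both volatile and
// ordered puts; a volatile put additionally issues a StoreLoad barrier
// afterwards so later loads cannot be reordered ahead of it. This is the
// usual lowering of a Java volatile store on weakly ordered targets.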

bool Mir2Lir::GenIntrinsic(CallInfo* info) {
  if (info->opt_flags & MIR_INLINED) {
    return false;
  }
  /*
   * TODO: move these to a target-specific structured constant array
   * and use a generic match function. The list of intrinsics may be
   * slightly different depending on target.
   * TODO: Fold this into a matching function that runs during
   * basic block building. This should be part of the action for
   * small method inlining and recognition of the special object init
   * method. By doing this during basic block construction, we can also
   * take advantage of/generate new useful dataflow info.
   */
  StringPiece tgt_methods_declaring_class(
      cu_->dex_file->GetMethodDeclaringClassDescriptor(cu_->dex_file->GetMethodId(info->index)));
  if (tgt_methods_declaring_class.starts_with("Ljava/lang/Double;")) {
    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
    if (tgt_method == "long java.lang.Double.doubleToRawLongBits(double)") {
      return GenInlinedDoubleCvt(info);
    }
    if (tgt_method == "double java.lang.Double.longBitsToDouble(long)") {
      return GenInlinedDoubleCvt(info);
    }
  } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Float;")) {
    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
    if (tgt_method == "int java.lang.Float.floatToRawIntBits(float)") {
      return GenInlinedFloatCvt(info);
    }
    if (tgt_method == "float java.lang.Float.intBitsToFloat(int)") {
      return GenInlinedFloatCvt(info);
    }
  } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Math;") ||
             tgt_methods_declaring_class.starts_with("Ljava/lang/StrictMath;")) {
    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
    if (tgt_method == "int java.lang.Math.abs(int)" ||
        tgt_method == "int java.lang.StrictMath.abs(int)") {
      return GenInlinedAbsInt(info);
    }
    if (tgt_method == "long java.lang.Math.abs(long)" ||
        tgt_method == "long java.lang.StrictMath.abs(long)") {
      return GenInlinedAbsLong(info);
    }
    if (tgt_method == "int java.lang.Math.max(int, int)" ||
        tgt_method == "int java.lang.StrictMath.max(int, int)") {
      return GenInlinedMinMaxInt(info, false /* is_min */);
    }
    if (tgt_method == "int java.lang.Math.min(int, int)" ||
        tgt_method == "int java.lang.StrictMath.min(int, int)") {
      return GenInlinedMinMaxInt(info, true /* is_min */);
    }
    if (tgt_method == "double java.lang.Math.sqrt(double)" ||
        tgt_method == "double java.lang.StrictMath.sqrt(double)") {
      return GenInlinedSqrt(info);
    }
  } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/String;")) {
    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
    if (tgt_method == "char java.lang.String.charAt(int)") {
      return GenInlinedCharAt(info);
    }
    if (tgt_method == "int java.lang.String.compareTo(java.lang.String)") {
      return GenInlinedStringCompareTo(info);
    }
    if (tgt_method == "boolean java.lang.String.isEmpty()") {
      return GenInlinedStringIsEmptyOrLength(info, true /* is_empty */);
    }
    if (tgt_method == "int java.lang.String.indexOf(int, int)") {
      return GenInlinedIndexOf(info, false /* base 0 */);
    }
    if (tgt_method == "int java.lang.String.indexOf(int)") {
      return GenInlinedIndexOf(info, true /* base 0 */);
    }
    if (tgt_method == "int java.lang.String.length()") {
      return GenInlinedStringIsEmptyOrLength(info, false /* is_empty */);
    }
  } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Thread;")) {
    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
    if (tgt_method == "java.lang.Thread java.lang.Thread.currentThread()") {
      return GenInlinedCurrentThread(info);
    }
  } else if (tgt_methods_declaring_class.starts_with("Lsun/misc/Unsafe;")) {
    std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
    if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)") {
      return GenInlinedCas32(info, false);
    }
    if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapObject(java.lang.Object, long, java.lang.Object, java.lang.Object)") {
      return GenInlinedCas32(info, true);
    }
    if (tgt_method == "int sun.misc.Unsafe.getInt(java.lang.Object, long)") {
      return GenInlinedUnsafeGet(info, false /* is_long */, false /* is_volatile */);
    }
    if (tgt_method == "int sun.misc.Unsafe.getIntVolatile(java.lang.Object, long)") {
      return GenInlinedUnsafeGet(info, false /* is_long */, true /* is_volatile */);
    }
    if (tgt_method == "void sun.misc.Unsafe.putInt(java.lang.Object, long, int)") {
      return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */,
                                 false /* is_volatile */, false /* is_ordered */);
    }
    if (tgt_method == "void sun.misc.Unsafe.putIntVolatile(java.lang.Object, long, int)") {
      return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */,
                                 true /* is_volatile */, false /* is_ordered */);
    }
    if (tgt_method == "void sun.misc.Unsafe.putOrderedInt(java.lang.Object, long, int)") {
      return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */,
                                 false /* is_volatile */, true /* is_ordered */);
    }
    if (tgt_method == "long sun.misc.Unsafe.getLong(java.lang.Object, long)") {
      return GenInlinedUnsafeGet(info, true /* is_long */, false /* is_volatile */);
    }
    if (tgt_method == "long sun.misc.Unsafe.getLongVolatile(java.lang.Object, long)") {
      return GenInlinedUnsafeGet(info, true /* is_long */, true /* is_volatile */);
    }
    if (tgt_method == "void sun.misc.Unsafe.putLong(java.lang.Object, long, long)") {
      return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */,
                                 false /* is_volatile */, false /* is_ordered */);
    }
    if (tgt_method == "void sun.misc.Unsafe.putLongVolatile(java.lang.Object, long, long)") {
      return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */,
                                 true /* is_volatile */, false /* is_ordered */);
    }
    if (tgt_method == "void sun.misc.Unsafe.putOrderedLong(java.lang.Object, long, long)") {
      return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */,
                                 false /* is_volatile */, true /* is_ordered */);
    }
    if (tgt_method == "java.lang.Object sun.misc.Unsafe.getObject(java.lang.Object, long)") {
      return GenInlinedUnsafeGet(info, false /* is_long */, false /* is_volatile */);
    }
    if (tgt_method == "java.lang.Object sun.misc.Unsafe.getObjectVolatile(java.lang.Object, long)") {
      return GenInlinedUnsafeGet(info, false /* is_long */, true /* is_volatile */);
    }
    if (tgt_method == "void sun.misc.Unsafe.putObject(java.lang.Object, long, java.lang.Object)") {
      return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */,
                                 false /* is_volatile */, false /* is_ordered */);
    }
    if (tgt_method == "void sun.misc.Unsafe.putObjectVolatile(java.lang.Object, long, java.lang.Object)") {
      return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */,
                                 true /* is_volatile */, false /* is_ordered */);
    }
    if (tgt_method == "void sun.misc.Unsafe.putOrderedObject(java.lang.Object, long, java.lang.Object)") {
      return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */,
                                 false /* is_volatile */, true /* is_ordered */);
    }
  }
  return false;
}

void Mir2Lir::GenInvoke(CallInfo* info) {
  if (GenIntrinsic(info)) {
    return;
  }
  InvokeType original_type = info->type;  // avoiding mutation by ComputeInvokeInfo
  int call_state = 0;
  LIR* null_ck;
  LIR** p_null_ck = NULL;
  NextCallInsn next_call_insn;
  FlushAllRegs();  /* Everything to home location */
  // Explicit register usage
  LockCallTemps();

  DexCompilationUnit* cUnit = mir_graph_->GetCurrentDexCompilationUnit();
  MethodReference target_method(cUnit->GetDexFile(), info->index);
  int vtable_idx;
  uintptr_t direct_code;
  uintptr_t direct_method;
  bool skip_this;
  bool fast_path =
      cu_->compiler_driver->ComputeInvokeInfo(mir_graph_->GetCurrentDexCompilationUnit(),
                                              current_dalvik_offset_,
                                              info->type, target_method,
                                              vtable_idx,
                                              direct_code, direct_method,
                                              true) && !SLOW_INVOKE_PATH;
  if (info->type == kInterface) {
    if (fast_path) {
      p_null_ck = &null_ck;
    }
    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
    skip_this = false;
  } else if (info->type == kDirect) {
    if (fast_path) {
      p_null_ck = &null_ck;
    }
    next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP;
    skip_this = false;
  } else if (info->type == kStatic) {
    next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP;
    skip_this = false;
  } else if (info->type == kSuper) {
    DCHECK(!fast_path);  // Fast path is a direct call.
    next_call_insn = NextSuperCallInsnSP;
    skip_this = false;
  } else {
    DCHECK_EQ(info->type, kVirtual);
    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
    skip_this = fast_path;
  }
  if (!info->is_range) {
    call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
                                      next_call_insn, target_method,
                                      vtable_idx, direct_code, direct_method,
                                      original_type, skip_this);
  } else {
    call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
                                    next_call_insn, target_method, vtable_idx,
                                    direct_code, direct_method, original_type,
                                    skip_this);
  }
  // Finish up any of the call sequence not interleaved in arg loading
  while (call_state >= 0) {
    call_state = next_call_insn(cu_, info, call_state, target_method,
                                vtable_idx, direct_code, direct_method,
                                original_type);
  }
  LIR* call_inst;
  if (cu_->instruction_set != kX86) {
    call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt));
  } else {
    if (fast_path && info->type != kInterface) {
      call_inst = OpMem(kOpBlx, TargetReg(kArg0),
                        mirror::AbstractMethod::GetEntryPointFromCompiledCodeOffset().Int32Value());
    } else {
      int trampoline = 0;
      switch (info->type) {
        case kInterface:
          trampoline = fast_path ? ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
              : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
          break;
        case kDirect:
          trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
          break;
        case kStatic:
          trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
          break;
        case kSuper:
          trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
          break;
        case kVirtual:
          trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
          break;
        default:
          LOG(FATAL) << "Unexpected invoke type";
      }
      call_inst = OpThreadMem(kOpBlx, trampoline);
    }
  }
  MarkSafepointPC(call_inst);

  ClobberCalleeSave();
  if (info->result.location != kLocInvalid) {
    // We have a following MOVE_RESULT - do it now.
    if (info->result.wide) {
      RegLocation ret_loc = GetReturnWide(info->result.fp);
      StoreValueWide(info->result, ret_loc);
    } else {
      RegLocation ret_loc = GetReturn(info->result.fp);
      StoreValue(info->result, ret_loc);
    }
  }
}

}  // namespace art