/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_ir.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "mirror/array.h"
#include "oat/runtime/oat_support_entrypoints.h"
#include "verifier/method_verifier.h"

namespace art {

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets.  Only mid-level support utilities
 * and "op" calls may be used here.
 */

/*
 * Generate a kPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
void Mir2Lir::GenBarrier()
{
  LIR* barrier = NewLIR0(kPseudoBarrier);
  /* Mark all resources as being clobbered */
  barrier->def_mask = -1;
}

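/*
 * The Gen*Check routines below emit a compare-and-branch to a deferred
 * kPseudoThrowTarget.  Those targets are collected in throw_launchpads_
 * and materialized later as out-of-line throw sequences by
 * HandleThrowLaunchPads().
 */
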
// FIXME: need to do some work to split out targets with
// condition codes and those without
LIR* Mir2Lir::GenCheck(ConditionCode c_code, ThrowKind kind)
{
  DCHECK_NE(cu_->instruction_set, kMips);
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_);
  LIR* branch = OpCondBranch(c_code, tgt);
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}

LIR* Mir2Lir::GenImmedCheck(ConditionCode c_code, int reg, int imm_val, ThrowKind kind)
{
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg, imm_val);
  LIR* branch;
  if (c_code == kCondAl) {
    branch = OpUnconditionalBranch(tgt);
  } else {
    branch = OpCmpImmBranch(c_code, reg, imm_val, tgt);
  }
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}

/* Perform null-check on a register. */
LIR* Mir2Lir::GenNullCheck(int s_reg, int m_reg, int opt_flags)
{
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) &&
      (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return NULL;
  }
  return GenImmedCheck(kCondEq, m_reg, 0, kThrowNullPointer);
}

/* Perform check on two registers */
LIR* Mir2Lir::GenRegRegCheck(ConditionCode c_code, int reg1, int reg2,
                             ThrowKind kind)
{
  LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1, reg2);
  LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt);
  // Remember branch target - will process later
  throw_launchpads_.Insert(tgt);
  return branch;
}

void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
                                  RegLocation rl_src2, LIR* taken,
                                  LIR* fall_through)
{
  ConditionCode cond;
  switch (opcode) {
    case Instruction::IF_EQ:
      cond = kCondEq;
      break;
    case Instruction::IF_NE:
      cond = kCondNe;
      break;
    case Instruction::IF_LT:
      cond = kCondLt;
      break;
    case Instruction::IF_GE:
      cond = kCondGe;
      break;
    case Instruction::IF_GT:
      cond = kCondGt;
      break;
    case Instruction::IF_LE:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }

  // Normalize such that if either operand is constant, src2 will be constant
  if (rl_src1.is_const) {
    RegLocation rl_temp = rl_src1;
    rl_src1 = rl_src2;
    rl_src2 = rl_temp;
    cond = FlipComparisonOrder(cond);
  }

  rl_src1 = LoadValue(rl_src1, kCoreReg);
  // Is this really an immediate comparison?
  if (rl_src2.is_const) {
    // If it's already live in a register or not easily materialized, just keep going
    RegLocation rl_temp = UpdateLoc(rl_src2);
    if ((rl_temp.location == kLocDalvikFrame) &&
        InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src2))) {
      // OK - convert this to a compare immediate and branch
      OpCmpImmBranch(cond, rl_src1.low_reg, mir_graph_->ConstantValue(rl_src2), taken);
      OpUnconditionalBranch(fall_through);
      return;
    }
  }
  rl_src2 = LoadValue(rl_src2, kCoreReg);
  OpCmpBranch(cond, rl_src1.low_reg, rl_src2.low_reg, taken);
  OpUnconditionalBranch(fall_through);
}

void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
                                      LIR* fall_through)
{
  ConditionCode cond;
  rl_src = LoadValue(rl_src, kCoreReg);
  switch (opcode) {
    case Instruction::IF_EQZ:
      cond = kCondEq;
      break;
    case Instruction::IF_NEZ:
      cond = kCondNe;
      break;
    case Instruction::IF_LTZ:
      cond = kCondLt;
      break;
    case Instruction::IF_GEZ:
      cond = kCondGe;
      break;
    case Instruction::IF_GTZ:
      cond = kCondGt;
      break;
    case Instruction::IF_LEZ:
      cond = kCondLe;
      break;
    default:
      cond = static_cast<ConditionCode>(0);
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  OpCmpImmBranch(cond, rl_src.low_reg, 0, taken);
  OpUnconditionalBranch(fall_through);
}

void Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src)
{
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (rl_src.location == kLocPhysReg) {
    OpRegCopy(rl_result.low_reg, rl_src.low_reg);
  } else {
    LoadValueDirect(rl_src, rl_result.low_reg);
  }
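  // Sign-extend into the high word: an arithmetic shift right by 31 fills
  // rl_result.high_reg with copies of the low word's sign bit.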
  OpRegRegImm(kOpAsr, rl_result.high_reg, rl_result.low_reg, 31);
  StoreValueWide(rl_dest, rl_result);
}

void Mir2Lir::GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src)
{
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  OpKind op = kOpInvalid;
  switch (opcode) {
    case Instruction::INT_TO_BYTE:
      op = kOp2Byte;
      break;
    case Instruction::INT_TO_SHORT:
      op = kOp2Short;
      break;
    case Instruction::INT_TO_CHAR:
      op = kOp2Char;
      break;
    default:
      LOG(ERROR) << "Bad int conversion type";
  }
  OpRegReg(op, rl_result.low_reg, rl_src.low_reg);
  StoreValue(rl_dest, rl_result);
}

/*
 * Let helper function take care of everything.  Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                          RegLocation rl_src)
{
  FlushAllRegs();  /* Everything to home location */
  int func_offset;
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
                                                       type_idx)) {
    func_offset = ENTRYPOINT_OFFSET(pAllocArrayFromCode);
  } else {
    func_offset = ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck);
  }
  CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}

/*
 * Similar to GenNewArray, but with post-allocation initialization.
 * The verifier guarantees we're dealing with an array class.  Current
 * code throws a runtime exception ("bad Filled array req") for 'D' and 'J'
 * element types, and an internal "unimplemented" error if the type is not
 * 'L', '[' or 'I'.
 */
void Mir2Lir::GenFilledNewArray(CallInfo* info)
{
  int elems = info->num_arg_words;
  int type_idx = info->index;
  FlushAllRegs();  /* Everything to home location */
  int func_offset;
  if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
                                                       type_idx)) {
    func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode);
  } else {
    func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck);
  }
  CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
  FreeTemp(TargetReg(kArg2));
  FreeTemp(TargetReg(kArg1));
  /*
   * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
   * return region.  Because AllocFromCode placed the new array
   * in kRet0, we'll just lock it into place.  When debugger support is
   * added, it may be necessary to additionally copy all return
   * values to a home location in thread-local storage
   */
  LockTemp(TargetReg(kRet0));

  // TODO: use the correct component size, currently all supported types
  // share array alignment with ints (see comment at head of function)
  size_t component_size = sizeof(int32_t);

  // Having a range of 0 is legal
  if (info->is_range && (elems > 0)) {
    /*
     * Bit of ugliness here.  We're going to generate a mem copy loop
     * on the register range, but it is possible that some regs
     * in the range have been promoted.  This is unlikely, but
     * before generating the copy, we'll just force a flush
     * of any regs in the source range that have been promoted to
     * home location.
     */
    for (int i = 0; i < elems; i++) {
      RegLocation loc = UpdateLoc(info->args[i]);
      if (loc.location == kLocPhysReg) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                      loc.low_reg, kWord);
      }
    }
    /*
     * TUNING note: generated code here could be much improved, but
     * this is an uncommon operation and isn't especially performance
     * critical.
     */
    int r_src = AllocTemp();
    int r_dst = AllocTemp();
    int r_idx = AllocTemp();
    int r_val = INVALID_REG;
    switch (cu_->instruction_set) {
      case kThumb2:
        r_val = TargetReg(kLr);
        break;
      case kX86:
        FreeTemp(TargetReg(kRet0));
        r_val = AllocTemp();
        break;
      case kMips:
        r_val = AllocTemp();
        break;
      default:
        LOG(FATAL) << "Unexpected instruction set: " << cu_->instruction_set;
    }
    // Set up source pointer
    RegLocation rl_first = info->args[0];
    OpRegRegImm(kOpAdd, r_src, TargetReg(kSp), SRegOffset(rl_first.s_reg_low));
    // Set up the target pointer
    OpRegRegImm(kOpAdd, r_dst, TargetReg(kRet0),
                mirror::Array::DataOffset(component_size).Int32Value());
    // Set up the loop counter (known to be > 0)
    LoadConstant(r_idx, elems - 1);
    // Generate the copy loop.  Going backwards for convenience
    LIR* target = NewLIR0(kPseudoTargetLabel);
    // Copy next element
    LoadBaseIndexed(r_src, r_idx, r_val, 2, kWord);
    StoreBaseIndexed(r_dst, r_idx, r_val, 2, kWord);
    FreeTemp(r_val);
    OpDecAndBranch(kCondGe, r_idx, target);
    if (cu_->instruction_set == kX86) {
      // Restore the target pointer
      OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
                  -mirror::Array::DataOffset(component_size).Int32Value());
    }
  } else if (!info->is_range) {
    // TUNING: interleave
    for (int i = 0; i < elems; i++) {
      RegLocation rl_arg = LoadValue(info->args[i], kCoreReg);
      StoreBaseDisp(TargetReg(kRet0),
                    mirror::Array::DataOffset(component_size).Int32Value() +
                    i * 4, rl_arg.low_reg, kWord);
      // If the LoadValue caused a temp to be allocated, free it
      if (IsTemp(rl_arg.low_reg)) {
        FreeTemp(rl_arg.low_reg);
      }
    }
  }
  if (info->result.location != kLocInvalid) {
    StoreValue(info->result, GetReturn(false /* not fp */));
  }
}

void Mir2Lir::GenSput(uint32_t field_idx, RegLocation rl_src, bool is_long_or_double,
                      bool is_object)
{
  int field_offset;
  int ssb_index;
  bool is_volatile;
  bool is_referrers_class;
  bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
      is_referrers_class, is_volatile, true);
  if (fast_path && !SLOW_FIELD_PATH) {
    DCHECK_GE(field_offset, 0);
    int rBase;
    if (is_referrers_class) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      rBase = AllocTemp();
      LoadWordDisp(rl_method.low_reg,
                   mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), rBase);
      if (IsTemp(rl_method.low_reg)) {
        FreeTemp(rl_method.low_reg);
      }
    } else {
      // Medium path, static storage base in a different class which requires checks that the other
      // class is initialized.
      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
      DCHECK_GE(ssb_index, 0);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      int r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      rBase = TargetReg(kArg0);
      LockTemp(rBase);
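      // Two dependent loads: the method's dex-cache array of initialized
      // static storage, then the ssb_index'th Class* entry within it.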
      LoadWordDisp(r_method,
                   mirror::AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
                   rBase);
      LoadWordDisp(rBase,
                   mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                   sizeof(int32_t*) * ssb_index, rBase);
      // rBase now points at appropriate static storage base (Class*)
      // or NULL if not initialized.  Check for NULL and call helper if NULL.
      // TUNING: fast path should fall through
      LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
      LoadConstant(TargetReg(kArg0), ssb_index);
      CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
      if (cu_->instruction_set == kMips) {
        // For Arm, kRet0 = kArg0 = rBase; for Mips we need an explicit copy.
        OpRegCopy(rBase, TargetReg(kRet0));
      }
      LIR* skip_target = NewLIR0(kPseudoTargetLabel);
      branch_over->target = skip_target;
      FreeTemp(r_method);
    }
    // rBase now holds static storage base
    if (is_long_or_double) {
      rl_src = LoadValueWide(rl_src, kAnyReg);
    } else {
      rl_src = LoadValue(rl_src, kAnyReg);
    }
    if (is_volatile) {
      GenMemBarrier(kStoreStore);
    }
    if (is_long_or_double) {
      StoreBaseDispWide(rBase, field_offset, rl_src.low_reg,
                        rl_src.high_reg);
    } else {
      StoreWordDisp(rBase, field_offset, rl_src.low_reg);
    }
    if (is_volatile) {
      GenMemBarrier(kStoreLoad);
    }
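    // Mark the GC card for the storage base so the collector can find the
    // new reference; skipped when the stored value is known to be null.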
    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
      MarkGCCard(rl_src.low_reg, rBase);
    }
    FreeTemp(rBase);
  } else {
    FlushAllRegs();  // Everything to home locations
    int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Static) :
        (is_object ? ENTRYPOINT_OFFSET(pSetObjStatic)
                   : ENTRYPOINT_OFFSET(pSet32Static));
    CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true);
  }
}

void Mir2Lir::GenSget(uint32_t field_idx, RegLocation rl_dest,
                      bool is_long_or_double, bool is_object)
{
  int field_offset;
  int ssb_index;
  bool is_volatile;
  bool is_referrers_class;
  bool fast_path = cu_->compiler_driver->ComputeStaticFieldInfo(
      field_idx, mir_graph_->GetCurrentDexCompilationUnit(), field_offset, ssb_index,
      is_referrers_class, is_volatile, false);
  if (fast_path && !SLOW_FIELD_PATH) {
    DCHECK_GE(field_offset, 0);
    int rBase;
    if (is_referrers_class) {
      // Fast path, static storage base is this method's class
      RegLocation rl_method = LoadCurrMethod();
      rBase = AllocTemp();
      LoadWordDisp(rl_method.low_reg,
                   mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), rBase);
    } else {
      // Medium path, static storage base in a different class which requires checks that the other
      // class is initialized
      // TODO: remove initialized check now that we are initializing classes in the compiler driver.
      DCHECK_GE(ssb_index, 0);
      // May do runtime call so everything to home locations.
      FlushAllRegs();
      // Using fixed register to sync with possible call to runtime support.
      int r_method = TargetReg(kArg1);
      LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
      rBase = TargetReg(kArg0);
      LockTemp(rBase);
      LoadWordDisp(r_method,
                   mirror::AbstractMethod::DexCacheInitializedStaticStorageOffset().Int32Value(),
                   rBase);
      LoadWordDisp(rBase, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                   sizeof(int32_t*) * ssb_index, rBase);
      // rBase now points at appropriate static storage base (Class*)
      // or NULL if not initialized.  Check for NULL and call helper if NULL.
      // TUNING: fast path should fall through
      LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
      CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
      if (cu_->instruction_set == kMips) {
        // For Arm, kRet0 = kArg0 = rBase; for Mips we need an explicit copy.
        OpRegCopy(rBase, TargetReg(kRet0));
      }
      LIR* skip_target = NewLIR0(kPseudoTargetLabel);
      branch_over->target = skip_target;
      FreeTemp(r_method);
    }
    // rBase now holds static storage base
    RegLocation rl_result = EvalLoc(rl_dest, kAnyReg, true);
    if (is_volatile) {
      GenMemBarrier(kLoadLoad);
    }
    if (is_long_or_double) {
      LoadBaseDispWide(rBase, field_offset, rl_result.low_reg,
                       rl_result.high_reg, INVALID_SREG);
    } else {
      LoadWordDisp(rBase, field_offset, rl_result.low_reg);
    }
    FreeTemp(rBase);
    if (is_long_or_double) {
      StoreValueWide(rl_dest, rl_result);
    } else {
      StoreValue(rl_dest, rl_result);
    }
  } else {
    FlushAllRegs();  // Everything to home locations
    int getter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Static) :
        (is_object ? ENTRYPOINT_OFFSET(pGetObjStatic)
                   : ENTRYPOINT_OFFSET(pGet32Static));
    CallRuntimeHelperImm(getter_offset, field_idx, true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::HandleSuspendLaunchPads()
{
  int num_elems = suspend_launchpads_.Size();
  int helper_offset = ENTRYPOINT_OFFSET(pTestSuspendFromCode);
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = suspend_launchpads_.Get(i);
    LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[0]);
    current_dalvik_offset_ = lab->operands[1];
    AppendLIR(lab);
    int r_tgt = CallHelperSetup(helper_offset);
    CallHelper(r_tgt, helper_offset, true /* MarkSafepointPC */);
    OpUnconditionalBranch(resume_lab);
  }
}

void Mir2Lir::HandleIntrinsicLaunchPads()
{
  int num_elems = intrinsic_launchpads_.Size();
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = intrinsic_launchpads_.Get(i);
    CallInfo* info = reinterpret_cast<CallInfo*>(lab->operands[0]);
    current_dalvik_offset_ = info->offset;
    AppendLIR(lab);
    // NOTE: GenInvoke handles MarkSafepointPC
    GenInvoke(info);
    LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[2]);
    if (resume_lab != NULL) {
      OpUnconditionalBranch(resume_lab);
    }
  }
}

void Mir2Lir::HandleThrowLaunchPads()
{
  int num_elems = throw_launchpads_.Size();
  for (int i = 0; i < num_elems; i++) {
    ResetRegPool();
    ResetDefTracking();
    LIR* lab = throw_launchpads_.Get(i);
    current_dalvik_offset_ = lab->operands[1];
    AppendLIR(lab);
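    // Launch pad operands, as packed by the Gen*Check routines above:
    // [0] throw kind, [1] dalvik offset, [2]/[3] optional values v1/v2.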
    int func_offset = 0;
    int v1 = lab->operands[2];
    int v2 = lab->operands[3];
    bool target_x86 = (cu_->instruction_set == kX86);
    switch (lab->operands[0]) {
      case kThrowNullPointer:
        func_offset = ENTRYPOINT_OFFSET(pThrowNullPointerFromCode);
        break;
      case kThrowConstantArrayBounds:
        // v1 is the length register on Arm/Mips (on x86 it is the array
        // pointer, so the length is reloaded from memory); v2 is the
        // constant index.
        if (target_x86) {
          OpRegMem(kOpMov, TargetReg(kArg1), v1, mirror::Array::LengthOffset().Int32Value());
        } else {
          OpRegCopy(TargetReg(kArg1), v1);
        }
        // Make sure the following LoadConstant doesn't mess with kArg1.
        LockTemp(TargetReg(kArg1));
        LoadConstant(TargetReg(kArg0), v2);
        func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
        break;
      case kThrowArrayBounds:
        // Move v1 (array index) to kArg0 and v2 (array length) to kArg1
        if (v2 != TargetReg(kArg0)) {
          OpRegCopy(TargetReg(kArg0), v1);
          if (target_x86) {
            // x86 leaves the array pointer in v2, so load the array length that the handler expects
            OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
          } else {
            OpRegCopy(TargetReg(kArg1), v2);
          }
        } else {
          if (v1 == TargetReg(kArg1)) {
            // Swap v1 and v2, using kArg2 as a temp
            OpRegCopy(TargetReg(kArg2), v1);
            if (target_x86) {
              // x86 leaves the array pointer in v2; load the array length that the handler expects
              OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
            } else {
              OpRegCopy(TargetReg(kArg1), v2);
            }
            OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
          } else {
            if (target_x86) {
              // x86 leaves the array pointer in v2; load the array length that the handler expects
              OpRegMem(kOpMov, TargetReg(kArg1), v2, mirror::Array::LengthOffset().Int32Value());
            } else {
              OpRegCopy(TargetReg(kArg1), v2);
            }
            OpRegCopy(TargetReg(kArg0), v1);
          }
        }
        func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
        break;
      case kThrowDivZero:
        func_offset = ENTRYPOINT_OFFSET(pThrowDivZeroFromCode);
        break;
      case kThrowNoSuchMethod:
        OpRegCopy(TargetReg(kArg0), v1);
        func_offset = ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode);
        break;
      case kThrowStackOverflow:
        func_offset = ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode);
        // Restore stack alignment
        if (target_x86) {
          OpRegImm(kOpAdd, TargetReg(kSp), frame_size_);
        } else {
          OpRegImm(kOpAdd, TargetReg(kSp), (num_core_spills_ + num_fp_spills_) * 4);
        }
        break;
      default:
        LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
    }
    ClobberCalleeSave();
    int r_tgt = CallHelperSetup(func_offset);
    CallHelper(r_tgt, func_offset, true /* MarkSafepointPC */);
  }
}

void Mir2Lir::GenIGet(uint32_t field_idx, int opt_flags, OpSize size,
                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object)
{
  int field_offset;
  bool is_volatile;

  bool fast_path = FastInstance(field_idx, field_offset, is_volatile, false);

  if (fast_path && !SLOW_FIELD_PATH) {
    RegLocation rl_result;
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(field_offset, 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      DCHECK(rl_dest.wide);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      if (cu_->instruction_set == kX86) {
        rl_result = EvalLoc(rl_dest, reg_class, true);
        GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
        LoadBaseDispWide(rl_obj.low_reg, field_offset, rl_result.low_reg,
                         rl_result.high_reg, rl_obj.s_reg_low);
        if (is_volatile) {
          GenMemBarrier(kLoadLoad);
        }
      } else {
        int reg_ptr = AllocTemp();
        OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
        rl_result = EvalLoc(rl_dest, reg_class, true);
        LoadBaseDispWide(reg_ptr, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
        if (is_volatile) {
          GenMemBarrier(kLoadLoad);
        }
        FreeTemp(reg_ptr);
      }
      StoreValueWide(rl_dest, rl_result);
    } else {
      rl_result = EvalLoc(rl_dest, reg_class, true);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      LoadBaseDisp(rl_obj.low_reg, field_offset, rl_result.low_reg,
                   kWord, rl_obj.s_reg_low);
      if (is_volatile) {
        GenMemBarrier(kLoadLoad);
      }
      StoreValue(rl_dest, rl_result);
    }
  } else {
    int getter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Instance) :
        (is_object ? ENTRYPOINT_OFFSET(pGetObjInstance)
                   : ENTRYPOINT_OFFSET(pGet32Instance));
    CallRuntimeHelperImmRegLocation(getter_offset, field_idx, rl_obj, true);
    if (is_long_or_double) {
      RegLocation rl_result = GetReturnWide(rl_dest.fp);
      StoreValueWide(rl_dest, rl_result);
    } else {
      RegLocation rl_result = GetReturn(rl_dest.fp);
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenIPut(uint32_t field_idx, int opt_flags, OpSize size,
                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
                      bool is_object)
{
  int field_offset;
  bool is_volatile;

  bool fast_path = FastInstance(field_idx, field_offset, is_volatile,
                                true);
  if (fast_path && !SLOW_FIELD_PATH) {
    RegisterClass reg_class = oat_reg_class_by_size(size);
    DCHECK_GE(field_offset, 0);
    rl_obj = LoadValue(rl_obj, kCoreReg);
    if (is_long_or_double) {
      int reg_ptr;
      rl_src = LoadValueWide(rl_src, kAnyReg);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      reg_ptr = AllocTemp();
      OpRegRegImm(kOpAdd, reg_ptr, rl_obj.low_reg, field_offset);
      if (is_volatile) {
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDispWide(reg_ptr, 0, rl_src.low_reg, rl_src.high_reg);
      if (is_volatile) {
        GenMemBarrier(kLoadLoad);
      }
      FreeTemp(reg_ptr);
    } else {
      rl_src = LoadValue(rl_src, reg_class);
      GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, opt_flags);
      if (is_volatile) {
        GenMemBarrier(kStoreStore);
      }
      StoreBaseDisp(rl_obj.low_reg, field_offset, rl_src.low_reg, kWord);
      if (is_volatile) {
        GenMemBarrier(kLoadLoad);
      }
      if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
        MarkGCCard(rl_src.low_reg, rl_obj.low_reg);
      }
    }
  } else {
    int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Instance) :
        (is_object ? ENTRYPOINT_OFFSET(pSetObjInstance)
                   : ENTRYPOINT_OFFSET(pSet32Instance));
    CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true);
  }
}

void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest)
{
  RegLocation rl_method = LoadCurrMethod();
  int res_reg = AllocTemp();
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                        *cu_->dex_file,
                                                        type_idx)) {
    // Call out to helper which resolves type and verifies access.
    // Resolved type returned in kRet0.
    CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
                            type_idx, rl_method.low_reg, true);
    RegLocation rl_result = GetReturn(false);
    StoreValue(rl_dest, rl_result);
  } else {
    // We don't need access checks; load type directly from the dex cache
    int32_t dex_cache_offset =
        mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value();
    LoadWordDisp(rl_method.low_reg, dex_cache_offset, res_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
        * type_idx);
    LoadWordDisp(res_reg, offset_of_type, rl_result.low_reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file,
        type_idx) || SLOW_TYPE_PATH) {
      // Slow path, at runtime test if type is null and if so initialize
      FlushAllRegs();
      LIR* branch1 = OpCmpImmBranch(kCondEq, rl_result.low_reg, 0, NULL);
      // Resolved, store and hop over following code
      StoreValue(rl_dest, rl_result);
      /*
       * Because we have stores of the target value on two paths,
       * clobber temp tracking for the destination using the ssa name
       */
      ClobberSReg(rl_dest.s_reg_low);
      LIR* branch2 = OpUnconditionalBranch(0);
      // TUNING: move slow path to end & remove unconditional branch
      LIR* target1 = NewLIR0(kPseudoTargetLabel);
      // Call out to helper, which will return resolved type in kArg0
      CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
                              rl_method.low_reg, true);
      RegLocation rl_result = GetReturn(false);
      StoreValue(rl_dest, rl_result);
      /*
       * Because we have stores of the target value on two paths,
       * clobber temp tracking for the destination using the ssa name
       */
      ClobberSReg(rl_dest.s_reg_low);
      // Rejoin code paths
      LIR* target2 = NewLIR0(kPseudoTargetLabel);
      branch1->target = target1;
      branch2->target = target2;
    } else {
      // Fast path, we're done - just store result
      StoreValue(rl_dest, rl_result);
    }
  }
}

void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest)
{
  /* NOTE: Most strings should be available at compile time */
  int32_t offset_of_string = mirror::Array::DataOffset(sizeof(mirror::String*)).Int32Value() +
      (sizeof(mirror::String*) * string_idx);
  if (!cu_->compiler_driver->CanAssumeStringIsPresentInDexCache(
      *cu_->dex_file, string_idx) || SLOW_STRING_PATH) {
    // Slow path, resolve string if not in dex cache
    FlushAllRegs();
    LockCallTemps();  // Using explicit registers
    LoadCurrMethodDirect(TargetReg(kArg2));
    LoadWordDisp(TargetReg(kArg2),
                 mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0));
    // Might call out to helper, which will return resolved string in kRet0
    int r_tgt = CallHelperSetup(ENTRYPOINT_OFFSET(pResolveStringFromCode));
    LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
    LoadConstant(TargetReg(kArg1), string_idx);
    if (cu_->instruction_set == kThumb2) {
      OpRegImm(kOpCmp, TargetReg(kRet0), 0);  // Is resolved?
      GenBarrier();
      // For testing, always force through helper
      if (!EXERCISE_SLOWEST_STRING_PATH) {
        OpIT(kCondEq, "T");
      }
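      // With the IT block above (when emitted), the copy and helper call
      // below execute only if kRet0 == 0, i.e. the string is unresolved.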
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));  // .eq
      LIR* call_inst = OpReg(kOpBlx, r_tgt);  // .eq, helper(Method*, string_idx)
      MarkSafepointPC(call_inst);
      FreeTemp(r_tgt);
    } else if (cu_->instruction_set == kMips) {
      LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));  // .eq
      LIR* call_inst = OpReg(kOpBlx, r_tgt);
      MarkSafepointPC(call_inst);
      FreeTemp(r_tgt);
      LIR* target = NewLIR0(kPseudoTargetLabel);
      branch->target = target;
    } else {
      DCHECK_EQ(cu_->instruction_set, kX86);
      CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2),
                              TargetReg(kArg1), true);
    }
    GenBarrier();
    StoreValue(rl_dest, GetReturn(false));
  } else {
    RegLocation rl_method = LoadCurrMethod();
    int res_reg = AllocTemp();
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    LoadWordDisp(rl_method.low_reg,
                 mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), res_reg);
    LoadWordDisp(res_reg, offset_of_string, rl_result.low_reg);
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * Let helper function take care of everything.  Will
 * call Class::NewInstanceFromCode(type_idx, method);
 */
void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest)
{
  FlushAllRegs();  /* Everything to home location */
  // alloc will always check for resolution, do we also need to verify
  // access because the verifier was unable to?
  int func_offset;
  if (cu_->compiler_driver->CanAccessInstantiableTypeWithoutChecks(
      cu_->method_idx, *cu_->dex_file, type_idx)) {
    func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCode);
  } else {
    func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck);
  }
  CallRuntimeHelperImmMethod(func_offset, type_idx, true);
  RegLocation rl_result = GetReturn(false);
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenThrow(RegLocation rl_src)
{
  FlushAllRegs();
  CallRuntimeHelperRegLocation(ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
}

// For final classes there are no sub-classes to check and so we can answer the instance-of
// question with simple comparisons.
void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                                 RegLocation rl_src) {
  RegLocation object = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int result_reg = rl_result.low_reg;
  if (result_reg == object.low_reg) {
    result_reg = AllocTypedTemp(false, kCoreReg);
  }
  LoadConstant(result_reg, 0);  // assume false
  LIR* null_branchover = OpCmpImmBranch(kCondEq, object.low_reg, 0, NULL);

  int check_class = AllocTypedTemp(false, kCoreReg);
  int object_class = AllocTypedTemp(false, kCoreReg);

  LoadCurrMethodDirect(check_class);
  if (use_declaring_class) {
    LoadWordDisp(check_class, mirror::AbstractMethod::DeclaringClassOffset().Int32Value(),
                 check_class);
    LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class);
  } else {
    LoadWordDisp(check_class, mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 check_class);
    LoadWordDisp(object.low_reg, mirror::Object::ClassOffset().Int32Value(), object_class);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(check_class, offset_of_type, check_class);
  }

  LIR* ne_branchover = NULL;
  if (cu_->instruction_set == kThumb2) {
    OpRegReg(kOpCmp, check_class, object_class);  // Same?
    OpIT(kCondEq, "");  // if-convert the test
    LoadConstant(result_reg, 1);  // .eq case - load true
  } else {
    ne_branchover = OpCmpBranch(kCondNe, check_class, object_class, NULL);
    LoadConstant(result_reg, 1);  // eq case - load true
  }
  LIR* target = NewLIR0(kPseudoTargetLabel);
  null_branchover->target = target;
  if (ne_branchover != NULL) {
    ne_branchover->target = target;
  }
  FreeTemp(object_class);
  FreeTemp(check_class);
  if (IsTemp(result_reg)) {
    OpRegCopy(rl_result.low_reg, result_reg);
    FreeTemp(result_reg);
  }
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                         bool type_known_abstract, bool use_declaring_class,
                                         bool can_assume_type_is_in_dex_cache,
                                         uint32_t type_idx, RegLocation rl_dest,
                                         RegLocation rl_src) {
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  int class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kArg0
    CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
                         type_idx, true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  } else if (use_declaring_class) {
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1),
                 mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
    LoadWordDisp(TargetReg(kArg1),
                 mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + (sizeof(mirror::Class*)
        * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!can_assume_type_is_in_dex_cache) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved
      // Call out to helper, which will return resolved type in kRet0
      CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true);
      OpRegCopy(TargetReg(kArg2), TargetReg(kRet0));  // Align usage with fast path
      LoadValueDirectFixed(rl_src, TargetReg(kArg0));  /* reload Ref */
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  /* kArg0 is ref, kArg2 is class.  If ref==null, use directly as bool result */
  RegLocation rl_result = GetReturn(false);
  if (cu_->instruction_set == kMips) {
    // On MIPS rArg0 != rl_result, place false in result if branch is taken.
    LoadConstant(rl_result.low_reg, 0);
  }
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);

  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
  /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
  LIR* branchover = NULL;
  if (type_known_final) {
    // rl_result == ref == null == 0.
    if (cu_->instruction_set == kThumb2) {
      OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
      OpIT(kCondEq, "E");  // if-convert the test
      LoadConstant(rl_result.low_reg, 1);  // .eq case - load true
      LoadConstant(rl_result.low_reg, 0);  // .ne case - load false
    } else {
      LoadConstant(rl_result.low_reg, 0);  // ne case - load false
      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1), TargetReg(kArg2), NULL);
      LoadConstant(rl_result.low_reg, 1);  // eq case - load true
    }
  } else {
    if (cu_->instruction_set == kThumb2) {
      int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
      if (!type_known_abstract) {
        /* Uses conditional nullification */
        OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2));  // Same?
        OpIT(kCondEq, "EE");  // if-convert the test
        LoadConstant(TargetReg(kArg0), 1);  // .eq case - load true
      }
      OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));  // .ne case - arg0 <= class
      OpReg(kOpBlx, r_tgt);  // .ne case: helper(class, ref->class)
      FreeTemp(r_tgt);
    } else {
      if (!type_known_abstract) {
        /* Uses branchovers */
        LoadConstant(rl_result.low_reg, 1);  // assume true
        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
      }
      if (cu_->instruction_set != kX86) {
        int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
        OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));  // .ne case - arg0 <= class
        OpReg(kOpBlx, r_tgt);  // .ne case: helper(class, ref->class)
        FreeTemp(r_tgt);
      } else {
        OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
        OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
      }
    }
  }
  // TODO: only clobber when type isn't final?
  ClobberCalleeSave();
  /* branch targets here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  StoreValue(rl_dest, rl_result);
  branch1->target = target;
  if (branchover != NULL) {
    branchover->target = target;
  }
}

void Mir2Lir::GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) {
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  bool can_assume_type_is_in_dex_cache = !needs_access_check &&
      cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx);

  if ((use_declaring_class || can_assume_type_is_in_dex_cache) && type_known_final) {
    GenInstanceofFinal(use_declaring_class, type_idx, rl_dest, rl_src);
  } else {
    GenInstanceofCallingHelper(needs_access_check, type_known_final, type_known_abstract,
                               use_declaring_class, can_assume_type_is_in_dex_cache,
                               type_idx, rl_dest, rl_src);
  }
}

void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src)
{
  bool type_known_final, type_known_abstract, use_declaring_class;
  bool needs_access_check = !cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx,
                                                                              *cu_->dex_file,
                                                                              type_idx,
                                                                              &type_known_final,
                                                                              &type_known_abstract,
                                                                              &use_declaring_class);
  // Note: currently type_known_final is unused, as optimizing will only improve the performance
  // of the exception throw path.
  DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit();
  const MethodReference mr(cu->GetDexFile(), cu->GetDexMethodIndex());
  if (!needs_access_check && cu_->compiler_driver->IsSafeCast(mr, insn_idx)) {
    // Verifier type analysis proved this check cast would never cause an exception.
    return;
  }
  FlushAllRegs();
  // May generate a call - use explicit registers
  LockCallTemps();
  LoadCurrMethodDirect(TargetReg(kArg1));  // kArg1 <= current Method*
  int class_reg = TargetReg(kArg2);  // kArg2 will hold the Class*
  if (needs_access_check) {
    // Check we have access to type_idx and if not throw IllegalAccessError,
    // returns Class* in kRet0
    // InitializeTypeAndVerifyAccess(idx, method)
    CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
                            type_idx, TargetReg(kArg1), true);
    OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
  } else if (use_declaring_class) {
    LoadWordDisp(TargetReg(kArg1),
                 mirror::AbstractMethod::DeclaringClassOffset().Int32Value(), class_reg);
  } else {
    // Load dex cache entry into class_reg (kArg2)
    LoadWordDisp(TargetReg(kArg1),
                 mirror::AbstractMethod::DexCacheResolvedTypesOffset().Int32Value(), class_reg);
    int32_t offset_of_type =
        mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() +
        (sizeof(mirror::Class*) * type_idx);
    LoadWordDisp(class_reg, offset_of_type, class_reg);
    if (!cu_->compiler_driver->CanAssumeTypeIsPresentInDexCache(*cu_->dex_file, type_idx)) {
      // Need to test presence of type in dex cache at runtime
      LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
      // Not resolved
      // Call out to helper, which will return resolved type in kArg0
      // InitializeTypeFromCode(idx, method)
      CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
                              TargetReg(kArg1), true);
      OpRegCopy(class_reg, TargetReg(kRet0));  // Align usage with fast path
      // Rejoin code paths
      LIR* hop_target = NewLIR0(kPseudoTargetLabel);
      hop_branch->target = hop_target;
    }
  }
  // At this point, class_reg (kArg2) has class
  LoadValueDirectFixed(rl_src, TargetReg(kArg0));  // kArg0 <= ref
  /* Null is OK - continue */
  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0), 0, NULL);
  /* load object->klass_ */
  DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
  LoadWordDisp(TargetReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetReg(kArg1));
  /* kArg1 now contains object->klass_ */
  LIR* branch2 = NULL;
  if (!type_known_abstract) {
    branch2 = OpCmpBranch(kCondEq, TargetReg(kArg1), class_reg, NULL);
  }
  CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1),
                          TargetReg(kArg2), true);
  /* branch target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch1->target = target;
  if (branch2 != NULL) {
    branch2->target = target;
  }
}

void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                           RegLocation rl_src1, RegLocation rl_src2)
{
  RegLocation rl_result;
  if (cu_->instruction_set == kThumb2) {
    /*
     * NOTE: This is the one place in the code in which we might have
     * as many as six live temporary registers.  There are five in the
     * normal set for Arm.  Until we have spill capabilities, temporarily
     * add lr to the temp set.  It is safe to do this locally, but note
     * that lr is used explicitly elsewhere in the code generator and
     * cannot normally be used as a general temp register.
     */
    MarkTemp(TargetReg(kLr));  // Add lr to the temp pool
    FreeTemp(TargetReg(kLr));  // and make it available
  }
  rl_src1 = LoadValueWide(rl_src1, kCoreReg);
  rl_src2 = LoadValueWide(rl_src2, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // The longs may overlap - use intermediate temp if so
  if ((rl_result.low_reg == rl_src1.high_reg) || (rl_result.low_reg == rl_src2.high_reg)) {
    int t_reg = AllocTemp();
    OpRegRegReg(first_op, t_reg, rl_src1.low_reg, rl_src2.low_reg);
    OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg, rl_src2.high_reg);
    OpRegCopy(rl_result.low_reg, t_reg);
    FreeTemp(t_reg);
  } else {
    OpRegRegReg(first_op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
    OpRegRegReg(second_op, rl_result.high_reg, rl_src1.high_reg,
                rl_src2.high_reg);
  }
  /*
   * NOTE: If rl_dest refers to a frame variable in a large frame, the
   * following StoreValueWide might need to allocate a temp register.
   * To further work around the lack of a spill capability, explicitly
   * free any temps from rl_src1 & rl_src2 that aren't still live in rl_result.
   * Remove when spill is functional.
   */
  FreeRegLocTemps(rl_result, rl_src1);
  FreeRegLocTemps(rl_result, rl_src2);
  StoreValueWide(rl_dest, rl_result);
  if (cu_->instruction_set == kThumb2) {
    Clobber(TargetReg(kLr));
    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool
  }
}


void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_shift)
{
  int func_offset = -1;  // Make gcc happy

  switch (opcode) {
    case Instruction::SHL_LONG:
    case Instruction::SHL_LONG_2ADDR:
      func_offset = ENTRYPOINT_OFFSET(pShlLong);
      break;
    case Instruction::SHR_LONG:
    case Instruction::SHR_LONG_2ADDR:
      func_offset = ENTRYPOINT_OFFSET(pShrLong);
      break;
    case Instruction::USHR_LONG:
    case Instruction::USHR_LONG_2ADDR:
      func_offset = ENTRYPOINT_OFFSET(pUshrLong);
      break;
    default:
      LOG(FATAL) << "Unexpected case";
  }
  FlushAllRegs();  /* Send everything to home location */
  CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_shift, false);
  RegLocation rl_result = GetReturnWide(false);
  StoreValueWide(rl_dest, rl_result);
}


void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2)
{
  OpKind op = kOpBkpt;
  bool is_div_rem = false;
  bool check_zero = false;
  bool unary = false;
  RegLocation rl_result;
  bool shift_op = false;
  switch (opcode) {
    case Instruction::NEG_INT:
      op = kOpNeg;
      unary = true;
      break;
    case Instruction::NOT_INT:
      op = kOpMvn;
      unary = true;
      break;
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
      op = kOpAdd;
      break;
    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      op = kOpSub;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
      op = kOpMul;
      break;
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
      check_zero = true;
      op = kOpDiv;
      is_div_rem = true;
      break;
    /* NOTE: returns in kArg1 */
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
      check_zero = true;
      op = kOpRem;
      is_div_rem = true;
      break;
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      op = kOpXor;
      break;
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      shift_op = true;
      op = kOpLsr;
      break;
    default:
      LOG(FATAL) << "Invalid word arith op: " << opcode;
  }
  if (!is_div_rem) {
    if (unary) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      OpRegReg(op, rl_result.low_reg, rl_src1.low_reg);
    } else {
      if (shift_op) {
        int t_reg = INVALID_REG;
        if (cu_->instruction_set == kX86) {
          // X86 doesn't require masking and must use ECX
          t_reg = TargetReg(kCount);  // rCX
          LoadValueDirectFixed(rl_src2, t_reg);
        } else {
          rl_src2 = LoadValue(rl_src2, kCoreReg);
          t_reg = AllocTemp();
          OpRegRegImm(kOpAnd, t_reg, rl_src2.low_reg, 31);
        }
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, t_reg);
        FreeTemp(t_reg);
      } else {
        rl_src1 = LoadValue(rl_src1, kCoreReg);
        rl_src2 = LoadValue(rl_src2, kCoreReg);
        rl_result = EvalLoc(rl_dest, kCoreReg, true);
        OpRegRegReg(op, rl_result.low_reg, rl_src1.low_reg, rl_src2.low_reg);
      }
    }
    StoreValue(rl_dest, rl_result);
  } else {
    if (cu_->instruction_set == kMips) {
      rl_src1 = LoadValue(rl_src1, kCoreReg);
      rl_src2 = LoadValue(rl_src2, kCoreReg);
      if (check_zero) {
        GenImmedCheck(kCondEq, rl_src2.low_reg, 0, kThrowDivZero);
      }
      rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
    } else {
      int func_offset = ENTRYPOINT_OFFSET(pIdivmod);
      FlushAllRegs();  /* Send everything to home location */
      LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
      int r_tgt = CallHelperSetup(func_offset);
      LoadValueDirectFixed(rl_src1, TargetReg(kArg0));
      if (check_zero) {
        GenImmedCheck(kCondEq, TargetReg(kArg1), 0, kThrowDivZero);
      }
      // NOTE: callout here is not a safepoint
      CallHelper(r_tgt, func_offset, false /* not a safepoint */);
      if (op == kOpDiv)
        rl_result = GetReturn(false);
      else
        rl_result = GetReturnAlt();
    }
    StoreValue(rl_dest, rl_result);
  }
}

/*
 * The following are the first-level codegen routines that analyze the format
 * of each bytecode then either dispatch special purpose codegen routines
 * or produce corresponding Thumb instructions directly.
 */

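// Note: (x & (x - 1)) == 0 also holds for x == 0; callers in this file only
// pass values >= 2 (or lit + 1 >= 3), so the zero case never arises.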
static bool IsPowerOfTwo(int x)
{
  return (x & (x - 1)) == 0;
}

// Returns true if no more than two bits are set in 'x'.
static bool IsPopCountLE2(unsigned int x)
{
  x &= x - 1;
  return (x & (x - 1)) == 0;
}

// Returns the index of the lowest set bit in 'x'.
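// Undefined for x == 0 (the loops below would not terminate); callers ensure
// the argument is nonzero.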
static int LowestSetBit(unsigned int x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) {
    bit_posn += 4;
    x >>= 4;
  }
  while ((x & 1) == 0) {
    bit_posn++;
    x >>= 1;
  }
  return bit_posn;
}

// Returns true if it added instructions to 'cu' to divide 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyDivide(Instruction::Code dalvik_opcode,
                               RegLocation rl_src, RegLocation rl_dest, int lit)
{
  if ((lit < 2) || ((cu_->instruction_set != kThumb2) && !IsPowerOfTwo(lit))) {
    return false;
  }
  // No divide instruction for Arm, so check for more special cases
  if ((cu_->instruction_set == kThumb2) && !IsPowerOfTwo(lit)) {
    return SmallLiteralDivide(dalvik_opcode, rl_src, rl_dest, lit);
  }
  int k = LowestSetBit(lit);
  if (k >= 30) {
    // Avoid special cases.
    return false;
  }
  bool div = (dalvik_opcode == Instruction::DIV_INT_LIT8 ||
              dalvik_opcode == Instruction::DIV_INT_LIT16);
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
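  // Signed division by 2^k must round toward zero, so negative dividends get
  // a bias of (2^k - 1) added before the arithmetic shift.  The bias is
  // derived from the sign: asr 31 then lsr (32 - k) yields 2^k - 1 for
  // negative inputs and 0 otherwise (for lit == 2 a single lsr 31 suffices).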
  if (div) {
    int t_reg = AllocTemp();
    if (lit == 2) {
      // Division by 2 is by far the most common division by constant.
      OpRegRegImm(kOpLsr, t_reg, rl_src.low_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
      OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
    } else {
      OpRegRegImm(kOpAsr, t_reg, rl_src.low_reg, 31);
      OpRegRegImm(kOpLsr, t_reg, t_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg, t_reg, rl_src.low_reg);
      OpRegRegImm(kOpAsr, rl_result.low_reg, t_reg, k);
    }
  } else {
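    // Remainder uses the same bias: rem = ((src + bias) & (lit - 1)) - bias,
    // which matches Java/C semantics where the result takes the dividend's
    // sign.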
    int t_reg1 = AllocTemp();
    int t_reg2 = AllocTemp();
    if (lit == 2) {
      OpRegRegImm(kOpLsr, t_reg1, rl_src.low_reg, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
    } else {
      OpRegRegImm(kOpAsr, t_reg1, rl_src.low_reg, 31);
      OpRegRegImm(kOpLsr, t_reg1, t_reg1, 32 - k);
      OpRegRegReg(kOpAdd, t_reg2, t_reg1, rl_src.low_reg);
      OpRegRegImm(kOpAnd, t_reg2, t_reg2, lit - 1);
      OpRegRegReg(kOpSub, rl_result.low_reg, t_reg2, t_reg1);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

// Returns true if it added instructions to 'cu' to multiply 'rl_src' by 'lit'
// and store the result in 'rl_dest'.
bool Mir2Lir::HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit)
{
  // Can we simplify this multiplication?
  bool power_of_two = false;
  bool pop_count_le2 = false;
  bool power_of_two_minus_one = false;
  if (lit < 2) {
    // Avoid special cases.
    return false;
  } else if (IsPowerOfTwo(lit)) {
    power_of_two = true;
  } else if (IsPopCountLE2(lit)) {
    pop_count_le2 = true;
  } else if (IsPowerOfTwo(lit + 1)) {
    power_of_two_minus_one = true;
  } else {
    return false;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (power_of_two) {
    // Shift.
    OpRegRegImm(kOpLsl, rl_result.low_reg, rl_src.low_reg, LowestSetBit(lit));
  } else if (pop_count_le2) {
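    // 'lit' has exactly two bits set here, so src * lit decomposes into
    // (src << first_bit) + (src << second_bit).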
1462 // Shift and add and shift.
1463 int first_bit = LowestSetBit(lit);
1464 int second_bit = LowestSetBit(lit ^ (1 << first_bit));
1465 GenMultiplyByTwoBitMultiplier(rl_src, rl_result, lit, first_bit, second_bit);
1466 } else {
1467 // Reverse subtract: (src << (shift + 1)) - src.
1468 DCHECK(power_of_two_minus_one);
1469 // TUNING: rsb dst, src, src lsl#LowestSetBit(lit + 1)
1470 int t_reg = AllocTemp();
1471 OpRegRegImm(kOpLsl, t_reg, rl_src.low_reg, LowestSetBit(lit + 1));
1472 OpRegRegReg(kOpSub, rl_result.low_reg, t_reg, rl_src.low_reg);
1473 }
1474 StoreValue(rl_dest, rl_result);
1475 return true;
1476}
1477
1478void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src,
1479 int lit)
1480{
1481 RegLocation rl_result;
1482 OpKind op = static_cast<OpKind>(0); /* Make gcc happy */
1483 int shift_op = false;
1484 bool is_div = false;
1485
1486 switch (opcode) {
1487 case Instruction::RSUB_INT_LIT8:
1488 case Instruction::RSUB_INT: {
1489 rl_src = LoadValue(rl_src, kCoreReg);
1490 rl_result = EvalLoc(rl_dest, kCoreReg, true);
      if (cu_->instruction_set == kThumb2) {
        OpRegRegImm(kOpRsub, rl_result.low_reg, rl_src.low_reg, lit);
      } else {
        OpRegReg(kOpNeg, rl_result.low_reg, rl_src.low_reg);
        OpRegImm(kOpAdd, rl_result.low_reg, lit);
      }
      StoreValue(rl_dest, rl_result);
      return;
    }

    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
      lit = -lit;
      // Intended fallthrough
    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
    case Instruction::ADD_INT_LIT8:
    case Instruction::ADD_INT_LIT16:
      op = kOpAdd;
      break;
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
    case Instruction::MUL_INT_LIT8:
    case Instruction::MUL_INT_LIT16: {
      if (HandleEasyMultiply(rl_src, rl_dest, lit)) {
        return;
      }
      op = kOpMul;
      break;
    }
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
    case Instruction::AND_INT_LIT8:
    case Instruction::AND_INT_LIT16:
      op = kOpAnd;
      break;
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
    case Instruction::OR_INT_LIT8:
    case Instruction::OR_INT_LIT16:
      op = kOpOr;
      break;
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
    case Instruction::XOR_INT_LIT8:
    case Instruction::XOR_INT_LIT16:
      op = kOpXor;
      break;
    case Instruction::SHL_INT_LIT8:
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
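      // Per Dalvik semantics, int shifts use only the low five bits of the
      // shift count (the same masking applies to SHR/USHR below).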
      lit &= 31;
      shift_op = true;
      op = kOpLsl;
      break;
    case Instruction::SHR_INT_LIT8:
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpAsr;
      break;
    case Instruction::USHR_INT_LIT8:
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      lit &= 31;
      shift_op = true;
      op = kOpLsr;
      break;

    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
    case Instruction::DIV_INT_LIT8:
    case Instruction::DIV_INT_LIT16:
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
    case Instruction::REM_INT_LIT8:
    case Instruction::REM_INT_LIT16: {
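      // A divide or remainder by a constant zero always throws; kCondAl
      // makes the branch to the throw launch pad unconditional.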
      if (lit == 0) {
        GenImmedCheck(kCondAl, 0, 0, kThrowDivZero);
        return;
      }
      if (HandleEasyDivide(opcode, rl_src, rl_dest, lit)) {
        return;
      }
      if ((opcode == Instruction::DIV_INT_LIT8) ||
          (opcode == Instruction::DIV_INT) ||
          (opcode == Instruction::DIV_INT_2ADDR) ||
          (opcode == Instruction::DIV_INT_LIT16)) {
        is_div = true;
      } else {
        is_div = false;
      }
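      // Mips can generate the divide in line; other targets call out to the
      // pIdivmod runtime helper with the dividend in kArg0.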
      if (cu_->instruction_set == kMips) {
        rl_src = LoadValue(rl_src, kCoreReg);
        rl_result = GenDivRemLit(rl_dest, rl_src.low_reg, lit, is_div);
      } else {
        FlushAllRegs();  /* Everything to home location */
        LoadValueDirectFixed(rl_src, TargetReg(kArg0));
        Clobber(TargetReg(kArg0));
        int func_offset = ENTRYPOINT_OFFSET(pIdivmod);
        CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
        if (is_div)
          rl_result = GetReturn(false);
        else
          rl_result = GetReturnAlt();
      }
      StoreValue(rl_dest, rl_result);
      return;
    }
    default:
      LOG(FATAL) << "Unexpected opcode " << opcode;
  }
  rl_src = LoadValue(rl_src, kCoreReg);
  rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // Avoid shifts by a literal 0 - Thumb has no encoding for them, so emit a copy instead.
  if (shift_op && (lit == 0)) {
    OpRegCopy(rl_result.low_reg, rl_src.low_reg);
  } else {
    OpRegRegImm(op, rl_result.low_reg, rl_src.low_reg, lit);
  }
  StoreValue(rl_dest, rl_result);
}

void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2)
{
  RegLocation rl_result;
  OpKind first_op = kOpBkpt;
  OpKind second_op = kOpBkpt;
  bool call_out = false;
  bool check_zero = false;
  int func_offset;
  int ret_reg = TargetReg(kRet0);

  switch (opcode) {
    case Instruction::NOT_LONG:
      rl_src2 = LoadValueWide(rl_src2, kCoreReg);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      // Check for destructive overlap
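      // (if rl_result.low_reg aliases rl_src2.high_reg, the first MVN below
      // would clobber the high word before it is read, so stage it in a temp).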
      if (rl_result.low_reg == rl_src2.high_reg) {
        int t_reg = AllocTemp();
        OpRegCopy(t_reg, rl_src2.high_reg);
        OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
        OpRegReg(kOpMvn, rl_result.high_reg, t_reg);
        FreeTemp(t_reg);
      } else {
        OpRegReg(kOpMvn, rl_result.low_reg, rl_src2.low_reg);
        OpRegReg(kOpMvn, rl_result.high_reg, rl_src2.high_reg);
      }
      StoreValueWide(rl_dest, rl_result);
      return;
    case Instruction::ADD_LONG:
    case Instruction::ADD_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenAddLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpAdd;
      second_op = kOpAdc;
      break;
    case Instruction::SUB_LONG:
    case Instruction::SUB_LONG_2ADDR:
      if (cu_->instruction_set != kThumb2) {
        GenSubLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpSub;
      second_op = kOpSbc;
      break;
    case Instruction::MUL_LONG:
    case Instruction::MUL_LONG_2ADDR:
      if (cu_->instruction_set == kThumb2) {
        GenMulLong(rl_dest, rl_src1, rl_src2);
        return;
      } else {
        call_out = true;
        ret_reg = TargetReg(kRet0);
        func_offset = ENTRYPOINT_OFFSET(pLmul);
      }
      break;
    case Instruction::DIV_LONG:
    case Instruction::DIV_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      ret_reg = TargetReg(kRet0);
      func_offset = ENTRYPOINT_OFFSET(pLdiv);
      break;
    case Instruction::REM_LONG:
    case Instruction::REM_LONG_2ADDR:
      call_out = true;
      check_zero = true;
      func_offset = ENTRYPOINT_OFFSET(pLdivmod);
      /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
      ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0);
      break;
    case Instruction::AND_LONG_2ADDR:
    case Instruction::AND_LONG:
      if (cu_->instruction_set == kX86) {
        return GenAndLong(rl_dest, rl_src1, rl_src2);
      }
      first_op = kOpAnd;
      second_op = kOpAnd;
      break;
    case Instruction::OR_LONG:
    case Instruction::OR_LONG_2ADDR:
      if (cu_->instruction_set == kX86) {
        GenOrLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpOr;
      second_op = kOpOr;
      break;
    case Instruction::XOR_LONG:
    case Instruction::XOR_LONG_2ADDR:
      if (cu_->instruction_set == kX86) {
        GenXorLong(rl_dest, rl_src1, rl_src2);
        return;
      }
      first_op = kOpXor;
      second_op = kOpXor;
      break;
    case Instruction::NEG_LONG: {
      GenNegLong(rl_dest, rl_src2);
      return;
    }
    default:
      LOG(FATAL) << "Invalid long arith op";
  }
  if (!call_out) {
    GenLong3Addr(first_op, second_op, rl_dest, rl_src1, rl_src2);
  } else {
    FlushAllRegs();  /* Send everything to home location */
    if (check_zero) {
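      // The divisor goes to kArg2/kArg3 and is zero-checked before the
      // dividend is loaded; note the callout below is not a safepoint.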
      LoadValueDirectWideFixed(rl_src2, TargetReg(kArg2), TargetReg(kArg3));
      int r_tgt = CallHelperSetup(func_offset);
      GenDivZeroCheck(TargetReg(kArg2), TargetReg(kArg3));
      LoadValueDirectWideFixed(rl_src1, TargetReg(kArg0), TargetReg(kArg1));
      // NOTE: callout here is not a safepoint
      CallHelper(r_tgt, func_offset, false /* not safepoint */);
    } else {
      CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
    }
    // Adjust return regs to handle the case of rem returning kArg2/kArg3
    if (ret_reg == TargetReg(kRet0))
      rl_result = GetReturnWide(false);
    else
      rl_result = GetReturnWideAlt();
    StoreValueWide(rl_dest, rl_result);
  }
}

void Mir2Lir::GenConversionCall(int func_offset,
                                RegLocation rl_dest, RegLocation rl_src)
{
  /*
   * Don't optimize register usage here; this calls out to a support
   * function.
   */
  FlushAllRegs();  /* Send everything to home location */
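  // Route the argument to the fixed core or FP argument registers; which
  // bank is used depends on whether the source value is a float.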
  if (rl_src.wide) {
    LoadValueDirectWideFixed(rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0),
                             rl_src.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
  } else {
    LoadValueDirectFixed(rl_src, rl_src.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
  }
  CallRuntimeHelperRegLocation(func_offset, rl_src, false);
  if (rl_dest.wide) {
    RegLocation rl_result;
    rl_result = GetReturnWide(rl_dest.fp);
    StoreValueWide(rl_dest, rl_result);
  } else {
    RegLocation rl_result;
    rl_result = GetReturn(rl_dest.fp);
    StoreValue(rl_dest, rl_result);
  }
}

/* Generate a suspend test if there may be a pending suspend request. */
void Mir2Lir::GenSuspendTest(int opt_flags)
{
  if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
    return;
  }
  FlushAllRegs();
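  // The suspend launch pad records the resume label (ret_lab) so the slow
  // path can branch back here after servicing the request.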
  LIR* branch = OpTestSuspend(NULL);
  LIR* ret_lab = NewLIR0(kPseudoTargetLabel);
  LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
                       reinterpret_cast<uintptr_t>(ret_lab), current_dalvik_offset_);
  branch->target = target;
  suspend_launchpads_.Insert(target);
}

/* Like GenSuspendTest, but branch to 'target' after any needed suspend test. */
void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target)
{
  if (NO_SUSPEND || (opt_flags & MIR_IGNORE_SUSPEND_CHECK)) {
    OpUnconditionalBranch(target);
    return;
  }
  OpTestSuspend(target);
  LIR* launch_pad =
      RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
             reinterpret_cast<uintptr_t>(target), current_dalvik_offset_);
  FlushAllRegs();
  OpUnconditionalBranch(launch_pad);
  suspend_launchpads_.Insert(launch_pad);
}

}  // namespace art