blob: 417fdca622b9c42d4592aaa5ce624853756d398e [file] [log] [blame]
buzbee67bf8852011-08-17 17:51:35 -07001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17//#define TESTMODE
18
19#ifdef TESTMODE
20#include "CalloutHelper.h"
21#endif
22
/* Sentinel RegLocation: marks a slot with no assigned location yet. */
static const RegLocation badLoc = {kLocDalvikFrame, 0, 0, INVALID_REG,
                                   INVALID_REG, INVALID_SREG, 0,
                                   kLocDalvikFrame, INVALID_REG, INVALID_REG,
                                   INVALID_OFFSET};
/* Canonical locations of the 32-bit and 64-bit Dalvik return values. */
static const RegLocation retLoc = LOC_DALVIK_RETURN_VAL;
static const RegLocation retLocWide = LOC_DALVIK_RETURN_VAL_WIDE;
29
30static void genNewArray(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
31 RegLocation rlSrc)
32{
33 oatFlushAllRegs(cUnit); /* All temps to home location */
34 void* classPtr = (void*)
35 (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vC]);
36 if (classPtr == NULL) {
37 LOG(FATAL) << "Unexpected null passPtr";
38 } else {
39 loadValueDirectFixed(cUnit, rlSrc, r1); /* get Len */
40 loadConstant(cUnit, r0, (int)classPtr);
41 }
42 // FIXME: need this to throw errNegativeArraySize
43 genRegImmCheck(cUnit, kArmCondMi, r1, 0, mir->offset, NULL);
44#ifdef TESTMODE
45// Hack until we get rSELF setup
46 loadConstant(cUnit, rLR, (int)dvmAllocArrayByClass);
47#else
48 loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pArtAllocArrayByClass),
49 rLR);
50#endif
51 loadConstant(cUnit, r2, ALLOC_DONT_TRACK);
52 newLIR1(cUnit, kThumbBlxR, rLR); // (arrayClass, length, allocFlags)
53 storeValue(cUnit, rlDest, retLoc);
54}
55
56/*
57 * Similar to genNewArray, but with post-allocation initialization.
58 * Verifier guarantees we're dealing with an array class. Current
59 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
60 * Current code also throws internal unimp if not 'L', '[' or 'I'.
61 */
62static void genFilledNewArray(CompilationUnit* cUnit, MIR* mir, bool isRange)
63{
64 DecodedInstruction* dInsn = &mir->dalvikInsn;
65 int elems;
66 int typeIndex;
67 if (isRange) {
68 elems = dInsn->vA;
69 typeIndex = dInsn->vB;
70 } else {
71 elems = dInsn->vB;
72 typeIndex = dInsn->vC;
73 }
74 oatFlushAllRegs(cUnit); /* All temps to home location */
75 void* classPtr = (void*)
76 (cUnit->method->clazz->pDvmDex->pResClasses[typeIndex]);
77 if (classPtr == NULL) {
78 LOG(FATAL) << "Unexpected null passPtr";
79 } else {
80 loadConstant(cUnit, r0, (int)classPtr);
81 loadConstant(cUnit, r1, elems);
82 }
83 if (elems < 0) {
84 LOG(FATAL) << "Unexpected empty array";
85 }
86 /*
87 * FIXME: Need a new NoThrow allocator that checks for and handles
88 * the above mentioned bad cases of 'D', 'J' or !('L' | '[' | 'I').
89 * That will keep us from wasting space generating an inline check here.
90 */
91#ifdef TESTMODE
92// Hack until we get rSELF setup
93 loadConstant(cUnit, rLR, (int)dvmAllocArrayByClass);
94#else
95 loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pArtAllocArrayByClass),
96 rLR);
97#endif
98 loadConstant(cUnit, r2, ALLOC_DONT_TRACK);
99 newLIR1(cUnit, kThumbBlxR, rLR); // (arrayClass, length, allocFlags)
100 // Reserve ret0 (r0) - we'll use it in place.
101 oatLockTemp(cUnit, r0);
102 // Having a range of 0 is legal
103 if (isRange && (dInsn->vA > 0)) {
104 /*
105 * Bit of ugliness here. We're going generate a mem copy loop
106 * on the register range, but it is possible that some regs
107 * in the range have been promoted. This is unlikely, but
108 * before generating the copy, we'll just force a flush
109 * of any regs in the source range that have been promoted to
110 * home location.
111 */
112 for (unsigned int i = 0; i < dInsn->vA; i++) {
113 RegLocation loc = oatUpdateLoc(cUnit,
114 oatGetSrc(cUnit, mir, i));
115 if (loc.location == kLocPhysReg) {
116 storeBaseDisp(cUnit, rSP, loc.spOffset, loc.lowReg, kWord);
117 }
118 }
119 /*
120 * TUNING note: generated code here could be much improved, but
121 * this is an uncommon operation and isn't especially performance
122 * critical.
123 */
124 int rSrc = oatAllocTemp(cUnit);
125 int rDst = oatAllocTemp(cUnit);
126 int rIdx = oatAllocTemp(cUnit);
127 int rVal = rLR; // Using a lot of temps, rLR is known free here
128 // Set up source pointer
129 RegLocation rlFirst = oatGetSrc(cUnit, mir, 0);
130 opRegRegImm(cUnit, kOpAdd, rSrc, rSP, rlFirst.spOffset);
131 // Set up the target pointer
132 opRegRegImm(cUnit, kOpAdd, rDst, r0,
133 OFFSETOF_MEMBER(ArrayObject, contents));
134 // Set up the loop counter (known to be > 0)
135 loadConstant(cUnit, rIdx, dInsn->vA);
136 // Generate the copy loop. Going backwards for convenience
137 ArmLIR* target = newLIR0(cUnit, kArmPseudoTargetLabel);
138 target->defMask = ENCODE_ALL;
139 // Copy next element
140 loadBaseIndexed(cUnit, rSrc, rIdx, rVal, 2, kWord);
141 storeBaseIndexed(cUnit, rDst, rIdx, rVal, 2, kWord);
142 // Use setflags encoding here
143 newLIR3(cUnit, kThumb2SubsRRI12, rIdx, rIdx, 1);
144 ArmLIR* branch = opCondBranch(cUnit, kArmCondNe);
145 branch->generic.target = (LIR*)target;
146 } else if (!isRange) {
147 // TUNING: interleave
148 for (unsigned int i = 0; i < dInsn->vA; i++) {
149 RegLocation rlArg = loadValue(cUnit,
150 oatGetSrc(cUnit, mir, i), kCoreReg);
151 storeBaseDisp(cUnit, r0, OFFSETOF_MEMBER(ArrayObject, contents) +
152 i * 4, rlArg.lowReg, kWord);
153 // If the loadValue caused a temp to be allocated, free it
154 if (oatIsTemp(cUnit, rlArg.lowReg)) {
155 oatFreeTemp(cUnit, rlArg.lowReg);
156 }
157 }
158 }
159}
160
/*
 * Generate code for a 32-bit SPUT (including the _OBJECT and _VOLATILE
 * variants).  The StaticField is resolved at compile time; the store goes
 * directly to &field->value.  For object puts, the field's declaring class
 * is loaded first so the GC card can be marked after the store.
 */
static void genSput(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
    int valOffset = OFFSETOF_MEMBER(StaticField, value);
    int tReg = oatAllocTemp(cUnit);
    int objHead;
    bool isVolatile;
    bool isSputObject;
    /* If this MIR was inlined, resolve against the callee's dex tables */
    const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
            mir->meta.calleeMethod : cUnit->method;
    void* fieldPtr = (void*)
            (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
    Opcode opcode = mir->dalvikInsn.opcode;

    if (fieldPtr == NULL) {
        // FIXME: need to handle this case for oat();
        UNIMPLEMENTED(FATAL);
    }

    /*
     * On SMP the rewriter has already turned volatile accesses into
     * _VOLATILE opcodes, so the opcode is authoritative; on non-SMP we
     * must consult the field's access flags directly.
     */
#if ANDROID_SMP != 0
    isVolatile = (opcode == OP_SPUT_VOLATILE) ||
                 (opcode == OP_SPUT_VOLATILE_JUMBO) ||
                 (opcode == OP_SPUT_OBJECT_VOLATILE) ||
                 (opcode == OP_SPUT_OBJECT_VOLATILE_JUMBO);
    assert(isVolatile == dvmIsVolatileField((Field *) fieldPtr));
#else
    isVolatile = dvmIsVolatileField((Field *) fieldPtr);
#endif

    isSputObject = (opcode == OP_SPUT_OBJECT) ||
                   (opcode == OP_SPUT_OBJECT_VOLATILE);

    rlSrc = oatGetSrc(cUnit, mir, 0);
    rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
    loadConstant(cUnit, tReg, (int) fieldPtr);
    if (isSputObject) {
        /* Grab the declaring class before tReg is freed below */
        objHead = oatAllocTemp(cUnit);
        loadWordDisp(cUnit, tReg, OFFSETOF_MEMBER(Field, clazz), objHead);
    }
    storeWordDisp(cUnit, tReg, valOffset ,rlSrc.lowReg);
    oatFreeTemp(cUnit, tReg);
    /* Volatile store: full barrier after the store */
    if (isVolatile) {
        oatGenMemBarrier(cUnit, kSY);
    }
    if (isSputObject) {
        /* NOTE: marking card based sfield->clazz */
        markGCCard(cUnit, rlSrc.lowReg, objHead);
        oatFreeTemp(cUnit, objHead);
    }
}
210
/*
 * Generate code for a 64-bit SPUT.  Resolves the StaticField at compile
 * time and emits a store-pair directly to &field->value.
 * NOTE(review): unlike genSput, this path does no volatile handling and
 * no GC card marking, and tReg is not explicitly freed — confirm wide
 * statics never need either and that temps are reset per-MIR elsewhere.
 */
static void genSputWide(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
    int tReg = oatAllocTemp(cUnit);
    int valOffset = OFFSETOF_MEMBER(StaticField, value);
    /* If this MIR was inlined, resolve against the callee's dex tables */
    const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
            mir->meta.calleeMethod : cUnit->method;
    void* fieldPtr = (void*)
            (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);

    if (fieldPtr == NULL) {
        // FIXME: need to handle this case for oat();
        UNIMPLEMENTED(FATAL);
    }

    rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
    rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
    /* tReg <- absolute address of the field's value slot */
    loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);

    storePair(cUnit, tReg, rlSrc.lowReg, rlSrc.highReg);
}
231
232
233
/*
 * Generate code for a 64-bit SGET: load a register pair straight from
 * &field->value of the compile-time-resolved StaticField, then store the
 * pair to the wide destination.
 */
static void genSgetWide(CompilationUnit* cUnit, MIR* mir,
                        RegLocation rlResult, RegLocation rlDest)
{
    int valOffset = OFFSETOF_MEMBER(StaticField, value);
    /* If this MIR was inlined, resolve against the callee's dex tables */
    const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
            mir->meta.calleeMethod : cUnit->method;
    void* fieldPtr = (void*)
            (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);

    if (fieldPtr == NULL) {
        // FIXME: need to handle this case for oat();
        UNIMPLEMENTED(FATAL);
    }

    int tReg = oatAllocTemp(cUnit);
    rlDest = oatGetDestWide(cUnit, mir, 0, 1);
    rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
    /* tReg <- absolute address of the field's value slot */
    loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);

    loadPair(cUnit, tReg, rlResult.lowReg, rlResult.highReg);

    storeValueWide(cUnit, rlDest, rlResult);
}
257
/*
 * Generate code for a 32-bit SGET (including _OBJECT/_VOLATILE variants).
 * Loads directly from &field->value of the compile-time-resolved field.
 */
static void genSget(CompilationUnit* cUnit, MIR* mir,
                    RegLocation rlResult, RegLocation rlDest)
{
    int valOffset = OFFSETOF_MEMBER(StaticField, value);
    int tReg = oatAllocTemp(cUnit);
    bool isVolatile;
    const Method *method = cUnit->method;
    void* fieldPtr = (void*)
            (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);

    if (fieldPtr == NULL) {
        // FIXME: need to handle this case for oat();
        UNIMPLEMENTED(FATAL);
    }

    /*
     * On SMP systems, Dalvik opcodes found to be referencing
     * volatile fields are rewritten to their _VOLATILE variant.
     * However, this does not happen on non-SMP systems. The compiler
     * still needs to know about volatility to avoid unsafe
     * optimizations so we determine volatility based on either
     * the opcode or the field access flags.
     */
#if ANDROID_SMP != 0
    Opcode opcode = mir->dalvikInsn.opcode;
    isVolatile = (opcode == OP_SGET_VOLATILE) ||
                 (opcode == OP_SGET_OBJECT_VOLATILE);
    assert(isVolatile == dvmIsVolatileField((Field *) fieldPtr));
#else
    isVolatile = dvmIsVolatileField((Field *) fieldPtr);
#endif

    rlDest = oatGetDest(cUnit, mir, 0);
    rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
    loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);

    /*
     * NOTE(review): the barrier is emitted BEFORE the load here; acquire
     * semantics for a volatile read would normally want one after the
     * load as well — confirm the intended memory-ordering contract.
     */
    if (isVolatile) {
        oatGenMemBarrier(cUnit, kSY);
    }
    loadWordDisp(cUnit, tReg, 0, rlResult.lowReg);

    storeValue(cUnit, rlDest, rlResult);
}
301
302typedef int (*NextCallInsn)(CompilationUnit*, MIR*, DecodedInstruction*, int);
303
304/*
 305 * Bit of a hack here - in lieu of a real scheduling pass,
306 * emit the next instruction in static & direct invoke sequences.
307 */
/*
 * Emit one step of the static/direct invoke setup sequence and return the
 * next state.  Callers keep invoking with the returned state until -1.
 * State progression: r0 = current Method* -> pResMethods -> target
 * Method* -> target code address in rLR.
 */
static int nextSDCallInsn(CompilationUnit* cUnit, MIR* mir,
                          DecodedInstruction* dInsn, int state)
{
    switch(state) {
    case 0:  // Get the current Method* [sets r0]
        loadBaseDisp(cUnit, mir, rSP, 0, r0, kWord, INVALID_SREG);
        break;
    case 1:  // Get the pResMethods pointer [uses r0, sets r0]
        loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, pResMethods),
                     r0, kWord, INVALID_SREG);
        break;
    case 2:  // Get the target Method* [uses r0, sets r0]
        loadBaseDisp(cUnit, mir, r0, dInsn->vB * 4, r0,
                     kWord, INVALID_SREG);
        break;
    case 3:  // Get the target compiled code address [uses r0, sets rLR]
        loadBaseDisp(cUnit, mir, r0,
                     OFFSETOF_MEMBER(Method, compiledInsns), rLR,
                     kWord, INVALID_SREG);
        break;
    default:
        return -1;   /* sequence complete */
    }
    return state + 1;
}
333
334/*
 335 * Bit of a hack here - in lieu of a real scheduling pass,
336 * emit the next instruction in a virtual invoke sequence.
337 * We can use rLR as a temp prior to target address loading
338 * Note also that we'll load the first argument ("this") into
339 * r1 here rather than the standard loadArgRegs.
340 */
static int nextVCallInsn(CompilationUnit* cUnit, MIR* mir,
                         DecodedInstruction* dInsn, int state)
{
    RegLocation rlArg;
    switch(state) {
    case 0:  // Get the current Method* [set r0]
        loadBaseDisp(cUnit, mir, rSP, 0, r0, kWord, INVALID_SREG);
        // Load "this" [set r1]
        rlArg = oatGetSrc(cUnit, mir, 0);
        loadValueDirectFixed(cUnit, rlArg, r1);
        break;
    case 1:  // Get the pResMethods pointer [use r0, set r12]
        loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, pResMethods),
                     r12, kWord, INVALID_SREG);
        // Is "this" null? [use r1]
        genNullCheck(cUnit, oatSSASrc(mir,0), r1,
                     mir->offset, NULL);
        break;
    case 2:  // Get the base Method* [use r12, set r0]
        loadBaseDisp(cUnit, mir, r12, dInsn->vB * 4, r0,
                     kWord, INVALID_SREG);
        // get this->clazz [use r1, set rLR]
        loadBaseDisp(cUnit, mir, r1, OFFSETOF_MEMBER(Object, clazz), rLR,
                     kWord, INVALID_SREG);
        break;
    case 3:  // Get the method index [use r0, set r12]
        loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, methodIndex),
                     r12, kUnsignedHalf, INVALID_SREG);
        // get this->clazz->vtable [use rLR, set rLR]
        loadBaseDisp(cUnit, mir, rLR,
                     OFFSETOF_MEMBER(ClassObject, vtable), rLR, kWord,
                     INVALID_SREG);
        break;
    case 4:  // get target Method* [use rLR, use r12, set r0]
        loadBaseIndexed(cUnit, rLR, r12, r0, 2, kWord);
        break;
    case 5:  // Get the target compiled code address [use r0, set rLR]
        loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, compiledInsns),
                     rLR, kWord, INVALID_SREG);
        break;
    default:
        return -1;   /* sequence complete */
    }
    return state + 1;
}
386
387/* Load up to 3 arguments in r1..r3 */
388static int loadArgRegs(CompilationUnit* cUnit, MIR* mir,
389 DecodedInstruction* dInsn, int callState,
390 int *args, NextCallInsn nextCallInsn)
391{
392 for (int i = 0; i < 3; i++) {
393 if (args[i] != INVALID_REG) {
394 RegLocation rlArg = oatGetSrc(cUnit, mir, i);
395 loadValueDirectFixed(cUnit, rlArg, r1 + i);
396 callState = nextCallInsn(cUnit, mir, dInsn, callState);
397 }
398 }
399 return callState;
400}
401
402/*
403 * Interleave launch code for INVOKE_INTERFACE. The target is
404 * identified using artFindInterfaceMethodInCache(class, ref, method, dex)
405 * Note that we'll have to reload "this" following the helper call.
406 *
407 * FIXME: do we need to have artFindInterfaceMethodInCache return
408 * a NULL if not found so we can throw exception here? Otherwise,
409 * may need to pass some additional info to allow the helper function
410 * to throw on its own.
411 */
static int nextInterfaceCallInsn(CompilationUnit* cUnit, MIR* mir,
                                 DecodedInstruction* dInsn, int state)
{
    RegLocation rlArg;
    switch(state) {
    case 0:
        // Load "this" [set r12]
        rlArg = oatGetSrc(cUnit, mir, 0);
        loadValueDirectFixed(cUnit, rlArg, r12);
        // Get the current Method* [set arg2]
        loadBaseDisp(cUnit, mir, rSP, 0, r2, kWord, INVALID_SREG);
        // Is "this" null? [use r12]
        genNullCheck(cUnit, oatSSASrc(mir,0), r12,
                     mir->offset, NULL);
        // Get curMethod->clazz [set arg3]
        loadBaseDisp(cUnit, mir, r2, OFFSETOF_MEMBER(Method, clazz),
                     r3, kWord, INVALID_SREG);
        /*
         * Load this->class [usr r12, set arg0]
         * NOTE(review): the comment says "set arg0" but the destination is
         * r3, which clobbers curMethod->clazz loaded into r3 just above —
         * looks like this should target r0; confirm against the helper's
         * expected (class, ref, method, dex) argument layout.
         */
        loadBaseDisp(cUnit, mir, r12, OFFSETOF_MEMBER(ClassObject, clazz),
                     r3, kWord, INVALID_SREG);
        // Load address of helper function
        loadBaseDisp(cUnit, mir, rSELF,
                     OFFSETOF_MEMBER(Thread, pArtFindInterfaceMethodInCache),
                     rLR, kWord, INVALID_SREG);
        // Get dvmDex
        loadBaseDisp(cUnit, mir, r3, OFFSETOF_MEMBER(ClassObject, pDvmDex),
                     r3, kWord, INVALID_SREG);
        // Load ref [set arg1]
        loadConstant(cUnit, r1, dInsn->vB);
        // Call out to helper, target Method returned in ret0
        newLIR1(cUnit, kThumbBlxR, rLR);
        break;
    case 1:  // Get the target compiled code address [use r0, set rLR]
        loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, compiledInsns),
                     rLR, kWord, INVALID_SREG);
        /*
         * NOTE(review): no break here — case 1 falls through to default and
         * returns -1, terminating the sequence after the final load.
         * Apparently intentional, but confirm before relying on it.
         */
    default:
        return -1;
    }
    return state + 1;
}
452
453
454/*
455 * Interleave launch code for INVOKE_SUPER. See comments
456 * for nextVCallIns.
457 */
static int nextSuperCallInsn(CompilationUnit* cUnit, MIR* mir,
                             DecodedInstruction* dInsn, int state)
{
    RegLocation rlArg;
    switch(state) {
    case 0:
        // Get the current Method* [set r0]
        loadBaseDisp(cUnit, mir, rSP, 0, r0, kWord, INVALID_SREG);
        // Load "this" [set r1]
        rlArg = oatGetSrc(cUnit, mir, 0);
        loadValueDirectFixed(cUnit, rlArg, r1);
        // Get method->clazz [use r0, set r12]
        loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, clazz),
                     r12, kWord, INVALID_SREG);
        // Get pResmethods [use r0, set rLR]
        loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, pResMethods),
                     rLR, kWord, INVALID_SREG);
        // Get clazz->super [use r12, set r12]
        loadBaseDisp(cUnit, mir, r12, OFFSETOF_MEMBER(ClassObject, super),
                     r12, kWord, INVALID_SREG);
        // Get base method [use rLR, set r0]
        loadBaseDisp(cUnit, mir, rLR, dInsn->vB * 4, r0,
                     kWord, INVALID_SREG);
        // Is "this" null? [use r1]
        genNullCheck(cUnit, oatSSASrc(mir,0), r1,
                     mir->offset, NULL);
        // Get methodIndex [use r0, set rLR]
        loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, methodIndex),
                     rLR, kUnsignedHalf, INVALID_SREG);
        // Get vtableCount [use r12, set r0]
        loadBaseDisp(cUnit, mir, r12,
                     OFFSETOF_MEMBER(ClassObject, vtableCount),
                     r0, kWord, INVALID_SREG);
        // Compare method index w/ vtable count [use r12, use rLR]
        genRegRegCheck(cUnit, kArmCondGe, rLR, r0, mir->offset, NULL);
        // get target Method* [use rLR, use r12, set r0]
        loadBaseIndexed(cUnit, r0, r12, rLR, 2, kWord);
        /*
         * NOTE(review): no break — case 0 falls through into case 1 and
         * then default, so the entire super-invoke sequence is emitted on
         * the first call and -1 is returned immediately.  Apparently
         * intentional for this state machine, but confirm.
         */
    case 1:  // Get the target compiled code address [use r0, set rLR]
        loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, compiledInsns),
                     rLR, kWord, INVALID_SREG);
    default:
        return -1;
    }
    return state + 1;
}
503
504/*
505 * Load up to 5 arguments, the first three of which will be in
506 * r1 .. r3. On entry r0 contains the current method pointer,
507 * and as part of the load sequence, it must be replaced with
508 * the target method pointer. Note, this may also be called
509 * for "range" variants if the number of arguments is 5 or fewer.
510 */
static int genDalvikArgsNoRange(CompilationUnit* cUnit, MIR* mir,
                                DecodedInstruction* dInsn, int callState,
                                ArmLIR** pcrLabel, bool isRange,
                                NextCallInsn nextCallInsn)
{
    RegLocation rlArg;
    int registerArgs[3];

    /* If no arguments, just return */
    if (dInsn->vA == 0)
        return callState;

    /* Fixed regs r0-r3/r12 are used by the call sequence; pin them */
    oatLockAllTemps(cUnit);
    callState = nextCallInsn(cUnit, mir, dInsn, callState);

    /*
     * Load frame arguments arg4 & arg5 first. Coded a little odd to
     * pre-schedule the method pointer target.
     */
    for (unsigned int i=3; i < dInsn->vA; i++) {
        int reg;
        /* Range form numbers args from vC; non-range uses slot index */
        int arg = (isRange) ? dInsn->vC + i : i;
        rlArg = oatUpdateLoc(cUnit, oatGetSrc(cUnit, mir, arg));
        if (rlArg.location == kLocPhysReg) {
            reg = rlArg.lowReg;   /* already live in a register */
        } else {
            /* Stage through r1, interleaving a call-setup step */
            reg = r1;
            loadValueDirectFixed(cUnit, rlArg, r1);
            callState = nextCallInsn(cUnit, mir, dInsn, callState);
        }
        /* Outgoing args start one word past the Method* slot */
        storeBaseDisp(cUnit, rSP, (i + 1) * 4, reg, kWord);
        callState = nextCallInsn(cUnit, mir, dInsn, callState);
    }

    /* Load register arguments r1..r3 */
    for (unsigned int i = 0; i < 3; i++) {
        if (i < dInsn->vA)
            registerArgs[i] = (isRange) ? dInsn->vC + i : i;
        else
            registerArgs[i] = INVALID_REG;
    }
    callState = loadArgRegs(cUnit, mir, dInsn, callState, registerArgs,
                            nextCallInsn);

    // Load direct & need a "this" null check?
    if (pcrLabel) {
        *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), r1,
                                 mir->offset, NULL);
    }
    return callState;
}
562
563/*
564 * May have 0+ arguments (also used for jumbo). Note that
565 * source virtual registers may be in physical registers, so may
566 * need to be flushed to home location before copying. This
567 * applies to arg3 and above (see below).
568 *
569 * Two general strategies:
570 * If < 20 arguments
571 * Pass args 3-18 using vldm/vstm block copy
572 * Pass arg0, arg1 & arg2 in r1-r3
573 * If 20+ arguments
574 * Pass args arg19+ using memcpy block copy
575 * Pass arg0, arg1 & arg2 in r1-r3
576 *
577 */
static int genDalvikArgsRange(CompilationUnit* cUnit, MIR* mir,
                              DecodedInstruction* dInsn, int callState,
                              ArmLIR** pcrLabel, NextCallInsn nextCallInsn)
{
    int firstArg = dInsn->vC;
    int numArgs = dInsn->vA;

    // If we can treat it as non-range (Jumbo ops will use range form)
    if (numArgs <= 5)
        return genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pcrLabel,
                                    true, nextCallInsn);
    /*
     * Make sure range list doesn't span the break between in normal
     * Dalvik vRegs and the ins.
     */
    int highestVreg = oatGetSrc(cUnit, mir, numArgs-1).sRegLow;
    if (highestVreg >= cUnit->method->registersSize - cUnit->method->insSize) {
        LOG(FATAL) << "Wide argument spanned locals & args";
    }

    /*
     * First load the non-register arguments. Both forms expect all
     * of the source arguments to be in their home frame location, so
     * scan the sReg names and flush any that have been promoted to
     * frame backing storage.
     */
    // Scan the rest of the args - if in physReg flush to memory
    // NOTE(review): loop starts at i = 4 while the block copy below begins
    // at uses[3]; confirm arg 3 never needs flushing here.
    for (int i = 4; i < numArgs; i++) {
        RegLocation loc = oatUpdateLoc(cUnit,
            oatGetSrc(cUnit, mir, i));
        if (loc.location == kLocPhysReg) {   // TUNING: if dirty?
            storeBaseDisp(cUnit, rSP, loc.spOffset, loc.lowReg, kWord);
            callState = nextCallInsn(cUnit, mir, dInsn, callState);
        }
    }

    int startOffset = cUnit->regLocation[mir->ssaRep->uses[3]].spOffset;
    int outsOffset = 4 /* Method* */ + (3 * 4);
    if (numArgs >= 20) {
        // Generate memcpy, but first make sure all of
        opRegRegImm(cUnit, kOpAdd, r0, rSP, startOffset);
        opRegRegImm(cUnit, kOpAdd, r1, rSP, outsOffset);
        loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pMemcpy), rLR);
        loadConstant(cUnit, r2, (numArgs - 3) * 4);
        newLIR1(cUnit, kThumbBlxR, rLR);
    } else {
        // Use vldm/vstm pair using r3 as a temp
        int regsLeft = MIN(numArgs - 3, 16);
        callState = nextCallInsn(cUnit, mir, dInsn, callState);
        opRegRegImm(cUnit, kOpAdd, r3, rSP, startOffset);
        newLIR3(cUnit, kThumb2Vldms, r3, fr0 & FP_REG_MASK, regsLeft);
        callState = nextCallInsn(cUnit, mir, dInsn, callState);
        opRegRegImm(cUnit, kOpAdd, r3, rSP, 4 /* Method* */ + (3 * 4));
        callState = nextCallInsn(cUnit, mir, dInsn, callState);
        newLIR3(cUnit, kThumb2Vstms, r3, fr0 & FP_REG_MASK, regsLeft);
        callState = nextCallInsn(cUnit, mir, dInsn, callState);
    }

    // Handle the 1st 3 in r1, r2 & r3
    for (unsigned int i = 0; i < dInsn->vA && i < 3; i++) {
        RegLocation loc = oatGetSrc(cUnit, mir, firstArg + i);
        loadValueDirectFixed(cUnit, loc, r1 + i);
        callState = nextCallInsn(cUnit, mir, dInsn, callState);
    }

    // Finally, deal with the register arguments
    // We'll be using fixed registers here
    oatLockAllTemps(cUnit);
    callState = nextCallInsn(cUnit, mir, dInsn, callState);
    return callState;
}
649
650static void genInvokeStatic(CompilationUnit* cUnit, MIR* mir)
651{
652 DecodedInstruction* dInsn = &mir->dalvikInsn;
653 int callState = 0;
654 if (mir->dalvikInsn.opcode == OP_INVOKE_STATIC) {
655 callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, NULL,
656 false, nextSDCallInsn);
657 } else {
658 callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, NULL,
659 nextSDCallInsn);
660 }
661 // Finish up any of the call sequence not interleaved in arg loading
662 while (callState >= 0) {
663 callState = nextSDCallInsn(cUnit, mir, dInsn, callState);
664 }
665 newLIR1(cUnit, kThumbBlxR, rLR);
666}
667
668static void genInvokeDirect(CompilationUnit* cUnit, MIR* mir)
669{
670 DecodedInstruction* dInsn = &mir->dalvikInsn;
671 int callState = 0;
672 ArmLIR* nullCk;
673 if (mir->dalvikInsn.opcode == OP_INVOKE_DIRECT)
674 callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
675 false, nextSDCallInsn);
676 else
677 callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
678 nextSDCallInsn);
679 // Finish up any of the call sequence not interleaved in arg loading
680 while (callState >= 0) {
681 callState = nextSDCallInsn(cUnit, mir, dInsn, callState);
682 }
683 newLIR1(cUnit, kThumbBlxR, rLR);
684}
685
686static void genInvokeInterface(CompilationUnit* cUnit, MIR* mir)
687{
688 DecodedInstruction* dInsn = &mir->dalvikInsn;
689 int callState = 0;
690 ArmLIR* nullCk;
691 /* Note: must call nextInterfaceCallInsn() prior to 1st argument load */
692 callState = nextInterfaceCallInsn(cUnit, mir, dInsn, callState);
693 if (mir->dalvikInsn.opcode == OP_INVOKE_INTERFACE)
694 callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
695 false, nextInterfaceCallInsn);
696 else
697 callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
698 nextInterfaceCallInsn);
699 // Finish up any of the call sequence not interleaved in arg loading
700 while (callState >= 0) {
701 callState = nextInterfaceCallInsn(cUnit, mir, dInsn, callState);
702 }
703 newLIR1(cUnit, kThumbBlxR, rLR);
704}
705
706static void genInvokeSuper(CompilationUnit* cUnit, MIR* mir)
707{
708 DecodedInstruction* dInsn = &mir->dalvikInsn;
709 int callState = 0;
710 ArmLIR* nullCk;
711// FIXME - redundantly loading arg0/r1 ("this")
712 if (mir->dalvikInsn.opcode == OP_INVOKE_SUPER)
713 callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
714 false, nextSuperCallInsn);
715 else
716 callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
717 nextSuperCallInsn);
718 // Finish up any of the call sequence not interleaved in arg loading
719 while (callState >= 0) {
720 callState = nextSuperCallInsn(cUnit, mir, dInsn, callState);
721 }
722 newLIR1(cUnit, kThumbBlxR, rLR);
723}
724
725static void genInvokeVirtual(CompilationUnit* cUnit, MIR* mir)
726{
727 DecodedInstruction* dInsn = &mir->dalvikInsn;
728 int callState = 0;
729 ArmLIR* nullCk;
730// FIXME - redundantly loading arg0/r1 ("this")
731 if (mir->dalvikInsn.opcode == OP_INVOKE_VIRTUAL)
732 callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
733 false, nextVCallInsn);
734 else
735 callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
736 nextVCallInsn);
737 // Finish up any of the call sequence not interleaved in arg loading
738 while (callState >= 0) {
739 callState = nextVCallInsn(cUnit, mir, dInsn, callState);
740 }
741 newLIR1(cUnit, kThumbBlxR, rLR);
742}
743
744// TODO: break out the case handlers. Might make it easier to support x86
745static bool compileDalvikInstruction(CompilationUnit* cUnit, MIR* mir,
746 BasicBlock* bb, ArmLIR* labelList)
747{
748 bool res = false; // Assume success
749 RegLocation rlSrc[3];
750 RegLocation rlDest = badLoc;
751 RegLocation rlResult = badLoc;
752 Opcode opcode = mir->dalvikInsn.opcode;
753
754 /* Prep Src and Dest locations */
755 int nextSreg = 0;
756 int nextLoc = 0;
757 int attrs = oatDataFlowAttributes[opcode];
758 rlSrc[0] = rlSrc[1] = rlSrc[2] = badLoc;
759 if (attrs & DF_UA) {
760 rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
761 nextSreg++;
762 } else if (attrs & DF_UA_WIDE) {
763 rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
764 nextSreg + 1);
765 nextSreg+= 2;
766 }
767 if (attrs & DF_UB) {
768 rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
769 nextSreg++;
770 } else if (attrs & DF_UB_WIDE) {
771 rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
772 nextSreg + 1);
773 nextSreg+= 2;
774 }
775 if (attrs & DF_UC) {
776 rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
777 } else if (attrs & DF_UC_WIDE) {
778 rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
779 nextSreg + 1);
780 }
781 if (attrs & DF_DA) {
782 rlDest = oatGetDest(cUnit, mir, 0);
783 } else if (attrs & DF_DA_WIDE) {
784 rlDest = oatGetDestWide(cUnit, mir, 0, 1);
785 }
786
787 switch(opcode) {
788 case OP_NOP:
789 break;
790
791 case OP_MOVE_EXCEPTION:
792 int exOffset;
793 int resetReg;
794 exOffset = OFFSETOF_MEMBER(Thread, exception);
795 resetReg = oatAllocTemp(cUnit);
796 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
797 loadWordDisp(cUnit, rSELF, exOffset, rlResult.lowReg);
798 loadConstant(cUnit, resetReg, 0);
799 storeWordDisp(cUnit, rSELF, exOffset, resetReg);
800 storeValue(cUnit, rlDest, rlResult);
801 break;
802
803 case OP_RETURN_VOID:
804 break;
805
806 case OP_RETURN:
807 case OP_RETURN_OBJECT:
808 storeValue(cUnit, retLoc, rlSrc[0]);
809 break;
810
811 case OP_RETURN_WIDE:
812 rlDest = retLocWide;
813 rlDest.fp = rlSrc[0].fp;
814 storeValueWide(cUnit, rlDest, rlSrc[0]);
815 break;
816
817 case OP_MOVE_RESULT_WIDE:
818 if (mir->OptimizationFlags & MIR_INLINED)
819 break; // Nop - combined w/ previous invoke
820 /*
821 * Somewhat hacky here. Because we're now passing
822 * return values in registers, we have to let the
823 * register allocation utilities know that the return
824 * registers are live and may not be used for address
825 * formation in storeValueWide.
826 */
827 assert(retLocWide.lowReg == r0);
828 assert(retLocWide.lowReg == r1);
829 oatLockTemp(cUnit, retLocWide.lowReg);
830 oatLockTemp(cUnit, retLocWide.highReg);
831 storeValueWide(cUnit, rlDest, retLocWide);
832 oatFreeTemp(cUnit, retLocWide.lowReg);
833 oatFreeTemp(cUnit, retLocWide.highReg);
834 break;
835
836 case OP_MOVE_RESULT:
837 case OP_MOVE_RESULT_OBJECT:
838 if (mir->OptimizationFlags & MIR_INLINED)
839 break; // Nop - combined w/ previous invoke
840 /* See comment for OP_MOVE_RESULT_WIDE */
841 assert(retLoc.lowReg == r0);
842 oatLockTemp(cUnit, retLoc.lowReg);
843 storeValue(cUnit, rlDest, retLoc);
844 oatFreeTemp(cUnit, retLoc.lowReg);
845 break;
846
847 case OP_MOVE:
848 case OP_MOVE_OBJECT:
849 case OP_MOVE_16:
850 case OP_MOVE_OBJECT_16:
851 case OP_MOVE_FROM16:
852 case OP_MOVE_OBJECT_FROM16:
853 storeValue(cUnit, rlDest, rlSrc[0]);
854 break;
855
856 case OP_MOVE_WIDE:
857 case OP_MOVE_WIDE_16:
858 case OP_MOVE_WIDE_FROM16:
859 storeValueWide(cUnit, rlDest, rlSrc[0]);
860 break;
861
862 case OP_CONST:
863 case OP_CONST_4:
864 case OP_CONST_16:
865 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
866 loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
867 storeValue(cUnit, rlDest, rlResult);
868 break;
869
870 case OP_CONST_HIGH16:
871 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
872 loadConstantNoClobber(cUnit, rlResult.lowReg,
873 mir->dalvikInsn.vB << 16);
874 storeValue(cUnit, rlDest, rlResult);
875 break;
876
877 case OP_CONST_WIDE_16:
878 case OP_CONST_WIDE_32:
879 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
880 loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
881 //TUNING: do high separately to avoid load dependency
882 opRegRegImm(cUnit, kOpAsr, rlResult.highReg, rlResult.lowReg, 31);
883 storeValueWide(cUnit, rlDest, rlResult);
884 break;
885
886 case OP_CONST_WIDE:
887 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
888 loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
889 0, mir->dalvikInsn.vB);
890 storeValue(cUnit, rlDest, rlResult);
891 break;
892
893 case OP_CONST_WIDE_HIGH16:
894 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
895 loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
896 0, mir->dalvikInsn.vB << 16);
897 storeValue(cUnit, rlDest, rlResult);
898 break;
899
900 case OP_MONITOR_ENTER:
901 genMonitorEnter(cUnit, mir, rlSrc[0]);
902 break;
903
904 case OP_MONITOR_EXIT:
905 genMonitorExit(cUnit, mir, rlSrc[0]);
906 break;
907
908 case OP_CHECK_CAST:
909 genCheckCast(cUnit, mir, rlSrc[0]);
910 break;
911
912 case OP_INSTANCE_OF:
913 genInstanceof(cUnit, mir, rlDest, rlSrc[0]);
914 break;
915
916 case OP_NEW_INSTANCE:
917 genNewInstance(cUnit, mir, rlDest);
918 break;
919
920 case OP_THROW:
921 genThrow(cUnit, mir, rlSrc[0]);
922 break;
923
924 case OP_ARRAY_LENGTH:
925 int lenOffset;
926 lenOffset = OFFSETOF_MEMBER(ArrayObject, length);
927 genNullCheck(cUnit, rlSrc[0].sRegLow, rlSrc[0].lowReg,
928 mir->offset, NULL);
929 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
930 loadWordDisp(cUnit, rlSrc[0].lowReg, lenOffset,
931 rlResult.lowReg);
932 storeValue(cUnit, rlDest, rlResult);
933 break;
934
935 case OP_CONST_STRING:
936 case OP_CONST_STRING_JUMBO:
937 genConstString(cUnit, mir, rlDest, rlSrc[0]);
938 break;
939
940 case OP_CONST_CLASS:
941 genConstClass(cUnit, mir, rlDest, rlSrc[0]);
942 break;
943
944 case OP_FILL_ARRAY_DATA:
945 genFillArrayData(cUnit, mir, rlSrc[0]);
946 break;
947
948 case OP_FILLED_NEW_ARRAY:
949 genFilledNewArray(cUnit, mir, false /* not range */);
950 break;
951
952 case OP_FILLED_NEW_ARRAY_RANGE:
953 genFilledNewArray(cUnit, mir, true /* range */);
954 break;
955
956 case OP_NEW_ARRAY:
957 genNewArray(cUnit, mir, rlDest, rlSrc[0]);
958 break;
959
960 case OP_GOTO:
961 case OP_GOTO_16:
962 case OP_GOTO_32:
963 // TUNING: add MIR flag to disable when unnecessary
964 bool backwardBranch;
965 backwardBranch = (bb->taken->startOffset <= mir->offset);
966 if (backwardBranch) {
967 genSuspendPoll(cUnit, mir);
968 }
969 genUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
970 break;
971
972 case OP_PACKED_SWITCH:
973 genPackedSwitch(cUnit, mir, rlSrc[0]);
974 break;
975
976 case OP_SPARSE_SWITCH:
977 genSparseSwitch(cUnit, mir, rlSrc[0]);
978 break;
979
980 case OP_CMPL_FLOAT:
981 case OP_CMPG_FLOAT:
982 case OP_CMPL_DOUBLE:
983 case OP_CMPG_DOUBLE:
984 res = genCmpFP(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
985 break;
986
987 case OP_CMP_LONG:
988 genCmpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
989 break;
990
991 case OP_IF_EQ:
992 case OP_IF_NE:
993 case OP_IF_LT:
994 case OP_IF_GE:
995 case OP_IF_GT:
996 case OP_IF_LE: {
997 bool backwardBranch;
998 ArmConditionCode cond;
999 backwardBranch = (bb->taken->startOffset <= mir->offset);
1000 if (backwardBranch) {
1001 genSuspendPoll(cUnit, mir);
1002 }
1003 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1004 rlSrc[1] = loadValue(cUnit, rlSrc[1], kCoreReg);
1005 opRegReg(cUnit, kOpCmp, rlSrc[0].lowReg, rlSrc[1].lowReg);
1006 switch(opcode) {
1007 case OP_IF_EQ:
1008 cond = kArmCondEq;
1009 break;
1010 case OP_IF_NE:
1011 cond = kArmCondNe;
1012 break;
1013 case OP_IF_LT:
1014 cond = kArmCondLt;
1015 break;
1016 case OP_IF_GE:
1017 cond = kArmCondGe;
1018 break;
1019 case OP_IF_GT:
1020 cond = kArmCondGt;
1021 break;
1022 case OP_IF_LE:
1023 cond = kArmCondLe;
1024 break;
1025 default:
1026 cond = (ArmConditionCode)0;
1027 LOG(FATAL) << "Unexpected opcode " << (int)opcode;
1028 }
1029 genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
1030 genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
1031 break;
1032 }
1033
1034 case OP_IF_EQZ:
1035 case OP_IF_NEZ:
1036 case OP_IF_LTZ:
1037 case OP_IF_GEZ:
1038 case OP_IF_GTZ:
1039 case OP_IF_LEZ: {
1040 bool backwardBranch;
1041 ArmConditionCode cond;
1042 backwardBranch = (bb->taken->startOffset <= mir->offset);
1043 if (backwardBranch) {
1044 genSuspendPoll(cUnit, mir);
1045 }
1046 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1047 opRegImm(cUnit, kOpCmp, rlSrc[0].lowReg, 0);
1048 switch(opcode) {
1049 case OP_IF_EQZ:
1050 cond = kArmCondEq;
1051 break;
1052 case OP_IF_NEZ:
1053 cond = kArmCondNe;
1054 break;
1055 case OP_IF_LTZ:
1056 cond = kArmCondLt;
1057 break;
1058 case OP_IF_GEZ:
1059 cond = kArmCondGe;
1060 break;
1061 case OP_IF_GTZ:
1062 cond = kArmCondGt;
1063 break;
1064 case OP_IF_LEZ:
1065 cond = kArmCondLe;
1066 break;
1067 default:
1068 cond = (ArmConditionCode)0;
1069 LOG(FATAL) << "Unexpected opcode " << (int)opcode;
1070 }
1071 genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
1072 genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
1073 break;
1074 }
1075
1076 case OP_AGET_WIDE:
1077 genArrayGet(cUnit, mir, kLong, rlSrc[0], rlSrc[1], rlDest, 3);
1078 break;
1079 case OP_AGET:
1080 case OP_AGET_OBJECT:
1081 genArrayGet(cUnit, mir, kWord, rlSrc[0], rlSrc[1], rlDest, 2);
1082 break;
1083 case OP_AGET_BOOLEAN:
1084 genArrayGet(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1],
1085 rlDest, 0);
1086 break;
1087 case OP_AGET_BYTE:
1088 genArrayGet(cUnit, mir, kSignedByte, rlSrc[0], rlSrc[1], rlDest, 0);
1089 break;
1090 case OP_AGET_CHAR:
1091 genArrayGet(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1],
1092 rlDest, 1);
1093 break;
1094 case OP_AGET_SHORT:
1095 genArrayGet(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], rlDest, 1);
1096 break;
1097 case OP_APUT_WIDE:
1098 genArrayPut(cUnit, mir, kLong, rlSrc[1], rlSrc[2], rlSrc[0], 3);
1099 break;
1100 case OP_APUT:
1101 genArrayPut(cUnit, mir, kWord, rlSrc[1], rlSrc[2], rlSrc[0], 2);
1102 break;
1103 case OP_APUT_OBJECT:
1104 genArrayObjectPut(cUnit, mir, rlSrc[1], rlSrc[2], rlSrc[0], 2);
1105 break;
1106 case OP_APUT_SHORT:
1107 case OP_APUT_CHAR:
1108 genArrayPut(cUnit, mir, kUnsignedHalf, rlSrc[1], rlSrc[2],
1109 rlSrc[0], 1);
1110 break;
1111 case OP_APUT_BYTE:
1112 case OP_APUT_BOOLEAN:
1113 genArrayPut(cUnit, mir, kUnsignedByte, rlSrc[1], rlSrc[2],
1114 rlSrc[0], 0);
1115 break;
1116
1117 case OP_IGET_WIDE:
1118 case OP_IGET_WIDE_VOLATILE:
1119 genIGetWideX(cUnit, mir, rlDest, rlSrc[0]);
1120 break;
1121
1122 case OP_IGET:
1123 case OP_IGET_VOLATILE:
1124 case OP_IGET_OBJECT:
1125 case OP_IGET_OBJECT_VOLATILE:
1126 genIGetX(cUnit, mir, kWord, rlDest, rlSrc[0]);
1127 break;
1128
1129 case OP_IGET_BOOLEAN:
1130 case OP_IGET_BYTE:
1131 genIGetX(cUnit, mir, kUnsignedByte, rlDest, rlSrc[0]);
1132 break;
1133
1134 case OP_IGET_CHAR:
1135 genIGetX(cUnit, mir, kUnsignedHalf, rlDest, rlSrc[0]);
1136 break;
1137
1138 case OP_IGET_SHORT:
1139 genIGetX(cUnit, mir, kSignedHalf, rlDest, rlSrc[0]);
1140 break;
1141
1142 case OP_IPUT_WIDE:
1143 case OP_IPUT_WIDE_VOLATILE:
1144 genIPutWideX(cUnit, mir, rlSrc[0], rlSrc[1]);
1145 break;
1146
1147 case OP_IPUT_OBJECT:
1148 case OP_IPUT_OBJECT_VOLATILE:
1149 genIPutX(cUnit, mir, kWord, rlSrc[0], rlSrc[1], true);
1150 break;
1151
1152 case OP_IPUT:
1153 case OP_IPUT_VOLATILE:
1154 genIPutX(cUnit, mir, kWord, rlSrc[0], rlSrc[1], false);
1155 break;
1156
1157 case OP_IPUT_BOOLEAN:
1158 case OP_IPUT_BYTE:
1159 genIPutX(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1], false);
1160 break;
1161
1162 case OP_IPUT_CHAR:
1163 genIPutX(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1], false);
1164 break;
1165
1166 case OP_IPUT_SHORT:
1167 genIPutX(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], false);
1168 break;
1169
1170 case OP_SGET:
1171 case OP_SGET_OBJECT:
1172 case OP_SGET_BOOLEAN:
1173 case OP_SGET_BYTE:
1174 case OP_SGET_CHAR:
1175 case OP_SGET_SHORT:
1176 genSget(cUnit, mir, rlResult, rlDest);
1177 break;
1178
1179 case OP_SGET_WIDE:
1180 genSgetWide(cUnit, mir, rlResult, rlDest);
1181 break;
1182
1183 case OP_SPUT:
1184 case OP_SPUT_OBJECT:
1185 case OP_SPUT_BOOLEAN:
1186 case OP_SPUT_BYTE:
1187 case OP_SPUT_CHAR:
1188 case OP_SPUT_SHORT:
1189 genSput(cUnit, mir, rlSrc[0]);
1190 break;
1191
1192 case OP_SPUT_WIDE:
1193 genSputWide(cUnit, mir, rlSrc[0]);
1194 break;
1195
1196 case OP_INVOKE_STATIC_RANGE:
1197 case OP_INVOKE_STATIC:
1198 genInvokeStatic(cUnit, mir);
1199 break;
1200
1201 case OP_INVOKE_DIRECT:
1202 case OP_INVOKE_DIRECT_RANGE:
1203 genInvokeDirect(cUnit, mir);
1204 break;
1205
1206 case OP_INVOKE_VIRTUAL:
1207 case OP_INVOKE_VIRTUAL_RANGE:
1208 genInvokeVirtual(cUnit, mir);
1209 break;
1210
1211 case OP_INVOKE_SUPER:
1212 case OP_INVOKE_SUPER_RANGE:
1213 genInvokeSuper(cUnit, mir);
1214 break;
1215
1216 case OP_INVOKE_INTERFACE:
1217 case OP_INVOKE_INTERFACE_RANGE:
1218 genInvokeInterface(cUnit, mir);
1219 break;
1220
1221 case OP_NEG_INT:
1222 case OP_NOT_INT:
1223 res = genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1224 break;
1225
1226 case OP_NEG_LONG:
1227 case OP_NOT_LONG:
1228 res = genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1229 break;
1230
1231 case OP_NEG_FLOAT:
1232 res = genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1233 break;
1234
1235 case OP_NEG_DOUBLE:
1236 res = genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1237 break;
1238
1239 case OP_INT_TO_LONG:
1240 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1241 if (rlSrc[0].location == kLocPhysReg) {
1242 genRegCopy(cUnit, rlResult.lowReg, rlSrc[0].lowReg);
1243 } else {
1244 loadValueDirect(cUnit, rlSrc[0], rlResult.lowReg);
1245 }
1246 opRegRegImm(cUnit, kOpAsr, rlResult.highReg,
1247 rlResult.lowReg, 31);
1248 storeValueWide(cUnit, rlDest, rlResult);
1249 break;
1250
1251 case OP_LONG_TO_INT:
1252 rlSrc[0] = oatUpdateLocWide(cUnit, rlSrc[0]);
1253 rlSrc[0] = oatWideToNarrow(cUnit, rlSrc[0]);
1254 storeValue(cUnit, rlDest, rlSrc[0]);
1255 break;
1256
1257 case OP_INT_TO_BYTE:
1258 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1259 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1260 opRegReg(cUnit, kOp2Byte, rlResult.lowReg, rlSrc[0].lowReg);
1261 storeValue(cUnit, rlDest, rlResult);
1262 break;
1263
1264 case OP_INT_TO_SHORT:
1265 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1266 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1267 opRegReg(cUnit, kOp2Short, rlResult.lowReg, rlSrc[0].lowReg);
1268 storeValue(cUnit, rlDest, rlResult);
1269 break;
1270
1271 case OP_INT_TO_CHAR:
1272 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1273 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1274 opRegReg(cUnit, kOp2Char, rlResult.lowReg, rlSrc[0].lowReg);
1275 storeValue(cUnit, rlDest, rlResult);
1276 break;
1277
1278 case OP_INT_TO_FLOAT:
1279 case OP_INT_TO_DOUBLE:
1280 case OP_LONG_TO_FLOAT:
1281 case OP_LONG_TO_DOUBLE:
1282 case OP_FLOAT_TO_INT:
1283 case OP_FLOAT_TO_LONG:
1284 case OP_FLOAT_TO_DOUBLE:
1285 case OP_DOUBLE_TO_INT:
1286 case OP_DOUBLE_TO_LONG:
1287 case OP_DOUBLE_TO_FLOAT:
1288 genConversion(cUnit, mir);
1289 break;
1290
1291 case OP_ADD_INT:
1292 case OP_SUB_INT:
1293 case OP_MUL_INT:
1294 case OP_DIV_INT:
1295 case OP_REM_INT:
1296 case OP_AND_INT:
1297 case OP_OR_INT:
1298 case OP_XOR_INT:
1299 case OP_SHL_INT:
1300 case OP_SHR_INT:
1301 case OP_USHR_INT:
1302 case OP_ADD_INT_2ADDR:
1303 case OP_SUB_INT_2ADDR:
1304 case OP_MUL_INT_2ADDR:
1305 case OP_DIV_INT_2ADDR:
1306 case OP_REM_INT_2ADDR:
1307 case OP_AND_INT_2ADDR:
1308 case OP_OR_INT_2ADDR:
1309 case OP_XOR_INT_2ADDR:
1310 case OP_SHL_INT_2ADDR:
1311 case OP_SHR_INT_2ADDR:
1312 case OP_USHR_INT_2ADDR:
1313 genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1314 break;
1315
1316 case OP_ADD_LONG:
1317 case OP_SUB_LONG:
1318 case OP_MUL_LONG:
1319 case OP_DIV_LONG:
1320 case OP_REM_LONG:
1321 case OP_AND_LONG:
1322 case OP_OR_LONG:
1323 case OP_XOR_LONG:
1324 case OP_ADD_LONG_2ADDR:
1325 case OP_SUB_LONG_2ADDR:
1326 case OP_MUL_LONG_2ADDR:
1327 case OP_DIV_LONG_2ADDR:
1328 case OP_REM_LONG_2ADDR:
1329 case OP_AND_LONG_2ADDR:
1330 case OP_OR_LONG_2ADDR:
1331 case OP_XOR_LONG_2ADDR:
1332 genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1333 break;
1334
1335 case OP_SHL_LONG_2ADDR:
1336 case OP_SHR_LONG_2ADDR:
1337 case OP_USHR_LONG_2ADDR:
1338 genShiftOpLong(cUnit,mir, rlDest, rlSrc[0], rlSrc[0]);
1339 break;
1340
1341 case OP_SHL_LONG:
1342 case OP_SHR_LONG:
1343 case OP_USHR_LONG:
1344 genShiftOpLong(cUnit,mir, rlDest, rlSrc[0], rlSrc[1]);
1345 break;
1346
1347 case OP_ADD_FLOAT:
1348 case OP_SUB_FLOAT:
1349 case OP_MUL_FLOAT:
1350 case OP_DIV_FLOAT:
1351 case OP_REM_FLOAT:
1352 case OP_ADD_FLOAT_2ADDR:
1353 case OP_SUB_FLOAT_2ADDR:
1354 case OP_MUL_FLOAT_2ADDR:
1355 case OP_DIV_FLOAT_2ADDR:
1356 case OP_REM_FLOAT_2ADDR:
1357 genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1358 break;
1359
1360 case OP_ADD_DOUBLE:
1361 case OP_SUB_DOUBLE:
1362 case OP_MUL_DOUBLE:
1363 case OP_DIV_DOUBLE:
1364 case OP_REM_DOUBLE:
1365 case OP_ADD_DOUBLE_2ADDR:
1366 case OP_SUB_DOUBLE_2ADDR:
1367 case OP_MUL_DOUBLE_2ADDR:
1368 case OP_DIV_DOUBLE_2ADDR:
1369 case OP_REM_DOUBLE_2ADDR:
1370 genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1371 break;
1372
1373 case OP_RSUB_INT:
1374 case OP_ADD_INT_LIT16:
1375 case OP_MUL_INT_LIT16:
1376 case OP_DIV_INT_LIT16:
1377 case OP_REM_INT_LIT16:
1378 case OP_AND_INT_LIT16:
1379 case OP_OR_INT_LIT16:
1380 case OP_XOR_INT_LIT16:
1381 case OP_ADD_INT_LIT8:
1382 case OP_RSUB_INT_LIT8:
1383 case OP_MUL_INT_LIT8:
1384 case OP_DIV_INT_LIT8:
1385 case OP_REM_INT_LIT8:
1386 case OP_AND_INT_LIT8:
1387 case OP_OR_INT_LIT8:
1388 case OP_XOR_INT_LIT8:
1389 case OP_SHL_INT_LIT8:
1390 case OP_SHR_INT_LIT8:
1391 case OP_USHR_INT_LIT8:
1392 genArithOpIntLit(cUnit, mir, rlDest, rlSrc[0], mir->dalvikInsn.vC);
1393 break;
1394
1395 default:
1396 res = true;
1397 }
1398 return res;
1399}
1400
/*
 * Printable names for the extended (compiler-internal) MIR opcodes,
 * indexed by (opcode - kMirOpFirst).  The order of these strings must
 * match the ExtendedMIROpcode enum exactly.
 */
static const char *extendedMIROpNames[kMirOpLast - kMirOpFirst] = {
    "kMirOpPhi",
    "kMirOpNullNRangeUpCheck",
    "kMirOpNullNRangeDownCheck",
    "kMirOpLowerBound",
    "kMirOpPunt",
    "kMirOpCheckInlinePrediction",
};
1409
1410/* Extended MIR instructions like PHI */
1411static void handleExtendedMethodMIR(CompilationUnit* cUnit, MIR* mir)
1412{
1413 int opOffset = mir->dalvikInsn.opcode - kMirOpFirst;
1414 char* msg = (char*)oatNew(strlen(extendedMIROpNames[opOffset]) + 1, false);
1415 strcpy(msg, extendedMIROpNames[opOffset]);
1416 ArmLIR* op = newLIR1(cUnit, kArmPseudoExtended, (int) msg);
1417
1418 switch ((ExtendedMIROpcode)mir->dalvikInsn.opcode) {
1419 case kMirOpPhi: {
1420 char* ssaString = oatGetSSAString(cUnit, mir->ssaRep);
1421 op->flags.isNop = true;
1422 newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
1423 break;
1424 }
1425 default:
1426 break;
1427 }
1428}
1429
/* If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame.
 * Note: at this point, any ins passed in registers are copied to their
 * home locations. */
static void flushIns(CompilationUnit* cUnit)
{
    /*
     * Move incoming arguments ("ins") to their home locations: promoted
     * ins are copied into their assigned physical registers, unpromoted
     * ones are stored to their Dalvik frame slots.
     * NOTE(review): startReg is r1 and at most 3 ins are taken from
     * registers, which implies ins arrive in r1..r3 (r0 appears to be
    * reserved and is stored to the frame base by the entry sequence) —
     * confirm against the calling convention.
     */
    if (cUnit->method->insSize == 0)
        return;
    int inRegs = (cUnit->method->insSize > 2) ? 3 : cUnit->method->insSize;
    int startReg = r1;
    /* Dalvik ins occupy the highest-numbered virtual registers */
    int startLoc = cUnit->method->registersSize - cUnit->method->insSize;
    for (int i = 0; i < inRegs; i++) {
        RegLocation loc = cUnit->regLocation[startLoc + i];
        if (loc.location == kLocPhysReg) {
            /* Promoted: copy the in straight to its target register */
            genRegCopy(cUnit, loc.lowReg, startReg + i);
        } else {
            /* Not promoted: store to its home slot in the frame */
            assert(loc.location == kLocDalvikFrame);
            storeBaseDisp(cUnit, rSP, loc.spOffset, startReg + i, kWord);
        }
    }

    // Handle special case of wide argument half in regs, half in frame
    if (inRegs == 3) {
        RegLocation loc = cUnit->regLocation[startLoc + 2];
        if (loc.wide && loc.location == kLocPhysReg) {
            // Load the other half of the arg into the promoted pair
            loadBaseDisp(cUnit, NULL, rSP, loc.spOffset+4,
                         loc.highReg, kWord, INVALID_SREG);
            inRegs++;   /* that load consumed one extra in slot */
        }
    }

    // Now, do initial assignment of all promoted arguments passed in frame
    for (int i = inRegs; i < cUnit->method->insSize;) {
        RegLocation loc = cUnit->regLocation[startLoc + i];
        /* Prefer the FP register assignment when one exists */
        if (loc.fpLocation == kLocPhysReg) {
            loc.location = kLocPhysReg;
            loc.fp = true;
            loc.lowReg = loc.fpLowReg;
            loc.highReg = loc.fpHighReg;
        }
        if (loc.location == kLocPhysReg) {
            if (loc.wide) {
                loadBaseDispWide(cUnit, NULL, rSP, loc.spOffset,
                                 loc.lowReg, loc.highReg, INVALID_SREG);
                i++;    /* wide values consume two in slots (plus the i++ below) */
            } else {
                loadBaseDisp(cUnit, NULL, rSP, loc.spOffset,
                             loc.lowReg, kWord, INVALID_SREG);
            }
        }
        i++;
    }
}
1483
1484/* Handle the content in each basic block */
static bool methodBlockCodeGen(CompilationUnit* cUnit, BasicBlock* bb)
{
    /*
     * Lower one basic block's MIRs to ARM LIR: emit the block label,
     * emit method prologue/epilogue for entry/exit blocks, then translate
     * each Dalvik instruction.  Returns false (no error path here; an
     * unhandled opcode is fatal).
     */
    MIR* mir;
    ArmLIR* labelList = (ArmLIR*) cUnit->blockLabelList;
    int blockId = bb->id;

    cUnit->curBlock = bb;
    labelList[blockId].operands[0] = bb->startOffset;

    /* Insert the block label */
    labelList[blockId].opcode = kArmPseudoNormalBlockLabel;
    oatAppendLIR(cUnit, (LIR*) &labelList[blockId]);

    /* No register or null-check state carries across block boundaries */
    oatClobberAllRegs(cUnit);
    oatResetNullCheck(cUnit);

    ArmLIR* headLIR = NULL;

    if (bb->blockType == kEntryBlock) {
        /*
         * On entry, r0, r1, r2 & r3 are live.  Let the register allocation
         * mechanism know so it doesn't try to use any of them when
         * expanding the frame or flushing.  This leaves the utility
         * code with a single temp: r12.  This should be enough.
         */
        oatLockTemp(cUnit, r0);
        oatLockTemp(cUnit, r1);
        oatLockTemp(cUnit, r2);
        oatLockTemp(cUnit, r3);
        newLIR0(cUnit, kArmPseudoMethodEntry);
        /* Spill core callee saves */
        newLIR1(cUnit, kThumb2Push, cUnit->coreSpillMask);
        /* Need to spill any FP regs? */
        if (cUnit->numFPSpills) {
            newLIR1(cUnit, kThumb2VPushCS, cUnit->numFPSpills);
        }
        /* Expand the rest of the frame (the pushes above covered spills) */
        opRegImm(cUnit, kOpSub, rSP, cUnit->frameSize - (cUnit->numSpills * 4));
        /* Save r0 at the base of the new frame */
        storeBaseDisp(cUnit, rSP, 0, r0, kWord);
        flushIns(cUnit);
        oatFreeTemp(cUnit, r0);
        oatFreeTemp(cUnit, r1);
        oatFreeTemp(cUnit, r2);
        oatFreeTemp(cUnit, r3);
    } else if (bb->blockType == kExitBlock) {
        newLIR0(cUnit, kArmPseudoMethodExit);
        /* Contract the frame back to the spill area, then pop the spills */
        opRegImm(cUnit, kOpAdd, rSP, cUnit->frameSize - (cUnit->numSpills * 4));
        /* Need to restore any FP callee saves? */
        if (cUnit->numFPSpills) {
            newLIR1(cUnit, kThumb2VPopCS, cUnit->numFPSpills);
        }
        if (cUnit->coreSpillMask & (1 << rLR)) {
            /* Unspill rLR to rPC (pop directly into pc to return) */
            cUnit->coreSpillMask &= ~(1 << rLR);
            cUnit->coreSpillMask |= (1 << rPC);
        }
        newLIR1(cUnit, kThumb2Pop, cUnit->coreSpillMask);
        if (!(cUnit->coreSpillMask & (1 << rPC))) {
            /* We didn't pop to rPC, so must do a bx rLR */
            newLIR1(cUnit, kThumbBx, rLR);
        }
    }

    for (mir = bb->firstMIRInsn; mir; mir = mir->next) {

        oatResetRegPool(cUnit);
        if (cUnit->disableOpt & (1 << kTrackLiveTemps)) {
            oatClobberAllRegs(cUnit);
        }

        if (cUnit->disableOpt & (1 << kSuppressLoads)) {
            oatResetDefTracking(cUnit);
        }

        /* Compiler-internal extended MIRs take a separate path */
        if ((int)mir->dalvikInsn.opcode >= (int)kMirOpFirst) {
            handleExtendedMethodMIR(cUnit, mir);
            continue;
        }

        cUnit->currentDalvikOffset = mir->offset;

        Opcode dalvikOpcode = mir->dalvikInsn.opcode;
        InstructionFormat dalvikFormat =
            dexGetFormatFromOpcode(dalvikOpcode);

        ArmLIR* boundaryLIR;

        /* Mark the beginning of a Dalvik instruction for line tracking */
        boundaryLIR = newLIR1(cUnit, kArmPseudoDalvikByteCodeBoundary,
                              (int) oatGetDalvikDisassembly(
                              &mir->dalvikInsn, ""));
        /* Remember the first LIR for this block */
        if (headLIR == NULL) {
            headLIR = boundaryLIR;
            /* Set the first boundaryLIR as a scheduling barrier */
            headLIR->defMask = ENCODE_ALL;
        }

        /* Don't generate the SSA annotation unless verbose mode is on */
        if (cUnit->printMe && mir->ssaRep) {
            char *ssaString = oatGetSSAString(cUnit, mir->ssaRep);
            newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
        }

        bool notHandled = compileDalvikInstruction(cUnit, mir, bb, labelList);

        if (notHandled) {
            char buf[100];
            snprintf(buf, 100, "%#06x: Opcode %#x (%s) / Fmt %d not handled",
                     mir->offset,
                     dalvikOpcode, dexGetOpcodeName(dalvikOpcode),
                     dalvikFormat);
            LOG(FATAL) << buf;
        }
    }

    if (headLIR) {
        /*
         * Eliminate redundant loads/stores and delay stores into later
         * slots
         */
        oatApplyLocalOptimizations(cUnit, (LIR*) headLIR,
                                   cUnit->lastLIRInsn);

        /*
         * Generate an unconditional branch to the fallthrough block.
         */
        if (bb->fallThrough) {
            genUnconditionalBranch(cUnit,
                                   &labelList[bb->fallThrough->id]);
        }
    }
    return false;
}
1618
1619/*
1620 * Nop any unconditional branches that go to the next instruction.
1621 * Note: new redundant branches may be inserted later, and we'll
1622 * use a check in final instruction assembly to nop those out.
1623 */
1624void removeRedundantBranches(CompilationUnit* cUnit)
1625{
1626 ArmLIR* thisLIR;
1627
1628 for (thisLIR = (ArmLIR*) cUnit->firstLIRInsn;
1629 thisLIR != (ArmLIR*) cUnit->lastLIRInsn;
1630 thisLIR = NEXT_LIR(thisLIR)) {
1631
1632 /* Branch to the next instruction */
1633 if ((thisLIR->opcode == kThumbBUncond) ||
1634 (thisLIR->opcode == kThumb2BUncond)) {
1635 ArmLIR* nextLIR = thisLIR;
1636
1637 while (true) {
1638 nextLIR = NEXT_LIR(nextLIR);
1639
1640 /*
1641 * Is the branch target the next instruction?
1642 */
1643 if (nextLIR == (ArmLIR*) thisLIR->generic.target) {
1644 thisLIR->flags.isNop = true;
1645 break;
1646 }
1647
1648 /*
1649 * Found real useful stuff between the branch and the target.
1650 * Need to explicitly check the lastLIRInsn here because it
1651 * might be the last real instruction.
1652 */
1653 if (!isPseudoOpcode(nextLIR->opcode) ||
1654 (nextLIR = (ArmLIR*) cUnit->lastLIRInsn))
1655 break;
1656 }
1657 }
1658 }
1659}
1660
1661void oatMethodMIR2LIR(CompilationUnit* cUnit)
1662{
1663 /* Used to hold the labels of each block */
1664 cUnit->blockLabelList =
1665 (void *) oatNew(sizeof(ArmLIR) * cUnit->numBlocks, true);
1666
1667 oatDataFlowAnalysisDispatcher(cUnit, methodBlockCodeGen,
1668 kPreOrderDFSTraversal, false /* Iterative */);
1669 removeRedundantBranches(cUnit);
1670}
1671
1672/* Common initialization routine for an architecture family */
1673bool oatArchInit()
1674{
1675 int i;
1676
1677 for (i = 0; i < kArmLast; i++) {
1678 if (EncodingMap[i].opcode != i) {
1679 LOG(FATAL) << "Encoding order for " << EncodingMap[i].name <<
1680 " is wrong: expecting " << i << ", seeing " <<
1681 (int)EncodingMap[i].opcode;
1682 }
1683 }
1684
1685 return oatArchVariantInit();
1686}
1687
/* Needed by the Assembler */
void oatSetupResourceMasks(ArmLIR* lir)
{
    /* Delegate: compute lir's use/def resource masks */
    setupResourceMasks(lir);
}
1693
/* Needed by the ld/st optimizations */
ArmLIR* oatRegCopyNoInsert(CompilationUnit* cUnit, int rDest, int rSrc)
{
    /* Build (but do not append) a register-copy LIR */
    return genRegCopyNoInsert(cUnit, rDest, rSrc);
}
1699
/* Needed by the register allocator */
ArmLIR* oatRegCopy(CompilationUnit* cUnit, int rDest, int rSrc)
{
    /* Emit a single-word register copy; returns the generated LIR */
    return genRegCopy(cUnit, rDest, rSrc);
}
1705
/* Needed by the register allocator */
void oatRegCopyWide(CompilationUnit* cUnit, int destLo, int destHi,
                    int srcLo, int srcHi)
{
    /* Emit a copy of a 64-bit value held in a register pair */
    genRegCopyWide(cUnit, destLo, destHi, srcLo, srcHi);
}
1712
/* Flush a single register to memory at [rBase + displacement] */
void oatFlushRegImpl(CompilationUnit* cUnit, int rBase,
                     int displacement, int rSrc, OpSize size)
{
    storeBaseDisp(cUnit, rBase, displacement, rSrc, size);
}
1718
/* Flush a 64-bit register pair to memory at [rBase + displacement] */
void oatFlushRegWideImpl(CompilationUnit* cUnit, int rBase,
                         int displacement, int rSrcLo, int rSrcHi)
{
    storeBaseDispWide(cUnit, rBase, displacement, rSrcLo, rSrcHi);
}
1724
#ifdef TESTMODE
// Will be done at runtime by art. Keep for debugging
//
// Populates the per-thread helper-function table that generated code
// calls through (see the rSELF-relative loadWordDisp calls in codegen).
// Maps each slot to the corresponding EABI runtime routine, libc
// function, or Dalvik VM helper.
void oatInitHelpers(Thread* thread)
{
    thread->pMemcpy = memcpy;
    // Float/double/integer conversion helpers (ARM EABI soft-float)
    thread->pI2f = __aeabi_i2f;
    thread->pF2iz = __aeabi_f2iz;
    thread->pD2f = __aeabi_d2f;
    thread->pF2d = __aeabi_f2d;
    thread->pI2d = __aeabi_i2d;
    thread->pD2iz = __aeabi_d2iz;
    thread->pL2f = __aeabi_l2f;
    thread->pL2d = __aeabi_l2d;
    thread->pArtF2l = artF2L;
    thread->pArtD2l = artD2L;
    // Soft-float arithmetic
    thread->pFadd = __aeabi_fadd;
    thread->pFsub = __aeabi_fsub;
    thread->pFdiv = __aeabi_fdiv;
    thread->pFmul = __aeabi_fmul;
    thread->pFmodf = fmodf;
    thread->pDadd = __aeabi_dadd;
    thread->pDsub = __aeabi_dsub;
    thread->pDdiv = __aeabi_ddiv;
    thread->pDmul = __aeabi_dmul;
    thread->pFmod = fmod;
    // Integer division helpers
    thread->pIdivmod = __aeabi_idivmod;
    thread->pIdiv = __aeabi_idiv;
    thread->pLdivmod = __aeabi_ldivmod;
    // Dalvik VM runtime entry points
    thread->pArtUnlockObject = dvmUnlockObject;
    thread->pArtCanPutArrayElementNoThrow = dvmCanPutArrayElement;
    thread->pArtInstanceofNonTrivialNoThrow = dvmInstanceofNonTrivial;
    thread->pArtInstanceofNonTrivial = dvmInstanceofNonTrivial;
    thread->pArtAllocArrayByClass = dvmAllocArrayByClass;
    thread->pArtFindInterfaceMethodInCache = dvmFindInterfaceMethodInCache;
    thread->pArtUnlockObjectNoThrow = dvmUnlockObject;
    thread->pArtLockObjectNoThrow = dvmLockObject;
    thread->pArtAllocObjectNoThrow = dvmAllocObject;
    thread->pArtThrowException = NULL; //TBD
    thread->pArtHandleFillArrayDataNoThrow = dvmInterpHandleFillArrayData;
}
#endif