/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
/*
 * Canonical "invalid" RegLocation: frame-resident, with no physical
 * registers, no SSA name, and no frame offset assigned.  Used as a
 * placeholder when a real location is not (yet) known.
 */
static const RegLocation badLoc = {kLocDalvikFrame, 0, 0, INVALID_REG,
                                   INVALID_REG, INVALID_SREG, 0,
                                   kLocDalvikFrame, INVALID_REG, INVALID_REG,
                                   INVALID_OFFSET};
/* Pre-built locations describing the Dalvik return value (single & wide). */
static const RegLocation retLoc = LOC_DALVIK_RETURN_VAL;
static const RegLocation retLocWide = LOC_DALVIK_RETURN_VAL_WIDE;
23
/*
 * Let helper function take care of everything.  Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
static void genNewArray(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                        RegLocation rlSrc)
{
    oatFlushAllRegs(cUnit);   /* Everything to home location */
    // Fetch the allocation helper's entry point from thread-local storage.
    loadWordDisp(cUnit, rSELF,
                 OFFSETOF_MEMBER(Thread, pAllocFromCode), rLR);
    loadCurrMethodDirect(cUnit, r1);              // arg1 <- Method*
    loadConstant(cUnit, r0, mir->dalvikInsn.vC);  // arg0 <- type_id
    loadValueDirectFixed(cUnit, rlSrc, r2);       // arg2 <- count
    opReg(cUnit, kOpBlx, rLR);                    // call the helper
    oatClobberCallRegs(cUnit);  // caller-save regs no longer hold live values
    RegLocation rlResult = oatGetReturn(cUnit);   // helper's result (r0)
    storeValue(cUnit, rlDest, rlResult);
}
43
/*
 * Similar to genNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class.  Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
static void genFilledNewArray(CompilationUnit* cUnit, MIR* mir, bool isRange)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int elems;   // number of elements to copy into the new array
    int typeId;  // dex type index of the array class
    // Range and non-range forms encode count/type in different vRegs.
    if (isRange) {
        elems = dInsn->vA;
        typeId = dInsn->vB;
    } else {
        elems = dInsn->vB;
        typeId = dInsn->vC;
    }
    oatFlushAllRegs(cUnit);   /* Everything to home location */
    // TODO: Alloc variant that checks types (see header comment) */
    UNIMPLEMENTED(WARNING) << "Need AllocFromCode variant w/ extra checks";
    loadWordDisp(cUnit, rSELF,
                 OFFSETOF_MEMBER(Thread, pAllocFromCode), rLR);
    loadCurrMethodDirect(cUnit, r1);    // arg1 <- Method*
    loadConstant(cUnit, r0, typeId);    // arg0 <- type_id
    loadConstant(cUnit, r2, elems);     // arg2 <- count
    opReg(cUnit, kOpBlx, rLR);
    /*
     * NOTE: the implicit target for OP_FILLED_NEW_ARRAY is the
     * return region.  Because AllocFromCode placed the new array
     * in r0, we'll just lock it into place.  When debugger support is
     * added, it may be necessary to additionally copy all return
     * values to a home location in thread-local storage
     */
    oatLockTemp(cUnit, r0);

    // Having a range of 0 is legal
    if (isRange && (dInsn->vA > 0)) {
        /*
         * Bit of ugliness here.  We're going generate a mem copy loop
         * on the register range, but it is possible that some regs
         * in the range have been promoted.  This is unlikely, but
         * before generating the copy, we'll just force a flush
         * of any regs in the source range that have been promoted to
         * home location.
         */
        for (unsigned int i = 0; i < dInsn->vA; i++) {
            RegLocation loc = oatUpdateLoc(cUnit,
                oatGetSrc(cUnit, mir, i));
            if (loc.location == kLocPhysReg) {
                // Promoted value: write it back to its frame home slot.
                storeBaseDisp(cUnit, rSP, loc.spOffset, loc.lowReg, kWord);
            }
        }
        /*
         * TUNING note: generated code here could be much improved, but
         * this is an uncommon operation and isn't especially performance
         * critical.
         */
        int rSrc = oatAllocTemp(cUnit);
        int rDst = oatAllocTemp(cUnit);
        int rIdx = oatAllocTemp(cUnit);
        int rVal = rLR;  // Using a lot of temps, rLR is known free here
        // Set up source pointer
        RegLocation rlFirst = oatGetSrc(cUnit, mir, 0);
        opRegRegImm(cUnit, kOpAdd, rSrc, rSP, rlFirst.spOffset);
        // Set up the target pointer
        opRegRegImm(cUnit, kOpAdd, rDst, r0,
                    Array::DataOffset().Int32Value());
        // Set up the loop counter (known to be > 0)
        loadConstant(cUnit, rIdx, dInsn->vA);
        // Generate the copy loop.  Going backwards for convenience
        ArmLIR* target = newLIR0(cUnit, kArmPseudoTargetLabel);
        target->defMask = ENCODE_ALL;
        // Copy next element (scaled index: 4-byte words)
        loadBaseIndexed(cUnit, rSrc, rIdx, rVal, 2, kWord);
        storeBaseIndexed(cUnit, rDst, rIdx, rVal, 2, kWord);
        // Use setflags encoding here
        newLIR3(cUnit, kThumb2SubsRRI12, rIdx, rIdx, 1);
        ArmLIR* branch = opCondBranch(cUnit, kArmCondNe);
        branch->generic.target = (LIR*)target;
    } else if (!isRange) {
        // TUNING: interleave
        for (unsigned int i = 0; i < dInsn->vA; i++) {
            RegLocation rlArg = loadValue(cUnit,
                oatGetSrc(cUnit, mir, i), kCoreReg);
            storeBaseDisp(cUnit, r0,
                          Array::DataOffset().Int32Value() +
                          i * 4, rlArg.lowReg, kWord);
            // If the loadValue caused a temp to be allocated, free it
            if (oatIsTemp(cUnit, rlArg.lowReg)) {
                oatFreeTemp(cUnit, rlArg.lowReg);
            }
        }
    }
}
139
/*
 * Generate code for SPUT / SPUT_OBJECT.  Currently always calls the
 * runtime helper (pSetObjStatic / pSet32Static); the fast path is TODO.
 */
static void genSput(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
    // Fast path not implemented yet; always take the helper call below.
    bool slow_path = true;
    bool isObject = ((mir->dalvikInsn.opcode == OP_SPUT_OBJECT) ||
                     (mir->dalvikInsn.opcode == OP_SPUT_OBJECT_VOLATILE));
    UNIMPLEMENTED(WARNING) << "Implement sput fast path";
    int funcOffset;
    if (slow_path) {
        // Object stores go through a distinct helper.
        if (isObject) {
            funcOffset = OFFSETOF_MEMBER(Thread, pSetObjStatic);
        } else {
            funcOffset = OFFSETOF_MEMBER(Thread, pSet32Static);
        }
        oatFlushAllRegs(cUnit);
        loadWordDisp(cUnit, rSELF, funcOffset, rLR);  // helper entry point
        loadConstant(cUnit, r0, mir->dalvikInsn.vB);  // arg0 <- field_idx
        loadCurrMethodDirect(cUnit, r1);              // arg1 <- Method*
        loadValueDirect(cUnit, rlSrc, r2);            // arg2 <- new value
        opReg(cUnit, kOpBlx, rLR);
        oatClobberCallRegs(cUnit);
    } else {
        // Disabled fast-path sketch from the previous runtime; kept for
        // reference until the new-world fast path is written.
        UNIMPLEMENTED(FATAL) << "Must update for new world";
#if 0
        int valOffset = OFFSETOF_MEMBER(StaticField, value);
        int tReg = oatAllocTemp(cUnit);
        int objHead;
        bool isVolatile;
        bool isSputObject;
        const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
            mir->meta.calleeMethod : cUnit->method;
        void* fieldPtr = (void*)
            (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
        Opcode opcode = mir->dalvikInsn.opcode;

        if (fieldPtr == NULL) {
            // FIXME: need to handle this case for oat();
            UNIMPLEMENTED(FATAL);
        }

#if ANDROID_SMP != 0
        isVolatile = (opcode == OP_SPUT_VOLATILE) ||
                     (opcode == OP_SPUT_VOLATILE_JUMBO) ||
                     (opcode == OP_SPUT_OBJECT_VOLATILE) ||
                     (opcode == OP_SPUT_OBJECT_VOLATILE_JUMBO);
        assert(isVolatile == artIsVolatileField((Field *) fieldPtr));
#else
        isVolatile = artIsVolatileField((Field *) fieldPtr);
#endif

        isSputObject = (opcode == OP_SPUT_OBJECT) ||
                       (opcode == OP_SPUT_OBJECT_VOLATILE);

        rlSrc = oatGetSrc(cUnit, mir, 0);
        rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
        loadConstant(cUnit, tReg, (int) fieldPtr);
        if (isSputObject) {
            objHead = oatAllocTemp(cUnit);
            loadWordDisp(cUnit, tReg, OFFSETOF_MEMBER(Field, clazz), objHead);
        }
        storeWordDisp(cUnit, tReg, valOffset ,rlSrc.lowReg);
        oatFreeTemp(cUnit, tReg);
        if (isVolatile) {
            oatGenMemBarrier(cUnit, kSY);
        }
        if (isSputObject) {
            /* NOTE: marking card based sfield->clazz */
            markGCCard(cUnit, rlSrc.lowReg, objHead);
            oatFreeTemp(cUnit, objHead);
        }
#endif
    }
}
212
/*
 * Generate code for SPUT_WIDE.  Currently always calls the 64-bit
 * static-field setter helper (pSet64Static); the fast path is TODO.
 */
static void genSputWide(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
    // Fast path not implemented yet; always take the helper call below.
    bool slow_path = true;
    UNIMPLEMENTED(WARNING) << "Implement sput-wide fast path";
    int funcOffset;
    if (slow_path) {
        funcOffset = OFFSETOF_MEMBER(Thread, pSet64Static);
        oatFlushAllRegs(cUnit);
        loadWordDisp(cUnit, rSELF, funcOffset, rLR);     // helper entry point
        loadConstant(cUnit, r0, mir->dalvikInsn.vB);     // arg0 <- field_idx
        loadCurrMethodDirect(cUnit, r1);                 // arg1 <- Method*
        loadValueDirectWideFixed(cUnit, rlSrc, r2, r3);  // arg2/3 <- value
        opReg(cUnit, kOpBlx, rLR);
        oatClobberCallRegs(cUnit);
    } else {
        // Disabled fast-path sketch from the previous runtime; kept for
        // reference until the new-world fast path is written.
        UNIMPLEMENTED(FATAL) << "Must update for new world";
#if 0
        int tReg = oatAllocTemp(cUnit);
        int valOffset = OFFSETOF_MEMBER(StaticField, value);
        const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
            mir->meta.calleeMethod : cUnit->method;
        void* fieldPtr = (void*)
            (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);

        if (fieldPtr == NULL) {
            // FIXME: need to handle this case for oat();
            UNIMPLEMENTED(FATAL);
        }

        rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
        rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
        loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);

        storePair(cUnit, tReg, rlSrc.lowReg, rlSrc.highReg);
#endif
    }
}
250
251
252
253static void genSgetWide(CompilationUnit* cUnit, MIR* mir,
254 RegLocation rlResult, RegLocation rlDest)
255{
buzbeee1931742011-08-28 21:15:53 -0700256 bool slow_path = true;
257 UNIMPLEMENTED(WARNING) << "Implement sget-wide fast path";
258 int funcOffset;
259 if (slow_path) {
260 funcOffset = OFFSETOF_MEMBER(Thread, pGet64Static);
261 oatFlushAllRegs(cUnit);
262 loadWordDisp(cUnit, rSELF, funcOffset, rLR);
263 loadConstant(cUnit, r0, mir->dalvikInsn.vB);
264 loadCurrMethodDirect(cUnit, r1);
265 opReg(cUnit, kOpBlx, rLR);
266 RegLocation rlResult = oatGetReturnWide(cUnit);
267 storeValueWide(cUnit, rlDest, rlResult);
268 } else {
269 UNIMPLEMENTED(FATAL) << "Must update for new world";
buzbeec143c552011-08-20 17:38:58 -0700270#if 0
buzbee67bf8852011-08-17 17:51:35 -0700271 int valOffset = OFFSETOF_MEMBER(StaticField, value);
272 const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
273 mir->meta.calleeMethod : cUnit->method;
274 void* fieldPtr = (void*)
275 (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
276
277 if (fieldPtr == NULL) {
278 // FIXME: need to handle this case for oat();
279 UNIMPLEMENTED(FATAL);
280 }
281
282 int tReg = oatAllocTemp(cUnit);
283 rlDest = oatGetDestWide(cUnit, mir, 0, 1);
284 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
285 loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);
286
287 loadPair(cUnit, tReg, rlResult.lowReg, rlResult.highReg);
288
289 storeValueWide(cUnit, rlDest, rlResult);
buzbeec143c552011-08-20 17:38:58 -0700290#endif
buzbeee1931742011-08-28 21:15:53 -0700291 }
buzbee67bf8852011-08-17 17:51:35 -0700292}
293
294static void genSget(CompilationUnit* cUnit, MIR* mir,
295 RegLocation rlResult, RegLocation rlDest)
296{
buzbeee1931742011-08-28 21:15:53 -0700297 bool slow_path = true;
298 bool isObject = ((mir->dalvikInsn.opcode == OP_SGET_OBJECT) ||
299 (mir->dalvikInsn.opcode == OP_SGET_OBJECT_VOLATILE));
300 UNIMPLEMENTED(WARNING) << "Implement sget fast path";
301 int funcOffset;
302 if (slow_path) {
303 if (isObject) {
304 funcOffset = OFFSETOF_MEMBER(Thread, pGetObjStatic);
305 } else {
306 funcOffset = OFFSETOF_MEMBER(Thread, pGet32Static);
307 }
308 oatFlushAllRegs(cUnit);
309 loadWordDisp(cUnit, rSELF, funcOffset, rLR);
310 loadConstant(cUnit, r0, mir->dalvikInsn.vB);
311 loadCurrMethodDirect(cUnit, r1);
312 opReg(cUnit, kOpBlx, rLR);
313 RegLocation rlResult = oatGetReturn(cUnit);
314 storeValue(cUnit, rlDest, rlResult);
315 } else {
316 UNIMPLEMENTED(FATAL) << "Must update for new world";
buzbeec143c552011-08-20 17:38:58 -0700317#if 0
buzbee67bf8852011-08-17 17:51:35 -0700318 int valOffset = OFFSETOF_MEMBER(StaticField, value);
319 int tReg = oatAllocTemp(cUnit);
320 bool isVolatile;
321 const Method *method = cUnit->method;
322 void* fieldPtr = (void*)
323 (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
324
325 if (fieldPtr == NULL) {
326 // FIXME: need to handle this case for oat();
327 UNIMPLEMENTED(FATAL);
328 }
329
330 /*
331 * On SMP systems, Dalvik opcodes found to be referencing
332 * volatile fields are rewritten to their _VOLATILE variant.
333 * However, this does not happen on non-SMP systems. The compiler
334 * still needs to know about volatility to avoid unsafe
335 * optimizations so we determine volatility based on either
336 * the opcode or the field access flags.
337 */
338#if ANDROID_SMP != 0
339 Opcode opcode = mir->dalvikInsn.opcode;
340 isVolatile = (opcode == OP_SGET_VOLATILE) ||
341 (opcode == OP_SGET_OBJECT_VOLATILE);
buzbeec143c552011-08-20 17:38:58 -0700342 assert(isVolatile == artIsVolatileField((Field *) fieldPtr));
buzbee67bf8852011-08-17 17:51:35 -0700343#else
buzbeec143c552011-08-20 17:38:58 -0700344 isVolatile = artIsVolatileField((Field *) fieldPtr);
buzbee67bf8852011-08-17 17:51:35 -0700345#endif
346
347 rlDest = oatGetDest(cUnit, mir, 0);
348 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
349 loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);
350
351 if (isVolatile) {
352 oatGenMemBarrier(cUnit, kSY);
353 }
354 loadWordDisp(cUnit, tReg, 0, rlResult.lowReg);
355
356 storeValue(cUnit, rlDest, rlResult);
buzbeec143c552011-08-20 17:38:58 -0700357#endif
buzbeee1931742011-08-28 21:15:53 -0700358 }
buzbee67bf8852011-08-17 17:51:35 -0700359}
360
/*
 * Callback type used to interleave the instructions of an invoke
 * sequence.  Each call emits the code for the given state and returns
 * the next state, or -1 once the sequence is complete.  The final
 * ArmLIR* argument is the rollback/restart label for slow paths
 * (NULL for fast-path variants).
 */
typedef int (*NextCallInsn)(CompilationUnit*, MIR*, DecodedInstruction*, int,
                            ArmLIR*);
buzbee67bf8852011-08-17 17:51:35 -0700363
/*
 * Bit of a hack here - in leiu of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 */
static int nextSDCallInsn(CompilationUnit* cUnit, MIR* mir,
                          DecodedInstruction* dInsn, int state,
                          ArmLIR* rollback)
{
    DCHECK(rollback == NULL);  // Fast path only: no slow-path restart label
    uint32_t idx = dInsn->vB;  // target method index
    switch(state) {
        case 0: // Get the current Method* [sets r0]
            loadCurrMethodDirect(cUnit, r0);
            break;
        case 1: // Get method->code_and_direct_methods_
            loadWordDisp(cUnit, r0,
                Method::GetDexCacheCodeAndDirectMethodsOffset().Int32Value(),
                r0);
            break;
        case 2: // Grab target method* and target code_
            loadWordDisp(cUnit, r0,
                art::CodeAndDirectMethods::CodeOffsetInBytes(idx), rLR);
            loadWordDisp(cUnit, r0,
                art::CodeAndDirectMethods::MethodOffsetInBytes(idx), r0);
            break;
        default:
            return -1;  // Sequence complete
    }
    return state + 1;
}
394
/*
 * Bit of a hack here - in leiu of a real scheduling pass,
 * emit the next instruction in a virtual invoke sequence.
 * We can use rLR as a temp prior to target address loading
 * Note also that we'll load the first argument ("this") into
 * r1 here rather than the standard loadArgRegs.
 */
static int nextVCallInsn(CompilationUnit* cUnit, MIR* mir,
                         DecodedInstruction* dInsn, int state,
                         ArmLIR* rollback)
{
    DCHECK(rollback == NULL);  // Fast path only: no slow-path restart label
    RegLocation rlArg;
    /*
     * This is the fast path in which the target virtual method is
     * fully resolved at compile time.
     */
    Method* baseMethod = cUnit->method->GetDexCacheResolvedMethods()->
        Get(dInsn->vB);
    CHECK(baseMethod != NULL);
    uint32_t target_idx = baseMethod->GetMethodIndex();  // vtable slot
    switch(state) {
        case 0: // Get "this" [set r1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r1);
            break;
        case 1: // Is "this" null? [use r1]
            genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir->offset, NULL);
            // get this->klass_ [use r1, set rLR]
            loadWordDisp(cUnit, r1, Object::ClassOffset().Int32Value(), rLR);
            break;
        case 2: // Get this->klass_->vtable [usr rLR, set rLR]
            loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
            break;
        case 3: // Get target method [use rLR, set r0]
            loadWordDisp(cUnit, rLR, (target_idx * 4) +
                         art::Array::DataOffset().Int32Value(), r0);
            break;
        case 4: // Get the target compiled code address [uses r0, sets rLR]
            loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
            break;
        default:
            return -1;  // Sequence complete
    }
    return state + 1;
}
441
/*
 * Slow-path version of nextVCallInsn: used when the base method is not
 * fully resolved at compile time.  Emits a runtime resolution test and
 * bails to a helper (restarting at `rollback`) when unresolved.
 */
static int nextVCallInsnSP(CompilationUnit* cUnit, MIR* mir,
                           DecodedInstruction* dInsn, int state,
                           ArmLIR* rollback)
{
    DCHECK(rollback != NULL);  // Required: the slow path restarts here
    RegLocation rlArg;
    ArmLIR* skipBranch;
    ArmLIR* skipTarget;
    /*
     * This handles the case in which the base method is not fully
     * resolved at compile time.  We must generate code to test
     * for resolution a run time, bail to the slow path if not to
     * fill in all the tables.  In the latter case, we'll restart at
     * at the beginning of the sequence.
     */
    switch(state) {
        case 0: // Get the current Method* [sets r0]
            loadCurrMethodDirect(cUnit, r0);
            break;
        case 1: // Get method->dex_cache_resolved_methods_
            loadWordDisp(cUnit, r0,
                Method::GetDexCacheResolvedMethodsOffset().Int32Value(), rLR);
            break;
        case 2: // method->dex_cache_resolved_methods_->Get(method_idx)
            loadWordDisp(cUnit, rLR, (dInsn->vB * 4) +
                         art::Array::DataOffset().Int32Value(), rLR);
            break;
        case 3: // Resolved?
            skipBranch = genCmpImmBranch(cUnit, kArmCondNe, rLR, 0);
            // Slowest path, bail to helper, rollback and retry
            loadWordDisp(cUnit, rSELF,
                         OFFSETOF_MEMBER(Thread, pResolveMethodFromCode), rLR);
            loadConstant(cUnit, r1, dInsn->vB);
            newLIR1(cUnit, kThumbBlxR, rLR);
            genUnconditionalBranch(cUnit, rollback);
            // Resume normal slow path
            skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
            skipTarget->defMask = ENCODE_ALL;
            skipBranch->generic.target = (LIR*)skipTarget;
            // Get base_method->method_index [usr rLR, set r0]
            loadBaseDisp(cUnit, mir, rLR,
                         Method::GetMethodIndexOffset().Int32Value(), r0,
                         kUnsignedHalf, INVALID_SREG);
            // Load "this" [set r1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r1);
            break;
        case 4:
            // Is "this" null? [use r1]
            genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir->offset, NULL);
            // get this->clazz [use r1, set rLR]
            loadWordDisp(cUnit, r1, Object::ClassOffset().Int32Value(), rLR);
            break;
        case 5:
            // get this->klass_->vtable_ [usr rLR, set rLR]
            loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
            DCHECK((art::Array::DataOffset().Int32Value() & 0x3) == 0);
            // In load shadow fold vtable_ object header size into method_index_
            opRegImm(cUnit, kOpAdd, r0,
                     art::Array::DataOffset().Int32Value() / 4);
            // Get target Method*
            loadBaseIndexed(cUnit, rLR, r0, r0, 2, kWord);
            break;
        case 6: // Get the target compiled code address [uses r0, sets rLR]
            loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
            break;
        default:
            return -1;  // Sequence complete
    }
    return state + 1;
}
513
buzbee67bf8852011-08-17 17:51:35 -0700514/* Load up to 3 arguments in r1..r3 */
515static int loadArgRegs(CompilationUnit* cUnit, MIR* mir,
516 DecodedInstruction* dInsn, int callState,
buzbee561227c2011-09-02 15:28:19 -0700517 int *args, NextCallInsn nextCallInsn, ArmLIR* rollback)
buzbee67bf8852011-08-17 17:51:35 -0700518{
519 for (int i = 0; i < 3; i++) {
520 if (args[i] != INVALID_REG) {
521 RegLocation rlArg = oatGetSrc(cUnit, mir, i);
buzbee1b4c8592011-08-31 10:43:51 -0700522 // Arguments are treated as a series of untyped 32-bit values.
523 rlArg.wide = false;
buzbee67bf8852011-08-17 17:51:35 -0700524 loadValueDirectFixed(cUnit, rlArg, r1 + i);
buzbee561227c2011-09-02 15:28:19 -0700525 callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
buzbee67bf8852011-08-17 17:51:35 -0700526 }
527 }
528 return callState;
529}
530
buzbee4a3164f2011-09-03 11:25:10 -0700531// Interleave launch code for INVOKE_INTERFACE.
buzbee67bf8852011-08-17 17:51:35 -0700532static int nextInterfaceCallInsn(CompilationUnit* cUnit, MIR* mir,
buzbee561227c2011-09-02 15:28:19 -0700533 DecodedInstruction* dInsn, int state,
534 ArmLIR* rollback)
buzbee67bf8852011-08-17 17:51:35 -0700535{
buzbee67bf8852011-08-17 17:51:35 -0700536 switch(state) {
buzbee4a3164f2011-09-03 11:25:10 -0700537 case 0: // Load trampoline target
538 loadWordDisp(cUnit, rSELF,
539 OFFSETOF_MEMBER(Thread, pInvokeInterfaceTrampoline),
540 rLR);
541 // Load r0 with method index
542 loadConstant(cUnit, r0, dInsn->vB);
buzbee67bf8852011-08-17 17:51:35 -0700543 break;
buzbee67bf8852011-08-17 17:51:35 -0700544 default:
545 return -1;
546 }
547 return state + 1;
548}
549
/*
 * Interleave launch code for INVOKE_SUPER.  See comments
 * for nextVCallIns.
 */
static int nextSuperCallInsn(CompilationUnit* cUnit, MIR* mir,
                             DecodedInstruction* dInsn, int state,
                             ArmLIR* rollback)
{
    DCHECK(rollback == NULL);  // Fast path only: no slow-path restart label
    RegLocation rlArg;
    /*
     * This is the fast path in which the target virtual method is
     * fully resolved at compile time.  Note also that this path assumes
     * that the check to verify that the target method index falls
     * within the size of the super's vtable has been done at compile-time.
     */
    Method* baseMethod = cUnit->method->GetDexCacheResolvedMethods()->
        Get(dInsn->vB);
    CHECK(baseMethod != NULL);
    Class* superClass = cUnit->method->GetDeclaringClass()->GetSuperClass();
    CHECK(superClass != NULL);
    int32_t target_idx = baseMethod->GetMethodIndex();
    // Compile-time verification that the vtable slot exists (see above).
    CHECK(superClass->GetVTable()->GetLength() > target_idx);
    Method* targetMethod = superClass->GetVTable()->Get(target_idx);
    CHECK(targetMethod != NULL);
    switch(state) {
        case 0: // Get current Method* [set r0]
            loadCurrMethodDirect(cUnit, r0);
            // Load "this" [set r1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r1);
            // Get method->declaring_class_ [use r0, set rLR]
            loadWordDisp(cUnit, r0, Method::DeclaringClassOffset().Int32Value(),
                         rLR);
            // Is "this" null? [use r1]
            genNullCheck(cUnit, oatSSASrc(mir,0), r1,
                         mir->offset, NULL);
            break;
        case 1: // Get method->declaring_class_->super_class [usr rLR, set rLR]
            loadWordDisp(cUnit, rLR, Class::SuperClassOffset().Int32Value(),
                         rLR);
            break;
        case 2: // Get ...->super_class_->vtable [u/s rLR]
            loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
            break;
        case 3: // Get target method [use rLR, set r0]
            loadWordDisp(cUnit, rLR, (target_idx * 4) +
                         art::Array::DataOffset().Int32Value(), r0);
            break;
        case 4: // Get the target compiled code address [uses r0, sets rLR]
            loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
            break;
        default:
            return -1;  // Sequence complete
    }
    return state + 1;
}
607
608/* Slow-path version of nextSuperCallInsn */
609static int nextSuperCallInsnSP(CompilationUnit* cUnit, MIR* mir,
610 DecodedInstruction* dInsn, int state,
611 ArmLIR* rollback)
612{
613 DCHECK(rollback != NULL);
614 RegLocation rlArg;
615 ArmLIR* skipBranch;
616 ArmLIR* skipTarget;
617 int tReg;
618 /*
619 * This handles the case in which the base method is not fully
620 * resolved at compile time. We must generate code to test
621 * for resolution a run time, bail to the slow path if not to
622 * fill in all the tables. In the latter case, we'll restart at
623 * at the beginning of the sequence.
624 */
625 switch(state) {
626 case 0: // Get the current Method* [sets r0]
627 loadCurrMethodDirect(cUnit, r0);
628 break;
629 case 1: // Get method->dex_cache_resolved_methods_ [usr r0, set rLR]
630 loadWordDisp(cUnit, r0,
631 Method::GetDexCacheResolvedMethodsOffset().Int32Value(), rLR);
632 break;
633 case 2: // method->dex_cache_resolved_methods_->Get(meth_idx) [u/s rLR]
634 loadWordDisp(cUnit, rLR, (dInsn->vB * 4) +
635 art::Array::DataOffset().Int32Value(), rLR);
636 break;
637 case 3: // Resolved?
638 skipBranch = genCmpImmBranch(cUnit, kArmCondNe, rLR, 0);
639 // Slowest path, bail to helper, rollback and retry
640 loadWordDisp(cUnit, rSELF,
641 OFFSETOF_MEMBER(Thread, pResolveMethodFromCode), rLR);
642 loadConstant(cUnit, r1, dInsn->vB);
643 newLIR1(cUnit, kThumbBlxR, rLR);
644 genUnconditionalBranch(cUnit, rollback);
645 // Resume normal slow path
646 skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
647 skipTarget->defMask = ENCODE_ALL;
648 skipBranch->generic.target = (LIR*)skipTarget;
649 // Get base_method->method_index [usr rLR, set rLR]
650 loadBaseDisp(cUnit, mir, rLR,
651 Method::GetMethodIndexOffset().Int32Value(), rLR,
652 kUnsignedHalf, INVALID_SREG);
653 // Load "this" [set r1]
654 rlArg = oatGetSrc(cUnit, mir, 0);
655 loadValueDirectFixed(cUnit, rlArg, r1);
656 // Load curMethod->declaring_class_ [uses r0, sets r0]
657 loadWordDisp(cUnit, r0, Method::DeclaringClassOffset().Int32Value(),
658 r0);
659 case 4: // Get method->declaring_class_->super_class [usr r0, set r0]
660 loadWordDisp(cUnit, r0, Class::SuperClassOffset().Int32Value(), r0);
661 break;
662 case 5: // Get ...->super_class_->vtable [u/s r0]
663 loadWordDisp(cUnit, r0, Class::VTableOffset().Int32Value(), r0);
664 // In load shadow fold vtable_ object header size into method_index_
665 opRegImm(cUnit, kOpAdd, rLR,
666 art::Array::DataOffset().Int32Value() / 4);
667 if (!(mir->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
668 // Range check, throw NSM on failure
669 tReg = oatAllocTemp(cUnit);
670 loadWordDisp(cUnit, r0, art::Array::LengthOffset().Int32Value(),
671 tReg);
672 genBoundsCheck(cUnit, tReg, rLR, mir->offset, NULL);
673 oatFreeTemp(cUnit, tReg);
674 }
675 // Get target Method*
676 loadBaseIndexed(cUnit, r0, r0, rLR, 2, kWord);
677 break;
678 case 6: // Get the target compiled code address [uses r0, sets rLR]
679 loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
680 break;
681 default:
682 return -1;
683 }
buzbee67bf8852011-08-17 17:51:35 -0700684 return state + 1;
685}
686
/*
 * Load up to 5 arguments, the first three of which will be in
 * r1 .. r3.  On entry r0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer.  Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
static int genDalvikArgsNoRange(CompilationUnit* cUnit, MIR* mir,
                                DecodedInstruction* dInsn, int callState,
                                ArmLIR** pcrLabel, bool isRange,
                                NextCallInsn nextCallInsn, ArmLIR* rollback)
{
    RegLocation rlArg;
    int registerArgs[3];

    /* If no arguments, just return */
    if (dInsn->vA == 0)
        return callState;

    oatLockCallTemps(cUnit);  // Reserve the call registers
    callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);

    /*
     * Load frame arguments arg4 & arg5 first. Coded a little odd to
     * pre-schedule the method pointer target.
     */
    for (unsigned int i=3; i < dInsn->vA; i++) {
        int reg;
        // Range form indexes vRegs from vC; non-range uses positional args.
        int arg = (isRange) ? dInsn->vC + i : i;
        rlArg = oatUpdateLoc(cUnit, oatGetSrc(cUnit, mir, arg));
        if (rlArg.location == kLocPhysReg) {
            reg = rlArg.lowReg;  // Already in a register; store directly
        } else {
            // Stage through r1 (loadArgRegs has not claimed it yet).
            reg = r1;
            loadValueDirectFixed(cUnit, rlArg, r1);
            callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        }
        // Store to the outgoing-argument area of the frame.
        storeBaseDisp(cUnit, rSP, (i + 1) * 4, reg, kWord);
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    }

    /* Load register arguments r1..r3 */
    for (unsigned int i = 0; i < 3; i++) {
        if (i < dInsn->vA)
            registerArgs[i] = (isRange) ? dInsn->vC + i : i;
        else
            registerArgs[i] = INVALID_REG;  // Unused slot
    }
    callState = loadArgRegs(cUnit, mir, dInsn, callState, registerArgs,
                            nextCallInsn, rollback);

    // Load direct & need a "this" null check?
    if (pcrLabel) {
        *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), r1,
                                 mir->offset, NULL);
    }
    return callState;
}
745
746/*
747 * May have 0+ arguments (also used for jumbo). Note that
748 * source virtual registers may be in physical registers, so may
749 * need to be flushed to home location before copying. This
750 * applies to arg3 and above (see below).
751 *
752 * Two general strategies:
753 * If < 20 arguments
754 * Pass args 3-18 using vldm/vstm block copy
755 * Pass arg0, arg1 & arg2 in r1-r3
756 * If 20+ arguments
757 * Pass args arg19+ using memcpy block copy
758 * Pass arg0, arg1 & arg2 in r1-r3
759 *
760 */
static int genDalvikArgsRange(CompilationUnit* cUnit, MIR* mir,
                              DecodedInstruction* dInsn, int callState,
                              ArmLIR** pcrLabel, NextCallInsn nextCallInsn,
                              ArmLIR* rollback)
{
    int firstArg = dInsn->vC;   // First source vReg of the contiguous range
    int numArgs = dInsn->vA;    // Number of argument words

    // If we can treat it as non-range (Jumbo ops will use range form)
    if (numArgs <= 5)
        return genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pcrLabel,
                                    true, nextCallInsn, rollback);
    /*
     * Make sure range list doesn't span the break between in normal
     * Dalvik vRegs and the ins.
     */
    int highestArg = oatGetSrc(cUnit, mir, numArgs-1).sRegLow;
    int boundaryReg = cUnit->method->NumRegisters() - cUnit->method->NumIns();
    if ((firstArg < boundaryReg) && (highestArg >= boundaryReg)) {
        LOG(FATAL) << "Argument list spanned locals & args";
    }
    /*
     * NOTE(review): on this >5-arg path *pcrLabel is never written, so no
     * null check is emitted for "this" — confirm whether that is intentional
     * (the <=5 path above does honor pcrLabel).
     */

    /*
     * First load the non-register arguments. Both forms expect all
     * of the source arguments to be in their home frame location, so
     * scan the sReg names and flush any that have been promoted to
     * frame backing storage.
     */
    // Scan the rest of the args - if in physReg flush to memory
    // NOTE(review): loop starts at i = 4, but the block copy below begins at
    // arg slot 3 (uses[3]) — confirm arg 3 cannot be live-dirty in a register.
    for (int i = 4; i < numArgs; i++) {
        RegLocation loc = oatGetSrc(cUnit, mir, i);
        //TODO: generic loc flushing routine
        if (loc.wide) {
            loc = oatUpdateLocWide(cUnit, loc);
            if (loc.location == kLocPhysReg) {  // TUNING: if dirty?
                storeBaseDispWide(cUnit, rSP, loc.spOffset, loc.lowReg,
                                  loc.highReg);
                callState = nextCallInsn(cUnit, mir, dInsn, callState,
                                         rollback);
            }
        } else {
            loc = oatUpdateLoc(cUnit, loc);
            if (loc.location == kLocPhysReg) {  // TUNING: if dirty?
                storeBaseDisp(cUnit, rSP, loc.spOffset, loc.lowReg, kWord);
                callState = nextCallInsn(cUnit, mir, dInsn, callState,
                                         rollback);
            }
        }
    }

    /* Source: home frame slot of arg 3; dest: outs area past Method* + 3 regs */
    int startOffset = cUnit->regLocation[mir->ssaRep->uses[3]].spOffset;
    int outsOffset = 4 /* Method* */ + (3 * 4);
    if (numArgs >= 20) {
        // Generate a memcpy call; all source args were flushed to their
        // home frame locations by the scan above.
        opRegRegImm(cUnit, kOpAdd, r0, rSP, startOffset);
        opRegRegImm(cUnit, kOpAdd, r1, rSP, outsOffset);
        loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pMemcpy), rLR);
        loadConstant(cUnit, r2, (numArgs - 3) * 4);
        newLIR1(cUnit, kThumbBlxR, rLR);
    } else {
        // Use vldm/vstm pair using r3 as a temp
        int regsLeft = std::min(numArgs - 3, 16);
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        opRegRegImm(cUnit, kOpAdd, r3, rSP, startOffset);
        newLIR3(cUnit, kThumb2Vldms, r3, fr0, regsLeft);
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        opRegRegImm(cUnit, kOpAdd, r3, rSP, 4 /* Method* */ + (3 * 4));
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        newLIR3(cUnit, kThumb2Vstms, r3, fr0, regsLeft);
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    }

    // Handle the 1st 3 in r1, r2 & r3
    for (unsigned int i = 0; i < dInsn->vA && i < 3; i++) {
        RegLocation loc = oatGetSrc(cUnit, mir, firstArg + i);
        loadValueDirectFixed(cUnit, loc, r1 + i);
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    }

    // Finally, deal with the register arguments
    // We'll be using fixed registers here
    oatLockCallTemps(cUnit);
    callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    return callState;
}
846
buzbee561227c2011-09-02 15:28:19 -0700847static void genInvokeStaticDirect(CompilationUnit* cUnit, MIR* mir,
848 bool direct, bool range)
buzbee67bf8852011-08-17 17:51:35 -0700849{
850 DecodedInstruction* dInsn = &mir->dalvikInsn;
851 int callState = 0;
852 ArmLIR* nullCk;
buzbee561227c2011-09-02 15:28:19 -0700853 ArmLIR** pNullCk = direct ? &nullCk : NULL;
buzbee7b1b86d2011-08-26 18:59:10 -0700854
buzbee561227c2011-09-02 15:28:19 -0700855 NextCallInsn nextCallInsn = nextSDCallInsn;
856
857 if (range) {
858 callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, pNullCk,
859 nextCallInsn, NULL);
860 } else {
861 callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pNullCk,
862 false, nextCallInsn, NULL);
863 }
buzbee67bf8852011-08-17 17:51:35 -0700864 // Finish up any of the call sequence not interleaved in arg loading
865 while (callState >= 0) {
buzbee561227c2011-09-02 15:28:19 -0700866 callState = nextCallInsn(cUnit, mir, dInsn, callState, NULL);
buzbee67bf8852011-08-17 17:51:35 -0700867 }
868 newLIR1(cUnit, kThumbBlxR, rLR);
869}
870
buzbee4a3164f2011-09-03 11:25:10 -0700871/*
872 * All invoke-interface calls bounce off of art_invoke_interface_trampoline,
873 * which will locate the target and continue on via a tail call.
874 */
buzbee67bf8852011-08-17 17:51:35 -0700875static void genInvokeInterface(CompilationUnit* cUnit, MIR* mir)
876{
877 DecodedInstruction* dInsn = &mir->dalvikInsn;
878 int callState = 0;
879 ArmLIR* nullCk;
880 /* Note: must call nextInterfaceCallInsn() prior to 1st argument load */
buzbee561227c2011-09-02 15:28:19 -0700881 callState = nextInterfaceCallInsn(cUnit, mir, dInsn, callState, NULL);
buzbee67bf8852011-08-17 17:51:35 -0700882 if (mir->dalvikInsn.opcode == OP_INVOKE_INTERFACE)
883 callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
buzbee561227c2011-09-02 15:28:19 -0700884 false, nextInterfaceCallInsn, NULL);
buzbee67bf8852011-08-17 17:51:35 -0700885 else
886 callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
buzbee561227c2011-09-02 15:28:19 -0700887 nextInterfaceCallInsn, NULL);
buzbee67bf8852011-08-17 17:51:35 -0700888 // Finish up any of the call sequence not interleaved in arg loading
889 while (callState >= 0) {
buzbee561227c2011-09-02 15:28:19 -0700890 callState = nextInterfaceCallInsn(cUnit, mir, dInsn, callState, NULL);
buzbee67bf8852011-08-17 17:51:35 -0700891 }
892 newLIR1(cUnit, kThumbBlxR, rLR);
893}
894
895static void genInvokeSuper(CompilationUnit* cUnit, MIR* mir)
896{
897 DecodedInstruction* dInsn = &mir->dalvikInsn;
898 int callState = 0;
899 ArmLIR* nullCk;
buzbee4a3164f2011-09-03 11:25:10 -0700900 ArmLIR* rollback;
901 Method* baseMethod = cUnit->method->GetDexCacheResolvedMethods()->
902 Get(dInsn->vB);
903 NextCallInsn nextCallInsn;
904 bool fastPath = true;
905 if (baseMethod == NULL) {
906 fastPath = false;
907 } else {
908 Class* superClass = cUnit->method->GetDeclaringClass()->GetSuperClass();
909 if (superClass == NULL) {
910 fastPath = false;
911 } else {
912 int32_t target_idx = baseMethod->GetMethodIndex();
913 if (superClass->GetVTable()->GetLength() <= target_idx) {
914 fastPath = false;
915 } else {
916 fastPath = (superClass->GetVTable()->Get(target_idx) != NULL);
917 }
918 }
919 }
920 if (fastPath) {
921 nextCallInsn = nextSuperCallInsn;
922 rollback = NULL;
923 } else {
924 nextCallInsn = nextSuperCallInsnSP;
925 rollback = newLIR0(cUnit, kArmPseudoTargetLabel);
926 rollback->defMask = -1;
927 }
buzbee561227c2011-09-02 15:28:19 -0700928// TODO - redundantly loading arg0/r1 ("this")
buzbee67bf8852011-08-17 17:51:35 -0700929 if (mir->dalvikInsn.opcode == OP_INVOKE_SUPER)
930 callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
buzbee4a3164f2011-09-03 11:25:10 -0700931 false, nextCallInsn, rollback);
buzbee67bf8852011-08-17 17:51:35 -0700932 else
933 callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
buzbee4a3164f2011-09-03 11:25:10 -0700934 nextCallInsn, rollback);
buzbee67bf8852011-08-17 17:51:35 -0700935 // Finish up any of the call sequence not interleaved in arg loading
936 while (callState >= 0) {
buzbee561227c2011-09-02 15:28:19 -0700937 callState = nextSuperCallInsn(cUnit, mir, dInsn, callState, NULL);
buzbee67bf8852011-08-17 17:51:35 -0700938 }
939 newLIR1(cUnit, kThumbBlxR, rLR);
940}
941
942static void genInvokeVirtual(CompilationUnit* cUnit, MIR* mir)
943{
944 DecodedInstruction* dInsn = &mir->dalvikInsn;
945 int callState = 0;
946 ArmLIR* nullCk;
buzbee561227c2011-09-02 15:28:19 -0700947 ArmLIR* rollback;
948 Method* method = cUnit->method->GetDexCacheResolvedMethods()->
949 Get(dInsn->vB);
950 NextCallInsn nextCallInsn;
buzbee7b1b86d2011-08-26 18:59:10 -0700951
buzbee561227c2011-09-02 15:28:19 -0700952 if (method == NULL) {
953 // Slow path
954 nextCallInsn = nextVCallInsnSP;
955 // If we need a slow-path callout, we'll restart here
956 rollback = newLIR0(cUnit, kArmPseudoTargetLabel);
957 rollback->defMask = -1;
958 } else {
959 // Fast path
960 nextCallInsn = nextVCallInsn;
961 rollback = NULL;
962 }
buzbee7b1b86d2011-08-26 18:59:10 -0700963 // TODO - redundantly loading arg0/r1 ("this")
buzbee67bf8852011-08-17 17:51:35 -0700964 if (mir->dalvikInsn.opcode == OP_INVOKE_VIRTUAL)
965 callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
buzbee561227c2011-09-02 15:28:19 -0700966 false, nextCallInsn, rollback);
buzbee67bf8852011-08-17 17:51:35 -0700967 else
968 callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
buzbee561227c2011-09-02 15:28:19 -0700969 nextCallInsn, rollback);
buzbee67bf8852011-08-17 17:51:35 -0700970 // Finish up any of the call sequence not interleaved in arg loading
971 while (callState >= 0) {
buzbee561227c2011-09-02 15:28:19 -0700972 callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
buzbee67bf8852011-08-17 17:51:35 -0700973 }
974 newLIR1(cUnit, kThumbBlxR, rLR);
975}
976
977// TODO: break out the case handlers. Might make it easier to support x86
978static bool compileDalvikInstruction(CompilationUnit* cUnit, MIR* mir,
979 BasicBlock* bb, ArmLIR* labelList)
980{
981 bool res = false; // Assume success
982 RegLocation rlSrc[3];
983 RegLocation rlDest = badLoc;
984 RegLocation rlResult = badLoc;
985 Opcode opcode = mir->dalvikInsn.opcode;
986
987 /* Prep Src and Dest locations */
988 int nextSreg = 0;
989 int nextLoc = 0;
990 int attrs = oatDataFlowAttributes[opcode];
991 rlSrc[0] = rlSrc[1] = rlSrc[2] = badLoc;
992 if (attrs & DF_UA) {
993 rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
994 nextSreg++;
995 } else if (attrs & DF_UA_WIDE) {
996 rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
997 nextSreg + 1);
998 nextSreg+= 2;
999 }
1000 if (attrs & DF_UB) {
1001 rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
1002 nextSreg++;
1003 } else if (attrs & DF_UB_WIDE) {
1004 rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
1005 nextSreg + 1);
1006 nextSreg+= 2;
1007 }
1008 if (attrs & DF_UC) {
1009 rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
1010 } else if (attrs & DF_UC_WIDE) {
1011 rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
1012 nextSreg + 1);
1013 }
1014 if (attrs & DF_DA) {
1015 rlDest = oatGetDest(cUnit, mir, 0);
1016 } else if (attrs & DF_DA_WIDE) {
1017 rlDest = oatGetDestWide(cUnit, mir, 0, 1);
1018 }
1019
1020 switch(opcode) {
1021 case OP_NOP:
1022 break;
1023
1024 case OP_MOVE_EXCEPTION:
1025 int exOffset;
1026 int resetReg;
buzbeec143c552011-08-20 17:38:58 -07001027 exOffset = Thread::ExceptionOffset().Int32Value();
buzbee67bf8852011-08-17 17:51:35 -07001028 resetReg = oatAllocTemp(cUnit);
1029 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1030 loadWordDisp(cUnit, rSELF, exOffset, rlResult.lowReg);
1031 loadConstant(cUnit, resetReg, 0);
1032 storeWordDisp(cUnit, rSELF, exOffset, resetReg);
1033 storeValue(cUnit, rlDest, rlResult);
1034 break;
1035
1036 case OP_RETURN_VOID:
1037 break;
1038
1039 case OP_RETURN:
1040 case OP_RETURN_OBJECT:
1041 storeValue(cUnit, retLoc, rlSrc[0]);
1042 break;
1043
1044 case OP_RETURN_WIDE:
1045 rlDest = retLocWide;
1046 rlDest.fp = rlSrc[0].fp;
1047 storeValueWide(cUnit, rlDest, rlSrc[0]);
1048 break;
1049
1050 case OP_MOVE_RESULT_WIDE:
1051 if (mir->OptimizationFlags & MIR_INLINED)
1052 break; // Nop - combined w/ previous invoke
1053 /*
1054 * Somewhat hacky here. Because we're now passing
1055 * return values in registers, we have to let the
1056 * register allocation utilities know that the return
1057 * registers are live and may not be used for address
1058 * formation in storeValueWide.
1059 */
1060 assert(retLocWide.lowReg == r0);
1061 assert(retLocWide.lowReg == r1);
1062 oatLockTemp(cUnit, retLocWide.lowReg);
1063 oatLockTemp(cUnit, retLocWide.highReg);
1064 storeValueWide(cUnit, rlDest, retLocWide);
1065 oatFreeTemp(cUnit, retLocWide.lowReg);
1066 oatFreeTemp(cUnit, retLocWide.highReg);
1067 break;
1068
1069 case OP_MOVE_RESULT:
1070 case OP_MOVE_RESULT_OBJECT:
1071 if (mir->OptimizationFlags & MIR_INLINED)
1072 break; // Nop - combined w/ previous invoke
1073 /* See comment for OP_MOVE_RESULT_WIDE */
1074 assert(retLoc.lowReg == r0);
1075 oatLockTemp(cUnit, retLoc.lowReg);
1076 storeValue(cUnit, rlDest, retLoc);
1077 oatFreeTemp(cUnit, retLoc.lowReg);
1078 break;
1079
1080 case OP_MOVE:
1081 case OP_MOVE_OBJECT:
1082 case OP_MOVE_16:
1083 case OP_MOVE_OBJECT_16:
1084 case OP_MOVE_FROM16:
1085 case OP_MOVE_OBJECT_FROM16:
1086 storeValue(cUnit, rlDest, rlSrc[0]);
1087 break;
1088
1089 case OP_MOVE_WIDE:
1090 case OP_MOVE_WIDE_16:
1091 case OP_MOVE_WIDE_FROM16:
1092 storeValueWide(cUnit, rlDest, rlSrc[0]);
1093 break;
1094
1095 case OP_CONST:
1096 case OP_CONST_4:
1097 case OP_CONST_16:
1098 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
1099 loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
1100 storeValue(cUnit, rlDest, rlResult);
1101 break;
1102
1103 case OP_CONST_HIGH16:
1104 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
1105 loadConstantNoClobber(cUnit, rlResult.lowReg,
1106 mir->dalvikInsn.vB << 16);
1107 storeValue(cUnit, rlDest, rlResult);
1108 break;
1109
1110 case OP_CONST_WIDE_16:
1111 case OP_CONST_WIDE_32:
1112 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1113 loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
1114 //TUNING: do high separately to avoid load dependency
1115 opRegRegImm(cUnit, kOpAsr, rlResult.highReg, rlResult.lowReg, 31);
1116 storeValueWide(cUnit, rlDest, rlResult);
1117 break;
1118
1119 case OP_CONST_WIDE:
1120 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
1121 loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
buzbee54330722011-08-23 16:46:55 -07001122 mir->dalvikInsn.vB_wide & 0xffffffff,
1123 (mir->dalvikInsn.vB_wide >> 32) & 0xffffffff);
buzbee3ea4ec52011-08-22 17:37:19 -07001124 storeValueWide(cUnit, rlDest, rlResult);
buzbee67bf8852011-08-17 17:51:35 -07001125 break;
1126
1127 case OP_CONST_WIDE_HIGH16:
1128 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
1129 loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
1130 0, mir->dalvikInsn.vB << 16);
buzbee7b1b86d2011-08-26 18:59:10 -07001131 storeValueWide(cUnit, rlDest, rlResult);
buzbee67bf8852011-08-17 17:51:35 -07001132 break;
1133
1134 case OP_MONITOR_ENTER:
1135 genMonitorEnter(cUnit, mir, rlSrc[0]);
1136 break;
1137
1138 case OP_MONITOR_EXIT:
1139 genMonitorExit(cUnit, mir, rlSrc[0]);
1140 break;
1141
1142 case OP_CHECK_CAST:
1143 genCheckCast(cUnit, mir, rlSrc[0]);
1144 break;
1145
1146 case OP_INSTANCE_OF:
1147 genInstanceof(cUnit, mir, rlDest, rlSrc[0]);
1148 break;
1149
1150 case OP_NEW_INSTANCE:
1151 genNewInstance(cUnit, mir, rlDest);
1152 break;
1153
1154 case OP_THROW:
1155 genThrow(cUnit, mir, rlSrc[0]);
1156 break;
1157
1158 case OP_ARRAY_LENGTH:
1159 int lenOffset;
buzbeec143c552011-08-20 17:38:58 -07001160 lenOffset = Array::LengthOffset().Int32Value();
buzbee7b1b86d2011-08-26 18:59:10 -07001161 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
buzbee67bf8852011-08-17 17:51:35 -07001162 genNullCheck(cUnit, rlSrc[0].sRegLow, rlSrc[0].lowReg,
1163 mir->offset, NULL);
1164 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1165 loadWordDisp(cUnit, rlSrc[0].lowReg, lenOffset,
1166 rlResult.lowReg);
1167 storeValue(cUnit, rlDest, rlResult);
1168 break;
1169
1170 case OP_CONST_STRING:
1171 case OP_CONST_STRING_JUMBO:
1172 genConstString(cUnit, mir, rlDest, rlSrc[0]);
1173 break;
1174
1175 case OP_CONST_CLASS:
1176 genConstClass(cUnit, mir, rlDest, rlSrc[0]);
1177 break;
1178
1179 case OP_FILL_ARRAY_DATA:
1180 genFillArrayData(cUnit, mir, rlSrc[0]);
1181 break;
1182
1183 case OP_FILLED_NEW_ARRAY:
1184 genFilledNewArray(cUnit, mir, false /* not range */);
1185 break;
1186
1187 case OP_FILLED_NEW_ARRAY_RANGE:
1188 genFilledNewArray(cUnit, mir, true /* range */);
1189 break;
1190
1191 case OP_NEW_ARRAY:
1192 genNewArray(cUnit, mir, rlDest, rlSrc[0]);
1193 break;
1194
1195 case OP_GOTO:
1196 case OP_GOTO_16:
1197 case OP_GOTO_32:
1198 // TUNING: add MIR flag to disable when unnecessary
1199 bool backwardBranch;
1200 backwardBranch = (bb->taken->startOffset <= mir->offset);
1201 if (backwardBranch) {
1202 genSuspendPoll(cUnit, mir);
1203 }
1204 genUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
1205 break;
1206
1207 case OP_PACKED_SWITCH:
1208 genPackedSwitch(cUnit, mir, rlSrc[0]);
1209 break;
1210
1211 case OP_SPARSE_SWITCH:
1212 genSparseSwitch(cUnit, mir, rlSrc[0]);
1213 break;
1214
1215 case OP_CMPL_FLOAT:
1216 case OP_CMPG_FLOAT:
1217 case OP_CMPL_DOUBLE:
1218 case OP_CMPG_DOUBLE:
1219 res = genCmpFP(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1220 break;
1221
1222 case OP_CMP_LONG:
1223 genCmpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1224 break;
1225
1226 case OP_IF_EQ:
1227 case OP_IF_NE:
1228 case OP_IF_LT:
1229 case OP_IF_GE:
1230 case OP_IF_GT:
1231 case OP_IF_LE: {
1232 bool backwardBranch;
1233 ArmConditionCode cond;
1234 backwardBranch = (bb->taken->startOffset <= mir->offset);
1235 if (backwardBranch) {
1236 genSuspendPoll(cUnit, mir);
1237 }
1238 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1239 rlSrc[1] = loadValue(cUnit, rlSrc[1], kCoreReg);
1240 opRegReg(cUnit, kOpCmp, rlSrc[0].lowReg, rlSrc[1].lowReg);
1241 switch(opcode) {
1242 case OP_IF_EQ:
1243 cond = kArmCondEq;
1244 break;
1245 case OP_IF_NE:
1246 cond = kArmCondNe;
1247 break;
1248 case OP_IF_LT:
1249 cond = kArmCondLt;
1250 break;
1251 case OP_IF_GE:
1252 cond = kArmCondGe;
1253 break;
1254 case OP_IF_GT:
1255 cond = kArmCondGt;
1256 break;
1257 case OP_IF_LE:
1258 cond = kArmCondLe;
1259 break;
1260 default:
1261 cond = (ArmConditionCode)0;
1262 LOG(FATAL) << "Unexpected opcode " << (int)opcode;
1263 }
1264 genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
1265 genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
1266 break;
1267 }
1268
1269 case OP_IF_EQZ:
1270 case OP_IF_NEZ:
1271 case OP_IF_LTZ:
1272 case OP_IF_GEZ:
1273 case OP_IF_GTZ:
1274 case OP_IF_LEZ: {
1275 bool backwardBranch;
1276 ArmConditionCode cond;
1277 backwardBranch = (bb->taken->startOffset <= mir->offset);
1278 if (backwardBranch) {
1279 genSuspendPoll(cUnit, mir);
1280 }
1281 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1282 opRegImm(cUnit, kOpCmp, rlSrc[0].lowReg, 0);
1283 switch(opcode) {
1284 case OP_IF_EQZ:
1285 cond = kArmCondEq;
1286 break;
1287 case OP_IF_NEZ:
1288 cond = kArmCondNe;
1289 break;
1290 case OP_IF_LTZ:
1291 cond = kArmCondLt;
1292 break;
1293 case OP_IF_GEZ:
1294 cond = kArmCondGe;
1295 break;
1296 case OP_IF_GTZ:
1297 cond = kArmCondGt;
1298 break;
1299 case OP_IF_LEZ:
1300 cond = kArmCondLe;
1301 break;
1302 default:
1303 cond = (ArmConditionCode)0;
1304 LOG(FATAL) << "Unexpected opcode " << (int)opcode;
1305 }
1306 genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
1307 genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
1308 break;
1309 }
1310
1311 case OP_AGET_WIDE:
1312 genArrayGet(cUnit, mir, kLong, rlSrc[0], rlSrc[1], rlDest, 3);
1313 break;
1314 case OP_AGET:
1315 case OP_AGET_OBJECT:
1316 genArrayGet(cUnit, mir, kWord, rlSrc[0], rlSrc[1], rlDest, 2);
1317 break;
1318 case OP_AGET_BOOLEAN:
1319 genArrayGet(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1],
1320 rlDest, 0);
1321 break;
1322 case OP_AGET_BYTE:
1323 genArrayGet(cUnit, mir, kSignedByte, rlSrc[0], rlSrc[1], rlDest, 0);
1324 break;
1325 case OP_AGET_CHAR:
1326 genArrayGet(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1],
1327 rlDest, 1);
1328 break;
1329 case OP_AGET_SHORT:
1330 genArrayGet(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], rlDest, 1);
1331 break;
1332 case OP_APUT_WIDE:
1333 genArrayPut(cUnit, mir, kLong, rlSrc[1], rlSrc[2], rlSrc[0], 3);
1334 break;
1335 case OP_APUT:
1336 genArrayPut(cUnit, mir, kWord, rlSrc[1], rlSrc[2], rlSrc[0], 2);
1337 break;
1338 case OP_APUT_OBJECT:
buzbee1b4c8592011-08-31 10:43:51 -07001339 genArrayObjPut(cUnit, mir, rlSrc[1], rlSrc[2], rlSrc[0], 2);
buzbee67bf8852011-08-17 17:51:35 -07001340 break;
1341 case OP_APUT_SHORT:
1342 case OP_APUT_CHAR:
1343 genArrayPut(cUnit, mir, kUnsignedHalf, rlSrc[1], rlSrc[2],
1344 rlSrc[0], 1);
1345 break;
1346 case OP_APUT_BYTE:
1347 case OP_APUT_BOOLEAN:
1348 genArrayPut(cUnit, mir, kUnsignedByte, rlSrc[1], rlSrc[2],
1349 rlSrc[0], 0);
1350 break;
1351
1352 case OP_IGET_WIDE:
1353 case OP_IGET_WIDE_VOLATILE:
1354 genIGetWideX(cUnit, mir, rlDest, rlSrc[0]);
1355 break;
1356
1357 case OP_IGET:
1358 case OP_IGET_VOLATILE:
1359 case OP_IGET_OBJECT:
1360 case OP_IGET_OBJECT_VOLATILE:
1361 genIGetX(cUnit, mir, kWord, rlDest, rlSrc[0]);
1362 break;
1363
1364 case OP_IGET_BOOLEAN:
1365 case OP_IGET_BYTE:
1366 genIGetX(cUnit, mir, kUnsignedByte, rlDest, rlSrc[0]);
1367 break;
1368
1369 case OP_IGET_CHAR:
1370 genIGetX(cUnit, mir, kUnsignedHalf, rlDest, rlSrc[0]);
1371 break;
1372
1373 case OP_IGET_SHORT:
1374 genIGetX(cUnit, mir, kSignedHalf, rlDest, rlSrc[0]);
1375 break;
1376
1377 case OP_IPUT_WIDE:
1378 case OP_IPUT_WIDE_VOLATILE:
1379 genIPutWideX(cUnit, mir, rlSrc[0], rlSrc[1]);
1380 break;
1381
1382 case OP_IPUT_OBJECT:
1383 case OP_IPUT_OBJECT_VOLATILE:
1384 genIPutX(cUnit, mir, kWord, rlSrc[0], rlSrc[1], true);
1385 break;
1386
1387 case OP_IPUT:
1388 case OP_IPUT_VOLATILE:
1389 genIPutX(cUnit, mir, kWord, rlSrc[0], rlSrc[1], false);
1390 break;
1391
1392 case OP_IPUT_BOOLEAN:
1393 case OP_IPUT_BYTE:
1394 genIPutX(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1], false);
1395 break;
1396
1397 case OP_IPUT_CHAR:
1398 genIPutX(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1], false);
1399 break;
1400
1401 case OP_IPUT_SHORT:
1402 genIPutX(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], false);
1403 break;
1404
1405 case OP_SGET:
1406 case OP_SGET_OBJECT:
1407 case OP_SGET_BOOLEAN:
1408 case OP_SGET_BYTE:
1409 case OP_SGET_CHAR:
1410 case OP_SGET_SHORT:
1411 genSget(cUnit, mir, rlResult, rlDest);
1412 break;
1413
1414 case OP_SGET_WIDE:
1415 genSgetWide(cUnit, mir, rlResult, rlDest);
1416 break;
1417
1418 case OP_SPUT:
1419 case OP_SPUT_OBJECT:
1420 case OP_SPUT_BOOLEAN:
1421 case OP_SPUT_BYTE:
1422 case OP_SPUT_CHAR:
1423 case OP_SPUT_SHORT:
1424 genSput(cUnit, mir, rlSrc[0]);
1425 break;
1426
1427 case OP_SPUT_WIDE:
1428 genSputWide(cUnit, mir, rlSrc[0]);
1429 break;
1430
1431 case OP_INVOKE_STATIC_RANGE:
buzbee561227c2011-09-02 15:28:19 -07001432 genInvokeStaticDirect(cUnit, mir, false /*direct*/,
1433 true /*range*/);
1434 break;
buzbee67bf8852011-08-17 17:51:35 -07001435 case OP_INVOKE_STATIC:
buzbee561227c2011-09-02 15:28:19 -07001436 genInvokeStaticDirect(cUnit, mir, false /*direct*/,
1437 false /*range*/);
buzbee67bf8852011-08-17 17:51:35 -07001438 break;
1439
1440 case OP_INVOKE_DIRECT:
buzbee561227c2011-09-02 15:28:19 -07001441 genInvokeStaticDirect(cUnit, mir, true /*direct*/,
1442 false /*range*/);
1443 break;
buzbee67bf8852011-08-17 17:51:35 -07001444 case OP_INVOKE_DIRECT_RANGE:
buzbee561227c2011-09-02 15:28:19 -07001445 genInvokeStaticDirect(cUnit, mir, true /*direct*/,
1446 true /*range*/);
buzbee67bf8852011-08-17 17:51:35 -07001447 break;
1448
1449 case OP_INVOKE_VIRTUAL:
1450 case OP_INVOKE_VIRTUAL_RANGE:
1451 genInvokeVirtual(cUnit, mir);
1452 break;
1453
1454 case OP_INVOKE_SUPER:
1455 case OP_INVOKE_SUPER_RANGE:
1456 genInvokeSuper(cUnit, mir);
1457 break;
1458
1459 case OP_INVOKE_INTERFACE:
1460 case OP_INVOKE_INTERFACE_RANGE:
1461 genInvokeInterface(cUnit, mir);
1462 break;
1463
1464 case OP_NEG_INT:
1465 case OP_NOT_INT:
1466 res = genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1467 break;
1468
1469 case OP_NEG_LONG:
1470 case OP_NOT_LONG:
1471 res = genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1472 break;
1473
1474 case OP_NEG_FLOAT:
1475 res = genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1476 break;
1477
1478 case OP_NEG_DOUBLE:
1479 res = genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1480 break;
1481
1482 case OP_INT_TO_LONG:
1483 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1484 if (rlSrc[0].location == kLocPhysReg) {
1485 genRegCopy(cUnit, rlResult.lowReg, rlSrc[0].lowReg);
1486 } else {
1487 loadValueDirect(cUnit, rlSrc[0], rlResult.lowReg);
1488 }
1489 opRegRegImm(cUnit, kOpAsr, rlResult.highReg,
1490 rlResult.lowReg, 31);
1491 storeValueWide(cUnit, rlDest, rlResult);
1492 break;
1493
1494 case OP_LONG_TO_INT:
1495 rlSrc[0] = oatUpdateLocWide(cUnit, rlSrc[0]);
1496 rlSrc[0] = oatWideToNarrow(cUnit, rlSrc[0]);
1497 storeValue(cUnit, rlDest, rlSrc[0]);
1498 break;
1499
1500 case OP_INT_TO_BYTE:
1501 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1502 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1503 opRegReg(cUnit, kOp2Byte, rlResult.lowReg, rlSrc[0].lowReg);
1504 storeValue(cUnit, rlDest, rlResult);
1505 break;
1506
1507 case OP_INT_TO_SHORT:
1508 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1509 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1510 opRegReg(cUnit, kOp2Short, rlResult.lowReg, rlSrc[0].lowReg);
1511 storeValue(cUnit, rlDest, rlResult);
1512 break;
1513
1514 case OP_INT_TO_CHAR:
1515 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1516 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1517 opRegReg(cUnit, kOp2Char, rlResult.lowReg, rlSrc[0].lowReg);
1518 storeValue(cUnit, rlDest, rlResult);
1519 break;
1520
1521 case OP_INT_TO_FLOAT:
1522 case OP_INT_TO_DOUBLE:
1523 case OP_LONG_TO_FLOAT:
1524 case OP_LONG_TO_DOUBLE:
1525 case OP_FLOAT_TO_INT:
1526 case OP_FLOAT_TO_LONG:
1527 case OP_FLOAT_TO_DOUBLE:
1528 case OP_DOUBLE_TO_INT:
1529 case OP_DOUBLE_TO_LONG:
1530 case OP_DOUBLE_TO_FLOAT:
1531 genConversion(cUnit, mir);
1532 break;
1533
1534 case OP_ADD_INT:
1535 case OP_SUB_INT:
1536 case OP_MUL_INT:
1537 case OP_DIV_INT:
1538 case OP_REM_INT:
1539 case OP_AND_INT:
1540 case OP_OR_INT:
1541 case OP_XOR_INT:
1542 case OP_SHL_INT:
1543 case OP_SHR_INT:
1544 case OP_USHR_INT:
1545 case OP_ADD_INT_2ADDR:
1546 case OP_SUB_INT_2ADDR:
1547 case OP_MUL_INT_2ADDR:
1548 case OP_DIV_INT_2ADDR:
1549 case OP_REM_INT_2ADDR:
1550 case OP_AND_INT_2ADDR:
1551 case OP_OR_INT_2ADDR:
1552 case OP_XOR_INT_2ADDR:
1553 case OP_SHL_INT_2ADDR:
1554 case OP_SHR_INT_2ADDR:
1555 case OP_USHR_INT_2ADDR:
1556 genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1557 break;
1558
1559 case OP_ADD_LONG:
1560 case OP_SUB_LONG:
1561 case OP_MUL_LONG:
1562 case OP_DIV_LONG:
1563 case OP_REM_LONG:
1564 case OP_AND_LONG:
1565 case OP_OR_LONG:
1566 case OP_XOR_LONG:
1567 case OP_ADD_LONG_2ADDR:
1568 case OP_SUB_LONG_2ADDR:
1569 case OP_MUL_LONG_2ADDR:
1570 case OP_DIV_LONG_2ADDR:
1571 case OP_REM_LONG_2ADDR:
1572 case OP_AND_LONG_2ADDR:
1573 case OP_OR_LONG_2ADDR:
1574 case OP_XOR_LONG_2ADDR:
1575 genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1576 break;
1577
buzbee67bf8852011-08-17 17:51:35 -07001578 case OP_SHL_LONG:
1579 case OP_SHR_LONG:
1580 case OP_USHR_LONG:
buzbeee6d61962011-08-27 11:58:19 -07001581 case OP_SHL_LONG_2ADDR:
1582 case OP_SHR_LONG_2ADDR:
1583 case OP_USHR_LONG_2ADDR:
buzbee67bf8852011-08-17 17:51:35 -07001584 genShiftOpLong(cUnit,mir, rlDest, rlSrc[0], rlSrc[1]);
1585 break;
1586
1587 case OP_ADD_FLOAT:
1588 case OP_SUB_FLOAT:
1589 case OP_MUL_FLOAT:
1590 case OP_DIV_FLOAT:
1591 case OP_REM_FLOAT:
1592 case OP_ADD_FLOAT_2ADDR:
1593 case OP_SUB_FLOAT_2ADDR:
1594 case OP_MUL_FLOAT_2ADDR:
1595 case OP_DIV_FLOAT_2ADDR:
1596 case OP_REM_FLOAT_2ADDR:
1597 genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1598 break;
1599
1600 case OP_ADD_DOUBLE:
1601 case OP_SUB_DOUBLE:
1602 case OP_MUL_DOUBLE:
1603 case OP_DIV_DOUBLE:
1604 case OP_REM_DOUBLE:
1605 case OP_ADD_DOUBLE_2ADDR:
1606 case OP_SUB_DOUBLE_2ADDR:
1607 case OP_MUL_DOUBLE_2ADDR:
1608 case OP_DIV_DOUBLE_2ADDR:
1609 case OP_REM_DOUBLE_2ADDR:
1610 genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1611 break;
1612
1613 case OP_RSUB_INT:
1614 case OP_ADD_INT_LIT16:
1615 case OP_MUL_INT_LIT16:
1616 case OP_DIV_INT_LIT16:
1617 case OP_REM_INT_LIT16:
1618 case OP_AND_INT_LIT16:
1619 case OP_OR_INT_LIT16:
1620 case OP_XOR_INT_LIT16:
1621 case OP_ADD_INT_LIT8:
1622 case OP_RSUB_INT_LIT8:
1623 case OP_MUL_INT_LIT8:
1624 case OP_DIV_INT_LIT8:
1625 case OP_REM_INT_LIT8:
1626 case OP_AND_INT_LIT8:
1627 case OP_OR_INT_LIT8:
1628 case OP_XOR_INT_LIT8:
1629 case OP_SHL_INT_LIT8:
1630 case OP_SHR_INT_LIT8:
1631 case OP_USHR_INT_LIT8:
1632 genArithOpIntLit(cUnit, mir, rlDest, rlSrc[0], mir->dalvikInsn.vC);
1633 break;
1634
1635 default:
1636 res = true;
1637 }
1638 return res;
1639}
1640
/*
 * Printable names for the extended (compiler-internal) MIR opcodes.
 * Indexed by (opcode - kMirOpFirst) in handleExtendedMethodMIR, so entry
 * order must match the ExtendedMIROpcode enum.
 */
static const char *extendedMIROpNames[kMirOpLast - kMirOpFirst] = {
    "kMirOpPhi",
    "kMirOpNullNRangeUpCheck",
    "kMirOpNullNRangeDownCheck",
    "kMirOpLowerBound",
    "kMirOpPunt",
    "kMirOpCheckInlinePrediction",
};
1649
1650/* Extended MIR instructions like PHI */
1651static void handleExtendedMethodMIR(CompilationUnit* cUnit, MIR* mir)
1652{
1653 int opOffset = mir->dalvikInsn.opcode - kMirOpFirst;
1654 char* msg = (char*)oatNew(strlen(extendedMIROpNames[opOffset]) + 1, false);
1655 strcpy(msg, extendedMIROpNames[opOffset]);
1656 ArmLIR* op = newLIR1(cUnit, kArmPseudoExtended, (int) msg);
1657
1658 switch ((ExtendedMIROpcode)mir->dalvikInsn.opcode) {
1659 case kMirOpPhi: {
1660 char* ssaString = oatGetSSAString(cUnit, mir->ssaRep);
1661 op->flags.isNop = true;
1662 newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
1663 break;
1664 }
1665 default:
1666 break;
1667 }
1668}
1669
/* If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame.
 * Note: at this point, any ins passed in registers are also copied to
 * their home frame location. */
buzbee67bf8852011-08-17 17:51:35 -07001674static void flushIns(CompilationUnit* cUnit)
1675{
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001676 if (cUnit->method->NumIns() == 0)
buzbee67bf8852011-08-17 17:51:35 -07001677 return;
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001678 int inRegs = (cUnit->method->NumIns() > 2) ? 3
1679 : cUnit->method->NumIns();
buzbee67bf8852011-08-17 17:51:35 -07001680 int startReg = r1;
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001681 int startLoc = cUnit->method->NumRegisters() -
1682 cUnit->method->NumIns();
buzbee67bf8852011-08-17 17:51:35 -07001683 for (int i = 0; i < inRegs; i++) {
1684 RegLocation loc = cUnit->regLocation[startLoc + i];
buzbeedfd3d702011-08-28 12:56:51 -07001685 //TUNING: be smarter about flushing ins to frame
1686 storeBaseDisp(cUnit, rSP, loc.spOffset, startReg + i, kWord);
buzbee67bf8852011-08-17 17:51:35 -07001687 if (loc.location == kLocPhysReg) {
1688 genRegCopy(cUnit, loc.lowReg, startReg + i);
buzbee67bf8852011-08-17 17:51:35 -07001689 }
1690 }
1691
1692 // Handle special case of wide argument half in regs, half in frame
1693 if (inRegs == 3) {
1694 RegLocation loc = cUnit->regLocation[startLoc + 2];
1695 if (loc.wide && loc.location == kLocPhysReg) {
1696 // Load the other half of the arg into the promoted pair
buzbee561227c2011-09-02 15:28:19 -07001697 loadWordDisp(cUnit, rSP, loc.spOffset + 4, loc.highReg);
buzbee67bf8852011-08-17 17:51:35 -07001698 inRegs++;
1699 }
1700 }
1701
1702 // Now, do initial assignment of all promoted arguments passed in frame
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001703 for (int i = inRegs; i < cUnit->method->NumIns();) {
buzbee67bf8852011-08-17 17:51:35 -07001704 RegLocation loc = cUnit->regLocation[startLoc + i];
1705 if (loc.fpLocation == kLocPhysReg) {
1706 loc.location = kLocPhysReg;
1707 loc.fp = true;
1708 loc.lowReg = loc.fpLowReg;
1709 loc.highReg = loc.fpHighReg;
1710 }
1711 if (loc.location == kLocPhysReg) {
1712 if (loc.wide) {
1713 loadBaseDispWide(cUnit, NULL, rSP, loc.spOffset,
1714 loc.lowReg, loc.highReg, INVALID_SREG);
1715 i++;
1716 } else {
buzbee561227c2011-09-02 15:28:19 -07001717 loadWordDisp(cUnit, rSP, loc.spOffset, loc.lowReg);
buzbee67bf8852011-08-17 17:51:35 -07001718 }
1719 }
1720 i++;
1721 }
1722}
1723
/*
 * Handle the content in each basic block: emit the block label, handle
 * method entry/exit prologue/epilogue, then lower each MIR to ARM LIR.
 * Always returns false (no change) for the dataflow dispatcher.
 */
static bool methodBlockCodeGen(CompilationUnit* cUnit, BasicBlock* bb)
{
    MIR* mir;
    ArmLIR* labelList = (ArmLIR*) cUnit->blockLabelList;
    int blockId = bb->id;

    cUnit->curBlock = bb;
    labelList[blockId].operands[0] = bb->startOffset;

    /* Insert the block label */
    labelList[blockId].opcode = kArmPseudoNormalBlockLabel;
    oatAppendLIR(cUnit, (LIR*) &labelList[blockId]);

    /* Start the block with clean register-tracking state */
    oatClobberAllRegs(cUnit);
    oatResetNullCheck(cUnit);

    /* First LIR of this block; used below as the local-optimization anchor */
    ArmLIR* headLIR = NULL;

    if (bb->blockType == kEntryBlock) {
        /*
         * On entry, r0, r1, r2 & r3 are live. Let the register allocation
         * mechanism know so it doesn't try to use any of them when
         * expanding the frame or flushing. This leaves the utility
         * code with a single temp: r12. This should be enough.
         */
        oatLockTemp(cUnit, r0);
        oatLockTemp(cUnit, r1);
        oatLockTemp(cUnit, r2);
        oatLockTemp(cUnit, r3);
        newLIR0(cUnit, kArmPseudoMethodEntry);
        /* Spill core callee saves */
        newLIR1(cUnit, kThumb2Push, cUnit->coreSpillMask);
        /* Need to spill any FP regs? */
        if (cUnit->numFPSpills) {
            newLIR1(cUnit, kThumb2VPushCS, cUnit->numFPSpills);
        }
        /* Expand the frame (spills were already pushed above) */
        opRegImm(cUnit, kOpSub, rSP, cUnit->frameSize - (cUnit->numSpills * 4));
        /* Store r0 (Method*) at the base of the frame */
        storeBaseDisp(cUnit, rSP, 0, r0, kWord);
        flushIns(cUnit);
        oatFreeTemp(cUnit, r0);
        oatFreeTemp(cUnit, r1);
        oatFreeTemp(cUnit, r2);
        oatFreeTemp(cUnit, r3);
    } else if (bb->blockType == kExitBlock) {
        newLIR0(cUnit, kArmPseudoMethodExit);
        /* Contract the frame, mirroring the entry-block expansion */
        opRegImm(cUnit, kOpAdd, rSP, cUnit->frameSize - (cUnit->numSpills * 4));
        /* Need to restore any FP callee saves? */
        if (cUnit->numFPSpills) {
            newLIR1(cUnit, kThumb2VPopCS, cUnit->numFPSpills);
        }
        if (cUnit->coreSpillMask & (1 << rLR)) {
            /* Unspill rLR to rPC: pop straight into pc to return */
            cUnit->coreSpillMask &= ~(1 << rLR);
            cUnit->coreSpillMask |= (1 << rPC);
        }
        newLIR1(cUnit, kThumb2Pop, cUnit->coreSpillMask);
        if (!(cUnit->coreSpillMask & (1 << rPC))) {
            /* We didn't pop to rPC, so must do a bx rLR */
            newLIR1(cUnit, kThumbBx, rLR);
        }
    }

    for (mir = bb->firstMIRInsn; mir; mir = mir->next) {

        oatResetRegPool(cUnit);
        if (cUnit->disableOpt & (1 << kTrackLiveTemps)) {
            oatClobberAllRegs(cUnit);
        }

        if (cUnit->disableOpt & (1 << kSuppressLoads)) {
            oatResetDefTracking(cUnit);
        }

        /* Extended (compiler-internal) opcodes take a separate path */
        if ((int)mir->dalvikInsn.opcode >= (int)kMirOpFirst) {
            handleExtendedMethodMIR(cUnit, mir);
            continue;
        }

        cUnit->currentDalvikOffset = mir->offset;

        Opcode dalvikOpcode = mir->dalvikInsn.opcode;
        InstructionFormat dalvikFormat =
            dexGetFormatFromOpcode(dalvikOpcode);

        ArmLIR* boundaryLIR;

        /* Mark the beginning of a Dalvik instruction for line tracking */
        boundaryLIR = newLIR1(cUnit, kArmPseudoDalvikByteCodeBoundary,
                              (int) oatGetDalvikDisassembly(
                              &mir->dalvikInsn, ""));
        /* Remember the first LIR for this block */
        if (headLIR == NULL) {
            headLIR = boundaryLIR;
            /* Set the first boundaryLIR as a scheduling barrier */
            headLIR->defMask = ENCODE_ALL;
        }

        /* Don't generate the SSA annotation unless verbose mode is on */
        if (cUnit->printMe && mir->ssaRep) {
            char *ssaString = oatGetSSAString(cUnit, mir->ssaRep);
            newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
        }

        bool notHandled = compileDalvikInstruction(cUnit, mir, bb, labelList);

        /* An unhandled opcode is a compiler bug — abort loudly */
        if (notHandled) {
            char buf[100];
            snprintf(buf, 100, "%#06x: Opcode %#x (%s) / Fmt %d not handled",
                     mir->offset,
                     dalvikOpcode, dexGetOpcodeName(dalvikOpcode),
                     dalvikFormat);
            LOG(FATAL) << buf;
        }
    }

    if (headLIR) {
        /*
         * Eliminate redundant loads/stores and delay stores into later
         * slots
         */
        oatApplyLocalOptimizations(cUnit, (LIR*) headLIR,
                                   cUnit->lastLIRInsn);

        /*
         * Generate an unconditional branch to the fallthrough block.
         */
        if (bb->fallThrough) {
            genUnconditionalBranch(cUnit,
                                   &labelList[bb->fallThrough->id]);
        }
    }
    return false;
}
1858
1859/*
1860 * Nop any unconditional branches that go to the next instruction.
1861 * Note: new redundant branches may be inserted later, and we'll
1862 * use a check in final instruction assembly to nop those out.
1863 */
1864void removeRedundantBranches(CompilationUnit* cUnit)
1865{
1866 ArmLIR* thisLIR;
1867
1868 for (thisLIR = (ArmLIR*) cUnit->firstLIRInsn;
1869 thisLIR != (ArmLIR*) cUnit->lastLIRInsn;
1870 thisLIR = NEXT_LIR(thisLIR)) {
1871
1872 /* Branch to the next instruction */
1873 if ((thisLIR->opcode == kThumbBUncond) ||
1874 (thisLIR->opcode == kThumb2BUncond)) {
1875 ArmLIR* nextLIR = thisLIR;
1876
1877 while (true) {
1878 nextLIR = NEXT_LIR(nextLIR);
1879
1880 /*
1881 * Is the branch target the next instruction?
1882 */
1883 if (nextLIR == (ArmLIR*) thisLIR->generic.target) {
1884 thisLIR->flags.isNop = true;
1885 break;
1886 }
1887
1888 /*
1889 * Found real useful stuff between the branch and the target.
1890 * Need to explicitly check the lastLIRInsn here because it
1891 * might be the last real instruction.
1892 */
1893 if (!isPseudoOpcode(nextLIR->opcode) ||
1894 (nextLIR = (ArmLIR*) cUnit->lastLIRInsn))
1895 break;
1896 }
1897 }
1898 }
1899}
1900
1901void oatMethodMIR2LIR(CompilationUnit* cUnit)
1902{
1903 /* Used to hold the labels of each block */
1904 cUnit->blockLabelList =
1905 (void *) oatNew(sizeof(ArmLIR) * cUnit->numBlocks, true);
1906
1907 oatDataFlowAnalysisDispatcher(cUnit, methodBlockCodeGen,
1908 kPreOrderDFSTraversal, false /* Iterative */);
1909 removeRedundantBranches(cUnit);
1910}
1911
1912/* Common initialization routine for an architecture family */
1913bool oatArchInit()
1914{
1915 int i;
1916
1917 for (i = 0; i < kArmLast; i++) {
1918 if (EncodingMap[i].opcode != i) {
1919 LOG(FATAL) << "Encoding order for " << EncodingMap[i].name <<
1920 " is wrong: expecting " << i << ", seeing " <<
1921 (int)EncodingMap[i].opcode;
1922 }
1923 }
1924
1925 return oatArchVariantInit();
1926}
1927
/*
 * Needed by the Assembler.
 * Externally-visible wrapper over the file-local setupResourceMasks().
 */
void oatSetupResourceMasks(ArmLIR* lir)
{
    setupResourceMasks(lir);
}
1933
/*
 * Needed by the ld/st optimizations.
 * Externally-visible wrapper: build a register-copy LIR from rSrc to
 * rDest without inserting it into the instruction stream.
 */
ArmLIR* oatRegCopyNoInsert(CompilationUnit* cUnit, int rDest, int rSrc)
{
    return genRegCopyNoInsert(cUnit, rDest, rSrc);
}
1939
/*
 * Needed by the register allocator.
 * Externally-visible wrapper over genRegCopy(): copy rSrc to rDest.
 */
ArmLIR* oatRegCopy(CompilationUnit* cUnit, int rDest, int rSrc)
{
    return genRegCopy(cUnit, rDest, rSrc);
}
1945
/*
 * Needed by the register allocator.
 * Externally-visible wrapper over genRegCopyWide(): copy a 64-bit value
 * held in the srcLo/srcHi pair into the destLo/destHi pair.
 */
void oatRegCopyWide(CompilationUnit* cUnit, int destLo, int destHi,
                    int srcLo, int srcHi)
{
    genRegCopyWide(cUnit, destLo, destHi, srcLo, srcHi);
}
1952
/*
 * Externally-visible wrapper over storeBaseDisp(): store rSrc to
 * [rBase + displacement] with the given operand size.
 */
void oatFlushRegImpl(CompilationUnit* cUnit, int rBase,
                     int displacement, int rSrc, OpSize size)
{
    storeBaseDisp(cUnit, rBase, displacement, rSrc, size);
}
1958
/*
 * Externally-visible wrapper over storeBaseDispWide(): store the
 * rSrcLo/rSrcHi 64-bit pair to [rBase + displacement].
 */
void oatFlushRegWideImpl(CompilationUnit* cUnit, int rBase,
                         int displacement, int rSrcLo, int rSrcHi)
{
    storeBaseDispWide(cUnit, rBase, displacement, rSrcLo, rSrcHi);
}