/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define DISPLAY_MISSING_TARGETS 1

static const RegLocation badLoc = {kLocDalvikFrame, 0, 0, INVALID_REG,
                                   INVALID_REG, INVALID_SREG, 0,
                                   kLocDalvikFrame, INVALID_REG, INVALID_REG,
                                   INVALID_OFFSET};
static const RegLocation retLoc = LOC_DALVIK_RETURN_VAL;
static const RegLocation retLocWide = LOC_DALVIK_RETURN_VAL_WIDE;

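/*
 * Conventions implied by the code below (not a formal ABI statement):
 * r0..r3 carry outgoing helper/call arguments, rLR is loaded with the
 * helper or target code address and branched to, rSELF points to the
 * current Thread, and Dalvik vreg home locations are reached as
 * rSP + spOffset.  A rough sketch of the recurring helper-call pattern
 * (unwind bookkeeping elided; pSomeHelper stands for any Thread
 * entrypoint member such as pAllocFromCode):
 *
 *     ldr  rLR, [rSELF, #pSomeHelper]   @ fetch helper entrypoint
 *     ...  load r0..r3 with arguments ...
 *     blx  rLR                          @ callUnwindableHelper()
 */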
/*
 * Let helper function take care of everything.  Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
static void genNewArray(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                        RegLocation rlSrc)
{
    oatFlushAllRegs(cUnit);    /* Everything to home location */
    loadWordDisp(cUnit, rSELF,
                 OFFSETOF_MEMBER(Thread, pAllocFromCode), rLR);
    loadCurrMethodDirect(cUnit, r1);              // arg1 <- Method*
    loadConstant(cUnit, r0, mir->dalvikInsn.vC);  // arg0 <- type_id
    loadValueDirectFixed(cUnit, rlSrc, r2);       // arg2 <- count
    callUnwindableHelper(cUnit, rLR);
    oatClobberCallRegs(cUnit);
    RegLocation rlResult = oatGetReturn(cUnit);
    storeValue(cUnit, rlDest, rlResult);
}

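/*
 * Note: for the /range form of filled-new-array, the elements are copied
 * into the new array (returned in r0 by the allocation helper) with a
 * small backwards ldr/str loop over the argument block on the stack,
 * after any promoted source registers have been flushed back to their
 * home locations.  The non-range form stores each element individually.
 */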
/*
 * Similar to genNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class.  Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
static void genFilledNewArray(CompilationUnit* cUnit, MIR* mir, bool isRange)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int elems;
    int typeId;
    if (isRange) {
        elems = dInsn->vA;
        typeId = dInsn->vB;
    } else {
        elems = dInsn->vB;
        typeId = dInsn->vC;
    }
    oatFlushAllRegs(cUnit);    /* Everything to home location */
    loadWordDisp(cUnit, rSELF,
                 OFFSETOF_MEMBER(Thread, pCheckAndAllocFromCode), rLR);
    loadCurrMethodDirect(cUnit, r1);    // arg1 <- Method*
    loadConstant(cUnit, r0, typeId);    // arg0 <- type_id
    loadConstant(cUnit, r2, elems);     // arg2 <- count
    callUnwindableHelper(cUnit, rLR);
    /*
     * NOTE: the implicit target for OP_FILLED_NEW_ARRAY is the
     * return region.  Because AllocFromCode placed the new array
     * in r0, we'll just lock it into place.  When debugger support is
     * added, it may be necessary to additionally copy all return
     * values to a home location in thread-local storage.
     */
    oatLockTemp(cUnit, r0);

    // Having a range of 0 is legal
    if (isRange && (dInsn->vA > 0)) {
        /*
         * Bit of ugliness here.  We're going to generate a mem copy loop
         * on the register range, but it is possible that some regs
         * in the range have been promoted.  This is unlikely, but
         * before generating the copy, we'll just force a flush
         * of any regs in the source range that have been promoted to
         * home location.
         */
        for (unsigned int i = 0; i < dInsn->vA; i++) {
            RegLocation loc = oatUpdateLoc(cUnit,
                oatGetSrc(cUnit, mir, i));
            if (loc.location == kLocPhysReg) {
                storeBaseDisp(cUnit, rSP, loc.spOffset, loc.lowReg, kWord);
            }
        }
        /*
         * TUNING note: generated code here could be much improved, but
         * this is an uncommon operation and isn't especially performance
         * critical.
         */
        int rSrc = oatAllocTemp(cUnit);
        int rDst = oatAllocTemp(cUnit);
        int rIdx = oatAllocTemp(cUnit);
        int rVal = rLR;  // Using a lot of temps, rLR is known free here
        // Set up source pointer
        RegLocation rlFirst = oatGetSrc(cUnit, mir, 0);
        opRegRegImm(cUnit, kOpAdd, rSrc, rSP, rlFirst.spOffset);
        // Set up the target pointer
        opRegRegImm(cUnit, kOpAdd, rDst, r0,
                    Array::DataOffset().Int32Value());
        // Set up the loop counter (known to be > 0)
        loadConstant(cUnit, rIdx, dInsn->vA);
        // Generate the copy loop.  Going backwards for convenience
        ArmLIR* target = newLIR0(cUnit, kArmPseudoTargetLabel);
        target->defMask = ENCODE_ALL;
        // Copy next element
        loadBaseIndexed(cUnit, rSrc, rIdx, rVal, 2, kWord);
        storeBaseIndexed(cUnit, rDst, rIdx, rVal, 2, kWord);
        // Use setflags encoding here
        newLIR3(cUnit, kThumb2SubsRRI12, rIdx, rIdx, 1);
        ArmLIR* branch = opCondBranch(cUnit, kArmCondNe);
        branch->generic.target = (LIR*)target;
    } else if (!isRange) {
        // TUNING: interleave
        for (unsigned int i = 0; i < dInsn->vA; i++) {
            RegLocation rlArg = loadValue(cUnit,
                oatGetSrc(cUnit, mir, i), kCoreReg);
            storeBaseDisp(cUnit, r0,
                          Array::DataOffset().Int32Value() +
                          i * 4, rlArg.lowReg, kWord);
            // If the loadValue caused a temp to be allocated, free it
            if (oatIsTemp(cUnit, rlArg.lowReg)) {
                oatFreeTemp(cUnit, rlArg.lowReg);
            }
        }
    }
}

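/*
 * sput handling.  If the field is resolved at compile time we emit an
 * inline fast path; otherwise we fall back to the pSet32Static /
 * pSetObjStatic helpers.  The fast path emitted below looks roughly
 * like this (registers are fixed to match the slow path; offsets are
 * illustrative, not literal):
 *
 *     ldr   r0, [r1, #initialized_static_storage_]  @ r1 holds Method*
 *     ldr   r0, [r0, #(Array::DataOffset + 4 * typeIdx)]
 *     cbnz  r0, resolved
 *     ldr   rLR, [rSELF, #pInitializeStaticStorage]
 *     mov   r0, #typeIdx
 *     blx   rLR
 * resolved:
 *     str   rValue, [r0, #fieldOffset]   @ rValue holds the value to store
 */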
static void genSput(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
    bool isObject = ((mir->dalvikInsn.opcode == OP_SPUT_OBJECT) ||
                     (mir->dalvikInsn.opcode == OP_SPUT_OBJECT_VOLATILE));
    int fieldIdx = mir->dalvikInsn.vB;
    Field* field = cUnit->method->GetDexCacheResolvedFields()->Get(fieldIdx);
    if (field == NULL) {
        // Slow path
        LOG(INFO) << "Field " << fieldNameFromIndex(cUnit->method, fieldIdx)
            << " unresolved at compile time";
        int funcOffset = isObject ? OFFSETOF_MEMBER(Thread, pSetObjStatic)
                                  : OFFSETOF_MEMBER(Thread, pSet32Static);
        oatFlushAllRegs(cUnit);
        loadWordDisp(cUnit, rSELF, funcOffset, rLR);
        loadConstant(cUnit, r0, mir->dalvikInsn.vB);
        loadCurrMethodDirect(cUnit, r1);
        loadValueDirect(cUnit, rlSrc, r2);
        callUnwindableHelper(cUnit, rLR);
        oatClobberCallRegs(cUnit);
    } else {
        // Fast path
        int fieldOffset = field->GetOffset().Int32Value();
        art::ClassLinker* class_linker = art::Runtime::Current()->
            GetClassLinker();
        const art::DexFile& dex_file = class_linker->
            FindDexFile(cUnit->method->GetDeclaringClass()->GetDexCache());
        const art::DexFile::FieldId& field_id = dex_file.GetFieldId(fieldIdx);
        int typeIdx = field_id.class_idx_;
        // Using fixed register to sync with slow path
        int rMethod = r1;
        oatLockTemp(cUnit, rMethod);
        loadCurrMethodDirect(cUnit, rMethod);
        int rBase = r0;
        oatLockTemp(cUnit, rBase);
        loadWordDisp(cUnit, rMethod,
                     Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                     rBase);
        loadWordDisp(cUnit, rBase, art::Array::DataOffset().Int32Value() +
                     sizeof(int32_t*) * typeIdx, rBase);
        // TUNING: fast path should fall through
        ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rBase, 0);
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
        loadConstant(cUnit, r0, typeIdx);
        callUnwindableHelper(cUnit, rLR);
        ArmLIR* skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
        skipTarget->defMask = ENCODE_ALL;
        branchOver->generic.target = (LIR*)skipTarget;
        rlSrc = oatGetSrc(cUnit, mir, 0);
        rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
        storeWordDisp(cUnit, rBase, fieldOffset, rlSrc.lowReg);
#if ANDROID_SMP != 0
        if (field->IsVolatile()) {
            oatGenMemBarrier(cUnit, kSY);
        }
#endif
        if (isObject) {
            markGCCard(cUnit, rlSrc.lowReg, rBase);
        }
        oatFreeTemp(cUnit, rBase);
    }
}

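/*
 * Wide (64-bit) sput.  Structured the same way as genSput above: a
 * pSet64Static helper call when the field is unresolved, otherwise the
 * same initialized-storage check followed by a register-pair store.
 */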
static void genSputWide(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
    int fieldIdx = mir->dalvikInsn.vB;
    Field* field = cUnit->method->GetDexCacheResolvedFields()->Get(fieldIdx);
    if (SLOW_FIELD_PATH || field == NULL) {
        LOG(INFO) << "Field " << fieldNameFromIndex(cUnit->method, fieldIdx)
            << " unresolved at compile time";
        oatFlushAllRegs(cUnit);
        loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pSet64Static), rLR);
        loadConstant(cUnit, r0, mir->dalvikInsn.vB);
        loadCurrMethodDirect(cUnit, r1);
        loadValueDirectWideFixed(cUnit, rlSrc, r2, r3);
        callUnwindableHelper(cUnit, rLR);
        oatClobberCallRegs(cUnit);
    } else {
        // Fast path
        int fieldOffset = field->GetOffset().Int32Value();
        art::ClassLinker* class_linker = art::Runtime::Current()->
            GetClassLinker();
        const art::DexFile& dex_file = class_linker->
            FindDexFile(cUnit->method->GetDeclaringClass()->GetDexCache());
        const art::DexFile::FieldId& field_id = dex_file.GetFieldId(fieldIdx);
        int typeIdx = field_id.class_idx_;
        // Using fixed register to sync with slow path
        int rMethod = r1;
        oatLockTemp(cUnit, rMethod);
        loadCurrMethodDirect(cUnit, r1);
        int rBase = r0;
        oatLockTemp(cUnit, rBase);
        loadWordDisp(cUnit, rMethod,
                     Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                     rBase);
        loadWordDisp(cUnit, rBase, art::Array::DataOffset().Int32Value() +
                     sizeof(int32_t*) * typeIdx, rBase);
        // TUNING: fast path should fall through
        ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rBase, 0);
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
        loadConstant(cUnit, r0, typeIdx);
        callUnwindableHelper(cUnit, rLR);
        ArmLIR* skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
        skipTarget->defMask = ENCODE_ALL;
        branchOver->generic.target = (LIR*)skipTarget;
        rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
        rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
        storeBaseDispWide(cUnit, rBase, fieldOffset, rlSrc.lowReg,
                          rlSrc.highReg);
#if ANDROID_SMP != 0
        if (field->IsVolatile()) {
            oatGenMemBarrier(cUnit, kSY);
        }
#endif
        oatFreeTemp(cUnit, rBase);
    }
}

static void genSgetWide(CompilationUnit* cUnit, MIR* mir,
                        RegLocation rlResult, RegLocation rlDest)
{
    int fieldIdx = mir->dalvikInsn.vB;
    Field* field = cUnit->method->GetDexCacheResolvedFields()->Get(fieldIdx);
    if (SLOW_FIELD_PATH || field == NULL) {
        LOG(INFO) << "Field " << fieldNameFromIndex(cUnit->method, fieldIdx)
            << " unresolved at compile time";
        oatFlushAllRegs(cUnit);
        loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pGet64Static), rLR);
        loadConstant(cUnit, r0, mir->dalvikInsn.vB);
        loadCurrMethodDirect(cUnit, r1);
        callUnwindableHelper(cUnit, rLR);
        RegLocation rlResult = oatGetReturnWide(cUnit);
        storeValueWide(cUnit, rlDest, rlResult);
    } else {
        // Fast path
        int fieldOffset = field->GetOffset().Int32Value();
        art::ClassLinker* class_linker = art::Runtime::Current()->
            GetClassLinker();
        const art::DexFile& dex_file = class_linker->
            FindDexFile(cUnit->method->GetDeclaringClass()->GetDexCache());
        const art::DexFile::FieldId& field_id = dex_file.GetFieldId(fieldIdx);
        int typeIdx = field_id.class_idx_;
        // Using fixed register to sync with slow path
        int rMethod = r1;
        oatLockTemp(cUnit, rMethod);
        loadCurrMethodDirect(cUnit, rMethod);
        int rBase = r0;
        oatLockTemp(cUnit, rBase);
        loadWordDisp(cUnit, rMethod,
                     Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                     rBase);
        loadWordDisp(cUnit, rBase, art::Array::DataOffset().Int32Value() +
                     sizeof(int32_t*) * typeIdx, rBase);
        // TUNING: fast path should fall through
        ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rBase, 0);
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
        loadConstant(cUnit, r0, typeIdx);
        callUnwindableHelper(cUnit, rLR);
        ArmLIR* skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
        skipTarget->defMask = ENCODE_ALL;
        branchOver->generic.target = (LIR*)skipTarget;
        rlDest = oatGetDestWide(cUnit, mir, 0, 1);
        RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
#if ANDROID_SMP != 0
        if (field->IsVolatile()) {
            oatGenMemBarrier(cUnit, kSY);
        }
#endif
        loadBaseDispWide(cUnit, NULL, rBase, fieldOffset, rlResult.lowReg,
                         rlResult.highReg, INVALID_SREG);
        oatFreeTemp(cUnit, rBase);
        storeValueWide(cUnit, rlDest, rlResult);
    }
}

static void genSget(CompilationUnit* cUnit, MIR* mir,
                    RegLocation rlResult, RegLocation rlDest)
{
    int fieldIdx = mir->dalvikInsn.vB;
    Field* field = cUnit->method->GetDexCacheResolvedFields()->Get(fieldIdx);
    bool isObject = ((mir->dalvikInsn.opcode == OP_SGET_OBJECT) ||
                     (mir->dalvikInsn.opcode == OP_SGET_OBJECT_VOLATILE));
    if (SLOW_FIELD_PATH || field == NULL) {
        LOG(INFO) << "Field " << fieldNameFromIndex(cUnit->method, fieldIdx)
            << " unresolved at compile time";
        // Slow path
        int funcOffset = isObject ? OFFSETOF_MEMBER(Thread, pGetObjStatic)
                                  : OFFSETOF_MEMBER(Thread, pGet32Static);
        oatFlushAllRegs(cUnit);
        loadWordDisp(cUnit, rSELF, funcOffset, rLR);
        loadConstant(cUnit, r0, mir->dalvikInsn.vB);
        loadCurrMethodDirect(cUnit, r1);
        callUnwindableHelper(cUnit, rLR);
        RegLocation rlResult = oatGetReturn(cUnit);
        storeValue(cUnit, rlDest, rlResult);
    } else {
        // Fast path
        int fieldOffset = field->GetOffset().Int32Value();
        art::ClassLinker* class_linker = art::Runtime::Current()->
            GetClassLinker();
        const art::DexFile& dex_file = class_linker->
            FindDexFile(cUnit->method->GetDeclaringClass()->GetDexCache());
        const art::DexFile::FieldId& field_id = dex_file.GetFieldId(fieldIdx);
        int typeIdx = field_id.class_idx_;
        // Using fixed register to sync with slow path
        int rMethod = r1;
        oatLockTemp(cUnit, rMethod);
        loadCurrMethodDirect(cUnit, rMethod);
        int rBase = r0;
        oatLockTemp(cUnit, rBase);
        loadWordDisp(cUnit, rMethod,
                     Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                     rBase);
        loadWordDisp(cUnit, rBase, art::Array::DataOffset().Int32Value() +
                     sizeof(int32_t*) * typeIdx, rBase);
        // TUNING: fast path should fall through
        ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rBase, 0);
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
        loadConstant(cUnit, r0, typeIdx);
        callUnwindableHelper(cUnit, rLR);
        ArmLIR* skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
        skipTarget->defMask = ENCODE_ALL;
        branchOver->generic.target = (LIR*)skipTarget;
        rlDest = oatGetDest(cUnit, mir, 0);
        rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
#if ANDROID_SMP != 0
        if (field->IsVolatile()) {
            oatGenMemBarrier(cUnit, kSY);
        }
#endif
        loadWordDisp(cUnit, rBase, fieldOffset, rlResult.lowReg);
        oatFreeTemp(cUnit, rBase);
        storeValue(cUnit, rlDest, rlResult);
    }
}

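/*
 * Note on the invoke code below: argument marshalling and call-sequence
 * generation are interleaved.  A NextCallInsn function is a tiny state
 * machine; it is called with an increasing 'state' between argument
 * loads so that fetching the target Method* and its code address can
 * overlap with argument setup, and it returns -1 once the sequence is
 * complete.  The 'rollback' label is only used by the slow-path ("SP")
 * variants, which may call out to a resolution helper and then restart
 * the whole sequence from that label.
 */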
typedef int (*NextCallInsn)(CompilationUnit*, MIR*, DecodedInstruction*, int,
                            ArmLIR*);

/*
 * Bit of a hack here - in lieu of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 */
static int nextSDCallInsn(CompilationUnit* cUnit, MIR* mir,
                          DecodedInstruction* dInsn, int state,
                          ArmLIR* rollback)
{
    DCHECK(rollback == NULL);
    uint32_t idx = dInsn->vB;
    switch(state) {
        case 0:  // Get the current Method* [sets r0]
            loadCurrMethodDirect(cUnit, r0);
            break;
        case 1:  // Get method->code_and_direct_methods_
            loadWordDisp(cUnit, r0,
                Method::GetDexCacheCodeAndDirectMethodsOffset().Int32Value(),
                r0);
            break;
        case 2:  // Grab target method* and target code_
            loadWordDisp(cUnit, r0,
                art::CodeAndDirectMethods::CodeOffsetInBytes(idx), rLR);
            loadWordDisp(cUnit, r0,
                art::CodeAndDirectMethods::MethodOffsetInBytes(idx), r0);
            break;
        default:
            return -1;
    }
    return state + 1;
}

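/*
 * For reference, the fully-resolved virtual dispatch emitted by the
 * function below is roughly (offsets symbolic, not literal; argument
 * loads are interleaved between these steps):
 *
 *     ldr  r1, <this>                    @ first argument of the call
 *     <null check on r1>
 *     ldr  rLR, [r1, #Object::klass_]
 *     ldr  rLR, [rLR, #Class::vtable_]
 *     ldr  r0, [rLR, #(Array::DataOffset + 4 * method_index)]
 *     ldr  rLR, [r0, #Method::code_]
 *     blx  rLR                           @ emitted by the genInvoke* caller
 */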
/*
 * Bit of a hack here - in lieu of a real scheduling pass,
 * emit the next instruction in a virtual invoke sequence.
 * We can use rLR as a temp prior to target address loading.
 * Note also that we'll load the first argument ("this") into
 * r1 here rather than the standard loadArgRegs.
 */
static int nextVCallInsn(CompilationUnit* cUnit, MIR* mir,
                         DecodedInstruction* dInsn, int state,
                         ArmLIR* rollback)
{
    DCHECK(rollback == NULL);
    RegLocation rlArg;
    /*
     * This is the fast path in which the target virtual method is
     * fully resolved at compile time.
     */
    Method* baseMethod = cUnit->method->GetDexCacheResolvedMethods()->
        Get(dInsn->vB);
    CHECK(baseMethod != NULL);
    uint32_t target_idx = baseMethod->GetMethodIndex();
    switch(state) {
        case 0:  // Get "this" [set r1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r1);
            break;
        case 1:  // Is "this" null? [use r1]
            genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
            // get this->klass_ [use r1, set rLR]
            loadWordDisp(cUnit, r1, Object::ClassOffset().Int32Value(), rLR);
            break;
        case 2:  // Get this->klass_->vtable [use rLR, set rLR]
            loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
            break;
        case 3:  // Get target method [use rLR, set r0]
            loadWordDisp(cUnit, rLR, (target_idx * 4) +
                art::Array::DataOffset().Int32Value(), r0);
            break;
        case 4:  // Get the target compiled code address [uses r0, sets rLR]
            loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
            break;
        default:
            return -1;
    }
    return state + 1;
}

static int nextVCallInsnSP(CompilationUnit* cUnit, MIR* mir,
                           DecodedInstruction* dInsn, int state,
                           ArmLIR* rollback)
{
    DCHECK(rollback != NULL);
    RegLocation rlArg;
    ArmLIR* skipBranch;
    ArmLIR* skipTarget;
    /*
     * This handles the case in which the base method is not fully
     * resolved at compile time.  We must generate code to test for
     * resolution at run time and, if unresolved, bail to the slow path
     * to fill in all the tables.  In the latter case, we'll restart at
     * the beginning of the sequence.
     */
    switch(state) {
        case 0:  // Get the current Method* [sets r0]
            loadCurrMethodDirect(cUnit, r0);
            break;
        case 1:  // Get method->dex_cache_resolved_methods_
            loadWordDisp(cUnit, r0,
                Method::GetDexCacheResolvedMethodsOffset().Int32Value(), rLR);
            break;
        case 2:  // method->dex_cache_resolved_methods_->Get(method_idx)
            loadWordDisp(cUnit, rLR, (dInsn->vB * 4) +
                art::Array::DataOffset().Int32Value(), rLR);
            break;
        case 3:  // Resolved?
            skipBranch = genCmpImmBranch(cUnit, kArmCondNe, rLR, 0);
            // Slowest path, bail to helper, rollback and retry
            loadWordDisp(cUnit, rSELF,
                OFFSETOF_MEMBER(Thread, pResolveMethodFromCode), rLR);
            loadConstant(cUnit, r1, dInsn->vB);
            callUnwindableHelper(cUnit, rLR);
            genUnconditionalBranch(cUnit, rollback);
            // Resume normal slow path
            skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
            skipTarget->defMask = ENCODE_ALL;
            skipBranch->generic.target = (LIR*)skipTarget;
            // Get base_method->method_index [use rLR, set r0]
            loadBaseDisp(cUnit, mir, rLR,
                         Method::GetMethodIndexOffset().Int32Value(), r0,
                         kUnsignedHalf, INVALID_SREG);
            // Load "this" [set r1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r1);
            break;
        case 4:
            // Is "this" null? [use r1]
            genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
            // get this->clazz [use r1, set rLR]
            loadWordDisp(cUnit, r1, Object::ClassOffset().Int32Value(), rLR);
            break;
        case 5:
            // get this->klass_->vtable_ [use rLR, set rLR]
            loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
            DCHECK((art::Array::DataOffset().Int32Value() & 0x3) == 0);
            // In the load shadow, fold the vtable_ object header size into method_index_
            opRegImm(cUnit, kOpAdd, r0,
                     art::Array::DataOffset().Int32Value() / 4);
            // Get target Method*
            loadBaseIndexed(cUnit, rLR, r0, r0, 2, kWord);
            break;
        case 6:  // Get the target compiled code address [uses r0, sets rLR]
            loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
            break;
        default:
            return -1;
    }
    return state + 1;
}

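/*
 * Note: loadArgRegs treats each argument as an untyped 32-bit word (see
 * the rlArg.wide = false below); wide arguments are simply passed as two
 * independent words by the callers.
 */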
/* Load up to 3 arguments in r1..r3 */
static int loadArgRegs(CompilationUnit* cUnit, MIR* mir,
                       DecodedInstruction* dInsn, int callState,
                       int *args, NextCallInsn nextCallInsn, ArmLIR* rollback)
{
    for (int i = 0; i < 3; i++) {
        if (args[i] != INVALID_REG) {
            // Arguments are treated as a series of untyped 32-bit values.
            RegLocation rlArg = oatGetRawSrc(cUnit, mir, i);
            rlArg.wide = false;
            loadValueDirectFixed(cUnit, rlArg, r1 + i);
            callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        }
    }
    return callState;
}

// Interleave launch code for INVOKE_INTERFACE.
static int nextInterfaceCallInsn(CompilationUnit* cUnit, MIR* mir,
                                 DecodedInstruction* dInsn, int state,
                                 ArmLIR* rollback)
{
    switch(state) {
        case 0:  // Load trampoline target
            loadWordDisp(cUnit, rSELF,
                         OFFSETOF_MEMBER(Thread, pInvokeInterfaceTrampoline),
                         rLR);
            // Load r0 with method index
            loadConstant(cUnit, r0, dInsn->vB);
            break;
        default:
            return -1;
    }
    return state + 1;
}

/*
 * Interleave launch code for INVOKE_SUPER.  See comments
 * for nextVCallInsn.
 */
static int nextSuperCallInsn(CompilationUnit* cUnit, MIR* mir,
                             DecodedInstruction* dInsn, int state,
                             ArmLIR* rollback)
{
    DCHECK(rollback == NULL);
    RegLocation rlArg;
    /*
     * This is the fast path in which the target virtual method is
     * fully resolved at compile time.  Note also that this path assumes
     * that the check to verify that the target method index falls
     * within the size of the super's vtable has been done at compile-time.
     */
    Method* baseMethod = cUnit->method->GetDexCacheResolvedMethods()->
        Get(dInsn->vB);
    CHECK(baseMethod != NULL);
    Class* superClass = cUnit->method->GetDeclaringClass()->GetSuperClass();
    CHECK(superClass != NULL);
    int32_t target_idx = baseMethod->GetMethodIndex();
    CHECK(superClass->GetVTable()->GetLength() > target_idx);
    Method* targetMethod = superClass->GetVTable()->Get(target_idx);
    CHECK(targetMethod != NULL);
    switch(state) {
        case 0:  // Get current Method* [set r0]
            loadCurrMethodDirect(cUnit, r0);
            // Load "this" [set r1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r1);
            // Get method->declaring_class_ [use r0, set rLR]
            loadWordDisp(cUnit, r0, Method::DeclaringClassOffset().Int32Value(),
                         rLR);
            // Is "this" null? [use r1]
            genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
            break;
        case 1:  // Get method->declaring_class_->super_class [use rLR, set rLR]
            loadWordDisp(cUnit, rLR, Class::SuperClassOffset().Int32Value(),
                         rLR);
            break;
        case 2:  // Get ...->super_class_->vtable [u/s rLR]
            loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
            break;
        case 3:  // Get target method [use rLR, set r0]
            loadWordDisp(cUnit, rLR, (target_idx * 4) +
                art::Array::DataOffset().Int32Value(), r0);
            break;
        case 4:  // Get the target compiled code address [uses r0, sets rLR]
            loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
            break;
        default:
            return -1;
    }
    return state + 1;
}

/* Slow-path version of nextSuperCallInsn */
static int nextSuperCallInsnSP(CompilationUnit* cUnit, MIR* mir,
                               DecodedInstruction* dInsn, int state,
                               ArmLIR* rollback)
{
    DCHECK(rollback != NULL);
    RegLocation rlArg;
    ArmLIR* skipBranch;
    ArmLIR* skipTarget;
    int tReg;
    /*
     * This handles the case in which the base method is not fully
     * resolved at compile time.  We must generate code to test for
     * resolution at run time and, if unresolved, bail to the slow path
     * to fill in all the tables.  In the latter case, we'll restart at
     * the beginning of the sequence.
     */
    switch(state) {
        case 0:  // Get the current Method* [sets r0]
            loadCurrMethodDirect(cUnit, r0);
            break;
        case 1:  // Get method->dex_cache_resolved_methods_ [use r0, set rLR]
            loadWordDisp(cUnit, r0,
                Method::GetDexCacheResolvedMethodsOffset().Int32Value(), rLR);
            break;
        case 2:  // method->dex_cache_resolved_methods_->Get(meth_idx) [u/s rLR]
            loadWordDisp(cUnit, rLR, (dInsn->vB * 4) +
                art::Array::DataOffset().Int32Value(), rLR);
            break;
        case 3:  // Resolved?
            skipBranch = genCmpImmBranch(cUnit, kArmCondNe, rLR, 0);
            // Slowest path, bail to helper, rollback and retry
            loadWordDisp(cUnit, rSELF,
                OFFSETOF_MEMBER(Thread, pResolveMethodFromCode), rLR);
            loadConstant(cUnit, r1, dInsn->vB);
            callUnwindableHelper(cUnit, rLR);
            genUnconditionalBranch(cUnit, rollback);
            // Resume normal slow path
            skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
            skipTarget->defMask = ENCODE_ALL;
            skipBranch->generic.target = (LIR*)skipTarget;
            // Get base_method->method_index [use rLR, set rLR]
            loadBaseDisp(cUnit, mir, rLR,
                         Method::GetMethodIndexOffset().Int32Value(), rLR,
                         kUnsignedHalf, INVALID_SREG);
            // Load "this" [set r1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r1);
            // Load curMethod->declaring_class_ [uses r0, sets r0]
            loadWordDisp(cUnit, r0, Method::DeclaringClassOffset().Int32Value(),
                         r0);
            // Null this?
            genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
            // Get method->declaring_class_->super_class [use r0, set r0]
            loadWordDisp(cUnit, r0, Class::SuperClassOffset().Int32Value(), r0);
            break;
        case 4:  // Get ...->super_class_->vtable [u/s r0]
            loadWordDisp(cUnit, r0, Class::VTableOffset().Int32Value(), r0);
            if (!(mir->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
                // Range check, throw NSM on failure
                tReg = oatAllocTemp(cUnit);
                loadWordDisp(cUnit, r0, art::Array::LengthOffset().Int32Value(),
                             tReg);
                genRegRegCheck(cUnit, kArmCondCs, tReg, rLR, mir,
                               kArmThrowNoSuchMethod);
                oatFreeTemp(cUnit, tReg);
            }
            // Adjust vtable_ base past object header
            opRegImm(cUnit, kOpAdd, r0, art::Array::DataOffset().Int32Value());
            // Get target Method*
            loadBaseIndexed(cUnit, r0, rLR, r0, 2, kWord);
            break;
        case 5:  // Get the target compiled code address [uses r0, sets rLR]
            loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
            break;
        default:
            return -1;
    }
    return state + 1;
}

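/*
 * Note: in the routine below, arguments beyond the first three are
 * stored to the outs area at rSP + (i + 1) * 4 (slot 0 is reserved for
 * the callee Method*), with nextCallInsn() steps interleaved between
 * the stores.
 */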
/*
 * Load up to 5 arguments, the first three of which will be in
 * r1 .. r3.  On entry r0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer.  Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
static int genDalvikArgsNoRange(CompilationUnit* cUnit, MIR* mir,
                                DecodedInstruction* dInsn, int callState,
                                ArmLIR** pcrLabel, bool isRange,
                                NextCallInsn nextCallInsn, ArmLIR* rollback,
                                bool skipThis)
{
    RegLocation rlArg;
    int registerArgs[3];

    /* If no arguments, just return */
    if (dInsn->vA == 0)
        return callState;

    callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);

    /*
     * Load frame arguments arg4 & arg5 first.  Coded a little odd to
     * pre-schedule the method pointer target.
     */
    for (unsigned int i = 3; i < dInsn->vA; i++) {
        int reg;
        // Treating args as untyped 32-bit chunks
        rlArg = oatGetRawSrc(cUnit, mir, i);
        rlArg.wide = false;
        rlArg = oatUpdateLoc(cUnit, rlArg);
        if (rlArg.location == kLocPhysReg) {
            reg = rlArg.lowReg;
        } else {
            // r3 is the last arg register loaded, so can safely be used here
            reg = r3;
            loadValueDirectFixed(cUnit, rlArg, reg);
            callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        }
        storeBaseDisp(cUnit, rSP, (i + 1) * 4, reg, kWord);
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    }

    /* Load register arguments r1..r3 */
    for (unsigned int i = 0; i < 3; i++) {
        if (i < dInsn->vA)
            registerArgs[i] = (isRange) ? dInsn->vC + i : i;
        else
            registerArgs[i] = INVALID_REG;
    }
    if (skipThis) {
        registerArgs[0] = INVALID_REG;
    }
    callState = loadArgRegs(cUnit, mir, dInsn, callState, registerArgs,
                            nextCallInsn, rollback);

    //TODO: better to move this into CallInsn lists
    // Load direct & need a "this" null check?
    if (pcrLabel) {
        *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
    }
    return callState;
}

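/*
 * For the block-copy case below (fewer than 20 arguments) the emitted
 * sequence is roughly the following sketch, using the float temps as a
 * scratch window and r3 as an address temp:
 *
 *     add    r3, rSP, #startOffset
 *     vldmia r3, {s0 - sN}        @ N = min(numArgs - 3, 16) - 1
 *     add    r3, rSP, #16         @ past the Method* and r1-r3 out slots
 *     vstmia r3, {s0 - sN}
 *
 * with nextCallInsn() steps interleaved between the instructions.
 */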
/*
 * May have 0+ arguments (also used for jumbo).  Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying.  This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in r1-r3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in r1-r3
 *
 */
static int genDalvikArgsRange(CompilationUnit* cUnit, MIR* mir,
                              DecodedInstruction* dInsn, int callState,
                              ArmLIR** pcrLabel, NextCallInsn nextCallInsn,
                              ArmLIR* rollback, bool skipThis)
{
    int firstArg = dInsn->vC;
    int numArgs = dInsn->vA;
    int registerArgs[3];

    // If we can treat it as non-range (Jumbo ops will use range form)
    if (numArgs <= 5)
        return genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pcrLabel,
                                    true, nextCallInsn, rollback, skipThis);
    /*
     * Make sure the range list doesn't span the break between the normal
     * Dalvik vRegs and the ins.
     */
    int highestArg = oatGetSrc(cUnit, mir, numArgs-1).sRegLow;
    int boundaryReg = cUnit->method->NumRegisters() - cUnit->method->NumIns();
    if ((firstArg < boundaryReg) && (highestArg >= boundaryReg)) {
        LOG(FATAL) << "Argument list spanned locals & args";
    }

    /*
     * First load the non-register arguments.  Both forms expect all
     * of the source arguments to be in their home frame location, so
     * scan the sReg names and flush any that have been promoted to
     * frame backing storage.
     */
    // Scan the rest of the args - if in physReg flush to memory
    for (int i = 3; i < numArgs; i++) {
        RegLocation loc = oatGetRawSrc(cUnit, mir, i);
        if (loc.wide) {
            loc = oatUpdateLocWide(cUnit, loc);
            if (loc.location == kLocPhysReg) {  // TUNING: if dirty?
                storeBaseDispWide(cUnit, rSP, loc.spOffset, loc.lowReg,
                                  loc.highReg);
                callState = nextCallInsn(cUnit, mir, dInsn, callState,
                                         rollback);
            }
        } else {
            loc = oatUpdateLoc(cUnit, loc);
            if (loc.location == kLocPhysReg) {  // TUNING: if dirty?
                storeBaseDisp(cUnit, rSP, loc.spOffset, loc.lowReg, kWord);
                callState = nextCallInsn(cUnit, mir, dInsn, callState,
                                         rollback);
            }
        }
    }

    int startOffset = cUnit->regLocation[mir->ssaRep->uses[3]].spOffset;
    int outsOffset = 4 /* Method* */ + (3 * 4);
    if (numArgs >= 20) {
        // Generate a memcpy call for the remaining arguments
        opRegRegImm(cUnit, kOpAdd, r0, rSP, startOffset);
        opRegRegImm(cUnit, kOpAdd, r1, rSP, outsOffset);
        loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pMemcpy), rLR);
        loadConstant(cUnit, r2, (numArgs - 3) * 4);
        callNoUnwindHelper(cUnit, rLR);
    } else {
        // Use vldm/vstm pair using r3 as a temp
        int regsLeft = std::min(numArgs - 3, 16);
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        opRegRegImm(cUnit, kOpAdd, r3, rSP, startOffset);
        newLIR3(cUnit, kThumb2Vldms, r3, fr0, regsLeft);
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        opRegRegImm(cUnit, kOpAdd, r3, rSP, 4 /* Method* */ + (3 * 4));
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        newLIR3(cUnit, kThumb2Vstms, r3, fr0, regsLeft);
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    }

    // Handle the 1st 3 in r1, r2 & r3
    for (unsigned int i = 0; i < 3; i++) {
        if (i < dInsn->vA)
            registerArgs[i] = dInsn->vC + i;
        else
            registerArgs[i] = INVALID_REG;
    }
    if (skipThis) {
        registerArgs[0] = INVALID_REG;
    }
    callState = loadArgRegs(cUnit, mir, dInsn, callState, registerArgs,
                            nextCallInsn, rollback);

    callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    return callState;
}

#ifdef DISPLAY_MISSING_TARGETS
// Debugging routine - if null target, branch to DebugMe
static void genShowTarget(CompilationUnit* cUnit)
{
    ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rLR, 0);
    loadWordDisp(cUnit, rSELF,
                 OFFSETOF_MEMBER(Thread, pDebugMe), rLR);
    ArmLIR* target = newLIR0(cUnit, kArmPseudoTargetLabel);
    target->defMask = -1;
    branchOver->generic.target = (LIR*)target;
}
#endif

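/*
 * invoke-static / invoke-direct (and their /range forms).  The null
 * check on "this" is only requested for the direct case; the static &
 * direct call sequence itself comes from nextSDCallInsn.
 */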
static void genInvokeStaticDirect(CompilationUnit* cUnit, MIR* mir,
                                  bool direct, bool range)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int callState = 0;
    ArmLIR* nullCk;
    ArmLIR** pNullCk = direct ? &nullCk : NULL;
    NextCallInsn nextCallInsn = nextSDCallInsn;

    // Explicit register usage
    oatLockCallTemps(cUnit);

    if (range) {
        callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, pNullCk,
                                       nextCallInsn, NULL, false);
    } else {
        callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pNullCk,
                                         false, nextCallInsn, NULL, false);
    }
    // Finish up any of the call sequence not interleaved in arg loading
    while (callState >= 0) {
        callState = nextCallInsn(cUnit, mir, dInsn, callState, NULL);
    }
#ifdef DISPLAY_MISSING_TARGETS
    genShowTarget(cUnit);
#endif
    opReg(cUnit, kOpBlx, rLR);
}

/*
 * All invoke-interface calls bounce off of art_invoke_interface_trampoline,
 * which will locate the target and continue on via a tail call.
 */
static void genInvokeInterface(CompilationUnit* cUnit, MIR* mir)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int callState = 0;
    ArmLIR* nullCk;

    // Explicit register usage
    oatLockCallTemps(cUnit);
    /* Note: must call nextInterfaceCallInsn() prior to 1st argument load */
    callState = nextInterfaceCallInsn(cUnit, mir, dInsn, callState, NULL);
    if (mir->dalvikInsn.opcode == OP_INVOKE_INTERFACE)
        callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
                                         false, nextInterfaceCallInsn, NULL,
                                         true);
    else
        callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
                                       nextInterfaceCallInsn, NULL, true);
    // Finish up any of the call sequence not interleaved in arg loading
    while (callState >= 0) {
        callState = nextInterfaceCallInsn(cUnit, mir, dInsn, callState, NULL);
    }
#ifdef DISPLAY_MISSING_TARGETS
    genShowTarget(cUnit);
#endif
    opReg(cUnit, kOpBlx, rLR);
}

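/*
 * invoke-super.  The fast path (nextSuperCallInsn) is only usable when
 * the base method, the declaring class's superclass, the vtable slot,
 * and the target method are all resolvable at compile time; otherwise
 * we fall back to nextSuperCallInsnSP with a rollback label.
 */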
static void genInvokeSuper(CompilationUnit* cUnit, MIR* mir)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int callState = 0;
    ArmLIR* nullCk;
    ArmLIR* rollback;
    Method* baseMethod = cUnit->method->GetDexCacheResolvedMethods()->
        Get(dInsn->vB);
    NextCallInsn nextCallInsn;
    bool fastPath = true;

    // Explicit register usage
    oatLockCallTemps(cUnit);
    if (SLOW_INVOKE_PATH || baseMethod == NULL) {
        fastPath = false;
    } else {
        Class* superClass = cUnit->method->GetDeclaringClass()->GetSuperClass();
        if (superClass == NULL) {
            fastPath = false;
        } else {
            int32_t target_idx = baseMethod->GetMethodIndex();
            if (superClass->GetVTable()->GetLength() <= target_idx) {
                fastPath = false;
            } else {
                fastPath = (superClass->GetVTable()->Get(target_idx) != NULL);
            }
        }
    }
    if (fastPath) {
        nextCallInsn = nextSuperCallInsn;
        rollback = NULL;
    } else {
        nextCallInsn = nextSuperCallInsnSP;
        rollback = newLIR0(cUnit, kArmPseudoTargetLabel);
        rollback->defMask = -1;
    }
    if (mir->dalvikInsn.opcode == OP_INVOKE_SUPER)
        callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
                                         false, nextCallInsn, rollback, true);
    else
        callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
                                       nextCallInsn, rollback, true);
    // Finish up any of the call sequence not interleaved in arg loading
    while (callState >= 0) {
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    }
#ifdef DISPLAY_MISSING_TARGETS
    genShowTarget(cUnit);
#endif
    opReg(cUnit, kOpBlx, rLR);
}

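/*
 * invoke-virtual.  Uses the resolved-method fast path (nextVCallInsn)
 * when the dex cache already holds the target; otherwise the slow-path
 * state machine (nextVCallInsnSP) with a rollback label is used.
 */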
static void genInvokeVirtual(CompilationUnit* cUnit, MIR* mir)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int callState = 0;
    ArmLIR* nullCk;
    ArmLIR* rollback;
    Method* method = cUnit->method->GetDexCacheResolvedMethods()->
        Get(dInsn->vB);
    NextCallInsn nextCallInsn;

    // Explicit register usage
    oatLockCallTemps(cUnit);
    if (SLOW_INVOKE_PATH || method == NULL) {
        // Slow path
        nextCallInsn = nextVCallInsnSP;
        // If we need a slow-path callout, we'll restart here
        rollback = newLIR0(cUnit, kArmPseudoTargetLabel);
        rollback->defMask = -1;
    } else {
        // Fast path
        nextCallInsn = nextVCallInsn;
        rollback = NULL;
    }
    if (mir->dalvikInsn.opcode == OP_INVOKE_VIRTUAL)
        callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
                                         false, nextCallInsn, rollback, true);
    else
        callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
                                       nextCallInsn, rollback, true);
    // Finish up any of the call sequence not interleaved in arg loading
    while (callState >= 0) {
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    }
#ifdef DISPLAY_MISSING_TARGETS
    genShowTarget(cUnit);
#endif
    opReg(cUnit, kOpBlx, rLR);
}

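/*
 * Generate code for a single MIR node.  Source and destination
 * RegLocations are pre-fetched according to the opcode's dataflow
 * attributes, then the opcode is dispatched through the switch below.
 */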
static bool compileDalvikInstruction(CompilationUnit* cUnit, MIR* mir,
                                     BasicBlock* bb, ArmLIR* labelList)
{
    bool res = false;   // Assume success
    RegLocation rlSrc[3];
    RegLocation rlDest = badLoc;
    RegLocation rlResult = badLoc;
    Opcode opcode = mir->dalvikInsn.opcode;

    /* Prep Src and Dest locations */
    int nextSreg = 0;
    int nextLoc = 0;
    int attrs = oatDataFlowAttributes[opcode];
    rlSrc[0] = rlSrc[1] = rlSrc[2] = badLoc;
    if (attrs & DF_UA) {
        rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
        nextSreg++;
    } else if (attrs & DF_UA_WIDE) {
        rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
                                         nextSreg + 1);
        nextSreg += 2;
    }
    if (attrs & DF_UB) {
        rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
        nextSreg++;
    } else if (attrs & DF_UB_WIDE) {
        rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
                                         nextSreg + 1);
        nextSreg += 2;
    }
    if (attrs & DF_UC) {
        rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
    } else if (attrs & DF_UC_WIDE) {
        rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
                                         nextSreg + 1);
    }
    if (attrs & DF_DA) {
        rlDest = oatGetDest(cUnit, mir, 0);
    } else if (attrs & DF_DA_WIDE) {
        rlDest = oatGetDestWide(cUnit, mir, 0, 1);
    }

    switch(opcode) {
        case OP_NOP:
            break;

        case OP_MOVE_EXCEPTION:
            int exOffset;
            int resetReg;
            exOffset = Thread::ExceptionOffset().Int32Value();
            resetReg = oatAllocTemp(cUnit);
            rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
            loadWordDisp(cUnit, rSELF, exOffset, rlResult.lowReg);
            loadConstant(cUnit, resetReg, 0);
            storeWordDisp(cUnit, rSELF, exOffset, resetReg);
            storeValue(cUnit, rlDest, rlResult);
            break;

        case OP_RETURN_VOID:
            break;

        case OP_RETURN:
        case OP_RETURN_OBJECT:
            storeValue(cUnit, retLoc, rlSrc[0]);
            break;

        case OP_RETURN_WIDE:
            rlDest = retLocWide;
            rlDest.fp = rlSrc[0].fp;
            storeValueWide(cUnit, rlDest, rlSrc[0]);
            break;

        case OP_MOVE_RESULT_WIDE:
            if (mir->OptimizationFlags & MIR_INLINED)
                break;  // Nop - combined w/ previous invoke
            /*
             * Somewhat hacky here.  Because we're now passing
             * return values in registers, we have to let the
             * register allocation utilities know that the return
             * registers are live and may not be used for address
             * formation in storeValueWide.
             */
            assert(retLocWide.lowReg == r0);
            assert(retLocWide.highReg == r1);
            oatLockTemp(cUnit, retLocWide.lowReg);
            oatLockTemp(cUnit, retLocWide.highReg);
            storeValueWide(cUnit, rlDest, retLocWide);
            oatFreeTemp(cUnit, retLocWide.lowReg);
            oatFreeTemp(cUnit, retLocWide.highReg);
            break;

        case OP_MOVE_RESULT:
        case OP_MOVE_RESULT_OBJECT:
            if (mir->OptimizationFlags & MIR_INLINED)
                break;  // Nop - combined w/ previous invoke
            /* See comment for OP_MOVE_RESULT_WIDE */
            assert(retLoc.lowReg == r0);
            oatLockTemp(cUnit, retLoc.lowReg);
            storeValue(cUnit, rlDest, retLoc);
            oatFreeTemp(cUnit, retLoc.lowReg);
            break;

        case OP_MOVE:
        case OP_MOVE_OBJECT:
        case OP_MOVE_16:
        case OP_MOVE_OBJECT_16:
        case OP_MOVE_FROM16:
        case OP_MOVE_OBJECT_FROM16:
            storeValue(cUnit, rlDest, rlSrc[0]);
            break;

        case OP_MOVE_WIDE:
        case OP_MOVE_WIDE_16:
        case OP_MOVE_WIDE_FROM16:
            storeValueWide(cUnit, rlDest, rlSrc[0]);
            break;

        case OP_CONST:
        case OP_CONST_4:
        case OP_CONST_16:
            rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
            loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
            storeValue(cUnit, rlDest, rlResult);
            break;

        case OP_CONST_HIGH16:
            rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
            loadConstantNoClobber(cUnit, rlResult.lowReg,
                                  mir->dalvikInsn.vB << 16);
            storeValue(cUnit, rlDest, rlResult);
            break;

        case OP_CONST_WIDE_16:
        case OP_CONST_WIDE_32:
            rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
            loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
            //TUNING: do high separately to avoid load dependency
            opRegRegImm(cUnit, kOpAsr, rlResult.highReg, rlResult.lowReg, 31);
            storeValueWide(cUnit, rlDest, rlResult);
            break;

        case OP_CONST_WIDE:
            rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
            loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
                                  mir->dalvikInsn.vB_wide & 0xffffffff,
                                  (mir->dalvikInsn.vB_wide >> 32) & 0xffffffff);
            storeValueWide(cUnit, rlDest, rlResult);
            break;

1189 case OP_CONST_WIDE_HIGH16:
1190 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
1191 loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
1192 0, mir->dalvikInsn.vB << 16);
buzbee7b1b86d2011-08-26 18:59:10 -07001193 storeValueWide(cUnit, rlDest, rlResult);
buzbee67bf8852011-08-17 17:51:35 -07001194 break;
1195
1196 case OP_MONITOR_ENTER:
1197 genMonitorEnter(cUnit, mir, rlSrc[0]);
1198 break;
1199
1200 case OP_MONITOR_EXIT:
1201 genMonitorExit(cUnit, mir, rlSrc[0]);
1202 break;
1203
1204 case OP_CHECK_CAST:
1205 genCheckCast(cUnit, mir, rlSrc[0]);
1206 break;
1207
1208 case OP_INSTANCE_OF:
1209 genInstanceof(cUnit, mir, rlDest, rlSrc[0]);
1210 break;
1211
1212 case OP_NEW_INSTANCE:
1213 genNewInstance(cUnit, mir, rlDest);
1214 break;
1215
1216 case OP_THROW:
1217 genThrow(cUnit, mir, rlSrc[0]);
1218 break;
1219
buzbee5ade1d22011-09-09 14:44:52 -07001220 case OP_THROW_VERIFICATION_ERROR:
1221 loadWordDisp(cUnit, rSELF,
1222 OFFSETOF_MEMBER(Thread, pThrowVerificationErrorFromCode), rLR);
1223 loadConstant(cUnit, r0, mir->dalvikInsn.vA);
1224 loadConstant(cUnit, r1, mir->dalvikInsn.vB);
buzbeeec5adf32011-09-11 15:25:43 -07001225 callUnwindableHelper(cUnit, rLR);
buzbee5ade1d22011-09-09 14:44:52 -07001226 break;
1227
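        /*
         * array-length reduces to a null check plus a single word load of the
         * length field at Array::LengthOffset().
         */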
buzbee67bf8852011-08-17 17:51:35 -07001228 case OP_ARRAY_LENGTH:
1229 int lenOffset;
buzbeec143c552011-08-20 17:38:58 -07001230 lenOffset = Array::LengthOffset().Int32Value();
buzbee7b1b86d2011-08-26 18:59:10 -07001231 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
buzbee5ade1d22011-09-09 14:44:52 -07001232 genNullCheck(cUnit, rlSrc[0].sRegLow, rlSrc[0].lowReg, mir);
buzbee67bf8852011-08-17 17:51:35 -07001233 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1234 loadWordDisp(cUnit, rlSrc[0].lowReg, lenOffset,
1235 rlResult.lowReg);
1236 storeValue(cUnit, rlDest, rlResult);
1237 break;
1238
1239 case OP_CONST_STRING:
1240 case OP_CONST_STRING_JUMBO:
1241 genConstString(cUnit, mir, rlDest, rlSrc[0]);
1242 break;
1243
1244 case OP_CONST_CLASS:
1245 genConstClass(cUnit, mir, rlDest, rlSrc[0]);
1246 break;
1247
1248 case OP_FILL_ARRAY_DATA:
1249 genFillArrayData(cUnit, mir, rlSrc[0]);
1250 break;
1251
1252 case OP_FILLED_NEW_ARRAY:
1253 genFilledNewArray(cUnit, mir, false /* not range */);
1254 break;
1255
1256 case OP_FILLED_NEW_ARRAY_RANGE:
1257 genFilledNewArray(cUnit, mir, true /* range */);
1258 break;
1259
1260 case OP_NEW_ARRAY:
1261 genNewArray(cUnit, mir, rlDest, rlSrc[0]);
1262 break;
1263
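        /*
         * Only backward branches (taken block starting at or before this
         * offset) get a suspend poll, so that loops stay responsive to
         * suspension requests; forward gotos skip it.
         */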
1264 case OP_GOTO:
1265 case OP_GOTO_16:
1266 case OP_GOTO_32:
1267 // TUNING: add MIR flag to disable when unnecessary
1268 bool backwardBranch;
1269 backwardBranch = (bb->taken->startOffset <= mir->offset);
1270 if (backwardBranch) {
1271 genSuspendPoll(cUnit, mir);
1272 }
1273 genUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
1274 break;
1275
1276 case OP_PACKED_SWITCH:
1277 genPackedSwitch(cUnit, mir, rlSrc[0]);
1278 break;
1279
1280 case OP_SPARSE_SWITCH:
1281 genSparseSwitch(cUnit, mir, rlSrc[0]);
1282 break;
1283
1284 case OP_CMPL_FLOAT:
1285 case OP_CMPG_FLOAT:
1286 case OP_CMPL_DOUBLE:
1287 case OP_CMPG_DOUBLE:
1288 res = genCmpFP(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1289 break;
1290
1291 case OP_CMP_LONG:
1292 genCmpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1293 break;
1294
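        /*
         * Compare-and-branch pattern: a conditional branch to the taken block
         * followed by an unconditional branch to the fall-through block; the
         * latter is nop'd later by removeRedundantBranches when it targets
         * the next instruction. Sketch for "if-lt vA, vB, :target":
         *     cmp  rA, rB
         *     blt  L_taken
         *     b    L_fallthrough
         */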
1295 case OP_IF_EQ:
1296 case OP_IF_NE:
1297 case OP_IF_LT:
1298 case OP_IF_GE:
1299 case OP_IF_GT:
1300 case OP_IF_LE: {
1301 bool backwardBranch;
1302 ArmConditionCode cond;
1303 backwardBranch = (bb->taken->startOffset <= mir->offset);
1304 if (backwardBranch) {
1305 genSuspendPoll(cUnit, mir);
1306 }
1307 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1308 rlSrc[1] = loadValue(cUnit, rlSrc[1], kCoreReg);
1309 opRegReg(cUnit, kOpCmp, rlSrc[0].lowReg, rlSrc[1].lowReg);
1310 switch(opcode) {
1311 case OP_IF_EQ:
1312 cond = kArmCondEq;
1313 break;
1314 case OP_IF_NE:
1315 cond = kArmCondNe;
1316 break;
1317 case OP_IF_LT:
1318 cond = kArmCondLt;
1319 break;
1320 case OP_IF_GE:
1321 cond = kArmCondGe;
1322 break;
1323 case OP_IF_GT:
1324 cond = kArmCondGt;
1325 break;
1326 case OP_IF_LE:
1327 cond = kArmCondLe;
1328 break;
1329 default:
1330 cond = (ArmConditionCode)0;
1331 LOG(FATAL) << "Unexpected opcode " << (int)opcode;
1332 }
1333 genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
1334 genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
1335 break;
1336 }
1337
1338 case OP_IF_EQZ:
1339 case OP_IF_NEZ:
1340 case OP_IF_LTZ:
1341 case OP_IF_GEZ:
1342 case OP_IF_GTZ:
1343 case OP_IF_LEZ: {
1344 bool backwardBranch;
1345 ArmConditionCode cond;
1346 backwardBranch = (bb->taken->startOffset <= mir->offset);
1347 if (backwardBranch) {
1348 genSuspendPoll(cUnit, mir);
1349 }
1350 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1351 opRegImm(cUnit, kOpCmp, rlSrc[0].lowReg, 0);
1352 switch(opcode) {
1353 case OP_IF_EQZ:
1354 cond = kArmCondEq;
1355 break;
1356 case OP_IF_NEZ:
1357 cond = kArmCondNe;
1358 break;
1359 case OP_IF_LTZ:
1360 cond = kArmCondLt;
1361 break;
1362 case OP_IF_GEZ:
1363 cond = kArmCondGe;
1364 break;
1365 case OP_IF_GTZ:
1366 cond = kArmCondGt;
1367 break;
1368 case OP_IF_LEZ:
1369 cond = kArmCondLe;
1370 break;
1371 default:
1372 cond = (ArmConditionCode)0;
1373 LOG(FATAL) << "Unexpected opcode " << (int)opcode;
1374 }
1375 genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
1376 genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
1377 break;
1378 }
1379
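        /*
         * For the array accessors below the final argument is the log2 of the
         * element size (0 = byte, 1 = half, 2 = word, 3 = long/double),
         * presumably used to scale the index.
         */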
1380 case OP_AGET_WIDE:
1381 genArrayGet(cUnit, mir, kLong, rlSrc[0], rlSrc[1], rlDest, 3);
1382 break;
1383 case OP_AGET:
1384 case OP_AGET_OBJECT:
1385 genArrayGet(cUnit, mir, kWord, rlSrc[0], rlSrc[1], rlDest, 2);
1386 break;
1387 case OP_AGET_BOOLEAN:
1388 genArrayGet(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1],
1389 rlDest, 0);
1390 break;
1391 case OP_AGET_BYTE:
1392 genArrayGet(cUnit, mir, kSignedByte, rlSrc[0], rlSrc[1], rlDest, 0);
1393 break;
1394 case OP_AGET_CHAR:
1395 genArrayGet(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1],
1396 rlDest, 1);
1397 break;
1398 case OP_AGET_SHORT:
1399 genArrayGet(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], rlDest, 1);
1400 break;
1401 case OP_APUT_WIDE:
1402 genArrayPut(cUnit, mir, kLong, rlSrc[1], rlSrc[2], rlSrc[0], 3);
1403 break;
1404 case OP_APUT:
1405 genArrayPut(cUnit, mir, kWord, rlSrc[1], rlSrc[2], rlSrc[0], 2);
1406 break;
1407 case OP_APUT_OBJECT:
buzbee1b4c8592011-08-31 10:43:51 -07001408 genArrayObjPut(cUnit, mir, rlSrc[1], rlSrc[2], rlSrc[0], 2);
buzbee67bf8852011-08-17 17:51:35 -07001409 break;
1410 case OP_APUT_SHORT:
1411 case OP_APUT_CHAR:
1412 genArrayPut(cUnit, mir, kUnsignedHalf, rlSrc[1], rlSrc[2],
1413 rlSrc[0], 1);
1414 break;
1415 case OP_APUT_BYTE:
1416 case OP_APUT_BOOLEAN:
1417 genArrayPut(cUnit, mir, kUnsignedByte, rlSrc[1], rlSrc[2],
1418 rlSrc[0], 0);
1419 break;
1420
1421 case OP_IGET_WIDE:
1422 case OP_IGET_WIDE_VOLATILE:
1423 genIGetWideX(cUnit, mir, rlDest, rlSrc[0]);
1424 break;
1425
1426 case OP_IGET:
1427 case OP_IGET_VOLATILE:
1428 case OP_IGET_OBJECT:
1429 case OP_IGET_OBJECT_VOLATILE:
1430 genIGetX(cUnit, mir, kWord, rlDest, rlSrc[0]);
1431 break;
1432
1433 case OP_IGET_BOOLEAN:
1434 case OP_IGET_BYTE:
1435 genIGetX(cUnit, mir, kUnsignedByte, rlDest, rlSrc[0]);
1436 break;
1437
1438 case OP_IGET_CHAR:
1439 genIGetX(cUnit, mir, kUnsignedHalf, rlDest, rlSrc[0]);
1440 break;
1441
1442 case OP_IGET_SHORT:
1443 genIGetX(cUnit, mir, kSignedHalf, rlDest, rlSrc[0]);
1444 break;
1445
1446 case OP_IPUT_WIDE:
1447 case OP_IPUT_WIDE_VOLATILE:
1448 genIPutWideX(cUnit, mir, rlSrc[0], rlSrc[1]);
1449 break;
1450
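        /*
         * The trailing bool of genIPutX distinguishes object stores from
         * primitive stores; object stores likely need extra GC bookkeeping
         * (e.g. card marking) beyond the plain word store.
         */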
1451 case OP_IPUT_OBJECT:
1452 case OP_IPUT_OBJECT_VOLATILE:
1453 genIPutX(cUnit, mir, kWord, rlSrc[0], rlSrc[1], true);
1454 break;
1455
1456 case OP_IPUT:
1457 case OP_IPUT_VOLATILE:
1458 genIPutX(cUnit, mir, kWord, rlSrc[0], rlSrc[1], false);
1459 break;
1460
1461 case OP_IPUT_BOOLEAN:
1462 case OP_IPUT_BYTE:
1463 genIPutX(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1], false);
1464 break;
1465
1466 case OP_IPUT_CHAR:
1467 genIPutX(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1], false);
1468 break;
1469
1470 case OP_IPUT_SHORT:
1471 genIPutX(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], false);
1472 break;
1473
1474 case OP_SGET:
1475 case OP_SGET_OBJECT:
1476 case OP_SGET_BOOLEAN:
1477 case OP_SGET_BYTE:
1478 case OP_SGET_CHAR:
1479 case OP_SGET_SHORT:
1480 genSget(cUnit, mir, rlResult, rlDest);
1481 break;
1482
1483 case OP_SGET_WIDE:
1484 genSgetWide(cUnit, mir, rlResult, rlDest);
1485 break;
1486
1487 case OP_SPUT:
1488 case OP_SPUT_OBJECT:
1489 case OP_SPUT_BOOLEAN:
1490 case OP_SPUT_BYTE:
1491 case OP_SPUT_CHAR:
1492 case OP_SPUT_SHORT:
1493 genSput(cUnit, mir, rlSrc[0]);
1494 break;
1495
1496 case OP_SPUT_WIDE:
1497 genSputWide(cUnit, mir, rlSrc[0]);
1498 break;
1499
1500 case OP_INVOKE_STATIC_RANGE:
buzbee561227c2011-09-02 15:28:19 -07001501 genInvokeStaticDirect(cUnit, mir, false /*direct*/,
1502 true /*range*/);
1503 break;
buzbee67bf8852011-08-17 17:51:35 -07001504 case OP_INVOKE_STATIC:
buzbee561227c2011-09-02 15:28:19 -07001505 genInvokeStaticDirect(cUnit, mir, false /*direct*/,
1506 false /*range*/);
buzbee67bf8852011-08-17 17:51:35 -07001507 break;
1508
1509 case OP_INVOKE_DIRECT:
buzbee561227c2011-09-02 15:28:19 -07001510 genInvokeStaticDirect(cUnit, mir, true /*direct*/,
1511 false /*range*/);
1512 break;
buzbee67bf8852011-08-17 17:51:35 -07001513 case OP_INVOKE_DIRECT_RANGE:
buzbee561227c2011-09-02 15:28:19 -07001514 genInvokeStaticDirect(cUnit, mir, true /*direct*/,
1515 true /*range*/);
buzbee67bf8852011-08-17 17:51:35 -07001516 break;
1517
1518 case OP_INVOKE_VIRTUAL:
1519 case OP_INVOKE_VIRTUAL_RANGE:
1520 genInvokeVirtual(cUnit, mir);
1521 break;
1522
1523 case OP_INVOKE_SUPER:
1524 case OP_INVOKE_SUPER_RANGE:
1525 genInvokeSuper(cUnit, mir);
1526 break;
1527
1528 case OP_INVOKE_INTERFACE:
1529 case OP_INVOKE_INTERFACE_RANGE:
1530 genInvokeInterface(cUnit, mir);
1531 break;
1532
1533 case OP_NEG_INT:
1534 case OP_NOT_INT:
1535 res = genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1536 break;
1537
1538 case OP_NEG_LONG:
1539 case OP_NOT_LONG:
1540 res = genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1541 break;
1542
1543 case OP_NEG_FLOAT:
1544 res = genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1545 break;
1546
1547 case OP_NEG_DOUBLE:
1548 res = genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1549 break;
1550
1551 case OP_INT_TO_LONG:
1552 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1553 if (rlSrc[0].location == kLocPhysReg) {
1554 genRegCopy(cUnit, rlResult.lowReg, rlSrc[0].lowReg);
1555 } else {
1556 loadValueDirect(cUnit, rlSrc[0], rlResult.lowReg);
1557 }
1558 opRegRegImm(cUnit, kOpAsr, rlResult.highReg,
1559 rlResult.lowReg, 31);
1560 storeValueWide(cUnit, rlDest, rlResult);
1561 break;
1562
1563 case OP_LONG_TO_INT:
1564 rlSrc[0] = oatUpdateLocWide(cUnit, rlSrc[0]);
1565 rlSrc[0] = oatWideToNarrow(cUnit, rlSrc[0]);
1566 storeValue(cUnit, rlDest, rlSrc[0]);
1567 break;
1568
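        /*
         * Per Java semantics int-to-byte and int-to-short sign-extend while
         * int-to-char zero-extends; kOp2Byte/kOp2Short/kOp2Char presumably
         * map to the corresponding Thumb-2 extend instructions (sxtb, sxth,
         * uxth).
         */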
1569 case OP_INT_TO_BYTE:
1570 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1571 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1572 opRegReg(cUnit, kOp2Byte, rlResult.lowReg, rlSrc[0].lowReg);
1573 storeValue(cUnit, rlDest, rlResult);
1574 break;
1575
1576 case OP_INT_TO_SHORT:
1577 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1578 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1579 opRegReg(cUnit, kOp2Short, rlResult.lowReg, rlSrc[0].lowReg);
1580 storeValue(cUnit, rlDest, rlResult);
1581 break;
1582
1583 case OP_INT_TO_CHAR:
1584 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1585 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1586 opRegReg(cUnit, kOp2Char, rlResult.lowReg, rlSrc[0].lowReg);
1587 storeValue(cUnit, rlDest, rlResult);
1588 break;
1589
1590 case OP_INT_TO_FLOAT:
1591 case OP_INT_TO_DOUBLE:
1592 case OP_LONG_TO_FLOAT:
1593 case OP_LONG_TO_DOUBLE:
1594 case OP_FLOAT_TO_INT:
1595 case OP_FLOAT_TO_LONG:
1596 case OP_FLOAT_TO_DOUBLE:
1597 case OP_DOUBLE_TO_INT:
1598 case OP_DOUBLE_TO_LONG:
1599 case OP_DOUBLE_TO_FLOAT:
1600 genConversion(cUnit, mir);
1601 break;
1602
1603 case OP_ADD_INT:
1604 case OP_SUB_INT:
1605 case OP_MUL_INT:
1606 case OP_DIV_INT:
1607 case OP_REM_INT:
1608 case OP_AND_INT:
1609 case OP_OR_INT:
1610 case OP_XOR_INT:
1611 case OP_SHL_INT:
1612 case OP_SHR_INT:
1613 case OP_USHR_INT:
1614 case OP_ADD_INT_2ADDR:
1615 case OP_SUB_INT_2ADDR:
1616 case OP_MUL_INT_2ADDR:
1617 case OP_DIV_INT_2ADDR:
1618 case OP_REM_INT_2ADDR:
1619 case OP_AND_INT_2ADDR:
1620 case OP_OR_INT_2ADDR:
1621 case OP_XOR_INT_2ADDR:
1622 case OP_SHL_INT_2ADDR:
1623 case OP_SHR_INT_2ADDR:
1624 case OP_USHR_INT_2ADDR:
1625 genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1626 break;
1627
1628 case OP_ADD_LONG:
1629 case OP_SUB_LONG:
1630 case OP_MUL_LONG:
1631 case OP_DIV_LONG:
1632 case OP_REM_LONG:
1633 case OP_AND_LONG:
1634 case OP_OR_LONG:
1635 case OP_XOR_LONG:
1636 case OP_ADD_LONG_2ADDR:
1637 case OP_SUB_LONG_2ADDR:
1638 case OP_MUL_LONG_2ADDR:
1639 case OP_DIV_LONG_2ADDR:
1640 case OP_REM_LONG_2ADDR:
1641 case OP_AND_LONG_2ADDR:
1642 case OP_OR_LONG_2ADDR:
1643 case OP_XOR_LONG_2ADDR:
1644 genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1645 break;
1646
buzbee67bf8852011-08-17 17:51:35 -07001647 case OP_SHL_LONG:
1648 case OP_SHR_LONG:
1649 case OP_USHR_LONG:
buzbeee6d61962011-08-27 11:58:19 -07001650 case OP_SHL_LONG_2ADDR:
1651 case OP_SHR_LONG_2ADDR:
1652 case OP_USHR_LONG_2ADDR:
buzbee67bf8852011-08-17 17:51:35 -07001653            genShiftOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1654 break;
1655
1656 case OP_ADD_FLOAT:
1657 case OP_SUB_FLOAT:
1658 case OP_MUL_FLOAT:
1659 case OP_DIV_FLOAT:
1660 case OP_REM_FLOAT:
1661 case OP_ADD_FLOAT_2ADDR:
1662 case OP_SUB_FLOAT_2ADDR:
1663 case OP_MUL_FLOAT_2ADDR:
1664 case OP_DIV_FLOAT_2ADDR:
1665 case OP_REM_FLOAT_2ADDR:
1666 genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1667 break;
1668
1669 case OP_ADD_DOUBLE:
1670 case OP_SUB_DOUBLE:
1671 case OP_MUL_DOUBLE:
1672 case OP_DIV_DOUBLE:
1673 case OP_REM_DOUBLE:
1674 case OP_ADD_DOUBLE_2ADDR:
1675 case OP_SUB_DOUBLE_2ADDR:
1676 case OP_MUL_DOUBLE_2ADDR:
1677 case OP_DIV_DOUBLE_2ADDR:
1678 case OP_REM_DOUBLE_2ADDR:
1679 genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1680 break;
1681
1682 case OP_RSUB_INT:
1683 case OP_ADD_INT_LIT16:
1684 case OP_MUL_INT_LIT16:
1685 case OP_DIV_INT_LIT16:
1686 case OP_REM_INT_LIT16:
1687 case OP_AND_INT_LIT16:
1688 case OP_OR_INT_LIT16:
1689 case OP_XOR_INT_LIT16:
1690 case OP_ADD_INT_LIT8:
1691 case OP_RSUB_INT_LIT8:
1692 case OP_MUL_INT_LIT8:
1693 case OP_DIV_INT_LIT8:
1694 case OP_REM_INT_LIT8:
1695 case OP_AND_INT_LIT8:
1696 case OP_OR_INT_LIT8:
1697 case OP_XOR_INT_LIT8:
1698 case OP_SHL_INT_LIT8:
1699 case OP_SHR_INT_LIT8:
1700 case OP_USHR_INT_LIT8:
1701 genArithOpIntLit(cUnit, mir, rlDest, rlSrc[0], mir->dalvikInsn.vC);
1702 break;
1703
1704 default:
1705 res = true;
1706 }
1707 return res;
1708}
1709
1710static const char *extendedMIROpNames[kMirOpLast - kMirOpFirst] = {
1711 "kMirOpPhi",
1712 "kMirOpNullNRangeUpCheck",
1713 "kMirOpNullNRangeDownCheck",
1714 "kMirOpLowerBound",
1715 "kMirOpPunt",
1716 "kMirOpCheckInlinePrediction",
1717};
1718
1719/* Extended MIR instructions like PHI */
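/*
 * Note: of the extended opcodes only kMirOpPhi gets special treatment below.
 * Its pseudo instruction is marked as a nop, so no code is emitted; the SSA
 * string is kept only for verbose listings.
 */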
1720static void handleExtendedMethodMIR(CompilationUnit* cUnit, MIR* mir)
1721{
1722 int opOffset = mir->dalvikInsn.opcode - kMirOpFirst;
1723 char* msg = (char*)oatNew(strlen(extendedMIROpNames[opOffset]) + 1, false);
1724 strcpy(msg, extendedMIROpNames[opOffset]);
1725 ArmLIR* op = newLIR1(cUnit, kArmPseudoExtended, (int) msg);
1726
1727 switch ((ExtendedMIROpcode)mir->dalvikInsn.opcode) {
1728 case kMirOpPhi: {
1729 char* ssaString = oatGetSSAString(cUnit, mir->ssaRep);
1730 op->flags.isNop = true;
1731 newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
1732 break;
1733 }
1734 default:
1735 break;
1736 }
1737}
1738
1739/* If there are any ins passed in registers that have not been promoted
1740 * to a callee-save register, flush them to the frame.
buzbeedfd3d702011-08-28 12:56:51 -07001741 * Note: at this point, copy any ins that are passed in registers to their
1742 * home locations. */
buzbee67bf8852011-08-17 17:51:35 -07001743static void flushIns(CompilationUnit* cUnit)
1744{
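    /*
     * The first three ins, if any, arrive in r1-r3; r0 carries the Method*,
     * which the prologue spills to [sp, #0] separately.
     */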
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001745 if (cUnit->method->NumIns() == 0)
buzbee67bf8852011-08-17 17:51:35 -07001746 return;
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001747 int inRegs = (cUnit->method->NumIns() > 2) ? 3
1748 : cUnit->method->NumIns();
buzbee67bf8852011-08-17 17:51:35 -07001749 int startReg = r1;
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001750 int startLoc = cUnit->method->NumRegisters() -
1751 cUnit->method->NumIns();
buzbee67bf8852011-08-17 17:51:35 -07001752 for (int i = 0; i < inRegs; i++) {
1753 RegLocation loc = cUnit->regLocation[startLoc + i];
buzbeedfd3d702011-08-28 12:56:51 -07001754 //TUNING: be smarter about flushing ins to frame
1755 storeBaseDisp(cUnit, rSP, loc.spOffset, startReg + i, kWord);
buzbee67bf8852011-08-17 17:51:35 -07001756 if (loc.location == kLocPhysReg) {
1757 genRegCopy(cUnit, loc.lowReg, startReg + i);
buzbee67bf8852011-08-17 17:51:35 -07001758 }
1759 }
1760
1761 // Handle special case of wide argument half in regs, half in frame
1762 if (inRegs == 3) {
1763 RegLocation loc = cUnit->regLocation[startLoc + 2];
1764 if (loc.wide && loc.location == kLocPhysReg) {
1765 // Load the other half of the arg into the promoted pair
buzbee561227c2011-09-02 15:28:19 -07001766 loadWordDisp(cUnit, rSP, loc.spOffset + 4, loc.highReg);
buzbee67bf8852011-08-17 17:51:35 -07001767 inRegs++;
1768 }
1769 }
1770
1771 // Now, do initial assignment of all promoted arguments passed in frame
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001772 for (int i = inRegs; i < cUnit->method->NumIns();) {
buzbee67bf8852011-08-17 17:51:35 -07001773 RegLocation loc = cUnit->regLocation[startLoc + i];
1774 if (loc.fpLocation == kLocPhysReg) {
1775 loc.location = kLocPhysReg;
1776 loc.fp = true;
1777 loc.lowReg = loc.fpLowReg;
1778 loc.highReg = loc.fpHighReg;
1779 }
1780 if (loc.location == kLocPhysReg) {
1781 if (loc.wide) {
1782 loadBaseDispWide(cUnit, NULL, rSP, loc.spOffset,
1783 loc.lowReg, loc.highReg, INVALID_SREG);
1784 i++;
1785 } else {
buzbee561227c2011-09-02 15:28:19 -07001786 loadWordDisp(cUnit, rSP, loc.spOffset, loc.lowReg);
buzbee67bf8852011-08-17 17:51:35 -07001787 }
1788 }
1789 i++;
1790 }
1791}
1792
1793/* Handle the content in each basic block */
1794static bool methodBlockCodeGen(CompilationUnit* cUnit, BasicBlock* bb)
1795{
1796 MIR* mir;
1797 ArmLIR* labelList = (ArmLIR*) cUnit->blockLabelList;
1798 int blockId = bb->id;
1799
1800 cUnit->curBlock = bb;
1801 labelList[blockId].operands[0] = bb->startOffset;
1802
1803 /* Insert the block label */
1804 labelList[blockId].opcode = kArmPseudoNormalBlockLabel;
1805 oatAppendLIR(cUnit, (LIR*) &labelList[blockId]);
1806
1807 oatClobberAllRegs(cUnit);
1808 oatResetNullCheck(cUnit);
1809
1810 ArmLIR* headLIR = NULL;
1811
1812 if (bb->blockType == kEntryBlock) {
1813 /*
1814 * On entry, r0, r1, r2 & r3 are live. Let the register allocation
1815 * mechanism know so it doesn't try to use any of them when
1816 * expanding the frame or flushing. This leaves the utility
1817 * code with a single temp: r12. This should be enough.
1818 */
1819 oatLockTemp(cUnit, r0);
1820 oatLockTemp(cUnit, r1);
1821 oatLockTemp(cUnit, r2);
1822 oatLockTemp(cUnit, r3);
buzbeecefd1872011-09-09 09:59:52 -07001823
1824 /*
1825 * We can safely skip the stack overflow check if we're
1826 * a leaf *and* our frame size < fudge factor.
1827 */
1828 bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
1829 ((size_t)cUnit->frameSize <
1830 art::Thread::kStackOverflowReservedBytes));
buzbee67bf8852011-08-17 17:51:35 -07001831 newLIR0(cUnit, kArmPseudoMethodEntry);
buzbeecefd1872011-09-09 09:59:52 -07001832 if (!skipOverflowCheck) {
1833 /* Load stack limit */
1834 loadWordDisp(cUnit, rSELF,
1835 art::Thread::StackEndOffset().Int32Value(), r12);
1836 }
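        /*
         * r12 now holds Thread::stackEnd. After the spills below, the
         * prospective new SP is formed in rLR and compared against r12; if it
         * falls below the limit we branch to the kArmThrowStackOverflow
         * launchpad instead of committing it to rSP.
         */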
buzbee67bf8852011-08-17 17:51:35 -07001837 /* Spill core callee saves */
1838 newLIR1(cUnit, kThumb2Push, cUnit->coreSpillMask);
1839 /* Need to spill any FP regs? */
1840 if (cUnit->numFPSpills) {
1841 newLIR1(cUnit, kThumb2VPushCS, cUnit->numFPSpills);
1842 }
buzbeecefd1872011-09-09 09:59:52 -07001843 if (!skipOverflowCheck) {
1844 opRegRegImm(cUnit, kOpSub, rLR, rSP,
1845 cUnit->frameSize - (cUnit->numSpills * 4));
buzbeeec5adf32011-09-11 15:25:43 -07001846 genRegRegCheck(cUnit, kArmCondCc, rLR, r12, NULL,
1847 kArmThrowStackOverflow);
buzbeecefd1872011-09-09 09:59:52 -07001848 genRegCopy(cUnit, rSP, rLR); // Establish stack
1849 } else {
1850 opRegImm(cUnit, kOpSub, rSP,
1851 cUnit->frameSize - (cUnit->numSpills * 4));
1852 }
buzbee67bf8852011-08-17 17:51:35 -07001853 storeBaseDisp(cUnit, rSP, 0, r0, kWord);
1854 flushIns(cUnit);
1855 oatFreeTemp(cUnit, r0);
1856 oatFreeTemp(cUnit, r1);
1857 oatFreeTemp(cUnit, r2);
1858 oatFreeTemp(cUnit, r3);
1859 } else if (bb->blockType == kExitBlock) {
1860 newLIR0(cUnit, kArmPseudoMethodExit);
1861 opRegImm(cUnit, kOpAdd, rSP, cUnit->frameSize - (cUnit->numSpills * 4));
1862 /* Need to restore any FP callee saves? */
1863 if (cUnit->numFPSpills) {
1864 newLIR1(cUnit, kThumb2VPopCS, cUnit->numFPSpills);
1865 }
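        /*
         * If rLR was spilled in the prologue, fold the return into the pop by
         * popping straight into rPC; otherwise fall back to an explicit
         * bx rLR.
         */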
1866 if (cUnit->coreSpillMask & (1 << rLR)) {
1867 /* Unspill rLR to rPC */
1868 cUnit->coreSpillMask &= ~(1 << rLR);
1869 cUnit->coreSpillMask |= (1 << rPC);
1870 }
1871 newLIR1(cUnit, kThumb2Pop, cUnit->coreSpillMask);
1872 if (!(cUnit->coreSpillMask & (1 << rPC))) {
1873            /* We didn't pop to rPC, so must do a bx rLR */
1874 newLIR1(cUnit, kThumbBx, rLR);
1875 }
1876 }
1877
1878 for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
1879
1880 oatResetRegPool(cUnit);
1881 if (cUnit->disableOpt & (1 << kTrackLiveTemps)) {
1882 oatClobberAllRegs(cUnit);
1883 }
1884
1885 if (cUnit->disableOpt & (1 << kSuppressLoads)) {
1886 oatResetDefTracking(cUnit);
1887 }
1888
1889 if ((int)mir->dalvikInsn.opcode >= (int)kMirOpFirst) {
1890 handleExtendedMethodMIR(cUnit, mir);
1891 continue;
1892 }
1893
1894 cUnit->currentDalvikOffset = mir->offset;
1895
1896 Opcode dalvikOpcode = mir->dalvikInsn.opcode;
1897 InstructionFormat dalvikFormat =
1898 dexGetFormatFromOpcode(dalvikOpcode);
1899
1900 ArmLIR* boundaryLIR;
1901
1902 /* Mark the beginning of a Dalvik instruction for line tracking */
1903 boundaryLIR = newLIR1(cUnit, kArmPseudoDalvikByteCodeBoundary,
1904 (int) oatGetDalvikDisassembly(
1905 &mir->dalvikInsn, ""));
1906 /* Remember the first LIR for this block */
1907 if (headLIR == NULL) {
1908 headLIR = boundaryLIR;
1909 /* Set the first boundaryLIR as a scheduling barrier */
1910 headLIR->defMask = ENCODE_ALL;
1911 }
1912
1913 /* Don't generate the SSA annotation unless verbose mode is on */
1914 if (cUnit->printMe && mir->ssaRep) {
1915 char *ssaString = oatGetSSAString(cUnit, mir->ssaRep);
1916 newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
1917 }
1918
1919 bool notHandled = compileDalvikInstruction(cUnit, mir, bb, labelList);
1920
1921 if (notHandled) {
1922 char buf[100];
1923 snprintf(buf, 100, "%#06x: Opcode %#x (%s) / Fmt %d not handled",
1924 mir->offset,
1925 dalvikOpcode, dexGetOpcodeName(dalvikOpcode),
1926 dalvikFormat);
1927 LOG(FATAL) << buf;
1928 }
1929 }
1930
1931 if (headLIR) {
1932 /*
1933 * Eliminate redundant loads/stores and delay stores into later
1934 * slots
1935 */
1936 oatApplyLocalOptimizations(cUnit, (LIR*) headLIR,
1937 cUnit->lastLIRInsn);
1938
1939 /*
1940 * Generate an unconditional branch to the fallthrough block.
1941 */
1942 if (bb->fallThrough) {
1943 genUnconditionalBranch(cUnit,
1944 &labelList[bb->fallThrough->id]);
1945 }
1946 }
1947 return false;
1948}
1949
1950/*
1951 * Nop any unconditional branches that go to the next instruction.
1952 * Note: new redundant branches may be inserted later, and we'll
1953 * use a check in final instruction assembly to nop those out.
1954 */
1955void removeRedundantBranches(CompilationUnit* cUnit)
1956{
1957 ArmLIR* thisLIR;
1958
1959 for (thisLIR = (ArmLIR*) cUnit->firstLIRInsn;
1960 thisLIR != (ArmLIR*) cUnit->lastLIRInsn;
1961 thisLIR = NEXT_LIR(thisLIR)) {
1962
1963 /* Branch to the next instruction */
1964 if ((thisLIR->opcode == kThumbBUncond) ||
1965 (thisLIR->opcode == kThumb2BUncond)) {
1966 ArmLIR* nextLIR = thisLIR;
1967
1968 while (true) {
1969 nextLIR = NEXT_LIR(nextLIR);
1970
1971 /*
1972 * Is the branch target the next instruction?
1973 */
1974 if (nextLIR == (ArmLIR*) thisLIR->generic.target) {
1975 thisLIR->flags.isNop = true;
1976 break;
1977 }
1978
1979 /*
1980 * Found real useful stuff between the branch and the target.
1981 * Need to explicitly check the lastLIRInsn here because it
1982 * might be the last real instruction.
1983 */
1984 if (!isPseudoOpcode(nextLIR->opcode) ||
1985                    (nextLIR == (ArmLIR*) cUnit->lastLIRInsn))
1986 break;
1987 }
1988 }
1989 }
1990}
1991
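/*
 * Materialize the out-of-line throw launchpads. Each launchpad records its
 * throw kind in operands[0] and up to two arguments in operands[2] and
 * operands[3]; here we shuffle those arguments into r0/r1 as required, load
 * the matching Thread helper and call it via callUnwindableHelper.
 */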
buzbee5ade1d22011-09-09 14:44:52 -07001992static void handleThrowLaunchpads(CompilationUnit *cUnit)
1993{
1994 ArmLIR** throwLabel =
1995 (ArmLIR **) cUnit->throwLaunchpads.elemList;
1996 int numElems = cUnit->throwLaunchpads.numUsed;
1997 int i;
1998
1999 for (i = 0; i < numElems; i++) {
2000 ArmLIR* lab = throwLabel[i];
2001 cUnit->currentDalvikOffset = lab->operands[1];
2002 oatAppendLIR(cUnit, (LIR *)lab);
2003 int funcOffset = 0;
2004 int v1 = lab->operands[2];
2005 int v2 = lab->operands[3];
2006 switch(lab->operands[0]) {
2007 case kArmThrowNullPointer:
2008 funcOffset = OFFSETOF_MEMBER(Thread, pThrowNullPointerFromCode);
2009 break;
2010 case kArmThrowArrayBounds:
2011 if (v2 != r0) {
2012 genRegCopy(cUnit, r0, v1);
2013 genRegCopy(cUnit, r1, v2);
2014 } else {
2015 if (v1 == r1) {
2016 genRegCopy(cUnit, r12, v1);
2017 genRegCopy(cUnit, r1, v2);
2018 genRegCopy(cUnit, r0, r12);
2019 } else {
2020 genRegCopy(cUnit, r1, v2);
2021 genRegCopy(cUnit, r0, v1);
2022 }
2023 }
2024 funcOffset = OFFSETOF_MEMBER(Thread, pThrowArrayBoundsFromCode);
2025 break;
2026 case kArmThrowDivZero:
2027 funcOffset = OFFSETOF_MEMBER(Thread, pThrowDivZeroFromCode);
2028 break;
2029 case kArmThrowVerificationError:
2030 loadConstant(cUnit, r0, v1);
2031 loadConstant(cUnit, r1, v2);
2032 funcOffset =
2033 OFFSETOF_MEMBER(Thread, pThrowVerificationErrorFromCode);
2034 break;
2035 case kArmThrowNegArraySize:
2036 genRegCopy(cUnit, r0, v1);
2037 funcOffset =
2038 OFFSETOF_MEMBER(Thread, pThrowNegArraySizeFromCode);
2039 break;
2040 case kArmThrowInternalError:
2041 genRegCopy(cUnit, r0, v1);
2042 funcOffset =
2043 OFFSETOF_MEMBER(Thread, pThrowInternalErrorFromCode);
2044 break;
2045 case kArmThrowRuntimeException:
2046 genRegCopy(cUnit, r0, v1);
2047 funcOffset =
2048 OFFSETOF_MEMBER(Thread, pThrowRuntimeExceptionFromCode);
2049 break;
2050 case kArmThrowNoSuchMethod:
2051 genRegCopy(cUnit, r0, v1);
2052 funcOffset =
2053 OFFSETOF_MEMBER(Thread, pThrowNoSuchMethodFromCode);
2054 break;
buzbeeec5adf32011-09-11 15:25:43 -07002055 case kArmThrowStackOverflow:
2056 funcOffset =
2057 OFFSETOF_MEMBER(Thread, pStackOverflowFromCode);
2058 // Restore stack alignment
2059                // Undo the spill pushes so SP is back at its on-entry value
2060 break;
buzbee5ade1d22011-09-09 14:44:52 -07002061 default:
2062 LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
2063 }
2064 loadWordDisp(cUnit, rSELF, funcOffset, rLR);
buzbeeec5adf32011-09-11 15:25:43 -07002065 callUnwindableHelper(cUnit, rLR);
buzbee5ade1d22011-09-09 14:44:52 -07002066 }
2067}
2068
buzbee67bf8852011-08-17 17:51:35 -07002069void oatMethodMIR2LIR(CompilationUnit* cUnit)
2070{
2071 /* Used to hold the labels of each block */
2072 cUnit->blockLabelList =
2073 (void *) oatNew(sizeof(ArmLIR) * cUnit->numBlocks, true);
2074
2075 oatDataFlowAnalysisDispatcher(cUnit, methodBlockCodeGen,
2076 kPreOrderDFSTraversal, false /* Iterative */);
2077 removeRedundantBranches(cUnit);
buzbee5ade1d22011-09-09 14:44:52 -07002078
2079 handleThrowLaunchpads(cUnit);
buzbee67bf8852011-08-17 17:51:35 -07002080}
2081
2082/* Common initialization routine for an architecture family */
2083bool oatArchInit()
2084{
2085 int i;
2086
2087 for (i = 0; i < kArmLast; i++) {
2088 if (EncodingMap[i].opcode != i) {
2089 LOG(FATAL) << "Encoding order for " << EncodingMap[i].name <<
2090 " is wrong: expecting " << i << ", seeing " <<
2091 (int)EncodingMap[i].opcode;
2092 }
2093 }
2094
2095 return oatArchVariantInit();
2096}
2097
2098/* Needed by the Assembler */
2099void oatSetupResourceMasks(ArmLIR* lir)
2100{
2101 setupResourceMasks(lir);
2102}
2103
2104/* Needed by the ld/st optimizations */
2105ArmLIR* oatRegCopyNoInsert(CompilationUnit* cUnit, int rDest, int rSrc)
2106{
2107 return genRegCopyNoInsert(cUnit, rDest, rSrc);
2108}
2109
2110/* Needed by the register allocator */
2111ArmLIR* oatRegCopy(CompilationUnit* cUnit, int rDest, int rSrc)
2112{
2113 return genRegCopy(cUnit, rDest, rSrc);
2114}
2115
2116/* Needed by the register allocator */
2117void oatRegCopyWide(CompilationUnit* cUnit, int destLo, int destHi,
2118 int srcLo, int srcHi)
2119{
2120 genRegCopyWide(cUnit, destLo, destHi, srcLo, srcHi);
2121}
2122
2123void oatFlushRegImpl(CompilationUnit* cUnit, int rBase,
2124 int displacement, int rSrc, OpSize size)
2125{
2126 storeBaseDisp(cUnit, rBase, displacement, rSrc, size);
2127}
2128
2129void oatFlushRegWideImpl(CompilationUnit* cUnit, int rBase,
2130 int displacement, int rSrcLo, int rSrcHi)
2131{
2132 storeBaseDispWide(cUnit, rBase, displacement, rSrcLo, rSrcHi);
2133}