/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

namespace art {

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets. Only mid-level support utilities
 * and "op" calls may be used here.
 */


/*
 * x86 targets will likely be different enough to need their own
 * invoke gen routines.
 */
#if defined(TARGET_ARM) || defined(TARGET_MIPS)
typedef int (*NextCallInsn)(CompilationUnit*, MIR*, int, uint32_t dexIdx,
                            uint32_t methodIdx);
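/*
 * A NextCallInsn function is called repeatedly while the argument list is
 * being set up: each call emits the next step of the invoke sequence for
 * the given state and returns the following state, or -1 once the sequence
 * is complete.
 */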
/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame. Perform initial
 * assignment of promoted arguments.
 */
void flushIns(CompilationUnit* cUnit)
{
    if (cUnit->numIns == 0)
        return;
    int firstArgReg = rARG1;
    int lastArgReg = rARG3;
    int startVReg = cUnit->numDalvikRegisters - cUnit->numIns;
    /*
     * Copy incoming arguments to their proper home locations.
     * NOTE: an older version of dx had an issue in which
     * it would reuse static method argument registers.
     * This could result in the same Dalvik virtual register
     * being promoted to both core and fp regs. To account for this,
     * we only copy to the corresponding promoted physical register
     * if it matches the type of the SSA name for the incoming
     * argument. It is also possible that long and double arguments
     * end up half-promoted. In those cases, we must flush the promoted
     * half to memory as well.
     */
    for (int i = 0; i < cUnit->numIns; i++) {
        PromotionMap* vMap = &cUnit->promotionMap[startVReg + i];
        if (i <= (lastArgReg - firstArgReg)) {
            // If arriving in register
            bool needFlush = true;
            RegLocation* tLoc = &cUnit->regLocation[startVReg + i];
            if ((vMap->coreLocation == kLocPhysReg) && !tLoc->fp) {
                opRegCopy(cUnit, vMap->coreReg, firstArgReg + i);
                needFlush = false;
            } else if ((vMap->fpLocation == kLocPhysReg) && tLoc->fp) {
                opRegCopy(cUnit, vMap->fpReg, firstArgReg + i);
                needFlush = false;
            } else {
                needFlush = true;
            }

            // For wide args, force flush if only half is promoted
            if (tLoc->wide) {
                PromotionMap* pMap = vMap + (tLoc->highWord ? -1 : +1);
                needFlush |= (pMap->coreLocation != vMap->coreLocation) ||
                             (pMap->fpLocation != vMap->fpLocation);
            }
            if (needFlush) {
                storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
                              firstArgReg + i, kWord);
            }
        } else {
            // If arriving in frame & promoted
            if (vMap->coreLocation == kLocPhysReg) {
                loadWordDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
                             vMap->coreReg);
            }
            if (vMap->fpLocation == kLocPhysReg) {
                loadWordDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
                             vMap->fpReg);
            }
        }
    }
}

/*
 * Bit of a hack here - in lieu of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 */
int nextSDCallInsn(CompilationUnit* cUnit, MIR* mir,
                   int state, uint32_t dexIdx, uint32_t unused)
{
    switch (state) {
        case 0: // Get the current Method* [sets rARG0]
            loadCurrMethodDirect(cUnit, rARG0);
            break;
        case 1: // Get method->dex_cache_resolved_methods_
            loadWordDisp(cUnit, rARG0,
                         Method::DexCacheResolvedMethodsOffset().Int32Value(),
                         rARG0);
            break;
        case 2: // Grab target method*
            loadWordDisp(cUnit, rARG0,
                         Array::DataOffset(sizeof(Object*)).Int32Value() + dexIdx * 4,
                         rARG0);
            break;
        case 3: // Grab the code from the method*
            loadWordDisp(cUnit, rARG0, Method::GetCodeOffset().Int32Value(),
                         rINVOKE_TGT);
            break;
        default:
            return -1;
    }
    return state + 1;
}

/*
 * Bit of a hack here - in lieu of a real scheduling pass,
 * emit the next instruction in a virtual invoke sequence.
 * We can use rINVOKE_TGT as a temp prior to target address loading.
 * Note also that we'll load the first argument ("this") into
 * rARG1 here rather than the standard loadArgRegs.
 */
int nextVCallInsn(CompilationUnit* cUnit, MIR* mir,
                  int state, uint32_t dexIdx, uint32_t methodIdx)
{
    RegLocation rlArg;
    /*
     * This is the fast path in which the target virtual method is
     * fully resolved at compile time.
     */
    switch (state) {
        case 0: // Get "this" [set rARG1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, rARG1);
            break;
        case 1: // Is "this" null? [use rARG1]
            genNullCheck(cUnit, oatSSASrc(mir, 0), rARG1, mir);
            // Get this->klass_ [use rARG1, set rINVOKE_TGT]
            loadWordDisp(cUnit, rARG1, Object::ClassOffset().Int32Value(),
                         rINVOKE_TGT);
            break;
        case 2: // Get this->klass_->vtable [use rINVOKE_TGT, set rINVOKE_TGT]
            loadWordDisp(cUnit, rINVOKE_TGT, Class::VTableOffset().Int32Value(),
                         rINVOKE_TGT);
            break;
        case 3: // Get target method [use rINVOKE_TGT, set rARG0]
            loadWordDisp(cUnit, rINVOKE_TGT, (methodIdx * 4) +
                         Array::DataOffset(sizeof(Object*)).Int32Value(),
                         rARG0);
            break;
        case 4: // Get the compiled code address [uses rARG0, sets rINVOKE_TGT]
            loadWordDisp(cUnit, rARG0, Method::GetCodeOffset().Int32Value(),
                         rINVOKE_TGT);
            break;
        default:
            return -1;
    }
    return state + 1;
}

/*
 * Interleave launch code for INVOKE_SUPER. See comments
 * for nextVCallInsn.
 */
int nextSuperCallInsn(CompilationUnit* cUnit, MIR* mir,
                      int state, uint32_t dexIdx, uint32_t methodIdx)
{
    /*
     * This is the fast path in which the target virtual method is
     * fully resolved at compile time. Note also that this path assumes
     * that the check to verify that the target method index falls
     * within the size of the super's vtable has been done at compile-time.
     */
    RegLocation rlArg;
    switch (state) {
        case 0: // Get current Method* [set rARG0]
            loadCurrMethodDirect(cUnit, rARG0);
            // Load "this" [set rARG1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, rARG1);
            // Get method->declaring_class_ [use rARG0, set rINVOKE_TGT]
            loadWordDisp(cUnit, rARG0,
                         Method::DeclaringClassOffset().Int32Value(),
                         rINVOKE_TGT);
            // Is "this" null? [use rARG1]
            genNullCheck(cUnit, oatSSASrc(mir, 0), rARG1, mir);
            break;
        case 1: // Get method->declaring_class_->super_class_ [use/set rINVOKE_TGT]
            loadWordDisp(cUnit, rINVOKE_TGT,
                         Class::SuperClassOffset().Int32Value(), rINVOKE_TGT);
            break;
        case 2: // Get ...->super_class_->vtable [use/set rINVOKE_TGT]
            loadWordDisp(cUnit, rINVOKE_TGT,
                         Class::VTableOffset().Int32Value(), rINVOKE_TGT);
            break;
        case 3: // Get target method [use rINVOKE_TGT, set rARG0]
            loadWordDisp(cUnit, rINVOKE_TGT, (methodIdx * 4) +
                         Array::DataOffset(sizeof(Object*)).Int32Value(),
                         rARG0);
            break;
        case 4: // Target compiled code address [uses rARG0, sets rINVOKE_TGT]
            loadWordDisp(cUnit, rARG0, Method::GetCodeOffset().Int32Value(),
                         rINVOKE_TGT);
            break;
        default:
            return -1;
    }
    return state + 1;
}

int nextInvokeInsnSP(CompilationUnit* cUnit, MIR* mir, int trampoline,
                     int state, uint32_t dexIdx, uint32_t methodIdx)
{
    /*
     * This handles the case in which the base method is not fully
     * resolved at compile time; in that case we bail to a runtime helper.
     */
    if (state == 0) {
        // Load trampoline target
        loadWordDisp(cUnit, rSELF, trampoline, rINVOKE_TGT);
        // Load rARG0 with method index
        loadConstant(cUnit, rARG0, dexIdx);
        return 1;
    }
    return -1;
}

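/*
 * Each of the helpers below simply selects the appropriate Thread-local
 * trampoline entry point and defers to nextInvokeInsnSP.
 */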
int nextStaticCallInsnSP(CompilationUnit* cUnit, MIR* mir,
                         int state, uint32_t dexIdx, uint32_t methodIdx)
{
    int trampoline = OFFSETOF_MEMBER(Thread, pInvokeStaticTrampolineWithAccessCheck);
    return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
}

int nextDirectCallInsnSP(CompilationUnit* cUnit, MIR* mir, int state,
                         uint32_t dexIdx, uint32_t methodIdx)
{
    int trampoline = OFFSETOF_MEMBER(Thread, pInvokeDirectTrampolineWithAccessCheck);
    return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
}

int nextSuperCallInsnSP(CompilationUnit* cUnit, MIR* mir, int state,
                        uint32_t dexIdx, uint32_t methodIdx)
{
    int trampoline = OFFSETOF_MEMBER(Thread, pInvokeSuperTrampolineWithAccessCheck);
    return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
}

int nextVCallInsnSP(CompilationUnit* cUnit, MIR* mir, int state,
                    uint32_t dexIdx, uint32_t methodIdx)
{
    int trampoline = OFFSETOF_MEMBER(Thread, pInvokeVirtualTrampolineWithAccessCheck);
    return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
}

/*
 * All invoke-interface calls bounce off of art_invoke_interface_trampoline,
 * which will locate the target and continue on via a tail call.
 */
int nextInterfaceCallInsn(CompilationUnit* cUnit, MIR* mir, int state,
                          uint32_t dexIdx, uint32_t unused)
{
    int trampoline = OFFSETOF_MEMBER(Thread, pInvokeInterfaceTrampoline);
    return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
}

int nextInterfaceCallInsnWithAccessCheck(CompilationUnit* cUnit, MIR* mir,
                                         int state, uint32_t dexIdx,
                                         uint32_t unused)
{
    int trampoline = OFFSETOF_MEMBER(Thread, pInvokeInterfaceTrampolineWithAccessCheck);
    return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
}

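/*
 * Load the first (up to three) arguments into rARG1..rARG3, interleaving
 * the next steps of the invoke sequence via nextCallInsn between argument
 * loads. When skipThis is set, the invoke sequence has already placed
 * "this" in rARG1, so the first argument is not reloaded.
 */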
int loadArgRegs(CompilationUnit* cUnit, MIR* mir, DecodedInstruction* dInsn,
                int callState, NextCallInsn nextCallInsn, uint32_t dexIdx,
                uint32_t methodIdx, bool skipThis)
{
    int nextReg = rARG1;
    int nextArg = 0;
    if (skipThis) {
        nextReg++;
        nextArg++;
    }
    for (; (nextReg <= rARG3) && (nextArg < mir->ssaRep->numUses); nextReg++) {
        RegLocation rlArg = oatGetRawSrc(cUnit, mir, nextArg++);
        rlArg = oatUpdateRawLoc(cUnit, rlArg);
        if (rlArg.wide && (nextReg <= rARG2)) {
            loadValueDirectWideFixed(cUnit, rlArg, nextReg, nextReg + 1);
            nextReg++;
            nextArg++;
        } else {
            rlArg.wide = false;
            loadValueDirectFixed(cUnit, rlArg, nextReg);
        }
        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
    }
    return callState;
}

/*
 * Load up to 5 arguments, the first three of which will be in
 * rARG1 .. rARG3. On entry rARG0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer. Note that this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
int genDalvikArgsNoRange(CompilationUnit* cUnit, MIR* mir,
                         DecodedInstruction* dInsn, int callState,
                         LIR** pcrLabel, NextCallInsn nextCallInsn,
                         uint32_t dexIdx, uint32_t methodIdx, bool skipThis)
{
    RegLocation rlArg;

    /* If no arguments, just return */
    if (dInsn->vA == 0)
        return callState;

    callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);

    DCHECK_LE(dInsn->vA, 5U);
    if (dInsn->vA > 3) {
        uint32_t nextUse = 3;
        // Detect special case of wide arg spanning arg3/arg4
        RegLocation rlUse0 = oatGetRawSrc(cUnit, mir, 0);
        RegLocation rlUse1 = oatGetRawSrc(cUnit, mir, 1);
        RegLocation rlUse2 = oatGetRawSrc(cUnit, mir, 2);
        if (((!rlUse0.wide && !rlUse1.wide) || rlUse0.wide) &&
            rlUse2.wide) {
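            /*
             * Here uses[2] is the low half of a wide pair whose high half
             * (uses[3]) falls past the last argument register, so store the
             * high half directly to its out slot and continue the stack
             * copies below starting at uses[4].
             */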
            int reg;
            // Wide spans, we need the 2nd half of uses[2].
            rlArg = oatUpdateLocWide(cUnit, rlUse2);
            if (rlArg.location == kLocPhysReg) {
                reg = rlArg.highReg;
            } else {
                // rARG2 & rARG3 can safely be used here
                reg = rARG3;
                loadWordDisp(cUnit, rSP,
                             oatSRegOffset(cUnit, rlArg.sRegLow) + 4, reg);
                callState = nextCallInsn(cUnit, mir, callState, dexIdx,
                                         methodIdx);
            }
            storeBaseDisp(cUnit, rSP, (nextUse + 1) * 4, reg, kWord);
            storeBaseDisp(cUnit, rSP, 16 /* (3+1)*4 */, reg, kWord);
            callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
            nextUse++;
        }
        // Loop through the rest
        while (nextUse < dInsn->vA) {
            int lowReg;
            int highReg;
            rlArg = oatGetRawSrc(cUnit, mir, nextUse);
            rlArg = oatUpdateRawLoc(cUnit, rlArg);
            if (rlArg.location == kLocPhysReg) {
                lowReg = rlArg.lowReg;
                highReg = rlArg.highReg;
            } else {
                lowReg = rARG2;
                highReg = rARG3;
                if (rlArg.wide) {
                    loadValueDirectWideFixed(cUnit, rlArg, lowReg, highReg);
                } else {
                    loadValueDirectFixed(cUnit, rlArg, lowReg);
                }
                callState = nextCallInsn(cUnit, mir, callState, dexIdx,
                                         methodIdx);
            }
            int outsOffset = (nextUse + 1) * 4;
            if (rlArg.wide) {
                storeBaseDispWide(cUnit, rSP, outsOffset, lowReg, highReg);
                nextUse += 2;
            } else {
                storeWordDisp(cUnit, rSP, outsOffset, lowReg);
                nextUse++;
            }
            callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
        }
    }

    callState = loadArgRegs(cUnit, mir, dInsn, callState, nextCallInsn,
                            dexIdx, methodIdx, skipThis);

    if (pcrLabel) {
        *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir, 0), rARG1, mir);
    }
    return callState;
}

/*
 * May have 0+ arguments (also used for jumbo). Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying. This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in rARG1-rARG3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in rARG1-rARG3
 *
 */
int genDalvikArgsRange(CompilationUnit* cUnit, MIR* mir,
                       DecodedInstruction* dInsn, int callState,
                       LIR** pcrLabel, NextCallInsn nextCallInsn,
                       uint32_t dexIdx, uint32_t methodIdx, bool skipThis)
{
    int firstArg = dInsn->vC;
    int numArgs = dInsn->vA;

    // If we can treat it as non-range (Jumbo ops will use range form)
    if (numArgs <= 5)
        return genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pcrLabel,
                                    nextCallInsn, dexIdx, methodIdx,
                                    skipThis);
    /*
     * Make sure the range list doesn't span the break between the normal
     * Dalvik vRegs and the ins.
     */
    int highestArg = oatGetSrc(cUnit, mir, numArgs - 1).sRegLow;
    int boundaryReg = cUnit->numDalvikRegisters - cUnit->numIns;
    if ((firstArg < boundaryReg) && (highestArg >= boundaryReg)) {
        LOG(FATAL) << "Argument list spanned locals & args";
    }

    /*
     * First load the non-register arguments. Both forms expect all
     * of the source arguments to be in their home frame location, so
     * scan the sReg names and flush any promoted arguments back to
     * their frame backing storage.
     */
    // Scan the rest of the args - if in physReg flush to memory
    for (int nextArg = 0; nextArg < numArgs;) {
        RegLocation loc = oatGetRawSrc(cUnit, mir, nextArg);
        if (loc.wide) {
            loc = oatUpdateLocWide(cUnit, loc);
            if ((nextArg >= 2) && (loc.location == kLocPhysReg)) {
                storeBaseDispWide(cUnit, rSP,
                                  oatSRegOffset(cUnit, loc.sRegLow),
                                  loc.lowReg, loc.highReg);
            }
            nextArg += 2;
        } else {
            loc = oatUpdateLoc(cUnit, loc);
            if ((nextArg >= 3) && (loc.location == kLocPhysReg)) {
                storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, loc.sRegLow),
                              loc.lowReg, kWord);
            }
            nextArg++;
        }
    }

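    /*
     * The outs area starts just past the Method* slot at the bottom of the
     * frame, so out argument n lives at rSP + 4 + (n * 4); the block copy
     * below therefore begins with argument 3, the first one not passed in
     * rARG1..rARG3.
     */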
    int startOffset = oatSRegOffset(cUnit,
                                    cUnit->regLocation[mir->ssaRep->uses[3]].sRegLow);
    int outsOffset = 4 /* Method* */ + (3 * 4);
#if defined(TARGET_MIPS)
    // Generate memcpy
    opRegRegImm(cUnit, kOpAdd, rARG0, rSP, outsOffset);
    opRegRegImm(cUnit, kOpAdd, rARG1, rSP, startOffset);
    int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread, pMemcpy));
    loadConstant(cUnit, rARG2, (numArgs - 3) * 4);
    callRuntimeHelper(cUnit, rTgt);
    // Restore Method*
    loadCurrMethodDirect(cUnit, rARG0);
#else
    if (numArgs >= 20) {
        // Generate memcpy
        opRegRegImm(cUnit, kOpAdd, rARG0, rSP, outsOffset);
        opRegRegImm(cUnit, kOpAdd, rARG1, rSP, startOffset);
        int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread, pMemcpy));
        loadConstant(cUnit, rARG2, (numArgs - 3) * 4);
        callRuntimeHelper(cUnit, rTgt);
        // Restore Method*
        loadCurrMethodDirect(cUnit, rARG0);
    } else {
        // Use vldm/vstm pair using rARG3 as a temp
        int regsLeft = std::min(numArgs - 3, 16);
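        /*
         * rARG3 first points at the home location of uses[3]; vldm pulls up
         * to 16 words from there into fp registers starting at fr0, then
         * rARG3 is repointed at the outs area and vstm writes the same
         * registers back out, giving a compact block copy of the stacked
         * arguments.
         */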
        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
        opRegRegImm(cUnit, kOpAdd, rARG3, rSP, startOffset);
        LIR* ld = newLIR3(cUnit, kThumb2Vldms, rARG3, fr0, regsLeft);
        // TUNING: loosen barrier
        ld->defMask = ENCODE_ALL;
        setMemRefType(ld, true /* isLoad */, kDalvikReg);
        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
        opRegRegImm(cUnit, kOpAdd, rARG3, rSP, 4 /* Method* */ + (3 * 4));
        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
        LIR* st = newLIR3(cUnit, kThumb2Vstms, rARG3, fr0, regsLeft);
        setMemRefType(st, false /* isLoad */, kDalvikReg);
        st->defMask = ENCODE_ALL;
        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
    }
#endif

    callState = loadArgRegs(cUnit, mir, dInsn, callState, nextCallInsn,
                            dexIdx, methodIdx, skipThis);

    callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
    if (pcrLabel) {
        *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir, 0), rARG1, mir);
    }
    return callState;
}

#endif  // TARGET_ARM || TARGET_MIPS


}  // namespace art