/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This file contains codegen and support common to all supported
 * ARM variants.  It is included by:
 *
 *        Codegen-$(TARGET_ARCH_VARIANT).c
 *
 * which combines this common code with specific support found in the
 * applicable directory below this one.
 */

/* Track exercised opcodes */
static int opcodeCoverage[kNumPackedOpcodes];

static void setMemRefType(ArmLIR* lir, bool isLoad, int memType)
{
    u8 *maskPtr;
    u8 mask = ENCODE_MEM;
    assert(EncodingMap[lir->opcode].flags & (IS_LOAD | IS_STORE));
    if (isLoad) {
        maskPtr = &lir->useMask;
    } else {
        maskPtr = &lir->defMask;
    }
    /* Clear out the memref flags */
    *maskPtr &= ~mask;
    /* ...and then add back the one we need */
    switch (memType) {
        case kLiteral:
            assert(isLoad);
            *maskPtr |= ENCODE_LITERAL;
            break;
        case kDalvikReg:
            *maskPtr |= ENCODE_DALVIK_REG;
            break;
        case kHeapRef:
            *maskPtr |= ENCODE_HEAP_REF;
            break;
        case kMustNotAlias:
            /* Currently only loads can be marked as kMustNotAlias */
            assert(!(EncodingMap[lir->opcode].flags & IS_STORE));
            *maskPtr |= ENCODE_MUST_NOT_ALIAS;
            break;
        default:
            LOG(FATAL) << "Oat: invalid memref kind - " << memType;
    }
}
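
/*
 * Illustrative example (a sketch; the call below is hypothetical): for a
 * PC-relative literal load whose encoding is flagged IS_LOAD,
 *
 *     setMemRefType(loadLIR, true, kLiteral);
 *
 * first clears all ENCODE_MEM bits from loadLIR->useMask and then sets only
 * ENCODE_LITERAL, so downstream dependency checks can treat the access as
 * disjoint from Dalvik-register and heap memory.
 */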

/*
 * Mark load/store instructions that access Dalvik registers through r5FP +
 * offset.
 */
static void annotateDalvikRegAccess(ArmLIR* lir, int regId, bool isLoad)
{
    setMemRefType(lir, isLoad, kDalvikReg);

    /*
     * Store the Dalvik register id in aliasInfo. Mark the MSB if it is a
     * 64-bit access.
     */
    lir->aliasInfo = regId;
    if (DOUBLEREG(lir->operands[0])) {
        lir->aliasInfo |= 0x80000000;
    }
}
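
/*
 * Example of the resulting encoding: with regId == 4, a non-wide access
 * leaves aliasInfo == 0x00000004, while an access whose first operand is a
 * double register (a 64-bit access) also sets the MSB, giving 0x80000004.
 */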

/*
 * Decode the register id into the bit position(s) it occupies in a resource
 * mask.
 */
static inline u8 getRegMaskCommon(int reg)
{
    u8 seed;
    int shift;
    int regId = reg & 0x1f;

    /*
     * Each double register is equal to a pair of single-precision FP
     * registers.
     */
    seed = DOUBLEREG(reg) ? 3 : 1;
    /* FP registers start at bit position 16 */
    shift = FPREG(reg) ? kFPReg0 : 0;
    /* Expand the double register id into a single-register offset */
    shift += regId;
    return (seed << shift);
}
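
/*
 * Worked example of the mask layout (a sketch; assumes kFPReg0 == 16 and the
 * usual numbering in which double register dr(n) aliases fr(2n)/fr(2n+1)):
 * a core register such as r3 maps to (1 << 3); single-precision fr5 maps to
 * (1 << (16 + 5)); double dr2 maps to (3 << (16 + 4)), covering both of its
 * single-precision halves.
 */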

/*
 * Mark the corresponding bit(s).
 */
static inline void setupRegMask(u8* mask, int reg)
{
    *mask |= getRegMaskCommon(reg);
}

/*
 * Set up the proper fields in the resource mask
 */
static void setupResourceMasks(ArmLIR* lir)
{
    int opcode = lir->opcode;
    int flags;

    if (opcode <= 0) {
        lir->useMask = lir->defMask = 0;
        return;
    }

    flags = EncodingMap[lir->opcode].flags;

    /* Set up the mask for resources that are updated */
    if (flags & (IS_LOAD | IS_STORE)) {
        /* Default to heap - will catch specialized classes later */
        setMemRefType(lir, flags & IS_LOAD, kHeapRef);
    }

    /*
     * Conservatively assume that the branch will call out to a function that
     * in turn will trash everything.
     */
    if (flags & IS_BRANCH) {
        lir->defMask = lir->useMask = ENCODE_ALL;
        return;
    }

    if (flags & REG_DEF0) {
        setupRegMask(&lir->defMask, lir->operands[0]);
    }

    if (flags & REG_DEF1) {
        setupRegMask(&lir->defMask, lir->operands[1]);
    }

    if (flags & REG_DEF_SP) {
        lir->defMask |= ENCODE_REG_SP;
    }

    if (flags & REG_DEF_LR) {
        lir->defMask |= ENCODE_REG_LR;
    }

    if (flags & REG_DEF_LIST0) {
        lir->defMask |= ENCODE_REG_LIST(lir->operands[0]);
    }

    if (flags & REG_DEF_LIST1) {
        lir->defMask |= ENCODE_REG_LIST(lir->operands[1]);
    }

    if (flags & REG_DEF_FPCS_LIST0) {
        lir->defMask |= ENCODE_REG_FPCS_LIST(lir->operands[0]);
    }

    if (flags & SETS_CCODES) {
        lir->defMask |= ENCODE_CCODE;
    }

    /* Conservatively treat the IT block */
    if (flags & IS_IT) {
        lir->defMask = ENCODE_ALL;
    }

    if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
        int i;

        for (i = 0; i < 4; i++) {
            if (flags & (1 << (kRegUse0 + i))) {
                setupRegMask(&lir->useMask, lir->operands[i]);
            }
        }
    }

    if (flags & REG_USE_PC) {
        lir->useMask |= ENCODE_REG_PC;
    }

    if (flags & REG_USE_SP) {
        lir->useMask |= ENCODE_REG_SP;
    }

    if (flags & REG_USE_LIST0) {
        lir->useMask |= ENCODE_REG_LIST(lir->operands[0]);
    }

    if (flags & REG_USE_LIST1) {
        lir->useMask |= ENCODE_REG_LIST(lir->operands[1]);
    }

    if (flags & REG_USE_FPCS_LIST0) {
        lir->useMask |= ENCODE_REG_FPCS_LIST(lir->operands[0]);
    }

    if (flags & REG_USE_FPCS_LIST2) {
        lir->useMask |= ENCODE_REG_FPCS_LIST(lir->operands[2] >> 16);
    }

    if (flags & USES_CCODES) {
        lir->useMask |= ENCODE_CCODE;
    }

    /* Fixup for kThumbPush/lr and kThumbPop/pc */
    if (opcode == kThumbPush || opcode == kThumbPop) {
        u8 r8Mask = getRegMaskCommon(r8);
        if ((opcode == kThumbPush) && (lir->useMask & r8Mask)) {
            lir->useMask &= ~r8Mask;
            lir->useMask |= ENCODE_REG_LR;
        } else if ((opcode == kThumbPop) && (lir->defMask & r8Mask)) {
            lir->defMask &= ~r8Mask;
            lir->defMask |= ENCODE_REG_PC;
        }
    }
}
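
/*
 * Illustrative example (a sketch; the flag combination is assumed rather than
 * copied from an EncodingMap entry): a Thumb load of the form
 * "ldr rDest, [rBase, #imm]" whose entry carries IS_LOAD | REG_DEF0 | REG_USE1
 * would end up with defMask covering rDest, and useMask covering rBase plus
 * ENCODE_HEAP_REF, since loads default to the heap memref class above.
 */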

/*
 * Set up the accurate resource mask for branch instructions
 */
static void relaxBranchMasks(ArmLIR* lir)
{
    int flags = EncodingMap[lir->opcode].flags;

    /* Make sure only branch instructions are passed here */
    assert(flags & IS_BRANCH);

    lir->useMask = lir->defMask = ENCODE_REG_PC;

    if (flags & REG_DEF_LR) {
        lir->defMask |= ENCODE_REG_LR;
    }

    if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
        int i;

        for (i = 0; i < 4; i++) {
            if (flags & (1 << (kRegUse0 + i))) {
                setupRegMask(&lir->useMask, lir->operands[i]);
            }
        }
    }

    if (flags & USES_CCODES) {
        lir->useMask |= ENCODE_CCODE;
    }
}

/*
 * The following are building blocks to construct low-level IRs with 0 - 4
 * operands.
 */
static ArmLIR* newLIR0(CompilationUnit* cUnit, ArmOpcode opcode)
{
    ArmLIR* insn = (ArmLIR*) oatNew(sizeof(ArmLIR), true);
    assert(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & NO_OPERAND));
    insn->opcode = opcode;
    setupResourceMasks(insn);
    insn->generic.dalvikOffset = cUnit->currentDalvikOffset;
    oatAppendLIR(cUnit, (LIR*) insn);
    return insn;
}

static ArmLIR* newLIR1(CompilationUnit* cUnit, ArmOpcode opcode,
                       int dest)
{
    ArmLIR* insn = (ArmLIR*) oatNew(sizeof(ArmLIR), true);
    assert(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & IS_UNARY_OP));
    insn->opcode = opcode;
    insn->operands[0] = dest;
    setupResourceMasks(insn);
    insn->generic.dalvikOffset = cUnit->currentDalvikOffset;
    oatAppendLIR(cUnit, (LIR*) insn);
    return insn;
}

static ArmLIR* newLIR2(CompilationUnit* cUnit, ArmOpcode opcode,
                       int dest, int src1)
{
    ArmLIR* insn = (ArmLIR*) oatNew(sizeof(ArmLIR), true);
    assert(isPseudoOpcode(opcode) ||
           (EncodingMap[opcode].flags & IS_BINARY_OP));
    insn->opcode = opcode;
    insn->operands[0] = dest;
    insn->operands[1] = src1;
    setupResourceMasks(insn);
    insn->generic.dalvikOffset = cUnit->currentDalvikOffset;
    oatAppendLIR(cUnit, (LIR*) insn);
    return insn;
}

static ArmLIR* newLIR3(CompilationUnit* cUnit, ArmOpcode opcode,
                       int dest, int src1, int src2)
{
    ArmLIR* insn = (ArmLIR*) oatNew(sizeof(ArmLIR), true);
    assert(isPseudoOpcode(opcode) ||
           (EncodingMap[opcode].flags & IS_TERTIARY_OP));
    insn->opcode = opcode;
    insn->operands[0] = dest;
    insn->operands[1] = src1;
    insn->operands[2] = src2;
    setupResourceMasks(insn);
    insn->generic.dalvikOffset = cUnit->currentDalvikOffset;
    oatAppendLIR(cUnit, (LIR*) insn);
    return insn;
}

#if defined(_ARMV7_A) || defined(_ARMV7_A_NEON)
static ArmLIR* newLIR4(CompilationUnit* cUnit, ArmOpcode opcode,
                       int dest, int src1, int src2, int info)
{
    ArmLIR* insn = (ArmLIR*) oatNew(sizeof(ArmLIR), true);
    assert(isPseudoOpcode(opcode) ||
           (EncodingMap[opcode].flags & IS_QUAD_OP));
    insn->opcode = opcode;
    insn->operands[0] = dest;
    insn->operands[1] = src1;
    insn->operands[2] = src2;
    insn->operands[3] = info;
    setupResourceMasks(insn);
    insn->generic.dalvikOffset = cUnit->currentDalvikOffset;
    oatAppendLIR(cUnit, (LIR*) insn);
    return insn;
}
#endif
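
/*
 * Usage sketch (the opcode names are assumed to exist in EncodingMap and are
 * shown for illustration only): emitting a register-to-register move and an
 * add-immediate would look like
 *
 *     newLIR2(cUnit, kThumbMovRR, rDest, rSrc);
 *     newLIR3(cUnit, kThumbAddRRI3, rDest, rSrc, 1);
 *
 * Each call allocates the ArmLIR node, fills in the operands, derives the
 * use/def masks via setupResourceMasks(), and appends the node to the
 * compilation unit's instruction stream.
 */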

/*
 * Search the existing constants in the literal pool for an exact or close
 * match within the specified delta (greater than or equal to 0).
 */
static ArmLIR* scanLiteralPool(LIR* dataTarget, int value, unsigned int delta)
{
    while (dataTarget) {
        if (((unsigned) (value - ((ArmLIR*) dataTarget)->operands[0])) <=
            delta)
            return (ArmLIR*) dataTarget;
        dataTarget = dataTarget->next;
    }
    return NULL;
}

/*
 * The following are building blocks to insert constants into the pool or
 * instruction streams.
 */

/* Add a 32-bit constant either in the constant pool or mixed with code */
static ArmLIR* addWordData(CompilationUnit* cUnit, LIR** constantListP,
                           int value)
{
    /* Add the constant to the literal pool */
    if (constantListP) {
        ArmLIR* newValue = (ArmLIR*) oatNew(sizeof(ArmLIR), true);
        newValue->operands[0] = value;
        newValue->generic.next = *constantListP;
        *constantListP = (LIR*) newValue;
        return newValue;
    } else {
        /* Add the constant in the middle of the code stream */
        newLIR1(cUnit, kArm16BitData, (value & 0xffff));
        newLIR1(cUnit, kArm16BitData, (value >> 16));
    }
    return NULL;
}
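
/*
 * Usage sketch (assuming cUnit->literalList is the word pool that literal
 * loads are later resolved against): to reuse an existing constant when
 * possible and otherwise append a new one,
 *
 *     ArmLIR* dataTarget = scanLiteralPool(cUnit->literalList, value, 0);
 *     if (dataTarget == NULL) {
 *         dataTarget = addWordData(cUnit, &cUnit->literalList, value);
 *     }
 *
 * A delta of 0 requests an exact match; a non-zero delta also accepts any
 * pooled constant within that distance of the requested value.
 */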

/*
 * Generate a kArmPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
static void genBarrier(CompilationUnit* cUnit)
{
    ArmLIR* barrier = newLIR0(cUnit, kArmPseudoBarrier);
    /* Mark all resources as being clobbered */
    barrier->defMask = -1;
}

/* Create the PC reconstruction slot if not already done */
static ArmLIR* genCheckCommon(CompilationUnit* cUnit, int dOffset,
                              ArmLIR* branch,
                              ArmLIR* pcrLabel)
{
    // FIXME - won't be rolling back; need to throw now.
    UNIMPLEMENTED(WARNING);
#if 0

    /* Forget all def info (because we might roll back here - see bug #2367397) */
    oatResetDefTracking(cUnit);

    /* Set up the placeholder to reconstruct this Dalvik PC */
    if (pcrLabel == NULL) {
        int dPC = (int) (cUnit->insns + dOffset);
        pcrLabel = (ArmLIR*) oatNew(sizeof(ArmLIR), true);
        pcrLabel->opcode = kArmPseudoPCReconstructionCell;
        pcrLabel->operands[0] = dPC;
        pcrLabel->operands[1] = dOffset;
        /* Insert the placeholder into the growable list */
        oatInsertGrowableList(&cUnit->pcReconstructionList,
                              (intptr_t) pcrLabel);
    }
#endif
    /* Branch to the PC reconstruction code */
    branch->generic.target = (LIR*) pcrLabel;

    /* Clear the conservative flags for branches that punt to the interpreter */
    relaxBranchMasks(branch);

    return pcrLabel;
}