/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This file contains codegen and support common to all supported
 * ARM variants. It is included by:
 *
 *        Codegen-$(TARGET_ARCH_VARIANT).c
 *
 * which combines this common code with specific support found in the
 * applicable directory below this one.
 */

/* Track exercised opcodes */
static int opcodeCoverage[kNumPackedOpcodes];

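/*
 * Mark the use/def mask of a load or store with the kind of memory it
 * touches (literal pool, Dalvik register, heap, or must-not-alias).
 */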
static void setMemRefType(ArmLIR* lir, bool isLoad, int memType)
{
    u8 *maskPtr;
    u8 mask = ENCODE_MEM;
    assert(EncodingMap[lir->opcode].flags & (IS_LOAD | IS_STORE));
    if (isLoad) {
        maskPtr = &lir->useMask;
    } else {
        maskPtr = &lir->defMask;
    }
    /* Clear out the memref flags */
    *maskPtr &= ~mask;
    /* ..and then add back the one we need */
    switch(memType) {
        case kLiteral:
            assert(isLoad);
            *maskPtr |= ENCODE_LITERAL;
            break;
        case kDalvikReg:
            *maskPtr |= ENCODE_DALVIK_REG;
            break;
        case kHeapRef:
            *maskPtr |= ENCODE_HEAP_REF;
            break;
        case kMustNotAlias:
            /* Currently only loads can be marked as kMustNotAlias */
            assert(!(EncodingMap[lir->opcode].flags & IS_STORE));
            *maskPtr |= ENCODE_MUST_NOT_ALIAS;
            break;
        default:
            LOG(FATAL) << "Oat: invalid memref kind - " << memType;
    }
}

/*
 * Mark load/store instructions that access Dalvik registers through r5FP +
 * offset.
 */
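/*
 * (Illustrative) If operands[0] is a double-precision register, an access
 * to Dalvik register id 4 is recorded as aliasInfo == (4 | 0x80000000);
 * otherwise aliasInfo is simply 4.
 */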
static void annotateDalvikRegAccess(ArmLIR* lir, int regId, bool isLoad)
{
    setMemRefType(lir, isLoad, kDalvikReg);

    /*
     * Store the Dalvik register id in aliasInfo. Mark the MSB if it is a
     * 64-bit access.
     */
    lir->aliasInfo = regId;
    if (DOUBLEREG(lir->operands[0])) {
        lir->aliasInfo |= 0x80000000;
    }
}

/*
 * Decode the register id.
 */
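/*
 * (Note) Core registers map to single bits starting at position 0 and FP
 * registers to bits starting at kFPReg0; a double-precision register sets
 * two adjacent bits (seed == 3), one for each of its aliased
 * single-precision halves.
 */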
static inline u8 getRegMaskCommon(int reg)
{
    u8 seed;
    int shift;
    int regId = reg & 0x1f;

    /*
     * Each double register is equal to a pair of single-precision FP registers
     */
    seed = DOUBLEREG(reg) ? 3 : 1;
    /* FP register starts at bit position 16 */
    shift = FPREG(reg) ? kFPReg0 : 0;
    /* Expand the double register id into single offset */
    shift += regId;
    return (seed << shift);
}

/*
 * Mark the corresponding bit(s).
 */
static inline void setupRegMask(u8* mask, int reg)
{
    *mask |= getRegMaskCommon(reg);
}

/*
 * Set up the proper fields in the resource mask
 */
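/*
 * Illustrative example (flag names only, not a real EncodingMap entry): a
 * load whose flags include IS_LOAD | REG_DEF0 | REG_USE1 ends up with
 * operand 0 in defMask, and operand 1 plus the heap-memory bit in useMask.
 */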
static void setupResourceMasks(ArmLIR* lir)
{
    int opcode = lir->opcode;
    int flags;

    if (opcode <= 0) {
        lir->useMask = lir->defMask = 0;
        return;
    }

    flags = EncodingMap[lir->opcode].flags;

    /* Set up the mask for resources that are updated */
    if (flags & (IS_LOAD | IS_STORE)) {
        /* Default to heap - will catch specialized classes later */
        setMemRefType(lir, flags & IS_LOAD, kHeapRef);
    }

    /*
     * Conservatively assume the branch here will call out to a function
     * that in turn will trash everything.
     */
    if (flags & IS_BRANCH) {
        lir->defMask = lir->useMask = ENCODE_ALL;
        return;
    }

    if (flags & REG_DEF0) {
        setupRegMask(&lir->defMask, lir->operands[0]);
    }

    if (flags & REG_DEF1) {
        setupRegMask(&lir->defMask, lir->operands[1]);
    }

    if (flags & REG_DEF_SP) {
        lir->defMask |= ENCODE_REG_SP;
    }

    if (flags & REG_DEF_LR) {
        lir->defMask |= ENCODE_REG_LR;
    }

    if (flags & REG_DEF_LIST0) {
        lir->defMask |= ENCODE_REG_LIST(lir->operands[0]);
    }

    if (flags & REG_DEF_LIST1) {
        lir->defMask |= ENCODE_REG_LIST(lir->operands[1]);
    }

    if (flags & REG_DEF_FPCS_LIST0) {
        lir->defMask |= ENCODE_REG_FPCS_LIST(lir->operands[0]);
    }

    if (flags & SETS_CCODES) {
        lir->defMask |= ENCODE_CCODE;
    }

    /* Conservatively treat the IT block */
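    /*
     * (Note) An IT instruction predicates up to four following Thumb-2
     * instructions; marking it as defining every resource keeps passes that
     * consult these masks from reordering code into or out of the block.
     */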
    if (flags & IS_IT) {
        lir->defMask = ENCODE_ALL;
    }

    if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
        int i;

        for (i = 0; i < 4; i++) {
            if (flags & (1 << (kRegUse0 + i))) {
                setupRegMask(&lir->useMask, lir->operands[i]);
            }
        }
    }

    if (flags & REG_USE_PC) {
        lir->useMask |= ENCODE_REG_PC;
    }

    if (flags & REG_USE_SP) {
        lir->useMask |= ENCODE_REG_SP;
    }

    if (flags & REG_USE_LIST0) {
        lir->useMask |= ENCODE_REG_LIST(lir->operands[0]);
    }

    if (flags & REG_USE_LIST1) {
        lir->useMask |= ENCODE_REG_LIST(lir->operands[1]);
    }

    if (flags & REG_USE_FPCS_LIST0) {
        lir->useMask |= ENCODE_REG_FPCS_LIST(lir->operands[0]);
    }

    if (flags & REG_USE_FPCS_LIST2) {
        lir->useMask |= ENCODE_REG_FPCS_LIST(lir->operands[2] >> 16);
    }

    if (flags & USES_CCODES) {
        lir->useMask |= ENCODE_CCODE;
    }

    /* Fixup for kThumbPush/lr and kThumbPop/pc */
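    /*
     * (Note) The 16-bit push/pop register list uses bit 8 for LR (push) or
     * PC (pop); ENCODE_REG_LIST above maps that bit to r8, so translate it
     * into the real resource here.
     */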
    if (opcode == kThumbPush || opcode == kThumbPop) {
        u8 r8Mask = getRegMaskCommon(r8);
        if ((opcode == kThumbPush) && (lir->useMask & r8Mask)) {
            lir->useMask &= ~r8Mask;
            lir->useMask |= ENCODE_REG_LR;
        } else if ((opcode == kThumbPop) && (lir->defMask & r8Mask)) {
            lir->defMask &= ~r8Mask;
            lir->defMask |= ENCODE_REG_PC;
        }
    }
}

/*
 * The following are building blocks to construct low-level IRs with 0 - 4
 * operands.
 */
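/*
 * Illustrative usage (assuming a three-operand add opcode is defined in
 * EncodingMap), emitting "adds r0, r1, r2":
 *     newLIR3(cUnit, kThumbAddRRR, r0, r1, r2);
 */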
static ArmLIR* newLIR0(CompilationUnit* cUnit, ArmOpcode opcode)
{
    ArmLIR* insn = (ArmLIR*) oatNew(sizeof(ArmLIR), true);
    assert(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & NO_OPERAND));
    insn->opcode = opcode;
    setupResourceMasks(insn);
    insn->generic.dalvikOffset = cUnit->currentDalvikOffset;
    oatAppendLIR(cUnit, (LIR*) insn);
    return insn;
}

static ArmLIR* newLIR1(CompilationUnit* cUnit, ArmOpcode opcode,
                       int dest)
{
    ArmLIR* insn = (ArmLIR*) oatNew(sizeof(ArmLIR), true);
    assert(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & IS_UNARY_OP));
    insn->opcode = opcode;
    insn->operands[0] = dest;
    setupResourceMasks(insn);
    insn->generic.dalvikOffset = cUnit->currentDalvikOffset;
    oatAppendLIR(cUnit, (LIR*) insn);
    return insn;
}

static ArmLIR* newLIR2(CompilationUnit* cUnit, ArmOpcode opcode,
                       int dest, int src1)
{
    ArmLIR* insn = (ArmLIR*) oatNew(sizeof(ArmLIR), true);
    assert(isPseudoOpcode(opcode) ||
           (EncodingMap[opcode].flags & IS_BINARY_OP));
    insn->opcode = opcode;
    insn->operands[0] = dest;
    insn->operands[1] = src1;
    setupResourceMasks(insn);
    insn->generic.dalvikOffset = cUnit->currentDalvikOffset;
    oatAppendLIR(cUnit, (LIR*) insn);
    return insn;
}

static ArmLIR* newLIR3(CompilationUnit* cUnit, ArmOpcode opcode,
                       int dest, int src1, int src2)
{
    ArmLIR* insn = (ArmLIR*) oatNew(sizeof(ArmLIR), true);
    assert(isPseudoOpcode(opcode) ||
           (EncodingMap[opcode].flags & IS_TERTIARY_OP));
    insn->opcode = opcode;
    insn->operands[0] = dest;
    insn->operands[1] = src1;
    insn->operands[2] = src2;
    setupResourceMasks(insn);
    insn->generic.dalvikOffset = cUnit->currentDalvikOffset;
    oatAppendLIR(cUnit, (LIR*) insn);
    return insn;
}

#if defined(_ARMV7_A) || defined(_ARMV7_A_NEON)
static ArmLIR* newLIR4(CompilationUnit* cUnit, ArmOpcode opcode,
                       int dest, int src1, int src2, int info)
{
    ArmLIR* insn = (ArmLIR*) oatNew(sizeof(ArmLIR), true);
    assert(isPseudoOpcode(opcode) ||
           (EncodingMap[opcode].flags & IS_QUAD_OP));
    insn->opcode = opcode;
    insn->operands[0] = dest;
    insn->operands[1] = src1;
    insn->operands[2] = src2;
    insn->operands[3] = info;
    setupResourceMasks(insn);
    insn->generic.dalvikOffset = cUnit->currentDalvikOffset;
    oatAppendLIR(cUnit, (LIR*) insn);
    return insn;
}
#endif

/*
 * Search the existing constants in the literal pool for an exact or close
 * match within the specified delta (greater than or equal to 0).
 */
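/*
 * (Note) The unsigned comparison below accepts any pool constant c with
 * 0 <= value - c <= delta, i.e. constants at or slightly below "value".
 */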
static ArmLIR* scanLiteralPool(LIR* dataTarget, int value, unsigned int delta)
{
    while (dataTarget) {
        if (((unsigned) (value - ((ArmLIR*) dataTarget)->operands[0])) <=
            delta)
            return (ArmLIR*) dataTarget;
        dataTarget = dataTarget->next;
    }
    return NULL;
}

/*
 * The following are building blocks to insert constants into the pool or
 * instruction streams.
 */

/* Add a 32-bit constant either in the constant pool or mixed with code */
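/*
 * Illustrative usage: with a non-NULL list pointer, e.g.
 * addWordData(cUnit, &cUnit->literalList, value) (the literalList field
 * name is assumed here), the constant is prepended to that pool; with a
 * NULL list pointer it is emitted inline as two kArm16BitData halfwords.
 */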
static ArmLIR* addWordData(CompilationUnit* cUnit, LIR** constantListP,
                           int value)
{
    /* Add the constant to the literal pool */
    if (constantListP) {
        ArmLIR* newValue = (ArmLIR*) oatNew(sizeof(ArmLIR), true);
        newValue->operands[0] = value;
        newValue->generic.next = *constantListP;
        *constantListP = (LIR*) newValue;
        return newValue;
    } else {
        /* Add the constant in the middle of code stream */
        newLIR1(cUnit, kArm16BitData, (value & 0xffff));
        newLIR1(cUnit, kArm16BitData, (value >> 16));
    }
    return NULL;
}

/*
 * Generate a kArmPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
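/*
 * (Note) Because the barrier's defMask covers every resource, passes that
 * consult these masks will not move memory operations across it.
 */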
static void genBarrier(CompilationUnit* cUnit)
{
    ArmLIR* barrier = newLIR0(cUnit, kArmPseudoBarrier);
    /* Mark all resources as being clobbered */
    barrier->defMask = -1;
}