/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_THUMB2_H_
#define ART_COMPILER_UTILS_ARM_ASSEMBLER_THUMB2_H_

#include <deque>
#include <utility>
#include <vector>

#include "base/arena_containers.h"
#include "base/logging.h"
#include "constants_arm.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/arm/assembler_arm.h"
#include "utils/array_ref.h"
#include "offsets.h"

namespace art {
namespace arm {

class Thumb2Assembler FINAL : public ArmAssembler {
 public:
  explicit Thumb2Assembler(ArenaAllocator* arena, bool can_relocate_branches = true)
      : ArmAssembler(arena),
        can_relocate_branches_(can_relocate_branches),
        force_32bit_(false),
        it_cond_index_(kNoItCondition),
        next_condition_(AL),
        fixups_(arena->Adapter(kArenaAllocAssembler)),
        fixup_dependents_(arena->Adapter(kArenaAllocAssembler)),
        literals_(arena->Adapter(kArenaAllocAssembler)),
        literal64_dedupe_map_(std::less<uint64_t>(), arena->Adapter(kArenaAllocAssembler)),
        jump_tables_(arena->Adapter(kArenaAllocAssembler)),
        last_position_adjustment_(0u),
        last_old_position_(0u),
        last_fixup_id_(0u) {
    cfi().DelayEmittingAdvancePCs();
  }

  virtual ~Thumb2Assembler() {
  }

  bool IsThumb() const OVERRIDE {
    return true;
  }

  bool IsForced32Bit() const {
    return force_32bit_;
  }

  bool CanRelocateBranches() const {
    return can_relocate_branches_;
  }

  void FinalizeCode() OVERRIDE;

  // Data-processing instructions.
  virtual void and_(Register rd, Register rn, const ShifterOperand& so,
                    Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;

  virtual void eor(Register rd, Register rn, const ShifterOperand& so,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;

  virtual void sub(Register rd, Register rn, const ShifterOperand& so,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;

  virtual void rsb(Register rd, Register rn, const ShifterOperand& so,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;

  virtual void add(Register rd, Register rn, const ShifterOperand& so,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;

  virtual void adc(Register rd, Register rn, const ShifterOperand& so,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;

  virtual void sbc(Register rd, Register rn, const ShifterOperand& so,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;

  virtual void rsc(Register rd, Register rn, const ShifterOperand& so,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;

  void tst(Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;

  void teq(Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;

  void cmp(Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;

  void cmn(Register rn, const ShifterOperand& so, Condition cond = AL) OVERRIDE;

  virtual void orr(Register rd, Register rn, const ShifterOperand& so,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;

  virtual void orn(Register rd, Register rn, const ShifterOperand& so,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;

  virtual void mov(Register rd, const ShifterOperand& so,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;

  virtual void bic(Register rd, Register rn, const ShifterOperand& so,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;

  virtual void mvn(Register rd, const ShifterOperand& so,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;

  // Miscellaneous data-processing instructions.
  void clz(Register rd, Register rm, Condition cond = AL) OVERRIDE;
  void movw(Register rd, uint16_t imm16, Condition cond = AL) OVERRIDE;
  void movt(Register rd, uint16_t imm16, Condition cond = AL) OVERRIDE;
  void rbit(Register rd, Register rm, Condition cond = AL) OVERRIDE;
  void rev(Register rd, Register rm, Condition cond = AL) OVERRIDE;
  void rev16(Register rd, Register rm, Condition cond = AL) OVERRIDE;
  void revsh(Register rd, Register rm, Condition cond = AL) OVERRIDE;

  // Multiply instructions.
  void mul(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
  void mla(Register rd, Register rn, Register rm, Register ra,
           Condition cond = AL) OVERRIDE;
  void mls(Register rd, Register rn, Register rm, Register ra,
           Condition cond = AL) OVERRIDE;
  void smull(Register rd_lo, Register rd_hi, Register rn, Register rm,
             Condition cond = AL) OVERRIDE;
  void umull(Register rd_lo, Register rd_hi, Register rn, Register rm,
             Condition cond = AL) OVERRIDE;

  void sdiv(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;
  void udiv(Register rd, Register rn, Register rm, Condition cond = AL) OVERRIDE;

  // Bit field extract instructions.
  void sbfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond = AL) OVERRIDE;
  void ubfx(Register rd, Register rn, uint32_t lsb, uint32_t width, Condition cond = AL) OVERRIDE;

  // Load/store instructions.
  void ldr(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
  void str(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;

  void ldrb(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
  void strb(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;

  void ldrh(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
  void strh(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;

  void ldrsb(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
  void ldrsh(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;

  // Load/store register dual instructions using registers `rd` and `rd` + 1.
  void ldrd(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;
  void strd(Register rd, const Address& ad, Condition cond = AL) OVERRIDE;

  // Load/store register dual instructions using registers `rd` and `rd2`.
  // Note that contrary to the ARM A1 encoding, the Thumb-2 T1 encoding
  // does not require `rd` to be even, nor `rd2` to be equal to `rd` + 1.
  void ldrd(Register rd, Register rd2, const Address& ad, Condition cond);
  void strd(Register rd, Register rd2, const Address& ad, Condition cond);
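  // Illustrative (hypothetical) example of the two-register forms above: with the Thumb-2 T1
  // encoding, ldrd(R0, R2, Address(SP, 8), AL) is representable even though R2 != R0 + 1,
  // whereas the ARM A1 encoding would require an even `rd` with `rd2` == `rd` + 1.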


  void ldm(BlockAddressMode am, Register base,
           RegList regs, Condition cond = AL) OVERRIDE;
  void stm(BlockAddressMode am, Register base,
           RegList regs, Condition cond = AL) OVERRIDE;

  void ldrex(Register rd, Register rn, Condition cond = AL) OVERRIDE;
  void strex(Register rd, Register rt, Register rn, Condition cond = AL) OVERRIDE;

  void ldrex(Register rd, Register rn, uint16_t imm, Condition cond = AL);
  void strex(Register rd, Register rt, Register rn, uint16_t imm, Condition cond = AL);

  void ldrexd(Register rt, Register rt2, Register rn, Condition cond = AL) OVERRIDE;
  void strexd(Register rd, Register rt, Register rt2, Register rn, Condition cond = AL) OVERRIDE;

  // Miscellaneous instructions.
  void clrex(Condition cond = AL) OVERRIDE;
  void nop(Condition cond = AL) OVERRIDE;

  void bkpt(uint16_t imm16) OVERRIDE;
  void svc(uint32_t imm24) OVERRIDE;

  // If-then
  void it(Condition firstcond, ItState i1 = kItOmitted,
          ItState i2 = kItOmitted, ItState i3 = kItOmitted) OVERRIDE;
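  // Illustrative sketch (not part of this interface; registers and values are hypothetical):
  // an ITTE block predicating three instructions on the Z flag.
  //   it(EQ, kItThen, kItElse);
  //   mov(R0, ShifterOperand(1), EQ);  // First "then" slot.
  //   mov(R1, ShifterOperand(2), EQ);  // Second "then" slot.
  //   mov(R0, ShifterOperand(0), NE);  // "Else" slot.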

  void cbz(Register rn, Label* target) OVERRIDE;
  void cbnz(Register rn, Label* target) OVERRIDE;

  // Floating point instructions (VFPv3-D16 and VFPv3-D32 profiles).
  void vmovsr(SRegister sn, Register rt, Condition cond = AL) OVERRIDE;
  void vmovrs(Register rt, SRegister sn, Condition cond = AL) OVERRIDE;
  void vmovsrr(SRegister sm, Register rt, Register rt2, Condition cond = AL) OVERRIDE;
  void vmovrrs(Register rt, Register rt2, SRegister sm, Condition cond = AL) OVERRIDE;
  void vmovdrr(DRegister dm, Register rt, Register rt2, Condition cond = AL) OVERRIDE;
  void vmovrrd(Register rt, Register rt2, DRegister dm, Condition cond = AL) OVERRIDE;
  void vmovs(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
  void vmovd(DRegister dd, DRegister dm, Condition cond = AL) OVERRIDE;

  // Returns false if the immediate cannot be encoded.
  bool vmovs(SRegister sd, float s_imm, Condition cond = AL) OVERRIDE;
  bool vmovd(DRegister dd, double d_imm, Condition cond = AL) OVERRIDE;

  void vldrs(SRegister sd, const Address& ad, Condition cond = AL) OVERRIDE;
  void vstrs(SRegister sd, const Address& ad, Condition cond = AL) OVERRIDE;
  void vldrd(DRegister dd, const Address& ad, Condition cond = AL) OVERRIDE;
  void vstrd(DRegister dd, const Address& ad, Condition cond = AL) OVERRIDE;

  void vadds(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) OVERRIDE;
  void vaddd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) OVERRIDE;
  void vsubs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) OVERRIDE;
  void vsubd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) OVERRIDE;
  void vmuls(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) OVERRIDE;
  void vmuld(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) OVERRIDE;
  void vmlas(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) OVERRIDE;
  void vmlad(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) OVERRIDE;
  void vmlss(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) OVERRIDE;
  void vmlsd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) OVERRIDE;
  void vdivs(SRegister sd, SRegister sn, SRegister sm, Condition cond = AL) OVERRIDE;
  void vdivd(DRegister dd, DRegister dn, DRegister dm, Condition cond = AL) OVERRIDE;

  void vabss(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
  void vabsd(DRegister dd, DRegister dm, Condition cond = AL) OVERRIDE;
  void vnegs(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
  void vnegd(DRegister dd, DRegister dm, Condition cond = AL) OVERRIDE;
  void vsqrts(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
  void vsqrtd(DRegister dd, DRegister dm, Condition cond = AL) OVERRIDE;

  void vcvtsd(SRegister sd, DRegister dm, Condition cond = AL) OVERRIDE;
  void vcvtds(DRegister dd, SRegister sm, Condition cond = AL) OVERRIDE;
  void vcvtis(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
  void vcvtid(SRegister sd, DRegister dm, Condition cond = AL) OVERRIDE;
  void vcvtsi(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
  void vcvtdi(DRegister dd, SRegister sm, Condition cond = AL) OVERRIDE;
  void vcvtus(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
  void vcvtud(SRegister sd, DRegister dm, Condition cond = AL) OVERRIDE;
  void vcvtsu(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
  void vcvtdu(DRegister dd, SRegister sm, Condition cond = AL) OVERRIDE;

  void vcmps(SRegister sd, SRegister sm, Condition cond = AL) OVERRIDE;
  void vcmpd(DRegister dd, DRegister dm, Condition cond = AL) OVERRIDE;
  void vcmpsz(SRegister sd, Condition cond = AL) OVERRIDE;
  void vcmpdz(DRegister dd, Condition cond = AL) OVERRIDE;
  void vmstat(Condition cond = AL) OVERRIDE;  // VMRS APSR_nzcv, FPSCR

  void vcntd(DRegister dd, DRegister dm) OVERRIDE;
  void vpaddld(DRegister dd, DRegister dm, int32_t size, bool is_unsigned) OVERRIDE;

  void vpushs(SRegister reg, int nregs, Condition cond = AL) OVERRIDE;
  void vpushd(DRegister reg, int nregs, Condition cond = AL) OVERRIDE;
  void vpops(SRegister reg, int nregs, Condition cond = AL) OVERRIDE;
  void vpopd(DRegister reg, int nregs, Condition cond = AL) OVERRIDE;

  // Branch instructions.
  void b(Label* label, Condition cond = AL);
  void bl(Label* label, Condition cond = AL);
  void blx(Label* label);
  void blx(Register rm, Condition cond = AL) OVERRIDE;
  void bx(Register rm, Condition cond = AL) OVERRIDE;

  virtual void Lsl(Register rd, Register rm, uint32_t shift_imm,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
  virtual void Lsr(Register rd, Register rm, uint32_t shift_imm,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
  virtual void Asr(Register rd, Register rm, uint32_t shift_imm,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
  virtual void Ror(Register rd, Register rm, uint32_t shift_imm,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
  virtual void Rrx(Register rd, Register rm,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;

  virtual void Lsl(Register rd, Register rm, Register rn,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
  virtual void Lsr(Register rd, Register rm, Register rn,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
  virtual void Asr(Register rd, Register rm, Register rn,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;
  virtual void Ror(Register rd, Register rm, Register rn,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;

  void Push(Register rd, Condition cond = AL) OVERRIDE;
  void Pop(Register rd, Condition cond = AL) OVERRIDE;

  void PushList(RegList regs, Condition cond = AL) OVERRIDE;
  void PopList(RegList regs, Condition cond = AL) OVERRIDE;

  void Mov(Register rd, Register rm, Condition cond = AL) OVERRIDE;

  void CompareAndBranchIfZero(Register r, Label* label) OVERRIDE;
  void CompareAndBranchIfNonZero(Register r, Label* label) OVERRIDE;

  // Memory barriers.
  void dmb(DmbOptions flavor) OVERRIDE;

  // Get the final position of a label after local fixup based on the old position
  // recorded before FinalizeCode().
  uint32_t GetAdjustedPosition(uint32_t old_position) OVERRIDE;
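  // Illustrative example (hypothetical offsets): if a label was recorded at old position 8
  // and a preceding 16-bit branch grew to 32 bits in FinalizeCode(), GetAdjustedPosition(8)
  // returns 10, i.e. the old position plus the 2 bytes inserted before it.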

  using ArmAssembler::NewLiteral;  // Make the helper template visible.

  Literal* NewLiteral(size_t size, const uint8_t* data) OVERRIDE;
  void LoadLiteral(Register rt, Literal* literal) OVERRIDE;
  void LoadLiteral(Register rt, Register rt2, Literal* literal) OVERRIDE;
  void LoadLiteral(SRegister sd, Literal* literal) OVERRIDE;
  void LoadLiteral(DRegister dd, Literal* literal) OVERRIDE;

  // Add signed constant value to rd. May clobber IP.
  void AddConstant(Register rd, Register rn, int32_t value,
                   Condition cond = AL, SetCc set_cc = kCcDontCare) OVERRIDE;

  void CmpConstant(Register rn, int32_t value, Condition cond = AL) OVERRIDE;

  // Load and Store. May clobber IP.
  void LoadImmediate(Register rd, int32_t value, Condition cond = AL) OVERRIDE;
  void LoadDImmediate(DRegister dd, double value, Condition cond = AL) OVERRIDE;
  void MarkExceptionHandler(Label* label) OVERRIDE;
  void LoadFromOffset(LoadOperandType type,
                      Register reg,
                      Register base,
                      int32_t offset,
                      Condition cond = AL) OVERRIDE;
  void StoreToOffset(StoreOperandType type,
                     Register reg,
                     Register base,
                     int32_t offset,
                     Condition cond = AL) OVERRIDE;
  void LoadSFromOffset(SRegister reg,
                       Register base,
                       int32_t offset,
                       Condition cond = AL) OVERRIDE;
  void StoreSToOffset(SRegister reg,
                      Register base,
                      int32_t offset,
                      Condition cond = AL) OVERRIDE;
  void LoadDFromOffset(DRegister reg,
                       Register base,
                       int32_t offset,
                       Condition cond = AL) OVERRIDE;
  void StoreDToOffset(DRegister reg,
                      Register base,
                      int32_t offset,
                      Condition cond = AL) OVERRIDE;

  bool ShifterOperandCanHold(Register rd,
                             Register rn,
                             Opcode opcode,
                             uint32_t immediate,
                             SetCc set_cc,
                             ShifterOperand* shifter_op) OVERRIDE;
  using ArmAssembler::ShifterOperandCanHold;  // Don't hide the non-virtual override.

  bool ShifterOperandCanAlwaysHold(uint32_t immediate) OVERRIDE;
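  // Illustrative examples (hypothetical values): ShifterOperandCanAlwaysHold(0xab000000)
  // returns true, since 0xab000000 is a Thumb-2 modified immediate (0xab rotated within a
  // word), whereas 0x12345678 is not encodable and needs movw/movt or a literal load instead.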


  static bool IsInstructionForExceptionHandling(uintptr_t pc);

  // Emit data (e.g. encoded instruction or immediate) to the instruction stream.
  void Emit32(int32_t value);  // Emit a 32 bit instruction in thumb format.
  void Emit16(int16_t value);  // Emit a 16 bit instruction in little endian format.
  void Bind(Label* label) OVERRIDE;

  // Force the assembler to generate 32 bit instructions.
  void Force32Bit() {
    force_32bit_ = true;
  }

  // Emit an ADR (or a sequence of instructions) to load the jump table address into base_reg.
  // This will generate a fixup.
  JumpTable* CreateJumpTable(std::vector<Label*>&& labels, Register base_reg) OVERRIDE;
  // Emit an ADD PC, X to dispatch a jump-table jump. This will generate a fixup.
  void EmitJumpTableDispatch(JumpTable* jump_table, Register displacement_reg) OVERRIDE;
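  // Illustrative sketch of the intended call sequence (register choices and the code that
  // loads the scaled table entry are hypothetical):
  //   JumpTable* table = CreateJumpTable(std::move(case_labels), R0);  // R0 := table address.
  //   ...load the table entry for the selected case into R1...
  //   EmitJumpTableDispatch(table, R1);                                // ADD PC, R1.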

 private:
  typedef uint16_t FixupId;

  // Fixup: branches and literal pool references.
  //
  // The thumb2 architecture allows branches to be either 16 or 32 bit instructions. This
  // depends on both the type of branch and the offset to which it is branching. The 16-bit
  // cbz and cbnz instructions may also need to be replaced with a separate 16-bit compare
  // instruction and a 16- or 32-bit branch instruction. A load from the literal pool can also
  // be a 16-bit or 32-bit instruction and, if the method is large, we may need to use a
  // sequence of instructions to make up for the limited range of load literal instructions
  // (up to 4KiB for the 32-bit variant). When generating code for these insns we don't know
  // the size beforehand, so we assume it is the smallest available size and determine the
  // final code offsets and sizes and emit code in FinalizeCode().
  //
  // To handle this, we keep a record of every branch and literal pool load in the program.
  // The actual instruction encoding for these is delayed until we know the final size of
  // every instruction. When we bind a label to a branch we don't know the final location yet
  // as some preceding instructions may need to be expanded, so we record a non-final offset.
  // In FinalizeCode(), we expand the sizes of branches and literal loads that are out of
  // range. With each expansion, we need to update dependent Fixups, i.e. instructions whose
  // target is on the other side of the expanded insn, as their offsets change and this may
  // trigger further expansion.
  //
  // All Fixups have a 'fixup id' which is a 16 bit unsigned number used to identify the
  // Fixup. For each unresolved label we keep a singly-linked list of all Fixups pointing
  // to it, using the fixup ids as links. The first link is stored in the label's position
  // (the label is linked but not bound), the following links are stored in the code buffer,
  // in the placeholder where we will eventually emit the actual code.
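  //
  // Illustrative example (the numbers are hypothetical, not taken from real code): a
  // conditional branch is first assumed to be a 16-bit B<cond>. If, in FinalizeCode(), its
  // target turns out to lie outside the roughly +-256 byte range of the 16-bit encoding,
  // AdjustSizeIfNeeded() grows it to the 32-bit encoding; every later Fixup's location then
  // shifts by 2 bytes and dependent Fixups get IncreaseAdjustment(2), which may in turn push
  // them out of their own range and trigger further expansion.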

  class Fixup {
   public:
    // Branch type.
    enum Type : uint8_t {
      kConditional,               // B<cond>.
      kUnconditional,             // B.
      kUnconditionalLink,         // BL.
      kUnconditionalLinkX,        // BLX.
      kCompareAndBranchXZero,     // cbz/cbnz.
      kLoadLiteralNarrow,         // Load narrow integer literal.
      kLoadLiteralWide,           // Load wide integer literal.
      kLoadLiteralAddr,           // Load address of literal (used for jump table).
      kLoadFPLiteralSingle,       // Load FP literal single.
      kLoadFPLiteralDouble,       // Load FP literal double.
    };

    // Calculated size of branch instruction based on type and offset.
    enum Size : uint8_t {
      // Branch variants.
      kBranch16Bit,
      kBranch32Bit,
      // NOTE: We don't support branches which would require multiple instructions, i.e.
      // conditional branches beyond +-1MiB and unconditional branches beyond +-16MiB.

      // CBZ/CBNZ variants.
      kCbxz16Bit,   // CBZ/CBNZ rX, label; X < 8; 7-bit positive offset.
      kCbxz32Bit,   // CMP rX, #0 + Bcc label; X < 8; 16-bit Bcc; +-8-bit offset.
      kCbxz48Bit,   // CMP rX, #0 + Bcc label; X < 8; 32-bit Bcc; up to +-1MiB offset.

      // Load integer literal variants.
      // LDR rX, label; X < 8; 16-bit variant up to 1KiB offset; 2 bytes.
      kLiteral1KiB,
      // LDR rX, label; 32-bit variant up to 4KiB offset; 4 bytes.
      kLiteral4KiB,
      // MOV rX, imm16 + ADD rX, pc + LDR rX, [rX]; X < 8; up to 64KiB offset; 8 bytes.
      kLiteral64KiB,
      // MOV rX, modimm + ADD rX, pc + LDR rX, [rX, #imm12]; up to 1MiB offset; 10 bytes.
      kLiteral1MiB,
      // NOTE: We don't provide the 12-byte version of kLiteralFar below where the LDR is 16-bit.
      // MOV rX, imm16 + MOVT rX, imm16 + ADD rX, pc + LDR rX, [rX]; any offset; 14 bytes.
      kLiteralFar,

      // Load literal base addr.
      // ADR rX, label; X < 8; 8 bit immediate, shifted to 10 bit. 2 bytes.
      kLiteralAddr1KiB,
      // ADR rX, label; 4KiB offset. 4 bytes.
      kLiteralAddr4KiB,
      // MOV rX, imm16 + ADD rX, pc; 64KiB offset. 6 bytes.
      kLiteralAddr64KiB,
      // MOV rX, imm16 + MOVT rX, imm16 + ADD rX, pc; any offset; 10 bytes.
      kLiteralAddrFar,

      // Load long or FP literal variants.
      // VLDR s/dX, label; 32-bit insn, up to 1KiB offset; 4 bytes.
      kLongOrFPLiteral1KiB,
      // MOV ip, imm16 + ADD ip, pc + VLDR s/dX, [IP, #0]; up to 64KiB offset; 10 bytes.
      kLongOrFPLiteral64KiB,
      // MOV ip, imm16 + MOVT ip, imm16 + ADD ip, pc + VLDR s/dX, [IP]; any offset; 14 bytes.
      kLongOrFPLiteralFar,
    };

    // Unresolved branch possibly with a condition.
    static Fixup Branch(uint32_t location, Type type, Size size = kBranch16Bit,
                        Condition cond = AL) {
      DCHECK(type == kConditional || type == kUnconditional ||
             type == kUnconditionalLink || type == kUnconditionalLinkX);
      DCHECK(size == kBranch16Bit || size == kBranch32Bit);
      DCHECK(size == kBranch32Bit || (type == kConditional || type == kUnconditional));
      return Fixup(kNoRegister, kNoRegister, kNoSRegister, kNoDRegister,
                   cond, type, size, location);
    }

    // Unresolved compare-and-branch instruction with a register and condition (EQ or NE).
    static Fixup CompareAndBranch(uint32_t location, Register rn, Condition cond) {
      DCHECK(cond == EQ || cond == NE);
      return Fixup(rn, kNoRegister, kNoSRegister, kNoDRegister,
                   cond, kCompareAndBranchXZero, kCbxz16Bit, location);
    }

    // Load narrow literal.
    static Fixup LoadNarrowLiteral(uint32_t location, Register rt, Size size) {
      DCHECK(size == kLiteral1KiB || size == kLiteral4KiB || size == kLiteral64KiB ||
             size == kLiteral1MiB || size == kLiteralFar);
      DCHECK(!IsHighRegister(rt) || (size != kLiteral1KiB && size != kLiteral64KiB));
      return Fixup(rt, kNoRegister, kNoSRegister, kNoDRegister,
                   AL, kLoadLiteralNarrow, size, location);
    }

    // Load wide literal.
    static Fixup LoadWideLiteral(uint32_t location, Register rt, Register rt2,
                                 Size size = kLongOrFPLiteral1KiB) {
      DCHECK(size == kLongOrFPLiteral1KiB || size == kLongOrFPLiteral64KiB ||
             size == kLongOrFPLiteralFar);
      DCHECK(!IsHighRegister(rt) || (size != kLiteral1KiB && size != kLiteral64KiB));
      return Fixup(rt, rt2, kNoSRegister, kNoDRegister,
                   AL, kLoadLiteralWide, size, location);
    }

    // Load FP single literal.
    static Fixup LoadSingleLiteral(uint32_t location, SRegister sd,
                                   Size size = kLongOrFPLiteral1KiB) {
      DCHECK(size == kLongOrFPLiteral1KiB || size == kLongOrFPLiteral64KiB ||
             size == kLongOrFPLiteralFar);
      return Fixup(kNoRegister, kNoRegister, sd, kNoDRegister,
                   AL, kLoadFPLiteralSingle, size, location);
    }

    // Load FP double literal.
    static Fixup LoadDoubleLiteral(uint32_t location, DRegister dd,
                                   Size size = kLongOrFPLiteral1KiB) {
      DCHECK(size == kLongOrFPLiteral1KiB || size == kLongOrFPLiteral64KiB ||
             size == kLongOrFPLiteralFar);
      return Fixup(kNoRegister, kNoRegister, kNoSRegister, dd,
                   AL, kLoadFPLiteralDouble, size, location);
    }

    static Fixup LoadLiteralAddress(uint32_t location, Register rt, Size size) {
      DCHECK(size == kLiteralAddr1KiB || size == kLiteralAddr4KiB || size == kLiteralAddr64KiB ||
             size == kLiteralAddrFar);
      DCHECK(!IsHighRegister(rt) || size != kLiteralAddr1KiB);
      return Fixup(rt, kNoRegister, kNoSRegister, kNoDRegister,
                   AL, kLoadLiteralAddr, size, location);
    }

    Type GetType() const {
      return type_;
    }

    bool IsLoadLiteral() const {
      return GetType() >= kLoadLiteralNarrow;
    }

    // Returns whether the Fixup can expand from the original size.
    bool CanExpand() const {
      switch (GetOriginalSize()) {
        case kBranch32Bit:
        case kCbxz48Bit:
        case kLiteralFar:
        case kLiteralAddrFar:
        case kLongOrFPLiteralFar:
          return false;
        default:
          return true;
      }
    }

    Size GetOriginalSize() const {
      return original_size_;
    }

    Size GetSize() const {
      return size_;
    }

    uint32_t GetOriginalSizeInBytes() const;

    uint32_t GetSizeInBytes() const;

    uint32_t GetLocation() const {
      return location_;
    }

    uint32_t GetAdjustment() const {
      return adjustment_;
    }

    // Prepare the assembler->fixup_dependents_ and each Fixup's dependents_start_/count_.
    static void PrepareDependents(Thumb2Assembler* assembler);

    ArrayRef<const FixupId> Dependents(const Thumb2Assembler& assembler) const {
      return ArrayRef<const FixupId>(assembler.fixup_dependents_).SubArray(dependents_start_,
                                                                           dependents_count_);
    }

    // Resolve a branch when the target is known.
    void Resolve(uint32_t target) {
      DCHECK_EQ(target_, kUnresolved);
      DCHECK_NE(target, kUnresolved);
      target_ = target;
    }

    // Check if the current size is OK for current location_, target_ and adjustment_.
    // If not, increase the size. Return the size increase, 0 if unchanged.
    // If the target is after this Fixup, also add the difference to adjustment_,
    // so that we don't need to consider forward Fixups as their own dependencies.
    uint32_t AdjustSizeIfNeeded(uint32_t current_code_size);

    // Increase adjustments. This is called for dependents of a Fixup when its size changes.
    void IncreaseAdjustment(uint32_t increase) {
      adjustment_ += increase;
    }

    // Finalize the branch with an adjustment to the location. Both location and target are updated.
    void Finalize(uint32_t location_adjustment) {
      DCHECK_NE(target_, kUnresolved);
      location_ += location_adjustment;
      target_ += location_adjustment;
    }

    // Emit the branch instruction into the assembler buffer. This does the
    // encoding into the thumb instruction.
    void Emit(AssemblerBuffer* buffer, uint32_t code_size) const;

   private:
    Fixup(Register rn, Register rt2, SRegister sd, DRegister dd,
          Condition cond, Type type, Size size, uint32_t location)
        : rn_(rn),
          rt2_(rt2),
          sd_(sd),
          dd_(dd),
          cond_(cond),
          type_(type),
          original_size_(size), size_(size),
          location_(location),
          target_(kUnresolved),
          adjustment_(0u),
          dependents_count_(0u),
          dependents_start_(0u) {
    }

    static size_t SizeInBytes(Size size);

    // The size of padding added before the literal pool.
    static size_t LiteralPoolPaddingSize(uint32_t current_code_size);

    // Returns the offset from the PC-using insn to the target.
    int32_t GetOffset(uint32_t current_code_size) const;

    size_t IncreaseSize(Size new_size);

    int32_t LoadWideOrFpEncoding(Register rbase, int32_t offset) const;

    template <typename Function>
    static void ForExpandableDependencies(Thumb2Assembler* assembler, Function fn);

    static constexpr uint32_t kUnresolved = 0xffffffff;  // Value for target_ for unresolved.

    const Register rn_;   // Rn for cbnz/cbz, Rt for literal loads.
    Register rt2_;        // For kLoadLiteralWide.
    SRegister sd_;        // For kLoadFPLiteralSingle.
    DRegister dd_;        // For kLoadFPLiteralDouble.
    const Condition cond_;
    const Type type_;
    Size original_size_;
    Size size_;
    uint32_t location_;      // Offset into assembler buffer in bytes.
    uint32_t target_;        // Offset into assembler buffer in bytes.
    uint32_t adjustment_;    // The number of extra bytes inserted between location_ and target_.
    // Fixups that require adjustment when current size changes are stored in a single
    // array in the assembler and we store only the start index and count here.
    uint32_t dependents_count_;
    uint32_t dependents_start_;
  };

  // Emit a single 32 or 16 bit data processing instruction.
  void EmitDataProcessing(Condition cond,
                          Opcode opcode,
                          SetCc set_cc,
                          Register rn,
                          Register rd,
                          const ShifterOperand& so);

  // Emit a single 32 bit miscellaneous instruction.
  void Emit32Miscellaneous(uint8_t op1,
                           uint8_t op2,
                           uint32_t rest_encoding);

  // Emit reverse byte instructions: rev, rev16, revsh.
  void EmitReverseBytes(Register rd, Register rm, uint32_t op);

  // Emit a single 16 bit miscellaneous instruction.
  void Emit16Miscellaneous(uint32_t rest_encoding);

  // Must the instruction be 32 bits or can it possibly be encoded
  // in 16 bits?
  bool Is32BitDataProcessing(Condition cond,
                             Opcode opcode,
                             SetCc set_cc,
                             Register rn,
                             Register rd,
                             const ShifterOperand& so);

  // Emit a 32 bit data processing instruction.
  void Emit32BitDataProcessing(Condition cond,
                               Opcode opcode,
                               SetCc set_cc,
                               Register rn,
                               Register rd,
                               const ShifterOperand& so);

  // Emit a 16 bit data processing instruction.
  void Emit16BitDataProcessing(Condition cond,
                               Opcode opcode,
                               SetCc set_cc,
                               Register rn,
                               Register rd,
                               const ShifterOperand& so);

  void Emit16BitAddSub(Condition cond,
                       Opcode opcode,
                       SetCc set_cc,
                       Register rn,
                       Register rd,
                       const ShifterOperand& so);

  uint16_t EmitCompareAndBranch(Register rn, uint16_t prev, bool n);

  void EmitLoadStore(Condition cond,
                     bool load,
                     bool byte,
                     bool half,
                     bool is_signed,
                     Register rd,
                     const Address& ad);

  void EmitMemOpAddressMode3(Condition cond,
                             int32_t mode,
                             Register rd,
                             const Address& ad);

  void EmitMultiMemOp(Condition cond,
                      BlockAddressMode am,
                      bool load,
                      Register base,
                      RegList regs);

  void EmitMulOp(Condition cond,
                 int32_t opcode,
                 Register rd,
                 Register rn,
                 Register rm,
                 Register rs);

  void EmitVFPsss(Condition cond,
                  int32_t opcode,
                  SRegister sd,
                  SRegister sn,
                  SRegister sm);

  void EmitVFPddd(Condition cond,
                  int32_t opcode,
                  DRegister dd,
                  DRegister dn,
                  DRegister dm);

  void EmitVFPsd(Condition cond,
                 int32_t opcode,
                 SRegister sd,
                 DRegister dm);

  void EmitVFPds(Condition cond,
                 int32_t opcode,
                 DRegister dd,
                 SRegister sm);

  void EmitVPushPop(uint32_t reg, int nregs, bool push, bool dbl, Condition cond);

  void EmitBranch(Condition cond, Label* label, bool link, bool x);
  static int32_t EncodeBranchOffset(int32_t offset, int32_t inst);
  static int DecodeBranchOffset(int32_t inst);
  void EmitShift(Register rd, Register rm, Shift shift, uint8_t amount,
                 Condition cond = AL, SetCc set_cc = kCcDontCare);
  void EmitShift(Register rd, Register rn, Shift shift, Register rm,
                 Condition cond = AL, SetCc set_cc = kCcDontCare);

  static int32_t GetAllowedLoadOffsetBits(LoadOperandType type);
  static int32_t GetAllowedStoreOffsetBits(StoreOperandType type);
  bool CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
                               int32_t offset,
                               /*out*/ int32_t* add_to_base,
                               /*out*/ int32_t* offset_for_load_store);
  int32_t AdjustLoadStoreOffset(int32_t allowed_offset_bits,
                                Register temp,
                                Register base,
                                int32_t offset,
                                Condition cond);
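  // Illustrative example (hypothetical values): a word load at base + 0x1234 exceeds the
  // 12-bit immediate of the 32-bit LDR encoding, so AdjustLoadStoreOffset() would add 0x1000
  // to the base in the temp register and return 0x234 as the offset for the actual load.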

  // Whether the assembler can relocate branches. If false, unresolved branches will be
  // emitted as 32-bit instructions.
  bool can_relocate_branches_;

  // Force the assembler to use 32 bit thumb2 instructions.
  bool force_32bit_;

  // IfThen conditions. Used to check that conditional instructions match the preceding IT.
  Condition it_conditions_[4];
  uint8_t it_cond_index_;
  Condition next_condition_;

  void SetItCondition(ItState s, Condition cond, uint8_t index);

  void CheckCondition(Condition cond) {
    CHECK_EQ(cond, next_condition_);

    // Move to the next condition if there is one.
    if (it_cond_index_ < 3) {
      ++it_cond_index_;
      next_condition_ = it_conditions_[it_cond_index_];
    } else {
      next_condition_ = AL;
    }
  }

  void CheckConditionLastIt(Condition cond) {
    if (it_cond_index_ < 3) {
      // Check that the next condition is AL. This means that the
      // current condition is the last in the IT block.
      CHECK_EQ(it_conditions_[it_cond_index_ + 1], AL);
    }
    CheckCondition(cond);
  }
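
  // Illustrative walk-through (a sketch; assumes omitted IT slots record AL): after
  // it(EQ, kItThen, kItElse), it_conditions_ holds {EQ, EQ, NE, AL}, so CheckCondition()
  // expects EQ for the first two predicated instructions and NE for the third, after which
  // next_condition_ falls back to AL.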

  FixupId AddFixup(Fixup fixup) {
    FixupId fixup_id = static_cast<FixupId>(fixups_.size());
    fixups_.push_back(fixup);
    // For iterating using FixupId, we need the next id to be representable.
    DCHECK_EQ(static_cast<size_t>(static_cast<FixupId>(fixups_.size())), fixups_.size());
    return fixup_id;
  }

  Fixup* GetFixup(FixupId fixup_id) {
    DCHECK_LT(fixup_id, fixups_.size());
    return &fixups_[fixup_id];
  }

  void BindLabel(Label* label, uint32_t bound_pc);
  uint32_t BindLiterals();
  void BindJumpTables(uint32_t code_size);
  void AdjustFixupIfNeeded(Fixup* fixup, uint32_t* current_code_size,
                           std::deque<FixupId>* fixups_to_recalculate);
  uint32_t AdjustFixups();
  void EmitFixups(uint32_t adjusted_code_size);
  void EmitLiterals();
  void EmitJumpTables();
  void PatchCFI();

  static int16_t BEncoding16(int32_t offset, Condition cond);
  static int32_t BEncoding32(int32_t offset, Condition cond);
  static int16_t CbxzEncoding16(Register rn, int32_t offset, Condition cond);
  static int16_t CmpRnImm8Encoding16(Register rn, int32_t value);
  static int16_t AddRdnRmEncoding16(Register rdn, Register rm);
  static int32_t MovwEncoding32(Register rd, int32_t value);
  static int32_t MovtEncoding32(Register rd, int32_t value);
  static int32_t MovModImmEncoding32(Register rd, int32_t value);
  static int16_t LdrLitEncoding16(Register rt, int32_t offset);
  static int32_t LdrLitEncoding32(Register rt, int32_t offset);
  static int32_t LdrdEncoding32(Register rt, Register rt2, Register rn, int32_t offset);
  static int32_t VldrsEncoding32(SRegister sd, Register rn, int32_t offset);
  static int32_t VldrdEncoding32(DRegister dd, Register rn, int32_t offset);
  static int16_t LdrRtRnImm5Encoding16(Register rt, Register rn, int32_t offset);
  static int32_t LdrRtRnImm12Encoding(Register rt, Register rn, int32_t offset);
  static int16_t AdrEncoding16(Register rd, int32_t offset);
  static int32_t AdrEncoding32(Register rd, int32_t offset);

  ArenaVector<Fixup> fixups_;
  ArenaVector<FixupId> fixup_dependents_;

  // Use std::deque<> for literal labels to allow insertions at the end
  // without invalidating pointers and references to existing elements.
  ArenaDeque<Literal> literals_;

  // Deduplication map for 64-bit literals, used for LoadDImmediate().
  ArenaSafeMap<uint64_t, Literal*> literal64_dedupe_map_;

  // Jump table list.
  ArenaDeque<JumpTable> jump_tables_;

  // Data for GetAdjustedPosition(), see the description there.
  uint32_t last_position_adjustment_;
  uint32_t last_old_position_;
  FixupId last_fixup_id_;
};

}  // namespace arm
}  // namespace art

#endif  // ART_COMPILER_UTILS_ARM_ASSEMBLER_THUMB2_H_