blob: 322f6c4d7056d6ec50a8e839af4489393c8e6a9f [file] [log] [blame]
Artem Serov12e097c2016-08-08 15:13:26 +01001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
18#define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_
19
20#include "base/arena_containers.h"
21#include "base/logging.h"
22#include "constants_arm.h"
23#include "offsets.h"
24#include "utils/arm/assembler_arm_shared.h"
25#include "utils/arm/managed_register_arm.h"
26#include "utils/assembler.h"
27#include "utils/jni_macro_assembler.h"
28
29// TODO(VIXL): Make VIXL compile with -Wshadow and remove pragmas.
30#pragma GCC diagnostic push
31#pragma GCC diagnostic ignored "-Wshadow"
32#include "aarch32/macro-assembler-aarch32.h"
33#pragma GCC diagnostic pop
34
35namespace vixl32 = vixl::aarch32;
36
37namespace art {
38namespace arm {
39
// ART-specific wrapper around vixl32::MacroAssembler that biases code
// generation towards the shorter 16-bit Thumb2 encodings: the wrappers below
// pass vixl32::DontCare so VIXL may pick flag-setting 16-bit forms, and the
// branch helpers prefer narrow B/CBZ/CBNZ encodings when the caller hints
// that the target is near.
class ArmVIXLMacroAssembler FINAL : public vixl32::MacroAssembler {
 public:
  // Most methods fit in a 1KB code buffer, which results in more optimal alloc/realloc and
  // fewer system calls than a larger default capacity.
  static constexpr size_t kDefaultCodeBufferCapacity = 1 * KB;

  ArmVIXLMacroAssembler()
      : vixl32::MacroAssembler(ArmVIXLMacroAssembler::kDefaultCodeBufferCapacity) {}

  // The following interfaces can generate CMP+Bcc or Cbz/Cbnz.
  // CMP+Bcc are generated by default.
  // If a hint is given (is_far_target = false) and rn and label can all fit into Cbz/Cbnz,
  // then Cbz/Cbnz is generated.
  // Prefer following interfaces to using vixl32::MacroAssembler::Cbz/Cbnz.
  // In T32, Cbz/Cbnz instructions have following limitations:
  // - Far targets, which are over 126 bytes away, are not supported.
  // - Only low registers can be encoded.
  // - Backward branches are not supported.
  void CompareAndBranchIfZero(vixl32::Register rn,
                              vixl32::Label* label,
                              bool is_far_target = true);
  void CompareAndBranchIfNonZero(vixl32::Register rn,
                                 vixl32::Label* label,
                                 bool is_far_target = true);

  // In T32 some of the instructions (add, mov, etc) outside an IT block
  // have only 32-bit encodings. But there are 16-bit flag setting
  // versions of these instructions (adds, movs, etc). In most of the
  // cases in ART we don't care if the instructions keep flags or not;
  // thus we can benefit from smaller code size.
  // VIXL will never generate flag setting versions (for example, adds
  // for Add macro instruction) unless vixl32::DontCare option is
  // explicitly specified. That's why we introduce wrappers to use
  // DontCare option by default.
  //
  // This macro defines a three-operand wrapper (rd, rn, operand) that
  // forwards to the VIXL macro instruction with vixl32::DontCare, plus a
  // using-declaration so the other MacroAssembler overloads of `func_name`
  // remain visible and are not hidden by the wrapper.
#define WITH_FLAGS_DONT_CARE_RD_RN_OP(func_name) \
  void (func_name)(vixl32::Register rd, vixl32::Register rn, const vixl32::Operand& operand) { \
    MacroAssembler::func_name(vixl32::DontCare, rd, rn, operand); \
  } \
  using MacroAssembler::func_name

  // Arithmetic instructions.
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Adc);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Sub);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Sbc);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Rsb);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Rsc);

  // Bitwise/logical instructions.
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Eor);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Orr);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Orn);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(And);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Bic);

  // Shift/rotate instructions.
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Asr);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Lsr);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Lsl);
  WITH_FLAGS_DONT_CARE_RD_RN_OP(Ror);

#undef WITH_FLAGS_DONT_CARE_RD_RN_OP

  // Same idea as above, for the two-operand (rd, operand) macro instructions.
#define WITH_FLAGS_DONT_CARE_RD_OP(func_name) \
  void (func_name)(vixl32::Register rd, const vixl32::Operand& operand) { \
    MacroAssembler::func_name(vixl32::DontCare, rd, operand); \
  } \
  using MacroAssembler::func_name

  WITH_FLAGS_DONT_CARE_RD_OP(Mvn);
  WITH_FLAGS_DONT_CARE_RD_OP(Mov);

#undef WITH_FLAGS_DONT_CARE_RD_OP

  // The following two functions don't fall into above categories. Overload them separately.
  void Rrx(vixl32::Register rd, vixl32::Register rn) {
    MacroAssembler::Rrx(vixl32::DontCare, rd, rn);
  }
  using MacroAssembler::Rrx;

  void Mul(vixl32::Register rd, vixl32::Register rn, vixl32::Register rm) {
    MacroAssembler::Mul(vixl32::DontCare, rd, rn, rm);
  }
  using MacroAssembler::Mul;

  // TODO: Remove when MacroAssembler::Add(FlagsUpdate, Condition, Register, Register, Operand)
  // makes the right decision about 16-bit encodings.
  void Add(vixl32::Register rd, vixl32::Register rn, const vixl32::Operand& operand) {
    // When rd == rn and the operand is a plain register, omit the explicit
    // FlagsUpdate and let VIXL choose the encoding (see TODO above);
    // otherwise default to DontCare like the wrappers do.
    if (rd.Is(rn) && operand.IsPlainRegister()) {
      MacroAssembler::Add(rd, rn, operand);
    } else {
      MacroAssembler::Add(vixl32::DontCare, rd, rn, operand);
    }
  }
  using MacroAssembler::Add;

  // These interfaces try to use 16-bit T2 encoding of B instruction.
  void B(vixl32::Label* label);
  // For B(label), we always try to use the narrow encoding, because the 16-bit T2 encoding
  // supports jumping within a 2KB range. For B(cond, label), the supported branch range is
  // only 256 bytes, so we use the is_far_target hint to try to use the 16-bit T1 encoding
  // for short range jumps.
  void B(vixl32::Condition cond, vixl32::Label* label, bool is_far_target = true);
};
139
// ART Assembler implementation for 32-bit ARM, built on top of the VIXL-based
// ArmVIXLMacroAssembler (always configured for the Thumb2/T32 instruction
// set). Most methods below are declared here and implemented out of line;
// comments on those declarations describe intent from the names/signatures —
// confirm details against the corresponding .cc file.
class ArmVIXLAssembler FINAL : public Assembler {
 private:
  class ArmException;
 public:
  explicit ArmVIXLAssembler(ArenaAllocator* arena)
      : Assembler(arena) {
    // Use Thumb2 instruction set.
    vixl_masm_.UseT32();
  }

  virtual ~ArmVIXLAssembler() {}
  // Accessor for the underlying VIXL macro assembler; callers emit
  // instructions directly through it.
  ArmVIXLMacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
  void FinalizeCode() OVERRIDE;

  // Size of generated code.
  size_t CodeSize() const OVERRIDE;
  const uint8_t* CodeBufferBaseAddress() const OVERRIDE;

  // Copy instructions out of assembly buffer into the given region of memory.
  void FinalizeInstructions(const MemoryRegion& region) OVERRIDE;

  // Label binding/jumping must go through vixl32::Label (see the macro
  // assembler above); the base-class Label API is deliberately disabled.
  void Bind(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
    UNIMPLEMENTED(FATAL) << "Do not use Bind for ARM";
  }
  void Jump(Label* label ATTRIBUTE_UNUSED) OVERRIDE {
    UNIMPLEMENTED(FATAL) << "Do not use Jump for ARM";
  }

  //
  // Heap poisoning.
  //
  // Poison a heap reference contained in `reg`.
  void PoisonHeapReference(vixl32::Register reg);
  // Unpoison a heap reference contained in `reg`.
  void UnpoisonHeapReference(vixl32::Register reg);
  // Poison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybePoisonHeapReference(vixl32::Register reg);
  // Unpoison a heap reference contained in `reg` if heap poisoning is enabled.
  void MaybeUnpoisonHeapReference(vixl32::Register reg);

  // Store helpers: core register (sized by `type`), S register, D register
  // to memory at [base + offset].
  void StoreToOffset(StoreOperandType type,
                     vixl32::Register reg,
                     vixl32::Register base,
                     int32_t offset);
  void StoreSToOffset(vixl32::SRegister source, vixl32::Register base, int32_t offset);
  void StoreDToOffset(vixl32::DRegister source, vixl32::Register base, int32_t offset);

  // Load helpers, mirroring the store helpers above.
  void LoadImmediate(vixl32::Register dest, int32_t value);
  void LoadFromOffset(LoadOperandType type,
                      vixl32::Register reg,
                      vixl32::Register base,
                      int32_t offset);
  void LoadSFromOffset(vixl32::SRegister reg, vixl32::Register base, int32_t offset);
  void LoadDFromOffset(vixl32::DRegister reg, vixl32::Register base, int32_t offset);

  // Bulk register save/restore relative to a stack offset.
  void LoadRegisterList(RegList regs, size_t stack_offset);
  void StoreRegisterList(RegList regs, size_t stack_offset);

  // Immediate-encoding and load/store-offset-range queries/adjustments; used
  // to decide when an offset must be split through a temporary register.
  bool ShifterOperandCanAlwaysHold(uint32_t immediate);
  bool ShifterOperandCanHold(Opcode opcode, uint32_t immediate, SetCc set_cc = kCcDontCare);
  bool CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
                               int32_t offset,
                               /*out*/ int32_t* add_to_base,
                               /*out*/ int32_t* offset_for_load_store);
  int32_t AdjustLoadStoreOffset(int32_t allowed_offset_bits,
                                vixl32::Register temp,
                                vixl32::Register base,
                                int32_t offset);
  int32_t GetAllowedLoadOffsetBits(LoadOperandType type);
  int32_t GetAllowedStoreOffsetBits(StoreOperandType type);

  // Add a constant to a register, with an IT-block-friendly conditional
  // variant.
  void AddConstant(vixl32::Register rd, int32_t value);
  void AddConstant(vixl32::Register rd, vixl32::Register rn, int32_t value);
  void AddConstantInIt(vixl32::Register rd,
                       vixl32::Register rn,
                       int32_t value,
                       vixl32::Condition cond = vixl32::al);

  // Creates a pool literal that is placed when first used
  // (kPlacedWhenUsed) and owned by the literal pool (kDeletedOnPoolDestruction),
  // so the caller must not delete the returned pointer.
  template <typename T>
  vixl::aarch32::Literal<T>* CreateLiteralDestroyedWithPool(T value) {
    vixl::aarch32::Literal<T>* literal =
        new vixl::aarch32::Literal<T>(value,
                                      vixl32::RawLiteral::kPlacedWhenUsed,
                                      vixl32::RawLiteral::kDeletedOnPoolDestruction);
    return literal;
  }

 private:
  // VIXL assembler.
  ArmVIXLMacroAssembler vixl_masm_;
};
231
232// Thread register declaration.
233extern const vixl32::Register tr;
234
235} // namespace arm
236} // namespace art
237
238#endif // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_VIXL_H_