/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_UTILS_ARM64_ASSEMBLER_ARM64_H_
#define ART_COMPILER_UTILS_ARM64_ASSEMBLER_ARM64_H_

#include <vector>

#include "base/logging.h"
#include "constants_arm64.h"
#include "utils/arm64/managed_register_arm64.h"
#include "utils/assembler.h"
#include "offsets.h"
#include "utils.h"
#include "UniquePtr.h"
#include "a64/macro-assembler-a64.h"
#include "a64/disasm-a64.h"

namespace art {
namespace arm64 {

#define MEM_OP(x...)  vixl::MemOperand(x)
#define COND_OP(x)    static_cast<vixl::Condition>(x)

enum Condition {
  kNoCondition = -1,
  EQ = 0,
  NE = 1,
  HS = 2,
  LO = 3,
  MI = 4,
  PL = 5,
  VS = 6,
  VC = 7,
  HI = 8,
  LS = 9,
  GE = 10,
  LT = 11,
  GT = 12,
  LE = 13,
  AL = 14,  // Always.
  NV = 15,  // Behaves as always/al.
  kMaxCondition = 16,
};
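
// Note (editorial, illustrative only): the enumerators above follow the
// AArch64 condition-code encoding, so COND_OP is a plain value-preserving
// cast. A minimal sketch of the mapping, assuming VIXL's standard
// vixl::Condition encoding (eq == 0, ne == 1, ...):
//
//   vixl::Condition c_eq = COND_OP(EQ);  // same encoding as vixl::eq
//   vixl::Condition c_lt = COND_OP(LT);  // same encoding as vixl::lt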

enum LoadOperandType {
  kLoadSignedByte,
  kLoadUnsignedByte,
  kLoadSignedHalfword,
  kLoadUnsignedHalfword,
  kLoadWord,
  kLoadCoreWord,
  kLoadSWord,
  kLoadDWord
};

enum StoreOperandType {
  kStoreByte,
  kStoreHalfword,
  kStoreWord,
  kStoreCoreWord,
  kStoreSWord,
  kStoreDWord
};
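
// Note (editorial, illustrative only): these operand types select the width
// and extension behaviour of a single memory access. For example, a
// zero-extending 16-bit load through the private helpers declared below
// might be requested as
//
//   LoadWFromOffset(kLoadUnsignedHalfword, dest, base, offset);
//
// leaving the helper to pick the matching load form; the exact instruction
// selection lives in the corresponding .cc file.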

class Arm64Exception;

class Arm64Assembler : public Assembler {
 public:
  Arm64Assembler() : vixl_buf_(new byte[BUF_SIZE]),
                     vixl_masm_(new vixl::MacroAssembler(vixl_buf_, BUF_SIZE)) {}

  virtual ~Arm64Assembler() {
    if (kIsDebugBuild) {
      // Disassemble the generated code to stdout as a debugging aid.
      vixl::Decoder decoder;
      vixl::PrintDisassembler disasm(stdout);
      decoder.AppendVisitor(&disasm);

      for (size_t i = 0; i < CodeSize() / vixl::kInstructionSize; ++i) {
        vixl::Instruction* instr =
            reinterpret_cast<vixl::Instruction*>(vixl_buf_ + i * vixl::kInstructionSize);
        decoder.Decode(instr);
      }
    }
    delete[] vixl_buf_;
  }

  // Emit slow paths queued during assembly.
  void EmitSlowPaths();

  // Size of generated code.
  size_t CodeSize() const;

  // Copy instructions out of assembly buffer into the given region of memory.
  void FinalizeInstructions(const MemoryRegion& region);

  // Emit code that will create an activation on the stack.
  void BuildFrame(size_t frame_size, ManagedRegister method_reg,
                  const std::vector<ManagedRegister>& callee_save_regs,
                  const std::vector<ManagedRegister>& entry_spills);

  // Emit code that will remove an activation from the stack.
  void RemoveFrame(size_t frame_size,
                   const std::vector<ManagedRegister>& callee_save_regs);

  void IncreaseFrameSize(size_t adjust);
  void DecreaseFrameSize(size_t adjust);

  // Store routines.
  void Store(FrameOffset offs, ManagedRegister src, size_t size);
  void StoreRef(FrameOffset dest, ManagedRegister src);
  void StoreRawPtr(FrameOffset dest, ManagedRegister src);
  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                             ManagedRegister scratch);
  void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
                              ManagedRegister scratch);
  void StoreStackOffsetToThread(ThreadOffset thr_offs,
                                FrameOffset fr_offs,
                                ManagedRegister scratch);
  void StoreStackPointerToThread(ThreadOffset thr_offs);
  void StoreSpanning(FrameOffset dest, ManagedRegister src,
                     FrameOffset in_off, ManagedRegister scratch);

  // Load routines.
  void Load(ManagedRegister dest, FrameOffset src, size_t size);
  void Load(ManagedRegister dest, ThreadOffset src, size_t size);
  void LoadRef(ManagedRegister dest, FrameOffset src);
  void LoadRef(ManagedRegister dest, ManagedRegister base,
               MemberOffset offs);
  void LoadRawPtr(ManagedRegister dest, ManagedRegister base,
                  Offset offs);
  void LoadRawPtrFromThread(ManagedRegister dest,
                            ThreadOffset offs);

  // Copying routines.
  void Move(ManagedRegister dest, ManagedRegister src, size_t size);
  void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
                            ManagedRegister scratch);
  void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
                          ManagedRegister scratch);
  void CopyRef(FrameOffset dest, FrameOffset src,
               ManagedRegister scratch);
  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size);
  void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
            ManagedRegister scratch, size_t size);
  void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
            ManagedRegister scratch, size_t size);
  void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
            ManagedRegister scratch, size_t size);
  void Copy(ManagedRegister dest, Offset dest_offset,
            ManagedRegister src, Offset src_offset,
            ManagedRegister scratch, size_t size);
  void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
            ManagedRegister scratch, size_t size);
  void MemoryBarrier(ManagedRegister scratch);

  // Sign extension.
  void SignExtend(ManagedRegister mreg, size_t size);

  // Zero extension.
  void ZeroExtend(ManagedRegister mreg, size_t size);

  // Exploit fast access in managed code to Thread::Current().
  void GetCurrentThread(ManagedRegister tr);
  void GetCurrentThread(FrameOffset dest_offset,
                        ManagedRegister scratch);

  // Set up out_reg to hold an Object** into the SIRT, or to be NULL if the
  // value is null and null_allowed. in_reg holds a possibly stale reference
  // that can be used to avoid loading the SIRT entry to see if the value is
  // NULL.
  void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
                       ManagedRegister in_reg, bool null_allowed);

  // Set up out_off to hold an Object** into the SIRT, or to be NULL if the
  // value is null and null_allowed.
  void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
                       ManagedRegister scratch, bool null_allowed);

  // src holds a SIRT entry (Object**); load this into dst.
  void LoadReferenceFromSirt(ManagedRegister dst,
                             ManagedRegister src);

  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
  // know that src may not be null.
  void VerifyObject(ManagedRegister src, bool could_be_null);
  void VerifyObject(FrameOffset src, bool could_be_null);

  // Call to address held at [base+offset].
  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch);
  void Call(FrameOffset base, Offset offset, ManagedRegister scratch);
  void Call(ThreadOffset offset, ManagedRegister scratch);

  // Generate code to check if Thread::Current()->exception_ is non-null
  // and branch to an ExceptionSlowPath if it is.
  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);

 private:
  static vixl::Register reg_x(int code) {
    CHECK(code < kNumberOfCoreRegisters) << code;
    if (code == SP) {
      return vixl::sp;
    }
    return vixl::Register::XRegFromCode(code);
  }

  static vixl::Register reg_w(int code) {
    return vixl::Register::WRegFromCode(code);
  }

  static vixl::FPRegister reg_d(int code) {
    return vixl::FPRegister::DRegFromCode(code);
  }

  static vixl::FPRegister reg_s(int code) {
    return vixl::FPRegister::SRegFromCode(code);
  }

  // Emits the exception block.
  void EmitExceptionPoll(Arm64Exception* exception);

  void StoreWToOffset(StoreOperandType type, WRegister source,
                      Register base, int32_t offset);
  void StoreToOffset(Register source, Register base, int32_t offset);
  void StoreSToOffset(SRegister source, Register base, int32_t offset);
  void StoreDToOffset(DRegister source, Register base, int32_t offset);

  void LoadImmediate(Register dest, int32_t value, Condition cond = AL);
  void Load(Arm64ManagedRegister dst, Register src, int32_t src_offset, size_t size);
  void LoadWFromOffset(LoadOperandType type, WRegister dest,
                       Register base, int32_t offset);
  void LoadFromOffset(Register dest, Register base, int32_t offset);
  void LoadSFromOffset(SRegister dest, Register base, int32_t offset);
  void LoadDFromOffset(DRegister dest, Register base, int32_t offset);
  void AddConstant(Register rd, int32_t value, Condition cond = AL);
  void AddConstant(Register rd, Register rn, int32_t value, Condition cond = AL);

  // Vixl buffer size.
  static constexpr size_t BUF_SIZE = 4096;

  // Vixl buffer.
  byte* vixl_buf_;

  // Unique pointer to the vixl MacroAssembler.
  UniquePtr<vixl::MacroAssembler> vixl_masm_;

  // List of exception blocks to generate at the end of the code cache.
  std::vector<Arm64Exception*> exception_blocks_;
};
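
// Note (editorial, illustrative only): a minimal sketch of how a caller such
// as the JNI stub compiler might drive this assembler. Method names are taken
// from the declarations above; the frame size, registers and MemoryRegion
// setup are placeholders, not values used by ART.
//
//   Arm64Assembler assembler;
//   assembler.BuildFrame(frame_size, method_reg, callee_save_regs, entry_spills);
//   assembler.Store(FrameOffset(out_off), src_reg, kPointerSize);
//   assembler.ExceptionPoll(scratch_reg, 0);
//   assembler.RemoveFrame(frame_size, callee_save_regs);
//   assembler.EmitSlowPaths();
//   size_t code_size = assembler.CodeSize();
//   // ... allocate code_size bytes and wrap them in a MemoryRegion `code` ...
//   assembler.FinalizeInstructions(code);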

class Arm64Exception {
 private:
  explicit Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust)
      : scratch_(scratch), stack_adjust_(stack_adjust) {
  }

  vixl::Label* Entry() { return &exception_entry_; }

  // Register used for passing Thread::Current()->exception_.
  const Arm64ManagedRegister scratch_;

  // Stack adjustment for ExceptionPoll.
  const size_t stack_adjust_;

  vixl::Label exception_entry_;

  friend class Arm64Assembler;
  DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
};

}  // namespace arm64
}  // namespace art

#endif  // ART_COMPILER_UTILS_ARM64_ASSEMBLER_ARM64_H_