/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_UTILS_ASSEMBLER_H_
#define ART_COMPILER_UTILS_ASSEMBLER_H_

#include <vector>

#include "arch/instruction_set.h"
#include "arch/instruction_set_features.h"
#include "arm/constants_arm.h"
#include "base/arena_allocator.h"
#include "base/arena_object.h"
#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
#include "debug/dwarf/debug_frame_opcode_writer.h"
#include "label.h"
#include "managed_register.h"
#include "memory_region.h"
#include "mips/constants_mips.h"
#include "offsets.h"
#include "utils/array_ref.h"
#include "x86/constants_x86.h"
#include "x86_64/constants_x86_64.h"

namespace art {

class Assembler;
class AssemblerBuffer;

// Assembler fixups are positions in generated code that require processing
// after the code has been copied to executable memory. This includes building
// relocation information.
class AssemblerFixup {
 public:
  virtual void Process(const MemoryRegion& region, int position) = 0;
  virtual ~AssemblerFixup() {}

 private:
  AssemblerFixup* previous_;
  int position_;

  AssemblerFixup* previous() const { return previous_; }
  void set_previous(AssemblerFixup* previous_in) { previous_ = previous_in; }

  int position() const { return position_; }
  void set_position(int position_in) { position_ = position_in; }

  friend class AssemblerBuffer;
};
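
// A hedged sketch of a concrete fixup (not part of the ART sources): it
// overwrites a placeholder word with a final absolute value once the code
// has been copied to its destination, assuming MemoryRegion::Store<T>()
// from memory_region.h. A backend would register it at emission time with
// AssemblerBuffer::EmitFixup(), which records the current buffer position.
//
//    class AbsoluteWordFixup FINAL : public AssemblerFixup {
//     public:
//      explicit AbsoluteWordFixup(uint32_t value) : value_(value) {}
//      void Process(const MemoryRegion& region, int position) OVERRIDE {
//        region.Store<uint32_t>(position, value_);  // Patch the placeholder.
//      }
//     private:
//      const uint32_t value_;
//    };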

// Parent of all queued slow paths, emitted during finalization.
class SlowPath : public DeletableArenaObject<kArenaAllocAssembler> {
 public:
  SlowPath() : next_(nullptr) {}
  virtual ~SlowPath() {}

  Label* Continuation() { return &continuation_; }
  Label* Entry() { return &entry_; }
  // Generate code for slow path.
  virtual void Emit(Assembler* sp_asm) = 0;

 protected:
  // Entry branched to by fast path.
  Label entry_;
  // Optional continuation that is branched to at the end of the slow path.
  Label continuation_;
  // Next in linked list of slow paths.
  SlowPath* next_;

 private:
  friend class AssemblerBuffer;
  DISALLOW_COPY_AND_ASSIGN(SlowPath);
};
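
// A hedged sketch of a concrete slow path (not part of the ART sources): the
// fast path branches to Entry(), and the out-of-line code jumps back to
// Continuation() when done, using the architecture-neutral Bind()/Jump()
// declared on Assembler below.
//
//    class ExampleSlowPath FINAL : public SlowPath {
//     public:
//      void Emit(Assembler* sp_asm) OVERRIDE {
//        sp_asm->Bind(Entry());         // Target of the fast-path branch.
//        ... emit the rarely taken code ...
//        sp_asm->Jump(Continuation());  // Resume on the fast path.
//      }
//    };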

class AssemblerBuffer {
 public:
  explicit AssemblerBuffer(ArenaAllocator* arena);
  ~AssemblerBuffer();

  ArenaAllocator* GetArena() {
    return arena_;
  }

  // Basic support for emitting, loading, and storing.
  template<typename T> void Emit(T value) {
    CHECK(HasEnsuredCapacity());
    *reinterpret_cast<T*>(cursor_) = value;
    cursor_ += sizeof(T);
  }

  template<typename T> T Load(size_t position) {
    CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
    return *reinterpret_cast<T*>(contents_ + position);
  }

  template<typename T> void Store(size_t position, T value) {
    CHECK_LE(position, Size() - static_cast<int>(sizeof(T)));
    *reinterpret_cast<T*>(contents_ + position) = value;
  }
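
  // A hedged usage sketch for the accessors above (not part of the original
  // header): emit a 32-bit placeholder, remember its position, then patch it
  // with Store<T>() once the final value is known.
  //
  //    AssemblerBuffer::EnsureCapacity ensured(&buffer);  // Required by Emit().
  //    size_t patch_position = buffer.Size();
  //    buffer.Emit<uint32_t>(0);  // Placeholder word.
  //    ... later, once the value is known ...
  //    buffer.Store<uint32_t>(patch_position, final_value);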

  void Resize(size_t new_size) {
    if (new_size > Capacity()) {
      ExtendCapacity(new_size);
    }
    cursor_ = contents_ + new_size;
  }

  void Move(size_t newposition, size_t oldposition, size_t size) {
    // Move a chunk of the buffer from oldposition to newposition.
    DCHECK_LE(oldposition + size, Size());
    DCHECK_LE(newposition + size, Size());
    memmove(contents_ + newposition, contents_ + oldposition, size);
  }

  // Emit a fixup at the current location.
  void EmitFixup(AssemblerFixup* fixup) {
    fixup->set_previous(fixup_);
    fixup->set_position(Size());
    fixup_ = fixup;
  }

  void EnqueueSlowPath(SlowPath* slowpath) {
    if (slow_path_ == nullptr) {
      slow_path_ = slowpath;
    } else {
      SlowPath* cur = slow_path_;
      for ( ; cur->next_ != nullptr ; cur = cur->next_) {}
      cur->next_ = slowpath;
    }
  }

  void EmitSlowPaths(Assembler* sp_asm) {
    SlowPath* cur = slow_path_;
    SlowPath* next = nullptr;
    slow_path_ = nullptr;
    for ( ; cur != nullptr ; cur = next) {
      cur->Emit(sp_asm);
      next = cur->next_;
      delete cur;
    }
  }

  // Get the size of the emitted code.
  size_t Size() const {
    CHECK_GE(cursor_, contents_);
    return cursor_ - contents_;
  }

  uint8_t* contents() const { return contents_; }

  // Copy the assembled instructions into the specified memory block
  // and apply all fixups.
  void FinalizeInstructions(const MemoryRegion& region);

  // To emit an instruction to the assembler buffer, the EnsureCapacity helper
  // must be used to guarantee that the underlying data area is big enough to
  // hold the emitted instruction. Usage:
  //
  //     AssemblerBuffer buffer;
  //     AssemblerBuffer::EnsureCapacity ensured(&buffer);
  //     ... emit bytes for single instruction ...

#ifndef NDEBUG

  class EnsureCapacity {
   public:
    explicit EnsureCapacity(AssemblerBuffer* buffer) {
      if (buffer->cursor() > buffer->limit()) {
        buffer->ExtendCapacity(buffer->Size() + kMinimumGap);
      }
      // In debug mode, we save the assembler buffer along with the gap
      // size before we start emitting to the buffer. This allows us to
      // check that any single generated instruction doesn't overflow the
      // limit implied by the minimum gap size.
      buffer_ = buffer;
      gap_ = ComputeGap();
      // Make sure that extending the capacity leaves a big enough gap
      // for any kind of instruction.
      CHECK_GE(gap_, kMinimumGap);
      // Mark the buffer as having ensured the capacity.
      CHECK(!buffer->HasEnsuredCapacity());  // Cannot nest.
      buffer->has_ensured_capacity_ = true;
    }

    ~EnsureCapacity() {
      // Unmark the buffer, so we cannot emit after this.
      buffer_->has_ensured_capacity_ = false;
      // Make sure the generated instruction doesn't take up more
      // space than the minimum gap.
      int delta = gap_ - ComputeGap();
      CHECK_LE(delta, kMinimumGap);
    }

   private:
    AssemblerBuffer* buffer_;
    int gap_;

    int ComputeGap() { return buffer_->Capacity() - buffer_->Size(); }
  };

  bool has_ensured_capacity_;
  bool HasEnsuredCapacity() const { return has_ensured_capacity_; }

#else

  class EnsureCapacity {
   public:
    explicit EnsureCapacity(AssemblerBuffer* buffer) {
      if (buffer->cursor() > buffer->limit()) {
        buffer->ExtendCapacity(buffer->Size() + kMinimumGap);
      }
    }
  };

  // When building the C++ tests, assertion code is enabled. To allow
  // asserting that the user of the assembler buffer has ensured the
  // capacity needed for emitting, we add a dummy method in non-debug mode.
  bool HasEnsuredCapacity() const { return true; }

#endif

  // Returns the position in the instruction stream.
  int GetPosition() { return cursor_ - contents_; }

  size_t Capacity() const {
    CHECK_GE(limit_, contents_);
    return (limit_ - contents_) + kMinimumGap;
  }

  // Unconditionally increase the capacity.
  // The provided `min_capacity` must be higher than current `Capacity()`.
  void ExtendCapacity(size_t min_capacity);

 private:
  // The limit is set to kMinimumGap bytes before the end of the data area.
  // This leaves enough space for the longest possible instruction and allows
  // for a single, fast space check per instruction.
  static const int kMinimumGap = 32;

  ArenaAllocator* arena_;
  uint8_t* contents_;
  uint8_t* cursor_;
  uint8_t* limit_;
  AssemblerFixup* fixup_;
#ifndef NDEBUG
  bool fixups_processed_;
#endif

  // Head of linked list of slow paths.
  SlowPath* slow_path_;

  uint8_t* cursor() const { return cursor_; }
  uint8_t* limit() const { return limit_; }

  // Process the fixup chain starting at the given fixup. The offset is
  // non-zero for fixups in the body if the preamble is non-empty.
  void ProcessFixups(const MemoryRegion& region);

  // Compute the limit based on the data area and the capacity. See
  // description of kMinimumGap for the reasoning behind the value.
  static uint8_t* ComputeLimit(uint8_t* data, size_t capacity) {
    return data + capacity - kMinimumGap;
  }

  friend class AssemblerFixup;
};

// The purpose of this class is to ensure that we do not have to explicitly
// call the AdvancePC method (which is good for convenience and correctness).
class DebugFrameOpCodeWriterForAssembler FINAL
    : public dwarf::DebugFrameOpCodeWriter<> {
 public:
  struct DelayedAdvancePC {
    uint32_t stream_pos;
    uint32_t pc;
  };

  // This method is called by the opcode writers.
  virtual void ImplicitlyAdvancePC() FINAL;

  explicit DebugFrameOpCodeWriterForAssembler(Assembler* buffer)
      : dwarf::DebugFrameOpCodeWriter<>(false /* enabled */),
        assembler_(buffer),
        delay_emitting_advance_pc_(false),
        delayed_advance_pcs_() {
  }

  ~DebugFrameOpCodeWriterForAssembler() {
    DCHECK(delayed_advance_pcs_.empty());
  }

  // Tell the writer to delay emitting advance PC info.
  // The assembler must explicitly process all the delayed advances.
  void DelayEmittingAdvancePCs() {
    delay_emitting_advance_pc_ = true;
  }

  // Override the last delayed PC. The new PC can be out of order.
  void OverrideDelayedPC(size_t pc) {
    DCHECK(delay_emitting_advance_pc_);
    if (enabled_) {
      DCHECK(!delayed_advance_pcs_.empty());
      delayed_advance_pcs_.back().pc = pc;
    }
  }

  // Return the number of delayed advance PC entries.
  size_t NumberOfDelayedAdvancePCs() const {
    return delayed_advance_pcs_.size();
  }

  // Release the CFI stream and advance PC infos so that the assembler can patch it.
  std::pair<std::vector<uint8_t>, std::vector<DelayedAdvancePC>>
  ReleaseStreamAndPrepareForDelayedAdvancePC() {
    DCHECK(delay_emitting_advance_pc_);
    delay_emitting_advance_pc_ = false;
    std::pair<std::vector<uint8_t>, std::vector<DelayedAdvancePC>> result;
    result.first.swap(opcodes_);
    result.second.swap(delayed_advance_pcs_);
    return result;
  }

  // Reserve space for the CFI stream.
  void ReserveCFIStream(size_t capacity) {
    opcodes_.reserve(capacity);
  }

  // Append raw data to the CFI stream.
  void AppendRawData(const std::vector<uint8_t>& raw_data, size_t first, size_t last) {
    DCHECK_LE(0u, first);
    DCHECK_LE(first, last);
    DCHECK_LE(last, raw_data.size());
    opcodes_.insert(opcodes_.end(), raw_data.begin() + first, raw_data.begin() + last);
  }

 private:
  Assembler* assembler_;
  bool delay_emitting_advance_pc_;
  std::vector<DelayedAdvancePC> delayed_advance_pcs_;
};
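
// A hedged sketch of the delayed-advance-PC flow (variable names are
// illustrative, not from the ART sources):
//
//    DebugFrameOpCodeWriterForAssembler& cfi = assembler->cfi();
//    cfi.DelayEmittingAdvancePCs();     // Buffer the advance-PC positions.
//    ... emit code; the opcode writers call ImplicitlyAdvancePC() ...
//    auto stream_and_pcs = cfi.ReleaseStreamAndPrepareForDelayedAdvancePC();
//    // The assembler can now map each DelayedAdvancePC entry to its final
//    // PC (e.g. after branch fixup) and rebuild the stream with
//    // ReserveCFIStream() and AppendRawData().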

class Assembler : public DeletableArenaObject<kArenaAllocAssembler> {
 public:
  static std::unique_ptr<Assembler> Create(
      ArenaAllocator* arena,
      InstructionSet instruction_set,
      const InstructionSetFeatures* instruction_set_features = nullptr);

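  // A hedged usage sketch (not part of the original header); `pool` and the
  // instruction set choice are illustrative:
  //
  //    ArenaPool pool;
  //    ArenaAllocator arena(&pool);
  //    std::unique_ptr<Assembler> assembler = Assembler::Create(&arena, kThumb2);
  //    ... emit code through the architecture-neutral interface below ...
  //    assembler->FinalizeCode();
  //    std::vector<uint8_t> code(assembler->CodeSize());
  //    MemoryRegion region(code.data(), code.size());
  //    assembler->FinalizeInstructions(region);
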
  // Finalize the code; emit slow paths, fixup branches, add literal pool, etc.
  virtual void FinalizeCode() { buffer_.EmitSlowPaths(this); }

  // Size of generated code.
  virtual size_t CodeSize() const { return buffer_.Size(); }
  virtual const uint8_t* CodeBufferBaseAddress() const { return buffer_.contents(); }

  // Copy instructions out of assembly buffer into the given region of memory.
  virtual void FinalizeInstructions(const MemoryRegion& region) {
    buffer_.FinalizeInstructions(region);
  }

  // TODO: Implement with disassembler.
  virtual void Comment(const char* format ATTRIBUTE_UNUSED, ...) {}

  // Emit code that will create an activation on the stack.
  virtual void BuildFrame(size_t frame_size,
                          ManagedRegister method_reg,
                          ArrayRef<const ManagedRegister> callee_save_regs,
                          const ManagedRegisterEntrySpills& entry_spills) = 0;

  // Emit code that will remove an activation from the stack.
  virtual void RemoveFrame(size_t frame_size, ArrayRef<const ManagedRegister> callee_save_regs) = 0;

  virtual void IncreaseFrameSize(size_t adjust) = 0;
  virtual void DecreaseFrameSize(size_t adjust) = 0;

  // Store routines.
  virtual void Store(FrameOffset offs, ManagedRegister src, size_t size) = 0;
  virtual void StoreRef(FrameOffset dest, ManagedRegister src) = 0;
  virtual void StoreRawPtr(FrameOffset dest, ManagedRegister src) = 0;

  virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) = 0;

  virtual void StoreImmediateToThread32(ThreadOffset32 dest,
                                        uint32_t imm,
                                        ManagedRegister scratch);
  virtual void StoreImmediateToThread64(ThreadOffset64 dest,
                                        uint32_t imm,
                                        ManagedRegister scratch);

  virtual void StoreStackOffsetToThread32(ThreadOffset32 thr_offs,
                                          FrameOffset fr_offs,
                                          ManagedRegister scratch);
  virtual void StoreStackOffsetToThread64(ThreadOffset64 thr_offs,
                                          FrameOffset fr_offs,
                                          ManagedRegister scratch);

  virtual void StoreStackPointerToThread32(ThreadOffset32 thr_offs);
  virtual void StoreStackPointerToThread64(ThreadOffset64 thr_offs);

  virtual void StoreSpanning(FrameOffset dest, ManagedRegister src,
                             FrameOffset in_off, ManagedRegister scratch) = 0;

  // Load routines.
  virtual void Load(ManagedRegister dest, FrameOffset src, size_t size) = 0;

  virtual void LoadFromThread32(ManagedRegister dest, ThreadOffset32 src, size_t size);
  virtual void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size);

  virtual void LoadRef(ManagedRegister dest, FrameOffset src) = 0;
  // If unpoison_reference is true and kPoisonReference is true, then we negate the read reference.
  virtual void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
                       bool unpoison_reference) = 0;
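  //
  // A hedged illustration (not from the original header): with reference
  // poisoning enabled, "unpoisoning" is a negation of the 32-bit reference,
  // so an x86 backend might emit something like:
  //
  //    __ movl(dest, Address(base, offs));
  //    if (unpoison_reference) {
  //      __ negl(dest);  // ref = -poisoned_ref.
  //    }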

  virtual void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) = 0;

  virtual void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset32 offs);
  virtual void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs);

  // Copying routines.
  virtual void Move(ManagedRegister dest, ManagedRegister src, size_t size) = 0;

  virtual void CopyRawPtrFromThread32(FrameOffset fr_offs,
                                      ThreadOffset32 thr_offs,
                                      ManagedRegister scratch);
  virtual void CopyRawPtrFromThread64(FrameOffset fr_offs,
                                      ThreadOffset64 thr_offs,
                                      ManagedRegister scratch);

  virtual void CopyRawPtrToThread32(ThreadOffset32 thr_offs,
                                    FrameOffset fr_offs,
                                    ManagedRegister scratch);
  virtual void CopyRawPtrToThread64(ThreadOffset64 thr_offs,
                                    FrameOffset fr_offs,
                                    ManagedRegister scratch);

  virtual void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) = 0;

  virtual void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(ManagedRegister dest, Offset dest_offset,
                    ManagedRegister src, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
                    ManagedRegister scratch, size_t size) = 0;

  virtual void MemoryBarrier(ManagedRegister scratch) = 0;

  // Sign extension.
  virtual void SignExtend(ManagedRegister mreg, size_t size) = 0;

  // Zero extension.
  virtual void ZeroExtend(ManagedRegister mreg, size_t size) = 0;

  // Exploit fast access in managed code to Thread::Current().
  virtual void GetCurrentThread(ManagedRegister tr) = 0;
  virtual void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) = 0;

  // Set up out_reg to hold an Object** into the handle scope, or to be null if the
  // value is null and null_allowed. in_reg holds a possibly stale reference
  // that can be used to avoid loading the handle scope entry to see if the value is
  // null.
  virtual void CreateHandleScopeEntry(ManagedRegister out_reg,
                                      FrameOffset handlescope_offset,
                                      ManagedRegister in_reg,
                                      bool null_allowed) = 0;

  // Set up out_off to hold an Object** into the handle scope, or to be null if the
  // value is null and null_allowed.
  virtual void CreateHandleScopeEntry(FrameOffset out_off,
                                      FrameOffset handlescope_offset,
                                      ManagedRegister scratch,
                                      bool null_allowed) = 0;

  // src holds a handle scope entry (Object**); load this into dst.
  virtual void LoadReferenceFromHandleScope(ManagedRegister dst, ManagedRegister src) = 0;

  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
  // know that src may not be null.
  virtual void VerifyObject(ManagedRegister src, bool could_be_null) = 0;
  virtual void VerifyObject(FrameOffset src, bool could_be_null) = 0;

  // Call to address held at [base+offset].
  virtual void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) = 0;
  virtual void Call(FrameOffset base, Offset offset, ManagedRegister scratch) = 0;
  virtual void CallFromThread32(ThreadOffset32 offset, ManagedRegister scratch);
  virtual void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch);

  // Generate code to check if Thread::Current()->exception_ is non-null
  // and branch to an ExceptionSlowPath if it is.
  virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0;

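  // A hedged sketch (ordering illustrative, not from this header) of how a
  // JNI stub generator might drive the interface above:
  //
  //    __ BuildFrame(frame_size, method_reg, callee_save_regs, entry_spills);
  //    ... marshal arguments with Store()/Copy()/CreateHandleScopeEntry() ...
  //    __ Call(method_reg, jni_entrypoint_offset, scratch);  // Hypothetical offset.
  //    __ ExceptionPoll(scratch, 0);
  //    __ RemoveFrame(frame_size, callee_save_regs);
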
  virtual void Bind(Label* label) = 0;
  virtual void Jump(Label* label) = 0;

  virtual ~Assembler() {}

  /**
   * @brief Buffer of DWARF's Call Frame Information opcodes.
   * @details It is used by debuggers and other tools to unwind the call stack.
   */
  DebugFrameOpCodeWriterForAssembler& cfi() { return cfi_; }

 protected:
  explicit Assembler(ArenaAllocator* arena) : buffer_(arena), cfi_(this) {}

  ArenaAllocator* GetArena() {
    return buffer_.GetArena();
  }

  AssemblerBuffer buffer_;

  DebugFrameOpCodeWriterForAssembler cfi_;
};

}  // namespace art

#endif  // ART_COMPILER_UTILS_ASSEMBLER_H_