ART: Extract macro assembler
Extract the macro assembler functionality used by the JNI compiler
from the assembler interface. Templatize the new interface on
pointer size so that the type system enforces correct usage.
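
For illustration, a minimal sketch of the idea (not the actual ART
headers): the interface is templated on PointerSize, so each back end
implements the instantiation matching its word size, and passing e.g.
a 32-bit thread offset to the arm64 assembler fails to compile. The
class and type names below mirror the patch, but the bodies are
simplified stand-ins.

  #include <cstddef>
  #include <cstdint>

  enum class PointerSize : size_t { k32 = 4, k64 = 8 };

  // Simplified stand-in for art::ThreadOffset<kPointerSize>.
  template <PointerSize kPointerSize>
  class ThreadOffset {
   public:
    explicit ThreadOffset(int32_t value) : value_(value) {}
    int32_t Int32Value() const { return value_; }
   private:
    int32_t value_;
  };
  using ThreadOffset32 = ThreadOffset<PointerSize::k32>;
  using ThreadOffset64 = ThreadOffset<PointerSize::k64>;

  // The extracted, templatized interface: thread-offset parameters are
  // tied to the instantiating pointer size.
  template <PointerSize kPointerSize>
  class JNIMacroAssembler {
   public:
    virtual ~JNIMacroAssembler() {}
    virtual void StoreStackPointerToThread(ThreadOffset<kPointerSize> thr_offs) = 0;
    virtual void LoadRawPtrFromThread(ThreadOffset<kPointerSize> offs) = 0;
  };

  // A 64-bit back end (like Arm64Assembler in this change) implements
  // the k64 instantiation and therefore only accepts ThreadOffset64.
  class FakeArm64Assembler final : public JNIMacroAssembler<PointerSize::k64> {
   public:
    void StoreStackPointerToThread(ThreadOffset64 thr_offs) override {
      last_offset_ = thr_offs.Int32Value();
    }
    void LoadRawPtrFromThread(ThreadOffset64 offs) override {
      last_offset_ = offs.Int32Value();
    }
   private:
    int32_t last_offset_ = 0;
  };

  int main() {
    FakeArm64Assembler asm64;
    asm64.StoreStackPointerToThread(ThreadOffset64(16));    // OK: matches PointerSize::k64.
    // asm64.StoreStackPointerToThread(ThreadOffset32(16)); // Would not compile: wrong offset width.
    return 0;
  }

In the actual change, Arm64Assembler derives from both Assembler and
JNIMacroAssembler<PointerSize::k64>, as the header diff below shows.
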
Change-Id: Idb9f56e5b87e43ee6a7378853d8a9f01abe156b2
Test: m test-art-host
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index dc1f24a..53685bf 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -164,25 +164,16 @@
offs.Int32Value());
}
-void Arm64Assembler::StoreImmediateToThread64(ThreadOffset64 offs,
- uint32_t imm,
+void Arm64Assembler::StoreStackOffsetToThread(ThreadOffset64 tr_offs,
+ FrameOffset fr_offs,
ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
- LoadImmediate(scratch.AsXRegister(), imm);
- StoreToOffset(scratch.AsXRegister(), TR, offs.Int32Value());
-}
-
-void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset64 tr_offs,
- FrameOffset fr_offs,
- ManagedRegister m_scratch) {
- Arm64ManagedRegister scratch = m_scratch.AsArm64();
- CHECK(scratch.IsXRegister()) << scratch;
AddConstant(scratch.AsXRegister(), SP, fr_offs.Int32Value());
StoreToOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
}
-void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset64 tr_offs) {
+void Arm64Assembler::StoreStackPointerToThread(ThreadOffset64 tr_offs) {
UseScratchRegisterScope temps(&vixl_masm_);
Register temp = temps.AcquireX();
___ Mov(temp, reg_x(SP));
@@ -286,7 +277,7 @@
return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
}
-void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset64 src, size_t size) {
+void Arm64Assembler::LoadFromThread(ManagedRegister m_dst, ThreadOffset64 src, size_t size) {
return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
}
@@ -319,7 +310,7 @@
___ Ldr(reg_x(dst.AsXRegister()), MEM_OP(reg_x(base.AsXRegister()), offs.Int32Value()));
}
-void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset64 offs) {
+void Arm64Assembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset64 offs) {
Arm64ManagedRegister dst = m_dst.AsArm64();
CHECK(dst.IsXRegister()) << dst;
LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value());
@@ -355,18 +346,18 @@
}
}
-void Arm64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
- ThreadOffset64 tr_offs,
- ManagedRegister m_scratch) {
+void Arm64Assembler::CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset64 tr_offs,
+ ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
LoadFromOffset(scratch.AsXRegister(), TR, tr_offs.Int32Value());
StoreToOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
}
-void Arm64Assembler::CopyRawPtrToThread64(ThreadOffset64 tr_offs,
- FrameOffset fr_offs,
- ManagedRegister m_scratch) {
+void Arm64Assembler::CopyRawPtrToThread(ThreadOffset64 tr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsXRegister()) << scratch;
LoadFromOffset(scratch.AsXRegister(), SP, fr_offs.Int32Value());
@@ -543,8 +534,8 @@
___ Blr(reg_x(scratch.AsXRegister()));
}
-void Arm64Assembler::CallFromThread64(ThreadOffset64 offset ATTRIBUTE_UNUSED,
- ManagedRegister scratch ATTRIBUTE_UNUSED) {
+void Arm64Assembler::CallFromThread(ThreadOffset64 offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index b8434b9..d7084da 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -22,9 +22,11 @@
#include <vector>
#include "base/arena_containers.h"
+#include "base/enums.h"
#include "base/logging.h"
#include "utils/arm64/managed_register_arm64.h"
#include "utils/assembler.h"
+#include "utils/jni_macro_assembler.h"
#include "offsets.h"
// TODO: make vixl clean wrt -Wshadow, -Wunknown-pragmas, -Wmissing-noreturn
@@ -81,7 +83,7 @@
DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
};
-class Arm64Assembler FINAL : public Assembler {
+class Arm64Assembler FINAL : public Assembler, public JNIMacroAssembler<PointerSize::k64> {
public:
explicit Arm64Assembler(ArenaAllocator* arena)
: Assembler(arena),
@@ -91,6 +93,8 @@
vixl::aarch64::MacroAssembler* GetVIXLAssembler() { return &vixl_masm_; }
+ DebugFrameOpCodeWriterForAssembler& cfi() { return Assembler::cfi(); }
+
// Finalize the code.
void FinalizeCode() OVERRIDE;
@@ -122,28 +126,28 @@
void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
- void StoreImmediateToThread64(ThreadOffset64 dest, uint32_t imm, ManagedRegister scratch)
- OVERRIDE;
- void StoreStackOffsetToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch) OVERRIDE;
- void StoreStackPointerToThread64(ThreadOffset64 thr_offs) OVERRIDE;
+ void StoreStackOffsetToThread(ThreadOffset64 thr_offs,
+ FrameOffset fr_offs,
+ ManagedRegister scratch) OVERRIDE;
+ void StoreStackPointerToThread(ThreadOffset64 thr_offs) OVERRIDE;
void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
ManagedRegister scratch) OVERRIDE;
// Load routines.
void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
- void LoadFromThread64(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
+ void LoadFromThread(ManagedRegister dest, ThreadOffset64 src, size_t size) OVERRIDE;
void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs,
bool unpoison_reference) OVERRIDE;
void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
- void LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
+ void LoadRawPtrFromThread(ManagedRegister dest, ThreadOffset64 offs) OVERRIDE;
// Copying routines.
void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
- void CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset64 thr_offs,
- ManagedRegister scratch) OVERRIDE;
- void CopyRawPtrToThread64(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ void CopyRawPtrFromThread(FrameOffset fr_offs,
+ ThreadOffset64 thr_offs,
+ ManagedRegister scratch) OVERRIDE;
+ void CopyRawPtrToThread(ThreadOffset64 thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
OVERRIDE;
void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
@@ -196,7 +200,7 @@
// Call to address held at [base+offset].
void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
- void CallFromThread64(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread(ThreadOffset64 offset, ManagedRegister scratch) OVERRIDE;
// Jump to address (not setting link register)
void JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch);