Allow mixing of thread offsets between 32-bit and 64-bit architectures.
Begin a fuller implementation of x86-64 REX prefixes.
Doesn't implement 64-bit thread offset support for the JNI compiler.
Change-Id: If9af2f08a1833c21ddb4b4077f9b03add1a05147
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index f8b91d7..5b2c8ba 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -155,7 +155,7 @@
StoreToOffset(scratch.AsCoreRegister(), SP, offs.Int32Value());
}
-void Arm64Assembler::StoreImmediateToThread(ThreadOffset offs, uint32_t imm,
+void Arm64Assembler::StoreImmediateToThread32(ThreadOffset<4> offs, uint32_t imm,
ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsCoreRegister()) << scratch;
@@ -163,7 +163,7 @@
StoreToOffset(scratch.AsCoreRegister(), TR, offs.Int32Value());
}
-void Arm64Assembler::StoreStackOffsetToThread(ThreadOffset tr_offs,
+void Arm64Assembler::StoreStackOffsetToThread32(ThreadOffset<4> tr_offs,
FrameOffset fr_offs,
ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
@@ -172,7 +172,7 @@
StoreToOffset(scratch.AsCoreRegister(), TR, tr_offs.Int32Value());
}
-void Arm64Assembler::StoreStackPointerToThread(ThreadOffset tr_offs) {
+void Arm64Assembler::StoreStackPointerToThread32(ThreadOffset<4> tr_offs) {
// Arm64 does not support: "str sp, [dest]" therefore we use IP1 as a temp reg.
___ Mov(reg_x(IP1), reg_x(SP));
StoreToOffset(IP1, TR, tr_offs.Int32Value());
@@ -269,7 +269,7 @@
return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
}
-void Arm64Assembler::Load(ManagedRegister m_dst, ThreadOffset src, size_t size) {
+void Arm64Assembler::LoadFromThread32(ManagedRegister m_dst, ThreadOffset<4> src, size_t size) {
return Load(m_dst.AsArm64(), TR, src.Int32Value(), size);
}
@@ -294,7 +294,7 @@
LoadFromOffset(dst.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value());
}
-void Arm64Assembler::LoadRawPtrFromThread(ManagedRegister m_dst, ThreadOffset offs) {
+void Arm64Assembler::LoadRawPtrFromThread32(ManagedRegister m_dst, ThreadOffset<4> offs) {
Arm64ManagedRegister dst = m_dst.AsArm64();
CHECK(dst.IsCoreRegister()) << dst;
LoadFromOffset(dst.AsCoreRegister(), TR, offs.Int32Value());
@@ -322,8 +322,8 @@
}
}
-void Arm64Assembler::CopyRawPtrFromThread(FrameOffset fr_offs,
- ThreadOffset tr_offs,
+void Arm64Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
+ ThreadOffset<4> tr_offs,
ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
CHECK(scratch.IsCoreRegister()) << scratch;
@@ -331,7 +331,7 @@
StoreToOffset(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
}
-void Arm64Assembler::CopyRawPtrToThread(ThreadOffset tr_offs,
+void Arm64Assembler::CopyRawPtrToThread32(ThreadOffset<4> tr_offs,
FrameOffset fr_offs,
ManagedRegister m_scratch) {
Arm64ManagedRegister scratch = m_scratch.AsArm64();
@@ -486,7 +486,7 @@
___ Blr(reg_x(scratch.AsCoreRegister()));
}
-void Arm64Assembler::Call(ThreadOffset /*offset*/, ManagedRegister /*scratch*/) {
+void Arm64Assembler::CallFromThread32(ThreadOffset<4> /*offset*/, ManagedRegister /*scratch*/) {
UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}
@@ -555,7 +555,7 @@
Arm64ManagedRegister scratch = m_scratch.AsArm64();
Arm64Exception *current_exception = new Arm64Exception(scratch, stack_adjust);
exception_blocks_.push_back(current_exception);
- LoadFromOffset(scratch.AsCoreRegister(), TR, Thread::ExceptionOffset().Int32Value());
+ LoadFromOffset(scratch.AsCoreRegister(), TR, Thread::ExceptionOffset<4>().Int32Value());
___ Cmp(reg_x(scratch.AsCoreRegister()), 0);
___ B(current_exception->Entry(), COND_OP(NE));
}
@@ -569,7 +569,7 @@
// Pass exception object as argument.
// Don't care about preserving X0 as this won't return.
___ Mov(reg_x(X0), reg_x(exception->scratch_.AsCoreRegister()));
- LoadFromOffset(IP1, TR, QUICK_ENTRYPOINT_OFFSET(pDeliverException).Int32Value());
+ LoadFromOffset(IP1, TR, QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value());
___ Blr(reg_x(IP1));
// Call should never return.
___ Brk();
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 44eb6ff..3abcaad 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -79,7 +79,7 @@
class Arm64Exception;
-class Arm64Assembler : public Assembler {
+class Arm64Assembler FINAL : public Assembler {
public:
Arm64Assembler() : vixl_buf_(new byte[BUF_SIZE]),
vixl_masm_(new vixl::MacroAssembler(vixl_buf_, BUF_SIZE)) {}
@@ -111,105 +111,97 @@
// Emit code that will create an activation on the stack.
void BuildFrame(size_t frame_size, ManagedRegister method_reg,
const std::vector<ManagedRegister>& callee_save_regs,
- const ManagedRegisterEntrySpills& entry_spills);
+ const ManagedRegisterEntrySpills& entry_spills) OVERRIDE;
// Emit code that will remove an activation from the stack.
- void RemoveFrame(size_t frame_size,
- const std::vector<ManagedRegister>& callee_save_regs);
+ void RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& callee_save_regs)
+ OVERRIDE;
- void IncreaseFrameSize(size_t adjust);
- void DecreaseFrameSize(size_t adjust);
+ void IncreaseFrameSize(size_t adjust) OVERRIDE;
+ void DecreaseFrameSize(size_t adjust) OVERRIDE;
// Store routines.
- void Store(FrameOffset offs, ManagedRegister src, size_t size);
- void StoreRef(FrameOffset dest, ManagedRegister src);
- void StoreRawPtr(FrameOffset dest, ManagedRegister src);
- void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
- ManagedRegister scratch);
- void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
- ManagedRegister scratch);
- void StoreStackOffsetToThread(ThreadOffset thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch);
- void StoreStackPointerToThread(ThreadOffset thr_offs);
- void StoreSpanning(FrameOffset dest, ManagedRegister src,
- FrameOffset in_off, ManagedRegister scratch);
+ void Store(FrameOffset offs, ManagedRegister src, size_t size) OVERRIDE;
+ void StoreRef(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreRawPtr(FrameOffset dest, ManagedRegister src) OVERRIDE;
+ void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
+ void StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm, ManagedRegister scratch)
+ OVERRIDE;
+ void StoreStackOffsetToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
+ ManagedRegister scratch) OVERRIDE;
+ void StoreStackPointerToThread32(ThreadOffset<4> thr_offs) OVERRIDE;
+ void StoreSpanning(FrameOffset dest, ManagedRegister src, FrameOffset in_off,
+ ManagedRegister scratch) OVERRIDE;
// Load routines.
- void Load(ManagedRegister dest, FrameOffset src, size_t size);
- void Load(ManagedRegister dest, ThreadOffset src, size_t size);
- void LoadRef(ManagedRegister dest, FrameOffset src);
- void LoadRef(ManagedRegister dest, ManagedRegister base,
- MemberOffset offs);
- void LoadRawPtr(ManagedRegister dest, ManagedRegister base,
- Offset offs);
- void LoadRawPtrFromThread(ManagedRegister dest,
- ThreadOffset offs);
+ void Load(ManagedRegister dest, FrameOffset src, size_t size) OVERRIDE;
+ void LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) OVERRIDE;
+ void LoadRef(ManagedRegister dest, FrameOffset src) OVERRIDE;
+ void LoadRef(ManagedRegister dest, ManagedRegister base, MemberOffset offs) OVERRIDE;
+ void LoadRawPtr(ManagedRegister dest, ManagedRegister base, Offset offs) OVERRIDE;
+ void LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs) OVERRIDE;
+
// Copying routines.
- void Move(ManagedRegister dest, ManagedRegister src, size_t size);
- void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
- ManagedRegister scratch);
- void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch);
- void CopyRef(FrameOffset dest, FrameOffset src,
- ManagedRegister scratch);
- void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size);
- void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
- ManagedRegister scratch, size_t size);
- void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
- ManagedRegister scratch, size_t size);
- void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
- ManagedRegister scratch, size_t size);
- void Copy(ManagedRegister dest, Offset dest_offset,
- ManagedRegister src, Offset src_offset,
- ManagedRegister scratch, size_t size);
+ void Move(ManagedRegister dest, ManagedRegister src, size_t size) OVERRIDE;
+ void CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
+ ManagedRegister scratch) OVERRIDE;
+ void CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs, ManagedRegister scratch)
+ OVERRIDE;
+ void CopyRef(FrameOffset dest, FrameOffset src, ManagedRegister scratch) OVERRIDE;
+ void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size) OVERRIDE;
+ void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+ void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+ void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset, ManagedRegister scratch,
+ size_t size) OVERRIDE;
+ void Copy(ManagedRegister dest, Offset dest_offset, ManagedRegister src, Offset src_offset,
+ ManagedRegister scratch, size_t size) OVERRIDE;
void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
- ManagedRegister scratch, size_t size);
- void MemoryBarrier(ManagedRegister scratch);
+ ManagedRegister scratch, size_t size) OVERRIDE;
+ void MemoryBarrier(ManagedRegister scratch) OVERRIDE;
// Sign extension.
- void SignExtend(ManagedRegister mreg, size_t size);
+ void SignExtend(ManagedRegister mreg, size_t size) OVERRIDE;
// Zero extension.
- void ZeroExtend(ManagedRegister mreg, size_t size);
+ void ZeroExtend(ManagedRegister mreg, size_t size) OVERRIDE;
// Exploit fast access in managed code to Thread::Current().
- void GetCurrentThread(ManagedRegister tr);
- void GetCurrentThread(FrameOffset dest_offset,
- ManagedRegister scratch);
+ void GetCurrentThread(ManagedRegister tr) OVERRIDE;
+ void GetCurrentThread(FrameOffset dest_offset, ManagedRegister scratch) OVERRIDE;
// Set up out_reg to hold a Object** into the SIRT, or to be NULL if the
// value is null and null_allowed. in_reg holds a possibly stale reference
// that can be used to avoid loading the SIRT entry to see if the value is
// NULL.
void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
- ManagedRegister in_reg, bool null_allowed);
+ ManagedRegister in_reg, bool null_allowed) OVERRIDE;
// Set up out_off to hold a Object** into the SIRT, or to be NULL if the
// value is null and null_allowed.
void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
- ManagedRegister scratch, bool null_allowed);
+ ManagedRegister scratch, bool null_allowed) OVERRIDE;
// src holds a SIRT entry (Object**) load this into dst.
- void LoadReferenceFromSirt(ManagedRegister dst,
- ManagedRegister src);
+ void LoadReferenceFromSirt(ManagedRegister dst, ManagedRegister src) OVERRIDE;
// Heap::VerifyObject on src. In some cases (such as a reference to this) we
// know that src may not be null.
- void VerifyObject(ManagedRegister src, bool could_be_null);
- void VerifyObject(FrameOffset src, bool could_be_null);
+ void VerifyObject(ManagedRegister src, bool could_be_null) OVERRIDE;
+ void VerifyObject(FrameOffset src, bool could_be_null) OVERRIDE;
// Call to address held at [base+offset].
- void Call(ManagedRegister base, Offset offset, ManagedRegister scratch);
- void Call(FrameOffset base, Offset offset, ManagedRegister scratch);
- void Call(ThreadOffset offset, ManagedRegister scratch);
+ void Call(ManagedRegister base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void Call(FrameOffset base, Offset offset, ManagedRegister scratch) OVERRIDE;
+ void CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch) OVERRIDE;
// Jump to address (not setting link register)
void JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch);
// Generate code to check if Thread::Current()->exception_ is non-null
// and branch to a ExceptionSlowPath if it is.
- void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);
+ void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
private:
static vixl::Register reg_x(int code) {
diff --git a/compiler/utils/arm64/constants_arm64.h b/compiler/utils/arm64/constants_arm64.h
index c05c2f1..ecf9fbe 100644
--- a/compiler/utils/arm64/constants_arm64.h
+++ b/compiler/utils/arm64/constants_arm64.h
@@ -29,7 +29,7 @@
namespace art {
namespace arm64 {
- constexpr unsigned int kCalleeSavedRegsSize = 20;
+constexpr unsigned int kCalleeSavedRegsSize = 20;
} // arm64
} // art