Revert "Revert "ART: Implement literal pool for arm, fix branch fixup.""
This reverts commit fbeb4aede0ddc5b1e6a5a3a40cc6266fe8518c98.
Adjust block label positions; incorrectly placed catch block labels were the
reason for the original revert.
Change-Id: Ia6950d639d46b9da6b07f3ade63ab46d03d63310
diff --git a/compiler/trampolines/trampoline_compiler.cc b/compiler/trampolines/trampoline_compiler.cc
index cb51ed8..facc630 100644
--- a/compiler/trampolines/trampoline_compiler.cc
+++ b/compiler/trampolines/trampoline_compiler.cc
@@ -17,21 +17,21 @@
#include "trampoline_compiler.h"
#include "jni_env_ext.h"
-#include "utils/arm/assembler_arm.h"
+#include "utils/arm/assembler_thumb2.h"
#include "utils/arm64/assembler_arm64.h"
#include "utils/mips/assembler_mips.h"
#include "utils/mips64/assembler_mips64.h"
#include "utils/x86/assembler_x86.h"
#include "utils/x86_64/assembler_x86_64.h"
-#define __ assembler->
+#define __ assembler.
namespace art {
namespace arm {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
ThreadOffset<4> offset) {
- std::unique_ptr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kThumb2)));
+ Thumb2Assembler assembler;
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (R0) in interpreter ABI.
@@ -46,10 +46,11 @@
}
__ bkpt(0);
- size_t cs = assembler->CodeSize();
+ __ FinalizeCode();
+ size_t cs = __ CodeSize();
std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
- assembler->FinalizeInstructions(code);
+ __ FinalizeInstructions(code);
return entry_stub.release();
}
@@ -58,7 +59,7 @@
namespace arm64 {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
ThreadOffset<8> offset) {
- std::unique_ptr<Arm64Assembler> assembler(static_cast<Arm64Assembler*>(Assembler::Create(kArm64)));
+ Arm64Assembler assembler;
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (X0) in interpreter ABI.
@@ -82,11 +83,11 @@
break;
}
- assembler->EmitSlowPaths();
- size_t cs = assembler->CodeSize();
+ __ FinalizeCode();
+ size_t cs = __ CodeSize();
std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
- assembler->FinalizeInstructions(code);
+ __ FinalizeInstructions(code);
return entry_stub.release();
}
@@ -95,7 +96,7 @@
namespace mips {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
ThreadOffset<4> offset) {
- std::unique_ptr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
+ MipsAssembler assembler;
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (A0) in interpreter ABI.
@@ -112,10 +113,11 @@
__ Nop();
__ Break();
- size_t cs = assembler->CodeSize();
+ __ FinalizeCode();
+ size_t cs = __ CodeSize();
std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
- assembler->FinalizeInstructions(code);
+ __ FinalizeInstructions(code);
return entry_stub.release();
}
@@ -124,7 +126,7 @@
namespace mips64 {
static const std::vector<uint8_t>* CreateTrampoline(EntryPointCallingConvention abi,
ThreadOffset<8> offset) {
- std::unique_ptr<Mips64Assembler> assembler(static_cast<Mips64Assembler*>(Assembler::Create(kMips64)));
+ Mips64Assembler assembler;
switch (abi) {
case kInterpreterAbi: // Thread* is first argument (A0) in interpreter ABI.
@@ -141,10 +143,11 @@
__ Nop();
__ Break();
- size_t cs = assembler->CodeSize();
+ __ FinalizeCode();
+ size_t cs = __ CodeSize();
std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
- assembler->FinalizeInstructions(code);
+ __ FinalizeInstructions(code);
return entry_stub.release();
}
@@ -152,16 +155,17 @@
namespace x86 {
static const std::vector<uint8_t>* CreateTrampoline(ThreadOffset<4> offset) {
- std::unique_ptr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
+ X86Assembler assembler;
// All x86 trampolines call via the Thread* held in fs.
__ fs()->jmp(Address::Absolute(offset));
__ int3();
- size_t cs = assembler->CodeSize();
+ __ FinalizeCode();
+ size_t cs = __ CodeSize();
std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
- assembler->FinalizeInstructions(code);
+ __ FinalizeInstructions(code);
return entry_stub.release();
}
@@ -169,17 +173,17 @@
namespace x86_64 {
static const std::vector<uint8_t>* CreateTrampoline(ThreadOffset<8> offset) {
- std::unique_ptr<x86_64::X86_64Assembler>
- assembler(static_cast<x86_64::X86_64Assembler*>(Assembler::Create(kX86_64)));
+ x86_64::X86_64Assembler assembler;
// All x86 trampolines call via the Thread* held in gs.
__ gs()->jmp(x86_64::Address::Absolute(offset, true));
__ int3();
- size_t cs = assembler->CodeSize();
+ __ FinalizeCode();
+ size_t cs = __ CodeSize();
std::unique_ptr<std::vector<uint8_t>> entry_stub(new std::vector<uint8_t>(cs));
MemoryRegion code(&(*entry_stub)[0], entry_stub->size());
- assembler->FinalizeInstructions(code);
+ __ FinalizeInstructions(code);
return entry_stub.release();
}