Merge "Restore valgrind-test-art-host"
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 55a4821..976a66e 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -86,18 +86,8 @@
#
# Used to enable optimizing compiler
#
-ART_USE_OPTIMIZING_COMPILER := false
-ifneq ($(wildcard art/USE_OPTIMIZING_COMPILER),)
-$(info Enabling ART_USE_OPTIMIZING_COMPILER because of existence of art/USE_OPTIMIZING_COMPILER)
-ART_USE_OPTIMIZING_COMPILER := true
-endif
-ifeq ($(WITH_ART_USE_OPTIMIZING_COMPILER), true)
-ART_USE_OPTIMIZING_COMPILER := true
-endif
-
ifeq ($(ART_USE_OPTIMIZING_COMPILER),true)
DEX2OAT_FLAGS := --compiler-backend=Optimizing
-DALVIKVM_FLAGS += -Xcompiler-option --compiler-backend=Optimizing
endif
#
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 6ef451f..844f58e 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -21,7 +21,7 @@
# The main rules to build the default "boot" image are in
# build/core/dex_preopt_libart.mk
-include art/build/Android.common_path.mk
+include art/build/Android.common_build.mk
# Use dex2oat debug version for better error reporting
# $(1): 2ND_ or undefined, 2ND_ for 32-bit host builds.
@@ -31,7 +31,7 @@
$$($(1)HOST_CORE_IMG_OUT): $$(HOST_CORE_DEX_LOCATIONS) $$(DEX2OAT_DEPENDENCY)
@echo "host dex2oat: $$@ ($$?)"
@mkdir -p $$(dir $$@)
- $$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
+ $$(hide) $$(DEX2OAT) $$(DEX2OAT_FLAGS) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
--image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(HOST_CORE_DEX_FILES)) \
$$(addprefix --dex-location=,$$(HOST_CORE_DEX_LOCATIONS)) --oat-file=$$($(1)HOST_CORE_OAT_OUT) \
--oat-location=$$($(1)HOST_CORE_OAT) --image=$$($(1)HOST_CORE_IMG_OUT) \
@@ -54,7 +54,7 @@
$$($(1)TARGET_CORE_IMG_OUT): $$($(1)TARGET_CORE_DEX_FILES) $$(DEX2OAT_DEPENDENCY)
@echo "target dex2oat: $$@ ($$?)"
@mkdir -p $$(dir $$@)
- $$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_XMS) --runtime-arg -Xmx$(DEX2OAT_XMX) \
+ $$(hide) $$(DEX2OAT) $$(DEX2OAT_FLAGS) --runtime-arg -Xms$(DEX2OAT_XMS) --runtime-arg -Xmx$(DEX2OAT_XMX) \
--image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(TARGET_CORE_DEX_FILES)) \
$$(addprefix --dex-location=,$$(TARGET_CORE_DEX_LOCATIONS)) --oat-file=$$($(1)TARGET_CORE_OAT_OUT) \
--oat-location=$$($(1)TARGET_CORE_OAT) --image=$$($(1)TARGET_CORE_IMG_OUT) \
diff --git a/compiler/buffered_output_stream.h b/compiler/buffered_output_stream.h
index 75a3f24..bbc49df 100644
--- a/compiler/buffered_output_stream.h
+++ b/compiler/buffered_output_stream.h
@@ -23,7 +23,7 @@
namespace art {
-class BufferedOutputStream : public OutputStream {
+class BufferedOutputStream FINAL : public OutputStream {
public:
explicit BufferedOutputStream(OutputStream* out);
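A note on the FINAL annotation added above: it is assumed here to be ART's portability macro for the C++11 `final` keyword, which forbids further derivation and lets the compiler devirtualize calls through such classes. A minimal, self-contained illustration of the pattern (the macro spellings and the Stream base are assumptions, not ART's actual headers):

#include <cstddef>

// Assumed to mirror how ART's base/macros.h spells the C++11 keywords.
#define FINAL final
#define OVERRIDE override

class Stream {  // hypothetical base, standing in for art::OutputStream
 public:
  virtual ~Stream() {}
  virtual bool WriteFully(const void* buffer, size_t byte_count) = 0;
};

// FINAL: nothing can subclass NullStream and override WriteFully, so calls
// through a pointer known to be a NullStream can be devirtualized.
class NullStream FINAL : public Stream {
 public:
  bool WriteFully(const void*, size_t) OVERRIDE { return true; }
};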
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index b2c348b..fce23bc 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -137,20 +137,20 @@
};
/**
- * @class NullCheckEliminationAndTypeInference
- * @brief Null check elimination and type inference.
+ * @class NullCheckElimination
+ * @brief Null check elimination pass.
*/
-class NullCheckEliminationAndTypeInference : public PassME {
+class NullCheckElimination : public PassME {
public:
- NullCheckEliminationAndTypeInference()
- : PassME("NCE_TypeInference", kRepeatingTopologicalSortTraversal, "4_post_nce_cfg") {
+ NullCheckElimination()
+ : PassME("NCE", kRepeatingTopologicalSortTraversal, "3_post_nce_cfg") {
}
- void Start(PassDataHolder* data) const {
+ bool Gate(const PassDataHolder* data) const {
DCHECK(data != nullptr);
- CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
+ CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
DCHECK(c_unit != nullptr);
- c_unit->mir_graph->EliminateNullChecksAndInferTypesStart();
+ return c_unit->mir_graph->EliminateNullChecksGate();
}
bool Worker(PassDataHolder* data) const {
@@ -160,14 +160,35 @@
DCHECK(c_unit != nullptr);
BasicBlock* bb = pass_me_data_holder->bb;
DCHECK(bb != nullptr);
- return c_unit->mir_graph->EliminateNullChecksAndInferTypes(bb);
+ return c_unit->mir_graph->EliminateNullChecks(bb);
}
void End(PassDataHolder* data) const {
DCHECK(data != nullptr);
CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
DCHECK(c_unit != nullptr);
- c_unit->mir_graph->EliminateNullChecksAndInferTypesEnd();
+ c_unit->mir_graph->EliminateNullChecksEnd();
+ }
+};
+
+/**
+ * @class TypeInference
+ * @brief Type inference pass.
+ */
+class TypeInference : public PassME {
+ public:
+ TypeInference()
+ : PassME("TypeInference", kRepeatingTopologicalSortTraversal, "4_post_type_cfg") {
+ }
+
+ bool Worker(PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
+ CompilationUnit* c_unit = pass_me_data_holder->c_unit;
+ DCHECK(c_unit != nullptr);
+ BasicBlock* bb = pass_me_data_holder->bb;
+ DCHECK(bb != nullptr);
+ return c_unit->mir_graph->InferTypes(bb);
}
};
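The split into NullCheckElimination and TypeInference leans on the PassME protocol visible above: Gate() runs once per compilation unit and can veto the whole pass, Worker() runs per basic block under a repeating traversal until a fixed point, and End() tears down temporaries. A minimal sketch of that control flow under simplified types (the Pass and Block types here are stand-ins, not ART's PassDriverME machinery):

#include <vector>

struct Block { int id; };

struct Pass {
  virtual ~Pass() {}
  virtual bool Gate() const { return true; }  // May veto the whole pass.
  virtual bool Worker(Block* bb) const = 0;   // True if anything changed.
  virtual void End() const {}                 // Tear down temporaries.
};

// Repeating traversal: sweep all blocks until one full sweep changes
// nothing, in the spirit of kRepeatingTopologicalSortTraversal.
void RunRepeating(const Pass& pass, std::vector<Block>& blocks) {
  if (!pass.Gate()) {
    return;  // e.g. NCE disabled, or the method contains no null checks.
  }
  bool changed;
  do {
    changed = false;
    for (Block& bb : blocks) {
      changed |= pass.Worker(&bb);
    }
  } while (changed);
  pass.End();
}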
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 276b886..7e83c0c 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -94,11 +94,11 @@
topological_order_indexes_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
topological_order_loop_head_stack_(arena->Adapter(kArenaAllocTopologicalSortOrder)),
i_dom_list_(NULL),
- def_block_matrix_(NULL),
temp_scoped_alloc_(),
temp_insn_data_(nullptr),
temp_bit_vector_size_(0u),
temp_bit_vector_(nullptr),
+ temp_bit_matrix_(nullptr),
temp_gvn_(),
block_list_(arena->Adapter(kArenaAllocBBList)),
try_block_addr_(NULL),
@@ -1706,6 +1706,7 @@
temp_bit_vector_size_ = 0u;
temp_bit_vector_ = nullptr;
+ temp_bit_matrix_ = nullptr; // Def block matrix.
DCHECK(temp_scoped_alloc_.get() != nullptr);
temp_scoped_alloc_.reset();
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index fe6fb75..cc215bd 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -198,9 +198,7 @@
ArenaBitVector* use_v;
ArenaBitVector* def_v;
ArenaBitVector* live_in_v;
- ArenaBitVector* phi_v;
int32_t* vreg_to_ssa_map_exit;
- ArenaBitVector* ending_check_v; // For null check and class init check elimination.
};
/*
@@ -1022,9 +1020,10 @@
int SRegToVReg(int ssa_reg) const;
void VerifyDataflow();
void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb);
- void EliminateNullChecksAndInferTypesStart();
- bool EliminateNullChecksAndInferTypes(BasicBlock* bb);
- void EliminateNullChecksAndInferTypesEnd();
+ bool EliminateNullChecksGate();
+ bool EliminateNullChecks(BasicBlock* bb);
+ void EliminateNullChecksEnd();
+ bool InferTypes(BasicBlock* bb);
bool EliminateClassInitChecksGate();
bool EliminateClassInitChecks(BasicBlock* bb);
void EliminateClassInitChecksEnd();
@@ -1260,11 +1259,15 @@
// Stack of the loop head indexes and recalculation flags for RepeatingTopologicalSortIterator.
ArenaVector<std::pair<uint16_t, bool>> topological_order_loop_head_stack_;
int* i_dom_list_;
- ArenaBitVector** def_block_matrix_; // original num registers x num_blocks.
std::unique_ptr<ScopedArenaAllocator> temp_scoped_alloc_;
uint16_t* temp_insn_data_;
uint32_t temp_bit_vector_size_;
ArenaBitVector* temp_bit_vector_;
+ // temp_bit_matrix_ is used as one of:
+ // - def_block_matrix: original num registers x num_blocks_,
+ // - ending_null_check_matrix: num_blocks_ x original num registers,
+ // - ending_clinit_check_matrix: num_blocks_ x unique class count.
+ ArenaBitVector** temp_bit_matrix_;
std::unique_ptr<GlobalValueNumbering> temp_gvn_;
static const int kInvalidEntry = -1;
ArenaVector<BasicBlock*> block_list_;
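The comment block above is the heart of this change: a single scratch matrix, owned by temp_scoped_alloc_, is reused by three passes with different row meanings, and a null row doubles as a "this row not yet computed" marker that the predecessor-union logic in mir_optimization.cc depends on. A rough model using standard containers (BitVector and ScratchMatrix are simplified stand-ins, not ART types):

#include <memory>
#include <vector>

using BitVector = std::vector<bool>;

struct ScratchMatrix {
  // One row per register (def-block matrix) or per basic block (the two
  // ending-check matrices). Rows start null: "nothing recorded yet".
  std::vector<std::unique_ptr<BitVector>> rows;

  explicit ScratchMatrix(size_t num_rows) : rows(num_rows) {}

  bool HasRow(size_t i) const { return rows[i] != nullptr; }

  BitVector& GetOrCreateRow(size_t i, size_t num_bits) {
    if (rows[i] == nullptr) {
      rows[i] = std::make_unique<BitVector>(num_bits, false);
    }
    return *rows[i];
  }
};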
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 35dae00..322b737 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -819,96 +819,91 @@
}
}
-void MIRGraph::EliminateNullChecksAndInferTypesStart() {
- if ((cu_->disable_opt & (1 << kNullCheckElimination)) == 0) {
- if (kIsDebugBuild) {
- AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- CHECK(bb->data_flow_info == nullptr || bb->data_flow_info->ending_check_v == nullptr);
- }
- }
-
- DCHECK(temp_scoped_alloc_.get() == nullptr);
- temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
- temp_bit_vector_size_ = GetNumSSARegs();
- temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
- temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapTempSSARegisterV);
+bool MIRGraph::EliminateNullChecksGate() {
+ if ((cu_->disable_opt & (1 << kNullCheckElimination)) != 0 ||
+ (merged_df_flags_ & DF_HAS_NULL_CHKS) == 0) {
+ return false;
}
+
+ DCHECK(temp_scoped_alloc_.get() == nullptr);
+ temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
+ temp_bit_vector_size_ = GetNumSSARegs();
+ temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapNullCheck);
+ temp_bit_matrix_ = static_cast<ArenaBitVector**>(
+ temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * GetNumBlocks(), kArenaAllocMisc));
+ std::fill_n(temp_bit_matrix_, GetNumBlocks(), nullptr);
+ return true;
}
/*
- * Eliminate unnecessary null checks for a basic block. Also, while we're doing
- * an iterative walk go ahead and perform type and size inference.
+ * Eliminate unnecessary null checks for a basic block.
*/
-bool MIRGraph::EliminateNullChecksAndInferTypes(BasicBlock* bb) {
- if (bb->data_flow_info == NULL) return false;
- bool infer_changed = false;
- bool do_nce = ((cu_->disable_opt & (1 << kNullCheckElimination)) == 0);
+bool MIRGraph::EliminateNullChecks(BasicBlock* bb) {
+ if (bb->data_flow_info == nullptr) return false;
ArenaBitVector* ssa_regs_to_check = temp_bit_vector_;
- if (do_nce) {
- /*
- * Set initial state. Catch blocks don't need any special treatment.
- */
- if (bb->block_type == kEntryBlock) {
- ssa_regs_to_check->ClearAllBits();
- // Assume all ins are objects.
- for (uint16_t in_reg = GetFirstInVR();
- in_reg < GetNumOfCodeVRs(); in_reg++) {
- ssa_regs_to_check->SetBit(in_reg);
- }
- if ((cu_->access_flags & kAccStatic) == 0) {
- // If non-static method, mark "this" as non-null
- int this_reg = GetFirstInVR();
- ssa_regs_to_check->ClearBit(this_reg);
- }
- } else if (bb->predecessors.size() == 1) {
- BasicBlock* pred_bb = GetBasicBlock(bb->predecessors[0]);
- // pred_bb must have already been processed at least once.
- DCHECK(pred_bb->data_flow_info->ending_check_v != nullptr);
- ssa_regs_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
- if (pred_bb->block_type == kDalvikByteCode) {
- // Check to see if predecessor had an explicit null-check.
- MIR* last_insn = pred_bb->last_mir_insn;
- if (last_insn != nullptr) {
- Instruction::Code last_opcode = last_insn->dalvikInsn.opcode;
- if (last_opcode == Instruction::IF_EQZ) {
- if (pred_bb->fall_through == bb->id) {
- // The fall-through of a block following a IF_EQZ, set the vA of the IF_EQZ to show that
- // it can't be null.
- ssa_regs_to_check->ClearBit(last_insn->ssa_rep->uses[0]);
- }
- } else if (last_opcode == Instruction::IF_NEZ) {
- if (pred_bb->taken == bb->id) {
- // The taken block following a IF_NEZ, set the vA of the IF_NEZ to show that it can't be
- // null.
- ssa_regs_to_check->ClearBit(last_insn->ssa_rep->uses[0]);
- }
+ /*
+ * Set initial state. Catch blocks don't need any special treatment.
+ */
+ if (bb->block_type == kEntryBlock) {
+ ssa_regs_to_check->ClearAllBits();
+ // Assume all ins are objects.
+ for (uint16_t in_reg = GetFirstInVR();
+ in_reg < GetNumOfCodeVRs(); in_reg++) {
+ ssa_regs_to_check->SetBit(in_reg);
+ }
+ if ((cu_->access_flags & kAccStatic) == 0) {
+ // If non-static method, mark "this" as non-null
+ int this_reg = GetFirstInVR();
+ ssa_regs_to_check->ClearBit(this_reg);
+ }
+ } else if (bb->predecessors.size() == 1) {
+ BasicBlock* pred_bb = GetBasicBlock(bb->predecessors[0]);
+ // pred_bb must have already been processed at least once.
+ DCHECK(temp_bit_matrix_[pred_bb->id] != nullptr);
+ ssa_regs_to_check->Copy(temp_bit_matrix_[pred_bb->id]);
+ if (pred_bb->block_type == kDalvikByteCode) {
+ // Check to see if predecessor had an explicit null-check.
+ MIR* last_insn = pred_bb->last_mir_insn;
+ if (last_insn != nullptr) {
+ Instruction::Code last_opcode = last_insn->dalvikInsn.opcode;
+ if (last_opcode == Instruction::IF_EQZ) {
+ if (pred_bb->fall_through == bb->id) {
+ // On the fall-through edge of a block ending in IF_EQZ, the tested
+ // register (the vA of the IF_EQZ) can't be null.
+ ssa_regs_to_check->ClearBit(last_insn->ssa_rep->uses[0]);
+ }
+ } else if (last_opcode == Instruction::IF_NEZ) {
+ if (pred_bb->taken == bb->id) {
+ // On the taken edge of a block ending in IF_NEZ, the tested register
+ // (the vA of the IF_NEZ) can't be null.
+ ssa_regs_to_check->ClearBit(last_insn->ssa_rep->uses[0]);
}
}
}
- } else {
- // Starting state is union of all incoming arcs
- bool copied_first = false;
- for (BasicBlockId pred_id : bb->predecessors) {
- BasicBlock* pred_bb = GetBasicBlock(pred_id);
- DCHECK(pred_bb != nullptr);
- DCHECK(pred_bb->data_flow_info != nullptr);
- if (pred_bb->data_flow_info->ending_check_v == nullptr) {
- continue;
- }
- if (!copied_first) {
- copied_first = true;
- ssa_regs_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
- } else {
- ssa_regs_to_check->Union(pred_bb->data_flow_info->ending_check_v);
- }
- }
- DCHECK(copied_first); // At least one predecessor must have been processed before this bb.
}
- // At this point, ssa_regs_to_check shows which sregs have an object definition with
- // no intervening uses.
+ } else {
+ // Starting state is union of all incoming arcs
+ bool copied_first = false;
+ for (BasicBlockId pred_id : bb->predecessors) {
+ BasicBlock* pred_bb = GetBasicBlock(pred_id);
+ DCHECK(pred_bb != nullptr);
+ DCHECK(pred_bb->data_flow_info != nullptr);
+ if (temp_bit_matrix_[pred_bb->id] == nullptr) {
+ continue;
+ }
+ if (!copied_first) {
+ copied_first = true;
+ ssa_regs_to_check->Copy(temp_bit_matrix_[pred_bb->id]);
+ } else {
+ ssa_regs_to_check->Union(temp_bit_matrix_[pred_bb->id]);
+ }
+ }
+ DCHECK(copied_first); // At least one predecessor must have been processed before this bb.
}
+ // At this point, ssa_regs_to_check shows which sregs have an object definition with
+ // no intervening uses.
// Walk through the instructions in the block, updating as necessary
for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
@@ -916,12 +911,6 @@
continue;
}
- // Propagate type info.
- infer_changed = InferTypeAndSize(bb, mir, infer_changed);
- if (!do_nce) {
- continue;
- }
-
uint64_t df_attributes = GetDataFlowAttributes(mir);
// Might need a null check?
@@ -1022,35 +1011,48 @@
// Did anything change?
bool nce_changed = false;
- if (do_nce) {
- if (bb->data_flow_info->ending_check_v == nullptr) {
- DCHECK(temp_scoped_alloc_.get() != nullptr);
- bb->data_flow_info->ending_check_v = new (temp_scoped_alloc_.get()) ArenaBitVector(
- temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapNullCheck);
- nce_changed = ssa_regs_to_check->GetHighestBitSet() != -1;
- bb->data_flow_info->ending_check_v->Copy(ssa_regs_to_check);
- } else if (!ssa_regs_to_check->SameBitsSet(bb->data_flow_info->ending_check_v)) {
- nce_changed = true;
- bb->data_flow_info->ending_check_v->Copy(ssa_regs_to_check);
- }
+ ArenaBitVector* old_ending_ssa_regs_to_check = temp_bit_matrix_[bb->id];
+ if (old_ending_ssa_regs_to_check == nullptr) {
+ DCHECK(temp_scoped_alloc_.get() != nullptr);
+ nce_changed = ssa_regs_to_check->GetHighestBitSet() != -1;
+ temp_bit_matrix_[bb->id] = ssa_regs_to_check;
+ // Create a new ssa_regs_to_check for next BB.
+ temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapNullCheck);
+ } else if (!ssa_regs_to_check->SameBitsSet(old_ending_ssa_regs_to_check)) {
+ nce_changed = true;
+ temp_bit_matrix_[bb->id] = ssa_regs_to_check;
+ temp_bit_vector_ = old_ending_ssa_regs_to_check; // Reuse as ssa_regs_to_check for the next BB.
}
- return infer_changed | nce_changed;
+ return nce_changed;
}
-void MIRGraph::EliminateNullChecksAndInferTypesEnd() {
- if ((cu_->disable_opt & (1 << kNullCheckElimination)) == 0) {
- // Clean up temporaries.
- temp_bit_vector_size_ = 0u;
- temp_bit_vector_ = nullptr;
- AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- if (bb->data_flow_info != nullptr) {
- bb->data_flow_info->ending_check_v = nullptr;
- }
+void MIRGraph::EliminateNullChecksEnd() {
+ // Clean up temporaries.
+ temp_bit_vector_size_ = 0u;
+ temp_bit_vector_ = nullptr;
+ temp_bit_matrix_ = nullptr;
+ DCHECK(temp_scoped_alloc_.get() != nullptr);
+ temp_scoped_alloc_.reset();
+}
+
+/*
+ * Perform type and size inference for a basic block.
+ */
+bool MIRGraph::InferTypes(BasicBlock* bb) {
+ if (bb->data_flow_info == nullptr) return false;
+
+ bool infer_changed = false;
+ for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ if (mir->ssa_rep == NULL) {
+ continue;
}
- DCHECK(temp_scoped_alloc_.get() != nullptr);
- temp_scoped_alloc_.reset();
+
+ // Propagate type info.
+ infer_changed = InferTypeAndSize(bb, mir, infer_changed);
}
+
+ return infer_changed;
}
bool MIRGraph::EliminateClassInitChecksGate() {
@@ -1059,13 +1061,6 @@
return false;
}
- if (kIsDebugBuild) {
- AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- CHECK(bb->data_flow_info == nullptr || bb->data_flow_info->ending_check_v == nullptr);
- }
- }
-
DCHECK(temp_scoped_alloc_.get() == nullptr);
temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
@@ -1139,6 +1134,9 @@
temp_bit_vector_size_ = unique_class_count;
temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
+ temp_bit_matrix_ = static_cast<ArenaBitVector**>(
+ temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * GetNumBlocks(), kArenaAllocMisc));
+ std::fill_n(temp_bit_matrix_, GetNumBlocks(), nullptr);
DCHECK_GT(temp_bit_vector_size_, 0u);
return true;
}
@@ -1148,7 +1146,7 @@
*/
bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
DCHECK_EQ((cu_->disable_opt & (1 << kClassInitCheckElimination)), 0u);
- if (bb->data_flow_info == NULL) {
+ if (bb->data_flow_info == nullptr) {
return false;
}
@@ -1164,8 +1162,8 @@
// pred_bb must have already been processed at least once.
DCHECK(pred_bb != nullptr);
DCHECK(pred_bb->data_flow_info != nullptr);
- DCHECK(pred_bb->data_flow_info->ending_check_v != nullptr);
- classes_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
+ DCHECK(temp_bit_matrix_[pred_bb->id] != nullptr);
+ classes_to_check->Copy(temp_bit_matrix_[pred_bb->id]);
} else {
// Starting state is union of all incoming arcs.
bool copied_first = false;
@@ -1173,14 +1171,14 @@
BasicBlock* pred_bb = GetBasicBlock(pred_id);
DCHECK(pred_bb != nullptr);
DCHECK(pred_bb->data_flow_info != nullptr);
- if (pred_bb->data_flow_info->ending_check_v == nullptr) {
+ if (temp_bit_matrix_[pred_bb->id] == nullptr) {
continue;
}
if (!copied_first) {
copied_first = true;
- classes_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
+ classes_to_check->Copy(temp_bit_matrix_[pred_bb->id]);
} else {
- classes_to_check->Union(pred_bb->data_flow_info->ending_check_v);
+ classes_to_check->Union(temp_bit_matrix_[pred_bb->id]);
}
}
DCHECK(copied_first); // At least one predecessor must have been processed before this bb.
@@ -1211,16 +1209,18 @@
// Did anything change?
bool changed = false;
- if (bb->data_flow_info->ending_check_v == nullptr) {
+ ArenaBitVector* old_ending_classes_to_check = temp_bit_matrix_[bb->id];
+ if (old_ending_classes_to_check == nullptr) {
DCHECK(temp_scoped_alloc_.get() != nullptr);
- DCHECK(bb->data_flow_info != nullptr);
- bb->data_flow_info->ending_check_v = new (temp_scoped_alloc_.get()) ArenaBitVector(
- temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
changed = classes_to_check->GetHighestBitSet() != -1;
- bb->data_flow_info->ending_check_v->Copy(classes_to_check);
- } else if (!classes_to_check->Equal(bb->data_flow_info->ending_check_v)) {
+ temp_bit_matrix_[bb->id] = classes_to_check;
+ // Create a new classes_to_check for next BB.
+ temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
+ } else if (!classes_to_check->Equal(old_ending_classes_to_check)) {
changed = true;
- bb->data_flow_info->ending_check_v->Copy(classes_to_check);
+ temp_bit_matrix_[bb->id] = classes_to_check;
+ temp_bit_vector_ = old_ending_classes_to_check; // Reuse as classes_to_check for the next BB.
}
return changed;
}
@@ -1229,13 +1229,7 @@
// Clean up temporaries.
temp_bit_vector_size_ = 0u;
temp_bit_vector_ = nullptr;
- AllNodesIterator iter(this);
- for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
- if (bb->data_flow_info != nullptr) {
- bb->data_flow_info->ending_check_v = nullptr;
- }
- }
-
+ temp_bit_matrix_ = nullptr;
DCHECK(temp_insn_data_ != nullptr);
temp_insn_data_ = nullptr;
DCHECK(temp_scoped_alloc_.get() != nullptr);
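One detail of the rewritten fixed-point loops deserves a note: at the end of each block, the working set is installed into temp_bit_matrix_ and the displaced ending vector is recycled as the next working vector, which removes the per-block Copy() the old per-BB ending_check_v code performed. A simplified sketch of the exchange (std::vector<bool> stands in for ArenaBitVector; in ART the vectors are arena-allocated, so nothing here would be freed):

#include <algorithm>
#include <vector>

using BitVector = std::vector<bool>;

// Returns true if bb_id's ending set changed, i.e. the repeating traversal
// must keep going. 'working' plays the role of temp_bit_vector_ and is
// fully rewritten at the start of each block, so recycling it is safe.
bool PublishEndingSet(int bb_id, BitVector*& working,
                      std::vector<BitVector*>& ending_per_block) {
  BitVector* old_ending = ending_per_block[bb_id];
  if (old_ending == nullptr) {
    // First visit: publish the working set and allocate a fresh one.
    bool changed = std::any_of(working->begin(), working->end(),
                               [](bool bit) { return bit; });
    ending_per_block[bb_id] = working;
    working = new BitVector(working->size(), false);  // arena-style: never freed
    return changed;
  }
  if (*working != *old_ending) {
    ending_per_block[bb_id] = working;
    working = old_ending;  // Recycle the old vector: no copy, no allocation.
    return true;
  }
  return false;  // Fixed point reached for this block.
}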
diff --git a/compiler/dex/pass_driver_me_opts.cc b/compiler/dex/pass_driver_me_opts.cc
index 6281062..cd3ffd4 100644
--- a/compiler/dex/pass_driver_me_opts.cc
+++ b/compiler/dex/pass_driver_me_opts.cc
@@ -37,7 +37,8 @@
GetPassInstance<CacheMethodLoweringInfo>(),
GetPassInstance<SpecialMethodInliner>(),
GetPassInstance<CodeLayout>(),
- GetPassInstance<NullCheckEliminationAndTypeInference>(),
+ GetPassInstance<NullCheckElimination>(),
+ GetPassInstance<TypeInference>(),
GetPassInstance<ClassInitCheckElimination>(),
GetPassInstance<GlobalValueNumberingPass>(),
GetPassInstance<BBCombine>(),
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 3cc573b..4388041 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -124,7 +124,7 @@
for (uint32_t idx : bb->data_flow_info->def_v->Indexes()) {
/* Block bb defines register idx */
- def_block_matrix_[idx]->SetBit(bb->id);
+ temp_bit_matrix_[idx]->SetBit(bb->id);
}
return true;
}
@@ -132,15 +132,17 @@
void MIRGraph::ComputeDefBlockMatrix() {
int num_registers = GetNumOfCodeAndTempVRs();
/* Allocate num_registers bit vector pointers */
- def_block_matrix_ = static_cast<ArenaBitVector**>
- (arena_->Alloc(sizeof(ArenaBitVector *) * num_registers,
- kArenaAllocDFInfo));
+ DCHECK(temp_scoped_alloc_ != nullptr);
+ DCHECK(temp_bit_matrix_ == nullptr);
+ temp_bit_matrix_ = static_cast<ArenaBitVector**>(
+ temp_scoped_alloc_->Alloc(sizeof(ArenaBitVector*) * num_registers, kArenaAllocDFInfo));
int i;
/* Initialize num_registers vectors with num_blocks bits each */
for (i = 0; i < num_registers; i++) {
- def_block_matrix_[i] =
- new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapBMatrix);
+ temp_bit_matrix_[i] = new (temp_scoped_alloc_.get()) ArenaBitVector(arena_, GetNumBlocks(),
+ false, kBitMapBMatrix);
+ temp_bit_matrix_[i]->ClearAllBits();
}
AllNodesIterator iter(this);
@@ -159,7 +161,7 @@
int num_regs = GetNumOfCodeVRs();
int in_reg = GetFirstInVR();
for (; in_reg < num_regs; in_reg++) {
- def_block_matrix_[in_reg]->SetBit(GetEntryBlock()->id);
+ temp_bit_matrix_[in_reg]->SetBit(GetEntryBlock()->id);
}
}
@@ -478,7 +480,7 @@
/* Iterate through each Dalvik register */
for (dalvik_reg = GetNumOfCodeAndTempVRs() - 1; dalvik_reg >= 0; dalvik_reg--) {
- input_blocks->Copy(def_block_matrix_[dalvik_reg]);
+ input_blocks->Copy(temp_bit_matrix_[dalvik_reg]);
phi_blocks->ClearAllBits();
do {
// TUNING: When we repeat this, we could skip indexes from the previous pass.
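For orientation, the matrix filled in here drives classic phi placement: bit b of row r means "block b defines register r", and those rows seed the iterated dominance-frontier loop above. A stand-alone version of the matrix construction under simplified types (not ART's classes):

#include <vector>

using BitVector = std::vector<bool>;

// defs_per_block[bb] lists the virtual registers that block bb defines.
std::vector<BitVector> BuildDefBlockMatrix(
    const std::vector<std::vector<int>>& defs_per_block, int num_registers) {
  const size_t num_blocks = defs_per_block.size();
  std::vector<BitVector> matrix(num_registers, BitVector(num_blocks, false));
  for (size_t bb = 0; bb < num_blocks; ++bb) {
    for (int reg : defs_per_block[bb]) {
      matrix[reg][bb] = true;  // Block bb defines register reg.
    }
  }
  return matrix;
}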
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index 74ee038..c32bdb4 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_ELF_BUILDER_H_
#include "base/stl_util.h"
+#include "base/value_object.h"
#include "buffered_output_stream.h"
#include "elf_utils.h"
#include "file_output_stream.h"
@@ -26,11 +27,12 @@
namespace art {
template <typename Elf_Word, typename Elf_Sword, typename Elf_Shdr>
-class ElfSectionBuilder {
+class ElfSectionBuilder : public ValueObject {
public:
ElfSectionBuilder(const std::string& sec_name, Elf_Word type, Elf_Word flags,
const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> *link, Elf_Word info,
- Elf_Word align, Elf_Word entsize) : name_(sec_name), link_(link) {
+ Elf_Word align, Elf_Word entsize)
+ : section_index_(0), name_(sec_name), link_(link) {
memset(§ion_, 0, sizeof(section_));
section_.sh_type = type;
section_.sh_flags = flags;
@@ -39,23 +41,41 @@
section_.sh_entsize = entsize;
}
- virtual ~ElfSectionBuilder() {}
+ ~ElfSectionBuilder() {}
- Elf_Shdr section_;
- Elf_Word section_index_ = 0;
-
- Elf_Word GetLink() {
- return (link_) ? link_->section_index_ : 0;
+ Elf_Word GetLink() const {
+ return (link_ != nullptr) ? link_->section_index_ : 0;
}
- const std::string name_;
+ const Elf_Shdr* GetSection() const {
+ return §ion_;
+ }
- protected:
- const ElfSectionBuilder* link_;
+ Elf_Shdr* GetSection() {
+ return §ion_;
+ }
+
+ Elf_Word GetSectionIndex() const {
+ return section_index_;
+ }
+
+ void SetSectionIndex(Elf_Word section_index) {
+ section_index_ = section_index;
+ }
+
+ const std::string& GetName() const {
+ return name_;
+ }
+
+ private:
+ Elf_Shdr section_;
+ Elf_Word section_index_;
+ const std::string name_;
+ const ElfSectionBuilder* const link_;
};
template <typename Elf_Word, typename Elf_Sword, typename Elf_Dyn, typename Elf_Shdr>
-class ElfDynamicBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
+class ElfDynamicBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
public:
void AddDynamicTag(Elf_Sword tag, Elf_Word d_un) {
if (tag == DT_NULL) {
@@ -65,7 +85,7 @@
}
void AddDynamicTag(Elf_Sword tag, Elf_Word d_un,
- ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* section) {
+ const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* section) {
if (tag == DT_NULL) {
return;
}
@@ -78,7 +98,7 @@
link, 0, kPageSize, sizeof(Elf_Dyn)) {}
~ElfDynamicBuilder() {}
- Elf_Word GetSize() {
+ Elf_Word GetSize() const {
// Add 1 for the DT_NULL, 1 for DT_STRSZ, and 1 for DT_SONAME. All of
// these must be added when we actually put the file together because
// their values are very dependent on state.
@@ -89,13 +109,13 @@
// table and soname_off should be the offset of the soname in .dynstr.
// Since neither can be found prior to final layout we will wait until here
// to add them.
- std::vector<Elf_Dyn> GetDynamics(Elf_Word strsz, Elf_Word soname) {
+ std::vector<Elf_Dyn> GetDynamics(Elf_Word strsz, Elf_Word soname) const {
std::vector<Elf_Dyn> ret;
for (auto it = dynamics_.cbegin(); it != dynamics_.cend(); ++it) {
- if (it->section_) {
+ if (it->section_ != nullptr) {
// We are adding an address relative to a section.
ret.push_back(
- {it->tag_, {it->off_ + it->section_->section_.sh_addr}});
+ {it->tag_, {it->off_ + it->section_->GetSection()->sh_addr}});
} else {
ret.push_back({it->tag_, {it->off_}});
}
@@ -106,9 +126,9 @@
return ret;
}
- protected:
+ private:
struct ElfDynamicState {
- ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* section_;
+ const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* section_;
Elf_Sword tag_;
Elf_Word off_;
};
@@ -116,39 +136,50 @@
};
template <typename Elf_Word, typename Elf_Sword, typename Elf_Shdr>
-class ElfRawSectionBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
+class ElfRawSectionBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
public:
ElfRawSectionBuilder(const std::string& sec_name, Elf_Word type, Elf_Word flags,
const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* link, Elf_Word info,
Elf_Word align, Elf_Word entsize)
: ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>(sec_name, type, flags, link, info, align,
- entsize) {}
- ~ElfRawSectionBuilder() {}
- std::vector<uint8_t>* GetBuffer() { return &buf_; }
- void SetBuffer(std::vector<uint8_t>&& buf) { buf_ = buf; }
+ entsize) {
+ }
- protected:
+ ~ElfRawSectionBuilder() {}
+
+ std::vector<uint8_t>* GetBuffer() {
+ return &buf_;
+ }
+
+ void SetBuffer(const std::vector<uint8_t>& buf) {
+ buf_ = buf;
+ }
+
+ private:
std::vector<uint8_t> buf_;
};
template <typename Elf_Word, typename Elf_Sword, typename Elf_Shdr>
-class ElfOatSectionBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
+class ElfOatSectionBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
public:
ElfOatSectionBuilder(const std::string& sec_name, Elf_Word size, Elf_Word offset,
Elf_Word type, Elf_Word flags)
: ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>(sec_name, type, flags, nullptr, 0, kPageSize,
- 0), offset_(offset), size_(size) {}
+ 0),
+ offset_(offset), size_(size) {
+ }
+
~ElfOatSectionBuilder() {}
- Elf_Word GetOffset() {
+ Elf_Word GetOffset() const {
return offset_;
}
- Elf_Word GetSize() {
+ Elf_Word GetSize() const {
return size_;
}
- protected:
+ private:
// Offset of the content within the file.
Elf_Word offset_;
// Size of the content within the file.
@@ -175,7 +206,7 @@
template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr, typename Elf_Sym,
typename Elf_Shdr>
-class ElfSymtabBuilder : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
+class ElfSymtabBuilder FINAL : public ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> {
public:
// Add a symbol with given name to this symtab. The symbol refers to
// 'relative_addr' within the given section and has the given attributes.
@@ -202,10 +233,12 @@
strtab_(str_name,
str_type,
((alloc) ? SHF_ALLOC : 0U),
- nullptr, 0, 1, 1) {}
+ nullptr, 0, 1, 1) {
+ }
+
~ElfSymtabBuilder() {}
- std::vector<Elf_Word> GenerateHashContents() {
+ std::vector<Elf_Word> GenerateHashContents() const {
// Here is how the ELF hash table works.
// There are 3 arrays to worry about.
// * The symbol table where the symbol information is.
@@ -295,7 +328,7 @@
tab += it->name_;
tab += '\0';
}
- strtab_.section_.sh_size = tab.size();
+ strtab_.GetSection()->sh_size = tab.size();
return tab;
}
@@ -311,13 +344,13 @@
memset(&sym, 0, sizeof(sym));
sym.st_name = it->name_idx_;
if (it->is_relative_) {
- sym.st_value = it->addr_ + it->section_->section_.sh_offset;
+ sym.st_value = it->addr_ + it->section_->GetSection()->sh_offset;
} else {
sym.st_value = it->addr_;
}
sym.st_size = it->size_;
sym.st_other = it->other_;
- sym.st_shndx = it->section_->section_index_;
+ sym.st_shndx = it->section_->GetSectionIndex();
sym.st_info = it->info_;
ret.push_back(sym);
@@ -325,7 +358,7 @@
return ret;
}
- Elf_Word GetSize() {
+ Elf_Word GetSize() const {
// 1 is for the implicit NULL symbol.
return symbols_.size() + 1;
}
@@ -334,7 +367,7 @@
return &strtab_;
}
- protected:
+ private:
struct ElfSymbolState {
const std::string name_;
const ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* section_;
@@ -377,18 +410,26 @@
protected:
explicit ElfFilePiece(Elf_Word offset) : offset_(offset) {}
- virtual std::string GetDescription() = 0;
+ Elf_Word GetOffset() const {
+ return offset_;
+ }
+
+ virtual const char* GetDescription() const = 0;
virtual bool DoActualWrite(File* elf_file) = 0;
- Elf_Word offset_;
+ private:
+ const Elf_Word offset_;
+
+ DISALLOW_COPY_AND_ASSIGN(ElfFilePiece);
};
template <typename Elf_Word>
-class ElfFileMemoryPiece : public ElfFilePiece<Elf_Word> {
+class ElfFileMemoryPiece FINAL : public ElfFilePiece<Elf_Word> {
public:
ElfFileMemoryPiece(const std::string& name, Elf_Word offset, const void* data, Elf_Word size)
: ElfFilePiece<Elf_Word>(offset), dbg_name_(name), data_(data), size_(size) {}
+ protected:
bool DoActualWrite(File* elf_file) OVERRIDE {
DCHECK(data_ != nullptr || size_ == 0U) << dbg_name_ << " " << size_;
@@ -400,8 +441,8 @@
return true;
}
- std::string GetDescription() OVERRIDE {
- return dbg_name_;
+ const char* GetDescription() const OVERRIDE {
+ return dbg_name_.c_str();
}
private:
@@ -418,13 +459,14 @@
};
template <typename Elf_Word>
-class ElfFileRodataPiece : public ElfFilePiece<Elf_Word> {
+class ElfFileRodataPiece FINAL : public ElfFilePiece<Elf_Word> {
public:
ElfFileRodataPiece(Elf_Word offset, CodeOutput* output) : ElfFilePiece<Elf_Word>(offset),
output_(output) {}
+ protected:
bool DoActualWrite(File* elf_file) OVERRIDE {
- output_->SetCodeOffset(this->offset_);
+ output_->SetCodeOffset(this->GetOffset());
std::unique_ptr<BufferedOutputStream> output_stream(
new BufferedOutputStream(new FileOutputStream(elf_file)));
if (!output_->Write(output_stream.get())) {
@@ -435,20 +477,23 @@
return true;
}
- std::string GetDescription() OVERRIDE {
+ const char* GetDescription() const OVERRIDE {
return ".rodata";
}
private:
- CodeOutput* output_;
+ CodeOutput* const output_;
+
+ DISALLOW_COPY_AND_ASSIGN(ElfFileRodataPiece);
};
template <typename Elf_Word>
-class ElfFileOatTextPiece : public ElfFilePiece<Elf_Word> {
+class ElfFileOatTextPiece FINAL : public ElfFilePiece<Elf_Word> {
public:
ElfFileOatTextPiece(Elf_Word offset, CodeOutput* output) : ElfFilePiece<Elf_Word>(offset),
output_(output) {}
+ protected:
bool DoActualWrite(File* elf_file) OVERRIDE {
// All data is written by the ElfFileRodataPiece right now, as the oat writer writes in one
// piece. This is for future flexibility.
@@ -456,12 +501,14 @@
return true;
}
- std::string GetDescription() OVERRIDE {
+ const char* GetDescription() const OVERRIDE {
return ".text";
}
private:
- CodeOutput* output_;
+ CodeOutput* const output_;
+
+ DISALLOW_COPY_AND_ASSIGN(ElfFileOatTextPiece);
};
template <typename Elf_Word>
@@ -513,6 +560,14 @@
}
~ElfBuilder() {}
+ const ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>& GetTextBuilder() const {
+ return text_builder_;
+ }
+
+ ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr>* GetSymtabBuilder() {
+ return &symtab_builder_;
+ }
+
bool Init() {
// The basic layout of the elf file. Order may be different in final output.
// +-------------------------+
@@ -676,34 +731,40 @@
section_index_ = 1;
// setup .dynsym
- section_ptrs_.push_back(&dynsym_builder_.section_);
+ section_ptrs_.push_back(dynsym_builder_.GetSection());
AssignSectionStr(&dynsym_builder_, &shstrtab_);
- dynsym_builder_.section_index_ = section_index_++;
+ dynsym_builder_.SetSectionIndex(section_index_);
+ section_index_++;
// Setup .dynstr
- section_ptrs_.push_back(&dynsym_builder_.GetStrTab()->section_);
+ section_ptrs_.push_back(dynsym_builder_.GetStrTab()->GetSection());
AssignSectionStr(dynsym_builder_.GetStrTab(), &shstrtab_);
- dynsym_builder_.GetStrTab()->section_index_ = section_index_++;
+ dynsym_builder_.GetStrTab()->SetSectionIndex(section_index_);
+ section_index_++;
// Setup .hash
- section_ptrs_.push_back(&hash_builder_.section_);
+ section_ptrs_.push_back(hash_builder_.GetSection());
AssignSectionStr(&hash_builder_, &shstrtab_);
- hash_builder_.section_index_ = section_index_++;
+ hash_builder_.SetSectionIndex(section_index_);
+ section_index_++;
// Setup .rodata
- section_ptrs_.push_back(&rodata_builder_.section_);
+ section_ptrs_.push_back(rodata_builder_.GetSection());
AssignSectionStr(&rodata_builder_, &shstrtab_);
- rodata_builder_.section_index_ = section_index_++;
+ rodata_builder_.SetSectionIndex(section_index_);
+ section_index_++;
// Setup .text
- section_ptrs_.push_back(&text_builder_.section_);
+ section_ptrs_.push_back(text_builder_.GetSection());
AssignSectionStr(&text_builder_, &shstrtab_);
- text_builder_.section_index_ = section_index_++;
+ text_builder_.SetSectionIndex(section_index_);
+ section_index_++;
// Setup .dynamic
- section_ptrs_.push_back(&dynamic_builder_.section_);
+ section_ptrs_.push_back(dynamic_builder_.GetSection());
AssignSectionStr(&dynamic_builder_, &shstrtab_);
- dynamic_builder_.section_index_ = section_index_++;
+ dynamic_builder_.SetSectionIndex(section_index_);
+ section_index_++;
// Fill in the hash section.
hash_ = dynsym_builder_.GenerateHashContents();
@@ -718,64 +779,67 @@
// Get the layout of the sections.
//
// Get the layout of the dynsym section.
- dynsym_builder_.section_.sh_offset = RoundUp(base_offset, dynsym_builder_.section_.sh_addralign);
- dynsym_builder_.section_.sh_addr = dynsym_builder_.section_.sh_offset;
- dynsym_builder_.section_.sh_size = dynsym_builder_.GetSize() * sizeof(Elf_Sym);
- dynsym_builder_.section_.sh_link = dynsym_builder_.GetLink();
+ dynsym_builder_.GetSection()->sh_offset =
+ RoundUp(base_offset, dynsym_builder_.GetSection()->sh_addralign);
+ dynsym_builder_.GetSection()->sh_addr = dynsym_builder_.GetSection()->sh_offset;
+ dynsym_builder_.GetSection()->sh_size = dynsym_builder_.GetSize() * sizeof(Elf_Sym);
+ dynsym_builder_.GetSection()->sh_link = dynsym_builder_.GetLink();
// Get the layout of the dynstr section.
- dynsym_builder_.GetStrTab()->section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
- (dynsym_builder_.GetStrTab()->section_,
- dynsym_builder_.section_);
- dynsym_builder_.GetStrTab()->section_.sh_addr = dynsym_builder_.GetStrTab()->section_.sh_offset;
- dynsym_builder_.GetStrTab()->section_.sh_size = dynstr_.size();
- dynsym_builder_.GetStrTab()->section_.sh_link = dynsym_builder_.GetStrTab()->GetLink();
+ dynsym_builder_.GetStrTab()->GetSection()->sh_offset =
+ NextOffset<Elf_Word, Elf_Shdr>(*dynsym_builder_.GetStrTab()->GetSection(),
+ *dynsym_builder_.GetSection());
+ dynsym_builder_.GetStrTab()->GetSection()->sh_addr =
+ dynsym_builder_.GetStrTab()->GetSection()->sh_offset;
+ dynsym_builder_.GetStrTab()->GetSection()->sh_size = dynstr_.size();
+ dynsym_builder_.GetStrTab()->GetSection()->sh_link = dynsym_builder_.GetStrTab()->GetLink();
// Get the layout of the hash section
- hash_builder_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
- (hash_builder_.section_,
- dynsym_builder_.GetStrTab()->section_);
- hash_builder_.section_.sh_addr = hash_builder_.section_.sh_offset;
- hash_builder_.section_.sh_size = hash_.size() * sizeof(Elf_Word);
- hash_builder_.section_.sh_link = hash_builder_.GetLink();
+ hash_builder_.GetSection()->sh_offset =
+ NextOffset<Elf_Word, Elf_Shdr>(*hash_builder_.GetSection(),
+ *dynsym_builder_.GetStrTab()->GetSection());
+ hash_builder_.GetSection()->sh_addr = hash_builder_.GetSection()->sh_offset;
+ hash_builder_.GetSection()->sh_size = hash_.size() * sizeof(Elf_Word);
+ hash_builder_.GetSection()->sh_link = hash_builder_.GetLink();
// Get the layout of the rodata section.
- rodata_builder_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
- (rodata_builder_.section_,
- hash_builder_.section_);
- rodata_builder_.section_.sh_addr = rodata_builder_.section_.sh_offset;
- rodata_builder_.section_.sh_size = rodata_builder_.GetSize();
- rodata_builder_.section_.sh_link = rodata_builder_.GetLink();
+ rodata_builder_.GetSection()->sh_offset =
+ NextOffset<Elf_Word, Elf_Shdr>(*rodata_builder_.GetSection(),
+ *hash_builder_.GetSection());
+ rodata_builder_.GetSection()->sh_addr = rodata_builder_.GetSection()->sh_offset;
+ rodata_builder_.GetSection()->sh_size = rodata_builder_.GetSize();
+ rodata_builder_.GetSection()->sh_link = rodata_builder_.GetLink();
// Get the layout of the text section.
- text_builder_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
- (text_builder_.section_, rodata_builder_.section_);
- text_builder_.section_.sh_addr = text_builder_.section_.sh_offset;
- text_builder_.section_.sh_size = text_builder_.GetSize();
- text_builder_.section_.sh_link = text_builder_.GetLink();
- CHECK_ALIGNED(rodata_builder_.section_.sh_offset + rodata_builder_.section_.sh_size, kPageSize);
+ text_builder_.GetSection()->sh_offset =
+ NextOffset<Elf_Word, Elf_Shdr>(*text_builder_.GetSection(),
+ *rodata_builder_.GetSection());
+ text_builder_.GetSection()->sh_addr = text_builder_.GetSection()->sh_offset;
+ text_builder_.GetSection()->sh_size = text_builder_.GetSize();
+ text_builder_.GetSection()->sh_link = text_builder_.GetLink();
+ CHECK_ALIGNED(rodata_builder_.GetSection()->sh_offset +
+ rodata_builder_.GetSection()->sh_size, kPageSize);
// Get the layout of the dynamic section.
- dynamic_builder_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
- (dynamic_builder_.section_,
- text_builder_.section_);
- dynamic_builder_.section_.sh_addr = dynamic_builder_.section_.sh_offset;
- dynamic_builder_.section_.sh_size = dynamic_builder_.GetSize() * sizeof(Elf_Dyn);
- dynamic_builder_.section_.sh_link = dynamic_builder_.GetLink();
+ dynamic_builder_.GetSection()->sh_offset =
+ NextOffset<Elf_Word, Elf_Shdr>(*dynamic_builder_.GetSection(), *text_builder_.GetSection());
+ dynamic_builder_.GetSection()->sh_addr = dynamic_builder_.GetSection()->sh_offset;
+ dynamic_builder_.GetSection()->sh_size = dynamic_builder_.GetSize() * sizeof(Elf_Dyn);
+ dynamic_builder_.GetSection()->sh_link = dynamic_builder_.GetLink();
if (debug_logging_) {
- LOG(INFO) << "dynsym off=" << dynsym_builder_.section_.sh_offset
- << " dynsym size=" << dynsym_builder_.section_.sh_size;
- LOG(INFO) << "dynstr off=" << dynsym_builder_.GetStrTab()->section_.sh_offset
- << " dynstr size=" << dynsym_builder_.GetStrTab()->section_.sh_size;
- LOG(INFO) << "hash off=" << hash_builder_.section_.sh_offset
- << " hash size=" << hash_builder_.section_.sh_size;
- LOG(INFO) << "rodata off=" << rodata_builder_.section_.sh_offset
- << " rodata size=" << rodata_builder_.section_.sh_size;
- LOG(INFO) << "text off=" << text_builder_.section_.sh_offset
- << " text size=" << text_builder_.section_.sh_size;
- LOG(INFO) << "dynamic off=" << dynamic_builder_.section_.sh_offset
- << " dynamic size=" << dynamic_builder_.section_.sh_size;
+ LOG(INFO) << "dynsym off=" << dynsym_builder_.GetSection()->sh_offset
+ << " dynsym size=" << dynsym_builder_.GetSection()->sh_size;
+ LOG(INFO) << "dynstr off=" << dynsym_builder_.GetStrTab()->GetSection()->sh_offset
+ << " dynstr size=" << dynsym_builder_.GetStrTab()->GetSection()->sh_size;
+ LOG(INFO) << "hash off=" << hash_builder_.GetSection()->sh_offset
+ << " hash size=" << hash_builder_.GetSection()->sh_size;
+ LOG(INFO) << "rodata off=" << rodata_builder_.GetSection()->sh_offset
+ << " rodata size=" << rodata_builder_.GetSection()->sh_size;
+ LOG(INFO) << "text off=" << text_builder_.GetSection()->sh_offset
+ << " text size=" << text_builder_.GetSection()->sh_size;
+ LOG(INFO) << "dynamic off=" << dynamic_builder_.GetSection()->sh_offset
+ << " dynamic size=" << dynamic_builder_.GetSection()->sh_size;
}
return true;
@@ -783,19 +847,21 @@
bool Write() {
std::vector<ElfFilePiece<Elf_Word>*> pieces;
- Elf_Shdr prev = dynamic_builder_.section_;
+ Elf_Shdr* prev = dynamic_builder_.GetSection();
std::string strtab;
if (IncludingDebugSymbols()) {
// Setup .symtab
- section_ptrs_.push_back(&symtab_builder_.section_);
+ section_ptrs_.push_back(symtab_builder_.GetSection());
AssignSectionStr(&symtab_builder_, &shstrtab_);
- symtab_builder_.section_index_ = section_index_++;
+ symtab_builder_.SetSectionIndex(section_index_);
+ section_index_++;
// Setup .strtab
- section_ptrs_.push_back(&symtab_builder_.GetStrTab()->section_);
+ section_ptrs_.push_back(symtab_builder_.GetStrTab()->GetSection());
AssignSectionStr(symtab_builder_.GetStrTab(), &shstrtab_);
- symtab_builder_.GetStrTab()->section_index_ = section_index_++;
+ symtab_builder_.GetStrTab()->SetSectionIndex(section_index_);
+ section_index_++;
strtab = symtab_builder_.GenerateStrtab();
if (debug_logging_) {
@@ -810,15 +876,17 @@
for (ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> *builder = other_builders_.data(),
*end = builder + other_builders_.size();
builder != end; ++builder) {
- section_ptrs_.push_back(&builder->section_);
+ section_ptrs_.push_back(builder->GetSection());
AssignSectionStr(builder, &shstrtab_);
- builder->section_index_ = section_index_++;
+ builder->SetSectionIndex(section_index_);
+ section_index_++;
}
// Setup shstrtab
- section_ptrs_.push_back(&shstrtab_builder_.section_);
+ section_ptrs_.push_back(shstrtab_builder_.GetSection());
AssignSectionStr(&shstrtab_builder_, &shstrtab_);
- shstrtab_builder_.section_index_ = section_index_++;
+ shstrtab_builder_.SetSectionIndex(section_index_);
+ section_index_++;
if (debug_logging_) {
LOG(INFO) << ".shstrtab size (bytes) =" << shstrtab_.size()
@@ -829,71 +897,71 @@
if (IncludingDebugSymbols()) {
// Get the layout of the symtab section.
- symtab_builder_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
- (symtab_builder_.section_,
- dynamic_builder_.section_);
- symtab_builder_.section_.sh_addr = 0;
+ symtab_builder_.GetSection()->sh_offset =
+ NextOffset<Elf_Word, Elf_Shdr>(*symtab_builder_.GetSection(),
+ *dynamic_builder_.GetSection());
+ symtab_builder_.GetSection()->sh_addr = 0;
// GetSize() adds one to leave space for the null symbol.
- symtab_builder_.section_.sh_size = symtab_builder_.GetSize() * sizeof(Elf_Sym);
- symtab_builder_.section_.sh_link = symtab_builder_.GetLink();
+ symtab_builder_.GetSection()->sh_size = symtab_builder_.GetSize() * sizeof(Elf_Sym);
+ symtab_builder_.GetSection()->sh_link = symtab_builder_.GetLink();
// Get the layout of the dynstr section.
- symtab_builder_.GetStrTab()->section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
- (symtab_builder_.GetStrTab()->section_,
- symtab_builder_.section_);
- symtab_builder_.GetStrTab()->section_.sh_addr = 0;
- symtab_builder_.GetStrTab()->section_.sh_size = strtab.size();
- symtab_builder_.GetStrTab()->section_.sh_link = symtab_builder_.GetStrTab()->GetLink();
+ symtab_builder_.GetStrTab()->GetSection()->sh_offset =
+ NextOffset<Elf_Word, Elf_Shdr>(*symtab_builder_.GetStrTab()->GetSection(),
+ *symtab_builder_.GetSection());
+ symtab_builder_.GetStrTab()->GetSection()->sh_addr = 0;
+ symtab_builder_.GetStrTab()->GetSection()->sh_size = strtab.size();
+ symtab_builder_.GetStrTab()->GetSection()->sh_link = symtab_builder_.GetStrTab()->GetLink();
- prev = symtab_builder_.GetStrTab()->section_;
+ prev = symtab_builder_.GetStrTab()->GetSection();
if (debug_logging_) {
- LOG(INFO) << "symtab off=" << symtab_builder_.section_.sh_offset
- << " symtab size=" << symtab_builder_.section_.sh_size;
- LOG(INFO) << "strtab off=" << symtab_builder_.GetStrTab()->section_.sh_offset
- << " strtab size=" << symtab_builder_.GetStrTab()->section_.sh_size;
+ LOG(INFO) << "symtab off=" << symtab_builder_.GetSection()->sh_offset
+ << " symtab size=" << symtab_builder_.GetSection()->sh_size;
+ LOG(INFO) << "strtab off=" << symtab_builder_.GetStrTab()->GetSection()->sh_offset
+ << " strtab size=" << symtab_builder_.GetStrTab()->GetSection()->sh_size;
}
}
// Get the layout of the extra sections. (This will deal with the debug
// sections if they are there)
for (auto it = other_builders_.begin(); it != other_builders_.end(); ++it) {
- it->section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>(it->section_, prev);
- it->section_.sh_addr = 0;
- it->section_.sh_size = it->GetBuffer()->size();
- it->section_.sh_link = it->GetLink();
+ it->GetSection()->sh_offset = NextOffset<Elf_Word, Elf_Shdr>(*it->GetSection(), *prev);
+ it->GetSection()->sh_addr = 0;
+ it->GetSection()->sh_size = it->GetBuffer()->size();
+ it->GetSection()->sh_link = it->GetLink();
// We postpone adding an ElfFilePiece to keep the order in "pieces."
- prev = it->section_;
+ prev = it->GetSection();
if (debug_logging_) {
- LOG(INFO) << it->name_ << " off=" << it->section_.sh_offset
- << " size=" << it->section_.sh_size;
+ LOG(INFO) << it->GetName() << " off=" << it->GetSection()->sh_offset
+ << " size=" << it->GetSection()->sh_size;
}
}
// Get the layout of the shstrtab section
- shstrtab_builder_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
- (shstrtab_builder_.section_, prev);
- shstrtab_builder_.section_.sh_addr = 0;
- shstrtab_builder_.section_.sh_size = shstrtab_.size();
- shstrtab_builder_.section_.sh_link = shstrtab_builder_.GetLink();
+ shstrtab_builder_.GetSection()->sh_offset =
+ NextOffset<Elf_Word, Elf_Shdr>(*shstrtab_builder_.GetSection(), *prev);
+ shstrtab_builder_.GetSection()->sh_addr = 0;
+ shstrtab_builder_.GetSection()->sh_size = shstrtab_.size();
+ shstrtab_builder_.GetSection()->sh_link = shstrtab_builder_.GetLink();
if (debug_logging_) {
- LOG(INFO) << "shstrtab off=" << shstrtab_builder_.section_.sh_offset
- << " shstrtab size=" << shstrtab_builder_.section_.sh_size;
+ LOG(INFO) << "shstrtab off=" << shstrtab_builder_.GetSection()->sh_offset
+ << " shstrtab size=" << shstrtab_builder_.GetSection()->sh_size;
}
// The section header list comes after all the section data.
Elf_Word sections_offset = RoundUp(
- shstrtab_builder_.section_.sh_offset + shstrtab_builder_.section_.sh_size,
+ shstrtab_builder_.GetSection()->sh_offset + shstrtab_builder_.GetSection()->sh_size,
sizeof(Elf_Word));
// Setup the actual symbol arrays.
std::vector<Elf_Sym> dynsym = dynsym_builder_.GenerateSymtab();
- CHECK_EQ(dynsym.size() * sizeof(Elf_Sym), dynsym_builder_.section_.sh_size);
+ CHECK_EQ(dynsym.size() * sizeof(Elf_Sym), dynsym_builder_.GetSection()->sh_size);
std::vector<Elf_Sym> symtab;
if (IncludingDebugSymbols()) {
symtab = symtab_builder_.GenerateSymtab();
- CHECK_EQ(symtab.size() * sizeof(Elf_Sym), symtab_builder_.section_.sh_size);
+ CHECK_EQ(symtab.size() * sizeof(Elf_Sym), symtab_builder_.GetSection()->sh_size);
}
// Setup the dynamic section.
@@ -901,43 +969,44 @@
// and the soname_offset.
std::vector<Elf_Dyn> dynamic = dynamic_builder_.GetDynamics(dynstr_.size(),
dynstr_soname_offset_);
- CHECK_EQ(dynamic.size() * sizeof(Elf_Dyn), dynamic_builder_.section_.sh_size);
+ CHECK_EQ(dynamic.size() * sizeof(Elf_Dyn), dynamic_builder_.GetSection()->sh_size);
// Finish setup of the program headers now that we know the layout of the
// whole file.
- Elf_Word load_r_size = rodata_builder_.section_.sh_offset + rodata_builder_.section_.sh_size;
+ Elf_Word load_r_size =
+ rodata_builder_.GetSection()->sh_offset + rodata_builder_.GetSection()->sh_size;
program_headers_[PH_LOAD_R__].p_filesz = load_r_size;
program_headers_[PH_LOAD_R__].p_memsz = load_r_size;
- program_headers_[PH_LOAD_R__].p_align = rodata_builder_.section_.sh_addralign;
+ program_headers_[PH_LOAD_R__].p_align = rodata_builder_.GetSection()->sh_addralign;
- Elf_Word load_rx_size = text_builder_.section_.sh_size;
- program_headers_[PH_LOAD_R_X].p_offset = text_builder_.section_.sh_offset;
- program_headers_[PH_LOAD_R_X].p_vaddr = text_builder_.section_.sh_offset;
- program_headers_[PH_LOAD_R_X].p_paddr = text_builder_.section_.sh_offset;
+ Elf_Word load_rx_size = text_builder_.GetSection()->sh_size;
+ program_headers_[PH_LOAD_R_X].p_offset = text_builder_.GetSection()->sh_offset;
+ program_headers_[PH_LOAD_R_X].p_vaddr = text_builder_.GetSection()->sh_offset;
+ program_headers_[PH_LOAD_R_X].p_paddr = text_builder_.GetSection()->sh_offset;
program_headers_[PH_LOAD_R_X].p_filesz = load_rx_size;
program_headers_[PH_LOAD_R_X].p_memsz = load_rx_size;
- program_headers_[PH_LOAD_R_X].p_align = text_builder_.section_.sh_addralign;
+ program_headers_[PH_LOAD_R_X].p_align = text_builder_.GetSection()->sh_addralign;
- program_headers_[PH_LOAD_RW_].p_offset = dynamic_builder_.section_.sh_offset;
- program_headers_[PH_LOAD_RW_].p_vaddr = dynamic_builder_.section_.sh_offset;
- program_headers_[PH_LOAD_RW_].p_paddr = dynamic_builder_.section_.sh_offset;
- program_headers_[PH_LOAD_RW_].p_filesz = dynamic_builder_.section_.sh_size;
- program_headers_[PH_LOAD_RW_].p_memsz = dynamic_builder_.section_.sh_size;
- program_headers_[PH_LOAD_RW_].p_align = dynamic_builder_.section_.sh_addralign;
+ program_headers_[PH_LOAD_RW_].p_offset = dynamic_builder_.GetSection()->sh_offset;
+ program_headers_[PH_LOAD_RW_].p_vaddr = dynamic_builder_.GetSection()->sh_offset;
+ program_headers_[PH_LOAD_RW_].p_paddr = dynamic_builder_.GetSection()->sh_offset;
+ program_headers_[PH_LOAD_RW_].p_filesz = dynamic_builder_.GetSection()->sh_size;
+ program_headers_[PH_LOAD_RW_].p_memsz = dynamic_builder_.GetSection()->sh_size;
+ program_headers_[PH_LOAD_RW_].p_align = dynamic_builder_.GetSection()->sh_addralign;
- program_headers_[PH_DYNAMIC].p_offset = dynamic_builder_.section_.sh_offset;
- program_headers_[PH_DYNAMIC].p_vaddr = dynamic_builder_.section_.sh_offset;
- program_headers_[PH_DYNAMIC].p_paddr = dynamic_builder_.section_.sh_offset;
- program_headers_[PH_DYNAMIC].p_filesz = dynamic_builder_.section_.sh_size;
- program_headers_[PH_DYNAMIC].p_memsz = dynamic_builder_.section_.sh_size;
- program_headers_[PH_DYNAMIC].p_align = dynamic_builder_.section_.sh_addralign;
+ program_headers_[PH_DYNAMIC].p_offset = dynamic_builder_.GetSection()->sh_offset;
+ program_headers_[PH_DYNAMIC].p_vaddr = dynamic_builder_.GetSection()->sh_offset;
+ program_headers_[PH_DYNAMIC].p_paddr = dynamic_builder_.GetSection()->sh_offset;
+ program_headers_[PH_DYNAMIC].p_filesz = dynamic_builder_.GetSection()->sh_size;
+ program_headers_[PH_DYNAMIC].p_memsz = dynamic_builder_.GetSection()->sh_size;
+ program_headers_[PH_DYNAMIC].p_align = dynamic_builder_.GetSection()->sh_addralign;
// Finish setup of the Ehdr values.
elf_header_.e_phoff = PHDR_OFFSET;
elf_header_.e_shoff = sections_offset;
elf_header_.e_phnum = PH_NUM;
elf_header_.e_shnum = section_ptrs_.size();
- elf_header_.e_shstrndx = shstrtab_builder_.section_index_;
+ elf_header_.e_shstrndx = shstrtab_builder_.GetSectionIndex();
// Add the rest of the pieces to the list.
pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Elf Header", 0, &elf_header_,
@@ -945,33 +1014,33 @@
pieces.push_back(new ElfFileMemoryPiece<Elf_Word>("Program headers", PHDR_OFFSET,
&program_headers_, sizeof(program_headers_)));
pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".dynamic",
- dynamic_builder_.section_.sh_offset,
+ dynamic_builder_.GetSection()->sh_offset,
dynamic.data(),
- dynamic_builder_.section_.sh_size));
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".dynsym", dynsym_builder_.section_.sh_offset,
+ dynamic_builder_.GetSection()->sh_size));
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".dynsym", dynsym_builder_.GetSection()->sh_offset,
dynsym.data(),
dynsym.size() * sizeof(Elf_Sym)));
pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".dynstr",
- dynsym_builder_.GetStrTab()->section_.sh_offset,
+ dynsym_builder_.GetStrTab()->GetSection()->sh_offset,
dynstr_.c_str(), dynstr_.size()));
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".hash", hash_builder_.section_.sh_offset,
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".hash", hash_builder_.GetSection()->sh_offset,
hash_.data(),
hash_.size() * sizeof(Elf_Word)));
- pieces.push_back(new ElfFileRodataPiece<Elf_Word>(rodata_builder_.section_.sh_offset,
+ pieces.push_back(new ElfFileRodataPiece<Elf_Word>(rodata_builder_.GetSection()->sh_offset,
oat_writer_));
- pieces.push_back(new ElfFileOatTextPiece<Elf_Word>(text_builder_.section_.sh_offset,
+ pieces.push_back(new ElfFileOatTextPiece<Elf_Word>(text_builder_.GetSection()->sh_offset,
oat_writer_));
if (IncludingDebugSymbols()) {
pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".symtab",
- symtab_builder_.section_.sh_offset,
+ symtab_builder_.GetSection()->sh_offset,
symtab.data(),
symtab.size() * sizeof(Elf_Sym)));
pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".strtab",
- symtab_builder_.GetStrTab()->section_.sh_offset,
+ symtab_builder_.GetStrTab()->GetSection()->sh_offset,
strtab.c_str(), strtab.size()));
}
pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(".shstrtab",
- shstrtab_builder_.section_.sh_offset,
+ shstrtab_builder_.GetSection()->sh_offset,
&shstrtab_[0], shstrtab_.size()));
for (uint32_t i = 0; i < section_ptrs_.size(); ++i) {
// Just add all the sections individually since they are all over the
@@ -983,7 +1052,7 @@
// Postponed debug info.
for (auto it = other_builders_.begin(); it != other_builders_.end(); ++it) {
- pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(it->name_, it->section_.sh_offset,
+ pieces.push_back(new ElfFileMemoryPiece<Elf_Word>(it->GetName(), it->GetSection()->sh_offset,
it->GetBuffer()->data(),
it->GetBuffer()->size()));
}
@@ -1006,47 +1075,6 @@
}
private:
- CodeOutput* oat_writer_;
- File* elf_file_;
- const bool add_symbols_;
- const bool debug_logging_;
-
- bool fatal_error_ = false;
-
- // What phdr is.
- static const uint32_t PHDR_OFFSET = sizeof(Elf_Ehdr);
- enum : uint8_t {
- PH_PHDR = 0,
- PH_LOAD_R__ = 1,
- PH_LOAD_R_X = 2,
- PH_LOAD_RW_ = 3,
- PH_DYNAMIC = 4,
- PH_NUM = 5,
- };
- static const uint32_t PHDR_SIZE = sizeof(Elf_Phdr) * PH_NUM;
- Elf_Phdr program_headers_[PH_NUM];
-
- Elf_Ehdr elf_header_;
-
- Elf_Shdr null_hdr_;
- std::string shstrtab_;
- uint32_t section_index_;
- std::string dynstr_;
- uint32_t dynstr_soname_offset_;
- std::vector<Elf_Shdr*> section_ptrs_;
- std::vector<Elf_Word> hash_;
-
- public:
- ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> text_builder_;
- ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> rodata_builder_;
- ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr> dynsym_builder_;
- ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr> symtab_builder_;
- ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> hash_builder_;
- ElfDynamicBuilder<Elf_Word, Elf_Sword, Elf_Dyn, Elf_Shdr> dynamic_builder_;
- ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> shstrtab_builder_;
- std::vector<ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>> other_builders_;
-
- private:
void SetISA(InstructionSet isa) {
switch (isa) {
case kArm:
@@ -1141,14 +1169,14 @@
true, 4, STB_GLOBAL, STT_OBJECT);
}
- void AssignSectionStr(ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> *builder,
+ void AssignSectionStr(ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>* builder,
std::string* strtab) {
- builder->section_.sh_name = strtab->size();
- *strtab += builder->name_;
+ builder->GetSection()->sh_name = strtab->size();
+ *strtab += builder->GetName();
*strtab += '\0';
if (debug_logging_) {
- LOG(INFO) << "adding section name \"" << builder->name_ << "\" "
- << "to shstrtab at offset " << builder->section_.sh_name;
+ LOG(INFO) << "adding section name \"" << builder->GetName() << "\" "
+ << "to shstrtab at offset " << builder->GetSection()->sh_name;
}
}
@@ -1163,7 +1191,51 @@
return true;
}
- bool IncludingDebugSymbols() { return add_symbols_ && symtab_builder_.GetSize() > 1; }
+ bool IncludingDebugSymbols() const {
+ return add_symbols_ && symtab_builder_.GetSize() > 1;
+ }
+
+ CodeOutput* const oat_writer_;
+ File* const elf_file_;
+ const bool add_symbols_;
+ const bool debug_logging_;
+
+ bool fatal_error_ = false;
+
+ // Program header (phdr) table layout.
+ static const uint32_t PHDR_OFFSET = sizeof(Elf_Ehdr);
+ enum : uint8_t {
+ PH_PHDR = 0,
+ PH_LOAD_R__ = 1,
+ PH_LOAD_R_X = 2,
+ PH_LOAD_RW_ = 3,
+ PH_DYNAMIC = 4,
+ PH_NUM = 5,
+ };
+ static const uint32_t PHDR_SIZE = sizeof(Elf_Phdr) * PH_NUM;
+ Elf_Phdr program_headers_[PH_NUM];
+
+ Elf_Ehdr elf_header_;
+
+ Elf_Shdr null_hdr_;
+ std::string shstrtab_;
+ // The index of the current section being built; indices start at 1 (0 is the null section header).
+ uint32_t section_index_;
+ std::string dynstr_;
+ uint32_t dynstr_soname_offset_;
+ std::vector<const Elf_Shdr*> section_ptrs_;
+ std::vector<Elf_Word> hash_;
+
+ ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> text_builder_;
+ ElfOatSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> rodata_builder_;
+ ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr> dynsym_builder_;
+ ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr> symtab_builder_;
+ ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> hash_builder_;
+ ElfDynamicBuilder<Elf_Word, Elf_Sword, Elf_Dyn, Elf_Shdr> dynamic_builder_;
+ ElfSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> shstrtab_builder_;
+ std::vector<ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr>> other_builders_;
+
+ DISALLOW_COPY_AND_ASSIGN(ElfBuilder);
};
} // namespace art
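The hunks above replace direct access to ElfBuilder internals (section_, name_, section_index_) with accessors and move the member fields into a single private block guarded by DISALLOW_COPY_AND_ASSIGN. A minimal standalone sketch of that encapsulation pattern, using illustrative names rather than the real ART templates:

#include <string>

// Illustrative stand-ins; not the ART types.
struct SectionHeader {
  unsigned sh_name = 0;
  unsigned sh_offset = 0;
  unsigned sh_size = 0;
};

class SectionBuilderSketch {
 public:
  SectionBuilderSketch(const std::string& name, unsigned index)
      : name_(name), section_index_(index) {}

  // Callers go through accessors instead of touching members directly,
  // mirroring GetSection()/GetName()/GetSectionIndex() above.
  SectionHeader* GetSection() { return &section_; }
  const std::string& GetName() const { return name_; }
  unsigned GetSectionIndex() const { return section_index_; }

 private:
  SectionHeader section_;
  const std::string name_;
  const unsigned section_index_;

  SectionBuilderSketch(const SectionBuilderSketch&) = delete;
  SectionBuilderSketch& operator=(const SectionBuilderSketch&) = delete;
};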
diff --git a/compiler/elf_writer_mclinker.cc b/compiler/elf_writer_mclinker.cc
index f017641..7705b9c 100644
--- a/compiler/elf_writer_mclinker.cc
+++ b/compiler/elf_writer_mclinker.cc
@@ -82,7 +82,7 @@
}
// Fill oat_contents.
- VectorOutputStream output_stream("oat contents", oat_contents);
+ VectorOutputStream output_stream("oat contents", &oat_contents);
oat_writer->SetOatDataOffset(oat_section->offset());
CHECK(oat_writer->Write(&output_stream));
CHECK_EQ(oat_writer->GetSize(), oat_contents.size());
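The change above switches VectorOutputStream to take its backing vector by pointer rather than by reference, so the mutation is visible at the call site. A hedged sketch of that constructor shape (VectorOutputStreamSketch is illustrative, not the ART class):

#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

class VectorOutputStreamSketch {
 public:
  // Taking the vector by pointer signals at the call site that it is mutated.
  VectorOutputStreamSketch(const std::string& location, std::vector<uint8_t>* vector)
      : location_(location), vector_(vector) {}

  bool WriteFully(const void* buffer, size_t byte_count) {
    const uint8_t* bytes = static_cast<const uint8_t*>(buffer);
    vector_->insert(vector_->end(), bytes, bytes + byte_count);
    return true;
  }

 private:
  const std::string location_;
  std::vector<uint8_t>* const vector_;
};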
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index e661324..c75d8f8 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -195,7 +195,7 @@
}
}
-class OatWriterWrapper : public CodeOutput {
+class OatWriterWrapper FINAL : public CodeOutput {
public:
explicit OatWriterWrapper(OatWriter* oat_writer) : oat_writer_(oat_writer) {}
@@ -206,7 +206,7 @@
return oat_writer_->Write(out);
}
private:
- OatWriter* oat_writer_;
+ OatWriter* const oat_writer_;
};
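Two small hardenings recur in this file: leaf classes become FINAL so calls through them can be devirtualized, and pointer members set once in the constructor become `* const`. A standalone sketch, written with plain `final` standing in for ART's FINAL macro:

class CodeOutputSketch {
 public:
  virtual ~CodeOutputSketch() {}
  virtual bool Write(void* out) = 0;
};

class WrapperSketch final : public CodeOutputSketch {
 public:
  explicit WrapperSketch(CodeOutputSketch* inner) : inner_(inner) {}

  // No subclass can override Write(), so calls on a WrapperSketch can be
  // devirtualized by the compiler.
  bool Write(void* out) override { return inner_->Write(out); }

 private:
  CodeOutputSketch* const inner_;  // set once, never reseated
};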
template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
@@ -676,14 +676,14 @@
std::unique_ptr<std::vector<uint8_t>> cfi_info(
ConstructCIEFrame(compiler_driver->GetInstructionSet()));
- Elf_Addr text_section_address = builder->text_builder_.section_.sh_addr;
+ Elf_Addr text_section_address = builder->GetTextBuilder().GetSection()->sh_addr;
// Iterate over the compiled methods.
const std::vector<OatWriter::DebugInfo>& method_info = oat_writer->GetCFIMethodInfo();
- ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr,
- Elf_Sym, Elf_Shdr>* symtab = &builder->symtab_builder_;
+ ElfSymtabBuilder<Elf_Word, Elf_Sword, Elf_Addr, Elf_Sym, Elf_Shdr>* symtab =
+ builder->GetSymtabBuilder();
for (auto it = method_info.begin(); it != method_info.end(); ++it) {
- symtab->AddSymbol(it->method_name_, &builder->text_builder_, it->low_pc_, true,
+ symtab->AddSymbol(it->method_name_, &builder->GetTextBuilder(), it->low_pc_, true,
it->high_pc_ - it->low_pc_, STB_GLOBAL, STT_FUNC);
// Include CFI for compiled method, if possible.
diff --git a/compiler/file_output_stream.h b/compiler/file_output_stream.h
index 76b00fe..9dfbd7f 100644
--- a/compiler/file_output_stream.h
+++ b/compiler/file_output_stream.h
@@ -23,7 +23,7 @@
namespace art {
-class FileOutputStream : public OutputStream {
+class FileOutputStream FINAL : public OutputStream {
public:
explicit FileOutputStream(File* file);
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 7d7b188..e68cdb0 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -22,7 +22,7 @@
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
#include "primitive.h"
-#include "utils/allocation.h"
+#include "utils/arena_object.h"
#include "utils/growable_array.h"
#include "nodes.h"
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index e6fe067..fe4c3c3 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -186,20 +186,16 @@
Location loc = locations->InAt(i);
HInstruction* input = instruction->InputAt(i);
if (loc.IsUnallocated()) {
- if (loc.GetPolicy() == Location::kRequiresRegister) {
- loc = Location::RegisterLocation(
- AllocateFreeRegister(input->GetType(), blocked_registers_));
- } else if (loc.GetPolicy() == Location::kRequiresFpuRegister) {
- loc = Location::FpuRegisterLocation(
- AllocateFreeRegister(input->GetType(), blocked_registers_));
+ if ((loc.GetPolicy() == Location::kRequiresRegister)
+ || (loc.GetPolicy() == Location::kRequiresFpuRegister)) {
+ loc = AllocateFreeRegister(input->GetType(), blocked_registers_);
} else {
DCHECK_EQ(loc.GetPolicy(), Location::kAny);
HLoadLocal* load = input->AsLoadLocal();
if (load != nullptr) {
loc = GetStackLocation(load);
} else {
- loc = Location::RegisterLocation(
- AllocateFreeRegister(input->GetType(), blocked_registers_));
+ loc = AllocateFreeRegister(input->GetType(), blocked_registers_);
}
}
locations->SetInAt(i, loc);
@@ -213,8 +209,7 @@
DCHECK_EQ(loc.GetPolicy(), Location::kRequiresRegister);
// TODO: Adjust handling of temps. We currently consider temps to use
// core registers. They may also use floating point registers at some point.
- loc = Location::RegisterLocation(static_cast<ManagedRegister>(
- AllocateFreeRegister(Primitive::kPrimInt, blocked_registers_)));
+ loc = AllocateFreeRegister(Primitive::kPrimInt, blocked_registers_);
locations->SetTempAt(i, loc);
}
}
@@ -223,12 +218,8 @@
switch (result_location.GetPolicy()) {
case Location::kAny:
case Location::kRequiresRegister:
- result_location = Location::RegisterLocation(
- AllocateFreeRegister(instruction->GetType(), blocked_registers_));
- break;
case Location::kRequiresFpuRegister:
- result_location = Location::FpuRegisterLocation(
- AllocateFreeRegister(instruction->GetType(), blocked_registers_));
+ result_location = AllocateFreeRegister(instruction->GetType(), blocked_registers_);
break;
case Location::kSameAsFirstInput:
result_location = locations->InAt(0);
@@ -465,7 +456,7 @@
}
case Location::kRegister : {
- int id = location.reg().RegId();
+ int id = location.reg();
stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kInRegister, id);
if (current->GetType() == Primitive::kPrimDouble
|| current->GetType() == Primitive::kPrimLong) {
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index c7623fe..74ad8e9 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -168,8 +168,8 @@
void AllocateRegistersLocally(HInstruction* instruction) const;
// Backend specific implementation for allocating a register.
- virtual ManagedRegister AllocateFreeRegister(Primitive::Type type,
- bool* blocked_registers) const = 0;
+ virtual Location AllocateFreeRegister(Primitive::Type type,
+ bool* blocked_registers) const = 0;
// Raw implementation of allocating a register: loops over blocked_registers to find
// the first available register.
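The signature change above — AllocateFreeRegister now returns a Location rather than a backend ManagedRegister — is what the ARM and x86 hunks below implement: the kind of register (core, FPU, or pair) is encoded in the Location value itself. A minimal standalone sketch of such a tagged value (illustrative; the real Location packs this state more compactly):

#include <cstdint>

class LocationSketch {
 public:
  enum Kind : uint8_t { kInvalid, kRegister, kFpuRegister, kRegisterPair };

  LocationSketch() : kind_(kInvalid), low_(0), high_(0) {}  // "no register"

  static LocationSketch RegisterLocation(int reg) {
    return LocationSketch(kRegister, reg, 0);
  }
  static LocationSketch FpuRegisterLocation(int reg) {
    return LocationSketch(kFpuRegister, reg, 0);
  }
  static LocationSketch RegisterPairLocation(int low, int high) {
    return LocationSketch(kRegisterPair, low, high);
  }

  bool IsRegisterPair() const { return kind_ == kRegisterPair; }

  // Backends cast the stored id to their own register enum, mirroring the
  // As<Register>() / As<XmmRegister>() calls in the hunks below.
  template <typename T> T As() const { return static_cast<T>(low_); }
  template <typename T> T AsRegisterPairLow() const { return static_cast<T>(low_); }
  template <typename T> T AsRegisterPairHigh() const { return static_cast<T>(high_); }

 private:
  LocationSketch(Kind kind, int low, int high) : kind_(kind), low_(low), high_(high) {}

  Kind kind_;
  int low_;
  int high_;
};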
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index a68837e..d555a0d 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -29,21 +29,17 @@
namespace art {
-arm::ArmManagedRegister Location::AsArm() const {
- return reg().AsArm();
-}
-
namespace arm {
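+// On ARM VFP, each of D0-D15 aliases a pair of single-precision registers,
+// with S(2n) as the low half of Dn; hence the `reg * 2` below.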
+static SRegister FromDToLowS(DRegister reg) {
+ return static_cast<SRegister>(reg * 2);
+}
+
static constexpr bool kExplicitStackOverflowCheck = false;
static constexpr int kNumberOfPushedRegistersAtEntry = 1 + 2; // LR, R6, R7
static constexpr int kCurrentMethodStackOffset = 0;
-static Location ArmCoreLocation(Register reg) {
- return Location::RegisterLocation(ArmManagedRegister::FromCoreRegister(reg));
-}
-
static constexpr Register kRuntimeParameterCoreRegisters[] = { R0, R1, R2 };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
@@ -144,8 +140,8 @@
CodeGeneratorARM* arm_codegen = reinterpret_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
InvokeRuntimeCallingConvention calling_convention;
- arm_codegen->Move32(ArmCoreLocation(calling_convention.GetRegisterAt(0)), index_location_);
- arm_codegen->Move32(ArmCoreLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ arm_codegen->Move32(
+     Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
+ arm_codegen->Move32(
+     Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
int32_t offset = QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pThrowArrayBounds).Int32Value();
__ ldr(LR, Address(TR, offset));
__ blx(LR);
@@ -226,8 +222,8 @@
return blocked_registers + kNumberOfCoreRegisters + kNumberOfSRegisters;
}
-ManagedRegister CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type,
- bool* blocked_registers) const {
+Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type,
+ bool* blocked_registers) const {
switch (type) {
case Primitive::kPrimLong: {
bool* blocked_register_pairs = GetBlockedRegisterPairs(blocked_registers);
@@ -247,7 +243,7 @@
blocked_register_pairs[i] = true;
}
}
- return pair;
+ return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
}
case Primitive::kPrimByte:
@@ -266,20 +262,20 @@
blocked_register_pairs[i] = true;
}
}
- return ArmManagedRegister::FromCoreRegister(static_cast<Register>(reg));
+ return Location::RegisterLocation(reg);
}
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
int reg = AllocateFreeRegisterInternal(GetBlockedDRegisters(blocked_registers), kNumberOfDRegisters);
- return ArmManagedRegister::FromDRegister(static_cast<DRegister>(reg));
+ return Location::FpuRegisterLocation(reg);
}
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << type;
}
- return ManagedRegister::NoRegister();
+ return Location();
}
void CodeGeneratorARM::SetupBlockedRegisters(bool* blocked_registers) const {
@@ -400,7 +396,7 @@
case Primitive::kPrimNot: {
uint32_t index = gp_index_++;
if (index < calling_convention.GetNumberOfRegisters()) {
- return ArmCoreLocation(calling_convention.GetRegisterAt(index));
+ return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
} else {
return Location::StackSlot(calling_convention.GetStackOffsetOf(index));
}
@@ -411,8 +407,9 @@
uint32_t index = gp_index_;
gp_index_ += 2;
if (index + 1 < calling_convention.GetNumberOfRegisters()) {
- return Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(
- calling_convention.GetRegisterPairAt(index)));
+ ArmManagedRegister pair = ArmManagedRegister::FromRegisterPair(
+ calling_convention.GetRegisterPairAt(index));
+ return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
} else if (index + 1 == calling_convention.GetNumberOfRegisters()) {
return Location::QuickParameter(index);
} else {
@@ -433,31 +430,26 @@
}
if (destination.IsRegister()) {
if (source.IsRegister()) {
- __ Mov(destination.AsArm().AsCoreRegister(), source.AsArm().AsCoreRegister());
+ __ Mov(destination.As<Register>(), source.As<Register>());
} else if (source.IsFpuRegister()) {
- __ vmovrs(destination.AsArm().AsCoreRegister(),
- source.AsArm().AsOverlappingDRegisterLow());
+ __ vmovrs(destination.As<Register>(), FromDToLowS(source.As<DRegister>()));
} else {
- __ ldr(destination.AsArm().AsCoreRegister(), Address(SP, source.GetStackIndex()));
+ __ ldr(destination.As<Register>(), Address(SP, source.GetStackIndex()));
}
} else if (destination.IsFpuRegister()) {
if (source.IsRegister()) {
- __ vmovsr(destination.AsArm().AsOverlappingDRegisterLow(),
- source.AsArm().AsCoreRegister());
+ __ vmovsr(FromDToLowS(destination.As<DRegister>()), source.As<Register>());
} else if (source.IsFpuRegister()) {
- __ vmovs(destination.AsArm().AsOverlappingDRegisterLow(),
- source.AsArm().AsOverlappingDRegisterLow());
+ __ vmovs(FromDToLowS(destination.As<DRegister>()), FromDToLowS(source.As<DRegister>()));
} else {
- __ vldrs(destination.AsArm().AsOverlappingDRegisterLow(),
- Address(SP, source.GetStackIndex()));
+ __ vldrs(FromDToLowS(destination.As<DRegister>()), Address(SP, source.GetStackIndex()));
}
} else {
DCHECK(destination.IsStackSlot());
if (source.IsRegister()) {
- __ str(source.AsArm().AsCoreRegister(), Address(SP, destination.GetStackIndex()));
+ __ str(source.As<Register>(), Address(SP, destination.GetStackIndex()));
} else if (source.IsFpuRegister()) {
- __ vstrs(source.AsArm().AsOverlappingDRegisterLow(),
- Address(SP, destination.GetStackIndex()));
+ __ vstrs(FromDToLowS(source.As<DRegister>()), Address(SP, destination.GetStackIndex()));
} else {
DCHECK(source.IsStackSlot());
__ ldr(IP, Address(SP, source.GetStackIndex()));
@@ -470,41 +462,42 @@
if (source.Equals(destination)) {
return;
}
- if (destination.IsRegister()) {
- if (source.IsRegister()) {
- __ Mov(destination.AsArm().AsRegisterPairLow(), source.AsArm().AsRegisterPairLow());
- __ Mov(destination.AsArm().AsRegisterPairHigh(), source.AsArm().AsRegisterPairHigh());
+ if (destination.IsRegisterPair()) {
+ if (source.IsRegisterPair()) {
+ __ Mov(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
+ __ Mov(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
} else if (source.IsQuickParameter()) {
uint32_t argument_index = source.GetQuickParameterIndex();
InvokeDexCallingConvention calling_convention;
- __ Mov(destination.AsArm().AsRegisterPairLow(),
+ __ Mov(destination.AsRegisterPairLow<Register>(),
calling_convention.GetRegisterAt(argument_index));
- __ ldr(destination.AsArm().AsRegisterPairHigh(),
+ __ ldr(destination.AsRegisterPairHigh<Register>(),
Address(SP, calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize()));
} else {
DCHECK(source.IsDoubleStackSlot());
- if (destination.AsArm().AsRegisterPair() == R1_R2) {
+ if (destination.AsRegisterPairLow<Register>() == R1) {
+ DCHECK_EQ(destination.AsRegisterPairHigh<Register>(), R2);
__ ldr(R1, Address(SP, source.GetStackIndex()));
__ ldr(R2, Address(SP, source.GetHighStackIndex(kArmWordSize)));
} else {
- __ LoadFromOffset(kLoadWordPair, destination.AsArm().AsRegisterPairLow(),
+ __ LoadFromOffset(kLoadWordPair, destination.AsRegisterPairLow<Register>(),
SP, source.GetStackIndex());
}
}
} else if (destination.IsFpuRegister()) {
if (source.IsDoubleStackSlot()) {
- __ vldrd(destination.AsArm().AsDRegister(), Address(SP, source.GetStackIndex()));
+ __ vldrd(destination.As<DRegister>(), Address(SP, source.GetStackIndex()));
} else {
LOG(FATAL) << "Unimplemented";
}
} else if (destination.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
uint32_t argument_index = destination.GetQuickParameterIndex();
- if (source.IsRegister()) {
- __ Mov(calling_convention.GetRegisterAt(argument_index), source.AsArm().AsRegisterPairLow());
- __ str(source.AsArm().AsRegisterPairHigh(),
+ if (source.IsRegisterPair()) {
+ __ Mov(calling_convention.GetRegisterAt(argument_index), source.AsRegisterPairLow<Register>());
+ __ str(source.AsRegisterPairHigh<Register>(),
Address(SP, calling_convention.GetStackOffsetOf(argument_index + 1)));
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
@@ -516,12 +509,13 @@
}
} else {
DCHECK(destination.IsDoubleStackSlot());
- if (source.IsRegister()) {
- if (source.AsArm().AsRegisterPair() == R1_R2) {
+ if (source.IsRegisterPair()) {
+ if (source.AsRegisterPairLow<Register>() == R1) {
+ DCHECK_EQ(source.AsRegisterPairHigh<Register>(), R2);
__ str(R1, Address(SP, destination.GetStackIndex()));
__ str(R2, Address(SP, destination.GetHighStackIndex(kArmWordSize)));
} else {
- __ StoreToOffset(kStoreWordPair, source.AsArm().AsRegisterPairLow(),
+ __ StoreToOffset(kStoreWordPair, source.AsRegisterPairLow<Register>(),
SP, destination.GetStackIndex());
}
} else if (source.IsQuickParameter()) {
@@ -533,7 +527,7 @@
Address(SP, calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize()));
__ str(R0, Address(SP, destination.GetHighStackIndex(kArmWordSize)));
} else if (source.IsFpuRegister()) {
- __ vstrd(source.AsArm().AsDRegister(), Address(SP, destination.GetStackIndex()));
+ __ vstrd(source.As<DRegister>(), Address(SP, destination.GetStackIndex()));
} else {
DCHECK(source.IsDoubleStackSlot());
__ ldr(IP, Address(SP, source.GetStackIndex()));
@@ -553,7 +547,7 @@
if (instruction->AsIntConstant() != nullptr) {
int32_t value = instruction->AsIntConstant()->GetValue();
if (location.IsRegister()) {
- __ LoadImmediate(location.AsArm().AsCoreRegister(), value);
+ __ LoadImmediate(location.As<Register>(), value);
} else {
DCHECK(location.IsStackSlot());
__ LoadImmediate(IP, value);
@@ -561,9 +555,9 @@
}
} else if (instruction->AsLongConstant() != nullptr) {
int64_t value = instruction->AsLongConstant()->GetValue();
- if (location.IsRegister()) {
- __ LoadImmediate(location.AsArm().AsRegisterPairLow(), Low32Bits(value));
- __ LoadImmediate(location.AsArm().AsRegisterPairHigh(), High32Bits(value));
+ if (location.IsRegisterPair()) {
+ __ LoadImmediate(location.AsRegisterPairLow<Register>(), Low32Bits(value));
+ __ LoadImmediate(location.AsRegisterPairHigh<Register>(), High32Bits(value));
} else {
DCHECK(location.IsDoubleStackSlot());
__ LoadImmediate(IP, Low32Bits(value));
@@ -667,7 +661,7 @@
if (!cond->IsCondition() || cond->AsCondition()->NeedsMaterialization()) {
// Condition has been materialized, compare the output to 0
DCHECK(if_instr->GetLocations()->InAt(0).IsRegister());
- __ cmp(if_instr->GetLocations()->InAt(0).AsArm().AsCoreRegister(),
+ __ cmp(if_instr->GetLocations()->InAt(0).As<Register>(),
ShifterOperand(0));
__ b(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()), NE);
} else {
@@ -675,18 +669,18 @@
// condition as the branch condition.
LocationSummary* locations = cond->GetLocations();
if (locations->InAt(1).IsRegister()) {
- __ cmp(locations->InAt(0).AsArm().AsCoreRegister(),
- ShifterOperand(locations->InAt(1).AsArm().AsCoreRegister()));
+ __ cmp(locations->InAt(0).As<Register>(),
+ ShifterOperand(locations->InAt(1).As<Register>()));
} else {
DCHECK(locations->InAt(1).IsConstant());
int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
ShifterOperand operand;
if (ShifterOperand::CanHoldArm(value, &operand)) {
- __ cmp(locations->InAt(0).AsArm().AsCoreRegister(), ShifterOperand(value));
+ __ cmp(locations->InAt(0).As<Register>(), ShifterOperand(value));
} else {
Register temp = IP;
__ LoadImmediate(temp, value);
- __ cmp(locations->InAt(0).AsArm().AsCoreRegister(), ShifterOperand(temp));
+ __ cmp(locations->InAt(0).As<Register>(), ShifterOperand(temp));
}
}
__ b(codegen_->GetLabelOf(if_instr->IfTrueSuccessor()),
@@ -714,24 +708,24 @@
LocationSummary* locations = comp->GetLocations();
if (locations->InAt(1).IsRegister()) {
- __ cmp(locations->InAt(0).AsArm().AsCoreRegister(),
- ShifterOperand(locations->InAt(1).AsArm().AsCoreRegister()));
+ __ cmp(locations->InAt(0).As<Register>(),
+ ShifterOperand(locations->InAt(1).As<Register>()));
} else {
DCHECK(locations->InAt(1).IsConstant());
int32_t value = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();
ShifterOperand operand;
if (ShifterOperand::CanHoldArm(value, &operand)) {
- __ cmp(locations->InAt(0).AsArm().AsCoreRegister(), ShifterOperand(value));
+ __ cmp(locations->InAt(0).As<Register>(), ShifterOperand(value));
} else {
Register temp = IP;
__ LoadImmediate(temp, value);
- __ cmp(locations->InAt(0).AsArm().AsCoreRegister(), ShifterOperand(temp));
+ __ cmp(locations->InAt(0).As<Register>(), ShifterOperand(temp));
}
}
__ it(ARMCondition(comp->GetCondition()), kItElse);
- __ mov(locations->Out().AsArm().AsCoreRegister(), ShifterOperand(1),
+ __ mov(locations->Out().As<Register>(), ShifterOperand(1),
ARMCondition(comp->GetCondition()));
- __ mov(locations->Out().AsArm().AsCoreRegister(), ShifterOperand(0),
+ __ mov(locations->Out().As<Register>(), ShifterOperand(0),
ARMOppositeCondition(comp->GetCondition()));
}
@@ -864,13 +858,12 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimFloat:
- locations->SetInAt(0, ArmCoreLocation(R0));
+ locations->SetInAt(0, Location::RegisterLocation(R0));
break;
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
- locations->SetInAt(
- 0, Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ locations->SetInAt(0, Location::RegisterPairLocation(R0, R1));
break;
default:
@@ -888,12 +881,13 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimFloat:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsArm().AsCoreRegister(), R0);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).As<Register>(), R0);
break;
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsArm().AsRegisterPair(), R0_R1);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairLow<Register>(), R0);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairHigh<Register>(), R1);
break;
default:
@@ -912,7 +906,7 @@
}
void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
- Register temp = invoke->GetLocations()->GetTemp(0).AsArm().AsCoreRegister();
+ Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
invoke->GetIndexInDexCache() * kArmWordSize;
@@ -947,7 +941,7 @@
void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
- locations->AddTemp(ArmCoreLocation(R0));
+ locations->AddTemp(Location::RegisterLocation(R0));
InvokeDexCallingConventionVisitor calling_convention_visitor;
for (size_t i = 0; i < invoke->InputCount(); i++) {
@@ -963,12 +957,12 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimFloat:
- locations->SetOut(ArmCoreLocation(R0));
+ locations->SetOut(Location::RegisterLocation(R0));
break;
case Primitive::kPrimLong:
case Primitive::kPrimDouble:
- locations->SetOut(Location::RegisterLocation(ArmManagedRegister::FromRegisterPair(R0_R1)));
+ locations->SetOut(Location::RegisterPairLocation(R0, R1));
break;
case Primitive::kPrimVoid:
@@ -978,7 +972,7 @@
void InstructionCodeGeneratorARM::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- Register temp = invoke->GetLocations()->GetTemp(0).AsArm().AsCoreRegister();
+ Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
LocationSummary* locations = invoke->GetLocations();
@@ -989,7 +983,7 @@
__ ldr(temp, Address(SP, receiver.GetStackIndex()));
__ ldr(temp, Address(temp, class_offset));
} else {
- __ ldr(temp, Address(receiver.AsArm().AsCoreRegister(), class_offset));
+ __ ldr(temp, Address(receiver.As<Register>(), class_offset));
}
// temp = temp->GetMethodAt(method_offset);
uint32_t entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value();
@@ -1030,38 +1024,37 @@
void InstructionCodeGeneratorARM::VisitAdd(HAdd* add) {
LocationSummary* locations = add->GetLocations();
+ Location out = locations->Out();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
switch (add->GetResultType()) {
case Primitive::kPrimInt:
- if (locations->InAt(1).IsRegister()) {
- __ add(locations->Out().AsArm().AsCoreRegister(),
- locations->InAt(0).AsArm().AsCoreRegister(),
- ShifterOperand(locations->InAt(1).AsArm().AsCoreRegister()));
+ if (second.IsRegister()) {
+ __ add(out.As<Register>(), first.As<Register>(), ShifterOperand(second.As<Register>()));
} else {
- __ AddConstant(locations->Out().AsArm().AsCoreRegister(),
- locations->InAt(0).AsArm().AsCoreRegister(),
- locations->InAt(1).GetConstant()->AsIntConstant()->GetValue());
+ __ AddConstant(out.As<Register>(),
+ first.As<Register>(),
+ second.GetConstant()->AsIntConstant()->GetValue());
}
break;
case Primitive::kPrimLong:
- __ adds(locations->Out().AsArm().AsRegisterPairLow(),
- locations->InAt(0).AsArm().AsRegisterPairLow(),
- ShifterOperand(locations->InAt(1).AsArm().AsRegisterPairLow()));
- __ adc(locations->Out().AsArm().AsRegisterPairHigh(),
- locations->InAt(0).AsArm().AsRegisterPairHigh(),
- ShifterOperand(locations->InAt(1).AsArm().AsRegisterPairHigh()));
+ __ adds(out.AsRegisterPairLow<Register>(),
+ first.AsRegisterPairLow<Register>(),
+ ShifterOperand(second.AsRegisterPairLow<Register>()));
+ __ adc(out.AsRegisterPairHigh<Register>(),
+ first.AsRegisterPairHigh<Register>(),
+ ShifterOperand(second.AsRegisterPairHigh<Register>()));
break;
case Primitive::kPrimFloat:
- __ vadds(locations->Out().AsArm().AsOverlappingDRegisterLow(),
- locations->InAt(0).AsArm().AsOverlappingDRegisterLow(),
- locations->InAt(1).AsArm().AsOverlappingDRegisterLow());
+ __ vadds(FromDToLowS(out.As<DRegister>()),
+ FromDToLowS(first.As<DRegister>()),
+ FromDToLowS(second.As<DRegister>()));
break;
case Primitive::kPrimDouble:
- __ vaddd(locations->Out().AsArm().AsDRegister(),
- locations->InAt(0).AsArm().AsDRegister(),
- locations->InAt(1).AsArm().AsDRegister());
+ __ vaddd(out.As<DRegister>(), first.As<DRegister>(), second.As<DRegister>());
break;
default:
@@ -1099,24 +1092,24 @@
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
if (locations->InAt(1).IsRegister()) {
- __ sub(locations->Out().AsArm().AsCoreRegister(),
- locations->InAt(0).AsArm().AsCoreRegister(),
- ShifterOperand(locations->InAt(1).AsArm().AsCoreRegister()));
+ __ sub(locations->Out().As<Register>(),
+ locations->InAt(0).As<Register>(),
+ ShifterOperand(locations->InAt(1).As<Register>()));
} else {
- __ AddConstant(locations->Out().AsArm().AsCoreRegister(),
- locations->InAt(0).AsArm().AsCoreRegister(),
+ __ AddConstant(locations->Out().As<Register>(),
+ locations->InAt(0).As<Register>(),
-locations->InAt(1).GetConstant()->AsIntConstant()->GetValue());
}
break;
}
case Primitive::kPrimLong:
- __ subs(locations->Out().AsArm().AsRegisterPairLow(),
- locations->InAt(0).AsArm().AsRegisterPairLow(),
- ShifterOperand(locations->InAt(1).AsArm().AsRegisterPairLow()));
- __ sbc(locations->Out().AsArm().AsRegisterPairHigh(),
- locations->InAt(0).AsArm().AsRegisterPairHigh(),
- ShifterOperand(locations->InAt(1).AsArm().AsRegisterPairHigh()));
+ __ subs(locations->Out().AsRegisterPairLow<Register>(),
+ locations->InAt(0).AsRegisterPairLow<Register>(),
+ ShifterOperand(locations->InAt(1).AsRegisterPairLow<Register>()));
+ __ sbc(locations->Out().AsRegisterPairHigh<Register>(),
+ locations->InAt(0).AsRegisterPairHigh<Register>(),
+ ShifterOperand(locations->InAt(1).AsRegisterPairHigh<Register>()));
break;
case Primitive::kPrimBoolean:
@@ -1135,9 +1128,9 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
InvokeRuntimeCallingConvention calling_convention;
- locations->AddTemp(ArmCoreLocation(calling_convention.GetRegisterAt(0)));
- locations->AddTemp(ArmCoreLocation(calling_convention.GetRegisterAt(1)));
- locations->SetOut(ArmCoreLocation(R0));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(Location::RegisterLocation(R0));
}
void InstructionCodeGeneratorARM::VisitNewInstance(HNewInstance* instruction) {
@@ -1178,8 +1171,8 @@
void InstructionCodeGeneratorARM::VisitNot(HNot* instruction) {
LocationSummary* locations = instruction->GetLocations();
- __ eor(locations->Out().AsArm().AsCoreRegister(),
- locations->InAt(0).AsArm().AsCoreRegister(), ShifterOperand(1));
+ __ eor(locations->Out().As<Register>(),
+ locations->InAt(0).As<Register>(), ShifterOperand(1));
}
void LocationsBuilderARM::VisitCompare(HCompare* compare) {
@@ -1195,19 +1188,19 @@
LocationSummary* locations = compare->GetLocations();
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
- Register output = locations->Out().AsArm().AsCoreRegister();
- ArmManagedRegister left = locations->InAt(0).AsArm();
- ArmManagedRegister right = locations->InAt(1).AsArm();
+ Register output = locations->Out().As<Register>();
+ Location left = locations->InAt(0);
+ Location right = locations->InAt(1);
Label less, greater, done;
- __ cmp(left.AsRegisterPairHigh(),
- ShifterOperand(right.AsRegisterPairHigh())); // Signed compare.
+ __ cmp(left.AsRegisterPairHigh<Register>(),
+ ShifterOperand(right.AsRegisterPairHigh<Register>())); // Signed compare.
__ b(&less, LT);
__ b(&greater, GT);
// Do LoadImmediate before any `cmp`, as LoadImmediate might affect
// the status flags.
__ LoadImmediate(output, 0);
- __ cmp(left.AsRegisterPairLow(),
- ShifterOperand(right.AsRegisterPairLow())); // Unsigned compare.
+ __ cmp(left.AsRegisterPairLow<Register>(),
+ ShifterOperand(right.AsRegisterPairLow<Register>())); // Unsigned compare.
__ b(&done, EQ);
__ b(&less, CC);
@@ -1255,40 +1248,40 @@
void InstructionCodeGeneratorARM::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsArm().AsCoreRegister();
+ Register obj = locations->InAt(0).As<Register>();
uint32_t offset = instruction->GetFieldOffset().Uint32Value();
Primitive::Type field_type = instruction->GetFieldType();
switch (field_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
- Register value = locations->InAt(1).AsArm().AsCoreRegister();
+ Register value = locations->InAt(1).As<Register>();
__ StoreToOffset(kStoreByte, value, obj, offset);
break;
}
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
- Register value = locations->InAt(1).AsArm().AsCoreRegister();
+ Register value = locations->InAt(1).As<Register>();
__ StoreToOffset(kStoreHalfword, value, obj, offset);
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- Register value = locations->InAt(1).AsArm().AsCoreRegister();
+ Register value = locations->InAt(1).As<Register>();
__ StoreToOffset(kStoreWord, value, obj, offset);
if (field_type == Primitive::kPrimNot) {
- Register temp = locations->GetTemp(0).AsArm().AsCoreRegister();
- Register card = locations->GetTemp(1).AsArm().AsCoreRegister();
+ Register temp = locations->GetTemp(0).As<Register>();
+ Register card = locations->GetTemp(1).As<Register>();
codegen_->MarkGCCard(temp, card, obj, value);
}
break;
}
case Primitive::kPrimLong: {
- ArmManagedRegister value = locations->InAt(1).AsArm();
- __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow(), obj, offset);
+ Location value = locations->InAt(1);
+ __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
break;
}
@@ -1310,45 +1303,45 @@
void InstructionCodeGeneratorARM::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsArm().AsCoreRegister();
+ Register obj = locations->InAt(0).As<Register>();
uint32_t offset = instruction->GetFieldOffset().Uint32Value();
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
__ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
break;
}
case Primitive::kPrimByte: {
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
__ LoadFromOffset(kLoadSignedByte, out, obj, offset);
break;
}
case Primitive::kPrimShort: {
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
__ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
break;
}
case Primitive::kPrimChar: {
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
__ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
__ LoadFromOffset(kLoadWord, out, obj, offset);
break;
}
case Primitive::kPrimLong: {
// TODO: support volatile.
- ArmManagedRegister out = locations->Out().AsArm();
- __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow(), obj, offset);
+ Location out = locations->Out();
+ __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
break;
}
@@ -1378,7 +1371,7 @@
Location obj = locations->InAt(0);
if (obj.IsRegister()) {
- __ cmp(obj.AsArm().AsCoreRegister(), ShifterOperand(0));
+ __ cmp(obj.As<Register>(), ShifterOperand(0));
__ b(slow_path->GetEntryLabel(), EQ);
} else {
DCHECK(obj.IsConstant()) << obj;
@@ -1398,18 +1391,18 @@
void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsArm().AsCoreRegister();
+ Register obj = locations->InAt(0).As<Register>();
Location index = locations->InAt(1);
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
__ LoadFromOffset(kLoadUnsignedByte, out, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister()));
+ __ add(IP, obj, ShifterOperand(index.As<Register>()));
__ LoadFromOffset(kLoadUnsignedByte, out, IP, data_offset);
}
break;
@@ -1417,12 +1410,12 @@
case Primitive::kPrimByte: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
__ LoadFromOffset(kLoadSignedByte, out, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister()));
+ __ add(IP, obj, ShifterOperand(index.As<Register>()));
__ LoadFromOffset(kLoadSignedByte, out, IP, data_offset);
}
break;
@@ -1430,12 +1423,12 @@
case Primitive::kPrimShort: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
__ LoadFromOffset(kLoadSignedHalfword, out, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_2));
+ __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_2));
__ LoadFromOffset(kLoadSignedHalfword, out, IP, data_offset);
}
break;
@@ -1443,12 +1436,12 @@
case Primitive::kPrimChar: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
__ LoadFromOffset(kLoadUnsignedHalfword, out, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_2));
+ __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_2));
__ LoadFromOffset(kLoadUnsignedHalfword, out, IP, data_offset);
}
break;
@@ -1458,12 +1451,12 @@
case Primitive::kPrimNot: {
DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
__ LoadFromOffset(kLoadWord, out, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_4));
+ __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_4));
__ LoadFromOffset(kLoadWord, out, IP, data_offset);
}
break;
@@ -1471,13 +1464,13 @@
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- ArmManagedRegister out = locations->Out().AsArm();
+ Location out = locations->Out();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow(), obj, offset);
+ __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_8));
- __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow(), IP, data_offset);
+ __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_8));
+ __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset);
}
break;
}
@@ -1498,9 +1491,9 @@
instruction, is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
if (is_object) {
InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(0, ArmCoreLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, ArmCoreLocation(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(2, ArmCoreLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
} else {
locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
locations->SetInAt(
@@ -1511,7 +1504,7 @@
void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsArm().AsCoreRegister();
+ Register obj = locations->InAt(0).As<Register>();
Location index = locations->InAt(1);
Primitive::Type value_type = instruction->GetComponentType();
@@ -1519,12 +1512,12 @@
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- Register value = locations->InAt(2).AsArm().AsCoreRegister();
+ Register value = locations->InAt(2).As<Register>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
__ StoreToOffset(kStoreByte, value, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister()));
+ __ add(IP, obj, ShifterOperand(index.As<Register>()));
__ StoreToOffset(kStoreByte, value, IP, data_offset);
}
break;
@@ -1533,12 +1526,12 @@
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- Register value = locations->InAt(2).AsArm().AsCoreRegister();
+ Register value = locations->InAt(2).As<Register>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
__ StoreToOffset(kStoreHalfword, value, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_2));
+ __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_2));
__ StoreToOffset(kStoreHalfword, value, IP, data_offset);
}
break;
@@ -1546,12 +1539,12 @@
case Primitive::kPrimInt: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Register value = locations->InAt(2).AsArm().AsCoreRegister();
+ Register value = locations->InAt(2).As<Register>();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
__ StoreToOffset(kStoreWord, value, obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_4));
+ __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_4));
__ StoreToOffset(kStoreWord, value, IP, data_offset);
}
break;
@@ -1568,13 +1561,13 @@
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- ArmManagedRegister value = locations->InAt(2).AsArm();
+ Location value = locations->InAt(2);
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow(), obj, offset);
+ __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), obj, offset);
} else {
- __ add(IP, obj, ShifterOperand(index.AsArm().AsCoreRegister(), LSL, TIMES_8));
- __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow(), IP, data_offset);
+ __ add(IP, obj, ShifterOperand(index.As<Register>(), LSL, TIMES_8));
+ __ StoreToOffset(kStoreWordPair, value.AsRegisterPairLow<Register>(), IP, data_offset);
}
break;
}
@@ -1598,8 +1591,8 @@
void InstructionCodeGeneratorARM::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations = instruction->GetLocations();
uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
- Register obj = locations->InAt(0).AsArm().AsCoreRegister();
- Register out = locations->Out().AsArm().AsCoreRegister();
+ Register obj = locations->InAt(0).As<Register>();
+ Register out = locations->Out().As<Register>();
__ LoadFromOffset(kLoadWord, out, obj, offset);
}
@@ -1619,8 +1612,8 @@
instruction, locations->InAt(0), locations->InAt(1));
codegen_->AddSlowPath(slow_path);
- Register index = locations->InAt(0).AsArm().AsCoreRegister();
- Register length = locations->InAt(1).AsArm().AsCoreRegister();
+ Register index = locations->InAt(0).As<Register>();
+ Register length = locations->InAt(1).As<Register>();
__ cmp(index, ShifterOperand(length));
__ b(slow_path->GetEntryLabel(), CS);
@@ -1696,15 +1689,15 @@
if (source.IsRegister()) {
if (destination.IsRegister()) {
- __ Mov(destination.AsArm().AsCoreRegister(), source.AsArm().AsCoreRegister());
+ __ Mov(destination.As<Register>(), source.As<Register>());
} else {
DCHECK(destination.IsStackSlot());
- __ StoreToOffset(kStoreWord, source.AsArm().AsCoreRegister(),
+ __ StoreToOffset(kStoreWord, source.As<Register>(),
SP, destination.GetStackIndex());
}
} else if (source.IsStackSlot()) {
if (destination.IsRegister()) {
- __ LoadFromOffset(kLoadWord, destination.AsArm().AsCoreRegister(),
+ __ LoadFromOffset(kLoadWord, destination.As<Register>(),
SP, source.GetStackIndex());
} else {
DCHECK(destination.IsStackSlot());
@@ -1716,7 +1709,7 @@
DCHECK(source.GetConstant()->AsIntConstant() != nullptr);
int32_t value = source.GetConstant()->AsIntConstant()->GetValue();
if (destination.IsRegister()) {
- __ LoadImmediate(destination.AsArm().AsCoreRegister(), value);
+ __ LoadImmediate(destination.As<Register>(), value);
} else {
DCHECK(destination.IsStackSlot());
__ LoadImmediate(IP, value);
@@ -1748,15 +1741,15 @@
Location destination = move->GetDestination();
if (source.IsRegister() && destination.IsRegister()) {
- DCHECK_NE(source.AsArm().AsCoreRegister(), IP);
- DCHECK_NE(destination.AsArm().AsCoreRegister(), IP);
- __ Mov(IP, source.AsArm().AsCoreRegister());
- __ Mov(source.AsArm().AsCoreRegister(), destination.AsArm().AsCoreRegister());
- __ Mov(destination.AsArm().AsCoreRegister(), IP);
+ DCHECK_NE(source.As<Register>(), IP);
+ DCHECK_NE(destination.As<Register>(), IP);
+ __ Mov(IP, source.As<Register>());
+ __ Mov(source.As<Register>(), destination.As<Register>());
+ __ Mov(destination.As<Register>(), IP);
} else if (source.IsRegister() && destination.IsStackSlot()) {
- Exchange(source.AsArm().AsCoreRegister(), destination.GetStackIndex());
+ Exchange(source.As<Register>(), destination.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsRegister()) {
- Exchange(destination.AsArm().AsCoreRegister(), source.GetStackIndex());
+ Exchange(destination.As<Register>(), source.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsStackSlot()) {
Exchange(source.GetStackIndex(), destination.GetStackIndex());
} else {
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index b5de8ed..9da26e8 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -164,7 +164,7 @@
}
virtual void SetupBlockedRegisters(bool* blocked_registers) const OVERRIDE;
- virtual ManagedRegister AllocateFreeRegister(
+ virtual Location AllocateFreeRegister(
Primitive::Type type, bool* blocked_registers) const OVERRIDE;
virtual size_t GetNumberOfRegisters() const OVERRIDE;
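The same Location-returning override is added to the x86 backend below. For reference, a hedged sketch of the first-fit scan that AllocateFreeRegisterInternal performs over the blocked-register table (sketch only; the real helper lives in CodeGenerator):

#include <cstddef>

// Scan for the first unblocked register, mark it blocked, and return its id.
static int FirstFreeRegister(bool* blocked_registers, size_t count) {
  for (size_t i = 0; i < count; ++i) {
    if (!blocked_registers[i]) {
      blocked_registers[i] = true;
      return static_cast<int>(i);
    }
  }
  return -1;  // exhausted; sketch only
}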
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index ec82dd3..5f6d458 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -29,10 +29,6 @@
namespace art {
-x86::X86ManagedRegister Location::AsX86() const {
- return reg().AsX86();
-}
-
namespace x86 {
static constexpr bool kExplicitStackOverflowCheck = false;
@@ -40,10 +36,6 @@
static constexpr int kNumberOfPushedRegistersAtEntry = 1;
static constexpr int kCurrentMethodStackOffset = 0;
-static Location X86CpuLocation(Register reg) {
- return Location::RegisterLocation(X86ManagedRegister::FromCpuRegister(reg));
-}
-
static constexpr Register kRuntimeParameterCoreRegisters[] = { EAX, ECX, EDX };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
@@ -105,8 +97,8 @@
CodeGeneratorX86* x86_codegen = reinterpret_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
InvokeRuntimeCallingConvention calling_convention;
- x86_codegen->Move32(X86CpuLocation(calling_convention.GetRegisterAt(0)), index_location_);
- x86_codegen->Move32(X86CpuLocation(calling_convention.GetRegisterAt(1)), length_location_);
+ x86_codegen->Move32(
+     Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
+ x86_codegen->Move32(
+     Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
__ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowArrayBounds)));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
}
@@ -201,8 +193,7 @@
return blocked_registers + kNumberOfCpuRegisters;
}
-ManagedRegister CodeGeneratorX86::AllocateFreeRegister(Primitive::Type type,
- bool* blocked_registers) const {
+Location CodeGeneratorX86::AllocateFreeRegister(Primitive::Type type,
+                                                bool* blocked_registers) const {
switch (type) {
case Primitive::kPrimLong: {
bool* blocked_register_pairs = GetBlockedRegisterPairs(blocked_registers);
@@ -222,7 +213,7 @@
blocked_register_pairs[i] = true;
}
}
- return pair;
+ return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
}
case Primitive::kPrimByte:
@@ -242,21 +233,20 @@
blocked_register_pairs[i] = true;
}
}
- return X86ManagedRegister::FromCpuRegister(reg);
+ return Location::RegisterLocation(reg);
}
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
- XmmRegister reg = static_cast<XmmRegister>(AllocateFreeRegisterInternal(
+ return Location::FpuRegisterLocation(AllocateFreeRegisterInternal(
GetBlockedXmmRegisters(blocked_registers), kNumberOfXmmRegisters));
- return X86ManagedRegister::FromXmmRegister(reg);
}
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << type;
}
- return ManagedRegister::NoRegister();
+ return Location();
}
void CodeGeneratorX86::SetupBlockedRegisters(bool* blocked_registers) const {
@@ -359,7 +349,7 @@
case Primitive::kPrimNot: {
uint32_t index = gp_index_++;
if (index < calling_convention.GetNumberOfRegisters()) {
- return X86CpuLocation(calling_convention.GetRegisterAt(index));
+ return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
} else {
return Location::StackSlot(calling_convention.GetStackOffsetOf(index));
}
@@ -370,8 +360,9 @@
uint32_t index = gp_index_;
gp_index_ += 2;
if (index + 1 < calling_convention.GetNumberOfRegisters()) {
- return Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(
- calling_convention.GetRegisterPairAt(index)));
+ X86ManagedRegister pair = X86ManagedRegister::FromRegisterPair(
+ calling_convention.GetRegisterPairAt(index));
+ return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
} else if (index + 1 == calling_convention.GetNumberOfRegisters()) {
return Location::QuickParameter(index);
} else {
@@ -392,28 +383,28 @@
}
if (destination.IsRegister()) {
if (source.IsRegister()) {
- __ movl(destination.AsX86().AsCpuRegister(), source.AsX86().AsCpuRegister());
+ __ movl(destination.As<Register>(), source.As<Register>());
} else if (source.IsFpuRegister()) {
- __ movd(destination.AsX86().AsCpuRegister(), source.AsX86().AsXmmRegister());
+ __ movd(destination.As<Register>(), source.As<XmmRegister>());
} else {
DCHECK(source.IsStackSlot());
- __ movl(destination.AsX86().AsCpuRegister(), Address(ESP, source.GetStackIndex()));
+ __ movl(destination.As<Register>(), Address(ESP, source.GetStackIndex()));
}
} else if (destination.IsFpuRegister()) {
if (source.IsRegister()) {
- __ movd(destination.AsX86().AsXmmRegister(), source.AsX86().AsCpuRegister());
+ __ movd(destination.As<XmmRegister>(), source.As<Register>());
} else if (source.IsFpuRegister()) {
- __ movaps(destination.AsX86().AsXmmRegister(), source.AsX86().AsXmmRegister());
+ __ movaps(destination.As<XmmRegister>(), source.As<XmmRegister>());
} else {
DCHECK(source.IsStackSlot());
- __ movss(destination.AsX86().AsXmmRegister(), Address(ESP, source.GetStackIndex()));
+ __ movss(destination.As<XmmRegister>(), Address(ESP, source.GetStackIndex()));
}
} else {
DCHECK(destination.IsStackSlot());
if (source.IsRegister()) {
- __ movl(Address(ESP, destination.GetStackIndex()), source.AsX86().AsCpuRegister());
+ __ movl(Address(ESP, destination.GetStackIndex()), source.As<Register>());
} else if (source.IsFpuRegister()) {
- __ movss(Address(ESP, destination.GetStackIndex()), source.AsX86().AsXmmRegister());
+ __ movss(Address(ESP, destination.GetStackIndex()), source.As<XmmRegister>());
} else {
DCHECK(source.IsStackSlot());
__ pushl(Address(ESP, source.GetStackIndex()));
@@ -426,32 +417,32 @@
if (source.Equals(destination)) {
return;
}
- if (destination.IsRegister()) {
- if (source.IsRegister()) {
- __ movl(destination.AsX86().AsRegisterPairLow(), source.AsX86().AsRegisterPairLow());
- __ movl(destination.AsX86().AsRegisterPairHigh(), source.AsX86().AsRegisterPairHigh());
+ if (destination.IsRegisterPair()) {
+ if (source.IsRegisterPair()) {
+ __ movl(destination.AsRegisterPairLow<Register>(), source.AsRegisterPairLow<Register>());
+ __ movl(destination.AsRegisterPairHigh<Register>(), source.AsRegisterPairHigh<Register>());
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
} else if (source.IsQuickParameter()) {
uint32_t argument_index = source.GetQuickParameterIndex();
InvokeDexCallingConvention calling_convention;
- __ movl(destination.AsX86().AsRegisterPairLow(),
+ __ movl(destination.AsRegisterPairLow<Register>(),
calling_convention.GetRegisterAt(argument_index));
- __ movl(destination.AsX86().AsRegisterPairHigh(), Address(ESP,
+ __ movl(destination.AsRegisterPairHigh<Register>(), Address(ESP,
calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize()));
} else {
DCHECK(source.IsDoubleStackSlot());
- __ movl(destination.AsX86().AsRegisterPairLow(), Address(ESP, source.GetStackIndex()));
- __ movl(destination.AsX86().AsRegisterPairHigh(),
+ __ movl(destination.AsRegisterPairLow<Register>(), Address(ESP, source.GetStackIndex()));
+ __ movl(destination.AsRegisterPairHigh<Register>(),
Address(ESP, source.GetHighStackIndex(kX86WordSize)));
}
} else if (destination.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
uint32_t argument_index = destination.GetQuickParameterIndex();
if (source.IsRegister()) {
- __ movl(calling_convention.GetRegisterAt(argument_index), source.AsX86().AsRegisterPairLow());
+ __ movl(calling_convention.GetRegisterAt(argument_index), source.AsRegisterPairLow<Register>());
__ movl(Address(ESP, calling_convention.GetStackOffsetOf(argument_index + 1)),
- source.AsX86().AsRegisterPairHigh());
+ source.AsRegisterPairHigh<Register>());
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
} else {
@@ -463,16 +454,16 @@
}
} else if (destination.IsFpuRegister()) {
if (source.IsDoubleStackSlot()) {
- __ movsd(destination.AsX86().AsXmmRegister(), Address(ESP, source.GetStackIndex()));
+ __ movsd(destination.As<XmmRegister>(), Address(ESP, source.GetStackIndex()));
} else {
LOG(FATAL) << "Unimplemented";
}
} else {
DCHECK(destination.IsDoubleStackSlot());
- if (source.IsRegister()) {
- __ movl(Address(ESP, destination.GetStackIndex()), source.AsX86().AsRegisterPairLow());
+ if (source.IsRegisterPair()) {
+ __ movl(Address(ESP, destination.GetStackIndex()), source.AsRegisterPairLow<Register>());
__ movl(Address(ESP, destination.GetHighStackIndex(kX86WordSize)),
- source.AsX86().AsRegisterPairHigh());
+ source.AsRegisterPairHigh<Register>());
} else if (source.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
uint32_t argument_index = source.GetQuickParameterIndex();
@@ -481,7 +472,7 @@
DCHECK_EQ(calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize(),
static_cast<size_t>(destination.GetHighStackIndex(kX86WordSize)));
} else if (source.IsFpuRegister()) {
- __ movsd(Address(ESP, destination.GetStackIndex()), source.AsX86().AsXmmRegister());
+ __ movsd(Address(ESP, destination.GetStackIndex()), source.As<XmmRegister>());
} else {
DCHECK(source.IsDoubleStackSlot());
__ pushl(Address(ESP, source.GetStackIndex()));
@@ -496,15 +487,15 @@
if (instruction->AsIntConstant() != nullptr) {
Immediate imm(instruction->AsIntConstant()->GetValue());
if (location.IsRegister()) {
- __ movl(location.AsX86().AsCpuRegister(), imm);
+ __ movl(location.As<Register>(), imm);
} else {
__ movl(Address(ESP, location.GetStackIndex()), imm);
}
} else if (instruction->AsLongConstant() != nullptr) {
int64_t value = instruction->AsLongConstant()->GetValue();
if (location.IsRegister()) {
- __ movl(location.AsX86().AsRegisterPairLow(), Immediate(Low32Bits(value)));
- __ movl(location.AsX86().AsRegisterPairHigh(), Immediate(High32Bits(value)));
+ __ movl(location.AsRegisterPairLow<Register>(), Immediate(Low32Bits(value)));
+ __ movl(location.AsRegisterPairHigh<Register>(), Immediate(High32Bits(value)));
} else {
__ movl(Address(ESP, location.GetStackIndex()), Immediate(Low32Bits(value)));
__ movl(Address(ESP, location.GetHighStackIndex(kX86WordSize)), Immediate(High32Bits(value)));
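Aside: on x86-32 a long constant cannot be materialized in one move, so the code above splits it into two 32-bit halves with Low32Bits/High32Bits. A minimal standalone sketch of that split (helper bodies assumed; they are not shown in this patch):

    #include <cstdint>
    #include <cassert>

    // Assumed behavior of the Low32Bits/High32Bits helpers used above:
    // truncate for the low word, shift for the high word.
    static uint32_t Low32Bits(int64_t value)  { return static_cast<uint32_t>(value); }
    static uint32_t High32Bits(int64_t value) { return static_cast<uint32_t>(value >> 32); }

    int main() {
      int64_t value = INT64_C(0x123456789ABCDEF0);
      assert(Low32Bits(value)  == 0x9ABCDEF0u);   // -> pair's low register / low stack slot
      assert(High32Bits(value) == 0x12345678u);   // -> pair's high register / high stack slot
      return 0;
    }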
@@ -609,7 +600,7 @@
// Materialized condition, compare against 0.
Location lhs = if_instr->GetLocations()->InAt(0);
if (lhs.IsRegister()) {
- __ cmpl(lhs.AsX86().AsCpuRegister(), Immediate(0));
+ __ cmpl(lhs.As<Register>(), Immediate(0));
} else {
__ cmpl(Address(ESP, lhs.GetStackIndex()), Immediate(0));
}
@@ -620,13 +611,13 @@
Location rhs = cond->GetLocations()->InAt(1);
// LHS is guaranteed to be in a register (see LocationsBuilderX86::VisitCondition).
if (rhs.IsRegister()) {
- __ cmpl(lhs.AsX86().AsCpuRegister(), rhs.AsX86().AsCpuRegister());
+ __ cmpl(lhs.As<Register>(), rhs.As<Register>());
} else if (rhs.IsConstant()) {
HIntConstant* instruction = rhs.GetConstant()->AsIntConstant();
Immediate imm(instruction->AsIntConstant()->GetValue());
- __ cmpl(lhs.AsX86().AsCpuRegister(), imm);
+ __ cmpl(lhs.As<Register>(), imm);
} else {
- __ cmpl(lhs.AsX86().AsCpuRegister(), Address(ESP, rhs.GetStackIndex()));
+ __ cmpl(lhs.As<Register>(), Address(ESP, rhs.GetStackIndex()));
}
__ j(X86Condition(cond->AsCondition()->GetCondition()),
codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
@@ -693,18 +684,18 @@
void InstructionCodeGeneratorX86::VisitCondition(HCondition* comp) {
if (comp->NeedsMaterialization()) {
LocationSummary* locations = comp->GetLocations();
- Register reg = locations->Out().AsX86().AsCpuRegister();
+ Register reg = locations->Out().As<Register>();
// Clear register: setcc only sets the low byte.
__ xorl(reg, reg);
if (locations->InAt(1).IsRegister()) {
- __ cmpl(locations->InAt(0).AsX86().AsCpuRegister(),
- locations->InAt(1).AsX86().AsCpuRegister());
+ __ cmpl(locations->InAt(0).As<Register>(),
+ locations->InAt(1).As<Register>());
} else if (locations->InAt(1).IsConstant()) {
HConstant* instruction = locations->InAt(1).GetConstant();
Immediate imm(instruction->AsIntConstant()->GetValue());
- __ cmpl(locations->InAt(0).AsX86().AsCpuRegister(), imm);
+ __ cmpl(locations->InAt(0).As<Register>(), imm);
} else {
- __ cmpl(locations->InAt(0).AsX86().AsCpuRegister(),
+ __ cmpl(locations->InAt(0).As<Register>(),
Address(ESP, locations->InAt(1).GetStackIndex()));
}
__ setb(X86Condition(comp->GetCondition()), reg);
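Aside: the xorl(reg, reg) above is load-bearing, and it has to come before the cmpl because xorl itself clobbers EFLAGS; setcc then writes only the low byte of the register, so without the clear the upper 24 bits would keep stale data. A sketch of what the emitted sequence computes (plain C++, an illustration only):

    #include <cstdint>
    #include <cassert>

    // Models "xorl reg, reg; cmpl lhs, rhs; setl reg":
    // setcc defines only bits 0..7, hence the prior clear.
    static uint32_t MaterializeLess(int32_t lhs, int32_t rhs) {
      uint32_t reg = 0;                    // xorl reg, reg (before cmpl: xorl sets flags)
      uint8_t low = (lhs < rhs) ? 1 : 0;   // setl after cmpl lhs, rhs
      reg = (reg & ~0xFFu) | low;          // only the low byte is written
      return reg;
    }

    int main() {
      assert(MaterializeLess(1, 2) == 1u);
      assert(MaterializeLess(3, 2) == 0u);
      return 0;
    }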
@@ -797,18 +788,18 @@
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimNot:
- locations->SetInAt(0, X86CpuLocation(EAX));
+ locations->SetInAt(0, Location::RegisterLocation(EAX));
break;
case Primitive::kPrimLong:
locations->SetInAt(
- 0, Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ 0, Location::RegisterPairLocation(EAX, EDX));
break;
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
locations->SetInAt(
- 0, Location::FpuRegisterLocation(X86ManagedRegister::FromXmmRegister(XMM0)));
+ 0, Location::FpuRegisterLocation(XMM0));
break;
default:
@@ -825,16 +816,17 @@
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimNot:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsX86().AsCpuRegister(), EAX);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).As<Register>(), EAX);
break;
case Primitive::kPrimLong:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsX86().AsRegisterPair(), EAX_EDX);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairLow<Register>(), EAX);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairHigh<Register>(), EDX);
break;
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsX86().AsXmmRegister(), XMM0);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).As<XmmRegister>(), XMM0);
break;
default:
@@ -850,7 +842,7 @@
}
void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
- Register temp = invoke->GetLocations()->GetTemp(0).AsX86().AsCpuRegister();
+ Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).Int32Value() +
invoke->GetIndexInDexCache() * kX86WordSize;
@@ -882,7 +874,7 @@
void LocationsBuilderX86::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
- locations->AddTemp(X86CpuLocation(EAX));
+ locations->AddTemp(Location::RegisterLocation(EAX));
InvokeDexCallingConventionVisitor calling_convention_visitor;
for (size_t i = 0; i < invoke->InputCount(); i++) {
@@ -897,11 +889,11 @@
case Primitive::kPrimShort:
case Primitive::kPrimInt:
case Primitive::kPrimNot:
- locations->SetOut(X86CpuLocation(EAX));
+ locations->SetOut(Location::RegisterLocation(EAX));
break;
case Primitive::kPrimLong:
- locations->SetOut(Location::RegisterLocation(X86ManagedRegister::FromRegisterPair(EAX_EDX)));
+ locations->SetOut(Location::RegisterPairLocation(EAX, EDX));
break;
case Primitive::kPrimVoid:
@@ -909,7 +901,7 @@
case Primitive::kPrimDouble:
case Primitive::kPrimFloat:
- locations->SetOut(Location::FpuRegisterLocation(X86ManagedRegister::FromXmmRegister(XMM0)));
+ locations->SetOut(Location::FpuRegisterLocation(XMM0));
break;
}
@@ -917,7 +909,7 @@
}
void InstructionCodeGeneratorX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- Register temp = invoke->GetLocations()->GetTemp(0).AsX86().AsCpuRegister();
+ Register temp = invoke->GetLocations()->GetTemp(0).As<Register>();
uint32_t method_offset = mirror::Class::EmbeddedVTableOffset().Uint32Value() +
invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
LocationSummary* locations = invoke->GetLocations();
@@ -928,7 +920,7 @@
__ movl(temp, Address(ESP, receiver.GetStackIndex()));
__ movl(temp, Address(temp, class_offset));
} else {
- __ movl(temp, Address(receiver.AsX86().AsCpuRegister(), class_offset));
+ __ movl(temp, Address(receiver.As<Register>(), class_offset));
}
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
@@ -972,28 +964,30 @@
switch (add->GetResultType()) {
case Primitive::kPrimInt: {
- DCHECK_EQ(first.AsX86().AsCpuRegister(), locations->Out().AsX86().AsCpuRegister());
+ DCHECK_EQ(first.As<Register>(), locations->Out().As<Register>());
if (second.IsRegister()) {
- __ addl(first.AsX86().AsCpuRegister(), second.AsX86().AsCpuRegister());
+ __ addl(first.As<Register>(), second.As<Register>());
} else if (second.IsConstant()) {
HConstant* instruction = second.GetConstant();
Immediate imm(instruction->AsIntConstant()->GetValue());
- __ addl(first.AsX86().AsCpuRegister(), imm);
+ __ addl(first.As<Register>(), imm);
} else {
- __ addl(first.AsX86().AsCpuRegister(), Address(ESP, second.GetStackIndex()));
+ __ addl(first.As<Register>(), Address(ESP, second.GetStackIndex()));
}
break;
}
case Primitive::kPrimLong: {
- DCHECK_EQ(first.AsX86().AsRegisterPair(),
- locations->Out().AsX86().AsRegisterPair());
+ DCHECK_EQ(first.AsRegisterPairLow<Register>(),
+ locations->Out().AsRegisterPairLow<Register>());
+ DCHECK_EQ(first.AsRegisterPairHigh<Register>(),
+ locations->Out().AsRegisterPairHigh<Register>());
if (second.IsRegister()) {
- __ addl(first.AsX86().AsRegisterPairLow(), second.AsX86().AsRegisterPairLow());
- __ adcl(first.AsX86().AsRegisterPairHigh(), second.AsX86().AsRegisterPairHigh());
+ __ addl(first.AsRegisterPairLow<Register>(), second.AsRegisterPairLow<Register>());
+ __ adcl(first.AsRegisterPairHigh<Register>(), second.AsRegisterPairHigh<Register>());
} else {
- __ addl(first.AsX86().AsRegisterPairLow(), Address(ESP, second.GetStackIndex()));
- __ adcl(first.AsX86().AsRegisterPairHigh(),
+ __ addl(first.AsRegisterPairLow<Register>(), Address(ESP, second.GetStackIndex()));
+ __ adcl(first.AsRegisterPairHigh<Register>(),
Address(ESP, second.GetHighStackIndex(kX86WordSize)));
}
break;
@@ -1001,18 +995,18 @@
case Primitive::kPrimFloat: {
if (second.IsFpuRegister()) {
- __ addss(first.AsX86().AsXmmRegister(), second.AsX86().AsXmmRegister());
+ __ addss(first.As<XmmRegister>(), second.As<XmmRegister>());
} else {
- __ addss(first.AsX86().AsXmmRegister(), Address(ESP, second.GetStackIndex()));
+ __ addss(first.As<XmmRegister>(), Address(ESP, second.GetStackIndex()));
}
break;
}
case Primitive::kPrimDouble: {
if (second.IsFpuRegister()) {
- __ addsd(first.AsX86().AsXmmRegister(), second.AsX86().AsXmmRegister());
+ __ addsd(first.As<XmmRegister>(), second.As<XmmRegister>());
} else {
- __ addsd(first.AsX86().AsXmmRegister(), Address(ESP, second.GetStackIndex()));
+ __ addsd(first.As<XmmRegister>(), Address(ESP, second.GetStackIndex()));
}
break;
}
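Aside: the kPrimLong case above leans on the x86 carry chain — addl on the low words sets CF, and adcl folds that carry into the high words, giving a full 64-bit add out of two 32-bit operations. The same arithmetic as a self-contained sketch:

    #include <cstdint>
    #include <cassert>

    // Models "addl lo,lo; adcl hi,hi": 64-bit addition in 32-bit halves.
    static void AddPair(uint32_t* lo, uint32_t* hi, uint32_t rhs_lo, uint32_t rhs_hi) {
      uint32_t old_lo = *lo;
      *lo += rhs_lo;                    // addl: wraps on overflow, which sets CF
      uint32_t carry = (*lo < old_lo);  // CF is set exactly when the low add wrapped
      *hi += rhs_hi + carry;            // adcl: high words plus carry
    }

    int main() {
      uint32_t lo = 0xFFFFFFFFu, hi = 0x00000001u;  // 0x00000001FFFFFFFF
      AddPair(&lo, &hi, 1u, 0u);                    // + 1
      assert(lo == 0u && hi == 2u);                 // = 0x0000000200000000
      return 0;
    }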
@@ -1048,37 +1042,41 @@
void InstructionCodeGeneratorX86::VisitSub(HSub* sub) {
LocationSummary* locations = sub->GetLocations();
+ Location first = locations->InAt(0);
+ Location second = locations->InAt(1);
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
- DCHECK_EQ(locations->InAt(0).AsX86().AsCpuRegister(),
- locations->Out().AsX86().AsCpuRegister());
- if (locations->InAt(1).IsRegister()) {
- __ subl(locations->InAt(0).AsX86().AsCpuRegister(),
- locations->InAt(1).AsX86().AsCpuRegister());
- } else if (locations->InAt(1).IsConstant()) {
- HConstant* instruction = locations->InAt(1).GetConstant();
+ DCHECK_EQ(first.As<Register>(),
+ locations->Out().As<Register>());
+ if (second.IsRegister()) {
+ __ subl(first.As<Register>(),
+ second.As<Register>());
+ } else if (second.IsConstant()) {
+ HConstant* instruction = second.GetConstant();
Immediate imm(instruction->AsIntConstant()->GetValue());
- __ subl(locations->InAt(0).AsX86().AsCpuRegister(), imm);
+ __ subl(first.As<Register>(), imm);
} else {
- __ subl(locations->InAt(0).AsX86().AsCpuRegister(),
- Address(ESP, locations->InAt(1).GetStackIndex()));
+ __ subl(first.As<Register>(),
+ Address(ESP, second.GetStackIndex()));
}
break;
}
case Primitive::kPrimLong: {
- DCHECK_EQ(locations->InAt(0).AsX86().AsRegisterPair(),
- locations->Out().AsX86().AsRegisterPair());
- if (locations->InAt(1).IsRegister()) {
- __ subl(locations->InAt(0).AsX86().AsRegisterPairLow(),
- locations->InAt(1).AsX86().AsRegisterPairLow());
- __ sbbl(locations->InAt(0).AsX86().AsRegisterPairHigh(),
- locations->InAt(1).AsX86().AsRegisterPairHigh());
+ DCHECK_EQ(first.AsRegisterPairLow<Register>(),
+ locations->Out().AsRegisterPairLow<Register>());
+ DCHECK_EQ(first.AsRegisterPairHigh<Register>(),
+ locations->Out().AsRegisterPairHigh<Register>());
+ if (second.IsRegister()) {
+ __ subl(first.AsRegisterPairLow<Register>(),
+ second.AsRegisterPairLow<Register>());
+ __ sbbl(first.AsRegisterPairHigh<Register>(),
+ second.AsRegisterPairHigh<Register>());
} else {
- __ subl(locations->InAt(0).AsX86().AsRegisterPairLow(),
- Address(ESP, locations->InAt(1).GetStackIndex()));
- __ sbbl(locations->InAt(0).AsX86().AsRegisterPairHigh(),
- Address(ESP, locations->InAt(1).GetHighStackIndex(kX86WordSize)));
+ __ subl(first.AsRegisterPairLow<Register>(),
+ Address(ESP, second.GetStackIndex()));
+ __ sbbl(first.AsRegisterPairHigh<Register>(),
+ Address(ESP, second.GetHighStackIndex(kX86WordSize)));
}
break;
}
@@ -1098,10 +1096,10 @@
void LocationsBuilderX86::VisitNewInstance(HNewInstance* instruction) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
- locations->SetOut(X86CpuLocation(EAX));
+ locations->SetOut(Location::RegisterLocation(EAX));
InvokeRuntimeCallingConvention calling_convention;
- locations->AddTemp(X86CpuLocation(calling_convention.GetRegisterAt(0)));
- locations->AddTemp(X86CpuLocation(calling_convention.GetRegisterAt(1)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
}
void InstructionCodeGeneratorX86::VisitNewInstance(HNewInstance* instruction) {
@@ -1141,8 +1139,8 @@
void InstructionCodeGeneratorX86::VisitNot(HNot* instruction) {
LocationSummary* locations = instruction->GetLocations();
Location out = locations->Out();
- DCHECK_EQ(locations->InAt(0).AsX86().AsCpuRegister(), out.AsX86().AsCpuRegister());
- __ xorl(out.AsX86().AsCpuRegister(), Immediate(1));
+ DCHECK_EQ(locations->InAt(0).As<Register>(), out.As<Register>());
+ __ xorl(out.As<Register>(), Immediate(1));
}
void LocationsBuilderX86::VisitCompare(HCompare* compare) {
@@ -1159,22 +1157,23 @@
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
Label less, greater, done;
- Register output = locations->Out().AsX86().AsCpuRegister();
- X86ManagedRegister left = locations->InAt(0).AsX86();
+ Register output = locations->Out().As<Register>();
+ Location left = locations->InAt(0);
Location right = locations->InAt(1);
if (right.IsRegister()) {
- __ cmpl(left.AsRegisterPairHigh(), right.AsX86().AsRegisterPairHigh());
+ __ cmpl(left.AsRegisterPairHigh<Register>(), right.AsRegisterPairHigh<Register>());
} else {
DCHECK(right.IsDoubleStackSlot());
- __ cmpl(left.AsRegisterPairHigh(), Address(ESP, right.GetHighStackIndex(kX86WordSize)));
+ __ cmpl(left.AsRegisterPairHigh<Register>(),
+ Address(ESP, right.GetHighStackIndex(kX86WordSize)));
}
__ j(kLess, &less); // Signed compare.
__ j(kGreater, &greater); // Signed compare.
- if (right.IsRegister()) {
- __ cmpl(left.AsRegisterPairLow(), right.AsX86().AsRegisterPairLow());
+ if (right.IsRegisterPair()) {
+ __ cmpl(left.AsRegisterPairLow<Register>(), right.AsRegisterPairLow<Register>());
} else {
DCHECK(right.IsDoubleStackSlot());
- __ cmpl(left.AsRegisterPairLow(), Address(ESP, right.GetStackIndex()));
+ __ cmpl(left.AsRegisterPairLow<Register>(), Address(ESP, right.GetStackIndex()));
}
__ movl(output, Immediate(0));
__ j(kEqual, &done);
@@ -1221,7 +1220,7 @@
bool dies_at_entry = !is_object_type && !is_byte_type;
if (is_byte_type) {
// Ensure the value is in a byte register.
- locations->SetInAt(1, X86CpuLocation(EAX), dies_at_entry);
+ locations->SetInAt(1, Location::RegisterLocation(EAX), dies_at_entry);
} else {
locations->SetInAt(1, Location::RequiresRegister(), dies_at_entry);
}
@@ -1229,48 +1228,48 @@
if (is_object_type) {
locations->AddTemp(Location::RequiresRegister());
// Ensure the card is in a byte register.
- locations->AddTemp(X86CpuLocation(ECX));
+ locations->AddTemp(Location::RegisterLocation(ECX));
}
}
void InstructionCodeGeneratorX86::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsX86().AsCpuRegister();
+ Register obj = locations->InAt(0).As<Register>();
uint32_t offset = instruction->GetFieldOffset().Uint32Value();
Primitive::Type field_type = instruction->GetFieldType();
switch (field_type) {
case Primitive::kPrimBoolean:
case Primitive::kPrimByte: {
- ByteRegister value = locations->InAt(1).AsX86().AsByteRegister();
+ ByteRegister value = locations->InAt(1).As<ByteRegister>();
__ movb(Address(obj, offset), value);
break;
}
case Primitive::kPrimShort:
case Primitive::kPrimChar: {
- Register value = locations->InAt(1).AsX86().AsCpuRegister();
+ Register value = locations->InAt(1).As<Register>();
__ movw(Address(obj, offset), value);
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- Register value = locations->InAt(1).AsX86().AsCpuRegister();
+ Register value = locations->InAt(1).As<Register>();
__ movl(Address(obj, offset), value);
if (field_type == Primitive::kPrimNot) {
- Register temp = locations->GetTemp(0).AsX86().AsCpuRegister();
- Register card = locations->GetTemp(1).AsX86().AsCpuRegister();
+ Register temp = locations->GetTemp(0).As<Register>();
+ Register card = locations->GetTemp(1).As<Register>();
codegen_->MarkGCCard(temp, card, obj, value);
}
break;
}
case Primitive::kPrimLong: {
- X86ManagedRegister value = locations->InAt(1).AsX86();
- __ movl(Address(obj, offset), value.AsRegisterPairLow());
- __ movl(Address(obj, kX86WordSize + offset), value.AsRegisterPairHigh());
+ Location value = locations->InAt(1);
+ __ movl(Address(obj, offset), value.AsRegisterPairLow<Register>());
+ __ movl(Address(obj, kX86WordSize + offset), value.AsRegisterPairHigh<Register>());
break;
}
@@ -1304,46 +1303,45 @@
void InstructionCodeGeneratorX86::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsX86().AsCpuRegister();
+ Register obj = locations->InAt(0).As<Register>();
uint32_t offset = instruction->GetFieldOffset().Uint32Value();
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
__ movzxb(out, Address(obj, offset));
break;
}
case Primitive::kPrimByte: {
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
__ movsxb(out, Address(obj, offset));
break;
}
case Primitive::kPrimShort: {
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
__ movsxw(out, Address(obj, offset));
break;
}
case Primitive::kPrimChar: {
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
__ movzxw(out, Address(obj, offset));
break;
}
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
__ movl(out, Address(obj, offset));
break;
}
case Primitive::kPrimLong: {
// TODO: support volatile.
- X86ManagedRegister out = locations->Out().AsX86();
- __ movl(out.AsRegisterPairLow(), Address(obj, offset));
- __ movl(out.AsRegisterPairHigh(), Address(obj, kX86WordSize + offset));
+      __ movl(locations->Out().AsRegisterPairLow<Register>(), Address(obj, offset));
+      __ movl(locations->Out().AsRegisterPairHigh<Register>(),
+              Address(obj, kX86WordSize + offset));
break;
}
@@ -1373,7 +1371,7 @@
Location obj = locations->InAt(0);
if (obj.IsRegister()) {
- __ cmpl(obj.AsX86().AsCpuRegister(), Immediate(0));
+ __ cmpl(obj.As<Register>(), Immediate(0));
} else if (obj.IsStackSlot()) {
__ cmpl(Address(ESP, obj.GetStackIndex()), Immediate(0));
} else {
@@ -1396,54 +1394,54 @@
void InstructionCodeGeneratorX86::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsX86().AsCpuRegister();
+ Register obj = locations->InAt(0).As<Register>();
Location index = locations->InAt(1);
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
__ movzxb(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
} else {
- __ movzxb(out, Address(obj, index.AsX86().AsCpuRegister(), TIMES_1, data_offset));
+ __ movzxb(out, Address(obj, index.As<Register>(), TIMES_1, data_offset));
}
break;
}
case Primitive::kPrimByte: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
__ movsxb(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
} else {
- __ movsxb(out, Address(obj, index.AsX86().AsCpuRegister(), TIMES_1, data_offset));
+ __ movsxb(out, Address(obj, index.As<Register>(), TIMES_1, data_offset));
}
break;
}
case Primitive::kPrimShort: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
__ movsxw(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
} else {
- __ movsxw(out, Address(obj, index.AsX86().AsCpuRegister(), TIMES_2, data_offset));
+ __ movsxw(out, Address(obj, index.As<Register>(), TIMES_2, data_offset));
}
break;
}
case Primitive::kPrimChar: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
__ movzxw(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
} else {
- __ movzxw(out, Address(obj, index.AsX86().AsCpuRegister(), TIMES_2, data_offset));
+ __ movzxw(out, Address(obj, index.As<Register>(), TIMES_2, data_offset));
}
break;
}
@@ -1451,28 +1449,28 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register out = locations->Out().As<Register>();
if (index.IsConstant()) {
__ movl(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
} else {
- __ movl(out, Address(obj, index.AsX86().AsCpuRegister(), TIMES_4, data_offset));
+ __ movl(out, Address(obj, index.As<Register>(), TIMES_4, data_offset));
}
break;
}
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- X86ManagedRegister out = locations->Out().AsX86();
+ Location out = locations->Out();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- __ movl(out.AsRegisterPairLow(), Address(obj, offset));
- __ movl(out.AsRegisterPairHigh(), Address(obj, offset + kX86WordSize));
+ __ movl(out.AsRegisterPairLow<Register>(), Address(obj, offset));
+ __ movl(out.AsRegisterPairHigh<Register>(), Address(obj, offset + kX86WordSize));
} else {
- __ movl(out.AsRegisterPairLow(),
- Address(obj, index.AsX86().AsCpuRegister(), TIMES_8, data_offset));
- __ movl(out.AsRegisterPairHigh(),
- Address(obj, index.AsX86().AsCpuRegister(), TIMES_8, data_offset + kX86WordSize));
+ __ movl(out.AsRegisterPairLow<Register>(),
+ Address(obj, index.As<Register>(), TIMES_8, data_offset));
+ __ movl(out.AsRegisterPairHigh<Register>(),
+ Address(obj, index.As<Register>(), TIMES_8, data_offset + kX86WordSize));
}
break;
}
@@ -1494,9 +1492,9 @@
if (value_type == Primitive::kPrimNot) {
InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(0, X86CpuLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, X86CpuLocation(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(2, X86CpuLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
} else {
bool is_byte_type = (value_type == Primitive::kPrimBoolean)
|| (value_type == Primitive::kPrimByte);
@@ -1510,7 +1508,7 @@
if (is_byte_type) {
// Ensure the value is in a byte register.
locations->SetInAt(2, Location::ByteRegisterOrConstant(
- X86ManagedRegister::FromCpuRegister(EAX), instruction->InputAt(2)), dies_at_entry);
+ EAX, instruction->InputAt(2)), dies_at_entry);
} else {
locations->SetInAt(2, Location::RegisterOrConstant(instruction->InputAt(2)), dies_at_entry);
}
@@ -1519,7 +1517,7 @@
void InstructionCodeGeneratorX86::VisitArraySet(HArraySet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- Register obj = locations->InAt(0).AsX86().AsCpuRegister();
+ Register obj = locations->InAt(0).As<Register>();
Location index = locations->InAt(1);
Location value = locations->InAt(2);
Primitive::Type value_type = instruction->GetComponentType();
@@ -1531,17 +1529,17 @@
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
if (value.IsRegister()) {
- __ movb(Address(obj, offset), value.AsX86().AsByteRegister());
+ __ movb(Address(obj, offset), value.As<ByteRegister>());
} else {
__ movb(Address(obj, offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
} else {
if (value.IsRegister()) {
- __ movb(Address(obj, index.AsX86().AsCpuRegister(), TIMES_1, data_offset),
- value.AsX86().AsByteRegister());
+ __ movb(Address(obj, index.As<Register>(), TIMES_1, data_offset),
+ value.As<ByteRegister>());
} else {
- __ movb(Address(obj, index.AsX86().AsCpuRegister(), TIMES_1, data_offset),
+ __ movb(Address(obj, index.As<Register>(), TIMES_1, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
@@ -1554,17 +1552,17 @@
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
if (value.IsRegister()) {
- __ movw(Address(obj, offset), value.AsX86().AsCpuRegister());
+ __ movw(Address(obj, offset), value.As<Register>());
} else {
__ movw(Address(obj, offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
} else {
if (value.IsRegister()) {
- __ movw(Address(obj, index.AsX86().AsCpuRegister(), TIMES_2, data_offset),
- value.AsX86().AsCpuRegister());
+ __ movw(Address(obj, index.As<Register>(), TIMES_2, data_offset),
+ value.As<Register>());
} else {
- __ movw(Address(obj, index.AsX86().AsCpuRegister(), TIMES_2, data_offset),
+ __ movw(Address(obj, index.As<Register>(), TIMES_2, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
@@ -1576,16 +1574,16 @@
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
if (value.IsRegister()) {
- __ movl(Address(obj, offset), value.AsX86().AsCpuRegister());
+ __ movl(Address(obj, offset), value.As<Register>());
} else {
__ movl(Address(obj, offset), Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
} else {
if (value.IsRegister()) {
- __ movl(Address(obj, index.AsX86().AsCpuRegister(), TIMES_4, data_offset),
- value.AsX86().AsCpuRegister());
+ __ movl(Address(obj, index.As<Register>(), TIMES_4, data_offset),
+ value.As<Register>());
} else {
- __ movl(Address(obj, index.AsX86().AsCpuRegister(), TIMES_4, data_offset),
+ __ movl(Address(obj, index.As<Register>(), TIMES_4, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
@@ -1603,25 +1601,27 @@
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
- if (value.IsRegister()) {
- __ movl(Address(obj, offset), value.AsX86().AsRegisterPairLow());
- __ movl(Address(obj, offset + kX86WordSize), value.AsX86().AsRegisterPairHigh());
+ if (value.IsRegisterPair()) {
+ __ movl(Address(obj, offset), value.AsRegisterPairLow<Register>());
+ __ movl(Address(obj, offset + kX86WordSize), value.AsRegisterPairHigh<Register>());
} else {
+ DCHECK(value.IsConstant());
int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
__ movl(Address(obj, offset), Immediate(Low32Bits(val)));
__ movl(Address(obj, offset + kX86WordSize), Immediate(High32Bits(val)));
}
} else {
- if (value.IsRegister()) {
- __ movl(Address(obj, index.AsX86().AsCpuRegister(), TIMES_8, data_offset),
- value.AsX86().AsRegisterPairLow());
- __ movl(Address(obj, index.AsX86().AsCpuRegister(), TIMES_8, data_offset + kX86WordSize),
- value.AsX86().AsRegisterPairHigh());
+ if (value.IsRegisterPair()) {
+ __ movl(Address(obj, index.As<Register>(), TIMES_8, data_offset),
+ value.AsRegisterPairLow<Register>());
+ __ movl(Address(obj, index.As<Register>(), TIMES_8, data_offset + kX86WordSize),
+ value.AsRegisterPairHigh<Register>());
} else {
+ DCHECK(value.IsConstant());
int64_t val = value.GetConstant()->AsLongConstant()->GetValue();
- __ movl(Address(obj, index.AsX86().AsCpuRegister(), TIMES_8, data_offset),
+ __ movl(Address(obj, index.As<Register>(), TIMES_8, data_offset),
Immediate(Low32Bits(val)));
- __ movl(Address(obj, index.AsX86().AsCpuRegister(), TIMES_8, data_offset + kX86WordSize),
+ __ movl(Address(obj, index.As<Register>(), TIMES_8, data_offset + kX86WordSize),
Immediate(High32Bits(val)));
}
}
@@ -1647,8 +1647,8 @@
void InstructionCodeGeneratorX86::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations = instruction->GetLocations();
uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
- Register obj = locations->InAt(0).AsX86().AsCpuRegister();
- Register out = locations->Out().AsX86().AsCpuRegister();
+ Register obj = locations->InAt(0).As<Register>();
+ Register out = locations->Out().As<Register>();
__ movl(out, Address(obj, offset));
}
@@ -1668,8 +1668,8 @@
instruction, locations->InAt(0), locations->InAt(1));
codegen_->AddSlowPath(slow_path);
- Register index = locations->InAt(0).AsX86().AsCpuRegister();
- Register length = locations->InAt(1).AsX86().AsCpuRegister();
+ Register index = locations->InAt(0).As<Register>();
+ Register length = locations->InAt(1).As<Register>();
__ cmpl(index, length);
__ j(kAboveEqual, slow_path->GetEntryLabel());
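Aside: kAboveEqual is the unsigned condition, which is why a single cmpl suffices for the bounds check — a negative index reinterpreted as unsigned is larger than any valid length, so both index < 0 and index >= length reach the slow path. The equivalent predicate as a sketch:

    #include <cstdint>
    #include <cassert>

    // (index < 0 || index >= length) <=> (uint32_t)index >= (uint32_t)length,
    // given that length is non-negative (array lengths always are).
    static bool NeedsSlowPath(int32_t index, int32_t length) {
      return static_cast<uint32_t>(index) >= static_cast<uint32_t>(length);
    }

    int main() {
      assert(NeedsSlowPath(-1, 10));   // wraps to 0xFFFFFFFF, caught
      assert(NeedsSlowPath(10, 10));   // index == length is out of bounds
      assert(!NeedsSlowPath(9, 10));   // last valid element
      return 0;
    }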
@@ -1744,14 +1744,14 @@
if (source.IsRegister()) {
if (destination.IsRegister()) {
- __ movl(destination.AsX86().AsCpuRegister(), source.AsX86().AsCpuRegister());
+ __ movl(destination.As<Register>(), source.As<Register>());
} else {
DCHECK(destination.IsStackSlot());
- __ movl(Address(ESP, destination.GetStackIndex()), source.AsX86().AsCpuRegister());
+ __ movl(Address(ESP, destination.GetStackIndex()), source.As<Register>());
}
} else if (source.IsStackSlot()) {
if (destination.IsRegister()) {
- __ movl(destination.AsX86().AsCpuRegister(), Address(ESP, source.GetStackIndex()));
+ __ movl(destination.As<Register>(), Address(ESP, source.GetStackIndex()));
} else {
DCHECK(destination.IsStackSlot());
MoveMemoryToMemory(destination.GetStackIndex(),
@@ -1761,7 +1761,7 @@
HIntConstant* instruction = source.GetConstant()->AsIntConstant();
Immediate imm(instruction->AsIntConstant()->GetValue());
if (destination.IsRegister()) {
- __ movl(destination.AsX86().AsCpuRegister(), imm);
+ __ movl(destination.As<Register>(), imm);
} else {
__ movl(Address(ESP, destination.GetStackIndex()), imm);
}
@@ -1803,11 +1803,11 @@
Location destination = move->GetDestination();
if (source.IsRegister() && destination.IsRegister()) {
- __ xchgl(destination.AsX86().AsCpuRegister(), source.AsX86().AsCpuRegister());
+ __ xchgl(destination.As<Register>(), source.As<Register>());
} else if (source.IsRegister() && destination.IsStackSlot()) {
- Exchange(source.AsX86().AsCpuRegister(), destination.GetStackIndex());
+ Exchange(source.As<Register>(), destination.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsRegister()) {
- Exchange(destination.AsX86().AsCpuRegister(), source.GetStackIndex());
+ Exchange(destination.As<Register>(), source.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsStackSlot()) {
Exchange(destination.GetStackIndex(), source.GetStackIndex());
} else {
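Aside: the pattern running through this whole file — AsX86().AsCpuRegister() becoming As<Register>(), X86CpuLocation(...) becoming Location::RegisterLocation(...), and register pairs going through AsRegisterPairLow<Register>()/AsRegisterPairHigh<Register>() — implies that Location now stores plain register ids and hands out the architecture-specific register type via a templated cast, instead of routing through per-arch ManagedRegister wrappers. A rough, hypothetical sketch of that accessor shape (names and encoding assumed, not taken from this patch):

    // Hypothetical, simplified Location with templated register accessors.
    class Location {
     public:
      static Location RegisterLocation(int reg) { return Location(kRegister, reg); }
      static Location FpuRegisterLocation(int reg) { return Location(kFpuRegister, reg); }
      static Location RegisterPairLocation(int low, int high) {
        return Location(kRegisterPair, (low << 8) | high);
      }

      // Each code generator casts the stored id to its own register type,
      // e.g. As<Register>() on x86 and As<CpuRegister>() on x86-64.
      template <typename T> T As() const { return static_cast<T>(payload_); }
      template <typename T> T AsRegisterPairLow() const { return static_cast<T>(payload_ >> 8); }
      template <typename T> T AsRegisterPairHigh() const { return static_cast<T>(payload_ & 0xFF); }

     private:
      enum Kind { kRegister, kFpuRegister, kRegisterPair };
      Location(Kind kind, int payload) : kind_(kind), payload_(payload) {}
      Kind kind_;
      int payload_;
    };

One visible payoff in the hunks below: AllocateFreeRegister can return a Location directly for both CPU and FPU registers, and the invalid case becomes a default-constructed Location() rather than ManagedRegister::NoRegister().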
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 3e2ca90..c520164 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -167,7 +167,7 @@
virtual size_t GetNumberOfRegisters() const OVERRIDE;
virtual void SetupBlockedRegisters(bool* blocked_registers) const OVERRIDE;
- virtual ManagedRegister AllocateFreeRegister(
+ virtual Location AllocateFreeRegister(
Primitive::Type type, bool* blocked_registers) const OVERRIDE;
virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index f5437a1..393eb1a 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -30,10 +30,6 @@
namespace art {
-x86_64::X86_64ManagedRegister Location::AsX86_64() const {
- return reg().AsX86_64();
-}
-
namespace x86_64 {
static constexpr bool kExplicitStackOverflowCheck = false;
@@ -44,10 +40,6 @@
static constexpr int kNumberOfPushedRegistersAtEntry = 1;
static constexpr int kCurrentMethodStackOffset = 0;
-static Location X86_64CpuLocation(Register reg) {
- return Location::RegisterLocation(X86_64ManagedRegister::FromCpuRegister(reg));
-}
-
static constexpr Register kRuntimeParameterCoreRegisters[] = { RDI, RSI, RDX };
static constexpr size_t kRuntimeParameterCoreRegistersLength =
arraysize(kRuntimeParameterCoreRegisters);
@@ -144,8 +136,8 @@
CodeGeneratorX86_64* x64_codegen = reinterpret_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
InvokeRuntimeCallingConvention calling_convention;
- x64_codegen->Move(X86_64CpuLocation(calling_convention.GetRegisterAt(0)), index_location_);
- x64_codegen->Move(X86_64CpuLocation(calling_convention.GetRegisterAt(1)), length_location_);
+    x64_codegen->Move(
+        Location::RegisterLocation(calling_convention.GetRegisterAt(0)), index_location_);
+    x64_codegen->Move(
+        Location::RegisterLocation(calling_convention.GetRegisterAt(1)), length_location_);
__ gs()->call(Address::Absolute(
QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowArrayBounds), true));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
@@ -208,8 +200,8 @@
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
-ManagedRegister CodeGeneratorX86_64::AllocateFreeRegister(Primitive::Type type,
- bool* blocked_registers) const {
+Location CodeGeneratorX86_64::AllocateFreeRegister(Primitive::Type type,
+ bool* blocked_registers) const {
switch (type) {
case Primitive::kPrimLong:
case Primitive::kPrimByte:
@@ -219,21 +211,21 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot: {
size_t reg = AllocateFreeRegisterInternal(blocked_registers, kNumberOfCpuRegisters);
- return X86_64ManagedRegister::FromCpuRegister(static_cast<Register>(reg));
+ return Location::RegisterLocation(reg);
}
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
size_t reg = AllocateFreeRegisterInternal(
blocked_registers + kNumberOfCpuRegisters, kNumberOfFloatRegisters);
- return X86_64ManagedRegister::FromXmmRegister(static_cast<FloatRegister>(reg));
+ return Location::FpuRegisterLocation(reg);
}
case Primitive::kPrimVoid:
LOG(FATAL) << "Unreachable type " << type;
}
- return ManagedRegister::NoRegister();
+ return Location();
}
void CodeGeneratorX86_64::SetupBlockedRegisters(bool* blocked_registers) const {
@@ -331,37 +323,37 @@
}
if (destination.IsRegister()) {
if (source.IsRegister()) {
- __ movq(destination.AsX86_64().AsCpuRegister(), source.AsX86_64().AsCpuRegister());
+ __ movq(destination.As<CpuRegister>(), source.As<CpuRegister>());
} else if (source.IsFpuRegister()) {
- __ movd(destination.AsX86_64().AsCpuRegister(), source.AsX86_64().AsXmmRegister());
+ __ movd(destination.As<CpuRegister>(), source.As<XmmRegister>());
} else if (source.IsStackSlot()) {
- __ movl(destination.AsX86_64().AsCpuRegister(),
+ __ movl(destination.As<CpuRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
} else {
DCHECK(source.IsDoubleStackSlot());
- __ movq(destination.AsX86_64().AsCpuRegister(),
+ __ movq(destination.As<CpuRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
}
} else if (destination.IsFpuRegister()) {
if (source.IsRegister()) {
- __ movd(destination.AsX86_64().AsXmmRegister(), source.AsX86_64().AsCpuRegister());
+ __ movd(destination.As<XmmRegister>(), source.As<CpuRegister>());
} else if (source.IsFpuRegister()) {
- __ movaps(destination.AsX86_64().AsXmmRegister(), source.AsX86_64().AsXmmRegister());
+ __ movaps(destination.As<XmmRegister>(), source.As<XmmRegister>());
} else if (source.IsStackSlot()) {
- __ movss(destination.AsX86_64().AsXmmRegister(),
+ __ movss(destination.As<XmmRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
} else {
DCHECK(source.IsDoubleStackSlot());
- __ movsd(destination.AsX86_64().AsXmmRegister(),
+ __ movsd(destination.As<XmmRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
}
} else if (destination.IsStackSlot()) {
if (source.IsRegister()) {
__ movl(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.AsX86_64().AsCpuRegister());
+ source.As<CpuRegister>());
} else if (source.IsFpuRegister()) {
__ movss(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.AsX86_64().AsXmmRegister());
+ source.As<XmmRegister>());
} else {
DCHECK(source.IsStackSlot());
__ movl(CpuRegister(TMP), Address(CpuRegister(RSP), source.GetStackIndex()));
@@ -371,10 +363,10 @@
DCHECK(destination.IsDoubleStackSlot());
if (source.IsRegister()) {
__ movq(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.AsX86_64().AsCpuRegister());
+ source.As<CpuRegister>());
} else if (source.IsFpuRegister()) {
__ movsd(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.AsX86_64().AsXmmRegister());
+ source.As<XmmRegister>());
} else {
DCHECK(source.IsDoubleStackSlot());
__ movq(CpuRegister(TMP), Address(CpuRegister(RSP), source.GetStackIndex()));
@@ -389,14 +381,14 @@
if (instruction->AsIntConstant() != nullptr) {
Immediate imm(instruction->AsIntConstant()->GetValue());
if (location.IsRegister()) {
- __ movl(location.AsX86_64().AsCpuRegister(), imm);
+ __ movl(location.As<CpuRegister>(), imm);
} else {
__ movl(Address(CpuRegister(RSP), location.GetStackIndex()), imm);
}
} else if (instruction->AsLongConstant() != nullptr) {
int64_t value = instruction->AsLongConstant()->GetValue();
if (location.IsRegister()) {
- __ movq(location.AsX86_64().AsCpuRegister(), Immediate(value));
+ __ movq(location.As<CpuRegister>(), Immediate(value));
} else {
__ movq(CpuRegister(TMP), Immediate(value));
__ movq(Address(CpuRegister(RSP), location.GetStackIndex()), CpuRegister(TMP));
@@ -497,7 +489,7 @@
// Materialized condition, compare against 0.
Location lhs = if_instr->GetLocations()->InAt(0);
if (lhs.IsRegister()) {
- __ cmpl(lhs.AsX86_64().AsCpuRegister(), Immediate(0));
+ __ cmpl(lhs.As<CpuRegister>(), Immediate(0));
} else {
__ cmpl(Address(CpuRegister(RSP), lhs.GetStackIndex()), Immediate(0));
}
@@ -507,12 +499,12 @@
Location lhs = cond->GetLocations()->InAt(0);
Location rhs = cond->GetLocations()->InAt(1);
if (rhs.IsRegister()) {
- __ cmpl(lhs.AsX86_64().AsCpuRegister(), rhs.AsX86_64().AsCpuRegister());
+ __ cmpl(lhs.As<CpuRegister>(), rhs.As<CpuRegister>());
} else if (rhs.IsConstant()) {
- __ cmpl(lhs.AsX86_64().AsCpuRegister(),
+ __ cmpl(lhs.As<CpuRegister>(),
Immediate(rhs.GetConstant()->AsIntConstant()->GetValue()));
} else {
- __ cmpl(lhs.AsX86_64().AsCpuRegister(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
+ __ cmpl(lhs.As<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
}
__ j(X86_64Condition(cond->AsCondition()->GetCondition()),
codegen_->GetLabelOf(if_instr->IfTrueSuccessor()));
@@ -578,17 +570,17 @@
void InstructionCodeGeneratorX86_64::VisitCondition(HCondition* comp) {
if (comp->NeedsMaterialization()) {
LocationSummary* locations = comp->GetLocations();
- CpuRegister reg = locations->Out().AsX86_64().AsCpuRegister();
+ CpuRegister reg = locations->Out().As<CpuRegister>();
// Clear register: setcc only sets the low byte.
__ xorq(reg, reg);
if (locations->InAt(1).IsRegister()) {
- __ cmpq(locations->InAt(0).AsX86_64().AsCpuRegister(),
- locations->InAt(1).AsX86_64().AsCpuRegister());
+ __ cmpq(locations->InAt(0).As<CpuRegister>(),
+ locations->InAt(1).As<CpuRegister>());
} else if (locations->InAt(1).IsConstant()) {
- __ cmpq(locations->InAt(0).AsX86_64().AsCpuRegister(),
+ __ cmpq(locations->InAt(0).As<CpuRegister>(),
Immediate(locations->InAt(1).GetConstant()->AsIntConstant()->GetValue()));
} else {
- __ cmpq(locations->InAt(0).AsX86_64().AsCpuRegister(),
+ __ cmpq(locations->InAt(0).As<CpuRegister>(),
Address(CpuRegister(RSP), locations->InAt(1).GetStackIndex()));
}
__ setcc(X86_64Condition(comp->GetCondition()), reg);
@@ -656,22 +648,23 @@
LocationSummary* locations = compare->GetLocations();
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong:
- __ cmpq(locations->InAt(0).AsX86_64().AsCpuRegister(),
- locations->InAt(1).AsX86_64().AsCpuRegister());
+ __ cmpq(locations->InAt(0).As<CpuRegister>(),
+ locations->InAt(1).As<CpuRegister>());
break;
default:
LOG(FATAL) << "Unimplemented compare type " << compare->InputAt(0)->GetType();
}
- __ movl(locations->Out().AsX86_64().AsCpuRegister(), Immediate(0));
+ CpuRegister output = locations->Out().As<CpuRegister>();
+ __ movl(output, Immediate(0));
__ j(kEqual, &done);
__ j(kGreater, &greater);
- __ movl(locations->Out().AsX86_64().AsCpuRegister(), Immediate(-1));
+ __ movl(output, Immediate(-1));
__ jmp(&done);
__ Bind(&greater);
- __ movl(locations->Out().AsX86_64().AsCpuRegister(), Immediate(1));
+ __ movl(output, Immediate(1));
__ Bind(&done);
}
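Aside: hoisting locations->Out() into output above is purely a readability change; the emitted protocol is unchanged — the compare still lowers to the usual three-way result. As a sketch:

    #include <cstdint>

    // What the movl/jcc ladder above computes for a 64-bit compare.
    static int32_t Compare64(int64_t lhs, int64_t rhs) {
      if (lhs == rhs) return 0;     // movl output, 0; je done
      return (lhs < rhs) ? -1 : 1;  // movl output, -1 / movl output, 1
    }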
@@ -714,13 +707,13 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimLong:
- locations->SetInAt(0, X86_64CpuLocation(RAX));
+ locations->SetInAt(0, Location::RegisterLocation(RAX));
break;
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
locations->SetInAt(0,
- Location::FpuRegisterLocation(X86_64ManagedRegister::FromXmmRegister(XMM0)));
+ Location::FpuRegisterLocation(XMM0));
break;
default:
@@ -738,12 +731,12 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimLong:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsX86_64().AsCpuRegister().AsRegister(), RAX);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).As<CpuRegister>().AsRegister(), RAX);
break;
case Primitive::kPrimFloat:
case Primitive::kPrimDouble:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsX86_64().AsXmmRegister().AsFloatRegister(),
+ DCHECK_EQ(ret->GetLocations()->InAt(0).As<XmmRegister>().AsFloatRegister(),
XMM0);
break;
@@ -766,7 +759,7 @@
uint32_t index = gp_index_++;
stack_index_++;
if (index < calling_convention.GetNumberOfRegisters()) {
- return X86_64CpuLocation(calling_convention.GetRegisterAt(index));
+ return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
} else {
return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 1));
}
@@ -777,7 +770,7 @@
stack_index_ += 2;
if (index < calling_convention.GetNumberOfRegisters()) {
gp_index_ += 1;
- return X86_64CpuLocation(calling_convention.GetRegisterAt(index));
+ return Location::RegisterLocation(calling_convention.GetRegisterAt(index));
} else {
gp_index_ += 2;
return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 2));
@@ -788,8 +781,7 @@
uint32_t index = fp_index_++;
stack_index_++;
if (index < calling_convention.GetNumberOfFpuRegisters()) {
- return Location::FpuRegisterLocation(X86_64ManagedRegister::FromXmmRegister(
- calling_convention.GetFpuRegisterAt(index)));
+ return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(index));
} else {
return Location::StackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 1));
}
@@ -799,8 +791,7 @@
uint32_t index = fp_index_++;
stack_index_ += 2;
if (index < calling_convention.GetNumberOfFpuRegisters()) {
- return Location::FpuRegisterLocation(X86_64ManagedRegister::FromXmmRegister(
- calling_convention.GetFpuRegisterAt(index)));
+ return Location::FpuRegisterLocation(calling_convention.GetFpuRegisterAt(index));
} else {
return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index_ - 2));
}
@@ -818,7 +809,7 @@
}
void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) {
- CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsX86_64().AsCpuRegister();
+ CpuRegister temp = invoke->GetLocations()->GetTemp(0).As<CpuRegister>();
uint32_t heap_reference_size = sizeof(mirror::HeapReference<mirror::Object>);
size_t index_in_cache = mirror::Array::DataOffset(heap_reference_size).SizeValue() +
invoke->GetIndexInDexCache() * heap_reference_size;
@@ -850,7 +841,7 @@
void LocationsBuilderX86_64::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
- locations->AddTemp(X86_64CpuLocation(RDI));
+ locations->AddTemp(Location::RegisterLocation(RDI));
InvokeDexCallingConventionVisitor calling_convention_visitor;
for (size_t i = 0; i < invoke->InputCount(); i++) {
@@ -866,7 +857,7 @@
case Primitive::kPrimInt:
case Primitive::kPrimNot:
case Primitive::kPrimLong:
- locations->SetOut(X86_64CpuLocation(RAX));
+ locations->SetOut(Location::RegisterLocation(RAX));
break;
case Primitive::kPrimVoid:
@@ -874,14 +865,13 @@
case Primitive::kPrimDouble:
case Primitive::kPrimFloat:
- locations->SetOut(
- Location::FpuRegisterLocation(X86_64ManagedRegister::FromXmmRegister(XMM0)));
+ locations->SetOut(Location::FpuRegisterLocation(XMM0));
break;
}
}
void InstructionCodeGeneratorX86_64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
- CpuRegister temp = invoke->GetLocations()->GetTemp(0).AsX86_64().AsCpuRegister();
+ CpuRegister temp = invoke->GetLocations()->GetTemp(0).As<CpuRegister>();
size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
LocationSummary* locations = invoke->GetLocations();
@@ -892,7 +882,7 @@
__ movq(temp, Address(CpuRegister(RSP), receiver.GetStackIndex()));
__ movq(temp, Address(temp, class_offset));
} else {
- __ movq(temp, Address(receiver.AsX86_64().AsCpuRegister(), class_offset));
+ __ movq(temp, Address(receiver.As<CpuRegister>(), class_offset));
}
// temp = temp->GetMethodAt(method_offset);
__ movl(temp, Address(temp, method_offset));
@@ -943,28 +933,28 @@
switch (add->GetResultType()) {
case Primitive::kPrimInt: {
if (second.IsRegister()) {
- __ addl(first.AsX86_64().AsCpuRegister(), second.AsX86_64().AsCpuRegister());
+ __ addl(first.As<CpuRegister>(), second.As<CpuRegister>());
} else if (second.IsConstant()) {
HConstant* instruction = second.GetConstant();
Immediate imm(instruction->AsIntConstant()->GetValue());
- __ addl(first.AsX86_64().AsCpuRegister(), imm);
+ __ addl(first.As<CpuRegister>(), imm);
} else {
- __ addl(first.AsX86_64().AsCpuRegister(),
+ __ addl(first.As<CpuRegister>(),
Address(CpuRegister(RSP), second.GetStackIndex()));
}
break;
}
case Primitive::kPrimLong: {
- __ addq(first.AsX86_64().AsCpuRegister(), second.AsX86_64().AsCpuRegister());
+ __ addq(first.As<CpuRegister>(), second.As<CpuRegister>());
break;
}
case Primitive::kPrimFloat: {
if (second.IsFpuRegister()) {
- __ addss(first.AsX86_64().AsXmmRegister(), second.AsX86_64().AsXmmRegister());
+ __ addss(first.As<XmmRegister>(), second.As<XmmRegister>());
} else {
- __ addss(first.AsX86_64().AsXmmRegister(),
+ __ addss(first.As<XmmRegister>(),
Address(CpuRegister(RSP), second.GetStackIndex()));
}
break;
@@ -972,10 +962,9 @@
case Primitive::kPrimDouble: {
if (second.IsFpuRegister()) {
- __ addsd(first.AsX86_64().AsXmmRegister(), second.AsX86_64().AsXmmRegister());
+ __ addsd(first.As<XmmRegister>(), second.As<XmmRegister>());
} else {
- __ addsd(first.AsX86_64().AsXmmRegister(),
- Address(CpuRegister(RSP), second.GetStackIndex()));
+ __ addsd(first.As<XmmRegister>(), Address(CpuRegister(RSP), second.GetStackIndex()));
}
break;
}
@@ -1016,26 +1005,26 @@
void InstructionCodeGeneratorX86_64::VisitSub(HSub* sub) {
LocationSummary* locations = sub->GetLocations();
- DCHECK_EQ(locations->InAt(0).AsX86_64().AsCpuRegister().AsRegister(),
- locations->Out().AsX86_64().AsCpuRegister().AsRegister());
+ DCHECK_EQ(locations->InAt(0).As<CpuRegister>().AsRegister(),
+ locations->Out().As<CpuRegister>().AsRegister());
switch (sub->GetResultType()) {
case Primitive::kPrimInt: {
if (locations->InAt(1).IsRegister()) {
- __ subl(locations->InAt(0).AsX86_64().AsCpuRegister(),
- locations->InAt(1).AsX86_64().AsCpuRegister());
+ __ subl(locations->InAt(0).As<CpuRegister>(),
+ locations->InAt(1).As<CpuRegister>());
} else if (locations->InAt(1).IsConstant()) {
HConstant* instruction = locations->InAt(1).GetConstant();
Immediate imm(instruction->AsIntConstant()->GetValue());
- __ subl(locations->InAt(0).AsX86_64().AsCpuRegister(), imm);
+ __ subl(locations->InAt(0).As<CpuRegister>(), imm);
} else {
- __ subl(locations->InAt(0).AsX86_64().AsCpuRegister(),
+ __ subl(locations->InAt(0).As<CpuRegister>(),
Address(CpuRegister(RSP), locations->InAt(1).GetStackIndex()));
}
break;
}
case Primitive::kPrimLong: {
- __ subq(locations->InAt(0).AsX86_64().AsCpuRegister(),
- locations->InAt(1).AsX86_64().AsCpuRegister());
+ __ subq(locations->InAt(0).As<CpuRegister>(),
+ locations->InAt(1).As<CpuRegister>());
break;
}
@@ -1055,9 +1044,9 @@
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
InvokeRuntimeCallingConvention calling_convention;
- locations->AddTemp(X86_64CpuLocation(calling_convention.GetRegisterAt(0)));
- locations->AddTemp(X86_64CpuLocation(calling_convention.GetRegisterAt(1)));
- locations->SetOut(X86_64CpuLocation(RAX));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->AddTemp(Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetOut(Location::RegisterLocation(RAX));
}
void InstructionCodeGeneratorX86_64::VisitNewInstance(HNewInstance* instruction) {
@@ -1097,9 +1086,9 @@
void InstructionCodeGeneratorX86_64::VisitNot(HNot* instruction) {
LocationSummary* locations = instruction->GetLocations();
- DCHECK_EQ(locations->InAt(0).AsX86_64().AsCpuRegister().AsRegister(),
- locations->Out().AsX86_64().AsCpuRegister().AsRegister());
- __ xorq(locations->Out().AsX86_64().AsCpuRegister(), Immediate(1));
+ DCHECK_EQ(locations->InAt(0).As<CpuRegister>().AsRegister(),
+ locations->Out().As<CpuRegister>().AsRegister());
+ __ xorq(locations->Out().As<CpuRegister>(), Immediate(1));
}
void LocationsBuilderX86_64::VisitPhi(HPhi* instruction) {
@@ -1132,8 +1121,8 @@
void InstructionCodeGeneratorX86_64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister obj = locations->InAt(0).AsX86_64().AsCpuRegister();
- CpuRegister value = locations->InAt(1).AsX86_64().AsCpuRegister();
+ CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ CpuRegister value = locations->InAt(1).As<CpuRegister>();
size_t offset = instruction->GetFieldOffset().SizeValue();
Primitive::Type field_type = instruction->GetFieldType();
@@ -1154,8 +1143,8 @@
case Primitive::kPrimNot: {
__ movl(Address(obj, offset), value);
if (field_type == Primitive::kPrimNot) {
- CpuRegister temp = locations->GetTemp(0).AsX86_64().AsCpuRegister();
- CpuRegister card = locations->GetTemp(1).AsX86_64().AsCpuRegister();
+ CpuRegister temp = locations->GetTemp(0).As<CpuRegister>();
+ CpuRegister card = locations->GetTemp(1).As<CpuRegister>();
codegen_->MarkGCCard(temp, card, obj, value);
}
break;
@@ -1184,8 +1173,8 @@
void InstructionCodeGeneratorX86_64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister obj = locations->InAt(0).AsX86_64().AsCpuRegister();
- CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ CpuRegister out = locations->Out().As<CpuRegister>();
size_t offset = instruction->GetFieldOffset().SizeValue();
switch (instruction->GetType()) {
@@ -1246,7 +1235,7 @@
Location obj = locations->InAt(0);
if (obj.IsRegister()) {
- __ cmpl(obj.AsX86_64().AsCpuRegister(), Immediate(0));
+ __ cmpl(obj.As<CpuRegister>(), Immediate(0));
} else if (obj.IsStackSlot()) {
__ cmpl(Address(CpuRegister(RSP), obj.GetStackIndex()), Immediate(0));
} else {
@@ -1269,54 +1258,54 @@
void InstructionCodeGeneratorX86_64::VisitArrayGet(HArrayGet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister obj = locations->InAt(0).AsX86_64().AsCpuRegister();
+ CpuRegister obj = locations->InAt(0).As<CpuRegister>();
Location index = locations->InAt(1);
switch (instruction->GetType()) {
case Primitive::kPrimBoolean: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value();
- CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ CpuRegister out = locations->Out().As<CpuRegister>();
if (index.IsConstant()) {
__ movzxb(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
} else {
- __ movzxb(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_1, data_offset));
+ __ movzxb(out, Address(obj, index.As<CpuRegister>(), TIMES_1, data_offset));
}
break;
}
case Primitive::kPrimByte: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value();
- CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ CpuRegister out = locations->Out().As<CpuRegister>();
if (index.IsConstant()) {
__ movsxb(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset));
} else {
- __ movsxb(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_1, data_offset));
+ __ movsxb(out, Address(obj, index.As<CpuRegister>(), TIMES_1, data_offset));
}
break;
}
case Primitive::kPrimShort: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value();
- CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ CpuRegister out = locations->Out().As<CpuRegister>();
if (index.IsConstant()) {
__ movsxw(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
} else {
- __ movsxw(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_2, data_offset));
+ __ movsxw(out, Address(obj, index.As<CpuRegister>(), TIMES_2, data_offset));
}
break;
}
case Primitive::kPrimChar: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value();
- CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ CpuRegister out = locations->Out().As<CpuRegister>();
if (index.IsConstant()) {
__ movzxw(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset));
} else {
- __ movzxw(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_2, data_offset));
+ __ movzxw(out, Address(obj, index.As<CpuRegister>(), TIMES_2, data_offset));
}
break;
}
@@ -1325,24 +1314,24 @@
case Primitive::kPrimNot: {
DCHECK_EQ(sizeof(mirror::HeapReference<mirror::Object>), sizeof(int32_t));
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value();
- CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ CpuRegister out = locations->Out().As<CpuRegister>();
if (index.IsConstant()) {
__ movl(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset));
} else {
- __ movl(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_4, data_offset));
+ __ movl(out, Address(obj, index.As<CpuRegister>(), TIMES_4, data_offset));
}
break;
}
case Primitive::kPrimLong: {
uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value();
- CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ CpuRegister out = locations->Out().As<CpuRegister>();
if (index.IsConstant()) {
__ movq(out, Address(obj,
(index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset));
} else {
- __ movq(out, Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_8, data_offset));
+ __ movq(out, Address(obj, index.As<CpuRegister>(), TIMES_8, data_offset));
}
break;
}
@@ -1363,9 +1352,9 @@
instruction, is_object ? LocationSummary::kCall : LocationSummary::kNoCall);
if (is_object) {
InvokeRuntimeCallingConvention calling_convention;
- locations->SetInAt(0, X86_64CpuLocation(calling_convention.GetRegisterAt(0)));
- locations->SetInAt(1, X86_64CpuLocation(calling_convention.GetRegisterAt(1)));
- locations->SetInAt(2, X86_64CpuLocation(calling_convention.GetRegisterAt(2)));
+ locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+ locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
} else {
locations->SetInAt(0, Location::RequiresRegister(), Location::kDiesAtEntry);
locations->SetInAt(
@@ -1381,7 +1370,7 @@
void InstructionCodeGeneratorX86_64::VisitArraySet(HArraySet* instruction) {
LocationSummary* locations = instruction->GetLocations();
- CpuRegister obj = locations->InAt(0).AsX86_64().AsCpuRegister();
+ CpuRegister obj = locations->InAt(0).As<CpuRegister>();
Location index = locations->InAt(1);
Location value = locations->InAt(2);
Primitive::Type value_type = instruction->GetComponentType();
@@ -1393,16 +1382,16 @@
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset;
if (value.IsRegister()) {
- __ movb(Address(obj, offset), value.AsX86_64().AsCpuRegister());
+ __ movb(Address(obj, offset), value.As<CpuRegister>());
} else {
__ movb(Address(obj, offset), Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
} else {
if (value.IsRegister()) {
- __ movb(Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_1, data_offset),
- value.AsX86_64().AsCpuRegister());
+ __ movb(Address(obj, index.As<CpuRegister>(), TIMES_1, data_offset),
+ value.As<CpuRegister>());
} else {
- __ movb(Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_1, data_offset),
+ __ movb(Address(obj, index.As<CpuRegister>(), TIMES_1, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
@@ -1415,16 +1404,16 @@
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset;
if (value.IsRegister()) {
- __ movw(Address(obj, offset), value.AsX86_64().AsCpuRegister());
+ __ movw(Address(obj, offset), value.As<CpuRegister>());
} else {
__ movw(Address(obj, offset), Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
} else {
if (value.IsRegister()) {
- __ movw(Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_2, data_offset),
- value.AsX86_64().AsCpuRegister());
+ __ movw(Address(obj, index.As<CpuRegister>(), TIMES_2, data_offset),
+ value.As<CpuRegister>());
} else {
- __ movw(Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_2, data_offset),
+ __ movw(Address(obj, index.As<CpuRegister>(), TIMES_2, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
@@ -1436,16 +1425,16 @@
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset;
if (value.IsRegister()) {
- __ movl(Address(obj, offset), value.AsX86_64().AsCpuRegister());
+ __ movl(Address(obj, offset), value.As<CpuRegister>());
} else {
__ movl(Address(obj, offset), Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
} else {
if (value.IsRegister()) {
- __ movl(Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_4, data_offset),
- value.AsX86_64().AsCpuRegister());
+ __ movl(Address(obj, index.As<CpuRegister>(), TIMES_4, data_offset),
+ value.As<CpuRegister>());
} else {
- __ movl(Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_4, data_offset),
+ __ movl(Address(obj, index.As<CpuRegister>(), TIMES_4, data_offset),
Immediate(value.GetConstant()->AsIntConstant()->GetValue()));
}
}
@@ -1464,11 +1453,11 @@
if (index.IsConstant()) {
size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset;
DCHECK(value.IsRegister());
- __ movq(Address(obj, offset), value.AsX86_64().AsCpuRegister());
+ __ movq(Address(obj, offset), value.As<CpuRegister>());
} else {
DCHECK(value.IsRegister());
- __ movq(Address(obj, index.AsX86_64().AsCpuRegister(), TIMES_8, data_offset),
- value.AsX86_64().AsCpuRegister());
+ __ movq(Address(obj, index.As<CpuRegister>(), TIMES_8, data_offset),
+ value.As<CpuRegister>());
}
break;
}
@@ -1492,8 +1481,8 @@
void InstructionCodeGeneratorX86_64::VisitArrayLength(HArrayLength* instruction) {
LocationSummary* locations = instruction->GetLocations();
uint32_t offset = mirror::Array::LengthOffset().Uint32Value();
- CpuRegister obj = locations->InAt(0).AsX86_64().AsCpuRegister();
- CpuRegister out = locations->Out().AsX86_64().AsCpuRegister();
+ CpuRegister obj = locations->InAt(0).As<CpuRegister>();
+ CpuRegister out = locations->Out().As<CpuRegister>();
__ movl(out, Address(obj, offset));
}
@@ -1513,8 +1502,8 @@
instruction, locations->InAt(0), locations->InAt(1));
codegen_->AddSlowPath(slow_path);
- CpuRegister index = locations->InAt(0).AsX86_64().AsCpuRegister();
- CpuRegister length = locations->InAt(1).AsX86_64().AsCpuRegister();
+ CpuRegister index = locations->InAt(0).As<CpuRegister>();
+ CpuRegister length = locations->InAt(1).As<CpuRegister>();
__ cmpl(index, length);
__ j(kAboveEqual, slow_path->GetEntryLabel());
@@ -1596,18 +1585,18 @@
if (source.IsRegister()) {
if (destination.IsRegister()) {
- __ movq(destination.AsX86_64().AsCpuRegister(), source.AsX86_64().AsCpuRegister());
+ __ movq(destination.As<CpuRegister>(), source.As<CpuRegister>());
} else if (destination.IsStackSlot()) {
__ movl(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.AsX86_64().AsCpuRegister());
+ source.As<CpuRegister>());
} else {
DCHECK(destination.IsDoubleStackSlot());
__ movq(Address(CpuRegister(RSP), destination.GetStackIndex()),
- source.AsX86_64().AsCpuRegister());
+ source.As<CpuRegister>());
}
} else if (source.IsStackSlot()) {
if (destination.IsRegister()) {
- __ movl(destination.AsX86_64().AsX86_64().AsCpuRegister(),
+ __ movl(destination.As<CpuRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
} else {
DCHECK(destination.IsStackSlot());
@@ -1616,7 +1605,7 @@
}
} else if (source.IsDoubleStackSlot()) {
if (destination.IsRegister()) {
- __ movq(destination.AsX86_64().AsX86_64().AsCpuRegister(),
+ __ movq(destination.As<CpuRegister>(),
Address(CpuRegister(RSP), source.GetStackIndex()));
} else {
DCHECK(destination.IsDoubleStackSlot());
@@ -1628,14 +1617,14 @@
if (constant->IsIntConstant()) {
Immediate imm(constant->AsIntConstant()->GetValue());
if (destination.IsRegister()) {
- __ movl(destination.AsX86_64().AsCpuRegister(), imm);
+ __ movl(destination.As<CpuRegister>(), imm);
} else {
__ movl(Address(CpuRegister(RSP), destination.GetStackIndex()), imm);
}
} else if (constant->IsLongConstant()) {
int64_t value = constant->AsLongConstant()->GetValue();
if (destination.IsRegister()) {
- __ movq(destination.AsX86_64().AsCpuRegister(), Immediate(value));
+ __ movq(destination.As<CpuRegister>(), Immediate(value));
} else {
__ movq(CpuRegister(TMP), Immediate(value));
__ movq(Address(CpuRegister(RSP), destination.GetStackIndex()), CpuRegister(TMP));
@@ -1692,17 +1681,17 @@
Location destination = move->GetDestination();
if (source.IsRegister() && destination.IsRegister()) {
- __ xchgq(destination.AsX86_64().AsCpuRegister(), source.AsX86_64().AsCpuRegister());
+ __ xchgq(destination.As<CpuRegister>(), source.As<CpuRegister>());
} else if (source.IsRegister() && destination.IsStackSlot()) {
- Exchange32(source.AsX86_64().AsCpuRegister(), destination.GetStackIndex());
+ Exchange32(source.As<CpuRegister>(), destination.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsRegister()) {
- Exchange32(destination.AsX86_64().AsCpuRegister(), source.GetStackIndex());
+ Exchange32(destination.As<CpuRegister>(), source.GetStackIndex());
} else if (source.IsStackSlot() && destination.IsStackSlot()) {
Exchange32(destination.GetStackIndex(), source.GetStackIndex());
} else if (source.IsRegister() && destination.IsDoubleStackSlot()) {
- Exchange64(source.AsX86_64().AsCpuRegister(), destination.GetStackIndex());
+ Exchange64(source.As<CpuRegister>(), destination.GetStackIndex());
} else if (source.IsDoubleStackSlot() && destination.IsRegister()) {
- Exchange64(destination.AsX86_64().AsCpuRegister(), source.GetStackIndex());
+ Exchange64(destination.As<CpuRegister>(), source.GetStackIndex());
} else if (source.IsDoubleStackSlot() && destination.IsDoubleStackSlot()) {
Exchange64(destination.GetStackIndex(), source.GetStackIndex());
} else {
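
The hunks above are a mechanical rewrite of every x86-64 call site from the per-backend accessor chain to the templated As<T>() that this patch adds to Location (see the locations.h hunks below). A minimal, self-contained sketch of the pattern; every type here is a hypothetical stand-in, not the real ART API:

#include <cassert>

enum Register { RAX = 0, RCX = 1 };

class CpuRegister {
 public:
  explicit CpuRegister(Register r) : reg_(r) {}
  // An int constructor, mirroring the XmmRegister change later in this
  // patch, is what lets static_cast<CpuRegister>(int) compile.
  explicit CpuRegister(int r) : reg_(static_cast<Register>(r)) {}
  Register AsRegister() const { return reg_; }
 private:
  Register reg_;
};

class Location {
 public:
  explicit Location(int payload) : payload_(payload) {}
  int reg() const { return payload_; }
  // One template replaces the AsArm()/AsX86()/AsX86_64() chains.
  template <typename T>
  T As() const { return static_cast<T>(reg()); }
 private:
  int payload_;
};

int main() {
  Location loc(RCX);
  CpuRegister out = loc.As<CpuRegister>();  // was: loc.AsX86_64().AsCpuRegister()
  assert(out.AsRegister() == RCX);
  return 0;
}
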
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index c81f785..bdaf15f 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -186,7 +186,7 @@
}
virtual void SetupBlockedRegisters(bool* blocked_registers) const OVERRIDE;
- virtual ManagedRegister AllocateFreeRegister(
+ virtual Location AllocateFreeRegister(
Primitive::Type type, bool* blocked_registers) const OVERRIDE;
virtual void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
virtual void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 686a0b0..b4eb89d 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -123,9 +123,9 @@
void DumpLocation(Location location, Primitive::Type type) {
if (location.IsRegister()) {
if (type == Primitive::kPrimDouble || type == Primitive::kPrimFloat) {
- codegen_.DumpFloatingPointRegister(output_, location.reg().RegId());
+ codegen_.DumpFloatingPointRegister(output_, location.reg());
} else {
- codegen_.DumpCoreRegister(output_, location.reg().RegId());
+ codegen_.DumpCoreRegister(output_, location.reg());
}
} else if (location.IsConstant()) {
output_ << "constant";
diff --git a/compiler/optimizing/graph_visualizer.h b/compiler/optimizing/graph_visualizer.h
index 6e2c6fd..f17ba3b 100644
--- a/compiler/optimizing/graph_visualizer.h
+++ b/compiler/optimizing/graph_visualizer.h
@@ -17,7 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_GRAPH_VISUALIZER_H_
#define ART_COMPILER_OPTIMIZING_GRAPH_VISUALIZER_H_
-#include "utils/allocation.h"
+#include "base/value_object.h"
namespace art {
diff --git a/compiler/optimizing/locations.cc b/compiler/optimizing/locations.cc
index 7b09241..1637484 100644
--- a/compiler/optimizing/locations.cc
+++ b/compiler/optimizing/locations.cc
@@ -55,7 +55,7 @@
: Location::RequiresRegister();
}
-Location Location::ByteRegisterOrConstant(ManagedRegister reg, HInstruction* instruction) {
+Location Location::ByteRegisterOrConstant(int reg, HInstruction* instruction) {
return instruction->IsConstant()
? Location::ConstantLocation(instruction->AsConstant())
: Location::RegisterLocation(reg);
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 5f85b6a..24d7ad3 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -19,9 +19,9 @@
#include "base/bit_field.h"
#include "base/bit_vector.h"
-#include "utils/allocation.h"
+#include "base/value_object.h"
+#include "utils/arena_object.h"
#include "utils/growable_array.h"
-#include "utils/managed_register.h"
namespace art {
@@ -45,21 +45,26 @@
kRegister = 4, // Core register.
// We do not use the value 5 because it conflicts with kLocationConstantMask.
- kDoNotUse = 5,
+ kDoNotUse5 = 5,
kFpuRegister = 6, // Floating point processor.
+ kRegisterPair = 7,
+
// On 32bits architectures, quick can pass a long where the
// low bits are in the last parameter register, and the high
// bits are in a stack slot. The kQuickParameter kind is for
// handling this special case.
- kQuickParameter = 7,
+ kQuickParameter = 8,
+
+ // We do not use the value 9 because it conflicts with kLocationConstantMask.
+ kDoNotUse9 = 9,
// Unallocated location represents a location that is not fixed and can be
// allocated by a register allocator. Each unallocated location has
// a policy that specifies what kind of location is suitable. Payload
// contains register allocation policy.
- kUnallocated = 8,
+ kUnallocated = 10,
};
Location() : value_(kInvalid) {
@@ -71,6 +76,7 @@
COMPILE_ASSERT((kRegister & kLocationConstantMask) != kConstant, TagError);
COMPILE_ASSERT((kQuickParameter & kLocationConstantMask) != kConstant, TagError);
COMPILE_ASSERT((kFpuRegister & kLocationConstantMask) != kConstant, TagError);
+ COMPILE_ASSERT((kRegisterPair & kLocationConstantMask) != kConstant, TagError);
COMPILE_ASSERT((kConstant & kLocationConstantMask) == kConstant, TagError);
DCHECK(!IsValid());
@@ -111,12 +117,16 @@
}
// Register locations.
- static Location RegisterLocation(ManagedRegister reg) {
- return Location(kRegister, reg.RegId());
+ static Location RegisterLocation(int reg) {
+ return Location(kRegister, reg);
}
- static Location FpuRegisterLocation(ManagedRegister reg) {
- return Location(kFpuRegister, reg.RegId());
+ static Location FpuRegisterLocation(int reg) {
+ return Location(kFpuRegister, reg);
+ }
+
+ static Location RegisterPairLocation(int low, int high) {
+ return Location(kRegisterPair, low << 16 | high);
}
bool IsRegister() const {
@@ -127,15 +137,36 @@
return GetKind() == kFpuRegister;
}
- ManagedRegister reg() const {
- DCHECK(IsRegister() || IsFpuRegister());
- return static_cast<ManagedRegister>(GetPayload());
+ bool IsRegisterPair() const {
+ return GetKind() == kRegisterPair;
}
- static uword EncodeStackIndex(intptr_t stack_index) {
+ int reg() const {
+ DCHECK(IsRegister() || IsFpuRegister());
+ return GetPayload();
+ }
+
+ template <typename T>
+ T As() const {
+ return static_cast<T>(reg());
+ }
+
+ template <typename T>
+ T AsRegisterPairLow() const {
+ DCHECK(IsRegisterPair());
+ return static_cast<T>(GetPayload() >> 16);
+ }
+
+ template <typename T>
+ T AsRegisterPairHigh() const {
+ DCHECK(IsRegisterPair());
+ return static_cast<T>(GetPayload() & 0xFFFF);
+ }
+
+ static uintptr_t EncodeStackIndex(intptr_t stack_index) {
DCHECK(-kStackIndexBias <= stack_index);
DCHECK(stack_index < kStackIndexBias);
- return static_cast<uword>(kStackIndexBias + stack_index);
+ return static_cast<uintptr_t>(kStackIndexBias + stack_index);
}
static Location StackSlot(intptr_t stack_index) {
@@ -187,10 +218,6 @@
return GetKind() == kQuickParameter;
}
- arm::ArmManagedRegister AsArm() const;
- x86::X86ManagedRegister AsX86() const;
- x86_64::X86_64ManagedRegister AsX86_64() const;
-
Kind GetKind() const {
return IsConstant() ? kConstant : KindField::Decode(value_);
}
@@ -209,7 +236,9 @@
case kUnallocated: return "U";
case kConstant: return "C";
case kFpuRegister: return "F";
- case kDoNotUse:
+ case kRegisterPair: return "RP";
+ case kDoNotUse5: // fall-through
+ case kDoNotUse9:
LOG(FATAL) << "Should not use this location kind";
}
UNREACHABLE();
@@ -246,7 +275,7 @@
}
static Location RegisterOrConstant(HInstruction* instruction);
- static Location ByteRegisterOrConstant(ManagedRegister reg, HInstruction* instruction);
+ static Location ByteRegisterOrConstant(int reg, HInstruction* instruction);
// The location of the first input to the instruction will be
// used to replace this unallocated location.
@@ -299,8 +328,12 @@
RegisterSet() : core_registers_(0), floating_point_registers_(0) {}
void Add(Location loc) {
- // TODO: floating point registers.
- core_registers_ |= (1 << loc.reg().RegId());
+ if (loc.IsRegister()) {
+ core_registers_ |= (1 << loc.reg());
+ } else {
+ DCHECK(loc.IsFpuRegister());
+ floating_point_registers_ |= (1 << loc.reg());
+ }
}
bool ContainsCoreRegister(uint32_t id) {
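
One detail of the new RegisterPairLocation worth spelling out: the pair's low register is stored in the upper 16 bits of the payload and the high register in the lower 16, which is why AsRegisterPairLow() shifts while AsRegisterPairHigh() masks. A tiny standalone round-trip check of that encoding (sketch only, not ART code):

#include <cassert>
#include <cstdint>

static uint32_t EncodePair(int low, int high) {
  return static_cast<uint32_t>(low << 16 | high);  // as in RegisterPairLocation
}

int main() {
  uint32_t payload = EncodePair(3, 7);
  assert((payload >> 16) == 3);     // what AsRegisterPairLow<T>() decodes
  assert((payload & 0xFFFF) == 7);  // what AsRegisterPairHigh<T>() decodes
  return 0;
}
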
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index fc5b06d..2010e7e 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -20,7 +20,7 @@
#include "locations.h"
#include "offsets.h"
#include "primitive.h"
-#include "utils/allocation.h"
+#include "utils/arena_object.h"
#include "utils/arena_bit_vector.h"
#include "utils/growable_array.h"
diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc
index cadd3c5..c71d93e 100644
--- a/compiler/optimizing/parallel_move_resolver.cc
+++ b/compiler/optimizing/parallel_move_resolver.cc
@@ -170,8 +170,7 @@
DCHECK_NE(blocked, if_scratch);
int scratch = -1;
for (int reg = 0; reg < register_count; ++reg) {
- if ((blocked != reg) &&
- IsScratchLocation(Location::RegisterLocation(ManagedRegister(reg)))) {
+ if ((blocked != reg) && IsScratchLocation(Location::RegisterLocation(reg))) {
scratch = reg;
break;
}
diff --git a/compiler/optimizing/parallel_move_resolver.h b/compiler/optimizing/parallel_move_resolver.h
index fcc1de6..309425e 100644
--- a/compiler/optimizing/parallel_move_resolver.h
+++ b/compiler/optimizing/parallel_move_resolver.h
@@ -17,7 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_PARALLEL_MOVE_RESOLVER_H_
#define ART_COMPILER_OPTIMIZING_PARALLEL_MOVE_RESOLVER_H_
-#include "utils/allocation.h"
+#include "base/value_object.h"
#include "utils/growable_array.h"
namespace art {
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index 863e107..2bdcc61 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -32,9 +32,9 @@
message_ << " ";
}
message_ << "("
- << move->GetSource().reg().RegId()
+ << move->GetSource().reg()
<< " -> "
- << move->GetDestination().reg().RegId()
+ << move->GetDestination().reg()
<< ")";
}
@@ -44,9 +44,9 @@
message_ << " ";
}
message_ << "("
- << move->GetSource().reg().RegId()
+ << move->GetSource().reg()
<< " <-> "
- << move->GetDestination().reg().RegId()
+ << move->GetDestination().reg()
<< ")";
}
@@ -70,8 +70,8 @@
HParallelMove* moves = new (allocator) HParallelMove(allocator);
for (size_t i = 0; i < number_of_moves; ++i) {
moves->AddMove(new (allocator) MoveOperands(
- Location::RegisterLocation(ManagedRegister(operands[i][0])),
- Location::RegisterLocation(ManagedRegister(operands[i][1])),
+ Location::RegisterLocation(operands[i][0]),
+ Location::RegisterLocation(operands[i][1]),
nullptr));
}
return moves;
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 3ee1afe..a9d159e 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -95,7 +95,7 @@
size_t start,
size_t end,
Primitive::Type type) {
- int reg = location.reg().RegId();
+ int reg = location.reg();
LiveInterval* interval = physical_register_intervals_.Get(reg);
if (interval == nullptr) {
interval = LiveInterval::MakeFixedInterval(allocator_, reg, type);
@@ -187,7 +187,7 @@
if (locations->WillCall()) {
// Block all registers.
for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
- BlockRegister(Location::RegisterLocation(ManagedRegister(i)),
+ BlockRegister(Location::RegisterLocation(i),
position,
position + 1,
Primitive::kPrimInt);
@@ -216,7 +216,7 @@
if (output.IsRegister()) {
// Shift the interval's start by one to account for the blocked register.
current->SetFrom(position + 1);
- current->SetRegister(output.reg().RegId());
+ current->SetRegister(output.reg());
BlockRegister(output, position, position + 1, instruction->GetType());
} else if (output.IsStackSlot() || output.IsDoubleStackSlot()) {
current->SetSpillSlot(output.GetStackIndex());
@@ -884,7 +884,7 @@
if (current->HasSpillSlot() && current->HasRegister()) {
// We spill eagerly, so move must be at definition.
InsertMoveAfter(interval->GetDefinedBy(),
- Location::RegisterLocation(ManagedRegister(interval->GetRegister())),
+ Location::RegisterLocation(interval->GetRegister()),
interval->NeedsTwoSpillSlots()
? Location::DoubleStackSlot(interval->GetParent()->GetSpillSlot())
: Location::StackSlot(interval->GetParent()->GetSpillSlot()));
@@ -938,7 +938,7 @@
case Location::kRegister: {
locations->AddLiveRegister(source);
if (current->GetType() == Primitive::kPrimNot) {
- locations->SetRegisterBit(source.reg().RegId());
+ locations->SetRegisterBit(source.reg());
}
break;
}
@@ -1106,7 +1106,7 @@
}
LocationSummary* locations = at->GetLocations();
locations->SetTempAt(
- temp_index++, Location::RegisterLocation(ManagedRegister(temp->GetRegister())));
+ temp_index++, Location::RegisterLocation(temp->GetRegister()));
}
}
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index b7d56e6..7517a6b 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -25,7 +25,6 @@
#include "ssa_liveness_analysis.h"
#include "ssa_phi_elimination.h"
#include "utils/arena_allocator.h"
-#include "utils/managed_register.h"
#include "gtest/gtest.h"
@@ -525,7 +524,7 @@
// Set the phi to a specific register, and check that the inputs get allocated
// the same register.
- phi->GetLocations()->SetOut(Location::RegisterLocation(ManagedRegister(2)));
+ phi->GetLocations()->SetOut(Location::RegisterLocation(2));
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
register_allocator.AllocateRegisters();
@@ -542,7 +541,7 @@
// Set input1 to a specific register, and check that the phi and other input get allocated
// the same register.
- input1->GetLocations()->SetOut(Location::RegisterLocation(ManagedRegister(2)));
+ input1->GetLocations()->SetOut(Location::RegisterLocation(2));
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
register_allocator.AllocateRegisters();
@@ -559,7 +558,7 @@
// Set input2 to a specific register, and check that the phi and other input get allocated
// the same register.
- input2->GetLocations()->SetOut(Location::RegisterLocation(ManagedRegister(2)));
+ input2->GetLocations()->SetOut(Location::RegisterLocation(2));
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
register_allocator.AllocateRegisters();
@@ -620,7 +619,7 @@
liveness.Analyze();
// Check that the field gets put in the register expected by its use.
- ret->GetLocations()->SetInAt(0, Location::RegisterLocation(ManagedRegister(2)));
+ ret->GetLocations()->SetInAt(0, Location::RegisterLocation(2));
RegisterAllocator register_allocator(&allocator, &codegen, liveness);
register_allocator.AllocateRegisters();
@@ -682,7 +681,7 @@
liveness.Analyze();
// check that both adds get the same register.
- first_add->InputAt(0)->GetLocations()->SetOut(Location::RegisterLocation(ManagedRegister(2)));
+ first_add->InputAt(0)->GetLocations()->SetOut(Location::RegisterLocation(2));
ASSERT_EQ(first_add->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
ASSERT_EQ(second_add->GetLocations()->Out().GetPolicy(), Location::kSameAsFirstInput);
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 1de90b4..f0edc64 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -319,8 +319,8 @@
if (user->IsPhi()) {
// If the phi has a register, try to use the same.
Location phi_location = user->GetLiveInterval()->ToLocation();
- if (phi_location.IsRegister() && free_until[phi_location.reg().RegId()] >= use_position) {
- return phi_location.reg().RegId();
+ if (phi_location.IsRegister() && free_until[phi_location.reg()] >= use_position) {
+ return phi_location.reg();
}
const GrowableArray<HBasicBlock*>& predecessors = user->GetBlock()->GetPredecessors();
// If the instruction dies at the phi assignment, we can try having the
@@ -333,8 +333,8 @@
HInstruction* input = user->InputAt(i);
Location location = input->GetLiveInterval()->GetLocationAt(
predecessors.Get(i)->GetLifetimeEnd() - 1);
- if (location.IsRegister() && free_until[location.reg().RegId()] >= use_position) {
- return location.reg().RegId();
+ if (location.IsRegister() && free_until[location.reg()] >= use_position) {
+ return location.reg();
}
}
}
@@ -345,8 +345,8 @@
// We use the user's lifetime position - 1 (and not `use_position`) because the
// register is blocked at the beginning of the user.
size_t position = user->GetLifetimePosition() - 1;
- if (expected.IsRegister() && free_until[expected.reg().RegId()] >= position) {
- return expected.reg().RegId();
+ if (expected.IsRegister() && free_until[expected.reg()] >= position) {
+ return expected.reg();
}
}
}
@@ -369,7 +369,7 @@
// be reused.
Location input_location = input_interval.ToLocation();
if (input_location.IsRegister()) {
- return input_location.reg().RegId();
+ return input_location.reg();
}
}
}
@@ -385,7 +385,7 @@
// be reused.
Location location = input_interval.ToLocation();
if (location.IsRegister()) {
- return location.reg().RegId();
+ return location.reg();
}
}
}
@@ -399,7 +399,7 @@
Location LiveInterval::ToLocation() const {
if (HasRegister()) {
- return Location::RegisterLocation(ManagedRegister(GetRegister()));
+ return Location::RegisterLocation(GetRegister());
} else {
HInstruction* defined_by = GetParent()->GetDefinedBy();
if (defined_by->IsConstant()) {
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 0ea11ad..5f74c33 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -18,9 +18,9 @@
#define ART_COMPILER_OPTIMIZING_STACK_MAP_STREAM_H_
#include "base/bit_vector.h"
+#include "base/value_object.h"
#include "memory_region.h"
#include "stack_map.h"
-#include "utils/allocation.h"
#include "utils/growable_array.h"
namespace art {
diff --git a/compiler/output_stream_test.cc b/compiler/output_stream_test.cc
index 315ca09..bba9892 100644
--- a/compiler/output_stream_test.cc
+++ b/compiler/output_stream_test.cc
@@ -90,7 +90,7 @@
TEST_F(OutputStreamTest, Vector) {
std::vector<uint8_t> output;
- VectorOutputStream output_stream("test vector output", output);
+ VectorOutputStream output_stream("test vector output", &output);
SetOutputStream(output_stream);
GenerateTestOutput();
CheckTestOutput(output);
diff --git a/compiler/utils/allocation.h b/compiler/utils/arena_object.h
similarity index 75%
rename from compiler/utils/allocation.h
rename to compiler/utils/arena_object.h
index b0947ca..50909f75 100644
--- a/compiler/utils/allocation.h
+++ b/compiler/utils/arena_object.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_UTILS_ALLOCATION_H_
-#define ART_COMPILER_UTILS_ALLOCATION_H_
+#ifndef ART_COMPILER_UTILS_ARENA_OBJECT_H_
+#define ART_COMPILER_UTILS_ARENA_OBJECT_H_
#include "arena_allocator.h"
#include "base/logging.h"
@@ -34,17 +34,6 @@
}
};
-class ValueObject {
- public:
- void* operator new(size_t size) {
- LOG(FATAL) << "UNREACHABLE";
- abort();
- }
- void operator delete(void*, size_t) {
- LOG(FATAL) << "UNREACHABLE";
- }
-};
-
} // namespace art
-#endif // ART_COMPILER_UTILS_ALLOCATION_H_
+#endif // ART_COMPILER_UTILS_ARENA_OBJECT_H_
diff --git a/compiler/utils/x86_64/constants_x86_64.h b/compiler/utils/x86_64/constants_x86_64.h
index ca9eae3..2a5b43d 100644
--- a/compiler/utils/x86_64/constants_x86_64.h
+++ b/compiler/utils/x86_64/constants_x86_64.h
@@ -48,6 +48,7 @@
class XmmRegister {
public:
explicit XmmRegister(FloatRegister r) : reg_(r) {}
+ explicit XmmRegister(int r) : reg_(FloatRegister(r)) {}
FloatRegister AsFloatRegister() const {
return reg_;
}
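
The new int constructor serves the templated accessor: Location::As<T>() performs static_cast<T>(reg()) on an int payload, and since static_cast may invoke an explicit converting constructor, As<XmmRegister>() only compiles once XmmRegister accepts an int. A compact model, where As() is a hypothetical free function standing in for the Location member:

#include <cassert>

enum FloatRegister { XMM0 = 0, XMM1 = 1 };

class XmmRegister {
 public:
  explicit XmmRegister(FloatRegister r) : reg_(r) {}
  explicit XmmRegister(int r) : reg_(FloatRegister(r)) {}  // the added ctor
  FloatRegister AsFloatRegister() const { return reg_; }
 private:
  FloatRegister reg_;
};

template <typename T>
T As(int reg) { return static_cast<T>(reg); }  // models Location::As<T>()

int main() {
  XmmRegister x = As<XmmRegister>(1);  // ill-formed without XmmRegister(int)
  assert(x.AsFloatRegister() == XMM1);
  return 0;
}
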
diff --git a/compiler/vector_output_stream.cc b/compiler/vector_output_stream.cc
index e5ff729..3d33673 100644
--- a/compiler/vector_output_stream.cc
+++ b/compiler/vector_output_stream.cc
@@ -20,8 +20,8 @@
namespace art {
-VectorOutputStream::VectorOutputStream(const std::string& location, std::vector<uint8_t>& vector)
- : OutputStream(location), offset_(vector.size()), vector_(vector) {}
+VectorOutputStream::VectorOutputStream(const std::string& location, std::vector<uint8_t>* vector)
+ : OutputStream(location), offset_(vector->size()), vector_(vector) {}
off_t VectorOutputStream::Seek(off_t offset, Whence whence) {
CHECK(whence == kSeekSet || whence == kSeekCurrent || whence == kSeekEnd) << whence;
@@ -36,7 +36,7 @@
break;
}
case kSeekEnd: {
- new_offset = vector_.size() + offset;
+ new_offset = vector_->size() + offset;
break;
}
}
diff --git a/compiler/vector_output_stream.h b/compiler/vector_output_stream.h
index 09daa12..3c5877c 100644
--- a/compiler/vector_output_stream.h
+++ b/compiler/vector_output_stream.h
@@ -25,21 +25,21 @@
namespace art {
-class VectorOutputStream : public OutputStream {
+class VectorOutputStream FINAL : public OutputStream {
public:
- VectorOutputStream(const std::string& location, std::vector<uint8_t>& vector);
+ VectorOutputStream(const std::string& location, std::vector<uint8_t>* vector);
virtual ~VectorOutputStream() {}
bool WriteFully(const void* buffer, size_t byte_count) {
- if (static_cast<size_t>(offset_) == vector_.size()) {
+ if (static_cast<size_t>(offset_) == vector_->size()) {
const uint8_t* start = reinterpret_cast<const uint8_t*>(buffer);
- vector_.insert(vector_.end(), &start[0], &start[byte_count]);
+ vector_->insert(vector_->end(), &start[0], &start[byte_count]);
offset_ += byte_count;
} else {
off_t new_offset = offset_ + byte_count;
EnsureCapacity(new_offset);
- memcpy(&vector_[offset_], buffer, byte_count);
+ memcpy(&(*vector_)[offset_], buffer, byte_count);
offset_ = new_offset;
}
return true;
@@ -49,13 +49,13 @@
private:
void EnsureCapacity(off_t new_offset) {
- if (new_offset > static_cast<off_t>(vector_.size())) {
- vector_.resize(new_offset);
+ if (new_offset > static_cast<off_t>(vector_->size())) {
+ vector_->resize(new_offset);
}
}
off_t offset_;
- std::vector<uint8_t>& vector_;
+ std::vector<uint8_t>* const vector_;
DISALLOW_COPY_AND_ASSIGN(VectorOutputStream);
};
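
Switching the constructor from std::vector<uint8_t>& to a pointer follows the convention that a mutated out-parameter should be visible at the call site, which is exactly what the output_stream_test.cc hunk earlier shows. Sketch of the call-site effect, with the class stubbed down to the members relevant here:

#include <cstdint>
#include <vector>

class VectorOutputStream {
 public:
  VectorOutputStream(const char* location, std::vector<uint8_t>* vector)
      : location_(location), vector_(vector) {}
  const char* location_;
  std::vector<uint8_t>* const vector_;  // non-reassignable, as in the patch
};

int main() {
  std::vector<uint8_t> output;
  // Before the patch: VectorOutputStream stream("test", output);
  // After: the explicit &output flags the out-parameter to readers.
  VectorOutputStream stream("test", &output);
  stream.vector_->push_back(0u);
  return static_cast<int>(stream.vector_->size()) - 1;
}
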
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index d5e766f..51b7a98 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -120,10 +120,12 @@
"kClassRoots",
};
-class OatSymbolizer : public CodeOutput {
+class OatSymbolizer FINAL : public CodeOutput {
public:
- explicit OatSymbolizer(const OatFile* oat_file, std::string& output_name) :
- oat_file_(oat_file), builder_(nullptr), elf_output_(nullptr), output_name_(output_name) {}
+ explicit OatSymbolizer(const OatFile* oat_file, const std::string& output_name) :
+ oat_file_(oat_file), builder_(nullptr), elf_output_(nullptr),
+ output_name_(output_name.empty() ? "symbolized.oat" : output_name) {
+ }
bool Init() {
Elf32_Word oat_data_size = oat_file_->GetOatHeader().GetExecutableOffset();
@@ -131,9 +133,6 @@
uint32_t diff = static_cast<uint32_t>(oat_file_->End() - oat_file_->Begin());
uint32_t oat_exec_size = diff - oat_data_size;
- if (output_name_.empty()) {
- output_name_ = "symbolized.oat";
- }
elf_output_ = OS::CreateEmptyFile(output_name_.c_str());
builder_.reset(new ElfBuilder<Elf32_Word, Elf32_Sword, Elf32_Addr, Elf32_Dyn,
@@ -307,11 +306,11 @@
}
ElfSymtabBuilder<Elf32_Word, Elf32_Sword, Elf32_Addr,
- Elf32_Sym, Elf32_Shdr>* symtab = &builder_->symtab_builder_;
+ Elf32_Sym, Elf32_Shdr>* symtab = builder_->GetSymtabBuilder();
- symtab->AddSymbol(pretty_name, &builder_->text_builder_, oat_method.GetCodeOffset() -
- oat_file_->GetOatHeader().GetExecutableOffset(), true,
- oat_method.GetQuickCodeSize(), STB_GLOBAL, STT_FUNC);
+ symtab->AddSymbol(pretty_name, &builder_->GetTextBuilder(),
+ oat_method.GetCodeOffset() - oat_file_->GetOatHeader().GetExecutableOffset(),
+ true, oat_method.GetQuickCodeSize(), STB_GLOBAL, STT_FUNC);
}
}
@@ -340,7 +339,7 @@
Elf32_Sym, Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr> > builder_;
File* elf_output_;
std::unordered_map<uint32_t, uint32_t> state_;
- std::string output_name_;
+ const std::string output_name_;
};
class OatDumperOptions {
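
Folding the empty-name fallback into the constructor's initializer list is what lets output_name_ become const: the default can no longer be patched in later inside Init(). A self-contained sketch of the idiom (the fallback file name matches the patch; the class is otherwise stubbed):

#include <cassert>
#include <string>

class OatSymbolizer {
 public:
  explicit OatSymbolizer(const std::string& output_name)
      : output_name_(output_name.empty() ? "symbolized.oat" : output_name) {}
  const std::string output_name_;
};

int main() {
  assert(OatSymbolizer("").output_name_ == "symbolized.oat");
  assert(OatSymbolizer("core.oat").output_name_ == "core.oat");
  return 0;
}
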
diff --git a/compiler/utils/allocation.h b/runtime/base/value_object.h
similarity index 65%
copy from compiler/utils/allocation.h
copy to runtime/base/value_object.h
index b0947ca..ee0e2a0 100644
--- a/compiler/utils/allocation.h
+++ b/runtime/base/value_object.h
@@ -14,26 +14,13 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_UTILS_ALLOCATION_H_
-#define ART_COMPILER_UTILS_ALLOCATION_H_
+#ifndef ART_RUNTIME_BASE_VALUE_OBJECT_H_
+#define ART_RUNTIME_BASE_VALUE_OBJECT_H_
-#include "arena_allocator.h"
#include "base/logging.h"
namespace art {
-class ArenaObject {
- public:
- // Allocate a new ArenaObject of 'size' bytes in the Arena.
- void* operator new(size_t size, ArenaAllocator* allocator) {
- return allocator->Alloc(size, kArenaAllocMisc);
- }
-
- void operator delete(void*, size_t) {
- LOG(FATAL) << "UNREACHABLE";
- }
-};
-
class ValueObject {
public:
void* operator new(size_t size) {
@@ -47,4 +34,4 @@
} // namespace art
-#endif // ART_COMPILER_UTILS_ALLOCATION_H_
+#endif // ART_RUNTIME_BASE_VALUE_OBJECT_H_
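
This is the other half of the split: ValueObject is copied into runtime/base, presumably so code outside the compiler can use it, while ArenaObject stays behind in the renamed arena_object.h. What ValueObject enforces, modeled standalone (the real class uses LOG(FATAL); fprintf/abort stand in here):

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Instances are meant for automatic (stack) storage; dynamic allocation
// traps at runtime rather than being rejected at compile time.
class ValueObject {
 public:
  void* operator new(std::size_t) {
    std::fprintf(stderr, "UNREACHABLE\n");
    std::abort();
  }
  void operator delete(void*, std::size_t) { std::abort(); }
};

struct Point : ValueObject {
  int x = 0;
};

int main() {
  Point p;       // fine: automatic storage
  p.x = 1;
  // new Point;  // would print UNREACHABLE and abort
  return p.x - 1;
}
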
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index ae5b08f..5ca8bec 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -316,7 +316,7 @@
# {5: trace or no-trace}-{6: gcstress gcverify cms}-{7: forcecopy checkjni jni}-
# {8: no-image image}-{9: test name}{10: 32 or 64}
define define-test-art-run-test
- run_test_options := $(addprefix --runtime-option ,$(DALVIKVM_FLAGS))
+ run_test_options :=
prereq_rule :=
test_groups :=
uc_host_or_target :=