Compiler: continuing refactoring
Moved the arena memory allocation mechanism into its own class as
a prelude to cleaning up the MIR and LIR data structures.
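
The allocator interface, as exercised by the rewritten call sites below,
reduces to roughly this sketch. Only the NewMem(size, zero-fill, kind)
call shape and the kAllocData/kAllocLIR/kAllocRegAlloc tags appear in the
diff; kAllocMisc, the parameter names and the malloc-backed body are
illustrative assumptions:

    #include <stddef.h>
    #include <stdlib.h>
    #include <string.h>

    class ArenaAllocator {
     public:
      enum AllocKind {
        kAllocMisc,      // assumed catch-all tag; not shown in this diff
        kAllocLIR,
        kAllocData,
        kAllocRegAlloc,
      };

      // Hands out 'bytes' of arena-owned memory, optionally zero-filled.
      // A real arena bump-allocates from large blocks and keeps per-kind
      // statistics; plain malloc keeps this sketch self-contained.
      void* NewMem(size_t bytes, bool zero_fill, AllocKind kind) {
        (void)kind;  // would select a statistics bucket in the real arena
        void* mem = malloc(bytes);
        if (zero_fill && mem != NULL) {
          memset(mem, 0, bytes);
        }
        return mem;
      }
    };

    // Typical rewritten call site:
    //   LIR* insn = static_cast<LIR*>(
    //       arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
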
Reworked the bit vector as a class, using placement new with the arena
allocator.
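
One plausible shape for the placement-new wiring, building on the
ArenaAllocator sketch above. The bit vector itself is not part of this
diff, so the class name, constructor signature and operator new overload
are assumptions:

    #include <stddef.h>
    #include <stdint.h>

    class ArenaBitVector {
     public:
      // Routes 'new (arena) ArenaBitVector(arena, bits)' through the
      // arena instead of the global heap. Arena-owned objects are freed
      // en masse when the arena is released, so no matching operator
      // delete is needed in normal operation.
      static void* operator new(size_t size, ArenaAllocator* arena) {
        return arena->NewMem(size, true, ArenaAllocator::kAllocMisc);
      }

      ArenaBitVector(ArenaAllocator* arena, uint32_t start_bits)
          : arena_(arena),
            storage_size_((start_bits + 31) >> 5),  // bits -> 32-bit words
            storage_(static_cast<uint32_t*>(
                arena->NewMem(storage_size_ * sizeof(uint32_t), true,
                              ArenaAllocator::kAllocMisc))) {}

     private:
      ArenaAllocator* const arena_;
      const uint32_t storage_size_;  // length of storage_ in words
      uint32_t* const storage_;
    };
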
Reworked GrowableList as a class template using the new arena allocator
and renamed it to GrowableArray.
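
A sketch of GrowableArray matching the operations the diff relies on:
construction with (arena, initial length, kind tag), Insert, Get, Size,
and a nested Iterator whose Next() returns NULL at end of list (which
presumes a pointer element type). The doubling growth policy and the
kind enum's name are assumptions; the enumerator names are the ones used
at the construction sites:

    #include <stddef.h>
    #include <string.h>

    // Kind tags seen at the construction sites in this change; the
    // enum's real name is not shown in the diff.
    enum GrowableArrayKind {
      kGrowableArrayMisc,
      kGrowableArraySwitchTables,
      kGrowableArrayFillArrayData,
      kGrowableArrayThrowLaunchPads,
      kGrowableArraySuspendLaunchPads,
    };

    template<typename T>
    class GrowableArray {
     public:
      GrowableArray(ArenaAllocator* arena, size_t init_length,
                    GrowableArrayKind kind)
          : arena_(arena), num_allocated_(init_length), num_used_(0),
            kind_(kind) {
        elem_list_ = static_cast<T*>(arena_->NewMem(
            sizeof(T) * init_length, true, ArenaAllocator::kAllocMisc));
      }

      void Insert(T elem) {
        if (num_used_ == num_allocated_) {
          // Double the backing store. The old block stays in the arena
          // and is reclaimed when the whole arena is released.
          size_t new_length = num_allocated_ * 2;
          T* new_list = static_cast<T*>(arena_->NewMem(
              sizeof(T) * new_length, true, ArenaAllocator::kAllocMisc));
          memcpy(new_list, elem_list_, sizeof(T) * num_allocated_);
          num_allocated_ = new_length;
          elem_list_ = new_list;
        }
        elem_list_[num_used_++] = elem;
      }

      T Get(size_t index) const { return elem_list_[index]; }
      size_t Size() const { return num_used_; }

      class Iterator {
       public:
        explicit Iterator(GrowableArray* array) : idx_(0), g_list_(array) {}
        // NULL marks end-of-list, matching the 'if (tab_rec == NULL)
        // break;' loops in codegen_util.cc.
        T Next() {
          return (idx_ < g_list_->Size()) ? g_list_->Get(idx_++) : NULL;
        }
       private:
        size_t idx_;
        GrowableArray* const g_list_;
      };

     private:
      ArenaAllocator* const arena_;
      size_t num_allocated_;
      size_t num_used_;
      const GrowableArrayKind kind_;  // statistics tag only in this sketch
      T* elem_list_;
    };
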
Change-Id: I639c4c08abe068094cae2649e04f58c8addd0015
diff --git a/src/compiler/dex/quick/arm/call_arm.cc b/src/compiler/dex/quick/arm/call_arm.cc
index bb46e1f..32d4ed6 100644
--- a/src/compiler/dex/quick/arm/call_arm.cc
+++ b/src/compiler/dex/quick/arm/call_arm.cc
@@ -326,12 +326,14 @@
}
// Add the table to the list - we'll process it later
SwitchTable *tab_rec =
- static_cast<SwitchTable*>(NewMem(cu_, sizeof(SwitchTable), true, kAllocData));
+ static_cast<SwitchTable*>(arena_->NewMem(sizeof(SwitchTable), true,
+ ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
int size = table[1];
- tab_rec->targets = static_cast<LIR**>(NewMem(cu_, size * sizeof(LIR*), true, kAllocLIR));
- InsertGrowableList(cu_, &switch_tables_, reinterpret_cast<uintptr_t>(tab_rec));
+ tab_rec->targets = static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true,
+ ArenaAllocator::kAllocLIR));
+ switch_tables_.Insert(tab_rec);
// Get the switch value
rl_src = LoadValue(rl_src, kCoreReg);
@@ -374,12 +376,14 @@
}
// Add the table to the list - we'll process it later
SwitchTable *tab_rec =
- static_cast<SwitchTable*>(NewMem(cu_, sizeof(SwitchTable), true, kAllocData));
+ static_cast<SwitchTable*>(arena_->NewMem(sizeof(SwitchTable), true,
+ ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
int size = table[1];
- tab_rec->targets = static_cast<LIR**>(NewMem(cu_, size * sizeof(LIR*), true, kAllocLIR));
- InsertGrowableList(cu_, &switch_tables_, reinterpret_cast<uintptr_t>(tab_rec));
+ tab_rec->targets =
+ static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true, ArenaAllocator::kAllocLIR));
+ switch_tables_.Insert(tab_rec);
// Get the switch value
rl_src = LoadValue(rl_src, kCoreReg);
@@ -427,14 +431,15 @@
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
// Add the table to the list - we'll process it later
FillArrayData *tab_rec =
- static_cast<FillArrayData*>(NewMem(cu_, sizeof(FillArrayData), true, kAllocData));
+ static_cast<FillArrayData*>(arena_->NewMem(sizeof(FillArrayData), true,
+ ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
uint16_t width = tab_rec->table[1];
uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
tab_rec->size = (size * width) + 8;
- InsertGrowableList(cu_, &fill_array_data_, reinterpret_cast<uintptr_t>(tab_rec));
+ fill_array_data_.Insert(tab_rec);
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
diff --git a/src/compiler/dex/quick/arm/codegen_arm.h b/src/compiler/dex/quick/arm/codegen_arm.h
index df9451a..9e409e6 100644
--- a/src/compiler/dex/quick/arm/codegen_arm.h
+++ b/src/compiler/dex/quick/arm/codegen_arm.h
@@ -24,7 +24,7 @@
class ArmMir2Lir : public Mir2Lir {
public:
- ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph);
+ ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
// Required for target - codegen helpers.
virtual bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
diff --git a/src/compiler/dex/quick/arm/target_arm.cc b/src/compiler/dex/quick/arm/target_arm.cc
index 43bbb69..0a05a3a 100644
--- a/src/compiler/dex/quick/arm/target_arm.cc
+++ b/src/compiler/dex/quick/arm/target_arm.cc
@@ -505,7 +505,8 @@
return ((lir->opcode == kThumbBUncond) || (lir->opcode == kThumb2BUncond));
}
-ArmMir2Lir::ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph) : Mir2Lir(cu, mir_graph) {
+ArmMir2Lir::ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
+ : Mir2Lir(cu, mir_graph, arena) {
// Sanity check - make sure encoding map lines up.
for (int i = 0; i < kArmLast; i++) {
if (ArmMir2Lir::EncodingMap[i].opcode != i) {
@@ -516,8 +517,9 @@
}
}
-Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph) {
- return new ArmMir2Lir(cu, mir_graph);
+Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+ ArenaAllocator* const arena) {
+ return new ArmMir2Lir(cu, mir_graph, arena);
}
/*
@@ -555,13 +557,16 @@
int num_temps = sizeof(core_temps)/sizeof(*core_temps);
int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
- reg_pool_ = static_cast<RegisterPool*>(NewMem(cu_, sizeof(*reg_pool_), true, kAllocRegAlloc));
+ reg_pool_ = static_cast<RegisterPool*>(arena_->NewMem(sizeof(*reg_pool_), true,
+ ArenaAllocator::kAllocRegAlloc));
reg_pool_->num_core_regs = num_regs;
reg_pool_->core_regs = reinterpret_cast<RegisterInfo*>
- (NewMem(cu_, num_regs * sizeof(*reg_pool_->core_regs), true, kAllocRegAlloc));
+ (arena_->NewMem(num_regs * sizeof(*reg_pool_->core_regs), true,
+ ArenaAllocator::kAllocRegAlloc));
reg_pool_->num_fp_regs = num_fp_regs;
reg_pool_->FPRegs = static_cast<RegisterInfo*>
- (NewMem(cu_, num_fp_regs * sizeof(*reg_pool_->FPRegs), true, kAllocRegAlloc));
+ (arena_->NewMem(num_fp_regs * sizeof(*reg_pool_->FPRegs), true,
+ ArenaAllocator::kAllocRegAlloc));
CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
// Keep special registers from being allocated
diff --git a/src/compiler/dex/quick/codegen_util.cc b/src/compiler/dex/quick/codegen_util.cc
index 91422ea..717d7ca 100644
--- a/src/compiler/dex/quick/codegen_util.cc
+++ b/src/compiler/dex/quick/codegen_util.cc
@@ -365,7 +365,7 @@
LIR* Mir2Lir::RawLIR(int dalvik_offset, int opcode, int op0,
int op1, int op2, int op3, int op4, LIR* target)
{
- LIR* insn = static_cast<LIR*>(NewMem(cu_, sizeof(LIR), true, kAllocLIR));
+ LIR* insn = static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
insn->dalvik_offset = dalvik_offset;
insn->opcode = opcode;
insn->operands[0] = op0;
@@ -499,7 +499,7 @@
{
/* Add the constant to the literal pool */
if (constant_list_p) {
- LIR* new_value = static_cast<LIR*>(NewMem(cu_, sizeof(LIR), true, kAllocData));
+ LIR* new_value = static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocData));
new_value->operands[0] = value;
new_value->next = *constant_list_p;
*constant_list_p = new_value;
@@ -573,11 +573,9 @@
/* Write the switch tables to the output stream */
void Mir2Lir::InstallSwitchTables()
{
- GrowableListIterator iterator;
- GrowableListIteratorInit(&switch_tables_, &iterator);
+ GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
while (true) {
- Mir2Lir::SwitchTable* tab_rec =
- reinterpret_cast<Mir2Lir::SwitchTable*>(GrowableListIteratorNext( &iterator));
+ Mir2Lir::SwitchTable* tab_rec = iterator.Next();
if (tab_rec == NULL) break;
AlignBuffer(code_buffer_, tab_rec->offset);
/*
@@ -633,11 +631,9 @@
/* Write the fill array data to the output stream */
void Mir2Lir::InstallFillArrayData()
{
- GrowableListIterator iterator;
- GrowableListIteratorInit(&fill_array_data_, &iterator);
+ GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
while (true) {
- Mir2Lir::FillArrayData *tab_rec =
- reinterpret_cast<Mir2Lir::FillArrayData*>(GrowableListIteratorNext( &iterator));
+ Mir2Lir::FillArrayData *tab_rec = iterator.Next();
if (tab_rec == NULL) break;
AlignBuffer(code_buffer_, tab_rec->offset);
for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
@@ -831,11 +827,9 @@
int Mir2Lir::AssignSwitchTablesOffset(int offset)
{
- GrowableListIterator iterator;
- GrowableListIteratorInit(&switch_tables_, &iterator);
+ GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
while (true) {
- Mir2Lir::SwitchTable *tab_rec =
- reinterpret_cast<Mir2Lir::SwitchTable*>(GrowableListIteratorNext(&iterator));
+ Mir2Lir::SwitchTable *tab_rec = iterator.Next();
if (tab_rec == NULL) break;
tab_rec->offset = offset;
if (tab_rec->table[0] == Instruction::kSparseSwitchSignature) {
@@ -851,11 +845,9 @@
int Mir2Lir::AssignFillArrayDataOffset(int offset)
{
- GrowableListIterator iterator;
- GrowableListIteratorInit(&fill_array_data_, &iterator);
+ GrowableArray<FillArrayData*>::Iterator iterator(&fill_array_data_);
while (true) {
- Mir2Lir::FillArrayData *tab_rec =
- reinterpret_cast<Mir2Lir::FillArrayData*>(GrowableListIteratorNext(&iterator));
+ Mir2Lir::FillArrayData *tab_rec = iterator.Next();
if (tab_rec == NULL) break;
tab_rec->offset = offset;
offset += tab_rec->size;
@@ -973,7 +965,7 @@
if (it == boundary_map_.end()) {
LOG(FATAL) << "Error: didn't find vaddr 0x" << std::hex << vaddr;
}
- LIR* new_label = static_cast<LIR*>(NewMem(cu_, sizeof(LIR), true, kAllocLIR));
+ LIR* new_label = static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
new_label->dalvik_offset = vaddr;
new_label->opcode = kPseudoCaseLabel;
new_label->operands[0] = keyVal;
@@ -1007,11 +999,9 @@
void Mir2Lir::ProcessSwitchTables()
{
- GrowableListIterator iterator;
- GrowableListIteratorInit(&switch_tables_, &iterator);
+ GrowableArray<SwitchTable*>::Iterator iterator(&switch_tables_);
while (true) {
- Mir2Lir::SwitchTable *tab_rec =
- reinterpret_cast<Mir2Lir::SwitchTable*>(GrowableListIteratorNext(&iterator));
+ Mir2Lir::SwitchTable *tab_rec = iterator.Next();
if (tab_rec == NULL) break;
if (tab_rec->table[0] == Instruction::kPackedSwitchSignature) {
MarkPackedCaseLabels(tab_rec);
@@ -1123,15 +1113,23 @@
return res;
}
-Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph)
+// TODO: move to mir_to_lir.cc
+Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
: literal_list_(NULL),
method_literal_list_(NULL),
code_literal_list_(NULL),
cu_(cu),
mir_graph_(mir_graph),
+ switch_tables_(arena, 4, kGrowableArraySwitchTables),
+ fill_array_data_(arena, 4, kGrowableArrayFillArrayData),
+ throw_launchpads_(arena, 2048, kGrowableArrayThrowLaunchPads),
+ suspend_launchpads_(arena, 4, kGrowableArraySuspendLaunchPads),
+ intrinsic_launchpads_(arena, 2048, kGrowableArrayMisc),
data_offset_(0),
total_size_(0),
block_label_list_(NULL),
+ current_dalvik_offset_(0),
+ reg_pool_(NULL),
live_sreg_(0),
num_core_spills_(0),
num_fp_spills_(0),
@@ -1141,15 +1139,10 @@
first_lir_insn_(NULL),
last_lir_insn_(NULL)
{
-
- CompilerInitGrowableList(cu_, &switch_tables_, 4, kListSwitchTables);
- CompilerInitGrowableList(cu_, &fill_array_data_, 4, kListFillArrayData);
- CompilerInitGrowableList(cu_, &throw_launchpads_, 2048, kListThrowLaunchPads);
- CompilerInitGrowableList(cu_, &intrinsic_launchpads_, 4, kListMisc);
- CompilerInitGrowableList(cu_, &suspend_launchpads_, 2048, kListSuspendLaunchPads);
+ arena_ = arena;
promotion_map_ = static_cast<PromotionMap*>
- (NewMem(cu_, (cu_->num_dalvik_registers + cu_->num_compiler_temps + 1) *
- sizeof(promotion_map_[0]), true, kAllocRegAlloc));
+ (arena_->NewMem((cu_->num_dalvik_registers + cu_->num_compiler_temps + 1) *
+ sizeof(promotion_map_[0]), true, ArenaAllocator::kAllocRegAlloc));
}
void Mir2Lir::Materialize() {
diff --git a/src/compiler/dex/quick/gen_common.cc b/src/compiler/dex/quick/gen_common.cc
index c13e797..d27f5c6 100644
--- a/src/compiler/dex/quick/gen_common.cc
+++ b/src/compiler/dex/quick/gen_common.cc
@@ -45,7 +45,7 @@
LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_);
LIR* branch = OpCondBranch(c_code, tgt);
// Remember branch target - will process later
- InsertGrowableList(cu_, &throw_launchpads_, reinterpret_cast<uintptr_t>(tgt));
+ throw_launchpads_.Insert(tgt);
return branch;
}
@@ -59,7 +59,7 @@
branch = OpCmpImmBranch(c_code, reg, imm_val, tgt);
}
// Remember branch target - will process later
- InsertGrowableList(cu_, &throw_launchpads_, reinterpret_cast<uintptr_t>(tgt));
+ throw_launchpads_.Insert(tgt);
return branch;
}
@@ -80,7 +80,7 @@
LIR* tgt = RawLIR(0, kPseudoThrowTarget, kind, current_dalvik_offset_, reg1, reg2);
LIR* branch = OpCmpBranch(c_code, reg1, reg2, tgt);
// Remember branch target - will process later
- InsertGrowableList(cu_, &throw_launchpads_, reinterpret_cast<uintptr_t>(tgt));
+ throw_launchpads_.Insert(tgt);
return branch;
}
@@ -520,13 +520,12 @@
void Mir2Lir::HandleSuspendLaunchPads()
{
- LIR** suspend_label = reinterpret_cast<LIR**>(suspend_launchpads_.elem_list);
- int num_elems = suspend_launchpads_.num_used;
+ int num_elems = suspend_launchpads_.Size();
int helper_offset = ENTRYPOINT_OFFSET(pTestSuspendFromCode);
for (int i = 0; i < num_elems; i++) {
ResetRegPool();
ResetDefTracking();
- LIR* lab = suspend_label[i];
+ LIR* lab = suspend_launchpads_.Get(i);
LIR* resume_lab = reinterpret_cast<LIR*>(lab->operands[0]);
current_dalvik_offset_ = lab->operands[1];
AppendLIR(lab);
@@ -538,12 +537,11 @@
void Mir2Lir::HandleIntrinsicLaunchPads()
{
- LIR** intrinsic_label = reinterpret_cast<LIR**>(intrinsic_launchpads_.elem_list);
- int num_elems = intrinsic_launchpads_.num_used;
+ int num_elems = intrinsic_launchpads_.Size();
for (int i = 0; i < num_elems; i++) {
ResetRegPool();
ResetDefTracking();
- LIR* lab = intrinsic_label[i];
+ LIR* lab = intrinsic_launchpads_.Get(i);
CallInfo* info = reinterpret_cast<CallInfo*>(lab->operands[0]);
current_dalvik_offset_ = info->offset;
AppendLIR(lab);
@@ -558,12 +556,11 @@
void Mir2Lir::HandleThrowLaunchPads()
{
- LIR** throw_label = reinterpret_cast<LIR**>(throw_launchpads_.elem_list);
- int num_elems = throw_launchpads_.num_used;
+ int num_elems = throw_launchpads_.Size();
for (int i = 0; i < num_elems; i++) {
ResetRegPool();
ResetDefTracking();
- LIR* lab = throw_label[i];
+ LIR* lab = throw_launchpads_.Get(i);
current_dalvik_offset_ = lab->operands[1];
AppendLIR(lab);
int func_offset = 0;
@@ -1685,7 +1682,7 @@
LIR* target = RawLIR(current_dalvik_offset_, kPseudoSuspendTarget,
reinterpret_cast<uintptr_t>(ret_lab), current_dalvik_offset_);
branch->target = target;
- InsertGrowableList(cu_, &suspend_launchpads_, reinterpret_cast<uintptr_t>(target));
+ suspend_launchpads_.Insert(target);
}
/* Check if we need to check for pending suspend request */
@@ -1701,7 +1698,7 @@
reinterpret_cast<uintptr_t>(target), current_dalvik_offset_);
FlushAllRegs();
OpUnconditionalBranch(launch_pad);
- InsertGrowableList(cu_, &suspend_launchpads_, reinterpret_cast<uintptr_t>(launch_pad));
+ suspend_launchpads_.Insert(launch_pad);
}
} // namespace art
diff --git a/src/compiler/dex/quick/gen_invoke.cc b/src/compiler/dex/quick/gen_invoke.cc
index 3e946f8..8003d9a 100644
--- a/src/compiler/dex/quick/gen_invoke.cc
+++ b/src/compiler/dex/quick/gen_invoke.cc
@@ -861,8 +861,7 @@
if (range_check) {
// Set up a launch pad to allow retry in case of bounds violation
launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
- InsertGrowableList(cu_, &intrinsic_launchpads_,
- reinterpret_cast<uintptr_t>(launch_pad));
+ intrinsic_launchpads_.Insert(launch_pad);
OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
FreeTemp(reg_max);
OpCondBranch(kCondCs, launch_pad);
@@ -873,8 +872,7 @@
LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
// Set up a launch pad to allow retry in case of bounds violation
launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
- InsertGrowableList(cu_, &intrinsic_launchpads_,
- reinterpret_cast<uintptr_t>(launch_pad));
+ intrinsic_launchpads_.Insert(launch_pad);
OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
FreeTemp(reg_max);
OpCondBranch(kCondCc, launch_pad);
@@ -1046,7 +1044,7 @@
int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(ENTRYPOINT_OFFSET(pIndexOf)) : 0;
GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
- InsertGrowableList(cu_, &intrinsic_launchpads_, reinterpret_cast<uintptr_t>(launch_pad));
+ intrinsic_launchpads_.Insert(launch_pad);
OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad);
// NOTE: not a safepoint
if (cu_->instruction_set != kX86) {
@@ -1085,7 +1083,7 @@
GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
//TUNING: check if rl_cmp.s_reg_low is already null checked
LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
- InsertGrowableList(cu_, &intrinsic_launchpads_, reinterpret_cast<uintptr_t>(launch_pad));
+ intrinsic_launchpads_.Insert(launch_pad);
OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
// NOTE: not a safepoint
if (cu_->instruction_set != kX86) {
diff --git a/src/compiler/dex/quick/local_optimizations.cc b/src/compiler/dex/quick/local_optimizations.cc
index 695b12c..1cafce4 100644
--- a/src/compiler/dex/quick/local_optimizations.cc
+++ b/src/compiler/dex/quick/local_optimizations.cc
@@ -245,7 +245,8 @@
DEBUG_OPT(dump_dependent_insn_pair(this_lir, check_lir, "REG CLOBBERED"));
/* Only sink store instructions */
if (sink_distance && !is_this_lir_load) {
- LIR* new_store_lir = static_cast<LIR*>(NewMem(cu_, sizeof(LIR), true, kAllocLIR));
+ LIR* new_store_lir =
+ static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
*new_store_lir = *this_lir;
/*
* Stop point found - insert *before* the check_lir
@@ -432,7 +433,8 @@
/* Found a slot to hoist to */
if (slot >= 0) {
LIR* cur_lir = prev_inst_list[slot];
- LIR* new_load_lir = static_cast<LIR*>(NewMem(cu_, sizeof(LIR), true, kAllocLIR));
+ LIR* new_load_lir =
+ static_cast<LIR*>(arena_->NewMem(sizeof(LIR), true, ArenaAllocator::kAllocLIR));
*new_load_lir = *this_lir;
/*
* Insertion is guaranteed to succeed since check_lir
diff --git a/src/compiler/dex/quick/mips/call_mips.cc b/src/compiler/dex/quick/mips/call_mips.cc
index f73e602..b53d1e3 100644
--- a/src/compiler/dex/quick/mips/call_mips.cc
+++ b/src/compiler/dex/quick/mips/call_mips.cc
@@ -68,13 +68,14 @@
}
// Add the table to the list - we'll process it later
SwitchTable *tab_rec =
- static_cast<SwitchTable*>(NewMem(cu_, sizeof(SwitchTable), true, kAllocData));
+ static_cast<SwitchTable*>(arena_->NewMem(sizeof(SwitchTable), true,
+ ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
int elements = table[1];
tab_rec->targets =
- static_cast<LIR**>(NewMem(cu_, elements * sizeof(LIR*), true, kAllocLIR));
- InsertGrowableList(cu_, &switch_tables_, reinterpret_cast<uintptr_t>(tab_rec));
+ static_cast<LIR**>(arena_->NewMem(elements * sizeof(LIR*), true, ArenaAllocator::kAllocLIR));
+ switch_tables_.Insert(tab_rec);
// The table is composed of 8-byte key/disp pairs
int byte_size = elements * 8;
@@ -148,12 +149,14 @@
}
// Add the table to the list - we'll process it later
SwitchTable *tab_rec =
- static_cast<SwitchTable*>(NewMem(cu_, sizeof(SwitchTable), true, kAllocData));
+ static_cast<SwitchTable*>(arena_->NewMem(sizeof(SwitchTable), true,
+ ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
int size = table[1];
- tab_rec->targets = static_cast<LIR**>(NewMem(cu_, size * sizeof(LIR*), true, kAllocLIR));
- InsertGrowableList(cu_, &switch_tables_, reinterpret_cast<uintptr_t>(tab_rec));
+ tab_rec->targets = static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true,
+ ArenaAllocator::kAllocLIR));
+ switch_tables_.Insert(tab_rec);
// Get the switch value
rl_src = LoadValue(rl_src, kCoreReg);
@@ -228,14 +231,15 @@
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
// Add the table to the list - we'll process it later
FillArrayData *tab_rec =
- reinterpret_cast<FillArrayData*>(NewMem(cu_, sizeof(FillArrayData), true, kAllocData));
+ reinterpret_cast<FillArrayData*>(arena_->NewMem(sizeof(FillArrayData), true,
+ ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
uint16_t width = tab_rec->table[1];
uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
tab_rec->size = (size * width) + 8;
- InsertGrowableList(cu_, &fill_array_data_, reinterpret_cast<uintptr_t>(tab_rec));
+ fill_array_data_.Insert(tab_rec);
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
diff --git a/src/compiler/dex/quick/mips/codegen_mips.h b/src/compiler/dex/quick/mips/codegen_mips.h
index f681eda..db262a8 100644
--- a/src/compiler/dex/quick/mips/codegen_mips.h
+++ b/src/compiler/dex/quick/mips/codegen_mips.h
@@ -25,7 +25,7 @@
class MipsMir2Lir : public Mir2Lir {
public:
- MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph);
+ MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
// Required for target - codegen utilities.
virtual bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
diff --git a/src/compiler/dex/quick/mips/target_mips.cc b/src/compiler/dex/quick/mips/target_mips.cc
index 8d342af..46a625e 100644
--- a/src/compiler/dex/quick/mips/target_mips.cc
+++ b/src/compiler/dex/quick/mips/target_mips.cc
@@ -488,13 +488,16 @@
int num_temps = sizeof(core_temps)/sizeof(*core_temps);
int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
- reg_pool_ = static_cast<RegisterPool*>(NewMem(cu_, sizeof(*reg_pool_), true, kAllocRegAlloc));
+ reg_pool_ = static_cast<RegisterPool*>(arena_->NewMem(sizeof(*reg_pool_), true,
+ ArenaAllocator::kAllocRegAlloc));
reg_pool_->num_core_regs = num_regs;
reg_pool_->core_regs = static_cast<RegisterInfo*>
- (NewMem(cu_, num_regs * sizeof(*reg_pool_->core_regs), true, kAllocRegAlloc));
+ (arena_->NewMem(num_regs * sizeof(*reg_pool_->core_regs), true,
+ ArenaAllocator::kAllocRegAlloc));
reg_pool_->num_fp_regs = num_fp_regs;
reg_pool_->FPRegs = static_cast<RegisterInfo*>
- (NewMem(cu_, num_fp_regs * sizeof(*reg_pool_->FPRegs), true, kAllocRegAlloc));
+ (arena_->NewMem(num_fp_regs * sizeof(*reg_pool_->FPRegs), true,
+ ArenaAllocator::kAllocRegAlloc));
CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
// Keep special registers from being allocated
@@ -572,7 +575,8 @@
return (lir->opcode == kMipsB);
}
-MipsMir2Lir::MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph) : Mir2Lir(cu, mir_graph) {
+MipsMir2Lir::MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
+ : Mir2Lir(cu, mir_graph, arena) {
for (int i = 0; i < kMipsLast; i++) {
if (MipsMir2Lir::EncodingMap[i].opcode != i) {
LOG(FATAL) << "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
@@ -582,8 +586,9 @@
}
}
-Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph) {
- return new MipsMir2Lir(cu, mir_graph);
+Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+ ArenaAllocator* const arena) {
+ return new MipsMir2Lir(cu, mir_graph, arena);
}
uint64_t MipsMir2Lir::GetTargetInstFlags(int opcode)
diff --git a/src/compiler/dex/quick/mir_to_lir.cc b/src/compiler/dex/quick/mir_to_lir.cc
index 1f50914..481078d 100644
--- a/src/compiler/dex/quick/mir_to_lir.cc
+++ b/src/compiler/dex/quick/mir_to_lir.cc
@@ -794,7 +794,7 @@
BasicBlock* bb = NULL;
for (int idx = 0; idx < num_reachable_blocks; idx++) {
// TODO: no direct access of growable lists.
- int dfs_index = mir_graph_->GetDfsOrder()->elem_list[idx];
+ int dfs_index = mir_graph_->GetDfsOrder()->Get(idx);
bb = mir_graph_->GetBasicBlock(dfs_index);
if (bb->block_type == kDalvikByteCode) {
break;
@@ -821,7 +821,8 @@
{
// Hold the labels of each block.
block_label_list_ =
- static_cast<LIR*>(NewMem(cu_, sizeof(LIR) * mir_graph_->GetNumBlocks(), true, kAllocLIR));
+ static_cast<LIR*>(arena_->NewMem(sizeof(LIR) * mir_graph_->GetNumBlocks(), true,
+ ArenaAllocator::kAllocLIR));
PreOrderDfsIterator iter(mir_graph_, false /* not iterative */);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
diff --git a/src/compiler/dex/quick/mir_to_lir.h b/src/compiler/dex/quick/mir_to_lir.h
index 69ebc7e..270c883 100644
--- a/src/compiler/dex/quick/mir_to_lir.h
+++ b/src/compiler/dex/quick/mir_to_lir.h
@@ -21,8 +21,9 @@
#include "compiled_method.h"
#include "compiler/dex/compiler_enums.h"
#include "compiler/dex/compiler_ir.h"
-#include "compiler/dex/compiler_utility.h"
#include "compiler/dex/backend.h"
+#include "compiler/dex/growable_array.h"
+#include "compiler/dex/arena_allocator.h"
#include "safe_map.h"
namespace art {
@@ -124,9 +125,12 @@
};
// Target-specific initialization.
-Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph);
-Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph);
-Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph);
+Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+ ArenaAllocator* const arena);
+Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+ ArenaAllocator* const arena);
+Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+ ArenaAllocator* const arena);
// Utility macros to traverse the LIR list.
#define NEXT_LIR(lir) (lir->next)
@@ -684,7 +688,7 @@
LIR* code_literal_list_; // Code literals requiring patching.
protected:
- Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph);
+ Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
CompilationUnit* GetCompilationUnit() {
return cu_;
@@ -692,11 +696,11 @@
CompilationUnit* const cu_;
MIRGraph* const mir_graph_;
- GrowableList switch_tables_;
- GrowableList fill_array_data_;
- GrowableList throw_launchpads_;
- GrowableList suspend_launchpads_;
- GrowableList intrinsic_launchpads_;
+ GrowableArray<SwitchTable*> switch_tables_;
+ GrowableArray<FillArrayData*> fill_array_data_;
+ GrowableArray<LIR*> throw_launchpads_;
+ GrowableArray<LIR*> suspend_launchpads_;
+ GrowableArray<LIR*> intrinsic_launchpads_;
SafeMap<unsigned int, LIR*> boundary_map_; // boundary lookup cache.
/*
* Holds mapping from native PC to dex PC for safepoints where we may deoptimize.
@@ -742,6 +746,7 @@
unsigned int fp_spill_mask_;
LIR* first_lir_insn_;
LIR* last_lir_insn_;
+ ArenaAllocator* arena_;
}; // Class Mir2Lir
diff --git a/src/compiler/dex/quick/ralloc_util.cc b/src/compiler/dex/quick/ralloc_util.cc
index a6b8793..dd38914 100644
--- a/src/compiler/dex/quick/ralloc_util.cc
+++ b/src/compiler/dex/quick/ralloc_util.cc
@@ -18,7 +18,6 @@
#include "compiler/dex/compiler_ir.h"
#include "compiler/dex/compiler_internals.h"
-#include "compiler/dex/compiler_utility.h"
namespace art {
@@ -1056,10 +1055,12 @@
* TUNING: replace with linear scan once we have the ability
* to describe register live ranges for GC.
*/
- RefCounts *core_regs = static_cast<RefCounts*>(NewMem(cu_, sizeof(RefCounts) * num_regs,
- true, kAllocRegAlloc));
- RefCounts *FpRegs = static_cast<RefCounts *>(NewMem(cu_, sizeof(RefCounts) * num_regs,
- true, kAllocRegAlloc));
+ RefCounts *core_regs =
+ static_cast<RefCounts*>(arena_->NewMem(sizeof(RefCounts) * num_regs, true,
+ ArenaAllocator::kAllocRegAlloc));
+ RefCounts *FpRegs =
+ static_cast<RefCounts *>(arena_->NewMem(sizeof(RefCounts) * num_regs, true,
+ ArenaAllocator::kAllocRegAlloc));
// Set ssa names for original Dalvik registers
for (int i = 0; i < dalvik_regs; i++) {
core_regs[i].s_reg = FpRegs[i].s_reg = i;
@@ -1069,7 +1070,7 @@
FpRegs[dalvik_regs].s_reg = mir_graph_->GetMethodSReg(); // For consistency
// Set ssa names for compiler_temps
for (int i = 1; i <= cu_->num_compiler_temps; i++) {
- CompilerTemp* ct = reinterpret_cast<CompilerTemp*>(mir_graph_->compiler_temps_.elem_list[i]);
+ CompilerTemp* ct = mir_graph_->compiler_temps_.Get(i);
core_regs[dalvik_regs + i].s_reg = ct->s_reg;
FpRegs[dalvik_regs + i].s_reg = ct->s_reg;
}
diff --git a/src/compiler/dex/quick/x86/call_x86.cc b/src/compiler/dex/quick/x86/call_x86.cc
index 6b215f2..614a72d 100644
--- a/src/compiler/dex/quick/x86/call_x86.cc
+++ b/src/compiler/dex/quick/x86/call_x86.cc
@@ -76,12 +76,14 @@
}
// Add the table to the list - we'll process it later
SwitchTable *tab_rec =
- static_cast<SwitchTable *>(NewMem(cu_, sizeof(SwitchTable), true, kAllocData));
+ static_cast<SwitchTable *>(arena_->NewMem(sizeof(SwitchTable), true,
+ ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
int size = table[1];
- tab_rec->targets = static_cast<LIR**>(NewMem(cu_, size * sizeof(LIR*), true, kAllocLIR));
- InsertGrowableList(cu_, &switch_tables_, reinterpret_cast<uintptr_t>(tab_rec));
+ tab_rec->targets = static_cast<LIR**>(arena_->NewMem(size * sizeof(LIR*), true,
+ ArenaAllocator::kAllocLIR));
+ switch_tables_.Insert(tab_rec);
// Get the switch value
rl_src = LoadValue(rl_src, kCoreReg);
@@ -132,14 +134,15 @@
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
// Add the table to the list - we'll process it later
FillArrayData *tab_rec =
- static_cast<FillArrayData*>(NewMem(cu_, sizeof(FillArrayData), true, kAllocData));
+ static_cast<FillArrayData*>(arena_->NewMem(sizeof(FillArrayData), true,
+ ArenaAllocator::kAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
uint16_t width = tab_rec->table[1];
uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
tab_rec->size = (size * width) + 8;
- InsertGrowableList(cu_, &fill_array_data_, reinterpret_cast<uintptr_t>(tab_rec));
+ fill_array_data_.Insert(tab_rec);
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
@@ -251,7 +254,7 @@
OpRegThreadMem(kOpCmp, rX86_SP, Thread::StackEndOffset().Int32Value());
OpCondBranch(kCondUlt, tgt);
// Remember branch target - will process later
- InsertGrowableList(cu_, &throw_launchpads_, reinterpret_cast<uintptr_t>(tgt));
+ throw_launchpads_.Insert(tgt);
}
FlushIns(ArgLocs, rl_method);
diff --git a/src/compiler/dex/quick/x86/codegen_x86.h b/src/compiler/dex/quick/x86/codegen_x86.h
index 93b6839..99e5148 100644
--- a/src/compiler/dex/quick/x86/codegen_x86.h
+++ b/src/compiler/dex/quick/x86/codegen_x86.h
@@ -25,7 +25,7 @@
class X86Mir2Lir : public Mir2Lir {
public:
- X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph);
+ X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
// Required for target - codegen helpers.
virtual bool SmallLiteralDivide(Instruction::Code dalvik_opcode, RegLocation rl_src,
diff --git a/src/compiler/dex/quick/x86/int_x86.cc b/src/compiler/dex/quick/x86/int_x86.cc
index 9c72ad9..0430778 100644
--- a/src/compiler/dex/quick/x86/int_x86.cc
+++ b/src/compiler/dex/quick/x86/int_x86.cc
@@ -32,7 +32,7 @@
OpRegMem(kOpCmp, reg1, base, offset);
LIR* branch = OpCondBranch(c_code, tgt);
// Remember branch target - will process later
- InsertGrowableList(cu_, &throw_launchpads_, reinterpret_cast<uintptr_t>(tgt));
+ throw_launchpads_.Insert(tgt);
return branch;
}
diff --git a/src/compiler/dex/quick/x86/target_x86.cc b/src/compiler/dex/quick/x86/target_x86.cc
index 20074f1..e6a49f8 100644
--- a/src/compiler/dex/quick/x86/target_x86.cc
+++ b/src/compiler/dex/quick/x86/target_x86.cc
@@ -458,15 +458,16 @@
int num_temps = sizeof(core_temps)/sizeof(*core_temps);
int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
- reg_pool_ = static_cast<RegisterPool*>(NewMem(cu_, sizeof(*reg_pool_), true, kAllocRegAlloc));
+ reg_pool_ = static_cast<RegisterPool*>(arena_->NewMem(sizeof(*reg_pool_), true,
+ ArenaAllocator::kAllocRegAlloc));
reg_pool_->num_core_regs = num_regs;
reg_pool_->core_regs =
- static_cast<RegisterInfo*>(NewMem(cu_, num_regs * sizeof(*reg_pool_->core_regs),
- true, kAllocRegAlloc));
+ static_cast<RegisterInfo*>(arena_->NewMem(num_regs * sizeof(*reg_pool_->core_regs), true,
+ ArenaAllocator::kAllocRegAlloc));
reg_pool_->num_fp_regs = num_fp_regs;
reg_pool_->FPRegs =
- static_cast<RegisterInfo *>(NewMem(cu_, num_fp_regs * sizeof(*reg_pool_->FPRegs),
- true, kAllocRegAlloc));
+ static_cast<RegisterInfo *>(arena_->NewMem(num_fp_regs * sizeof(*reg_pool_->FPRegs), true,
+ ArenaAllocator::kAllocRegAlloc));
CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
// Keep special registers from being allocated
@@ -528,7 +529,8 @@
return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}
-X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph) : Mir2Lir(cu, mir_graph) {
+X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
+ : Mir2Lir(cu, mir_graph, arena) {
for (int i = 0; i < kX86Last; i++) {
if (X86Mir2Lir::EncodingMap[i].opcode != i) {
LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
@@ -538,8 +540,9 @@
}
}
-Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph) {
- return new X86Mir2Lir(cu, mir_graph);
+Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+ ArenaAllocator* const arena) {
+ return new X86Mir2Lir(cu, mir_graph, arena);
}
// Not used in x86