Merge "Improve type propagation with if-contexts"
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 69b4295..beb34dc 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -119,7 +119,6 @@
optimizing/primitive_type_propagation.cc \
optimizing/reference_type_propagation.cc \
trampolines/trampoline_compiler.cc \
- utils/arena_allocator.cc \
utils/arena_bit_vector.cc \
utils/arm/assembler_arm.cc \
utils/arm/assembler_arm32.cc \
@@ -137,7 +136,6 @@
utils/x86/managed_register_x86.cc \
utils/x86_64/assembler_x86_64.cc \
utils/x86_64/managed_register_x86_64.cc \
- utils/scoped_arena_allocator.cc \
utils/swap_space.cc \
buffered_output_stream.cc \
compiler.cc \
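
Note: the arena allocator sources move out of compiler/utils so the runtime can share them; every include change below follows suit. A minimal sketch of the pattern those headers provide (an STL-compatible adapter over a bump arena); Arena and ArenaAdapter here are illustrative stand-ins, not ART's actual classes:

    // Illustrative stand-ins for the ArenaAllocator/Adapter machinery.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    class Arena {
     public:
      void* Alloc(size_t bytes) {
        // Bump-style allocation; everything is freed when the arena dies.
        storage_.push_back(std::vector<uint8_t>(bytes));
        return storage_.back().data();
      }
     private:
      std::vector<std::vector<uint8_t>> storage_;
    };

    template <typename T>
    struct ArenaAdapter {
      using value_type = T;
      explicit ArenaAdapter(Arena* arena) : arena_(arena) {}
      template <typename U>
      ArenaAdapter(const ArenaAdapter<U>& other) : arena_(other.arena_) {}
      T* allocate(size_t n) { return static_cast<T*>(arena_->Alloc(n * sizeof(T))); }
      void deallocate(T*, size_t) {}  // No-op: the arena owns the memory.
      Arena* arena_;
    };

    template <typename T, typename U>
    bool operator==(const ArenaAdapter<T>& a, const ArenaAdapter<U>& b) {
      return a.arena_ == b.arena_;
    }
    template <typename T, typename U>
    bool operator!=(const ArenaAdapter<T>& a, const ArenaAdapter<U>& b) {
      return !(a == b);
    }

    template <typename T>
    using ArenaVector = std::vector<T, ArenaAdapter<T>>;

    int main() {
      Arena arena;
      ArenaAdapter<uint32_t> adapter(&arena);
      ArenaVector<uint32_t> core_vmap_table(adapter);
      core_vmap_table.push_back(42u);  // Heap traffic goes to the arena instead.
    }
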
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
index 0c46d43..dceea24 100644
--- a/compiler/dex/compiler_ir.h
+++ b/compiler/dex/compiler_ir.h
@@ -21,11 +21,11 @@
#include <string>
#include <vector>
+#include "base/arena_allocator.h"
+#include "base/scoped_arena_allocator.h"
#include "base/timing_logger.h"
#include "invoke_type.h"
#include "safe_map.h"
-#include "utils/arena_allocator.h"
-#include "utils/scoped_arena_allocator.h"
namespace art {
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
index c7bca85..6fa658c 100644
--- a/compiler/dex/global_value_numbering.h
+++ b/compiler/dex/global_value_numbering.h
@@ -17,12 +17,12 @@
#ifndef ART_COMPILER_DEX_GLOBAL_VALUE_NUMBERING_H_
#define ART_COMPILER_DEX_GLOBAL_VALUE_NUMBERING_H_
+#include "base/arena_object.h"
#include "base/logging.h"
#include "base/macros.h"
#include "mir_graph.h"
#include "compiler_ir.h"
#include "dex_flags.h"
-#include "utils/arena_object.h"
namespace art {
diff --git a/compiler/dex/gvn_dead_code_elimination.h b/compiler/dex/gvn_dead_code_elimination.h
index ea28039..9a19f29 100644
--- a/compiler/dex/gvn_dead_code_elimination.h
+++ b/compiler/dex/gvn_dead_code_elimination.h
@@ -17,9 +17,9 @@
#ifndef ART_COMPILER_DEX_GVN_DEAD_CODE_ELIMINATION_H_
#define ART_COMPILER_DEX_GVN_DEAD_CODE_ELIMINATION_H_
+#include "base/arena_object.h"
+#include "base/scoped_arena_containers.h"
#include "global_value_numbering.h"
-#include "utils/arena_object.h"
-#include "utils/scoped_arena_containers.h"
namespace art {
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index bfacf8e..97ea05a 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -19,9 +19,9 @@
#include <memory>
+#include "base/arena_object.h"
#include "base/logging.h"
#include "global_value_numbering.h"
-#include "utils/arena_object.h"
#include "utils/dex_instruction_utils.h"
namespace art {
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index cc16dc4..31dbc60 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -18,6 +18,7 @@
#include <memory>
#include "base/logging.h"
+#include "base/scoped_arena_containers.h"
#include "dataflow_iterator-inl.h"
#include "compiler_ir.h"
#include "dex_flags.h"
@@ -29,7 +30,6 @@
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
-#include "utils/scoped_arena_containers.h"
namespace art {
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 08ca1b2..76b5e44 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -24,6 +24,7 @@
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/stringprintf.h"
+#include "base/scoped_arena_containers.h"
#include "compiler_ir.h"
#include "dex_file-inl.h"
#include "dex_flags.h"
@@ -34,7 +35,6 @@
#include "leb128.h"
#include "pass_driver_me_post_opt.h"
#include "stack.h"
-#include "utils/scoped_arena_containers.h"
namespace art {
@@ -1738,7 +1738,7 @@
info->num_arg_words = mir->ssa_rep->num_uses;
info->args = (info->num_arg_words == 0) ? nullptr :
arena_->AllocArray<RegLocation>(info->num_arg_words, kArenaAllocMisc);
- for (int i = 0; i < info->num_arg_words; i++) {
+ for (size_t i = 0; i < info->num_arg_words; i++) {
info->args[i] = GetRawSrc(mir, i);
}
info->opt_flags = mir->optimization_flags;
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 020136c..e5abd3b 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -19,17 +19,17 @@
#include <stdint.h>
+#include "base/arena_containers.h"
+#include "base/scoped_arena_containers.h"
#include "dex_file.h"
#include "dex_instruction.h"
#include "dex_types.h"
#include "invoke_type.h"
#include "mir_field_info.h"
#include "mir_method_info.h"
-#include "utils/arena_bit_vector.h"
-#include "utils/arena_containers.h"
-#include "utils/scoped_arena_containers.h"
#include "reg_location.h"
#include "reg_storage.h"
+#include "utils/arena_bit_vector.h"
namespace art {
@@ -498,19 +498,19 @@
* more efficient invoke code generation.
*/
struct CallInfo {
- int num_arg_words; // Note: word count, not arg count.
- RegLocation* args; // One for each word of arguments.
- RegLocation result; // Eventual target of MOVE_RESULT.
+ size_t num_arg_words; // Note: word count, not arg count.
+ RegLocation* args; // One for each word of arguments.
+ RegLocation result; // Eventual target of MOVE_RESULT.
int opt_flags;
InvokeType type;
uint32_t dex_idx;
- uint32_t index; // Method idx for invokes, type idx for FilledNewArray.
+ uint32_t index; // Method idx for invokes, type idx for FilledNewArray.
uintptr_t direct_code;
uintptr_t direct_method;
- RegLocation target; // Target of following move_result.
+ RegLocation target; // Target of following move_result.
bool skip_this;
bool is_range;
- DexOffset offset; // Offset in code units.
+ DexOffset offset; // Offset in code units.
MIR* mir;
};
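
Note: CallInfo::num_arg_words and the loops that consume it switch from int to size_t throughout this change. A small sketch of the signed/unsigned comparison hazard the switch removes:

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t num_arg_words = 3u;  // CallInfo::num_arg_words is now size_t.
      int neg = -1;
      // The usual arithmetic conversions turn the int into size_t before
      // comparing, so -1 becomes SIZE_MAX and the check silently inverts:
      if (!(static_cast<size_t>(neg) < num_arg_words)) {
        std::puts("-1 wrapped to SIZE_MAX; -Wsign-compare would warn here");
      }
      // With index and bound both size_t, as in the updated loops, no
      // conversion happens at all.
      for (size_t i = 0; i < num_arg_words; ++i) {
        std::printf("arg word %zu\n", i);
      }
    }
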
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 2f547ea..fd67d4e 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -16,6 +16,7 @@
#include "base/bit_vector-inl.h"
#include "base/logging.h"
+#include "base/scoped_arena_containers.h"
#include "dataflow_iterator-inl.h"
#include "dex_flags.h"
#include "driver/compiler_driver.h"
@@ -27,7 +28,6 @@
#include "quick/dex_file_method_inliner.h"
#include "quick/dex_file_to_method_inliner_map.h"
#include "stack.h"
-#include "utils/scoped_arena_containers.h"
namespace art {
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 1b5dde2..9cf005b 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -484,6 +484,28 @@
NewLIR1(kThumbBx, rs_rARM_LR.GetReg());
}
+void ArmMir2Lir::GenSpecialEntryForSuspend() {
+ // Keep 16-byte stack alignment - push r0, i.e. ArtMethod*, r5, r6, lr.
+ DCHECK(!IsTemp(rs_r5));
+ DCHECK(!IsTemp(rs_r6));
+ core_spill_mask_ =
+ (1u << rs_r5.GetRegNum()) | (1u << rs_r6.GetRegNum()) | (1u << rs_rARM_LR.GetRegNum());
+ num_core_spills_ = 3u;
+ fp_spill_mask_ = 0u;
+ num_fp_spills_ = 0u;
+ frame_size_ = 16u;
+ core_vmap_table_.clear();
+ fp_vmap_table_.clear();
+ NewLIR1(kThumbPush, (1u << rs_r0.GetRegNum()) | // ArtMethod*
+ (core_spill_mask_ & ~(1u << rs_rARM_LR.GetRegNum())) | // Spills other than LR.
+ (1u << 8)); // LR encoded for 16-bit push.
+}
+
+void ArmMir2Lir::GenSpecialExitForSuspend() {
+ // Pop the frame. (ArtMethod* no longer needed but restore it anyway.)
+ NewLIR1(kThumb2Pop, (1u << rs_r0.GetRegNum()) | core_spill_mask_); // 32-bit because of LR.
+}
+
static bool ArmUseRelativeCall(CompilationUnit* cu, const MethodReference& target_method) {
// Emit relative calls only within a dex file due to the limited range of the BL insn.
return cu->dex_file == target_method.dex_file;
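
Note: the 16-bit Thumb PUSH used in GenSpecialEntryForSuspend encodes r0-r7 in bits 0-7 and LR in bit 8, while core_spill_mask_ keeps LR at its architectural bit 14. A sketch of the operand computation; the helper is illustrative, not ART code:

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kLrRegNum = 14u;      // LR's bit in core_spill_mask_.
    constexpr uint32_t kThumbPushLrBit = 8u; // LR's bit in the T1 PUSH encoding.

    uint32_t ThumbPushOperand(uint32_t core_spill_mask, uint32_t extra_low_regs) {
      uint32_t low_regs = (core_spill_mask & 0x00ffu) | extra_low_regs;  // r0-r7.
      uint32_t lr = (core_spill_mask >> kLrRegNum) & 1u;
      return low_regs | (lr << kThumbPushLrBit);
    }

    int main() {
      // r5, r6 and LR spilled, plus r0 (ArtMethod*) pushed explicitly.
      uint32_t mask = (1u << 5) | (1u << 6) | (1u << kLrRegNum);
      assert(ThumbPushOperand(mask, 1u << 0) ==
             ((1u << 0) | (1u << 5) | (1u << 6) | (1u << 8)));
    }
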
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 025e69f..67fabbd 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -18,9 +18,9 @@
#define ART_COMPILER_DEX_QUICK_ARM_CODEGEN_ARM_H_
#include "arm_lir.h"
+#include "base/arena_containers.h"
#include "base/logging.h"
#include "dex/quick/mir_to_lir.h"
-#include "utils/arena_containers.h"
namespace art {
@@ -167,7 +167,9 @@
void GenDivZeroCheckWide(RegStorage reg);
void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
void GenExitSequence();
- void GenSpecialExitSequence();
+ void GenSpecialExitSequence() OVERRIDE;
+ void GenSpecialEntryForSuspend() OVERRIDE;
+ void GenSpecialExitForSuspend() OVERRIDE;
void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
void GenSelect(BasicBlock* bb, MIR* mir);
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index d1e4b7e..24e8fdf 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -392,6 +392,23 @@
NewLIR0(kA64Ret);
}
+void Arm64Mir2Lir::GenSpecialEntryForSuspend() {
+ // Keep 16-byte stack alignment - push x0, i.e. ArtMethod*, lr.
+ core_spill_mask_ = (1u << rs_xLR.GetRegNum());
+ num_core_spills_ = 1u;
+ fp_spill_mask_ = 0u;
+ num_fp_spills_ = 0u;
+ frame_size_ = 16u;
+ core_vmap_table_.clear();
+ fp_vmap_table_.clear();
+ NewLIR4(WIDE(kA64StpPre4rrXD), rs_x0.GetReg(), rs_xLR.GetReg(), rs_sp.GetReg(), -frame_size_ / 8);
+}
+
+void Arm64Mir2Lir::GenSpecialExitForSuspend() {
+ // Pop the frame. (ArtMethod* no longer needed but restore it anyway.)
+ NewLIR4(WIDE(kA64LdpPost4rrXD), rs_x0.GetReg(), rs_xLR.GetReg(), rs_sp.GetReg(), frame_size_ / 8);
+}
+
static bool Arm64UseRelativeCall(CompilationUnit* cu, const MethodReference& target_method) {
// Emit relative calls anywhere in the image or within a dex file otherwise.
return cu->compiler_driver->IsImage() || cu->dex_file == target_method.dex_file;
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 49ca625..d5f0536 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -169,6 +169,8 @@
void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
void GenExitSequence() OVERRIDE;
void GenSpecialExitSequence() OVERRIDE;
+ void GenSpecialEntryForSuspend() OVERRIDE;
+ void GenSpecialExitForSuspend() OVERRIDE;
void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 055c39f..0be9fd4 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -456,37 +456,29 @@
return AddWordData(constant_list_p, val_lo);
}
-static void Push32(std::vector<uint8_t>&buf, int data) {
- buf.push_back(data & 0xff);
- buf.push_back((data >> 8) & 0xff);
- buf.push_back((data >> 16) & 0xff);
- buf.push_back((data >> 24) & 0xff);
-}
-
/**
* @brief Push a compressed reference which needs patching at link/patchoat-time.
* @details This needs to be kept consistent with the code which actually does the patching in
* oat_writer.cc and in the patchoat tool.
*/
-static void PushUnpatchedReference(std::vector<uint8_t>&buf) {
+static void PushUnpatchedReference(CodeBuffer* buf) {
// Note that we can safely initialize the patches to zero. The code deduplication mechanism takes
// the patches into account when determining whether two pieces of codes are functionally
// equivalent.
Push32(buf, UINT32_C(0));
}
-static void AlignBuffer(std::vector<uint8_t>&buf, size_t offset) {
- while (buf.size() < offset) {
- buf.push_back(0);
- }
+static void AlignBuffer(CodeBuffer* buf, size_t offset) {
+ DCHECK_LE(buf->size(), offset);
+ buf->insert(buf->end(), offset - buf->size(), 0u);
}
/* Write the literal pool to the output stream */
void Mir2Lir::InstallLiteralPools() {
- AlignBuffer(code_buffer_, data_offset_);
+ AlignBuffer(&code_buffer_, data_offset_);
LIR* data_lir = literal_list_;
while (data_lir != nullptr) {
- Push32(code_buffer_, data_lir->operands[0]);
+ Push32(&code_buffer_, data_lir->operands[0]);
data_lir = NEXT_LIR(data_lir);
}
// TODO: patches_.reserve() as needed.
@@ -498,7 +490,7 @@
reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
patches_.push_back(LinkerPatch::CodePatch(code_buffer_.size(),
target_dex_file, target_method_idx));
- PushUnpatchedReference(code_buffer_);
+ PushUnpatchedReference(&code_buffer_);
data_lir = NEXT_LIR(data_lir);
}
data_lir = method_literal_list_;
@@ -508,7 +500,7 @@
reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
patches_.push_back(LinkerPatch::MethodPatch(code_buffer_.size(),
target_dex_file, target_method_idx));
- PushUnpatchedReference(code_buffer_);
+ PushUnpatchedReference(&code_buffer_);
data_lir = NEXT_LIR(data_lir);
}
// Push class literals.
@@ -519,7 +511,7 @@
reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
patches_.push_back(LinkerPatch::TypePatch(code_buffer_.size(),
class_dex_file, target_type_idx));
- PushUnpatchedReference(code_buffer_);
+ PushUnpatchedReference(&code_buffer_);
data_lir = NEXT_LIR(data_lir);
}
}
@@ -527,7 +519,7 @@
/* Write the switch tables to the output stream */
void Mir2Lir::InstallSwitchTables() {
for (Mir2Lir::SwitchTable* tab_rec : switch_tables_) {
- AlignBuffer(code_buffer_, tab_rec->offset);
+ AlignBuffer(&code_buffer_, tab_rec->offset);
/*
* For Arm, our reference point is the address of the bx
* instruction that does the launch, so we have to subtract
@@ -567,8 +559,8 @@
LIR* boundary_lir = InsertCaseLabel(target, key);
DCHECK(boundary_lir != nullptr);
int disp = boundary_lir->offset - bx_offset;
- Push32(code_buffer_, key);
- Push32(code_buffer_, disp);
+ Push32(&code_buffer_, key);
+ Push32(&code_buffer_, disp);
if (cu_->verbose) {
LOG(INFO) << " Case[" << elems << "] key: 0x"
<< std::hex << key << ", disp: 0x"
@@ -592,7 +584,7 @@
LIR* boundary_lir = InsertCaseLabel(target, key);
DCHECK(boundary_lir != nullptr);
int disp = boundary_lir->offset - bx_offset;
- Push32(code_buffer_, disp);
+ Push32(&code_buffer_, disp);
if (cu_->verbose) {
LOG(INFO) << " Case[" << elems << "] disp: 0x"
<< std::hex << disp;
@@ -607,7 +599,7 @@
/* Write the fill array data to the output stream */
void Mir2Lir::InstallFillArrayData() {
for (Mir2Lir::FillArrayData* tab_rec : fill_array_data_) {
- AlignBuffer(code_buffer_, tab_rec->offset);
+ AlignBuffer(&code_buffer_, tab_rec->offset);
for (int i = 0; i < (tab_rec->size + 1) / 2; i++) {
code_buffer_.push_back(tab_rec->table[i] & 0xFF);
code_buffer_.push_back((tab_rec->table[i] >> 8) & 0xFF);
@@ -975,8 +967,11 @@
estimated_native_code_size_(0),
reg_pool_(nullptr),
live_sreg_(0),
+ code_buffer_(mir_graph->GetArena()->Adapter()),
+ encoded_mapping_table_(mir_graph->GetArena()->Adapter()),
core_vmap_table_(mir_graph->GetArena()->Adapter()),
fp_vmap_table_(mir_graph->GetArena()->Adapter()),
+ native_gc_map_(mir_graph->GetArena()->Adapter()),
patches_(mir_graph->GetArena()->Adapter()),
num_core_spills_(0),
num_fp_spills_(0),
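
Note: Push32 moves into a header and AlignBuffer switches to a single bulk insert over the now arena-backed CodeBuffer. A self-contained sketch of both helpers over a plain std::vector<uint8_t>, which behaves the same way:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using CodeBuffer = std::vector<uint8_t>;  // ArenaVector<uint8_t> in ART.

    static void Push32(CodeBuffer* buf, int data) {
      // Append a 32-bit value in little-endian byte order.
      buf->push_back(data & 0xff);
      buf->push_back((data >> 8) & 0xff);
      buf->push_back((data >> 16) & 0xff);
      buf->push_back((data >> 24) & 0xff);
    }

    static void AlignBuffer(CodeBuffer* buf, size_t offset) {
      // One bulk insert of zero padding instead of a push_back loop.
      assert(buf->size() <= offset);
      buf->insert(buf->end(), offset - buf->size(), 0u);
    }

    int main() {
      CodeBuffer buf;
      Push32(&buf, 0x12345678);
      assert(buf[0] == 0x78 && buf[3] == 0x12);  // Little-endian layout.
      AlignBuffer(&buf, 16u);
      assert(buf.size() == 16u);
    }
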
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 9f53b89..3c9b7a3 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -413,7 +413,7 @@
* Current code also throws internal unimp if not 'L', '[' or 'I'.
*/
void Mir2Lir::GenFilledNewArray(CallInfo* info) {
- int elems = info->num_arg_words;
+ size_t elems = info->num_arg_words;
int type_idx = info->index;
FlushAllRegs(); /* Everything to home location */
QuickEntrypointEnum target;
@@ -450,7 +450,7 @@
* of any regs in the source range that have been promoted to
* home location.
*/
- for (int i = 0; i < elems; i++) {
+ for (size_t i = 0; i < elems; i++) {
RegLocation loc = UpdateLoc(info->args[i]);
if (loc.location == kLocPhysReg) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -493,7 +493,7 @@
OpRegRegImm(kOpAdd, r_dst, ref_reg,
mirror::Array::DataOffset(component_size).Int32Value());
// Set up the loop counter (known to be > 0)
- LoadConstant(r_idx, elems - 1);
+ LoadConstant(r_idx, static_cast<int>(elems - 1));
// Generate the copy loop. Going backwards for convenience
LIR* loop_head_target = NewLIR0(kPseudoTargetLabel);
// Copy next element
@@ -515,9 +515,9 @@
FreeTemp(r_dst);
FreeTemp(r_src);
} else {
- DCHECK_LE(elems, 5); // Usually but not necessarily non-range.
+ DCHECK_LE(elems, 5u); // Usually but not necessarily non-range.
// TUNING: interleave
- for (int i = 0; i < elems; i++) {
+ for (size_t i = 0; i < elems; i++) {
RegLocation rl_arg;
if (info->args[i].ref) {
rl_arg = LoadValue(info->args[i], kRefReg);
@@ -537,7 +537,7 @@
}
if (elems != 0 && info->args[0].ref) {
// If there is at least one potentially non-null value, unconditionally mark the GC card.
- for (int i = 0; i < elems; i++) {
+ for (size_t i = 0; i < elems; i++) {
if (!mir_graph_->IsConstantNullRef(info->args[i])) {
UnconditionallyMarkGCCard(ref_reg);
break;
@@ -2158,7 +2158,7 @@
}
}
-class SuspendCheckSlowPath : public Mir2Lir::LIRSlowPath {
+class Mir2Lir::SuspendCheckSlowPath : public Mir2Lir::LIRSlowPath {
public:
SuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont)
: LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont) {
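
Note: SuspendCheckSlowPath becomes a nested class of Mir2Lir (declared in mir_to_lir.h below) so its out-of-line definition keeps access to the enclosing class's non-public members. A minimal sketch of that C++ pattern:

    class Outer {
     public:
      class Nested;  // Declared as a member, like SuspendCheckSlowPath...
      int Use();
     private:
      int secret_ = 7;
    };

    class Outer::Nested {  // ...and defined out of line in a .cc file.
     public:
      // Nested classes are members, so they can reach Outer's privates.
      static int Peek(Outer* o) { return o->secret_; }
    };

    int Outer::Use() { return Nested::Peek(this); }

    int main() { Outer o; return o.Use() == 7 ? 0 : 1; }
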
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index bb5b0cd..8e3df7c 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -405,9 +405,10 @@
*/
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
RegLocation* t_loc = nullptr;
+ EnsureInitializedArgMappingToPhysicalReg();
for (uint32_t i = 0; i < mir_graph_->GetNumOfInVRs(); i += t_loc->wide ? 2 : 1) {
// get reg corresponding to input
- RegStorage reg = GetArgMappingToPhysicalReg(i);
+ RegStorage reg = in_to_reg_storage_mapping_.GetReg(i);
t_loc = &ArgLocs[i];
// If the wide input appeared as single, flush it and go
@@ -661,7 +662,7 @@
}
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
// Scan the rest of the args - if in phys_reg flush to memory
- for (int next_arg = start; next_arg < info->num_arg_words;) {
+ for (size_t next_arg = start; next_arg < info->num_arg_words;) {
RegLocation loc = info->args[next_arg];
if (loc.wide) {
loc = UpdateLocWide(loc);
@@ -719,10 +720,10 @@
uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
InvokeType type, bool skip_this) {
// If no arguments, just return.
- if (info->num_arg_words == 0)
+ if (info->num_arg_words == 0u)
return call_state;
- const int start_index = skip_this ? 1 : 0;
+ const size_t start_index = skip_this ? 1 : 0;
// Get architecture dependent mapping between output VRs and physical registers
// basing on shorty of method to call.
@@ -733,13 +734,13 @@
in_to_reg_storage_mapping.Initialize(&shorty_iterator, GetResetedInToRegStorageMapper());
}
- int stack_map_start = std::max(in_to_reg_storage_mapping.GetMaxMappedIn() + 1, start_index);
+ size_t stack_map_start = std::max(in_to_reg_storage_mapping.GetEndMappedIn(), start_index);
if ((stack_map_start < info->num_arg_words) && info->args[stack_map_start].high_word) {
// It is possible that the last mapped reg is 32 bit while arg is 64-bit.
// It will be handled together with low part mapped to register.
stack_map_start++;
}
- int regs_left_to_pass_via_stack = info->num_arg_words - stack_map_start;
+ size_t regs_left_to_pass_via_stack = info->num_arg_words - stack_map_start;
// If it is a range case we can try to copy remaining VRs (not mapped to physical registers)
// using more optimal algorithm.
@@ -755,11 +756,10 @@
RegStorage regRef = TargetReg(kArg3, kRef);
RegStorage regSingle = TargetReg(kArg3, kNotWide);
RegStorage regWide = TargetReg(kArg2, kWide);
- for (int i = start_index;
- i < stack_map_start + regs_left_to_pass_via_stack; i++) {
+ for (size_t i = start_index; i < stack_map_start + regs_left_to_pass_via_stack; i++) {
RegLocation rl_arg = info->args[i];
rl_arg = UpdateRawLoc(rl_arg);
- RegStorage reg = in_to_reg_storage_mapping.Get(i);
+ RegStorage reg = in_to_reg_storage_mapping.GetReg(i);
if (!reg.Valid()) {
int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
{
@@ -799,10 +799,10 @@
}
// Finish with VRs mapped to physical registers.
- for (int i = start_index; i < stack_map_start; i++) {
+ for (size_t i = start_index; i < stack_map_start; i++) {
RegLocation rl_arg = info->args[i];
rl_arg = UpdateRawLoc(rl_arg);
- RegStorage reg = in_to_reg_storage_mapping.Get(i);
+ RegStorage reg = in_to_reg_storage_mapping.GetReg(i);
if (reg.Valid()) {
if (rl_arg.wide) {
// if reg is not 64-bit (it is half of 64-bit) then handle it separately.
@@ -852,12 +852,11 @@
return call_state;
}
-RegStorage Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
+void Mir2Lir::EnsureInitializedArgMappingToPhysicalReg() {
if (!in_to_reg_storage_mapping_.IsInitialized()) {
ShortyIterator shorty_iterator(cu_->shorty, cu_->invoke_type == kStatic);
in_to_reg_storage_mapping_.Initialize(&shorty_iterator, GetResetedInToRegStorageMapper());
}
- return in_to_reg_storage_mapping_.Get(arg_num);
}
RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
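
Note: GetMaxMappedIn() + 1 becomes the past-the-end GetEndMappedIn(), dropping the old -1 sentinel. A sketch of why the half-open convention composes more cleanly with std::max and loop bounds:

    #include <algorithm>
    #include <cstddef>

    size_t FirstStackArg(size_t end_mapped_in, size_t start_index) {
      // Old code: std::max(max_mapped_in + 1, start_index) on ints, where
      // "nothing mapped" was the sentinel -1 and needed the +1 correction.
      return std::max(end_mapped_in, start_index);
    }

    int main() {
      // No args in registers: end == 0 just works, no special case needed.
      return FirstStackArg(/*end_mapped_in=*/0u, /*start_index=*/1u) == 1u ? 0 : 1;
    }
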
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 0719b52..d9471f6 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -315,6 +315,26 @@
OpReg(kOpBx, rs_rRA);
}
+void MipsMir2Lir::GenSpecialEntryForSuspend() {
+ // Keep 16-byte stack alignment - push A0, i.e. ArtMethod*, 2 filler words and RA.
+ core_spill_mask_ = (1u << rs_rRA.GetRegNum());
+ num_core_spills_ = 1u;
+ fp_spill_mask_ = 0u;
+ num_fp_spills_ = 0u;
+ frame_size_ = 16u;
+ core_vmap_table_.clear();
+ fp_vmap_table_.clear();
+ OpRegImm(kOpSub, rs_rMIPS_SP, frame_size_);
+ Store32Disp(rs_rMIPS_SP, frame_size_ - 4, rs_rRA);
+ Store32Disp(rs_rMIPS_SP, 0, rs_rA0);
+}
+
+void MipsMir2Lir::GenSpecialExitForSuspend() {
+ // Pop the frame. Don't pop ArtMethod*, it's no longer needed.
+ Load32Disp(rs_rMIPS_SP, frame_size_ - 4, rs_rRA);
+ OpRegImm(kOpAdd, rs_rMIPS_SP, frame_size_);
+}
+
/*
* Bit of a hack here - in the absence of a real scheduling pass,
* emit the next instruction in static & direct invoke sequences.
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index a37fe40..e1b43ca 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -141,7 +141,9 @@
void GenDivZeroCheckWide(RegStorage reg);
void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
void GenExitSequence();
- void GenSpecialExitSequence();
+ void GenSpecialExitSequence() OVERRIDE;
+ void GenSpecialEntryForSuspend() OVERRIDE;
+ void GenSpecialExitForSuspend() OVERRIDE;
void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
void GenSelect(BasicBlock* bb, MIR* mir);
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 9f6d8af..34e5e25 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -24,6 +24,69 @@
namespace art {
+class Mir2Lir::SpecialSuspendCheckSlowPath : public Mir2Lir::LIRSlowPath {
+ public:
+ SpecialSuspendCheckSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont)
+ : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont),
+ num_used_args_(0u) {
+ }
+
+ void PreserveArg(int in_position) {
+ // Avoid duplicates.
+ for (size_t i = 0; i != num_used_args_; ++i) {
+ if (used_args_[i] == in_position) {
+ return;
+ }
+ }
+ DCHECK_LT(num_used_args_, kMaxArgsToPreserve);
+ used_args_[num_used_args_] = in_position;
+ ++num_used_args_;
+ }
+
+ void Compile() OVERRIDE {
+ m2l_->ResetRegPool();
+ m2l_->ResetDefTracking();
+ GenerateTargetLabel(kPseudoSuspendTarget);
+
+ m2l_->LockCallTemps();
+
+ // Generate frame.
+ m2l_->GenSpecialEntryForSuspend();
+
+ // Spill all args.
+ for (size_t i = 0, end = m2l_->in_to_reg_storage_mapping_.GetEndMappedIn(); i < end;
+ i += m2l_->in_to_reg_storage_mapping_.GetShorty(i).IsWide() ? 2u : 1u) {
+ m2l_->SpillArg(i);
+ }
+
+ m2l_->FreeCallTemps();
+
+ // Do the actual suspend call to runtime.
+ m2l_->CallRuntimeHelper(kQuickTestSuspend, true);
+
+ m2l_->LockCallTemps();
+
+ // Unspill used regs. (Don't unspill unused args.)
+ for (size_t i = 0; i != num_used_args_; ++i) {
+ m2l_->UnspillArg(used_args_[i]);
+ }
+
+ // Pop the frame.
+ m2l_->GenSpecialExitForSuspend();
+
+ // Branch to the continue label.
+ DCHECK(cont_ != nullptr);
+ m2l_->OpUnconditionalBranch(cont_);
+
+ m2l_->FreeCallTemps();
+ }
+
+ private:
+ static constexpr size_t kMaxArgsToPreserve = 2u;
+ size_t num_used_args_;
+ int used_args_[kMaxArgsToPreserve];
+};
+
RegisterClass Mir2Lir::ShortyToRegClass(char shorty_type) {
RegisterClass res;
switch (shorty_type) {
@@ -54,15 +117,15 @@
return res;
}
-void Mir2Lir::LockArg(int in_position, bool) {
- RegStorage reg_arg = GetArgMappingToPhysicalReg(in_position);
+void Mir2Lir::LockArg(size_t in_position) {
+ RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);
if (reg_arg.Valid()) {
LockTemp(reg_arg);
}
}
-RegStorage Mir2Lir::LoadArg(int in_position, RegisterClass reg_class, bool wide) {
+RegStorage Mir2Lir::LoadArg(size_t in_position, RegisterClass reg_class, bool wide) {
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
int offset = StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
@@ -82,7 +145,7 @@
offset += sizeof(uint64_t);
}
- RegStorage reg_arg = GetArgMappingToPhysicalReg(in_position);
+ RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);
// TODO: REVISIT: This adds a spill of low part while we could just copy it.
if (reg_arg.Valid() && wide && (reg_arg.GetWideKind() == kNotWide)) {
@@ -112,7 +175,7 @@
return reg_arg;
}
-void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
+void Mir2Lir::LoadArgDirect(size_t in_position, RegLocation rl_dest) {
DCHECK_EQ(rl_dest.location, kLocPhysReg);
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
int offset = StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
@@ -132,7 +195,7 @@
offset += sizeof(uint64_t);
}
- RegStorage reg_arg = GetArgMappingToPhysicalReg(in_position);
+ RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);
// TODO: REVISIT: This adds a spill of low part while we could just copy it.
if (reg_arg.Valid() && rl_dest.wide && (reg_arg.GetWideKind() == kNotWide)) {
@@ -153,6 +216,41 @@
}
}
+void Mir2Lir::SpillArg(size_t in_position) {
+ RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);
+
+ if (reg_arg.Valid()) {
+ int offset = frame_size_ + StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
+ ShortyArg arg = in_to_reg_storage_mapping_.GetShorty(in_position);
+ OpSize size = arg.IsRef() ? kReference :
+ (arg.IsWide() && reg_arg.GetWideKind() == kWide) ? k64 : k32;
+ StoreBaseDisp(TargetPtrReg(kSp), offset, reg_arg, size, kNotVolatile);
+ }
+}
+
+void Mir2Lir::UnspillArg(size_t in_position) {
+ RegStorage reg_arg = in_to_reg_storage_mapping_.GetReg(in_position);
+
+ if (reg_arg.Valid()) {
+ int offset = frame_size_ + StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
+ ShortyArg arg = in_to_reg_storage_mapping_.GetShorty(in_position);
+ OpSize size = arg.IsRef() ? kReference :
+ (arg.IsWide() && reg_arg.GetWideKind() == kWide) ? k64 : k32;
+ LoadBaseDisp(TargetPtrReg(kSp), offset, reg_arg, size, kNotVolatile);
+ }
+}
+
+Mir2Lir::SpecialSuspendCheckSlowPath* Mir2Lir::GenSpecialSuspendTest() {
+ LockCallTemps();
+ LIR* branch = OpTestSuspend(nullptr);
+ FreeCallTemps();
+ LIR* cont = NewLIR0(kPseudoTargetLabel);
+ SpecialSuspendCheckSlowPath* slow_path =
+ new (arena_) SpecialSuspendCheckSlowPath(this, branch, cont);
+ AddSlowPath(slow_path);
+ return slow_path;
+}
+
bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
// FastInstance() already checked by DexFileMethodInliner.
const InlineIGetIPutData& data = special.d.ifield_data;
@@ -161,13 +259,16 @@
return false;
}
- OpSize size = k32;
+ OpSize size;
switch (data.op_variant) {
- case InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT):
- size = kReference;
+ case InlineMethodAnalyser::IGetVariant(Instruction::IGET):
+ size = in_to_reg_storage_mapping_.GetShorty(data.src_arg).IsFP() ? kSingle : k32;
break;
case InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE):
- size = k64;
+ size = in_to_reg_storage_mapping_.GetShorty(data.src_arg).IsFP() ? kDouble : k64;
+ break;
+ case InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT):
+ size = kReference;
break;
case InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT):
size = kSignedHalf;
@@ -181,11 +282,18 @@
case InlineMethodAnalyser::IGetVariant(Instruction::IGET_BOOLEAN):
size = kUnsignedByte;
break;
+ default:
+ LOG(FATAL) << "Unknown variant: " << data.op_variant;
+ UNREACHABLE();
}
// Point of no return - no aborts after this
- GenPrintLabel(mir);
+ if (!kLeafOptimization) {
+ auto* slow_path = GenSpecialSuspendTest();
+ slow_path->PreserveArg(data.object_arg);
+ }
LockArg(data.object_arg);
+ GenPrintLabel(mir);
RegStorage reg_obj = LoadArg(data.object_arg, kRefReg);
RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile);
RegisterClass ret_reg_class = ShortyToRegClass(cu_->shorty[0]);
@@ -223,13 +331,16 @@
return false;
}
- OpSize size = k32;
+ OpSize size;
switch (data.op_variant) {
- case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT):
- size = kReference;
+ case InlineMethodAnalyser::IPutVariant(Instruction::IPUT):
+ size = in_to_reg_storage_mapping_.GetShorty(data.src_arg).IsFP() ? kSingle : k32;
break;
case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE):
- size = k64;
+ size = in_to_reg_storage_mapping_.GetShorty(data.src_arg).IsFP() ? kDouble : k64;
+ break;
+ case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT):
+ size = kReference;
break;
case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_SHORT):
size = kSignedHalf;
@@ -243,12 +354,20 @@
case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BOOLEAN):
size = kUnsignedByte;
break;
+ default:
+ LOG(FATAL) << "Unknown variant: " << data.op_variant;
+ UNREACHABLE();
}
// Point of no return - no aborts after this
- GenPrintLabel(mir);
+ if (!kLeafOptimization) {
+ auto* slow_path = GenSpecialSuspendTest();
+ slow_path->PreserveArg(data.object_arg);
+ slow_path->PreserveArg(data.src_arg);
+ }
LockArg(data.object_arg);
- LockArg(data.src_arg, IsWide(size));
+ LockArg(data.src_arg);
+ GenPrintLabel(mir);
RegStorage reg_obj = LoadArg(data.object_arg, kRefReg);
RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile);
RegStorage reg_src = LoadArg(data.src_arg, reg_class, IsWide(size));
@@ -269,8 +388,12 @@
bool wide = (data.is_wide != 0u);
// Point of no return - no aborts after this
+ if (!kLeafOptimization) {
+ auto* slow_path = GenSpecialSuspendTest();
+ slow_path->PreserveArg(data.arg);
+ }
+ LockArg(data.arg);
GenPrintLabel(mir);
- LockArg(data.arg, wide);
RegisterClass reg_class = ShortyToRegClass(cu_->shorty[0]);
RegLocation rl_dest = wide ? GetReturnWide(reg_class) : GetReturn(reg_class);
LoadArgDirect(data.arg, rl_dest);
@@ -285,15 +408,22 @@
current_dalvik_offset_ = mir->offset;
MIR* return_mir = nullptr;
bool successful = false;
+ EnsureInitializedArgMappingToPhysicalReg();
switch (special.opcode) {
case kInlineOpNop:
successful = true;
DCHECK_EQ(mir->dalvikInsn.opcode, Instruction::RETURN_VOID);
+ if (!kLeafOptimization) {
+ GenSpecialSuspendTest();
+ }
return_mir = mir;
break;
case kInlineOpNonWideConst: {
successful = true;
+ if (!kLeafOptimization) {
+ GenSpecialSuspendTest();
+ }
RegLocation rl_dest = GetReturn(ShortyToRegClass(cu_->shorty[0]));
GenPrintLabel(mir);
LoadConstant(rl_dest.reg, static_cast<int>(special.d.data));
@@ -333,13 +463,17 @@
}
GenSpecialExitSequence();
- core_spill_mask_ = 0;
- num_core_spills_ = 0;
- fp_spill_mask_ = 0;
- num_fp_spills_ = 0;
- frame_size_ = 0;
- core_vmap_table_.clear();
- fp_vmap_table_.clear();
+ if (!kLeafOptimization) {
+ HandleSlowPaths();
+ } else {
+ core_spill_mask_ = 0;
+ num_core_spills_ = 0;
+ fp_spill_mask_ = 0;
+ num_fp_spills_ = 0;
+ frame_size_ = 0;
+ core_vmap_table_.clear();
+ fp_vmap_table_.clear();
+ }
}
return successful;
@@ -1287,31 +1421,41 @@
InToRegStorageMapper* mapper) {
DCHECK(mapper != nullptr);
DCHECK(shorty != nullptr);
- max_mapped_in_ = -1;
- has_arguments_on_stack_ = false;
+ DCHECK(!IsInitialized());
+ DCHECK_EQ(end_mapped_in_, 0u);
+ DCHECK(!has_arguments_on_stack_);
while (shorty->Next()) {
ShortyArg arg = shorty->GetArg();
RegStorage reg = mapper->GetNextReg(arg);
+ mapping_.emplace_back(arg, reg);
+ if (arg.IsWide()) {
+ mapping_.emplace_back(ShortyArg(kInvalidShorty), RegStorage::InvalidReg());
+ }
if (reg.Valid()) {
- mapping_.Put(count_, reg);
- max_mapped_in_ = count_;
- // If the VR is wide and was mapped as wide then account for it.
- if (arg.IsWide() && reg.Is64Bit()) {
- max_mapped_in_++;
+ end_mapped_in_ = mapping_.size();
+ // If the VR is wide but wasn't mapped as wide then account for it.
+ if (arg.IsWide() && !reg.Is64Bit()) {
+ --end_mapped_in_;
}
} else {
has_arguments_on_stack_ = true;
}
- count_ += arg.IsWide() ? 2 : 1;
}
initialized_ = true;
}
-RegStorage Mir2Lir::InToRegStorageMapping::Get(int in_position) {
+RegStorage Mir2Lir::InToRegStorageMapping::GetReg(size_t in_position) {
DCHECK(IsInitialized());
- DCHECK_LT(in_position, count_);
- auto res = mapping_.find(in_position);
- return res != mapping_.end() ? res->second : RegStorage::InvalidReg();
+ DCHECK_LT(in_position, mapping_.size());
+ DCHECK_NE(mapping_[in_position].first.GetType(), kInvalidShorty);
+ return mapping_[in_position].second;
+}
+
+Mir2Lir::ShortyArg Mir2Lir::InToRegStorageMapping::GetShorty(size_t in_position) {
+ DCHECK(IsInitialized());
+ DCHECK_LT(in_position, mapping_.size());
+ DCHECK_NE(mapping_[in_position].first.GetType(), kInvalidShorty);
+ return mapping_[in_position].first;
}
} // namespace art
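
Note: the new SpecialSuspendCheckSlowPath gives special (frameless) methods a real suspend check: a conditional branch to out-of-line code that builds a frame, spills the argument registers, calls the runtime, restores only the args the fast path still needs, and branches back. A plain C++ stand-in for that control-flow shape, not the generated code:

    #include <atomic>
    #include <cstdio>

    std::atomic<bool> suspend_pending{false};

    void SuspendSlowPath(int preserved_arg) {
      // GenSpecialEntryForSuspend / SpillArg would run here in compiled code.
      std::printf("runtime suspend check; arg %d preserved\n", preserved_arg);
      // Then UnspillArg / GenSpecialExitForSuspend, and a branch back.
    }

    int SpecialGetter(int object_arg) {
      if (suspend_pending.load(std::memory_order_relaxed)) {  // OpTestSuspend.
        SuspendSlowPath(object_arg);  // PreserveArg(data.object_arg).
      }
      // Continuation label: the actual IGET fast path.
      return object_arg * 2;
    }

    int main() { return SpecialGetter(21) == 42 ? 0 : 1; }
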
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 88ca911..6f3f057 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -17,6 +17,9 @@
#ifndef ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
#define ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
+#include "base/arena_allocator.h"
+#include "base/arena_containers.h"
+#include "base/arena_object.h"
#include "compiled_method.h"
#include "dex/compiler_enums.h"
#include "dex/dex_flags.h"
@@ -29,9 +32,6 @@
#include "leb128.h"
#include "safe_map.h"
#include "utils/array_ref.h"
-#include "utils/arena_allocator.h"
-#include "utils/arena_containers.h"
-#include "utils/arena_object.h"
#include "utils/stack_checks.h"
namespace art {
@@ -146,7 +146,7 @@
uint32_t method_idx, uintptr_t direct_code,
uintptr_t direct_method, InvokeType type);
-typedef std::vector<uint8_t> CodeBuffer;
+typedef ArenaVector<uint8_t> CodeBuffer;
typedef uint32_t CodeOffset; // Native code offset in bytes.
struct UseDefMasks {
@@ -515,6 +515,9 @@
LIR* const cont_;
};
+ class SuspendCheckSlowPath;
+ class SpecialSuspendCheckSlowPath;
+
// Helper class for changing mem_ref_type_ until the end of current scope. See mem_ref_type_.
class ScopedMemRefType {
public:
@@ -1203,7 +1206,7 @@
}
}
- RegStorage GetArgMappingToPhysicalReg(int arg_num);
+ void EnsureInitializedArgMappingToPhysicalReg();
virtual RegLocation GetReturnAlt() = 0;
virtual RegLocation GetReturnWideAlt() = 0;
virtual RegLocation LocCReturn() = 0;
@@ -1570,6 +1573,16 @@
virtual void GenSpecialExitSequence() = 0;
/**
+ * @brief Used to generate stack frame for suspend path of special methods.
+ */
+ virtual void GenSpecialEntryForSuspend() = 0;
+
+ /**
+ * @brief Used to pop the stack frame for suspend path of special methods.
+ */
+ virtual void GenSpecialExitForSuspend() = 0;
+
+ /**
* @brief Used to generate code for special methods that are known to be
* small enough to work in frameless mode.
* @param bb The basic block of the first MIR.
@@ -1590,9 +1603,8 @@
* @brief Used to lock register if argument at in_position was passed that way.
* @details Does nothing if the argument is passed via stack.
* @param in_position The argument number whose register to lock.
- * @param wide Whether the argument is wide.
*/
- void LockArg(int in_position, bool wide = false);
+ void LockArg(size_t in_position);
/**
* @brief Used to load VR argument to a physical register.
@@ -1602,14 +1614,33 @@
* @param wide Whether the argument is 64-bit or not.
* @return Returns the register (or register pair) for the loaded argument.
*/
- RegStorage LoadArg(int in_position, RegisterClass reg_class, bool wide = false);
+ RegStorage LoadArg(size_t in_position, RegisterClass reg_class, bool wide = false);
/**
* @brief Used to load a VR argument directly to a specified register location.
* @param in_position The argument number to place in register.
* @param rl_dest The register location where to place argument.
*/
- void LoadArgDirect(int in_position, RegLocation rl_dest);
+ void LoadArgDirect(size_t in_position, RegLocation rl_dest);
+
+ /**
+ * @brief Used to spill register if argument at in_position was passed that way.
+ * @details Does nothing if the argument is passed via stack.
+ * @param in_position The argument number whose register to spill.
+ */
+ void SpillArg(size_t in_position);
+
+ /**
+ * @brief Used to unspill register if argument at in_position was passed that way.
+ * @details Does nothing if the argument is passed via stack.
+ * @param in_position The argument number whose register to unspill.
+ */
+ void UnspillArg(size_t in_position);
+
+ /**
+ * @brief Generate suspend test in a special method.
+ */
+ SpecialSuspendCheckSlowPath* GenSpecialSuspendTest();
/**
* @brief Used to generate LIR for special getter method.
@@ -1742,10 +1773,10 @@
// The source mapping table data (pc -> dex). More entries than in encoded_mapping_table_
DefaultSrcMap src_mapping_table_;
// The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
- std::vector<uint8_t> encoded_mapping_table_;
+ ArenaVector<uint8_t> encoded_mapping_table_;
ArenaVector<uint32_t> core_vmap_table_;
ArenaVector<uint32_t> fp_vmap_table_;
- std::vector<uint8_t> native_gc_map_;
+ ArenaVector<uint8_t> native_gc_map_;
ArenaVector<LinkerPatch> patches_;
int num_core_spills_;
int num_fp_spills_;
@@ -1802,21 +1833,22 @@
class InToRegStorageMapping {
public:
explicit InToRegStorageMapping(ArenaAllocator* arena)
- : mapping_(std::less<int>(), arena->Adapter()), count_(0),
- max_mapped_in_(0), has_arguments_on_stack_(false), initialized_(false) {}
+ : mapping_(arena->Adapter()),
+ end_mapped_in_(0u), has_arguments_on_stack_(false), initialized_(false) {}
void Initialize(ShortyIterator* shorty, InToRegStorageMapper* mapper);
/**
- * @return the index of last VR mapped to physical register. In other words
- * any VR starting from (return value + 1) index is mapped to memory.
+ * @return the past-the-end index of VRs mapped to physical registers.
+ * In other words any VR starting from this index is mapped to memory.
*/
- int GetMaxMappedIn() { return max_mapped_in_; }
+ size_t GetEndMappedIn() { return end_mapped_in_; }
bool HasArgumentsOnStack() { return has_arguments_on_stack_; }
- RegStorage Get(int in_position);
+ RegStorage GetReg(size_t in_position);
+ ShortyArg GetShorty(size_t in_position);
bool IsInitialized() { return initialized_; }
private:
- ArenaSafeMap<int, RegStorage> mapping_;
- int count_;
- int max_mapped_in_;
+ static constexpr char kInvalidShorty = '-';
+ ArenaVector<std::pair<ShortyArg, RegStorage>> mapping_;
+ size_t end_mapped_in_;
bool has_arguments_on_stack_;
bool initialized_;
};
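
Note: InToRegStorageMapping trades its ArenaSafeMap for a flat vector indexed by VR position, padding the high half of each wide argument with a kInvalidShorty entry. A sketch of the layout, with simplified stand-ins for ShortyArg and RegStorage:

    #include <cassert>
    #include <cstddef>
    #include <utility>
    #include <vector>

    constexpr char kInvalidShorty = '-';
    constexpr int kInvalidReg = -1;

    int main() {
      std::vector<std::pair<char, int>> mapping;  // (shorty char, register).
      // For e.g. foo(long j, int i): 'J' occupies two VR slots, 'I' one.
      mapping.emplace_back('J', 0);                       // Wide low half.
      mapping.emplace_back(kInvalidShorty, kInvalidReg);  // High-half padding.
      mapping.emplace_back('I', 1);
      // GetReg/GetShorty become O(1) indexing; the DCHECK_NE guards against
      // querying a padding entry.
      assert(mapping[2].second == 1);
      assert(mapping[1].first == kInvalidShorty);
      size_t end_mapped_in = mapping.size();  // All three slots in registers.
      assert(end_mapped_in == 3u);
    }
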
diff --git a/compiler/dex/quick/resource_mask.cc b/compiler/dex/quick/resource_mask.cc
index 8a27ecb..57e8af3 100644
--- a/compiler/dex/quick/resource_mask.cc
+++ b/compiler/dex/quick/resource_mask.cc
@@ -18,8 +18,8 @@
#include "resource_mask.h"
+#include "base/arena_allocator.h"
#include "base/logging.h"
-#include "utils/arena_allocator.h"
#include "utils.h"
namespace art {
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 284e8f6..f964691 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -272,6 +272,41 @@
NewLIR0(kX86Ret);
}
+void X86Mir2Lir::GenSpecialEntryForSuspend() {
+ // Keep 16-byte stack alignment, there's already the return address, so
+ // - for 32-bit push EAX, i.e. ArtMethod*, ESI, EDI,
+ // - for 64-bit push RAX, i.e. ArtMethod*.
+ if (!cu_->target64) {
+ DCHECK(!IsTemp(rs_rSI));
+ DCHECK(!IsTemp(rs_rDI));
+ core_spill_mask_ =
+ (1u << rs_rSI.GetRegNum()) | (1u << rs_rDI.GetRegNum()) | (1u << rs_rRET.GetRegNum());
+ num_core_spills_ = 3u;
+ } else {
+ core_spill_mask_ = (1u << rs_rRET.GetRegNum());
+ num_core_spills_ = 1u;
+ }
+ fp_spill_mask_ = 0u;
+ num_fp_spills_ = 0u;
+ frame_size_ = 16u;
+ core_vmap_table_.clear();
+ fp_vmap_table_.clear();
+ if (!cu_->target64) {
+ NewLIR1(kX86Push32R, rs_rDI.GetReg());
+ NewLIR1(kX86Push32R, rs_rSI.GetReg());
+ }
+ NewLIR1(kX86Push32R, TargetReg(kArg0, kRef).GetReg()); // ArtMethod*
+}
+
+void X86Mir2Lir::GenSpecialExitForSuspend() {
+ // Pop the frame. (ArtMethod* no longer needed but restore it anyway.)
+ NewLIR1(kX86Pop32R, TargetReg(kArg0, kRef).GetReg()); // ArtMethod*
+ if (!cu_->target64) {
+ NewLIR1(kX86Pop32R, rs_rSI.GetReg());
+ NewLIR1(kX86Pop32R, rs_rDI.GetReg());
+ }
+}
+
void X86Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
return;
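
Note: both x86 variants reach frame_size_ == 16 because the call instruction has already pushed a return address before the prologue runs. A sketch of the alignment arithmetic:

    #include <cassert>
    #include <cstddef>

    size_t FrameSize(bool target64) {
      size_t slot = target64 ? 8u : 4u;                   // Natural push width.
      size_t pushes = target64 ? 1u                       // RAX (ArtMethod*).
                               : 3u;                      // EDI, ESI, EAX.
      return slot /* return address */ + pushes * slot;
    }

    int main() {
      assert(FrameSize(/*target64=*/false) == 16u);
      assert(FrameSize(/*target64=*/true) == 16u);
    }
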
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index ca60400..20163b4 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -259,6 +259,8 @@
void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
void GenExitSequence() OVERRIDE;
void GenSpecialExitSequence() OVERRIDE;
+ void GenSpecialEntryForSuspend() OVERRIDE;
+ void GenSpecialExitForSuspend() OVERRIDE;
void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index c4adb09..8f97d1e 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1051,10 +1051,10 @@
}
for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
- PushWord(&code_buffer_, p->operands[0]);
- PushWord(&code_buffer_, p->operands[1]);
- PushWord(&code_buffer_, p->operands[2]);
- PushWord(&code_buffer_, p->operands[3]);
+ Push32(&code_buffer_, p->operands[0]);
+ Push32(&code_buffer_, p->operands[1]);
+ Push32(&code_buffer_, p->operands[2]);
+ Push32(&code_buffer_, p->operands[3]);
}
}
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index fcea77c..197f66d 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -16,9 +16,9 @@
#include "base/bit_vector-inl.h"
#include "base/logging.h"
+#include "base/scoped_arena_containers.h"
#include "compiler_ir.h"
#include "dataflow_iterator-inl.h"
-#include "utils/scoped_arena_containers.h"
#define NOTVISITED (-1)
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 2fca2e5..b756244 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -22,6 +22,7 @@
#include <vector>
#include "arch/instruction_set.h"
+#include "base/arena_allocator.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
#include "class_reference.h"
@@ -38,7 +39,6 @@
#include "runtime.h"
#include "safe_map.h"
#include "thread_pool.h"
-#include "utils/arena_allocator.h"
#include "utils/dedupe_set.h"
#include "utils/swap_space.h"
#include "utils.h"
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 9ec4f28..401d5a9 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -90,19 +90,19 @@
// Length (will be filled in later in this routine).
if (is_x86_64) {
- PushWord(cfi_info, 0xffffffff); // Indicates 64bit
- PushWord(cfi_info, 0);
- PushWord(cfi_info, 0);
+ Push32(cfi_info, 0xffffffff); // Indicates 64bit
+ Push32(cfi_info, 0);
+ Push32(cfi_info, 0);
} else {
- PushWord(cfi_info, 0);
+ Push32(cfi_info, 0);
}
// CIE id: always 0.
if (is_x86_64) {
- PushWord(cfi_info, 0);
- PushWord(cfi_info, 0);
+ Push32(cfi_info, 0);
+ Push32(cfi_info, 0);
} else {
- PushWord(cfi_info, 0);
+ Push32(cfi_info, 0);
}
// Version: always 1.
@@ -318,7 +318,7 @@
PushByte(data_, 0); // extended opcode:
PushByte(data_, 1 + 4); // length: opcode_size + address_size
PushByte(data_, DW_LNE_set_address);
- PushWord(data_, addr);
+ Push32(data_, addr);
}
void SetLine(unsigned line) {
@@ -507,13 +507,13 @@
// Start the debug_info section with the header information
// 'unit_length' will be filled in later.
int cunit_length = dbg_info->size();
- PushWord(dbg_info, 0);
+ Push32(dbg_info, 0);
// 'version' - 3.
PushHalf(dbg_info, 3);
// Offset into .debug_abbrev section (always 0).
- PushWord(dbg_info, 0);
+ Push32(dbg_info, 0);
// Address size: 4.
PushByte(dbg_info, 4);
@@ -523,7 +523,7 @@
PushByte(dbg_info, 1);
// The producer is Android dex2oat.
- PushWord(dbg_info, producer_str_offset);
+ Push32(dbg_info, producer_str_offset);
// The language is Java.
PushByte(dbg_info, DW_LANG_Java);
@@ -532,8 +532,8 @@
uint32_t cunit_low_pc = 0 - 1;
uint32_t cunit_high_pc = 0;
int cunit_low_pc_pos = dbg_info->size();
- PushWord(dbg_info, 0);
- PushWord(dbg_info, 0);
+ Push32(dbg_info, 0);
+ Push32(dbg_info, 0);
if (dbg_line == nullptr) {
for (size_t i = 0; i < method_info.size(); ++i) {
@@ -546,9 +546,9 @@
PushByte(dbg_info, 2);
// Enter name, low_pc, high_pc.
- PushWord(dbg_info, PushStr(dbg_str, dbg.method_name_));
- PushWord(dbg_info, dbg.low_pc_ + text_section_offset);
- PushWord(dbg_info, dbg.high_pc_ + text_section_offset);
+ Push32(dbg_info, PushStr(dbg_str, dbg.method_name_));
+ Push32(dbg_info, dbg.low_pc_ + text_section_offset);
+ Push32(dbg_info, dbg.high_pc_ + text_section_offset);
}
} else {
// TODO: in gdb info functions <regexp> - reports Java functions, but
@@ -559,15 +559,15 @@
// method ranges.
// Line number table offset
- PushWord(dbg_info, dbg_line->size());
+ Push32(dbg_info, dbg_line->size());
size_t lnt_length = dbg_line->size();
- PushWord(dbg_line, 0);
+ Push32(dbg_line, 0);
PushHalf(dbg_line, 4); // LNT Version DWARF v4 => 4
size_t lnt_hdr_length = dbg_line->size();
- PushWord(dbg_line, 0); // TODO: 64-bit uses 8-byte here
+ Push32(dbg_line, 0); // TODO: 64-bit uses 8-byte here
PushByte(dbg_line, 1); // minimum_instruction_length (ubyte)
PushByte(dbg_line, 1); // maximum_operations_per_instruction (ubyte) = always 1
@@ -629,9 +629,9 @@
PushByte(dbg_info, 2);
// Enter name, low_pc, high_pc.
- PushWord(dbg_info, PushStr(dbg_str, dbg.method_name_));
- PushWord(dbg_info, dbg.low_pc_ + text_section_offset);
- PushWord(dbg_info, dbg.high_pc_ + text_section_offset);
+ Push32(dbg_info, PushStr(dbg_str, dbg.method_name_));
+ Push32(dbg_info, dbg.low_pc_ + text_section_offset);
+ Push32(dbg_info, dbg.high_pc_ + text_section_offset);
GetLineInfoForJava(dbg.dbgstream_, dbg.compiled_method_->GetSrcMappingTable(),
&pc2java_map, dbg.low_pc_);
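
Note: the PushWord -> Push32 rename also covers the DWARF emission above, where the 64-bit format is announced by the 0xffffffff escape before an 8-byte length. A sketch of that initial-length convention, mirroring the hunks in this file:

    #include <cstdint>
    #include <vector>

    static void Push32(std::vector<uint8_t>* buf, uint32_t data) {
      for (int i = 0; i < 4; ++i) buf->push_back((data >> (8 * i)) & 0xff);
    }

    void PushInitialLength(std::vector<uint8_t>* cfi, bool is_64bit) {
      if (is_64bit) {
        Push32(cfi, 0xffffffff);  // Escape: an 8-byte length follows.
        Push32(cfi, 0);           // Length low word, patched in later.
        Push32(cfi, 0);           // Length high word.
      } else {
        Push32(cfi, 0);           // Plain 4-byte length, patched in later.
      }
    }

    int main() {
      std::vector<uint8_t> cfi;
      PushInitialLength(&cfi, /*is_64bit=*/true);
      return cfi.size() == 12u ? 0 : 1;
    }
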
diff --git a/compiler/gc_map_builder.h b/compiler/gc_map_builder.h
index bc8ad41..4c36ef7 100644
--- a/compiler/gc_map_builder.h
+++ b/compiler/gc_map_builder.h
@@ -26,15 +26,17 @@
class GcMapBuilder {
public:
- GcMapBuilder(std::vector<uint8_t>* table, size_t entries, uint32_t max_native_offset,
+ template <typename Alloc>
+ GcMapBuilder(std::vector<uint8_t, Alloc>* table, size_t entries, uint32_t max_native_offset,
size_t references_width)
: entries_(entries), references_width_(entries != 0u ? references_width : 0u),
native_offset_width_(entries != 0 && max_native_offset != 0
? sizeof(max_native_offset) - CLZ(max_native_offset) / 8u
: 0u),
- in_use_(entries), table_(table) {
+ in_use_(entries) {
// Resize table and set up header.
table->resize((EntryWidth() * entries) + sizeof(uint32_t));
+ table_ = table->data();
CHECK_LT(native_offset_width_, 1U << 3);
(*table)[0] = native_offset_width_ & 7;
CHECK_LT(references_width_, 1U << 13);
@@ -65,7 +67,7 @@
uint32_t native_offset = 0;
size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
for (size_t i = 0; i < native_offset_width_; i++) {
- native_offset |= (*table_)[table_offset + i] << (i * 8);
+ native_offset |= table_[table_offset + i] << (i * 8);
}
return native_offset;
}
@@ -73,13 +75,13 @@
void SetCodeOffset(size_t table_index, uint32_t native_offset) {
size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
for (size_t i = 0; i < native_offset_width_; i++) {
- (*table_)[table_offset + i] = (native_offset >> (i * 8)) & 0xFF;
+ table_[table_offset + i] = (native_offset >> (i * 8)) & 0xFF;
}
}
void SetReferences(size_t table_index, const uint8_t* references) {
size_t table_offset = (table_index * EntryWidth()) + sizeof(uint32_t);
- memcpy(&(*table_)[table_offset + native_offset_width_], references, references_width_);
+ memcpy(&table_[table_offset + native_offset_width_], references, references_width_);
}
size_t EntryWidth() const {
@@ -95,7 +97,7 @@
// Entries that are in use.
std::vector<bool> in_use_;
// The table we're building.
- std::vector<uint8_t>* const table_;
+ uint8_t* table_;
};
} // namespace art
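
Note: GcMapBuilder is now templated on the vector's allocator and caches a raw data() pointer instead of holding the vector. A sketch of why that pointer stays valid: the constructor performs the only resize, so the buffer never reallocates during the builder's lifetime:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    template <typename Alloc>
    class Builder {
     public:
      Builder(std::vector<uint8_t, Alloc>* table, size_t entries, size_t width) {
        table->resize(entries * width + sizeof(uint32_t));  // The only resize.
        table_ = table->data();  // Stable from here on: size never grows again.
      }
      void WriteByte(size_t offset, uint8_t value) { table_[offset] = value; }
     private:
      uint8_t* table_;
    };

    int main() {
      std::vector<uint8_t> table;  // Works for ArenaVector<uint8_t> too.
      Builder<std::allocator<uint8_t>> builder(&table, 2u, 4u);
      builder.WriteByte(4u, 0x2au);
      assert(table[4] == 0x2au);
    }
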
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index bcee563..ae9974d 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -14,9 +14,9 @@
* limitations under the License.
*/
+#include "base/arena_containers.h"
#include "bounds_check_elimination.h"
#include "nodes.h"
-#include "utils/arena_containers.h"
namespace art {
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index 662834a..17cb8f3 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/arena_allocator.h"
#include "bounds_check_elimination.h"
#include "builder.h"
#include "gvn.h"
@@ -21,7 +22,6 @@
#include "nodes.h"
#include "optimizing_unit_test.h"
#include "side_effects_analysis.h"
-#include "utils/arena_allocator.h"
#include "gtest/gtest.h"
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index c510136..3e4a616 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -17,13 +17,13 @@
#ifndef ART_COMPILER_OPTIMIZING_BUILDER_H_
#define ART_COMPILER_OPTIMIZING_BUILDER_H_
+#include "base/arena_object.h"
#include "dex_file.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
#include "optimizing_compiler_stats.h"
#include "primitive.h"
-#include "utils/arena_object.h"
#include "utils/growable_array.h"
#include "nodes.h"
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 5b395c8..2a57fdc 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -620,7 +620,7 @@
Location location = locations->GetEnvironmentAt(i);
switch (location.GetKind()) {
case Location::kConstant: {
- DCHECK(current == location.GetConstant());
+ DCHECK_EQ(current, location.GetConstant());
if (current->IsLongConstant()) {
int64_t value = current->AsLongConstant()->GetValue();
stack_map_stream_.AddDexRegisterEntry(DexRegisterMap::kConstant, Low32Bits(value));
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 6da1e61..0d7864f 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1397,7 +1397,13 @@
case Primitive::kPrimFloat:
case Primitive::kPrimDouble: {
locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetInAt(1, Location::RequiresFpuRegister());
+ HInstruction* right = compare->InputAt(1);
+ if ((right->IsFloatConstant() && (right->AsFloatConstant()->GetValue() == 0.0f)) ||
+ (right->IsDoubleConstant() && (right->AsDoubleConstant()->GetValue() == 0.0))) {
+ locations->SetInAt(1, Location::ConstantLocation(right->AsConstant()));
+ } else {
+ locations->SetInAt(1, Location::RequiresFpuRegister());
+ }
locations->SetOut(Location::RequiresRegister());
break;
}
@@ -1427,9 +1433,17 @@
case Primitive::kPrimDouble: {
Register result = OutputRegister(compare);
FPRegister left = InputFPRegisterAt(compare, 0);
- FPRegister right = InputFPRegisterAt(compare, 1);
-
- __ Fcmp(left, right);
+ if (compare->GetLocations()->InAt(1).IsConstant()) {
+ if (kIsDebugBuild) {
+ HInstruction* right = compare->GetLocations()->InAt(1).GetConstant();
+ DCHECK((right->IsFloatConstant() && (right->AsFloatConstant()->GetValue() == 0.0f)) ||
+ (right->IsDoubleConstant() && (right->AsDoubleConstant()->GetValue() == 0.0)));
+ }
+ // 0.0 is the only immediate that can be encoded directly in a FCMP instruction.
+ __ Fcmp(left, 0.0);
+ } else {
+ __ Fcmp(left, InputFPRegisterAt(compare, 1));
+ }
if (compare->IsGtBias()) {
__ Cset(result, ne);
} else {
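
Note: ARM64 FCMP has an immediate form only for #0.0, so the second compare input is kept as a constant location exactly when it is a floating-point zero, skipping an FP register for it. A sketch of the selection logic; the enum is a simplified stand-in for Location:

    #include <cassert>

    enum class Loc { kFpuRegister, kConstant };

    Loc SecondInputLocation(bool is_fp_constant, double value) {
      // Mirrors the LocationsBuilderARM64 check above; value == 0.0 is also
      // true for -0.0, which FCMP #0.0 treats as zero under IEEE rules.
      return (is_fp_constant && value == 0.0) ? Loc::kConstant
                                              : Loc::kFpuRegister;
    }

    int main() {
      assert(SecondInputLocation(true, 0.0) == Loc::kConstant);
      assert(SecondInputLocation(true, 1.5) == Loc::kFpuRegister);
      assert(SecondInputLocation(false, 0.0) == Loc::kFpuRegister);
    }
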
diff --git a/compiler/optimizing/dominator_test.cc b/compiler/optimizing/dominator_test.cc
index b246c6f..7623e42 100644
--- a/compiler/optimizing/dominator_test.cc
+++ b/compiler/optimizing/dominator_test.cc
@@ -14,11 +14,11 @@
* limitations under the License.
*/
+#include "base/arena_allocator.h"
#include "builder.h"
#include "dex_instruction.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
-#include "utils/arena_allocator.h"
#include "gtest/gtest.h"
diff --git a/compiler/optimizing/find_loops_test.cc b/compiler/optimizing/find_loops_test.cc
index e05d9b3..2bfecc6 100644
--- a/compiler/optimizing/find_loops_test.cc
+++ b/compiler/optimizing/find_loops_test.cc
@@ -14,13 +14,13 @@
* limitations under the License.
*/
+#include "base/arena_allocator.h"
#include "builder.h"
#include "dex_file.h"
#include "dex_instruction.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
#include "ssa_liveness_analysis.h"
-#include "utils/arena_allocator.h"
#include "pretty_printer.h"
#include "gtest/gtest.h"
diff --git a/compiler/optimizing/graph_test.cc b/compiler/optimizing/graph_test.cc
index c59f836..4742e4d 100644
--- a/compiler/optimizing/graph_test.cc
+++ b/compiler/optimizing/graph_test.cc
@@ -14,12 +14,12 @@
* limitations under the License.
*/
+#include "base/arena_allocator.h"
#include "base/stringprintf.h"
#include "builder.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
#include "pretty_printer.h"
-#include "utils/arena_allocator.h"
#include "gtest/gtest.h"
diff --git a/compiler/optimizing/gvn_test.cc b/compiler/optimizing/gvn_test.cc
index 4a48fee..a81d49a 100644
--- a/compiler/optimizing/gvn_test.cc
+++ b/compiler/optimizing/gvn_test.cc
@@ -14,12 +14,12 @@
* limitations under the License.
*/
+#include "base/arena_allocator.h"
#include "builder.h"
#include "gvn.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
#include "side_effects_analysis.h"
-#include "utils/arena_allocator.h"
#include "gtest/gtest.h"
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index eb27965..f22b7a7 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -16,6 +16,7 @@
#include <fstream>
+#include "base/arena_allocator.h"
#include "base/stringprintf.h"
#include "builder.h"
#include "code_generator.h"
@@ -29,7 +30,6 @@
#include "pretty_printer.h"
#include "ssa_builder.h"
#include "ssa_liveness_analysis.h"
-#include "utils/arena_allocator.h"
#include "gtest/gtest.h"
diff --git a/compiler/optimizing/live_interval_test.cc b/compiler/optimizing/live_interval_test.cc
index ac8759c..28000c1 100644
--- a/compiler/optimizing/live_interval_test.cc
+++ b/compiler/optimizing/live_interval_test.cc
@@ -14,9 +14,9 @@
* limitations under the License.
*/
+#include "base/arena_allocator.h"
#include "optimizing_unit_test.h"
#include "ssa_liveness_analysis.h"
-#include "utils/arena_allocator.h"
#include "gtest/gtest.h"
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index 0558b85..17914e8 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/arena_allocator.h"
#include "builder.h"
#include "code_generator.h"
#include "code_generator_x86.h"
@@ -24,7 +25,6 @@
#include "optimizing_unit_test.h"
#include "prepare_for_register_allocation.h"
#include "ssa_liveness_analysis.h"
-#include "utils/arena_allocator.h"
#include "gtest/gtest.h"
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index c9be570..907eff1 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/arena_allocator.h"
#include "builder.h"
#include "code_generator.h"
#include "code_generator_x86.h"
@@ -24,7 +25,6 @@
#include "optimizing_unit_test.h"
#include "prepare_for_register_allocation.h"
#include "ssa_liveness_analysis.h"
-#include "utils/arena_allocator.h"
#include "gtest/gtest.h"
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index ebca1cc..198cc15 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -17,10 +17,10 @@
#ifndef ART_COMPILER_OPTIMIZING_LOCATIONS_H_
#define ART_COMPILER_OPTIMIZING_LOCATIONS_H_
+#include "base/arena_object.h"
#include "base/bit_field.h"
#include "base/bit_vector.h"
#include "base/value_object.h"
-#include "utils/arena_object.h"
#include "utils/growable_array.h"
namespace art {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 4baea66..352403d 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_NODES_H_
#define ART_COMPILER_OPTIMIZING_NODES_H_
+#include "base/arena_object.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "handle.h"
#include "handle_scope.h"
@@ -25,7 +26,6 @@
#include "mirror/class.h"
#include "offsets.h"
#include "primitive.h"
-#include "utils/arena_object.h"
#include "utils/arena_bit_vector.h"
#include "utils/growable_array.h"
diff --git a/compiler/optimizing/nodes_test.cc b/compiler/optimizing/nodes_test.cc
index 5dbdc74..4cf22d3 100644
--- a/compiler/optimizing/nodes_test.cc
+++ b/compiler/optimizing/nodes_test.cc
@@ -14,8 +14,8 @@
* limitations under the License.
*/
+#include "base/arena_allocator.h"
#include "nodes.h"
-#include "utils/arena_allocator.h"
#include "gtest/gtest.h"
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 385a553..2fef8c7 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -19,6 +19,7 @@
#include <fstream>
#include <stdint.h>
+#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/timing_logger.h"
#include "bounds_check_elimination.h"
@@ -47,7 +48,6 @@
#include "ssa_phi_elimination.h"
#include "ssa_liveness_analysis.h"
#include "reference_type_propagation.h"
-#include "utils/arena_allocator.h"
namespace art {
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index bb7541d..44a3da2 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -14,9 +14,9 @@
* limitations under the License.
*/
+#include "base/arena_allocator.h"
#include "nodes.h"
#include "parallel_move_resolver.h"
-#include "utils/arena_allocator.h"
#include "gtest/gtest.h"
diff --git a/compiler/optimizing/pretty_printer_test.cc b/compiler/optimizing/pretty_printer_test.cc
index 9cf8235..293fde9 100644
--- a/compiler/optimizing/pretty_printer_test.cc
+++ b/compiler/optimizing/pretty_printer_test.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/arena_allocator.h"
#include "base/stringprintf.h"
#include "builder.h"
#include "dex_file.h"
@@ -21,7 +22,6 @@
#include "nodes.h"
#include "optimizing_unit_test.h"
#include "pretty_printer.h"
-#include "utils/arena_allocator.h"
#include "gtest/gtest.h"
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 0cc00c0..e5d06a9 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/arena_allocator.h"
#include "builder.h"
#include "code_generator.h"
#include "code_generator_x86.h"
@@ -25,7 +26,6 @@
#include "register_allocator.h"
#include "ssa_liveness_analysis.h"
#include "ssa_phi_elimination.h"
-#include "utils/arena_allocator.h"
#include "gtest/gtest.h"
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index 7e90b37..7fc1ec6 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/arena_allocator.h"
#include "base/stringprintf.h"
#include "builder.h"
#include "dex_file.h"
@@ -22,7 +23,6 @@
#include "optimizing_unit_test.h"
#include "pretty_printer.h"
#include "ssa_builder.h"
-#include "utils/arena_allocator.h"
#include "gtest/gtest.h"
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 3974e53..5283d5d 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -166,18 +166,23 @@
stack_map.SetStackMask(*entry.sp_mask);
}
- // Set the register map.
- MemoryRegion register_region = dex_register_maps_region.Subregion(
- next_dex_register_map_offset,
- DexRegisterMap::kFixedSize + entry.num_dex_registers * DexRegisterMap::SingleEntrySize());
- next_dex_register_map_offset += register_region.size();
- DexRegisterMap dex_register_map(register_region);
- stack_map.SetDexRegisterMapOffset(register_region.start() - memory_start);
+ if (entry.num_dex_registers != 0) {
+ // Set the register map.
+ MemoryRegion register_region = dex_register_maps_region.Subregion(
+ next_dex_register_map_offset,
+ DexRegisterMap::kFixedSize
+ + entry.num_dex_registers * DexRegisterMap::SingleEntrySize());
+ next_dex_register_map_offset += register_region.size();
+ DexRegisterMap dex_register_map(register_region);
+ stack_map.SetDexRegisterMapOffset(register_region.start() - memory_start);
- for (size_t j = 0; j < entry.num_dex_registers; ++j) {
- DexRegisterEntry register_entry =
- dex_register_maps_.Get(j + entry.dex_register_maps_start_index);
- dex_register_map.SetRegisterInfo(j, register_entry.kind, register_entry.value);
+ for (size_t j = 0; j < entry.num_dex_registers; ++j) {
+ DexRegisterEntry register_entry =
+ dex_register_maps_.Get(j + entry.dex_register_maps_start_index);
+ dex_register_map.SetRegisterInfo(j, register_entry.kind, register_entry.value);
+ }
+ } else {
+ stack_map.SetDexRegisterMapOffset(StackMap::kNoDexRegisterMap);
}
// Set the inlining info.
@@ -196,7 +201,7 @@
inline_info.SetMethodReferenceIndexAtDepth(j, inline_entry.method_index);
}
} else {
- stack_map.SetInlineDescriptorOffset(InlineInfo::kNoInlineInfo);
+ stack_map.SetInlineDescriptorOffset(StackMap::kNoInlineInfo);
}
}
}
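
The hunk above makes the Dex register map optional: a stack map with no Dex registers now stores the StackMap::kNoDexRegisterMap sentinel instead of claiming space in the register map region. A minimal self-contained sketch of the same encode/check convention (Entry, kNoPayload and HasPayload are illustrative names, not the ART API):

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative stand-ins; the real code stores the offset inside the
// serialized StackMap and the payload in a shared MemoryRegion.
constexpr uint32_t kNoPayload = static_cast<uint32_t>(-1);

struct Entry {
  uint32_t payload_offset = kNoPayload;  // kNoPayload means "absent".
  bool HasPayload() const { return payload_offset != kNoPayload; }
};

int main() {
  std::vector<uint8_t> payload_region(16);  // shared region for all maps
  Entry with_registers;
  Entry without_registers;

  // Writer side: only entries that actually have registers claim space.
  with_registers.payload_offset = 0;

  // Reader side: test the sentinel before computing a subregion, which
  // is what the HasDexRegisterMap() guard exercised just below enforces.
  assert(with_registers.HasPayload());
  assert(!without_registers.HasPayload());
  return 0;
}
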
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 5ee6ae0..744fb45 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -61,6 +61,7 @@
MemoryRegion stack_mask = stack_map.GetStackMask();
ASSERT_TRUE(SameBits(stack_mask, sp_mask));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap());
DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, 2);
ASSERT_EQ(DexRegisterMap::kInStack, dex_registers.GetLocationKind(0));
ASSERT_EQ(DexRegisterMap::kConstant, dex_registers.GetLocationKind(1));
@@ -107,6 +108,7 @@
MemoryRegion stack_mask = stack_map.GetStackMask();
ASSERT_TRUE(SameBits(stack_mask, sp_mask1));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap());
DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, 2);
ASSERT_EQ(DexRegisterMap::kInStack, dex_registers.GetLocationKind(0));
ASSERT_EQ(DexRegisterMap::kConstant, dex_registers.GetLocationKind(1));
diff --git a/compiler/utils/arena_allocator_test.cc b/compiler/utils/arena_allocator_test.cc
index 7156540..7065527 100644
--- a/compiler/utils/arena_allocator_test.cc
+++ b/compiler/utils/arena_allocator_test.cc
@@ -14,8 +14,8 @@
* limitations under the License.
*/
+#include "base/arena_allocator.h"
#include "gtest/gtest.h"
-#include "utils/arena_allocator.h"
#include "utils/arena_bit_vector.h"
namespace art {
diff --git a/compiler/utils/arena_bit_vector.cc b/compiler/utils/arena_bit_vector.cc
index f17e5a9..ddc0c81 100644
--- a/compiler/utils/arena_bit_vector.cc
+++ b/compiler/utils/arena_bit_vector.cc
@@ -14,9 +14,10 @@
* limitations under the License.
*/
-#include "arena_allocator.h"
#include "arena_bit_vector.h"
+#include "base/arena_allocator.h"
+
namespace art {
template <typename ArenaAlloc>
diff --git a/compiler/utils/arena_bit_vector.h b/compiler/utils/arena_bit_vector.h
index e5e1b70..f2a7452 100644
--- a/compiler/utils/arena_bit_vector.h
+++ b/compiler/utils/arena_bit_vector.h
@@ -17,7 +17,7 @@
#ifndef ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
#define ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
-#include "arena_object.h"
+#include "base/arena_object.h"
#include "base/bit_vector.h"
namespace art {
diff --git a/compiler/utils/dwarf_cfi.cc b/compiler/utils/dwarf_cfi.cc
index 83e5f5a..a7e09c6 100644
--- a/compiler/utils/dwarf_cfi.cc
+++ b/compiler/utils/dwarf_cfi.cc
@@ -37,7 +37,7 @@
} else {
// Four byte delta.
buf->push_back(0x04);
- PushWord(buf, increment);
+ Push32(buf, increment);
}
}
@@ -68,35 +68,35 @@
void WriteFDEHeader(std::vector<uint8_t>* buf, bool is_64bit) {
// 'length' (filled in by other functions).
if (is_64bit) {
- PushWord(buf, 0xffffffff); // Indicates 64bit
- PushWord(buf, 0);
- PushWord(buf, 0);
+ Push32(buf, 0xffffffff); // Indicates 64bit
+ Push32(buf, 0);
+ Push32(buf, 0);
} else {
- PushWord(buf, 0);
+ Push32(buf, 0);
}
// 'CIE_pointer' (filled in by linker).
if (is_64bit) {
- PushWord(buf, 0);
- PushWord(buf, 0);
+ Push32(buf, 0);
+ Push32(buf, 0);
} else {
- PushWord(buf, 0);
+ Push32(buf, 0);
}
// 'initial_location' (filled in by linker).
if (is_64bit) {
- PushWord(buf, 0);
- PushWord(buf, 0);
+ Push32(buf, 0);
+ Push32(buf, 0);
} else {
- PushWord(buf, 0);
+ Push32(buf, 0);
}
// 'address_range' (filled in by other functions).
if (is_64bit) {
- PushWord(buf, 0);
- PushWord(buf, 0);
+ Push32(buf, 0);
+ Push32(buf, 0);
} else {
- PushWord(buf, 0);
+ Push32(buf, 0);
}
// Augmentation length: 0
diff --git a/compiler/utils/growable_array.h b/compiler/utils/growable_array.h
index fd43ea6..821e28b 100644
--- a/compiler/utils/growable_array.h
+++ b/compiler/utils/growable_array.h
@@ -20,7 +20,7 @@
#include <stdint.h>
#include <stddef.h>
-#include "arena_object.h"
+#include "base/arena_object.h"
namespace art {
diff --git a/compiler/utils/swap_space.h b/compiler/utils/swap_space.h
index 2d0d77a..1f8f5da 100644
--- a/compiler/utils/swap_space.h
+++ b/compiler/utils/swap_space.h
@@ -23,12 +23,12 @@
#include <stdint.h>
#include <stddef.h>
+#include "base/debug_stack.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "mem_map.h"
#include "utils.h"
-#include "utils/debug_stack.h"
namespace art {
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 931cca7..11ccafb 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -764,7 +764,7 @@
oat_method.GetVmapTableOffsetOffset());
success = false;
} else if (options_->dump_vmap_) {
- DumpVmap(*indent2_os, oat_method);
+ DumpVmapData(*indent2_os, oat_method, code_item);
}
}
{
@@ -869,39 +869,89 @@
os << ")";
}
- void DumpVmap(std::ostream& os, const OatFile::OatMethod& oat_method) {
- // If the native GC map is null, then this method has been compiled with the
- // optimizing compiler. The optimizing compiler currently outputs its stack map
- // in the vmap table, and the code below does not work with such a stack map.
+ // Display data stored at the vmap offset of an oat method.
+ void DumpVmapData(std::ostream& os,
+ const OatFile::OatMethod& oat_method,
+ const DexFile::CodeItem* code_item) {
if (oat_method.GetGcMap() == nullptr) {
- return;
+ // If the native GC map is null, then this method has been
+ // compiled with the optimizing compiler. The optimizing
+ // compiler currently outputs its stack maps in the vmap table.
+ const void* raw_code_info = oat_method.GetVmapTable();
+ if (raw_code_info != nullptr) {
+ CodeInfo code_info(raw_code_info);
+ DCHECK(code_item != nullptr);
+ DumpCodeInfo(os, code_info, *code_item);
+ }
+ } else {
+ // Otherwise, display the vmap table.
+ const uint8_t* raw_table = oat_method.GetVmapTable();
+ if (raw_table != nullptr) {
+ VmapTable vmap_table(raw_table);
+ DumpVmapTable(os, oat_method, vmap_table);
+ }
}
- const uint8_t* raw_table = oat_method.GetVmapTable();
- if (raw_table != nullptr) {
- const VmapTable vmap_table(raw_table);
- bool first = true;
- bool processing_fp = false;
- uint32_t spill_mask = oat_method.GetCoreSpillMask();
- for (size_t i = 0; i < vmap_table.Size(); i++) {
- uint16_t dex_reg = vmap_table[i];
- uint32_t cpu_reg = vmap_table.ComputeRegister(spill_mask, i,
- processing_fp ? kFloatVReg : kIntVReg);
- os << (first ? "v" : ", v") << dex_reg;
- if (!processing_fp) {
- os << "/r" << cpu_reg;
- } else {
- os << "/fr" << cpu_reg;
- }
- first = false;
- if (!processing_fp && dex_reg == 0xFFFF) {
- processing_fp = true;
- spill_mask = oat_method.GetFpSpillMask();
+ }
+
+ // Display a CodeInfo object emitted by the optimizing compiler.
+ void DumpCodeInfo(std::ostream& os,
+ const CodeInfo& code_info,
+ const DexFile::CodeItem& code_item) {
+ uint16_t number_of_dex_registers = code_item.registers_size_;
+ uint32_t code_info_size = code_info.GetOverallSize();
+ size_t number_of_stack_maps = code_info.GetNumberOfStackMaps();
+ os << " Optimized CodeInfo (size=" << code_info_size
+ << ", number_of_dex_registers=" << number_of_dex_registers
+ << ", number_of_stack_maps=" << number_of_stack_maps << ")\n";
+ for (size_t i = 0; i < number_of_stack_maps; ++i) {
+ StackMap stack_map = code_info.GetStackMapAt(i);
+ // TODO: Display stack_mask value.
+ os << " StackMap " << i
+ << std::hex
+ << " (dex_pc=0x" << stack_map.GetDexPc()
+ << ", native_pc_offset=0x" << stack_map.GetNativePcOffset()
+ << ", register_mask=0x" << stack_map.GetRegisterMask()
+ << std::dec
+ << ")\n";
+ if (stack_map.HasDexRegisterMap()) {
+ DexRegisterMap dex_register_map =
+ code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+ for (size_t j = 0; j < number_of_dex_registers; ++j) {
+ os << " v" << j << ": "
+ << DexRegisterMap::PrettyDescriptor(dex_register_map.GetLocationKind(j))
+ << " (" << dex_register_map.GetValue(j) << ")\n";
}
}
- os << "\n";
+ // TODO: Display more information from code_info.
}
}
+ // Display a vmap table.
+ void DumpVmapTable(std::ostream& os,
+ const OatFile::OatMethod& oat_method,
+ const VmapTable& vmap_table) {
+ bool first = true;
+ bool processing_fp = false;
+ uint32_t spill_mask = oat_method.GetCoreSpillMask();
+ for (size_t i = 0; i < vmap_table.Size(); i++) {
+ uint16_t dex_reg = vmap_table[i];
+ uint32_t cpu_reg = vmap_table.ComputeRegister(spill_mask, i,
+ processing_fp ? kFloatVReg : kIntVReg);
+ os << (first ? "v" : ", v") << dex_reg;
+ if (!processing_fp) {
+ os << "/r" << cpu_reg;
+ } else {
+ os << "/fr" << cpu_reg;
+ }
+ first = false;
+ if (!processing_fp && dex_reg == 0xFFFF) {
+ processing_fp = true;
+ spill_mask = oat_method.GetFpSpillMask();
+ }
+ }
+ os << "\n";
+ }
+
void DumpVregLocations(std::ostream& os, const OatFile::OatMethod& oat_method,
const DexFile::CodeItem* code_item) {
if (code_item != nullptr) {
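
One detail worth noting in DumpCodeInfo above: std::hex is a sticky stream manipulator, so the code restores std::dec before any later decimal output. A tiny standalone reproduction of that formatting pattern:

#include <cstdint>
#include <iostream>

int main() {
  uint32_t dex_pc = 26, native_pc_offset = 0x44, register_mask = 0x3;
  std::cout << "StackMap 0"
            << std::hex                 // switch to hexadecimal
            << " (dex_pc=0x" << dex_pc
            << ", native_pc_offset=0x" << native_pc_offset
            << ", register_mask=0x" << register_mask
            << std::dec                 // restore decimal for later fields
            << ")\n";
  std::cout << "registers: " << 2 << "\n";  // printed in decimal again
  return 0;
}
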
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 4714610..c647cc2 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -22,10 +22,12 @@
atomic.cc.arm \
barrier.cc \
base/allocator.cc \
+ base/arena_allocator.cc \
base/bit_vector.cc \
base/hex_dump.cc \
base/logging.cc \
base/mutex.cc \
+ base/scoped_arena_allocator.cc \
base/scoped_flock.cc \
base/stringpiece.cc \
base/stringprintf.cc \
diff --git a/compiler/utils/arena_allocator.cc b/runtime/base/arena_allocator.cc
similarity index 98%
rename from compiler/utils/arena_allocator.cc
rename to runtime/base/arena_allocator.cc
index a80ad93..b3f812e 100644
--- a/compiler/utils/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -19,8 +19,8 @@
#include <numeric>
#include "arena_allocator.h"
-#include "base/logging.h"
-#include "base/mutex.h"
+#include "logging.h"
+#include "mutex.h"
#include "thread-inl.h"
#include <memcheck/memcheck.h>
diff --git a/compiler/utils/arena_allocator.h b/runtime/base/arena_allocator.h
similarity index 95%
rename from compiler/utils/arena_allocator.h
rename to runtime/base/arena_allocator.h
index e730fd7..9237391 100644
--- a/compiler/utils/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -14,17 +14,17 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_UTILS_ARENA_ALLOCATOR_H_
-#define ART_COMPILER_UTILS_ARENA_ALLOCATOR_H_
+#ifndef ART_RUNTIME_BASE_ARENA_ALLOCATOR_H_
+#define ART_RUNTIME_BASE_ARENA_ALLOCATOR_H_
#include <stdint.h>
#include <stddef.h>
-#include "base/macros.h"
-#include "base/mutex.h"
+#include "debug_stack.h"
+#include "macros.h"
#include "mem_map.h"
+#include "mutex.h"
#include "utils.h"
-#include "utils/debug_stack.h"
namespace art {
@@ -180,7 +180,7 @@
if (UNLIKELY(running_on_valgrind_)) {
return AllocValgrind(bytes, kind);
}
- bytes = RoundUp(bytes, 8);
+ bytes = RoundUp(bytes, kAlignment);
if (UNLIKELY(ptr_ + bytes > end_)) {
// Obtain a new block.
ObtainNewArenaForAllocation(bytes);
@@ -205,6 +205,8 @@
MemStats GetMemStats() const;
private:
+ static constexpr size_t kAlignment = 8;
+
void UpdateBytesAllocated();
ArenaPool* pool_;
@@ -235,4 +237,4 @@
} // namespace art
-#endif // ART_COMPILER_UTILS_ARENA_ALLOCATOR_H_
+#endif // ART_RUNTIME_BASE_ARENA_ALLOCATOR_H_
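
The fast path above now rounds allocation sizes to a named kAlignment constant instead of the bare literal 8. ART's RoundUp lives elsewhere in runtime/utils.h; the sketch below uses a local power-of-two stand-in to show the arithmetic the fast path relies on:

#include <cassert>
#include <cstddef>

// Power-of-two round-up: add n-1, then clear the low bits.
constexpr size_t RoundUp(size_t x, size_t n) {
  return (x + n - 1) & ~(n - 1);
}

int main() {
  constexpr size_t kAlignment = 8;  // mirrors the constant added above
  assert(RoundUp(1, kAlignment) == 8);
  assert(RoundUp(8, kAlignment) == 8);
  assert(RoundUp(9, kAlignment) == 16);
  return 0;
}
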
diff --git a/compiler/utils/arena_containers.h b/runtime/base/arena_containers.h
similarity index 96%
rename from compiler/utils/arena_containers.h
rename to runtime/base/arena_containers.h
index a7a7438..162eb16 100644
--- a/compiler/utils/arena_containers.h
+++ b/runtime/base/arena_containers.h
@@ -14,15 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_UTILS_ARENA_CONTAINERS_H_
-#define ART_COMPILER_UTILS_ARENA_CONTAINERS_H_
+#ifndef ART_RUNTIME_BASE_ARENA_CONTAINERS_H_
+#define ART_RUNTIME_BASE_ARENA_CONTAINERS_H_
#include <deque>
#include <queue>
#include <set>
#include <vector>
-#include "utils/arena_allocator.h"
+#include "arena_allocator.h"
#include "safe_map.h"
namespace art {
@@ -203,4 +203,4 @@
} // namespace art
-#endif // ART_COMPILER_UTILS_ARENA_CONTAINERS_H_
+#endif // ART_RUNTIME_BASE_ARENA_CONTAINERS_H_
diff --git a/compiler/utils/arena_object.h b/runtime/base/arena_object.h
similarity index 91%
rename from compiler/utils/arena_object.h
rename to runtime/base/arena_object.h
index d64c419..ab97d0c 100644
--- a/compiler/utils/arena_object.h
+++ b/runtime/base/arena_object.h
@@ -14,10 +14,10 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_UTILS_ARENA_OBJECT_H_
-#define ART_COMPILER_UTILS_ARENA_OBJECT_H_
+#ifndef ART_RUNTIME_BASE_ARENA_OBJECT_H_
+#define ART_RUNTIME_BASE_ARENA_OBJECT_H_
-#include "arena_allocator.h"
+#include "base/arena_allocator.h"
#include "base/logging.h"
#include "scoped_arena_allocator.h"
@@ -64,4 +64,4 @@
} // namespace art
-#endif // ART_COMPILER_UTILS_ARENA_OBJECT_H_
+#endif // ART_RUNTIME_BASE_ARENA_OBJECT_H_
diff --git a/compiler/utils/debug_stack.h b/runtime/base/debug_stack.h
similarity index 96%
rename from compiler/utils/debug_stack.h
rename to runtime/base/debug_stack.h
index 1bb0624..03f4575 100644
--- a/compiler/utils/debug_stack.h
+++ b/runtime/base/debug_stack.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_UTILS_DEBUG_STACK_H_
-#define ART_COMPILER_UTILS_DEBUG_STACK_H_
+#ifndef ART_RUNTIME_BASE_DEBUG_STACK_H_
+#define ART_RUNTIME_BASE_DEBUG_STACK_H_
#include "base/logging.h"
#include "base/macros.h"
@@ -135,4 +135,4 @@
} // namespace art
-#endif // ART_COMPILER_UTILS_DEBUG_STACK_H_
+#endif // ART_RUNTIME_BASE_DEBUG_STACK_H_
diff --git a/compiler/utils/scoped_arena_allocator.cc b/runtime/base/scoped_arena_allocator.cc
similarity index 98%
rename from compiler/utils/scoped_arena_allocator.cc
rename to runtime/base/scoped_arena_allocator.cc
index d9e0619..4a7be38 100644
--- a/compiler/utils/scoped_arena_allocator.cc
+++ b/runtime/base/scoped_arena_allocator.cc
@@ -16,7 +16,7 @@
#include "scoped_arena_allocator.h"
-#include "utils/arena_allocator.h"
+#include "arena_allocator.h"
#include <memcheck/memcheck.h>
namespace art {
diff --git a/compiler/utils/scoped_arena_allocator.h b/runtime/base/scoped_arena_allocator.h
similarity index 93%
rename from compiler/utils/scoped_arena_allocator.h
rename to runtime/base/scoped_arena_allocator.h
index c46acbc..bbedeac 100644
--- a/compiler/utils/scoped_arena_allocator.h
+++ b/runtime/base/scoped_arena_allocator.h
@@ -14,14 +14,14 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_UTILS_SCOPED_ARENA_ALLOCATOR_H_
-#define ART_COMPILER_UTILS_SCOPED_ARENA_ALLOCATOR_H_
+#ifndef ART_RUNTIME_BASE_SCOPED_ARENA_ALLOCATOR_H_
+#define ART_RUNTIME_BASE_SCOPED_ARENA_ALLOCATOR_H_
-#include "base/logging.h"
-#include "base/macros.h"
-#include "utils/arena_allocator.h"
-#include "utils/debug_stack.h"
+#include "arena_allocator.h"
+#include "debug_stack.h"
#include "globals.h"
+#include "logging.h"
+#include "macros.h"
namespace art {
@@ -147,4 +147,4 @@
} // namespace art
-#endif // ART_COMPILER_UTILS_SCOPED_ARENA_ALLOCATOR_H_
+#endif // ART_RUNTIME_BASE_SCOPED_ARENA_ALLOCATOR_H_
diff --git a/compiler/utils/scoped_arena_containers.h b/runtime/base/scoped_arena_containers.h
similarity index 95%
rename from compiler/utils/scoped_arena_containers.h
rename to runtime/base/scoped_arena_containers.h
index df93b27..664a909 100644
--- a/compiler/utils/scoped_arena_containers.h
+++ b/runtime/base/scoped_arena_containers.h
@@ -14,16 +14,16 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_UTILS_SCOPED_ARENA_CONTAINERS_H_
-#define ART_COMPILER_UTILS_SCOPED_ARENA_CONTAINERS_H_
+#ifndef ART_RUNTIME_BASE_SCOPED_ARENA_CONTAINERS_H_
+#define ART_RUNTIME_BASE_SCOPED_ARENA_CONTAINERS_H_
#include <deque>
#include <queue>
#include <set>
#include <vector>
-#include "utils/arena_containers.h" // For ArenaAllocatorAdapterKind.
-#include "utils/scoped_arena_allocator.h"
+#include "arena_containers.h" // For ArenaAllocatorAdapterKind.
+#include "scoped_arena_allocator.h"
#include "safe_map.h"
namespace art {
@@ -190,4 +190,4 @@
} // namespace art
-#endif // ART_COMPILER_UTILS_SCOPED_ARENA_CONTAINERS_H_
+#endif // ART_RUNTIME_BASE_SCOPED_ARENA_CONTAINERS_H_
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 6ec0949..7db1d72 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -393,7 +393,7 @@
bool CheckNonHeap(JavaVMExt* vm, bool entry, const char* fmt, JniValueType* args) {
bool should_trace = (flags_ & kFlag_ForceTrace) != 0;
- if (!should_trace && vm->IsTracingEnabled()) {
+ if (!should_trace && vm != nullptr && vm->IsTracingEnabled()) {
// We need to guard some of the invocation interface's calls: a bad caller might
// use DetachCurrentThread or GetEnv on a thread that's not yet attached.
Thread* self = Thread::Current();
@@ -3630,7 +3630,9 @@
sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), true, "v", args);
JniValueType result;
result.i = BaseVm(vm)->DestroyJavaVM(vm);
- sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), false, "i", &result);
+ // Use null to signal that the JavaVM isn't valid anymore. DestroyJavaVM deletes the runtime,
+ // which will delete the JavaVMExt.
+ sc.CheckNonHeap(nullptr, false, "i", &result);
return result.i;
}
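
The convention adopted above, passing nullptr for the JavaVM once DestroyJavaVM has freed it, means every consumer of the pointer must tolerate null. A small sketch of that guard (VmLike and the function names are illustrative, not the ART API):

#include <cstdio>

struct VmLike {
  bool tracing_enabled = false;
};

// Mirrors the vm != nullptr check added to CheckNonHeap above.
void CheckAfterCall(VmLike* vm) {
  if (vm != nullptr && vm->tracing_enabled) {
    std::puts("tracing call exit");
  }
}

int main() {
  VmLike vm;
  CheckAfterCall(&vm);      // ordinary call: the VM is still alive
  CheckAfterCall(nullptr);  // post-DestroyJavaVM: signal "no valid VM"
  return 0;
}
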
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f554c61..ee66b49 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -157,8 +157,8 @@
}
bool operator() (const FieldGap& lhs, const FieldGap& rhs)
NO_THREAD_SAFETY_ANALYSIS {
- // Sort by gap size, largest first.
- return lhs.size > rhs.size;
+ // Sort by gap size, largest first. Secondary sort by starting offset.
+ return lhs.size > rhs.size || (lhs.size == rhs.size && lhs.start_offset < rhs.start_offset);
}
};
typedef std::priority_queue<FieldGap, std::vector<FieldGap>, FieldGapsComparator> FieldGaps;
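
With only the primary key, equal-sized field gaps compared as equivalent and their relative order depended on the container; the secondary key makes the ordering total over distinct gaps, so field layout becomes deterministic. A self-contained sketch (FieldGap's members inferred from the hunk) showing the ordering the comparator induces under std::sort:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct FieldGap {
  uint32_t start_offset;
  uint32_t size;
};

// Same shape as the comparator above: bigger gaps order first,
// ties broken by ascending start offset.
bool GapLess(const FieldGap& lhs, const FieldGap& rhs) {
  return lhs.size > rhs.size ||
         (lhs.size == rhs.size && lhs.start_offset < rhs.start_offset);
}

int main() {
  std::vector<FieldGap> gaps = {{16, 4}, {4, 2}, {8, 4}};
  std::sort(gaps.begin(), gaps.end(), GapLess);
  for (const FieldGap& g : gaps) {
    std::cout << "offset=" << g.start_offset << " size=" << g.size << "\n";
  }
  // Prints offset=8 size=4, offset=16 size=4, offset=4 size=2:
  // the two size-4 gaps now have a fixed relative order.
  return 0;
}
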
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index db51264..5ea9f70 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -198,7 +198,7 @@
}
if (!error_msg.empty()) {
- LOG(ERROR) << error_msg;
+ LOG(WARNING) << error_msg;
CHECK(self->IsExceptionPending());
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 419d555..452980c 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3154,8 +3154,12 @@
}
void Heap::RequestConcurrentGC(Thread* self) {
- if (CanAddHeapTask(self) &&
- concurrent_gc_pending_.CompareExchangeStrongSequentiallyConsistent(false, true)) {
+ // If we don't have a started runtime, then we don't have a thread which is running the heap
+ // tasks. In this case, do the GC in the allocating thread to ensure that memory gets freed.
+ if (!Runtime::Current()->IsFinishedStarting()) {
+ CollectGarbageInternal(collector::kGcTypeFull, kGcCauseForAlloc, false);
+ } else if (CanAddHeapTask(self) &&
+ concurrent_gc_pending_.CompareExchangeStrongSequentiallyConsistent(false, true)) {
task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime())); // Start straight away.
}
}
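
The shape of the heap change above, running the work inline when the background task thread is not up yet and enqueueing it otherwise, is easy to isolate. A hedged sketch with illustrative names (not the ART task-processor API):

#include <atomic>
#include <iostream>

std::atomic<bool> runtime_started{false};

void CollectInline() { std::cout << "GC run on the allocating thread\n"; }
void EnqueueGCTask() { std::cout << "GC task queued for the heap thread\n"; }

// Mirrors RequestConcurrentGC(): without a started runtime there is no
// thread to service queued tasks, so the work must happen inline.
void RequestGC() {
  if (!runtime_started.load()) {
    CollectInline();
  } else {
    EnqueueGCTask();
  }
}

int main() {
  RequestGC();                  // during startup: inline collection
  runtime_started.store(true);
  RequestGC();                  // after startup: queued as usual
  return 0;
}
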
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 071997f..261d3c2 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -646,6 +646,20 @@
return nullptr;
}
+ // Check that the file is large enough.
+ uint64_t image_file_size = static_cast<uint64_t>(file->GetLength());
+ if (image_header.GetImageSize() > image_file_size) {
+ *error_msg = StringPrintf("Image file too small for image heap: %" PRIu64 " vs. %zu.",
+ image_file_size, image_header.GetImageSize());
+ return nullptr;
+ }
+ if (image_header.GetBitmapOffset() + image_header.GetImageBitmapSize() != image_file_size) {
+ *error_msg = StringPrintf("Image file too small for image bitmap: %" PRIu64 " vs. %zu.",
+ image_file_size,
+ image_header.GetBitmapOffset() + image_header.GetImageBitmapSize());
+ return nullptr;
+ }
+
// Note: The image header is part of the image due to mmap page alignment required of offset.
std::unique_ptr<MemMap> map(MemMap::MapFileAtAddress(image_header.GetImageBegin(),
image_header.GetImageSize(),
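
The two checks added above can be read as a pair of invariants: the image heap must fit inside the file, and the image bitmap must end exactly at the end of the file. A simplified sketch with an illustrative header struct (field names assumed, not the real ImageHeader):

#include <cstdint>
#include <string>

struct ImageHeaderLike {
  uint64_t image_size;     // size of the image heap
  uint64_t bitmap_offset;  // where the image bitmap starts
  uint64_t bitmap_size;    // size of the image bitmap
};

// Returns false and fills *error_msg if the file cannot hold the
// data the header claims, mirroring the hunk above.
bool ValidateImageFileSize(const ImageHeaderLike& hdr, uint64_t file_size,
                           std::string* error_msg) {
  if (hdr.image_size > file_size) {
    *error_msg = "Image file too small for image heap";
    return false;
  }
  if (hdr.bitmap_offset + hdr.bitmap_size != file_size) {
    *error_msg = "Image bitmap does not end at the end of the file";
    return false;
  }
  return true;
}

int main() {
  ImageHeaderLike hdr{4096, 4096, 512};
  std::string error;
  return ValidateImageFileSize(hdr, 4608, &error) ? 0 : 1;  // 4096 + 512 == 4608
}
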
diff --git a/runtime/image.cc b/runtime/image.cc
index 269a07d..3cb2580 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -24,7 +24,7 @@
namespace art {
const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' };
-const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '3', '\0' };
+const uint8_t ImageHeader::kImageVersion[] = { '0', '1', '4', '\0' };
ImageHeader::ImageHeader(uint32_t image_begin,
uint32_t image_size,
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 7cc3e57..fd22361 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -61,8 +61,6 @@
static constexpr int kDepthOffset = 0;
static constexpr int kFixedSize = kDepthOffset + sizeof(uint8_t);
- static constexpr uint32_t kNoInlineInfo = -1;
-
MemoryRegion region_;
friend class CodeInfo;
@@ -76,10 +74,11 @@
* [location_kind, register_value]+.
*
* The location_kind for a Dex register can either be:
- * - Constant: register_value holds the constant,
- * - Stack: register_value holds the stack offset,
- * - Register: register_value holds the physical register number.
- * - None: the register has no location yet, meaning it has not been set.
+ * - kConstant: register_value holds the constant,
+ * - kInStack: register_value holds the stack offset,
+ * - kInRegister: register_value holds the physical register number,
+ * - kInFpuRegister: register_value holds the physical FPU register number,
+ * - kNone: the register has no location yet, meaning it has not been set.
*/
class DexRegisterMap {
public:
@@ -93,6 +92,24 @@
kConstant
};
+ static const char* PrettyDescriptor(LocationKind kind) {
+ switch (kind) {
+ case kNone:
+ return "none";
+ case kInStack:
+ return "in stack";
+ case kInRegister:
+ return "in register";
+ case kInFpuRegister:
+ return "in fpu register";
+ case kConstant:
+ return "as constant";
+ default:
+ LOG(FATAL) << "Invalid location kind " << static_cast<int>(kind);
+ return nullptr;
+ }
+ }
+
LocationKind GetLocationKind(uint16_t register_index) const {
return region_.Load<LocationKind>(
kFixedSize + register_index * SingleEntrySize());
@@ -191,11 +208,15 @@
}
}
- bool HasInlineInfo() const {
- return GetInlineDescriptorOffset() != InlineInfo::kNoInlineInfo;
+ bool HasDexRegisterMap() const {
+ return GetDexRegisterMapOffset() != kNoDexRegisterMap;
}
- bool Equals(const StackMap& other) {
+ bool HasInlineInfo() const {
+ return GetInlineDescriptorOffset() != kNoInlineInfo;
+ }
+
+ bool Equals(const StackMap& other) const {
return region_.pointer() == other.region_.pointer()
&& region_.size() == other.region_.size();
}
@@ -205,6 +226,14 @@
return RoundUp(StackMap::kFixedSize + stack_mask_size, 4);
}
+ // Special (invalid) offset for the DexRegisterMapOffset field meaning
+ // that there is no Dex register map for this stack map.
+ static constexpr uint32_t kNoDexRegisterMap = -1;
+
+ // Special (invalid) offset for the InlineDescriptorOffset field meaning
+ // that there is no inline info for this stack map.
+ static constexpr uint32_t kNoInlineInfo = -1;
+
private:
static constexpr int kDexPcOffset = 0;
static constexpr int kNativePcOffsetOffset = kDexPcOffset + sizeof(uint32_t);
@@ -271,20 +300,22 @@
return StackMap::ComputeAlignedStackMapSize(GetStackMaskSize());
}
- DexRegisterMap GetDexRegisterMapOf(StackMap stack_map, uint32_t number_of_dex_registers) {
+ DexRegisterMap GetDexRegisterMapOf(StackMap stack_map, uint32_t number_of_dex_registers) const {
+ DCHECK(stack_map.HasDexRegisterMap());
uint32_t offset = stack_map.GetDexRegisterMapOffset();
return DexRegisterMap(region_.Subregion(offset,
DexRegisterMap::kFixedSize + number_of_dex_registers * DexRegisterMap::SingleEntrySize()));
}
- InlineInfo GetInlineInfoOf(StackMap stack_map) {
+ InlineInfo GetInlineInfoOf(StackMap stack_map) const {
+ DCHECK(stack_map.HasInlineInfo());
uint32_t offset = stack_map.GetInlineDescriptorOffset();
uint8_t depth = region_.Load<uint8_t>(offset);
return InlineInfo(region_.Subregion(offset,
InlineInfo::kFixedSize + depth * InlineInfo::SingleEntrySize()));
}
- StackMap GetStackMapForDexPc(uint32_t dex_pc) {
+ StackMap GetStackMapForDexPc(uint32_t dex_pc) const {
for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
StackMap stack_map = GetStackMapAt(i);
if (stack_map.GetDexPc() == dex_pc) {
@@ -295,7 +326,7 @@
UNREACHABLE();
}
- StackMap GetStackMapForNativePcOffset(uint32_t native_pc_offset) {
+ StackMap GetStackMapForNativePcOffset(uint32_t native_pc_offset) const {
// TODO: stack maps are sorted by native pc, we can do a binary search.
for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
StackMap stack_map = GetStackMapAt(i);
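
The TODO above notes that stack maps are sorted by native pc, so the linear scan in GetStackMapForNativePcOffset() could become a binary search. A sketch of that search over a plain sorted vector of offsets (a stand-in for the real stack map table):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Binary search for the index of a native pc offset in a sorted table.
// Asserts the offset is present, mirroring the UNREACHABLE() fallthrough
// of the linear versions above.
size_t FindStackMapIndex(const std::vector<uint32_t>& sorted_native_pcs,
                         uint32_t native_pc_offset) {
  auto it = std::lower_bound(sorted_native_pcs.begin(),
                             sorted_native_pcs.end(), native_pc_offset);
  assert(it != sorted_native_pcs.end() && *it == native_pc_offset);
  return static_cast<size_t>(it - sorted_native_pcs.begin());
}

int main() {
  std::vector<uint32_t> native_pcs = {0x0, 0x10, 0x24, 0x40};
  assert(FindStackMapIndex(native_pcs, 0x24) == 2);
  return 0;
}
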
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 3ec9561..85c9340 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1554,13 +1554,6 @@
Leb128Encoder(dst).PushBackSigned(data);
}
-void PushWord(std::vector<uint8_t>* buf, int data) {
- buf->push_back(data & 0xff);
- buf->push_back((data >> 8) & 0xff);
- buf->push_back((data >> 16) & 0xff);
- buf->push_back((data >> 24) & 0xff);
-}
-
std::string PrettyDescriptor(Primitive::Type type) {
return PrettyDescriptor(Primitive::Descriptor(type));
}
diff --git a/runtime/utils.h b/runtime/utils.h
index 0fbc9df..3191e7d 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -546,7 +546,13 @@
}
};
-void PushWord(std::vector<uint8_t>* buf, int32_t data);
+template <typename Alloc>
+void Push32(std::vector<uint8_t, Alloc>* buf, int32_t data) {
+ buf->push_back(data & 0xff);
+ buf->push_back((data >> 8) & 0xff);
+ buf->push_back((data >> 16) & 0xff);
+ buf->push_back((data >> 24) & 0xff);
+}
void EncodeUnsignedLeb128(uint32_t data, std::vector<uint8_t>* buf);
void EncodeSignedLeb128(int32_t data, std::vector<uint8_t>* buf);
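
Turning PushWord into the templated Push32 lets it serve vectors with non-default allocators (for example the swap-backed vectors used in the compiler) without a separate overload. A usage sketch, copying the template from the hunk above:

#include <cassert>
#include <cstdint>
#include <vector>

template <typename Alloc>
void Push32(std::vector<uint8_t, Alloc>* buf, int32_t data) {
  buf->push_back(data & 0xff);
  buf->push_back((data >> 8) & 0xff);
  buf->push_back((data >> 16) & 0xff);
  buf->push_back((data >> 24) & 0xff);
}

int main() {
  std::vector<uint8_t> buf;
  Push32(&buf, 0x12345678);
  // Bytes come out little-endian: least significant byte first.
  assert(buf[0] == 0x78 && buf[1] == 0x56 && buf[2] == 0x34 && buf[3] == 0x12);
  return 0;
}
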
diff --git a/test/109-suspend-check/src/Main.java b/test/109-suspend-check/src/Main.java
index cd5130d..8046d75 100644
--- a/test/109-suspend-check/src/Main.java
+++ b/test/109-suspend-check/src/Main.java
@@ -19,30 +19,33 @@
public static void main(String[] args) {
System.out.println("Running (" + TEST_TIME + " seconds) ...");
- InfiniteForLoop forLoop = new InfiniteForLoop();
- InfiniteWhileLoop whileLoop = new InfiniteWhileLoop();
- InfiniteWhileLoopWithIntrinsic whileLoopWithIntrinsic =
- new InfiniteWhileLoopWithIntrinsic();
InfiniteDoWhileLoopWithLong doWhileLoopWithLong = new InfiniteDoWhileLoopWithLong();
- InfiniteDoWhileLoop doWhileLoop = new InfiniteDoWhileLoop();
- MakeGarbage garbage = new MakeGarbage();
- forLoop.start();
- whileLoop.start();
- whileLoopWithIntrinsic.start();
+ SimpleLoopThread[] simpleLoops = {
+ new InfiniteForLoop(),
+ new InfiniteWhileLoop(),
+ new InfiniteWhileLoopWithIntrinsic(),
+ new InfiniteDoWhileLoop(),
+ new MakeGarbage(),
+ new InfiniteWhileLoopWithSpecialReturnArgOrConst(new SpecialMethods1()),
+ new InfiniteWhileLoopWithSpecialReturnArgOrConst(new SpecialMethods2()),
+ new InfiniteWhileLoopWithSpecialPutOrNop(new SpecialMethods1()),
+ new InfiniteWhileLoopWithSpecialPutOrNop(new SpecialMethods2()),
+ new InfiniteWhileLoopWithSpecialConstOrIGet(new SpecialMethods1()),
+ new InfiniteWhileLoopWithSpecialConstOrIGet(new SpecialMethods2()),
+ };
doWhileLoopWithLong.start();
- doWhileLoop.start();
- garbage.start();
+ for (SimpleLoopThread loop : simpleLoops) {
+ loop.start();
+ }
for (int i = 0; i < TEST_TIME; i++) {
Runtime.getRuntime().gc();
System.out.println(".");
sleep(1000);
}
- forLoop.stopNow();
- whileLoop.stopNow();
- whileLoopWithIntrinsic.stopNow();
doWhileLoopWithLong.stopNow();
- doWhileLoop.stopNow();
- garbage.stopNow();
+ for (SimpleLoopThread loop : simpleLoops) {
+ loop.stopNow();
+ }
System.out.println("Done.");
}
@@ -55,8 +58,84 @@
}
}
-class InfiniteWhileLoopWithIntrinsic extends Thread {
- volatile private boolean keepGoing = true;
+class SimpleLoopThread extends Thread {
+ protected volatile boolean keepGoing = true;
+ public void stopNow() {
+ keepGoing = false;
+ }
+}
+
+interface SpecialMethodInterface {
+ long ReturnArgOrConst(long arg);
+ void PutOrNop(long arg);
+ long ConstOrIGet();
+}
+
+class SpecialMethods1 implements SpecialMethodInterface {
+ public long ReturnArgOrConst(long arg) {
+ return 42L;
+ }
+ public void PutOrNop(long arg) {
+ }
+ public long ConstOrIGet() {
+ return 42L;
+ }
+}
+
+class SpecialMethods2 implements SpecialMethodInterface {
+ public long value = 42L;
+ public long ReturnArgOrConst(long arg) {
+ return arg;
+ }
+ public void PutOrNop(long arg) {
+ value = arg;
+ }
+ public long ConstOrIGet() {
+ return value;
+ }
+}
+
+class InfiniteWhileLoopWithSpecialReturnArgOrConst extends SimpleLoopThread {
+ private SpecialMethodInterface smi;
+ public InfiniteWhileLoopWithSpecialReturnArgOrConst(SpecialMethodInterface smi) {
+ this.smi = smi;
+ }
+ public void run() {
+ long i = 0L;
+ while (keepGoing) {
+ i += smi.ReturnArgOrConst(i);
+ }
+ }
+}
+
+class InfiniteWhileLoopWithSpecialPutOrNop extends SimpleLoopThread {
+ private SpecialMethodInterface smi;
+ public InfiniteWhileLoopWithSpecialPutOrNop(SpecialMethodInterface smi) {
+ this.smi = smi;
+ }
+ public void run() {
+ long i = 0L;
+ while (keepGoing) {
+ smi.PutOrNop(i);
+ i++;
+ }
+ }
+}
+
+class InfiniteWhileLoopWithSpecialConstOrIGet extends SimpleLoopThread {
+ private SpecialMethodInterface smi;
+ public InfiniteWhileLoopWithSpecialConstOrIGet(SpecialMethodInterface smi) {
+ this.smi = smi;
+ }
+ public void run() {
+ long i = 0L;
+ while (keepGoing) {
+ i += smi.ConstOrIGet();
+ }
+ }
+}
+
+class InfiniteWhileLoopWithIntrinsic extends SimpleLoopThread {
private String[] strings = { "a", "b", "c", "d" };
private int sum = 0;
public void run() {
@@ -66,9 +145,6 @@
sum += strings[i & 3].length();
}
}
- public void stopNow() {
- keepGoing = false;
- }
}
class InfiniteDoWhileLoopWithLong extends Thread {
@@ -84,55 +160,37 @@
}
}
-class InfiniteWhileLoop extends Thread {
- volatile private boolean keepGoing = true;
+class InfiniteWhileLoop extends SimpleLoopThread {
public void run() {
int i = 0;
while (keepGoing) {
i++;
}
}
- public void stopNow() {
- keepGoing = false;
- }
}
-class InfiniteDoWhileLoop extends Thread {
- volatile private boolean keepGoing = true;
+class InfiniteDoWhileLoop extends SimpleLoopThread {
public void run() {
int i = 0;
do {
i++;
} while (keepGoing);
}
- public void stopNow() {
- keepGoing = false;
- }
}
-class InfiniteForLoop extends Thread {
- int count = 100000;
- volatile private boolean keepGoing = true;
+class InfiniteForLoop extends SimpleLoopThread {
public void run() {
int i = 0;
for (int j = 0; keepGoing; j++) {
i += j;
}
}
- public void stopNow() {
- keepGoing = false;
- }
}
-
-class MakeGarbage extends Thread {
- volatile private boolean keepGoing = true;
+class MakeGarbage extends SimpleLoopThread {
public void run() {
while (keepGoing) {
byte[] garbage = new byte[100000];
}
}
- public void stopNow() {
- keepGoing = false;
- }
}