Ensure stack maps are 4-byte aligned.
With the recent move to gcc 4.9, we are hitting alignment
SIGBUS faults on ARM. The reason is that gcc will optimize two
consecutive 32-bit loads into a single 64-bit load, and the resulting
instruction (ldrd) faults if the address is not 4-byte aligned.
Also removed the emission of the mapping table when a method is
optimized: the information can be found in the StackMap itself.
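For illustration only (not part of this patch), a minimal sketch of the
failure mode, assuming gcc 4.9 -O2 targeting ARM:

    #include <cstdint>
    // gcc may fuse these two adjacent 32-bit loads into a single ldrd.
    // ldrd requires a 4-byte-aligned address even when plain unaligned
    // loads are otherwise tolerated, so a 2-byte-aligned p raises SIGBUS.
    uint64_t ReadPair(const uint32_t* p) {
      uint32_t lo = p[0];
      uint32_t hi = p[1];
      return (static_cast<uint64_t>(hi) << 32) | lo;
    }

A table emitted at an arbitrary byte offset can hand such an unaligned
pointer to the runtime, hence the padding introduced below.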
Change-Id: Icf79406c18a3f4db3c05d52fc2c0dd2e35bf0f8f
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index a3d9a0b..e84f65a 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -57,7 +57,7 @@
const std::vector<uint8_t>& vmap_table = compiled_method->GetVmapTable();
uint32_t vmap_table_offset = vmap_table.empty() ? 0u
: sizeof(OatQuickMethodHeader) + vmap_table.size();
- const std::vector<uint8_t>& mapping_table = compiled_method->GetMappingTable();
+ const std::vector<uint8_t>& mapping_table = *compiled_method->GetMappingTable();
uint32_t mapping_table_offset = mapping_table.empty() ? 0u
: sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table.size();
const std::vector<uint8_t>& gc_map = *compiled_method->GetGcMap();
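Note: the hunk above now dereferences GetMappingTable() unconditionally,
which is safe only while the methods under test still emit a mapping
table. A null-tolerant variant (hypothetical, not in this patch) would be:

    // Hypothetical: fall back to an empty table when none was emitted.
    static const std::vector<uint8_t> kEmptyTable;
    const std::vector<uint8_t>* mapping_table_ptr =
        compiled_method->GetMappingTable();
    const std::vector<uint8_t>& mapping_table =
        (mapping_table_ptr != nullptr) ? *mapping_table_ptr : kEmptyTable;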
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 698bf3b..e292834 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -170,14 +170,13 @@
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
- const std::vector<uint8_t>& mapping_table,
const std::vector<uint8_t>& stack_map)
: CompiledCode(driver, instruction_set, quick_code),
frame_size_in_bytes_(frame_size_in_bytes),
core_spill_mask_(core_spill_mask),
fp_spill_mask_(fp_spill_mask),
src_mapping_table_(driver->DeduplicateSrcMappingTable(SrcMap())),
- mapping_table_(driver->DeduplicateMappingTable(mapping_table)),
+ mapping_table_(nullptr),
vmap_table_(driver->DeduplicateVMapTable(stack_map)),
gc_map_(nullptr),
cfi_info_(nullptr),
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 7f76eef..d2f5d01 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -291,7 +291,6 @@
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
- const std::vector<uint8_t>& mapping_table,
const std::vector<uint8_t>& vmap_table);
// Constructs a CompiledMethod for the QuickJniCompiler.
@@ -330,9 +329,8 @@
return *src_mapping_table_;
}
- const std::vector<uint8_t>& GetMappingTable() const {
- DCHECK(mapping_table_ != nullptr);
- return *mapping_table_;
+ std::vector<uint8_t> const* GetMappingTable() const {
+ return mapping_table_;
}
const std::vector<uint8_t>& GetVmapTable() const {
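With this change GetMappingTable() returns a possibly-null pointer instead
of a reference, so callers must test it before use. A sketch of the new
call pattern (compiled_method stands for any CompiledMethod*):

    const std::vector<uint8_t>* mapping_table =
        compiled_method->GetMappingTable();
    if (mapping_table != nullptr && !mapping_table->empty()) {
      // Quick-compiled method: a separate mapping table was emitted.
    } else {
      // Optimized method: the PC/dex mapping lives in the stack map.
    }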
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 8a7abb4..6bb526c 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -530,7 +530,7 @@
struct OatWriter::MappingTableDataAccess {
static const std::vector<uint8_t>* GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
- return &compiled_method->GetMappingTable();
+ return compiled_method->GetMappingTable();
}
static uint32_t GetOffset(OatClass* oat_class, size_t method_offsets_index) ALWAYS_INLINE {
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index b3ac7ff..ea0dc66 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -347,8 +347,8 @@
return lhs->GetQuickCode() < rhs->GetQuickCode();
}
// If the code is the same, all other fields are likely to be the same as well.
- if (UNLIKELY(&lhs->GetMappingTable() != &rhs->GetMappingTable())) {
- return &lhs->GetMappingTable() < &rhs->GetMappingTable();
+ if (UNLIKELY(lhs->GetMappingTable() != rhs->GetMappingTable())) {
+ return lhs->GetMappingTable() < rhs->GetMappingTable();
}
if (UNLIKELY(&lhs->GetVmapTable() != &rhs->GetVmapTable())) {
return &lhs->GetVmapTable() < &rhs->GetVmapTable();
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 11fc9bf..89a0cf9 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -238,6 +238,17 @@
return true;
}
+// The stack map we generate must be 4-byte aligned on ARM. Since existing
+// maps are generated alongside these stack maps, we must also align them.
+static std::vector<uint8_t>& AlignVectorSize(std::vector<uint8_t>& vector) {
+ size_t size = vector.size();
+ size_t aligned_size = RoundUp(size, 4);
+ for (; size < aligned_size; ++size) {
+ vector.push_back(0);
+ }
+ return vector;
+}
+
CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
@@ -318,12 +329,6 @@
visualizer.DumpGraph(kRegisterAllocatorPassName);
codegen->CompileOptimized(&allocator);
- std::vector<uint8_t> mapping_table;
- SrcMap src_mapping_table;
- codegen->BuildMappingTable(&mapping_table,
- GetCompilerDriver()->GetCompilerOptions().GetIncludeDebugSymbols() ?
- &src_mapping_table : nullptr);
-
std::vector<uint8_t> stack_map;
codegen->BuildStackMaps(&stack_map);
@@ -333,7 +338,6 @@
codegen->GetFrameSize(),
codegen->GetCoreSpillMask(),
0, /* FPR spill mask, unused */
- mapping_table,
stack_map);
} else if (shouldOptimize && RegisterAllocator::Supports(instruction_set)) {
LOG(FATAL) << "Could not allocate registers in optimizing compiler";
@@ -360,9 +364,9 @@
codegen->GetCoreSpillMask(),
0, /* FPR spill mask, unused */
&src_mapping_table,
- mapping_table,
- vmap_table,
- gc_map,
+ AlignVectorSize(mapping_table),
+ AlignVectorSize(vmap_table),
+ AlignVectorSize(gc_map),
nullptr);
}
}
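For reference, AlignVectorSize() pads in place and returns its argument,
so wrapping each table at the call site rounds its size up to a multiple
of 4 without copying. A worked example (illustrative only):

    std::vector<uint8_t> table = {1, 2, 3, 4, 5, 6};
    AlignVectorSize(table);
    // RoundUp(6, 4) == 8: table.size() is now 8, padded with two 0 bytes,
    // so whatever is laid out after it starts on a 4-byte boundary.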
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 9cfa71c..3974e53 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -111,7 +111,7 @@
}
size_t ComputeStackMapSize() const {
- return stack_maps_.Size() * (StackMap::kFixedSize + StackMaskEncodingSize(stack_mask_max_));
+ return stack_maps_.Size() * StackMap::ComputeAlignedStackMapSize(stack_mask_max_);
}
size_t ComputeDexRegisterMapSize() const {
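StackMap::ComputeAlignedStackMapSize() is not shown in this diff. Under
the assumption suggested by its name and by the old expression it
replaces, it rounds the per-entry size up to 4 bytes so consecutive
entries stay word aligned; a sketch of that assumed shape:

    // Assumed, not taken from the patch: kFixedSize and
    // StackMaskEncodingSize() are the quantities from the old expression.
    static size_t ComputeAlignedStackMapSize(size_t stack_mask_max) {
      return RoundUp(kFixedSize + StackMaskEncodingSize(stack_mask_max), 4);
    }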