Merge "Make the mark stack expandable for the CC collector."
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 4abd191..c53479c 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -89,9 +89,11 @@
HOST_CORE_DEX_FILES := $(foreach jar,$(HOST_CORE_JARS), $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
+ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
# Classpath for Jack compilation: we only need core-libart.
HOST_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack
HOST_JACK_CLASSPATH := $(foreach dep,$(HOST_JACK_CLASSPATH_DEPENDENCIES),$(abspath $(dep)))
TARGET_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack
TARGET_JACK_CLASSPATH := $(foreach dep,$(TARGET_JACK_CLASSPATH_DEPENDENCIES),$(abspath $(dep)))
+endif
endif # ART_ANDROID_COMMON_PATH_MK
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 6295e15..fdfd94c 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -230,6 +230,7 @@
runtime/jni_internal_test.cc \
runtime/proxy_test.cc \
runtime/reflection_test.cc \
+ compiler/compiled_method_test.cc \
compiler/dex/gvn_dead_code_elimination_test.cc \
compiler/dex/global_value_numbering_test.cc \
compiler/dex/local_value_numbering_test.cc \
@@ -237,6 +238,7 @@
compiler/dex/mir_optimization_test.cc \
compiler/dex/type_inference_test.cc \
compiler/dwarf/dwarf_test.cc \
+ compiler/driver/compiled_method_storage_test.cc \
compiler/driver/compiler_driver_test.cc \
compiler/elf_writer_test.cc \
compiler/image_test.cc \
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 17f9d12..aaac126 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -54,6 +54,7 @@
dex/verification_results.cc \
dex/vreg_analysis.cc \
dex/quick_compiler_callbacks.cc \
+ driver/compiled_method_storage.cc \
driver/compiler_driver.cc \
driver/compiler_options.cc \
driver/dex_compilation_unit.cc \
@@ -88,6 +89,7 @@
optimizing/primitive_type_propagation.cc \
optimizing/reference_type_propagation.cc \
optimizing/register_allocator.cc \
+ optimizing/sharpening.cc \
optimizing/side_effects_analysis.cc \
optimizing/ssa_builder.cc \
optimizing/ssa_liveness_analysis.cc \
diff --git a/compiler/cfi_test.h b/compiler/cfi_test.h
index 5e345db..6fd4575 100644
--- a/compiler/cfi_test.h
+++ b/compiler/cfi_test.h
@@ -51,7 +51,7 @@
dwarf::WriteDebugFrameCIE(is64bit, dwarf::DW_EH_PE_absptr, dwarf::Reg(8),
initial_opcodes, kCFIFormat, &debug_frame_data_);
std::vector<uintptr_t> debug_frame_patches;
- dwarf::WriteDebugFrameFDE(is64bit, 0, 0, actual_asm.size(), &actual_cfi,
+ dwarf::WriteDebugFrameFDE(is64bit, 0, 0, actual_asm.size(), ArrayRef<const uint8_t>(actual_cfi),
kCFIFormat, &debug_frame_data_, &debug_frame_patches);
ReformatCfi(Objdump(false, "-W"), &lines);
// Pretty-print assembly.
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 58a2f96..151437b 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -54,22 +54,22 @@
method->GetDexMethodIndex()));
}
if (compiled_method != nullptr) {
- const SwapVector<uint8_t>* code = compiled_method->GetQuickCode();
- uint32_t code_size = code->size();
+ ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
+ uint32_t code_size = code.size();
CHECK_NE(0u, code_size);
- const SwapVector<uint8_t>* vmap_table = compiled_method->GetVmapTable();
- uint32_t vmap_table_offset = vmap_table->empty() ? 0u
- : sizeof(OatQuickMethodHeader) + vmap_table->size();
- const SwapVector<uint8_t>* mapping_table = compiled_method->GetMappingTable();
- bool mapping_table_used = mapping_table != nullptr && !mapping_table->empty();
- size_t mapping_table_size = mapping_table_used ? mapping_table->size() : 0U;
+ ArrayRef<const uint8_t> vmap_table = compiled_method->GetVmapTable();
+ uint32_t vmap_table_offset = vmap_table.empty() ? 0u
+ : sizeof(OatQuickMethodHeader) + vmap_table.size();
+ ArrayRef<const uint8_t> mapping_table = compiled_method->GetMappingTable();
+ bool mapping_table_used = !mapping_table.empty();
+ size_t mapping_table_size = mapping_table.size();
uint32_t mapping_table_offset = !mapping_table_used ? 0u
- : sizeof(OatQuickMethodHeader) + vmap_table->size() + mapping_table_size;
- const SwapVector<uint8_t>* gc_map = compiled_method->GetGcMap();
- bool gc_map_used = gc_map != nullptr && !gc_map->empty();
- size_t gc_map_size = gc_map_used ? gc_map->size() : 0U;
+ : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table_size;
+ ArrayRef<const uint8_t> gc_map = compiled_method->GetGcMap();
+ bool gc_map_used = !gc_map.empty();
+ size_t gc_map_size = gc_map.size();
uint32_t gc_map_offset = !gc_map_used ? 0u
- : sizeof(OatQuickMethodHeader) + vmap_table->size() + mapping_table_size + gc_map_size;
+ : sizeof(OatQuickMethodHeader) + vmap_table.size() + mapping_table_size + gc_map_size;
OatQuickMethodHeader method_header(mapping_table_offset, vmap_table_offset, gc_map_offset,
compiled_method->GetFrameSizeInBytes(),
compiled_method->GetCoreSpillMask(),
@@ -77,25 +77,25 @@
header_code_and_maps_chunks_.push_back(std::vector<uint8_t>());
std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back();
- size_t size = sizeof(method_header) + code_size + vmap_table->size() + mapping_table_size +
+ size_t size = sizeof(method_header) + code_size + vmap_table.size() + mapping_table_size +
gc_map_size;
size_t code_offset = compiled_method->AlignCode(size - code_size);
size_t padding = code_offset - (size - code_size);
chunk->reserve(padding + size);
chunk->resize(sizeof(method_header));
memcpy(&(*chunk)[0], &method_header, sizeof(method_header));
- chunk->insert(chunk->begin(), vmap_table->begin(), vmap_table->end());
+ chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end());
if (mapping_table_used) {
- chunk->insert(chunk->begin(), mapping_table->begin(), mapping_table->end());
+ chunk->insert(chunk->begin(), mapping_table.begin(), mapping_table.end());
}
if (gc_map_used) {
- chunk->insert(chunk->begin(), gc_map->begin(), gc_map->end());
+ chunk->insert(chunk->begin(), gc_map.begin(), gc_map.end());
}
chunk->insert(chunk->begin(), padding, 0);
- chunk->insert(chunk->end(), code->begin(), code->end());
+ chunk->insert(chunk->end(), code.begin(), code.end());
CHECK_EQ(padding + size, chunk->size());
const void* code_ptr = &(*chunk)[code_offset];
- MakeExecutable(code_ptr, code->size());
+ MakeExecutable(code_ptr, code.size());
const void* method_code = CompiledMethod::CodePointer(code_ptr,
compiled_method->GetInstructionSet());
LOG(INFO) << "MakeExecutable " << PrettyMethod(method) << " code=" << method_code;
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 74ef35e..9551d22 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -15,27 +15,22 @@
*/
#include "compiled_method.h"
+
+#include "driver/compiled_method_storage.h"
#include "driver/compiler_driver.h"
+#include "utils/swap_space.h"
namespace art {
CompiledCode::CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set,
- const ArrayRef<const uint8_t>& quick_code, bool owns_code_array)
- : compiler_driver_(compiler_driver), instruction_set_(instruction_set),
- owns_code_array_(owns_code_array), quick_code_(nullptr) {
- if (owns_code_array_) {
- // If we are supposed to own the code, don't deduplicate it.
- quick_code_ = new SwapVector<uint8_t>(quick_code.begin(), quick_code.end(),
- compiler_driver_->GetSwapSpaceAllocator());
- } else {
- quick_code_ = compiler_driver_->DeduplicateCode(quick_code);
- }
+ const ArrayRef<const uint8_t>& quick_code)
+ : compiler_driver_(compiler_driver),
+ instruction_set_(instruction_set),
+ quick_code_(compiler_driver_->GetCompiledMethodStorage()->DeduplicateCode(quick_code)) {
}
CompiledCode::~CompiledCode() {
- if (owns_code_array_) {
- delete quick_code_;
- }
+ compiler_driver_->GetCompiledMethodStorage()->ReleaseCode(quick_code_);
}
bool CompiledCode::operator==(const CompiledCode& rhs) const {
@@ -104,59 +99,28 @@
}
}
-const std::vector<uint32_t>& CompiledCode::GetOatdataOffsetsToCompliledCodeOffset() const {
- CHECK_NE(0U, oatdata_offsets_to_compiled_code_offset_.size());
- return oatdata_offsets_to_compiled_code_offset_;
-}
-
-void CompiledCode::AddOatdataOffsetToCompliledCodeOffset(uint32_t offset) {
- oatdata_offsets_to_compiled_code_offset_.push_back(offset);
-}
-
CompiledMethod::CompiledMethod(CompilerDriver* driver,
InstructionSet instruction_set,
const ArrayRef<const uint8_t>& quick_code,
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
- DefaultSrcMap* src_mapping_table,
+ const ArrayRef<const SrcMapElem>& src_mapping_table,
const ArrayRef<const uint8_t>& mapping_table,
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& native_gc_map,
const ArrayRef<const uint8_t>& cfi_info,
const ArrayRef<const LinkerPatch>& patches)
- : CompiledCode(driver, instruction_set, quick_code, !driver->DedupeEnabled()),
- owns_arrays_(!driver->DedupeEnabled()),
+ : CompiledCode(driver, instruction_set, quick_code),
frame_size_in_bytes_(frame_size_in_bytes), core_spill_mask_(core_spill_mask),
fp_spill_mask_(fp_spill_mask),
- patches_(patches.begin(), patches.end(), driver->GetSwapSpaceAllocator()) {
- if (owns_arrays_) {
- if (src_mapping_table == nullptr) {
- src_mapping_table_ = new SwapSrcMap(driver->GetSwapSpaceAllocator());
- } else {
- src_mapping_table_ = new SwapSrcMap(src_mapping_table->begin(), src_mapping_table->end(),
- driver->GetSwapSpaceAllocator());
- }
- mapping_table_ = mapping_table.empty() ?
- nullptr : new SwapVector<uint8_t>(mapping_table.begin(), mapping_table.end(),
- driver->GetSwapSpaceAllocator());
- vmap_table_ = new SwapVector<uint8_t>(vmap_table.begin(), vmap_table.end(),
- driver->GetSwapSpaceAllocator());
- gc_map_ = native_gc_map.empty() ? nullptr :
- new SwapVector<uint8_t>(native_gc_map.begin(), native_gc_map.end(),
- driver->GetSwapSpaceAllocator());
- cfi_info_ = cfi_info.empty() ? nullptr :
- new SwapVector<uint8_t>(cfi_info.begin(), cfi_info.end(), driver->GetSwapSpaceAllocator());
- } else {
- src_mapping_table_ = src_mapping_table == nullptr ?
- driver->DeduplicateSrcMappingTable(ArrayRef<SrcMapElem>()) :
- driver->DeduplicateSrcMappingTable(ArrayRef<SrcMapElem>(*src_mapping_table));
- mapping_table_ = mapping_table.empty() ?
- nullptr : driver->DeduplicateMappingTable(mapping_table);
- vmap_table_ = driver->DeduplicateVMapTable(vmap_table);
- gc_map_ = native_gc_map.empty() ? nullptr : driver->DeduplicateGCMap(native_gc_map);
- cfi_info_ = cfi_info.empty() ? nullptr : driver->DeduplicateCFIInfo(cfi_info);
- }
+ src_mapping_table_(
+ driver->GetCompiledMethodStorage()->DeduplicateSrcMappingTable(src_mapping_table)),
+ mapping_table_(driver->GetCompiledMethodStorage()->DeduplicateMappingTable(mapping_table)),
+ vmap_table_(driver->GetCompiledMethodStorage()->DeduplicateVMapTable(vmap_table)),
+ gc_map_(driver->GetCompiledMethodStorage()->DeduplicateGCMap(native_gc_map)),
+ cfi_info_(driver->GetCompiledMethodStorage()->DeduplicateCFIInfo(cfi_info)),
+ patches_(driver->GetCompiledMethodStorage()->DeduplicateLinkerPatches(patches)) {
}
CompiledMethod* CompiledMethod::SwapAllocCompiledMethod(
@@ -166,13 +130,13 @@
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
- DefaultSrcMap* src_mapping_table,
+ const ArrayRef<const SrcMapElem>& src_mapping_table,
const ArrayRef<const uint8_t>& mapping_table,
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& native_gc_map,
const ArrayRef<const uint8_t>& cfi_info,
const ArrayRef<const LinkerPatch>& patches) {
- SwapAllocator<CompiledMethod> alloc(driver->GetSwapSpaceAllocator());
+ SwapAllocator<CompiledMethod> alloc(driver->GetCompiledMethodStorage()->GetSwapSpaceAllocator());
CompiledMethod* ret = alloc.allocate(1);
alloc.construct(ret, driver, instruction_set, quick_code, frame_size_in_bytes, core_spill_mask,
fp_spill_mask, src_mapping_table, mapping_table, vmap_table, native_gc_map,
@@ -180,22 +144,20 @@
return ret;
}
-
-
void CompiledMethod::ReleaseSwapAllocatedCompiledMethod(CompilerDriver* driver, CompiledMethod* m) {
- SwapAllocator<CompiledMethod> alloc(driver->GetSwapSpaceAllocator());
+ SwapAllocator<CompiledMethod> alloc(driver->GetCompiledMethodStorage()->GetSwapSpaceAllocator());
alloc.destroy(m);
alloc.deallocate(m, 1);
}
CompiledMethod::~CompiledMethod() {
- if (owns_arrays_) {
- delete src_mapping_table_;
- delete mapping_table_;
- delete vmap_table_;
- delete gc_map_;
- delete cfi_info_;
- }
+ CompiledMethodStorage* storage = GetCompilerDriver()->GetCompiledMethodStorage();
+ storage->ReleaseLinkerPatches(patches_);
+ storage->ReleaseCFIInfo(cfi_info_);
+ storage->ReleaseGCMap(gc_map_);
+ storage->ReleaseVMapTable(vmap_table_);
+ storage->ReleaseMappingTable(mapping_table_);
+ storage->ReleaseSrcMappingTable(src_mapping_table_);
}
} // namespace art
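The constructor/destructor symmetry above is the core contract: every Deduplicate*() call in the constructor is paired with a Release*() call in the destructor, in reverse order of acquisition. A toy sketch of the ownership rule, assuming a plain std::set in place of art's sharded DedupeSet (the ToyStorage/Deduplicate/Release names are illustrative):

    #include <cstdint>
    #include <set>
    #include <vector>

    // Toy model of AllocateOrDeduplicateArray/ReleaseArrayIfNotDeduplicated:
    // with dedupe on, the set owns the unique copy and Release is a no-op;
    // with dedupe off, each caller gets a private copy and must free it.
    class ToyStorage {
     public:
      explicit ToyStorage(bool dedupe_enabled) : dedupe_enabled_(dedupe_enabled) {}

      const std::vector<uint8_t>* Deduplicate(const std::vector<uint8_t>& data) {
        if (data.empty()) {
          return nullptr;  // Empty arrays are represented as nullptr.
        }
        if (!dedupe_enabled_) {
          return new std::vector<uint8_t>(data);  // Private, caller-released copy.
        }
        return &*pool_.insert(data).first;  // Shared, pool-owned copy.
      }

      void Release(const std::vector<uint8_t>* data) {
        if (data != nullptr && !dedupe_enabled_) {
          delete data;  // Only private copies are freed here.
        }
      }

     private:
      const bool dedupe_enabled_;
      std::set<std::vector<uint8_t>> pool_;  // Owns deduplicated arrays.
    };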
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index a4d2387..15a4ba0 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -23,19 +23,20 @@
#include "arch/instruction_set.h"
#include "base/bit_utils.h"
+#include "length_prefixed_array.h"
#include "method_reference.h"
#include "utils/array_ref.h"
-#include "utils/swap_space.h"
namespace art {
class CompilerDriver;
+class CompiledMethodStorage;
class CompiledCode {
public:
// For Quick to supply a code blob
CompiledCode(CompilerDriver* compiler_driver, InstructionSet instruction_set,
- const ArrayRef<const uint8_t>& quick_code, bool owns_code_array);
+ const ArrayRef<const uint8_t>& quick_code);
virtual ~CompiledCode();
@@ -43,8 +44,8 @@
return instruction_set_;
}
- const SwapVector<uint8_t>* GetQuickCode() const {
- return quick_code_;
+ ArrayRef<const uint8_t> GetQuickCode() const {
+ return GetArray(quick_code_);
}
bool operator==(const CompiledCode& rhs) const;
@@ -66,41 +67,46 @@
static const void* CodePointer(const void* code_pointer,
InstructionSet instruction_set);
- const std::vector<uint32_t>& GetOatdataOffsetsToCompliledCodeOffset() const;
- void AddOatdataOffsetToCompliledCodeOffset(uint32_t offset);
+ protected:
+ template <typename T>
+ static ArrayRef<const T> GetArray(const LengthPrefixedArray<T>* array) {
+ if (array == nullptr) {
+ return ArrayRef<const T>();
+ }
+ DCHECK_NE(array->size(), 0u);
+ return ArrayRef<const T>(&array->At(0), array->size());
+ }
+
+ CompilerDriver* GetCompilerDriver() {
+ return compiler_driver_;
+ }
private:
CompilerDriver* const compiler_driver_;
const InstructionSet instruction_set_;
- // If we own the code array (means that we free in destructor).
- const bool owns_code_array_;
-
// Used to store the PIC code for Quick.
- SwapVector<uint8_t>* quick_code_;
-
- // There are offsets from the oatdata symbol to where the offset to
- // the compiled method will be found. These are computed by the
- // OatWriter and then used by the ElfWriter to add relocations so
- // that MCLinker can update the values to the location in the linked .so.
- std::vector<uint32_t> oatdata_offsets_to_compiled_code_offset_;
+ const LengthPrefixedArray<uint8_t>* const quick_code_;
};
class SrcMapElem {
public:
uint32_t from_;
int32_t to_;
-
- // Lexicographical compare.
- bool operator<(const SrcMapElem& other) const {
- if (from_ != other.from_) {
- return from_ < other.from_;
- }
- return to_ < other.to_;
- }
};
+inline bool operator<(const SrcMapElem& lhs, const SrcMapElem& rhs) {
+ if (lhs.from_ != rhs.from_) {
+ return lhs.from_ < rhs.from_;
+ }
+ return lhs.to_ < rhs.to_;
+}
+
+inline bool operator==(const SrcMapElem& lhs, const SrcMapElem& rhs) {
+ return lhs.from_ == rhs.from_ && lhs.to_ == rhs.to_;
+}
+
template <class Allocator>
class SrcMap FINAL : public std::vector<SrcMapElem, Allocator> {
public:
@@ -151,7 +157,6 @@
};
using DefaultSrcMap = SrcMap<std::allocator<SrcMapElem>>;
-using SwapSrcMap = SrcMap<SwapAllocator<SrcMapElem>>;
enum LinkerPatchType {
@@ -273,6 +278,9 @@
uint32_t method_idx_; // Method index for Call/Method patches.
uint32_t type_idx_; // Type index for Type patches.
uint32_t element_offset_; // Element offset in the dex cache arrays.
+ static_assert(sizeof(method_idx_) == sizeof(cmp1_), "needed by relational operators");
+ static_assert(sizeof(type_idx_) == sizeof(cmp1_), "needed by relational operators");
+ static_assert(sizeof(element_offset_) == sizeof(cmp1_), "needed by relational operators");
};
union {
uint32_t cmp2_; // Used for relational operators.
@@ -313,7 +321,7 @@
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
- DefaultSrcMap* src_mapping_table,
+ const ArrayRef<const SrcMapElem>& src_mapping_table,
const ArrayRef<const uint8_t>& mapping_table,
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& native_gc_map,
@@ -329,7 +337,7 @@
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
- DefaultSrcMap* src_mapping_table,
+ const ArrayRef<const SrcMapElem>& src_mapping_table,
const ArrayRef<const uint8_t>& mapping_table,
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& native_gc_map,
@@ -350,35 +358,31 @@
return fp_spill_mask_;
}
- const SwapSrcMap& GetSrcMappingTable() const {
- DCHECK(src_mapping_table_ != nullptr);
- return *src_mapping_table_;
+ ArrayRef<const SrcMapElem> GetSrcMappingTable() const {
+ return GetArray(src_mapping_table_);
}
- SwapVector<uint8_t> const* GetMappingTable() const {
- return mapping_table_;
+ ArrayRef<const uint8_t> GetMappingTable() const {
+ return GetArray(mapping_table_);
}
- const SwapVector<uint8_t>* GetVmapTable() const {
- DCHECK(vmap_table_ != nullptr);
- return vmap_table_;
+ ArrayRef<const uint8_t> GetVmapTable() const {
+ return GetArray(vmap_table_);
}
- SwapVector<uint8_t> const* GetGcMap() const {
- return gc_map_;
+ ArrayRef<const uint8_t> GetGcMap() const {
+ return GetArray(gc_map_);
}
- const SwapVector<uint8_t>* GetCFIInfo() const {
- return cfi_info_;
+ ArrayRef<const uint8_t> GetCFIInfo() const {
+ return GetArray(cfi_info_);
}
ArrayRef<const LinkerPatch> GetPatches() const {
- return ArrayRef<const LinkerPatch>(patches_);
+ return GetArray(patches_);
}
private:
- // Whether or not the arrays are owned by the compiled method or dedupe sets.
- const bool owns_arrays_;
// For quick code, the size of the activation used by the code.
const size_t frame_size_in_bytes_;
// For quick code, a bit mask describing spilled GPR callee-save registers.
@@ -386,19 +390,19 @@
// For quick code, a bit mask describing spilled FPR callee-save registers.
const uint32_t fp_spill_mask_;
// For quick code, a set of pairs (PC, DEX) mapping from native PC offset to DEX offset.
- SwapSrcMap* src_mapping_table_;
+ const LengthPrefixedArray<SrcMapElem>* const src_mapping_table_;
// For quick code, a uleb128 encoded map from native PC offset to dex PC as well as dex PC to
// native PC offset. Size prefixed.
- SwapVector<uint8_t>* mapping_table_;
+ const LengthPrefixedArray<uint8_t>* const mapping_table_;
// For quick code, a uleb128 encoded map from GPR/FPR register to dex register. Size prefixed.
- SwapVector<uint8_t>* vmap_table_;
+ const LengthPrefixedArray<uint8_t>* const vmap_table_;
// For quick code, a map keyed by native PC indices to bitmaps describing what dalvik registers
// are live.
- SwapVector<uint8_t>* gc_map_;
+ const LengthPrefixedArray<uint8_t>* const gc_map_;
// For quick code, a FDE entry for the debug_frame section.
- SwapVector<uint8_t>* cfi_info_;
+ const LengthPrefixedArray<uint8_t>* const cfi_info_;
// For quick code, linker patches needed by the method.
- const SwapVector<LinkerPatch> patches_;
+ const LengthPrefixedArray<LinkerPatch>* const patches_;
};
} // namespace art
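The new quick_code_, vmap_table_, etc. fields are LengthPrefixedArray<T> pointers: the element count is stored inline in front of the elements, so a single pointer is enough for GetArray() to rebuild an ArrayRef. A reduced sketch of the layout (hypothetical simplification; the real class also handles element alignment):

    #include <cstddef>

    // One allocation holds the size header plus the elements, so the dedupe
    // sets can key on content while storing only one pointer per array.
    template <typename T>
    class LengthPrefixedSketch {
     public:
      explicit LengthPrefixedSketch(size_t size) : size_(size) {}

      size_t size() const { return size_; }
      const T& At(size_t index) const { return data_[index]; }
      T& At(size_t index) { return data_[index]; }

      // Bytes to allocate for `num` elements, header included.
      // Callers guarantee num >= 1 (empty arrays are stored as nullptr).
      static size_t ComputeSize(size_t num) {
        return sizeof(LengthPrefixedSketch<T>) + (num - 1u) * sizeof(T);
      }

     private:
      size_t size_;
      T data_[1];  // Elements continue past the object (allocation trick).
    };

CopyArray() in compiled_method_storage.cc below pairs this layout with placement new over a raw swap-space allocation.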
diff --git a/compiler/compiled_method_test.cc b/compiler/compiled_method_test.cc
new file mode 100644
index 0000000..99ee875
--- /dev/null
+++ b/compiler/compiled_method_test.cc
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "compiled_method.h"
+
+namespace art {
+
+TEST(CompiledMethod, SrcMapElemOperators) {
+ SrcMapElem elems[] = {
+ { 1u, -1 },
+ { 1u, 0 },
+ { 1u, 1 },
+ { 2u, -1 },
+ { 2u, 0 }, // Index 4.
+ { 2u, 1 },
+ { 2u, 0u }, // Index 6: arbitrary duplicate of the element at index 4.
+ };
+
+ for (size_t i = 0; i != arraysize(elems); ++i) {
+ for (size_t j = 0; j != arraysize(elems); ++j) {
+ bool expected = (i != 6u ? i : 4u) == (j != 6u ? j : 4u);
+ EXPECT_EQ(expected, elems[i] == elems[j]) << i << " " << j;
+ }
+ }
+
+ for (size_t i = 0; i != arraysize(elems); ++i) {
+ for (size_t j = 0; j != arraysize(elems); ++j) {
+ bool expected = (i != 6u ? i : 4u) < (j != 6u ? j : 4u);
+ EXPECT_EQ(expected, elems[i] < elems[j]) << i << " " << j;
+ }
+ }
+}
+
+TEST(CompiledMethod, LinkerPatchOperators) {
+ const DexFile* dex_file1 = reinterpret_cast<const DexFile*>(1);
+ const DexFile* dex_file2 = reinterpret_cast<const DexFile*>(2);
+ LinkerPatch patches[] = {
+ LinkerPatch::MethodPatch(16u, dex_file1, 1000u),
+ LinkerPatch::MethodPatch(16u, dex_file1, 1001u),
+ LinkerPatch::MethodPatch(16u, dex_file2, 1000u),
+ LinkerPatch::MethodPatch(16u, dex_file2, 1001u), // Index 3.
+ LinkerPatch::CodePatch(16u, dex_file1, 1000u),
+ LinkerPatch::CodePatch(16u, dex_file1, 1001u),
+ LinkerPatch::CodePatch(16u, dex_file2, 1000u),
+ LinkerPatch::CodePatch(16u, dex_file2, 1001u),
+ LinkerPatch::RelativeCodePatch(16u, dex_file1, 1000u),
+ LinkerPatch::RelativeCodePatch(16u, dex_file1, 1001u),
+ LinkerPatch::RelativeCodePatch(16u, dex_file2, 1000u),
+ LinkerPatch::RelativeCodePatch(16u, dex_file2, 1001u),
+ LinkerPatch::TypePatch(16u, dex_file1, 1000u),
+ LinkerPatch::TypePatch(16u, dex_file1, 1001u),
+ LinkerPatch::TypePatch(16u, dex_file2, 1000u),
+ LinkerPatch::TypePatch(16u, dex_file2, 1001u),
+ LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3000u, 2000u),
+ LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3001u, 2000u),
+ LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3000u, 2001u),
+ LinkerPatch::DexCacheArrayPatch(16u, dex_file1, 3001u, 2001u),
+ LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3000u, 2000u),
+ LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3001u, 2000u),
+ LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3000u, 2001u),
+ LinkerPatch::DexCacheArrayPatch(16u, dex_file2, 3001u, 2001u),
+ LinkerPatch::MethodPatch(32u, dex_file1, 1000u),
+ LinkerPatch::MethodPatch(32u, dex_file1, 1001u),
+ LinkerPatch::MethodPatch(32u, dex_file2, 1000u),
+ LinkerPatch::MethodPatch(32u, dex_file2, 1001u),
+ LinkerPatch::CodePatch(32u, dex_file1, 1000u),
+ LinkerPatch::CodePatch(32u, dex_file1, 1001u),
+ LinkerPatch::CodePatch(32u, dex_file2, 1000u),
+ LinkerPatch::CodePatch(32u, dex_file2, 1001u),
+ LinkerPatch::RelativeCodePatch(32u, dex_file1, 1000u),
+ LinkerPatch::RelativeCodePatch(32u, dex_file1, 1001u),
+ LinkerPatch::RelativeCodePatch(32u, dex_file2, 1000u),
+ LinkerPatch::RelativeCodePatch(32u, dex_file2, 1001u),
+ LinkerPatch::TypePatch(32u, dex_file1, 1000u),
+ LinkerPatch::TypePatch(32u, dex_file1, 1001u),
+ LinkerPatch::TypePatch(32u, dex_file2, 1000u),
+ LinkerPatch::TypePatch(32u, dex_file2, 1001u),
+ LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3000u, 2000u),
+ LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3001u, 2000u),
+ LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3000u, 2001u),
+ LinkerPatch::DexCacheArrayPatch(32u, dex_file1, 3001u, 2001u),
+ LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3000u, 2000u),
+ LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3001u, 2000u),
+ LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3000u, 2001u),
+ LinkerPatch::DexCacheArrayPatch(32u, dex_file2, 3001u, 2001u),
+ LinkerPatch::MethodPatch(16u, dex_file2, 1001u), // Identical to the patch at index 3.
+ };
+ constexpr size_t last_index = arraysize(patches) - 1u;
+
+ for (size_t i = 0; i != arraysize(patches); ++i) {
+ for (size_t j = 0; j != arraysize(patches); ++j) {
+ bool expected = (i != last_index ? i : 3u) == (j != last_index ? j : 3u);
+ EXPECT_EQ(expected, patches[i] == patches[j]) << i << " " << j;
+ }
+ }
+
+ for (size_t i = 0; i != arraysize(patches); ++i) {
+ for (size_t j = 0; j != arraysize(patches); ++j) {
+ bool expected = (i != last_index ? i : 3u) < (j != last_index ? j : 3u);
+ EXPECT_EQ(expected, patches[i] < patches[j]) << i << " " << j;
+ }
+ }
+}
+
+} // namespace art
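The expected/actual comparisons above exercise the non-member operator< and operator== added in compiled_method.h, which order SrcMapElem lexicographically by (from_, to_). The same ordering written with std::tie, as a sketch (Elem is a stand-in name):

    #include <cstdint>
    #include <tuple>

    struct Elem {
      uint32_t from_;
      int32_t to_;
    };

    // Equivalent to the hand-written operators: compare from_ first, then to_.
    inline bool operator<(const Elem& lhs, const Elem& rhs) {
      return std::tie(lhs.from_, lhs.to_) < std::tie(rhs.from_, rhs.to_);
    }

    inline bool operator==(const Elem& lhs, const Elem& rhs) {
      return std::tie(lhs.from_, lhs.to_) == std::tie(rhs.from_, rhs.to_);
    }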
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index ff7ddc1..4836041 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -356,7 +356,7 @@
0,
0,
0,
- nullptr, // src_mapping_table
+ ArrayRef<const SrcMapElem>(), // src_mapping_table
ArrayRef<const uint8_t>(), // mapping_table
ArrayRef<const uint8_t>(builder.GetData()), // vmap_table
ArrayRef<const uint8_t>(), // gc_map
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index cde99b3..d68835a 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -22,6 +22,7 @@
#endif
#include "base/bit_vector-inl.h"
+#include "base/stringprintf.h"
#include "dex/mir_graph.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
@@ -1165,7 +1166,7 @@
cu_->compiler_driver, cu_->instruction_set,
ArrayRef<const uint8_t>(code_buffer_),
frame_size_, core_spill_mask_, fp_spill_mask_,
- &src_mapping_table_,
+ ArrayRef<const SrcMapElem>(src_mapping_table_),
ArrayRef<const uint8_t>(encoded_mapping_table_),
ArrayRef<const uint8_t>(vmap_encoder.GetData()),
ArrayRef<const uint8_t>(native_gc_map_),
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index d9d0434..dceb118 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -18,6 +18,7 @@
#include "mir_to_lir-inl.h"
+#include "base/stringprintf.h"
#include "dex/compiler_ir.h"
#include "dex/dataflow_iterator-inl.h"
#include "dex/mir_graph.h"
diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc
new file mode 100644
index 0000000..bc5c6ca
--- /dev/null
+++ b/compiler/driver/compiled_method_storage.cc
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <ostream>
+
+#include "compiled_method_storage.h"
+
+#include "base/logging.h"
+#include "compiled_method.h"
+#include "thread-inl.h"
+#include "utils.h"
+#include "utils/dedupe_set-inl.h"
+#include "utils/swap_space.h"
+
+namespace art {
+
+namespace { // anonymous namespace
+
+template <typename T>
+const LengthPrefixedArray<T>* CopyArray(SwapSpace* swap_space, const ArrayRef<const T>& array) {
+ DCHECK(!array.empty());
+ SwapAllocator<uint8_t> allocator(swap_space);
+ void* storage = allocator.allocate(LengthPrefixedArray<T>::ComputeSize(array.size()));
+ LengthPrefixedArray<T>* array_copy = new(storage) LengthPrefixedArray<T>(array.size());
+ std::copy(array.begin(), array.end(), array_copy->begin());
+ return array_copy;
+}
+
+template <typename T>
+void ReleaseArray(SwapSpace* swap_space, const LengthPrefixedArray<T>* array) {
+ SwapAllocator<uint8_t> allocator(swap_space);
+ size_t size = LengthPrefixedArray<T>::ComputeSize(array->size());
+ array->~LengthPrefixedArray<T>();
+ allocator.deallocate(const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(array)), size);
+}
+
+} // anonymous namespace
+
+template <typename T, typename DedupeSetType>
+inline const LengthPrefixedArray<T>* CompiledMethodStorage::AllocateOrDeduplicateArray(
+ const ArrayRef<const T>& data,
+ DedupeSetType* dedupe_set) {
+ if (data.empty()) {
+ return nullptr;
+ } else if (!DedupeEnabled()) {
+ return CopyArray(swap_space_.get(), data);
+ } else {
+ return dedupe_set->Add(Thread::Current(), data);
+ }
+}
+
+template <typename T>
+inline void CompiledMethodStorage::ReleaseArrayIfNotDeduplicated(
+ const LengthPrefixedArray<T>* array) {
+ if (array != nullptr && !DedupeEnabled()) {
+ ReleaseArray(swap_space_.get(), array);
+ }
+}
+
+template <typename ContentType>
+class CompiledMethodStorage::DedupeHashFunc {
+ private:
+ static constexpr bool kUseMurmur3Hash = true;
+
+ public:
+ size_t operator()(const ArrayRef<ContentType>& array) const {
+ const uint8_t* data = reinterpret_cast<const uint8_t*>(array.data());
+ // TODO: More reasonable assertion.
+ // static_assert(IsPowerOfTwo(sizeof(ContentType)),
+ // "ContentType is not power of two, don't know whether array layout is as assumed");
+ uint32_t len = sizeof(ContentType) * array.size();
+ if (kUseMurmur3Hash) {
+ static constexpr uint32_t c1 = 0xcc9e2d51;
+ static constexpr uint32_t c2 = 0x1b873593;
+ static constexpr uint32_t r1 = 15;
+ static constexpr uint32_t r2 = 13;
+ static constexpr uint32_t m = 5;
+ static constexpr uint32_t n = 0xe6546b64;
+
+ uint32_t hash = 0;
+
+ const int nblocks = len / 4;
+ typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+ const unaligned_uint32_t *blocks = reinterpret_cast<const uint32_t*>(data);
+ int i;
+ for (i = 0; i < nblocks; i++) {
+ uint32_t k = blocks[i];
+ k *= c1;
+ k = (k << r1) | (k >> (32 - r1));
+ k *= c2;
+
+ hash ^= k;
+ hash = ((hash << r2) | (hash >> (32 - r2))) * m + n;
+ }
+
+ const uint8_t *tail = reinterpret_cast<const uint8_t*>(data + nblocks * 4);
+ uint32_t k1 = 0;
+
+ switch (len & 3) {
+ case 3:
+ k1 ^= tail[2] << 16;
+ FALLTHROUGH_INTENDED;
+ case 2:
+ k1 ^= tail[1] << 8;
+ FALLTHROUGH_INTENDED;
+ case 1:
+ k1 ^= tail[0];
+
+ k1 *= c1;
+ k1 = (k1 << r1) | (k1 >> (32 - r1));
+ k1 *= c2;
+ hash ^= k1;
+ }
+
+ hash ^= len;
+ hash ^= (hash >> 16);
+ hash *= 0x85ebca6b;
+ hash ^= (hash >> 13);
+ hash *= 0xc2b2ae35;
+ hash ^= (hash >> 16);
+
+ return hash;
+ } else {
+ size_t hash = 0x811c9dc5;
+ for (uint32_t i = 0; i < len; ++i) {
+ hash = (hash * 16777619) ^ data[i];
+ }
+ hash += hash << 13;
+ hash ^= hash >> 7;
+ hash += hash << 3;
+ hash ^= hash >> 17;
+ hash += hash << 5;
+ return hash;
+ }
+ }
+};
+
+template <typename T>
+class CompiledMethodStorage::LengthPrefixedArrayAlloc {
+ public:
+ explicit LengthPrefixedArrayAlloc(SwapSpace* swap_space)
+ : swap_space_(swap_space) {
+ }
+
+ const LengthPrefixedArray<T>* Copy(const ArrayRef<const T>& array) {
+ return CopyArray(swap_space_, array);
+ }
+
+ void Destroy(const LengthPrefixedArray<T>* array) {
+ ReleaseArray(swap_space_, array);
+ }
+
+ private:
+ SwapSpace* const swap_space_;
+};
+
+CompiledMethodStorage::CompiledMethodStorage(int swap_fd)
+ : swap_space_(swap_fd == -1 ? nullptr : new SwapSpace(swap_fd, 10 * MB)),
+ dedupe_enabled_(true),
+ dedupe_code_("dedupe code", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
+ dedupe_src_mapping_table_("dedupe source mapping table",
+ LengthPrefixedArrayAlloc<SrcMapElem>(swap_space_.get())),
+ dedupe_mapping_table_("dedupe mapping table",
+ LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
+ dedupe_vmap_table_("dedupe vmap table",
+ LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
+ dedupe_gc_map_("dedupe gc map", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
+ dedupe_cfi_info_("dedupe cfi info", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())),
+ dedupe_linker_patches_("dedupe cfi info",
+ LengthPrefixedArrayAlloc<LinkerPatch>(swap_space_.get())) {
+}
+
+CompiledMethodStorage::~CompiledMethodStorage() {
+ // All done by member destructors.
+}
+
+void CompiledMethodStorage::DumpMemoryUsage(std::ostream& os, bool extended) const {
+ if (swap_space_.get() != nullptr) {
+ os << " swap=" << PrettySize(swap_space_->GetSize());
+ }
+ if (extended) {
+ Thread* self = Thread::Current();
+ os << "\nCode dedupe: " << dedupe_code_.DumpStats(self);
+ os << "\nMapping table dedupe: " << dedupe_mapping_table_.DumpStats(self);
+ os << "\nVmap table dedupe: " << dedupe_vmap_table_.DumpStats(self);
+ os << "\nGC map dedupe: " << dedupe_gc_map_.DumpStats(self);
+ os << "\nCFI info dedupe: " << dedupe_cfi_info_.DumpStats(self);
+ }
+}
+
+const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateCode(
+ const ArrayRef<const uint8_t>& code) {
+ return AllocateOrDeduplicateArray(code, &dedupe_code_);
+}
+
+void CompiledMethodStorage::ReleaseCode(const LengthPrefixedArray<uint8_t>* code) {
+ ReleaseArrayIfNotDeduplicated(code);
+}
+
+const LengthPrefixedArray<SrcMapElem>* CompiledMethodStorage::DeduplicateSrcMappingTable(
+ const ArrayRef<const SrcMapElem>& src_map) {
+ return AllocateOrDeduplicateArray(src_map, &dedupe_src_mapping_table_);
+}
+
+void CompiledMethodStorage::ReleaseSrcMappingTable(const LengthPrefixedArray<SrcMapElem>* src_map) {
+ ReleaseArrayIfNotDeduplicated(src_map);
+}
+
+const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateMappingTable(
+ const ArrayRef<const uint8_t>& table) {
+ return AllocateOrDeduplicateArray(table, &dedupe_mapping_table_);
+}
+
+void CompiledMethodStorage::ReleaseMappingTable(const LengthPrefixedArray<uint8_t>* table) {
+ ReleaseArrayIfNotDeduplicated(table);
+}
+
+const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateVMapTable(
+ const ArrayRef<const uint8_t>& table) {
+ return AllocateOrDeduplicateArray(table, &dedupe_vmap_table_);
+}
+
+void CompiledMethodStorage::ReleaseVMapTable(const LengthPrefixedArray<uint8_t>* table) {
+ ReleaseArrayIfNotDeduplicated(table);
+}
+
+const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateGCMap(
+ const ArrayRef<const uint8_t>& gc_map) {
+ return AllocateOrDeduplicateArray(gc_map, &dedupe_gc_map_);
+}
+
+void CompiledMethodStorage::ReleaseGCMap(const LengthPrefixedArray<uint8_t>* gc_map) {
+ ReleaseArrayIfNotDeduplicated(gc_map);
+}
+
+const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateCFIInfo(
+ const ArrayRef<const uint8_t>& cfi_info) {
+ return AllocateOrDeduplicateArray(cfi_info, &dedupe_cfi_info_);
+}
+
+void CompiledMethodStorage::ReleaseCFIInfo(const LengthPrefixedArray<uint8_t>* cfi_info) {
+ ReleaseArrayIfNotDeduplicated(cfi_info);
+}
+
+const LengthPrefixedArray<LinkerPatch>* CompiledMethodStorage::DeduplicateLinkerPatches(
+ const ArrayRef<const LinkerPatch>& linker_patches) {
+ return AllocateOrDeduplicateArray(linker_patches, &dedupe_linker_patches_);
+}
+
+void CompiledMethodStorage::ReleaseLinkerPatches(
+ const LengthPrefixedArray<LinkerPatch>* linker_patches) {
+ ReleaseArrayIfNotDeduplicated(linker_patches);
+}
+
+} // namespace art
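Taken together, the storage provides one simple guarantee: with dedupe enabled (the default), byte-identical inputs come back as the same LengthPrefixedArray pointer, so pointer comparison doubles as content comparison. A usage sketch with the API above (illustrative fragment, not an additional test):

    CompiledMethodStorage storage(/* swap_fd= */ -1);  // -1: heap only, no swap file.
    const uint8_t raw[] = { 1u, 2u, 3u };
    const LengthPrefixedArray<uint8_t>* a =
        storage.DeduplicateCode(ArrayRef<const uint8_t>(raw));
    const LengthPrefixedArray<uint8_t>* b =
        storage.DeduplicateCode(ArrayRef<const uint8_t>(raw));
    CHECK_EQ(a, b);          // Identical bytes share one backing array.
    storage.ReleaseCode(b);  // Pair every Deduplicate*() with a Release*().
    storage.ReleaseCode(a);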
diff --git a/compiler/driver/compiled_method_storage.h b/compiler/driver/compiled_method_storage.h
new file mode 100644
index 0000000..ef10b67
--- /dev/null
+++ b/compiler/driver/compiled_method_storage.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DRIVER_COMPILED_METHOD_STORAGE_H_
+#define ART_COMPILER_DRIVER_COMPILED_METHOD_STORAGE_H_
+
+#include <iosfwd>
+#include <memory>
+
+#include "base/macros.h"
+#include "length_prefixed_array.h"
+#include "utils/array_ref.h"
+#include "utils/dedupe_set.h"
+#include "utils/swap_space.h"
+
+namespace art {
+
+class LinkerPatch;
+class SrcMapElem;
+
+class CompiledMethodStorage {
+ public:
+ explicit CompiledMethodStorage(int swap_fd);
+ ~CompiledMethodStorage();
+
+ void DumpMemoryUsage(std::ostream& os, bool extended) const;
+
+ void SetDedupeEnabled(bool dedupe_enabled) {
+ dedupe_enabled_ = dedupe_enabled;
+ }
+ bool DedupeEnabled() const {
+ return dedupe_enabled_;
+ }
+
+ SwapAllocator<void> GetSwapSpaceAllocator() {
+ return SwapAllocator<void>(swap_space_.get());
+ }
+
+ const LengthPrefixedArray<uint8_t>* DeduplicateCode(const ArrayRef<const uint8_t>& code);
+ void ReleaseCode(const LengthPrefixedArray<uint8_t>* code);
+
+ const LengthPrefixedArray<SrcMapElem>* DeduplicateSrcMappingTable(
+ const ArrayRef<const SrcMapElem>& src_map);
+ void ReleaseSrcMappingTable(const LengthPrefixedArray<SrcMapElem>* src_map);
+
+ const LengthPrefixedArray<uint8_t>* DeduplicateMappingTable(const ArrayRef<const uint8_t>& table);
+ void ReleaseMappingTable(const LengthPrefixedArray<uint8_t>* table);
+
+ const LengthPrefixedArray<uint8_t>* DeduplicateVMapTable(const ArrayRef<const uint8_t>& table);
+ void ReleaseVMapTable(const LengthPrefixedArray<uint8_t>* table);
+
+ const LengthPrefixedArray<uint8_t>* DeduplicateGCMap(const ArrayRef<const uint8_t>& gc_map);
+ void ReleaseGCMap(const LengthPrefixedArray<uint8_t>* gc_map);
+
+ const LengthPrefixedArray<uint8_t>* DeduplicateCFIInfo(const ArrayRef<const uint8_t>& cfi_info);
+ void ReleaseCFIInfo(const LengthPrefixedArray<uint8_t>* cfi_info);
+
+ const LengthPrefixedArray<LinkerPatch>* DeduplicateLinkerPatches(
+ const ArrayRef<const LinkerPatch>& linker_patches);
+ void ReleaseLinkerPatches(const LengthPrefixedArray<LinkerPatch>* linker_patches);
+
+ private:
+ template <typename T, typename DedupeSetType>
+ const LengthPrefixedArray<T>* AllocateOrDeduplicateArray(const ArrayRef<const T>& data,
+ DedupeSetType* dedupe_set);
+
+ template <typename T>
+ void ReleaseArrayIfNotDeduplicated(const LengthPrefixedArray<T>* array);
+
+ // DeDuplication data structures.
+ template <typename ContentType>
+ class DedupeHashFunc;
+
+ template <typename T>
+ class LengthPrefixedArrayAlloc;
+
+ template <typename T>
+ using ArrayDedupeSet = DedupeSet<ArrayRef<const T>,
+ LengthPrefixedArray<T>,
+ LengthPrefixedArrayAlloc<T>,
+ size_t,
+ DedupeHashFunc<const T>,
+ 4>;
+
+ // Swap pool and allocator used for native allocations. May be file-backed. Needs to be first
+ // as other fields rely on this.
+ std::unique_ptr<SwapSpace> swap_space_;
+
+ bool dedupe_enabled_;
+
+ ArrayDedupeSet<uint8_t> dedupe_code_;
+ ArrayDedupeSet<SrcMapElem> dedupe_src_mapping_table_;
+ ArrayDedupeSet<uint8_t> dedupe_mapping_table_;
+ ArrayDedupeSet<uint8_t> dedupe_vmap_table_;
+ ArrayDedupeSet<uint8_t> dedupe_gc_map_;
+ ArrayDedupeSet<uint8_t> dedupe_cfi_info_;
+ ArrayDedupeSet<LinkerPatch> dedupe_linker_patches_;
+
+ DISALLOW_COPY_AND_ASSIGN(CompiledMethodStorage);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_DRIVER_COMPILED_METHOD_STORAGE_H_
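ArrayDedupeSet<T> instantiates DedupeSet with a shard count of 4: the hash selects one of several independently locked shards, so concurrent compiler threads rarely contend on one lock. A hypothetical sketch of that sharding idea (not art's actual DedupeSet, which also caches hashes and allocates through LengthPrefixedArrayAlloc):

    #include <cstddef>
    #include <mutex>
    #include <set>

    template <typename T, size_t kShards>
    class ShardedSet {
     public:
      const T* Add(const T& value, size_t hash) {
        Shard& shard = shards_[hash % kShards];  // Hash picks the shard.
        std::lock_guard<std::mutex> lock(shard.mutex);
        return &*shard.values.insert(value).first;  // Unique copy per content.
      }

     private:
      struct Shard {
        std::mutex mutex;     // Each shard locks independently.
        std::set<T> values;
      };
      Shard shards_[kShards];
    };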
diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc
new file mode 100644
index 0000000..c6dbd24
--- /dev/null
+++ b/compiler/driver/compiled_method_storage_test.cc
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "compiled_method_storage.h"
+#include "compiled_method.h"
+#include "compiler_driver.h"
+#include "compiler_options.h"
+#include "dex/verification_results.h"
+#include "dex/quick/dex_file_to_method_inliner_map.h"
+
+namespace art {
+
+TEST(CompiledMethodStorage, Deduplicate) {
+ CompilerOptions compiler_options;
+ VerificationResults verification_results(&compiler_options);
+ DexFileToMethodInlinerMap method_inliner_map;
+ CompilerDriver driver(&compiler_options,
+ &verification_results,
+ &method_inliner_map,
+ Compiler::kOptimizing, kNone,
+ nullptr,
+ false,
+ nullptr,
+ nullptr,
+ nullptr,
+ 1u,
+ false,
+ false,
+ "",
+ false,
+ nullptr,
+ -1,
+ "");
+ CompiledMethodStorage* storage = driver.GetCompiledMethodStorage();
+
+ ASSERT_TRUE(storage->DedupeEnabled()); // The default.
+
+ const uint8_t raw_code1[] = { 1u, 2u, 3u };
+ const uint8_t raw_code2[] = { 4u, 3u, 2u, 1u };
+ ArrayRef<const uint8_t> code[] = {
+ ArrayRef<const uint8_t>(raw_code1),
+ ArrayRef<const uint8_t>(raw_code2),
+ };
+ const SrcMapElem raw_src_map1[] = { { 1u, 2u }, { 3u, 4u }, { 5u, 6u } };
+ const SrcMapElem raw_src_map2[] = { { 8u, 7u }, { 6u, 5u }, { 4u, 3u }, { 2u, 1u } };
+ ArrayRef<const SrcMapElem> src_map[] = {
+ ArrayRef<const SrcMapElem>(raw_src_map1),
+ ArrayRef<const SrcMapElem>(raw_src_map2),
+ };
+ const uint8_t raw_mapping_table1[] = { 5, 6, 7 };
+ const uint8_t raw_mapping_table2[] = { 7, 6, 5, 4 };
+ ArrayRef<const uint8_t> mapping_table[] = {
+ ArrayRef<const uint8_t>(raw_mapping_table1),
+ ArrayRef<const uint8_t>(raw_mapping_table2),
+ };
+ const uint8_t raw_vmap_table1[] = { 2, 4, 6 };
+ const uint8_t raw_vmap_table2[] = { 7, 5, 3, 1 };
+ ArrayRef<const uint8_t> vmap_table[] = {
+ ArrayRef<const uint8_t>(raw_vmap_table1),
+ ArrayRef<const uint8_t>(raw_vmap_table2),
+ };
+ const uint8_t raw_gc_map1[] = { 9, 8, 7 };
+ const uint8_t raw_gc_map2[] = { 6, 7, 8, 9 };
+ ArrayRef<const uint8_t> gc_map[] = {
+ ArrayRef<const uint8_t>(raw_gc_map1),
+ ArrayRef<const uint8_t>(raw_gc_map2),
+ };
+ const uint8_t raw_cfi_info1[] = { 1, 3, 5 };
+ const uint8_t raw_cfi_info2[] = { 8, 6, 4, 2 };
+ ArrayRef<const uint8_t> cfi_info[] = {
+ ArrayRef<const uint8_t>(raw_cfi_info1),
+ ArrayRef<const uint8_t>(raw_cfi_info2),
+ };
+ const LinkerPatch raw_patches1[] = {
+ LinkerPatch::CodePatch(0u, nullptr, 1u),
+ LinkerPatch::MethodPatch(4u, nullptr, 1u),
+ };
+ const LinkerPatch raw_patches2[] = {
+ LinkerPatch::CodePatch(0u, nullptr, 1u),
+ LinkerPatch::MethodPatch(4u, nullptr, 2u),
+ };
+ ArrayRef<const LinkerPatch> patches[] = {
+ ArrayRef<const LinkerPatch>(raw_patches1),
+ ArrayRef<const LinkerPatch>(raw_patches2),
+ };
+
+ std::vector<CompiledMethod*> compiled_methods;
+ compiled_methods.reserve(1u << 7);
+ for (auto&& c : code) {
+ for (auto&& s : src_map) {
+ for (auto&& m : mapping_table) {
+ for (auto&& v : vmap_table) {
+ for (auto&& g : gc_map) {
+ for (auto&& f : cfi_info) {
+ for (auto&& p : patches) {
+ compiled_methods.push_back(CompiledMethod::SwapAllocCompiledMethod(
+ &driver, kNone, c, 0u, 0u, 0u, s, m, v, g, f, p));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ constexpr size_t code_bit = 1u << 6;
+ constexpr size_t src_map_bit = 1u << 5;
+ constexpr size_t mapping_table_bit = 1u << 4;
+ constexpr size_t vmap_table_bit = 1u << 3;
+ constexpr size_t gc_map_bit = 1u << 2;
+ constexpr size_t cfi_info_bit = 1u << 1;
+ constexpr size_t patches_bit = 1u << 0;
+ CHECK_EQ(compiled_methods.size(), 1u << 7);
+ for (size_t i = 0; i != compiled_methods.size(); ++i) {
+ for (size_t j = 0; j != compiled_methods.size(); ++j) {
+ CompiledMethod* lhs = compiled_methods[i];
+ CompiledMethod* rhs = compiled_methods[j];
+ bool same_code = ((i ^ j) & code_bit) == 0u;
+ bool same_src_map = ((i ^ j) & src_map_bit) == 0u;
+ bool same_mapping_table = ((i ^ j) & mapping_table_bit) == 0u;
+ bool same_vmap_table = ((i ^ j) & vmap_table_bit) == 0u;
+ bool same_gc_map = ((i ^ j) & gc_map_bit) == 0u;
+ bool same_cfi_info = ((i ^ j) & cfi_info_bit) == 0u;
+ bool same_patches = ((i ^ j) & patches_bit) == 0u;
+ ASSERT_EQ(same_code, lhs->GetQuickCode().data() == rhs->GetQuickCode().data())
+ << i << " " << j;
+ ASSERT_EQ(same_src_map, lhs->GetSrcMappingTable().data() == rhs->GetSrcMappingTable().data())
+ << i << " " << j;
+ ASSERT_EQ(same_mapping_table, lhs->GetMappingTable().data() == rhs->GetMappingTable().data())
+ << i << " " << j;
+ ASSERT_EQ(same_vmap_table, lhs->GetVmapTable().data() == rhs->GetVmapTable().data())
+ << i << " " << j;
+ ASSERT_EQ(same_gc_map, lhs->GetGcMap().data() == rhs->GetGcMap().data())
+ << i << " " << j;
+ ASSERT_EQ(same_cfi_info, lhs->GetCFIInfo().data() == rhs->GetCFIInfo().data())
+ << i << " " << j;
+ ASSERT_EQ(same_patches, lhs->GetPatches().data() == rhs->GetPatches().data())
+ << i << " " << j;
+ }
+ }
+ for (CompiledMethod* method : compiled_methods) {
+ CompiledMethod::ReleaseSwapAllocatedCompiledMethod(&driver, method);
+ }
+}
+
+} // namespace art
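The test encodes each of the seven inputs in one bit of the method's index, so (i ^ j) & bit is zero exactly when methods i and j were built from the same candidate array for that input. A compact check of that identity (values are arbitrary):

    #include <cassert>
    #include <cstddef>

    int main() {
      constexpr size_t src_map_bit = 1u << 5;  // Same constant as in the test.
      size_t i = 0b1010110u;
      size_t j = i ^ src_map_bit;              // Flip only the source map choice.
      assert(((i ^ j) & src_map_bit) != 0u);   // Source maps differ...
      assert((i ^ j) == src_map_bit);          // ...and everything else matches.
      return 0;
    }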
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index b956584..8750aa8 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -348,9 +348,8 @@
const std::string& dump_cfg_file_name, bool dump_cfg_append,
CumulativeLogger* timer, int swap_fd,
const std::string& profile_file)
- : swap_space_(swap_fd == -1 ? nullptr : new SwapSpace(swap_fd, 10 * MB)),
- swap_space_allocator_(new SwapAllocator<void>(swap_space_.get())),
- profile_present_(false), compiler_options_(compiler_options),
+ : profile_present_(false),
+ compiler_options_(compiler_options),
verification_results_(verification_results),
method_inliner_map_(method_inliner_map),
compiler_(Compiler::Create(this, compiler_kind)),
@@ -369,7 +368,6 @@
had_hard_verifier_failure_(false),
thread_count_(thread_count),
stats_(new AOTCompilationStats),
- dedupe_enabled_(true),
dump_stats_(dump_stats),
dump_passes_(dump_passes),
dump_cfg_file_name_(dump_cfg_file_name),
@@ -377,12 +375,7 @@
timings_logger_(timer),
compiler_context_(nullptr),
support_boot_image_fixup_(instruction_set != kMips && instruction_set != kMips64),
- dedupe_code_("dedupe code", *swap_space_allocator_),
- dedupe_src_mapping_table_("dedupe source mapping table", *swap_space_allocator_),
- dedupe_mapping_table_("dedupe mapping table", *swap_space_allocator_),
- dedupe_vmap_table_("dedupe vmap table", *swap_space_allocator_),
- dedupe_gc_map_("dedupe gc map", *swap_space_allocator_),
- dedupe_cfi_info_("dedupe cfi info", *swap_space_allocator_) {
+ compiled_method_storage_(swap_fd) {
DCHECK(compiler_options_ != nullptr);
DCHECK(verification_results_ != nullptr);
DCHECK(method_inliner_map_ != nullptr);
@@ -402,36 +395,6 @@
}
}
-SwapVector<uint8_t>* CompilerDriver::DeduplicateCode(const ArrayRef<const uint8_t>& code) {
- DCHECK(dedupe_enabled_);
- return dedupe_code_.Add(Thread::Current(), code);
-}
-
-SwapSrcMap* CompilerDriver::DeduplicateSrcMappingTable(const ArrayRef<SrcMapElem>& src_map) {
- DCHECK(dedupe_enabled_);
- return dedupe_src_mapping_table_.Add(Thread::Current(), src_map);
-}
-
-SwapVector<uint8_t>* CompilerDriver::DeduplicateMappingTable(const ArrayRef<const uint8_t>& code) {
- DCHECK(dedupe_enabled_);
- return dedupe_mapping_table_.Add(Thread::Current(), code);
-}
-
-SwapVector<uint8_t>* CompilerDriver::DeduplicateVMapTable(const ArrayRef<const uint8_t>& code) {
- DCHECK(dedupe_enabled_);
- return dedupe_vmap_table_.Add(Thread::Current(), code);
-}
-
-SwapVector<uint8_t>* CompilerDriver::DeduplicateGCMap(const ArrayRef<const uint8_t>& code) {
- DCHECK(dedupe_enabled_);
- return dedupe_gc_map_.Add(Thread::Current(), code);
-}
-
-SwapVector<uint8_t>* CompilerDriver::DeduplicateCFIInfo(const ArrayRef<const uint8_t>& cfi_info) {
- DCHECK(dedupe_enabled_);
- return dedupe_cfi_info_.Add(Thread::Current(), cfi_info);
-}
-
CompilerDriver::~CompilerDriver() {
Thread* self = Thread::Current();
{
@@ -447,6 +410,7 @@
compiler_->UnInit();
}
+
#define CREATE_TRAMPOLINE(type, abi, offset) \
if (Is64BitInstructionSet(instruction_set_)) { \
return CreateTrampoline64(instruction_set_, abi, \
@@ -2642,16 +2606,7 @@
oss << " native alloc=" << PrettySize(allocated_space) << " free="
<< PrettySize(free_space);
#endif
- if (swap_space_.get() != nullptr) {
- oss << " swap=" << PrettySize(swap_space_->GetSize());
- }
- if (extended) {
- oss << "\nCode dedupe: " << dedupe_code_.DumpStats();
- oss << "\nMapping table dedupe: " << dedupe_mapping_table_.DumpStats();
- oss << "\nVmap table dedupe: " << dedupe_vmap_table_.DumpStats();
- oss << "\nGC map dedupe: " << dedupe_gc_map_.DumpStats();
- oss << "\nCFI info dedupe: " << dedupe_cfi_info_.DumpStats();
- }
+ compiled_method_storage_.DumpMemoryUsage(oss, extended);
return oss.str();
}
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 0dc8261..485cdcf 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -30,6 +30,7 @@
#include "class_reference.h"
#include "compiler.h"
#include "dex_file.h"
+#include "driver/compiled_method_storage.h"
#include "invoke_type.h"
#include "method_reference.h"
#include "mirror/class.h" // For mirror::Class::Status.
@@ -38,10 +39,7 @@
#include "runtime.h"
#include "safe_map.h"
#include "thread_pool.h"
-#include "utils/array_ref.h"
-#include "utils/dedupe_set.h"
#include "utils/dex_cache_arrays_layout.h"
-#include "utils/swap_space.h"
namespace art {
@@ -80,8 +78,6 @@
kQuickAbi
};
-static constexpr bool kUseMurmur3Hash = true;
-
class CompilerDriver {
public:
// Create a compiler targeting the requested "instruction_set".
@@ -388,10 +384,6 @@
support_boot_image_fixup_ = support_boot_image_fixup;
}
- SwapAllocator<void>& GetSwapSpaceAllocator() {
- return *swap_space_allocator_.get();
- }
-
bool WriteElf(const std::string& android_root,
bool is_host,
const std::vector<const DexFile*>& dex_files,
@@ -431,10 +423,10 @@
}
void SetDedupeEnabled(bool dedupe_enabled) {
- dedupe_enabled_ = dedupe_enabled;
+ compiled_method_storage_.SetDedupeEnabled(dedupe_enabled);
}
bool DedupeEnabled() const {
- return dedupe_enabled_;
+ return compiled_method_storage_.DedupeEnabled();
}
// Checks if class specified by type_idx is one of the image_classes_
@@ -455,13 +447,6 @@
uint16_t class_def_idx,
const DexFile& dex_file) const;
- SwapVector<uint8_t>* DeduplicateCode(const ArrayRef<const uint8_t>& code);
- SwapSrcMap* DeduplicateSrcMappingTable(const ArrayRef<SrcMapElem>& src_map);
- SwapVector<uint8_t>* DeduplicateMappingTable(const ArrayRef<const uint8_t>& code);
- SwapVector<uint8_t>* DeduplicateVMapTable(const ArrayRef<const uint8_t>& code);
- SwapVector<uint8_t>* DeduplicateGCMap(const ArrayRef<const uint8_t>& code);
- SwapVector<uint8_t>* DeduplicateCFIInfo(const ArrayRef<const uint8_t>& cfi_info);
-
// Should the compiler run on this method given profile information?
bool SkipCompilation(const std::string& method_name);
@@ -479,6 +464,10 @@
return compiler_kind_;
}
+ CompiledMethodStorage* GetCompiledMethodStorage() {
+ return &compiled_method_storage_;
+ }
+
private:
// Return whether the declaring class of `resolved_member` is
// available to `referrer_class` for read or write access using two
@@ -599,11 +588,6 @@
ThreadPool* thread_pool, TimingLogger* timings)
REQUIRES(!Locks::mutator_lock_);
- // Swap pool and allocator used for native allocations. May be file-backed. Needs to be first
- // as other fields rely on this.
- std::unique_ptr<SwapSpace> swap_space_;
- std::unique_ptr<SwapAllocator<void> > swap_space_allocator_;
-
ProfileFile profile_file_;
bool profile_present_;
@@ -663,7 +647,6 @@
class AOTCompilationStats;
std::unique_ptr<AOTCompilationStats> stats_;
- bool dedupe_enabled_;
bool dump_stats_;
const bool dump_passes_;
const std::string dump_cfg_file_name_;
@@ -678,93 +661,7 @@
bool support_boot_image_fixup_;
- // DeDuplication data structures, these own the corresponding byte arrays.
- template <typename ContentType>
- class DedupeHashFunc {
- public:
- size_t operator()(const ArrayRef<ContentType>& array) const {
- const uint8_t* data = reinterpret_cast<const uint8_t*>(array.data());
- static_assert(IsPowerOfTwo(sizeof(ContentType)),
- "ContentType is not power of two, don't know whether array layout is as assumed");
- uint32_t len = sizeof(ContentType) * array.size();
- if (kUseMurmur3Hash) {
- static constexpr uint32_t c1 = 0xcc9e2d51;
- static constexpr uint32_t c2 = 0x1b873593;
- static constexpr uint32_t r1 = 15;
- static constexpr uint32_t r2 = 13;
- static constexpr uint32_t m = 5;
- static constexpr uint32_t n = 0xe6546b64;
-
- uint32_t hash = 0;
-
- const int nblocks = len / 4;
- typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
- const unaligned_uint32_t *blocks = reinterpret_cast<const uint32_t*>(data);
- int i;
- for (i = 0; i < nblocks; i++) {
- uint32_t k = blocks[i];
- k *= c1;
- k = (k << r1) | (k >> (32 - r1));
- k *= c2;
-
- hash ^= k;
- hash = ((hash << r2) | (hash >> (32 - r2))) * m + n;
- }
-
- const uint8_t *tail = reinterpret_cast<const uint8_t*>(data + nblocks * 4);
- uint32_t k1 = 0;
-
- switch (len & 3) {
- case 3:
- k1 ^= tail[2] << 16;
- FALLTHROUGH_INTENDED;
- case 2:
- k1 ^= tail[1] << 8;
- FALLTHROUGH_INTENDED;
- case 1:
- k1 ^= tail[0];
-
- k1 *= c1;
- k1 = (k1 << r1) | (k1 >> (32 - r1));
- k1 *= c2;
- hash ^= k1;
- }
-
- hash ^= len;
- hash ^= (hash >> 16);
- hash *= 0x85ebca6b;
- hash ^= (hash >> 13);
- hash *= 0xc2b2ae35;
- hash ^= (hash >> 16);
-
- return hash;
- } else {
- size_t hash = 0x811c9dc5;
- for (uint32_t i = 0; i < len; ++i) {
- hash = (hash * 16777619) ^ data[i];
- }
- hash += hash << 13;
- hash ^= hash >> 7;
- hash += hash << 3;
- hash ^= hash >> 17;
- hash += hash << 5;
- return hash;
- }
- }
- };
-
- DedupeSet<ArrayRef<const uint8_t>,
- SwapVector<uint8_t>, size_t, DedupeHashFunc<const uint8_t>, 4> dedupe_code_;
- DedupeSet<ArrayRef<SrcMapElem>,
- SwapSrcMap, size_t, DedupeHashFunc<SrcMapElem>, 4> dedupe_src_mapping_table_;
- DedupeSet<ArrayRef<const uint8_t>,
- SwapVector<uint8_t>, size_t, DedupeHashFunc<const uint8_t>, 4> dedupe_mapping_table_;
- DedupeSet<ArrayRef<const uint8_t>,
- SwapVector<uint8_t>, size_t, DedupeHashFunc<const uint8_t>, 4> dedupe_vmap_table_;
- DedupeSet<ArrayRef<const uint8_t>,
- SwapVector<uint8_t>, size_t, DedupeHashFunc<const uint8_t>, 4> dedupe_gc_map_;
- DedupeSet<ArrayRef<const uint8_t>,
- SwapVector<uint8_t>, size_t, DedupeHashFunc<const uint8_t>, 4> dedupe_cfi_info_;
+ CompiledMethodStorage compiled_method_storage_;
friend class CompileClassVisitor;
DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
diff --git a/compiler/dwarf/dwarf_test.cc b/compiler/dwarf/dwarf_test.cc
index a07d27c..3ba380e 100644
--- a/compiler/dwarf/dwarf_test.cc
+++ b/compiler/dwarf/dwarf_test.cc
@@ -126,7 +126,7 @@
initial_opcodes, kCFIFormat, &debug_frame_data_);
std::vector<uintptr_t> debug_frame_patches;
std::vector<uintptr_t> expected_patches { 28 }; // NOLINT
- WriteDebugFrameFDE(is64bit, 0, 0x01000000, 0x01000000, opcodes.data(),
+ WriteDebugFrameFDE(is64bit, 0, 0x01000000, 0x01000000, ArrayRef<const uint8_t>(*opcodes.data()),
kCFIFormat, &debug_frame_data_, &debug_frame_patches);
EXPECT_EQ(expected_patches, debug_frame_patches);
@@ -142,7 +142,8 @@
std::vector<uintptr_t> debug_frame_patches;
std::vector<uintptr_t> expected_patches { 32 }; // NOLINT
WriteDebugFrameFDE(is64bit, 0, 0x0100000000000000, 0x0200000000000000,
- opcodes.data(), kCFIFormat, &debug_frame_data_, &debug_frame_patches);
+ ArrayRef<const uint8_t>(*opcodes.data()),
+ kCFIFormat, &debug_frame_data_, &debug_frame_patches);
DW_CHECK("FDE cie=00000000 pc=100000000000000..300000000000000");
EXPECT_EQ(expected_patches, debug_frame_patches);
@@ -179,7 +180,8 @@
initial_opcodes, kCFIFormat, &debug_frame_data_);
std::vector<uintptr_t> debug_frame_patches;
WriteDebugFrameFDE(is64bit, 0, 0x0100000000000000, 0x0200000000000000,
- opcodes.data(), kCFIFormat, &debug_frame_data_, &debug_frame_patches);
+ ArrayRef<const uint8_t>(*opcodes.data()),
+ kCFIFormat, &debug_frame_data_, &debug_frame_patches);
CheckObjdumpOutput(is64bit, "-W");
}
diff --git a/compiler/dwarf/headers.h b/compiler/dwarf/headers.h
index b7eff19..f3fba4b 100644
--- a/compiler/dwarf/headers.h
+++ b/compiler/dwarf/headers.h
@@ -25,6 +25,7 @@
#include "dwarf/dwarf_constants.h"
#include "dwarf/register.h"
#include "dwarf/writer.h"
+#include "utils/array_ref.h"
namespace art {
namespace dwarf {
@@ -70,21 +71,19 @@
writer.PushUint8(DW_EH_PE_absptr | DW_EH_PE_udata4); // R: Pointer encoding.
}
}
- writer.PushData(opcodes.data());
+ writer.PushData(*opcodes.data());
writer.Pad(is64bit ? 8 : 4);
writer.UpdateUint32(cie_header_start_, writer.data()->size() - cie_header_start_ - 4);
}
// Write frame description entry (FDE) to .debug_frame or .eh_frame section.
-template<typename Vector>
+inline
void WriteDebugFrameFDE(bool is64bit, size_t cie_offset,
uint64_t initial_address, uint64_t address_range,
- const Vector* opcodes,
+ const ArrayRef<const uint8_t>& opcodes,
CFIFormat format,
std::vector<uint8_t>* debug_frame,
std::vector<uintptr_t>* debug_frame_patches) {
- static_assert(std::is_same<typename Vector::value_type, uint8_t>::value, "Invalid value type");
-
Writer<> writer(debug_frame);
size_t fde_header_start = writer.data()->size();
writer.PushUint32(0); // Length placeholder.
@@ -125,7 +124,7 @@
writer.PushUint32(debug_abbrev_offset);
writer.PushUint8(entries.Is64bit() ? 8 : 4);
size_t entries_offset = writer.data()->size();
- writer.PushData(entries.data());
+ writer.PushData(*entries.data());
writer.UpdateUint32(start, writer.data()->size() - start - 4);
// Copy patch locations and make them relative to .debug_info section.
for (uintptr_t patch_location : entries.GetPatchLocations()) {
@@ -181,7 +180,7 @@
writer.PushUint8(0); // Terminate file list.
writer.UpdateUint32(header_length_pos, writer.data()->size() - header_length_pos - 4);
size_t opcodes_offset = writer.data()->size();
- writer.PushData(opcodes.data());
+ writer.PushData(*opcodes.data());
writer.UpdateUint32(header_start, writer.data()->size() - header_start - 4);
// Copy patch locations and make them relative to .debug_line section.
for (uintptr_t patch_location : opcodes.GetPatchLocations()) {
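
[Editor's note] Throughout this change, owned `SwapVector<uint8_t>*` parameters become `ArrayRef<const uint8_t>` views. A minimal sketch of such a non-owning view (simplified; art::ArrayRef has more conversions) shows why a default-constructed empty view can stand in for the old nullptr and why a vector converts without copying:

    #include <cstddef>
    #include <cstdint>
    #include <type_traits>
    #include <vector>

    // Minimal non-owning array view in the spirit of art::ArrayRef (simplified).
    template <typename T>
    class SpanView {
     public:
      SpanView() : data_(nullptr), size_(0u) {}  // Empty view: replaces nullptr.

      // Cheap conversion from a vector; stores just pointer + size.
      explicit SpanView(const std::vector<typename std::remove_const<T>::type>& v)
          : data_(v.data()), size_(v.size()) {}

      const T* data() const { return data_; }
      size_t size() const { return size_; }
      bool empty() const { return size_ == 0u; }
      const T* begin() const { return data_; }
      const T* end() const { return data_ + size_; }

     private:
      const T* data_;
      size_t size_;
    };

    // Usage mirroring the new call sites: pass a view of the opcode bytes.
    inline size_t CountOpcodeBytes(SpanView<const uint8_t> opcodes) {
      return opcodes.empty() ? 0u : opcodes.size();
    }
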
diff --git a/compiler/dwarf/writer.h b/compiler/dwarf/writer.h
index 42c32c4..00b9dfa 100644
--- a/compiler/dwarf/writer.h
+++ b/compiler/dwarf/writer.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_DWARF_WRITER_H_
#define ART_COMPILER_DWARF_WRITER_H_
+#include <type_traits>
#include <vector>
#include "base/bit_utils.h"
#include "base/logging.h"
@@ -119,9 +120,10 @@
}
template<typename Vector2>
- void PushData(const Vector2* buffer) {
- static_assert(std::is_same<typename Vector2::value_type, uint8_t>::value, "Invalid value type");
- data_->insert(data_->end(), buffer->begin(), buffer->end());
+ void PushData(const Vector2& buffer) {
+ static_assert(std::is_same<typename std::add_const<typename Vector2::value_type>::type,
+ const uint8_t>::value, "Invalid value type");
+ data_->insert(data_->end(), buffer.begin(), buffer.end());
}
void UpdateUint32(size_t offset, uint32_t value) {
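
[Editor's note] The reworked assertion above must accept both std::vector<uint8_t> (value_type uint8_t) and ArrayRef<const uint8_t> (value_type const uint8_t) — note it has to check the Vector2 argument type, not the writer's own Vector, or the check is vacuous. A compilable illustration of why std::add_const lets one check cover both cases:

    #include <cstdint>
    #include <type_traits>

    // Plain uint8_t passes once add_const normalizes it...
    static_assert(std::is_same<std::add_const<uint8_t>::type, const uint8_t>::value,
                  "std::vector<uint8_t>::value_type is accepted");
    // ...and an already-const value_type is left unchanged by add_const.
    static_assert(std::is_same<std::add_const<const uint8_t>::type, const uint8_t>::value,
                  "ArrayRef<const uint8_t>::value_type is accepted");
    // Without the normalization, a direct comparison would reject one of them.
    static_assert(!std::is_same<uint8_t, const uint8_t>::value,
                  "direct is_same would fail for the const case");
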
diff --git a/compiler/elf_writer_debug.cc b/compiler/elf_writer_debug.cc
index c10ffeb..3a9e312 100644
--- a/compiler/elf_writer_debug.cc
+++ b/compiler/elf_writer_debug.cc
@@ -182,8 +182,8 @@
WriteDebugFrameCIE(isa, address_type, format, debug_frame);
for (const OatWriter::DebugInfo& mi : method_infos) {
if (!mi.deduped_) { // Only one FDE per unique address.
- const SwapVector<uint8_t>* opcodes = mi.compiled_method_->GetCFIInfo();
- if (opcodes != nullptr) {
+ ArrayRef<const uint8_t> opcodes = mi.compiled_method_->GetCFIInfo();
+ if (!opcodes.empty()) {
address_to_fde_offset_map.emplace(mi.low_pc_, debug_frame->size());
WriteDebugFrameFDE(Is64BitInstructionSet(isa), cie_offset,
mi.low_pc_, mi.high_pc_ - mi.low_pc_,
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 4310be6..0e5a97f 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -796,7 +796,7 @@
offset, kNativeObjectRelocationTypeArtFieldArray });
offset += header_size;
// Forward individual fields so that we can quickly find where they belong.
- for (size_t i = 0, count = cur_fields->Length(); i < count; ++i) {
+ for (size_t i = 0, count = cur_fields->size(); i < count; ++i) {
// Need to forward arrays separate of fields.
ArtField* field = &cur_fields->At(i);
auto it2 = native_object_relocations_.find(field);
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 3d1b42f..b563c80 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -160,7 +160,7 @@
Runtime* runtime = Runtime::Current();
// Check if the method is already compiled.
- if (runtime->GetJit()->GetCodeCache()->ContainsMethod(method)) {
+ if (runtime->GetJit()->GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
VLOG(jit) << "Already compiled " << PrettyMethod(method);
return true;
}
@@ -207,10 +207,7 @@
result = true;
} else {
TimingLogger::ScopedTiming t2("LinkCode", &logger);
- OatFile::OatMethod oat_method(nullptr, 0);
- if (AddToCodeCache(method, compiled_method, &oat_method)) {
- oat_method.LinkMethod(method);
- CHECK(runtime->GetJit()->GetCodeCache()->ContainsMethod(method)) << PrettyMethod(method);
+ if (AddToCodeCache(method, compiled_method)) {
result = true;
}
}
@@ -227,57 +224,57 @@
}
bool JitCompiler::AddToCodeCache(ArtMethod* method,
- const CompiledMethod* compiled_method,
- OatFile::OatMethod* out_method) {
+ const CompiledMethod* compiled_method) {
Runtime* runtime = Runtime::Current();
JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
- const auto* quick_code = compiled_method->GetQuickCode();
- if (quick_code == nullptr) {
+ auto const quick_code = compiled_method->GetQuickCode();
+ if (quick_code.empty()) {
return false;
}
- const auto code_size = quick_code->size();
+ const auto code_size = quick_code.size();
Thread* const self = Thread::Current();
- auto* const mapping_table = compiled_method->GetMappingTable();
- auto* const vmap_table = compiled_method->GetVmapTable();
- auto* const gc_map = compiled_method->GetGcMap();
+ auto const mapping_table = compiled_method->GetMappingTable();
+ auto const vmap_table = compiled_method->GetVmapTable();
+ auto const gc_map = compiled_method->GetGcMap();
uint8_t* mapping_table_ptr = nullptr;
uint8_t* vmap_table_ptr = nullptr;
uint8_t* gc_map_ptr = nullptr;
- if (mapping_table != nullptr) {
+ if (!mapping_table.empty()) {
// Write out pre-header stuff.
mapping_table_ptr = code_cache->AddDataArray(
- self, mapping_table->data(), mapping_table->data() + mapping_table->size());
+ self, mapping_table.data(), mapping_table.data() + mapping_table.size());
if (mapping_table_ptr == nullptr) {
return false; // Out of data cache.
}
}
- if (vmap_table != nullptr) {
+ if (!vmap_table.empty()) {
vmap_table_ptr = code_cache->AddDataArray(
- self, vmap_table->data(), vmap_table->data() + vmap_table->size());
+ self, vmap_table.data(), vmap_table.data() + vmap_table.size());
if (vmap_table_ptr == nullptr) {
return false; // Out of data cache.
}
}
- if (gc_map != nullptr) {
+ if (!gc_map.empty()) {
gc_map_ptr = code_cache->AddDataArray(
- self, gc_map->data(), gc_map->data() + gc_map->size());
+ self, gc_map.data(), gc_map.data() + gc_map.size());
if (gc_map_ptr == nullptr) {
return false; // Out of data cache.
}
}
uint8_t* const code = code_cache->CommitCode(self,
+ method,
mapping_table_ptr,
vmap_table_ptr,
gc_map_ptr,
compiled_method->GetFrameSizeInBytes(),
compiled_method->GetCoreSpillMask(),
compiled_method->GetFpSpillMask(),
- compiled_method->GetQuickCode()->data(),
- compiled_method->GetQuickCode()->size());
+ compiled_method->GetQuickCode().data(),
+ compiled_method->GetQuickCode().size());
if (code == nullptr) {
return false;
@@ -285,13 +282,6 @@
const size_t thumb_offset = compiled_method->CodeDelta();
const uint32_t code_offset = sizeof(OatQuickMethodHeader) + thumb_offset;
- *out_method = OatFile::OatMethod(code, code_offset);
- DCHECK_EQ(out_method->GetGcMap(), gc_map_ptr);
- DCHECK_EQ(out_method->GetMappingTable(), mapping_table_ptr);
- DCHECK_EQ(out_method->GetVmapTable(), vmap_table_ptr);
- DCHECK_EQ(out_method->GetFrameSizeInBytes(), compiled_method->GetFrameSizeInBytes());
- DCHECK_EQ(out_method->GetCoreSpillMask(), compiled_method->GetCoreSpillMask());
- DCHECK_EQ(out_method->GetFpSpillMask(), compiled_method->GetFpSpillMask());
VLOG(jit)
<< "JIT added "
<< PrettyMethod(method) << "@" << method
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index 757f3f3..913a6d0 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -59,8 +59,8 @@
// This is in the compiler since the runtime doesn't have access to the compiled method
// structures.
bool AddToCodeCache(ArtMethod* method,
- const CompiledMethod* compiled_method,
- OatFile::OatMethod* out_method) SHARED_REQUIRES(Locks::mutator_lock_);
+ const CompiledMethod* compiled_method)
+ SHARED_REQUIRES(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(JitCompiler);
};
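
[Editor's note] The reshaped AddToCodeCache above repeats one pattern three times: skip an empty table, otherwise copy it into the data cache and bail out if the cache is exhausted. A self-contained sketch of that pattern (FixedDataCache is an illustrative stand-in, not the ART JitCodeCache):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Bump-pointer cache with a fixed capacity; Add fails when space runs out.
    class FixedDataCache {
     public:
      explicit FixedDataCache(size_t capacity) : storage_(capacity), used_(0u) {}

      uint8_t* AddDataArray(const uint8_t* begin, const uint8_t* end) {
        size_t size = static_cast<size_t>(end - begin);
        if (used_ + size > storage_.size()) {
          return nullptr;  // Out of data cache.
        }
        uint8_t* result = storage_.data() + used_;
        std::memcpy(result, begin, size);
        used_ += size;
        return result;
      }

     private:
      std::vector<uint8_t> storage_;
      size_t used_;
    };

    // Empty views are skipped (the old nullptr check); non-empty ones must land.
    inline bool CopyTable(FixedDataCache* cache,
                          const std::vector<uint8_t>& table,  // Stand-in for ArrayRef.
                          uint8_t** out_ptr) {
      *out_ptr = nullptr;
      if (table.empty()) {
        return true;  // Nothing to copy.
      }
      *out_ptr = cache->AddDataArray(table.data(), table.data() + table.size());
      return *out_ptr != nullptr;
    }
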
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 34f0802..52a2382 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -487,7 +487,7 @@
frame_size,
main_jni_conv->CoreSpillMask(),
main_jni_conv->FpSpillMask(),
- nullptr, // src_mapping_table.
+ ArrayRef<const SrcMapElem>(),
ArrayRef<const uint8_t>(), // mapping_table.
ArrayRef<const uint8_t>(), // vmap_table.
ArrayRef<const uint8_t>(), // native_gc_map.
diff --git a/compiler/linker/arm/relative_patcher_arm_base.cc b/compiler/linker/arm/relative_patcher_arm_base.cc
index cb9ea38..ac38f3d 100644
--- a/compiler/linker/arm/relative_patcher_arm_base.cc
+++ b/compiler/linker/arm/relative_patcher_arm_base.cc
@@ -85,8 +85,7 @@
const CompiledMethod* compiled_method,
MethodReference method_ref,
uint32_t max_extra_space) {
- DCHECK(compiled_method->GetQuickCode() != nullptr);
- uint32_t quick_code_size = compiled_method->GetQuickCode()->size();
+ uint32_t quick_code_size = compiled_method->GetQuickCode().size();
uint32_t quick_code_offset = compiled_method->AlignCode(offset) + sizeof(OatQuickMethodHeader);
uint32_t next_aligned_offset = compiled_method->AlignCode(quick_code_offset + quick_code_size);
// Adjust for extra space required by the subclass.
diff --git a/compiler/linker/arm64/relative_patcher_arm64.cc b/compiler/linker/arm64/relative_patcher_arm64.cc
index 6f234a8..57018af 100644
--- a/compiler/linker/arm64/relative_patcher_arm64.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64.cc
@@ -74,7 +74,7 @@
// Now that we have the actual offset where the code will be placed, locate the ADRP insns
// that actually require the thunk.
uint32_t quick_code_offset = compiled_method->AlignCode(offset) + sizeof(OatQuickMethodHeader);
- ArrayRef<const uint8_t> code(*compiled_method->GetQuickCode());
+ ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
uint32_t thunk_offset = compiled_method->AlignCode(quick_code_offset + code.size());
DCHECK(compiled_method != nullptr);
for (const LinkerPatch& patch : compiled_method->GetPatches()) {
diff --git a/compiler/linker/arm64/relative_patcher_arm64_test.cc b/compiler/linker/arm64/relative_patcher_arm64_test.cc
index 857d584..2a426b5 100644
--- a/compiler/linker/arm64/relative_patcher_arm64_test.cc
+++ b/compiler/linker/arm64/relative_patcher_arm64_test.cc
@@ -237,7 +237,7 @@
CHECK(!compiled_method_refs_.empty());
CHECK_EQ(compiled_method_refs_[0].dex_method_index, 1u);
CHECK_EQ(compiled_method_refs_.size(), compiled_methods_.size());
- uint32_t method1_size = compiled_methods_[0]->GetQuickCode()->size();
+ uint32_t method1_size = compiled_methods_[0]->GetQuickCode().size();
uint32_t thunk_offset = CompiledCode::AlignCode(method1_offset + method1_size, kArm64);
uint32_t b_diff = thunk_offset - (method1_offset + num_nops * 4u);
ASSERT_EQ(b_diff & 3u, 0u);
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index e357662..92cf8ca 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -74,8 +74,8 @@
compiled_method_refs_.push_back(method_ref);
compiled_methods_.emplace_back(new CompiledMethod(
&driver_, instruction_set_, code,
- 0u, 0u, 0u, nullptr, ArrayRef<const uint8_t>(), ArrayRef<const uint8_t>(),
- ArrayRef<const uint8_t>(), ArrayRef<const uint8_t>(),
+ 0u, 0u, 0u, ArrayRef<const SrcMapElem>(), ArrayRef<const uint8_t>(),
+ ArrayRef<const uint8_t>(), ArrayRef<const uint8_t>(), ArrayRef<const uint8_t>(),
patches));
}
@@ -93,7 +93,7 @@
offset += sizeof(OatQuickMethodHeader);
uint32_t quick_code_offset = offset + compiled_method->CodeDelta();
- const auto& code = *compiled_method->GetQuickCode();
+ const auto code = compiled_method->GetQuickCode();
offset += code.size();
method_offset_map_.map.Put(compiled_method_refs_[idx], quick_code_offset);
@@ -125,7 +125,7 @@
out_.WriteFully(dummy_header, sizeof(OatQuickMethodHeader));
offset += sizeof(OatQuickMethodHeader);
- ArrayRef<const uint8_t> code(*compiled_method->GetQuickCode());
+ ArrayRef<const uint8_t> code = compiled_method->GetQuickCode();
if (!compiled_method->GetPatches().empty()) {
patched_code_.assign(code.begin(), code.end());
code = ArrayRef<const uint8_t>(patched_code_);
@@ -164,7 +164,7 @@
++idx;
}
CHECK_NE(idx, compiled_method_refs_.size());
- CHECK_EQ(compiled_methods_[idx]->GetQuickCode()->size(), expected_code.size());
+ CHECK_EQ(compiled_methods_[idx]->GetQuickCode().size(), expected_code.size());
auto result = method_offset_map_.FindMethodOffset(method_ref);
CHECK(result.first); // Must have been linked.
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 2d9d91a..06576cc 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -63,9 +63,9 @@
EXPECT_EQ(oat_method.GetFpSpillMask(), compiled_method->GetFpSpillMask());
uintptr_t oat_code_aligned = RoundDown(reinterpret_cast<uintptr_t>(quick_oat_code), 2);
quick_oat_code = reinterpret_cast<const void*>(oat_code_aligned);
- const SwapVector<uint8_t>* quick_code = compiled_method->GetQuickCode();
- EXPECT_TRUE(quick_code != nullptr);
- size_t code_size = quick_code->size() * sizeof(quick_code[0]);
+ ArrayRef<const uint8_t> quick_code = compiled_method->GetQuickCode();
+ EXPECT_FALSE(quick_code.empty());
+ size_t code_size = quick_code.size() * sizeof(quick_code[0]);
EXPECT_EQ(0, memcmp(quick_oat_code, &quick_code[0], code_size))
<< PrettyMethod(method) << " " << code_size;
CHECK_EQ(0, memcmp(quick_oat_code, &quick_code[0], code_size));
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 640698b..dcb23bf 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -172,7 +172,7 @@
}
struct OatWriter::GcMapDataAccess {
- static const SwapVector<uint8_t>* GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
+ static ArrayRef<const uint8_t> GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
return compiled_method->GetGcMap();
}
@@ -194,7 +194,7 @@
};
struct OatWriter::MappingTableDataAccess {
- static const SwapVector<uint8_t>* GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
+ static ArrayRef<const uint8_t> GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
return compiled_method->GetMappingTable();
}
@@ -216,7 +216,7 @@
};
struct OatWriter::VmapTableDataAccess {
- static const SwapVector<uint8_t>* GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
+ static ArrayRef<const uint8_t> GetData(const CompiledMethod* compiled_method) ALWAYS_INLINE {
return compiled_method->GetVmapTable();
}
@@ -388,8 +388,8 @@
// Derived from CompiledMethod.
uint32_t quick_code_offset = 0;
- const SwapVector<uint8_t>* quick_code = compiled_method->GetQuickCode();
- uint32_t code_size = quick_code->size() * sizeof(uint8_t);
+ ArrayRef<const uint8_t> quick_code = compiled_method->GetQuickCode();
+ uint32_t code_size = quick_code.size() * sizeof(uint8_t);
uint32_t thumb_offset = compiled_method->CodeDelta();
// Deduplicate code arrays if we are not producing debuggable code.
@@ -428,7 +428,7 @@
uint32_t vmap_table_offset = method_header->vmap_table_offset_;
// If we don't have quick code, then we must have a vmap, as that is how the dex2dex
// compiler records its transformations.
- DCHECK(quick_code != nullptr || vmap_table_offset != 0);
+ DCHECK(!quick_code.empty() || vmap_table_offset != 0);
uint32_t gc_map_offset = method_header->gc_map_offset_;
// The code offset was 0 when the mapping/vmap table offset was set, so it's set
// to 0-offset and we need to adjust it by code_offset.
@@ -496,12 +496,12 @@
} else {
status = mirror::Class::kStatusNotReady;
}
- const SwapVector<uint8_t>* gc_map = compiled_method->GetGcMap();
- if (gc_map != nullptr) {
- size_t gc_map_size = gc_map->size() * sizeof(gc_map[0]);
+ ArrayRef<const uint8_t> gc_map = compiled_method->GetGcMap();
+ if (!gc_map.empty()) {
+ size_t gc_map_size = gc_map.size() * sizeof(gc_map[0]);
bool is_native = it.MemberIsNative();
CHECK(gc_map_size != 0 || is_native || status < mirror::Class::kStatusVerified)
- << gc_map << " " << gc_map_size << " " << (is_native ? "true" : "false") << " "
+ << gc_map_size << " " << (is_native ? "true" : "false") << " "
<< (status < mirror::Class::kStatusVerified) << " " << status << " "
<< PrettyMethod(it.GetMemberIndex(), *dex_file_);
}
@@ -519,30 +519,22 @@
private:
struct CodeOffsetsKeyComparator {
bool operator()(const CompiledMethod* lhs, const CompiledMethod* rhs) const {
- if (lhs->GetQuickCode() != rhs->GetQuickCode()) {
- return lhs->GetQuickCode() < rhs->GetQuickCode();
+ // Code is deduplicated by CompilerDriver, compare only data pointers.
+ if (lhs->GetQuickCode().data() != rhs->GetQuickCode().data()) {
+ return lhs->GetQuickCode().data() < rhs->GetQuickCode().data();
}
// If the code is the same, all other fields are likely to be the same as well.
- if (UNLIKELY(lhs->GetMappingTable() != rhs->GetMappingTable())) {
- return lhs->GetMappingTable() < rhs->GetMappingTable();
+ if (UNLIKELY(lhs->GetMappingTable().data() != rhs->GetMappingTable().data())) {
+ return lhs->GetMappingTable().data() < rhs->GetMappingTable().data();
}
- if (UNLIKELY(lhs->GetVmapTable() != rhs->GetVmapTable())) {
- return lhs->GetVmapTable() < rhs->GetVmapTable();
+ if (UNLIKELY(lhs->GetVmapTable().data() != rhs->GetVmapTable().data())) {
+ return lhs->GetVmapTable().data() < rhs->GetVmapTable().data();
}
- if (UNLIKELY(lhs->GetGcMap() != rhs->GetGcMap())) {
- return lhs->GetGcMap() < rhs->GetGcMap();
+ if (UNLIKELY(lhs->GetGcMap().data() != rhs->GetGcMap().data())) {
+ return lhs->GetGcMap().data() < rhs->GetGcMap().data();
}
- const auto& lhs_patches = lhs->GetPatches();
- const auto& rhs_patches = rhs->GetPatches();
- if (UNLIKELY(lhs_patches.size() != rhs_patches.size())) {
- return lhs_patches.size() < rhs_patches.size();
- }
- auto rit = rhs_patches.begin();
- for (const LinkerPatch& lpatch : lhs_patches) {
- if (UNLIKELY(!(lpatch == *rit))) {
- return lpatch < *rit;
- }
- ++rit;
+ if (UNLIKELY(lhs->GetPatches().data() != rhs->GetPatches().data())) {
+ return lhs->GetPatches().data() < rhs->GetPatches().data();
}
return false;
}
@@ -583,17 +575,17 @@
DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size());
DCHECK_EQ(DataAccess::GetOffset(oat_class, method_offsets_index_), 0u);
- const SwapVector<uint8_t>* map = DataAccess::GetData(compiled_method);
- uint32_t map_size = map == nullptr ? 0 : map->size() * sizeof((*map)[0]);
+ ArrayRef<const uint8_t> map = DataAccess::GetData(compiled_method);
+ uint32_t map_size = map.size() * sizeof(map[0]);
if (map_size != 0u) {
- auto lb = dedupe_map_.lower_bound(map);
- if (lb != dedupe_map_.end() && !dedupe_map_.key_comp()(map, lb->first)) {
+ auto lb = dedupe_map_.lower_bound(map.data());
+ if (lb != dedupe_map_.end() && !dedupe_map_.key_comp()(map.data(), lb->first)) {
DataAccess::SetOffset(oat_class, method_offsets_index_, lb->second);
} else {
DataAccess::SetOffset(oat_class, method_offsets_index_, offset_);
- dedupe_map_.PutBefore(lb, map, offset_);
+ dedupe_map_.PutBefore(lb, map.data(), offset_);
offset_ += map_size;
- writer_->oat_header_->UpdateChecksum(&(*map)[0], map_size);
+ writer_->oat_header_->UpdateChecksum(&map[0], map_size);
}
}
++method_offsets_index_;
@@ -605,7 +597,7 @@
private:
// Deduplication is already done on a pointer basis by the compiler driver,
// so we can simply compare the pointers to find out if things are duplicated.
- SafeMap<const SwapVector<uint8_t>*, uint32_t> dedupe_map_;
+ SafeMap<const uint8_t*, uint32_t> dedupe_map_;
};
class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
@@ -647,7 +639,7 @@
UNREACHABLE();
}
- if (compiled_method != nullptr && compiled_method->GetQuickCode()->size() != 0) {
+ if (compiled_method != nullptr && compiled_method->GetQuickCode().size() != 0) {
method->SetEntryPointFromQuickCompiledCodePtrSize(
reinterpret_cast<void*>(offsets.code_offset_), pointer_size_);
}
@@ -713,10 +705,8 @@
size_t file_offset = file_offset_;
OutputStream* out = out_;
- const SwapVector<uint8_t>* quick_code = compiled_method->GetQuickCode();
- // Need a wrapper if we create a copy for patching.
- ArrayRef<const uint8_t> wrapped(*quick_code);
- uint32_t code_size = quick_code->size() * sizeof(uint8_t);
+ ArrayRef<const uint8_t> quick_code = compiled_method->GetQuickCode();
+ uint32_t code_size = quick_code.size() * sizeof(uint8_t);
// Deduplicate code arrays.
const OatMethodOffsets& method_offsets = oat_class->method_offsets_[method_offsets_index_];
@@ -753,8 +743,8 @@
DCHECK_OFFSET_();
if (!compiled_method->GetPatches().empty()) {
- patched_code_.assign(quick_code->begin(), quick_code->end());
- wrapped = ArrayRef<const uint8_t>(patched_code_);
+ patched_code_.assign(quick_code.begin(), quick_code.end());
+ quick_code = ArrayRef<const uint8_t>(patched_code_);
for (const LinkerPatch& patch : compiled_method->GetPatches()) {
if (patch.Type() == kLinkerPatchCallRelative) {
// NOTE: Relative calls across oat files are not supported.
@@ -781,8 +771,8 @@
}
}
- writer_->oat_header_->UpdateChecksum(wrapped.data(), code_size);
- if (!out->WriteFully(wrapped.data(), code_size)) {
+ writer_->oat_header_->UpdateChecksum(quick_code.data(), code_size);
+ if (!out->WriteFully(quick_code.data(), code_size)) {
ReportWriteFailure("method code", it);
return false;
}
@@ -947,14 +937,14 @@
++method_offsets_index_;
// Write deduplicated map.
- const SwapVector<uint8_t>* map = DataAccess::GetData(compiled_method);
- size_t map_size = map == nullptr ? 0 : map->size() * sizeof((*map)[0]);
+ ArrayRef<const uint8_t> map = DataAccess::GetData(compiled_method);
+ size_t map_size = map.size() * sizeof(map[0]);
DCHECK((map_size == 0u && map_offset == 0u) ||
(map_size != 0u && map_offset != 0u && map_offset <= offset_))
<< map_size << " " << map_offset << " " << offset_ << " "
<< PrettyMethod(it.GetMemberIndex(), *dex_file_) << " for " << DataAccess::Name();
if (map_size != 0u && map_offset == offset_) {
- if (UNLIKELY(!out->WriteFully(&(*map)[0], map_size))) {
+ if (UNLIKELY(!out->WriteFully(&map[0], map_size))) {
ReportWriteFailure(it);
return false;
}
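
[Editor's note] The comparator rewrite above, and the dedupe_map_ now keyed by `const uint8_t*`, both lean on a single invariant: after CompilerDriver-side deduplication, byte-equal arrays share storage, so pointer comparison is equivalent to — and far cheaper than — deep comparison. A tiny standalone demonstration of the invariant (std::set used as the canonical store for brevity):

    #include <cassert>
    #include <cstdint>
    #include <set>
    #include <vector>

    int main() {
      // Inserting an equal vector returns the existing element, and std::set
      // guarantees stable addresses for its elements.
      std::set<std::vector<uint8_t>> storage;
      auto dedupe = [&storage](const std::vector<uint8_t>& v) {
        return &*storage.insert(v).first;
      };
      const std::vector<uint8_t>* a = dedupe({1, 2, 3});
      const std::vector<uint8_t>* b = dedupe({1, 2, 3});
      const std::vector<uint8_t>* c = dedupe({4, 5});
      assert(a == b);  // Equal contents collapsed: pointer identity holds.
      assert(a != c);  // Distinct contents keep distinct storage.
      return 0;
    }
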
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 8ca352f..ed193c7 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -774,11 +774,12 @@
&string_init_offset);
// Replace calls to String.<init> with StringFactory.
if (is_string_init) {
- HInvokeStaticOrDirect::DispatchInfo dispatch_info = ComputeDispatchInfo(is_string_init,
- string_init_offset,
- target_method,
- direct_method,
- direct_code);
+ HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
+ HInvokeStaticOrDirect::MethodLoadKind::kStringInit,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ dchecked_integral_cast<uint64_t>(string_init_offset),
+ 0U
+ };
HInvoke* invoke = new (arena_) HInvokeStaticOrDirect(
arena_,
number_of_arguments - 1,
@@ -841,11 +842,12 @@
clinit_check = ProcessClinitCheckForInvoke(dex_pc, method_idx, &clinit_check_requirement);
}
- HInvokeStaticOrDirect::DispatchInfo dispatch_info = ComputeDispatchInfo(is_string_init,
- string_init_offset,
- target_method,
- direct_method,
- direct_code);
+ HInvokeStaticOrDirect::DispatchInfo dispatch_info = {
+ HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ 0u,
+ 0U
+ };
invoke = new (arena_) HInvokeStaticOrDirect(arena_,
number_of_arguments,
return_type,
@@ -958,77 +960,6 @@
return clinit_check;
}
-HInvokeStaticOrDirect::DispatchInfo HGraphBuilder::ComputeDispatchInfo(
- bool is_string_init,
- int32_t string_init_offset,
- MethodReference target_method,
- uintptr_t direct_method,
- uintptr_t direct_code) {
- HInvokeStaticOrDirect::MethodLoadKind method_load_kind;
- HInvokeStaticOrDirect::CodePtrLocation code_ptr_location;
- uint64_t method_load_data = 0u;
- uint64_t direct_code_ptr = 0u;
-
- if (is_string_init) {
- // TODO: Use direct_method and direct_code for the appropriate StringFactory method.
- method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kStringInit;
- code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
- method_load_data = string_init_offset;
- } else if (target_method.dex_file == outer_compilation_unit_->GetDexFile() &&
- target_method.dex_method_index == outer_compilation_unit_->GetDexMethodIndex()) {
- method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRecursive;
- code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallSelf;
- } else {
- if (direct_method != 0u) { // Should we use a direct pointer to the method?
- if (direct_method != static_cast<uintptr_t>(-1)) { // Is the method pointer known now?
- method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress;
- method_load_data = direct_method;
- } else { // The direct pointer will be known at link time.
- method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup;
- }
- } else { // Use dex cache.
- DCHECK(target_method.dex_file == dex_compilation_unit_->GetDexFile());
- DexCacheArraysLayout layout =
- compiler_driver_->GetDexCacheArraysLayout(target_method.dex_file);
- if (layout.Valid()) { // Can we use PC-relative access to the dex cache arrays?
- method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative;
- method_load_data = layout.MethodOffset(target_method.dex_method_index);
- } else { // We must go through the ArtMethod's pointer to resolved methods.
- method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
- }
- }
- if (direct_code != 0u) { // Should we use a direct pointer to the code?
- if (direct_code != static_cast<uintptr_t>(-1)) { // Is the code pointer known now?
- code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirect;
- direct_code_ptr = direct_code;
- } else if (compiler_driver_->IsImage() ||
- target_method.dex_file == dex_compilation_unit_->GetDexFile()) {
- // Use PC-relative calls for invokes within a multi-dex oat file.
- // TODO: Recognize when the target dex file is within the current oat file for
- // app compilation. At the moment we recognize only the boot image as multi-dex.
- // NOTE: This will require changing the ARM backend which currently falls
- // through from kCallPCRelative to kDirectCodeFixup for different dex files.
- code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative;
- } else { // The direct pointer will be known at link time.
- // NOTE: This is used for app->boot calls when compiling an app against
- // a relocatable but not yet relocated image.
- code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup;
- }
- } else { // We must use the code pointer from the ArtMethod.
- code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
- }
- }
-
- if (graph_->IsDebuggable()) {
- // For debuggable apps always use the code pointer from ArtMethod
- // so that we don't circumvent instrumentation stubs if installed.
- code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
- }
-
- return HInvokeStaticOrDirect::DispatchInfo {
- method_load_kind, code_ptr_location, method_load_data, direct_code_ptr };
-}
-
bool HGraphBuilder::SetupInvokeArguments(HInvoke* invoke,
uint32_t number_of_vreg_arguments,
uint32_t* args,
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 6910d51..9eaa4b6 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -276,12 +276,6 @@
uint32_t dex_pc,
HInvoke* invoke);
- HInvokeStaticOrDirect::DispatchInfo ComputeDispatchInfo(bool is_string_init,
- int32_t string_init_offset,
- MethodReference target_method,
- uintptr_t direct_method,
- uintptr_t direct_code);
-
bool SetupInvokeArguments(HInvoke* invoke,
uint32_t number_of_vreg_arguments,
uint32_t* args,
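
[Editor's note] With ComputeDispatchInfo gone, the builder fills in DispatchInfo directly with the most conservative choices (see the two aggregate initializers above) and leaves upgrades to the new sharpening pass. A sketch of that split, with illustrative enums mirroring the kinds named in this diff — not the full ART definitions:

    #include <cstdint>

    enum class MethodLoadKind { kStringInit, kRecursive, kDexCachePcRelative, kDexCacheViaMethod };
    enum class CodePtrLocation { kCallSelf, kCallPCRelative, kCallArtMethod };

    // Illustrative mirror of HInvokeStaticOrDirect::DispatchInfo's shape.
    struct DispatchInfo {
      MethodLoadKind method_load_kind;
      CodePtrLocation code_ptr_location;
      uint64_t method_load_data;
      uint64_t direct_code_ptr;
    };

    // Builder side: always start from the safest dispatch...
    inline DispatchInfo ConservativeDispatch() {
      return DispatchInfo{MethodLoadKind::kDexCacheViaMethod,
                          CodePtrLocation::kCallArtMethod,
                          /*method_load_data=*/0u,
                          /*direct_code_ptr=*/0u};
    }

    // ...and let a later pass sharpen it when more is known, e.g. a call
    // back into the method being compiled can load itself and call directly.
    inline DispatchInfo SharpenForRecursiveCall(DispatchInfo info) {
      info.method_load_kind = MethodLoadKind::kRecursive;
      info.code_ptr_location = CodePtrLocation::kCallSelf;
      return info;
    }
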
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 1c62dfa..a1bb5e0 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -379,13 +379,17 @@
if (invoke->IsInvokeStaticOrDirect()) {
HInvokeStaticOrDirect* call = invoke->AsInvokeStaticOrDirect();
- if (call->IsStringInit()) {
- locations->AddTemp(visitor->GetMethodLocation());
- } else if (call->IsRecursive()) {
- locations->SetInAt(call->GetCurrentMethodInputIndex(), visitor->GetMethodLocation());
- } else {
- locations->AddTemp(visitor->GetMethodLocation());
- locations->SetInAt(call->GetCurrentMethodInputIndex(), Location::RequiresRegister());
+ switch (call->GetMethodLoadKind()) {
+ case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
+ locations->SetInAt(call->GetCurrentMethodInputIndex(), visitor->GetMethodLocation());
+ break;
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod:
+ locations->AddTemp(visitor->GetMethodLocation());
+ locations->SetInAt(call->GetCurrentMethodInputIndex(), Location::RequiresRegister());
+ break;
+ default:
+ locations->AddTemp(visitor->GetMethodLocation());
+ break;
}
} else {
locations->AddTemp(visitor->GetMethodLocation());
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b04dfc0..47b6f30 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -172,6 +172,7 @@
OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGenerator() {}
+ // Get the graph. This is the outermost graph, never the graph of a method being inlined.
HGraph* GetGraph() const { return graph_; }
HBasicBlock* GetNextBlockToEmit() const;
@@ -431,6 +432,12 @@
uint32_t dex_pc,
SlowPathCode* slow_path) = 0;
+ // Check if the desired_dispatch_info is supported. If it is, return it,
+ // otherwise return a fall-back info that should be used instead.
+ virtual HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method) = 0;
+
// Generate a call to a static or direct method.
virtual void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) = 0;
// Generate a call to a virtual method.
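
[Editor's note] The new virtual hook above inverts the old arrangement: instead of backends silently falling through on dispatch kinds they cannot emit, the optimizer proposes a dispatch and each code generator returns either the proposal or a supported fallback. A minimal self-contained sketch of the pattern (names illustrative, not the ART signatures):

    #include <cstdint>

    enum class CallKind : uint8_t { kDirect, kPcRelative, kViaArtMethod };

    class CodeGen {
     public:
      virtual ~CodeGen() {}
      // Return the desired kind if supported, otherwise a working fallback.
      virtual CallKind GetSupportedCallKind(CallKind desired) const = 0;
    };

    // A backend without PC-relative call support degrades such requests up
    // front; since callers then never ask for an unsupported kind, the
    // emitter can turn the old FALLTHROUGH_INTENDED cases into LOG(FATAL).
    class NoPcRelativeCodeGen : public CodeGen {
     public:
      CallKind GetSupportedCallKind(CallKind desired) const override {
        return (desired == CallKind::kPcRelative) ? CallKind::kViaArtMethod : desired;
      }
    };

    // An unrestricted backend accepts every proposal, like arm64 here.
    class FullCodeGen : public CodeGen {
     public:
      CallKind GetSupportedCallKind(CallKind desired) const override {
        return desired;
      }
    };
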
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 92a5878..8d9794b 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -5155,26 +5155,51 @@
}
}
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM::GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method) {
+ if (desired_dispatch_info.method_load_kind ==
+ HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative) {
+ // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
+ return HInvokeStaticOrDirect::DispatchInfo {
+ HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ 0u,
+ 0u
+ };
+ }
+ if (desired_dispatch_info.code_ptr_location ==
+ HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative) {
+ const DexFile& outer_dex_file = GetGraph()->GetDexFile();
+ if (&outer_dex_file != target_method.dex_file) {
+ // Calls across dex files are more likely to exceed the available BL range,
+ // so use absolute patch with fixup if available and kCallArtMethod otherwise.
+ HInvokeStaticOrDirect::CodePtrLocation code_ptr_location =
+ (desired_dispatch_info.method_load_kind ==
+ HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup)
+ ? HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup
+ : HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+ return HInvokeStaticOrDirect::DispatchInfo {
+ desired_dispatch_info.method_load_kind,
+ code_ptr_location,
+ desired_dispatch_info.method_load_data,
+ 0u
+ };
+ }
+ }
+ return desired_dispatch_info;
+}
+
void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
// For better instruction scheduling we load the direct code pointer before the method pointer.
- bool direct_code_loaded = false;
switch (invoke->GetCodePtrLocation()) {
- case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
- if (IsSameDexFile(*invoke->GetTargetMethod().dex_file, GetGraph()->GetDexFile())) {
- break;
- }
- // Calls across dex files are more likely to exceed the available BL range,
- // so use absolute patch by falling through to kDirectCodeFixup.
- FALLTHROUGH_INTENDED;
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
// LR = code address from literal pool with link-time patch.
__ LoadLiteral(LR, DeduplicateMethodCodeLiteral(invoke->GetTargetMethod()));
- direct_code_loaded = true;
break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
// LR = invoke->GetDirectCodePtr();
__ LoadImmediate(LR, invoke->GetDirectCodePtr());
- direct_code_loaded = true;
break;
default:
break;
@@ -5197,8 +5222,10 @@
DeduplicateMethodAddressLiteral(invoke->GetTargetMethod()));
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
- FALLTHROUGH_INTENDED;
+ // TODO: Implement this type.
+ // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+ LOG(FATAL) << "Unsupported";
+ UNREACHABLE();
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
Register method_reg;
@@ -5227,20 +5254,14 @@
__ bl(GetFrameEntryLabel());
break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
- if (!direct_code_loaded) {
- relative_call_patches_.emplace_back(invoke->GetTargetMethod());
- __ Bind(&relative_call_patches_.back().label);
- Label label;
- __ bl(&label); // Arbitrarily branch to the instruction after BL, override at link time.
- __ Bind(&label);
- break;
- }
- // If we loaded the direct code above, fall through.
- FALLTHROUGH_INTENDED;
+ relative_call_patches_.emplace_back(invoke->GetTargetMethod());
+ __ Bind(&relative_call_patches_.back().label);
+ // Arbitrarily branch to the BL itself, override at link time.
+ __ bl(&relative_call_patches_.back().label);
+ break;
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
// LR prepared above for better instruction scheduling.
- DCHECK(direct_code_loaded);
// LR()
__ blx(LR);
break;
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 6900933..cef1095 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -362,6 +362,12 @@
Label* GetFrameEntryLabel() { return &frame_entry_label_; }
+ // Check if the desired_dispatch_info is supported. If it is, return it,
+ // otherwise return a fall-back info that should be used instead.
+ HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method) OVERRIDE;
+
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 1773c06..b0be446 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2826,6 +2826,13 @@
return false;
}
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM64::GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method ATTRIBUTE_UNUSED) {
+ // On arm64 we support all dispatch types.
+ return desired_dispatch_info;
+}
+
void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
// For better instruction scheduling we load the direct code pointer before the method pointer.
bool direct_code_loaded = false;
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 799f1bd..ab684ea 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -388,6 +388,12 @@
return false;
}
+ // Check if the desired_dispatch_info is supported. If it is, return it,
+ // otherwise return a fall-back info that should be used instead.
+ HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method) OVERRIDE;
+
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 4404aa3..6aed444 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1000,6 +1000,9 @@
void CodeGeneratorMIPS::AddLocationAsTemp(Location location, LocationSummary* locations) {
if (location.IsRegister()) {
locations->AddTemp(location);
+ } else if (location.IsRegisterPair()) {
+ locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairLow<Register>()));
+ locations->AddTemp(Location::RegisterLocation(location.AsRegisterPairHigh<Register>()));
} else {
UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
}
@@ -2951,6 +2954,37 @@
return false;
}
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS::GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method ATTRIBUTE_UNUSED) {
+ switch (desired_dispatch_info.method_load_kind) {
+ case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
+ // TODO: Implement these types. For the moment, we fall back to kDexCacheViaMethod.
+ return HInvokeStaticOrDirect::DispatchInfo {
+ HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ 0u,
+ 0u
+ };
+ default:
+ break;
+ }
+ switch (desired_dispatch_info.code_ptr_location) {
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
+ // TODO: Implement these types. For the moment, we fall back to kCallArtMethod.
+ return HInvokeStaticOrDirect::DispatchInfo {
+ desired_dispatch_info.method_load_kind,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ desired_dispatch_info.method_load_data,
+ 0u
+ };
+ default:
+ return desired_dispatch_info;
+ }
+}
+
void CodeGeneratorMIPS::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
// All registers are assumed to be correctly set up per the calling convention.
@@ -2970,13 +3004,11 @@
__ LoadConst32(temp.AsRegister<Register>(), invoke->GetMethodAddress());
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
- // TODO: Implement this type. (Needs literal support.) At the moment, the
- // CompilerDriver will not direct the backend to use this type for MIPS.
- LOG(FATAL) << "Unsupported!";
- UNREACHABLE();
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
- FALLTHROUGH_INTENDED;
+ // TODO: Implement these types.
+ // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+ LOG(FATAL) << "Unsupported";
+ UNREACHABLE();
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
Register reg = temp.AsRegister<Register>();
@@ -3017,15 +3049,15 @@
__ Jalr(T9);
__ Nop();
break;
- case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
- // TODO: Implement kCallPCRelative. For the moment, we fall back to kMethodCode.
- FALLTHROUGH_INTENDED;
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
- // TODO: Implement kDirectCodeFixup. For the moment, we fall back to kMethodCode.
- FALLTHROUGH_INTENDED;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
+ // TODO: Implement these types.
+ // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+ LOG(FATAL) << "Unsupported";
+ UNREACHABLE();
case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
// T9 = callee_method->entry_point_from_quick_compiled_code_;
- __ LoadFromOffset(kLoadDoubleword,
+ __ LoadFromOffset(kLoadWord,
T9,
callee_method.AsRegister<Register>(),
ArtMethod::EntryPointFromQuickCompiledCodeOffset(
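
[Editor's note] One genuine bug fix hides in the hunk above: loading the entry point with kLoadDoubleword on 32-bit MIPS read eight bytes from a four-byte, pointer-sized field. A small standalone sketch (not ART code) of why the load width must track the target pointer size:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // The entry-point field holds a target pointer, so a 32-bit target must
    // read 4 bytes (kLoadWord) and a 64-bit target 8 bytes (kLoadDoubleword).
    inline uint64_t LoadEntryPoint(const uint8_t* method_data,
                                   size_t offset,
                                   size_t target_pointer_size) {
      if (target_pointer_size == 4u) {
        uint32_t value;  // MIPS32: kLoadWord.
        std::memcpy(&value, method_data + offset, sizeof(value));
        return value;
      }
      uint64_t value;    // MIPS64: kLoadDoubleword.
      std::memcpy(&value, method_data + offset, sizeof(value));
      return value;
    }
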
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index a571e76..059131d 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -332,6 +332,12 @@
return type == Primitive::kPrimLong;
}
+ // Check if the desired_dispatch_info is supported. If it is, return it,
+ // otherwise return a fall-back info that should be used instead.
+ HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method) OVERRIDE;
+
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp);
void GenerateVirtualCall(HInvokeVirtual* invoke ATTRIBUTE_UNUSED,
Location temp ATTRIBUTE_UNUSED) OVERRIDE {
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 5f78285..55efd5f 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -2528,6 +2528,37 @@
return false;
}
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorMIPS64::GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method ATTRIBUTE_UNUSED) {
+ switch (desired_dispatch_info.method_load_kind) {
+ case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
+ case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
+ // TODO: Implement these types. For the moment, we fall back to kDexCacheViaMethod.
+ return HInvokeStaticOrDirect::DispatchInfo {
+ HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ 0u,
+ 0u
+ };
+ default:
+ break;
+ }
+ switch (desired_dispatch_info.code_ptr_location) {
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
+ // TODO: Implement these types. For the moment, we fall back to kCallArtMethod.
+ return HInvokeStaticOrDirect::DispatchInfo {
+ desired_dispatch_info.method_load_kind,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ desired_dispatch_info.method_load_data,
+ 0u
+ };
+ default:
+ return desired_dispatch_info;
+ }
+}
+
void CodeGeneratorMIPS64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
// All registers are assumed to be correctly set up per the calling convention.
@@ -2547,13 +2578,11 @@
__ LoadConst64(temp.AsRegister<GpuRegister>(), invoke->GetMethodAddress());
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
- // TODO: Implement this type. (Needs literal support.) At the moment, the
- // CompilerDriver will not direct the backend to use this type for MIPS.
- LOG(FATAL) << "Unsupported!";
- UNREACHABLE();
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
- FALLTHROUGH_INTENDED;
+ // TODO: Implement these types.
+ // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+ LOG(FATAL) << "Unsupported";
+ UNREACHABLE();
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
GpuRegister reg = temp.AsRegister<GpuRegister>();
@@ -2593,12 +2622,12 @@
// LR()
__ Jalr(T9);
break;
- case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
- // TODO: Implement kCallPCRelative. For the moment, we fall back to kMethodCode.
- FALLTHROUGH_INTENDED;
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
- // TODO: Implement kDirectCodeFixup. For the moment, we fall back to kMethodCode.
- FALLTHROUGH_INTENDED;
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative:
+ // TODO: Implement these types.
+ // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+ LOG(FATAL) << "Unsupported";
+ UNREACHABLE();
case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
// T9 = callee_method->entry_point_from_quick_compiled_code_;
__ LoadFromOffset(kLoadDoubleword,
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index df3fc0d..9bbd027 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -326,6 +326,12 @@
bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const { return false; }
+ // Check if the desired_dispatch_info is supported. If it is, return it,
+ // otherwise return a fall-back info that should be used instead.
+ HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method) OVERRIDE;
+
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke ATTRIBUTE_UNUSED,
Location temp ATTRIBUTE_UNUSED) OVERRIDE {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 963eec2..0df7e3b 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -3757,6 +3757,34 @@
}
}
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86::GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method ATTRIBUTE_UNUSED) {
+ if (desired_dispatch_info.method_load_kind ==
+ HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative) {
+ // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
+ return HInvokeStaticOrDirect::DispatchInfo {
+ HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ 0u,
+ 0u
+ };
+ }
+ switch (desired_dispatch_info.code_ptr_location) {
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
+ // For direct code, we actually prefer to call via the code pointer from ArtMethod*.
+ // (Though the direct CALL ptr16:32 is available for consideration).
+ return HInvokeStaticOrDirect::DispatchInfo {
+ desired_dispatch_info.method_load_kind,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ desired_dispatch_info.method_load_data,
+ 0u
+ };
+ default:
+ return desired_dispatch_info;
+ }
+}
void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
@@ -3777,8 +3805,10 @@
__ Bind(&method_patches_.back().label); // Bind the label at the end of the "movl" insn.
break;
case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative:
- // TODO: Implement this type. For the moment, we fall back to kDexCacheViaMethod.
- FALLTHROUGH_INTENDED;
+ // TODO: Implement this type.
+ // Currently filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+ LOG(FATAL) << "Unsupported";
+ UNREACHABLE();
case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
Location current_method = invoke->GetLocations()->InAt(invoke->GetCurrentMethodInputIndex());
Register method_reg;
@@ -3814,9 +3844,9 @@
}
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
- // For direct code, we actually prefer to call via the code pointer from ArtMethod*.
- // (Though the direct CALL ptr16:32 is available for consideration).
- FALLTHROUGH_INTENDED;
+ // Filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+ LOG(FATAL) << "Unsupported";
+ UNREACHABLE();
case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
// (callee_method + offset_of_quick_compiled_code)()
__ call(Address(callee_method.AsRegister<Register>(),
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index fdfc5ab..ac3d06c 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -333,6 +333,12 @@
// Helper method to move a 64bits value between two locations.
void Move64(Location destination, Location source);
+ // Check if the desired_dispatch_info is supported. If it is, return it,
+ // otherwise return a fall-back info that should be used instead.
+ HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method) OVERRIDE;
+
// Generate a call to a static or direct method.
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
// Generate a call to a virtual method.
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index ed2e4ca..5218d70 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -473,6 +473,24 @@
UNREACHABLE();
}
+HInvokeStaticOrDirect::DispatchInfo CodeGeneratorX86_64::GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method ATTRIBUTE_UNUSED) {
+ switch (desired_dispatch_info.code_ptr_location) {
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
+ case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
+ // For direct code, we actually prefer to call via the code pointer from ArtMethod*.
+ return HInvokeStaticOrDirect::DispatchInfo {
+ desired_dispatch_info.method_load_kind,
+ HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod,
+ desired_dispatch_info.method_load_data,
+ 0u
+ };
+ default:
+ return desired_dispatch_info;
+ }
+}
+
void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
Location temp) {
// All registers are assumed to be correctly set up.
@@ -539,8 +557,9 @@
}
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
- // For direct code, we actually prefer to call via the code pointer from ArtMethod*.
- FALLTHROUGH_INTENDED;
+ // Filtered out by GetSupportedInvokeStaticOrDirectDispatch().
+ LOG(FATAL) << "Unsupported";
+ UNREACHABLE();
case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
// (callee_method + offset_of_quick_compiled_code)()
__ call(Address(callee_method.AsRegister<CpuRegister>(),
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index dc86a48..fc485f5 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -335,6 +335,12 @@
return false;
}
+ // Check if the desired_dispatch_info is supported. If it is, return it,
+ // otherwise return a fall-back info that should be used instead.
+ HInvokeStaticOrDirect::DispatchInfo GetSupportedInvokeStaticOrDirectDispatch(
+ const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
+ MethodReference target_method) OVERRIDE;
+
void GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) OVERRIDE;
void GenerateVirtualCall(HInvokeVirtual* invoke, Location temp) OVERRIDE;
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index e2aca30..0aaa6b3 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -32,6 +32,7 @@
#include "optimizing_compiler.h"
#include "reference_type_propagation.h"
#include "register_allocator.h"
+#include "sharpening.h"
#include "ssa_phi_elimination.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
@@ -396,12 +397,14 @@
HDeadCodeElimination dce(callee_graph, stats_);
HConstantFolding fold(callee_graph);
ReferenceTypePropagation type_propagation(callee_graph, handles_);
+ HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_);
InstructionSimplifier simplify(callee_graph, stats_);
IntrinsicsRecognizer intrinsics(callee_graph, compiler_driver_);
HOptimization* optimizations[] = {
&intrinsics,
&type_propagation,
+ &sharpening,
&simplify,
&dce,
&fold,
@@ -415,6 +418,7 @@
size_t number_of_instructions_budget = kMaximumNumberOfHInstructions;
if (depth_ + 1 < compiler_driver_->GetCompilerOptions().GetInlineDepthLimit()) {
HInliner inliner(callee_graph,
+ codegen_,
outer_compilation_unit_,
dex_compilation_unit,
compiler_driver_,
@@ -484,7 +488,7 @@
return false;
}
- if (!same_dex_file && current->NeedsDexCache()) {
+ if (!same_dex_file && current->NeedsDexCacheOfDeclaringClass()) {
VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file)
<< " could not be inlined because " << current->DebugName()
<< " it is in a different dex file and requires access to the dex cache";
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index bce5915..0f6a945 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -22,6 +22,7 @@
namespace art {
+class CodeGenerator;
class CompilerDriver;
class DexCompilationUnit;
class HGraph;
@@ -31,6 +32,7 @@
class HInliner : public HOptimization {
public:
HInliner(HGraph* outer_graph,
+ CodeGenerator* codegen,
const DexCompilationUnit& outer_compilation_unit,
const DexCompilationUnit& caller_compilation_unit,
CompilerDriver* compiler_driver,
@@ -40,6 +42,7 @@
: HOptimization(outer_graph, kInlinerPassName, stats),
outer_compilation_unit_(outer_compilation_unit),
caller_compilation_unit_(caller_compilation_unit),
+ codegen_(codegen),
compiler_driver_(compiler_driver),
depth_(depth),
number_of_inlined_instructions_(0),
@@ -57,6 +60,7 @@
const DexCompilationUnit& outer_compilation_unit_;
const DexCompilationUnit& caller_compilation_unit_;
+ CodeGenerator* const codegen_;
CompilerDriver* const compiler_driver_;
const size_t depth_;
size_t number_of_inlined_instructions_;
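
[Editor's note] The inliner now threads the CodeGenerator into nested HInliner instances and runs HSharpening on the callee graph before simplification, so inlined invokes get the same dispatch sharpening as top-level ones. Reduced to a toy model (illustrative types, not HGraph/HSharpening), the pass does the kind of upgrade the removed ComputeDispatchInfo performed inline, e.g. marking self-calls as recursive dispatch:

    #include <vector>

    struct Invoke {
      int target_method_index;
      bool recursive_dispatch = false;
    };

    struct Graph {  // Stand-in for HGraph.
      int method_index;
      std::vector<Invoke> invokes;
    };

    // Sharpening over a whole graph: upgrade each invoke whose target is the
    // method being compiled (the kRecursive / kCallSelf analogue).
    inline void SharpenRecursiveCalls(Graph* graph) {
      for (Invoke& invoke : graph->invokes) {
        if (invoke.target_method_index == graph->method_index) {
          invoke.recursive_dispatch = true;
        }
      }
    }
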
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 56c4177..0ab0b80 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -272,7 +272,9 @@
GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
}
-static void GenNumberOfLeadingZeroes(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) {
+static void GenNumberOfLeadingZeroes(LocationSummary* locations,
+ bool is64bit,
+ Mips64Assembler* assembler) {
GpuRegister in = locations->InAt(0).AsRegister<GpuRegister>();
GpuRegister out = locations->Out().AsRegister<GpuRegister>();
@@ -301,7 +303,9 @@
GenNumberOfLeadingZeroes(invoke->GetLocations(), true, GetAssembler());
}
-static void GenNumberOfTrailingZeroes(LocationSummary* locations, bool is64bit, Mips64Assembler* assembler) {
+static void GenNumberOfTrailingZeroes(LocationSummary* locations,
+ bool is64bit,
+ Mips64Assembler* assembler) {
Location in = locations->InAt(0);
Location out = locations->Out();
@@ -383,7 +387,7 @@
GenRotateRight(invoke, Primitive::kPrimInt, GetAssembler());
}
-// int java.lang.Long.rotateRight(long i, int distance)
+// long java.lang.Long.rotateRight(long i, int distance)
void IntrinsicLocationsBuilderMIPS64::VisitLongRotateRight(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
LocationSummary::kNoCall,
@@ -446,7 +450,7 @@
GenRotateLeft(invoke, Primitive::kPrimInt, GetAssembler());
}
-// int java.lang.Long.rotateLeft(long i, int distance)
+// long java.lang.Long.rotateLeft(long i, int distance)
void IntrinsicLocationsBuilderMIPS64::VisitLongRotateLeft(HInvoke* invoke) {
LocationSummary* locations = new (arena_) LocationSummary(invoke,
LocationSummary::kNoCall,
@@ -754,17 +758,19 @@
__ SqrtD(out, in);
}
-static void CreateFPToFP(ArenaAllocator* arena, HInvoke* invoke) {
+static void CreateFPToFP(ArenaAllocator* arena,
+ HInvoke* invoke,
+ Location::OutputOverlap overlaps = Location::kOutputOverlap) {
LocationSummary* locations = new (arena) LocationSummary(invoke,
LocationSummary::kNoCall,
kIntrinsified);
locations->SetInAt(0, Location::RequiresFpuRegister());
- locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
+ locations->SetOut(Location::RequiresFpuRegister(), overlaps);
}
// double java.lang.Math.rint(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathRint(HInvoke* invoke) {
- CreateFPToFP(arena_, invoke);
+ CreateFPToFP(arena_, invoke, Location::kNoOutputOverlap);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathRint(HInvoke* invoke) {
@@ -788,15 +794,22 @@
kQuietNaN |
kSignalingNaN;
-void IntrinsicCodeGeneratorMIPS64::VisitMathFloor(HInvoke* invoke) {
- LocationSummary* locations = invoke->GetLocations();
- Mips64Assembler* assembler = GetAssembler();
+enum FloatRoundingMode {
+ kFloor,
+ kCeil,
+};
+
+static void GenRoundingMode(LocationSummary* locations,
+ FloatRoundingMode mode,
+ Mips64Assembler* assembler) {
FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
+ DCHECK_NE(in, out);
+
Label done;
- // double floor(double in) {
+ // double floor/ceil(double in) {
// if in.isNaN || in.isInfinite || in.isZero {
// return in;
// }
@@ -806,19 +819,23 @@
__ MovD(out, in);
__ Bnezc(AT, &done);
- // Long outLong = floor(in);
+ // Long outLong = floor/ceil(in);
// if outLong == Long.MAX_VALUE {
- // // floor() has almost certainly returned a value which
- // // can't be successfully represented as a signed 64-bit
- // // number. Java expects that the input value will be
- // // returned in these cases.
- // // There is also a small probability that floor(in)
- // // correctly truncates the input value to Long.MAX_VALUE. In
- // // that case, this exception handling code still does the
- // // correct thing.
+ // // floor()/ceil() has almost certainly returned a value
+ // // which can't be successfully represented as a signed
+ // // 64-bit number. Java expects that the input value will
+ // // be returned in these cases.
+ // // There is also a small probability that floor(in)/ceil(in)
+ // // correctly truncates/rounds up the input value to
+ // // Long.MAX_VALUE. In that case, this exception handling
+ // // code still does the correct thing.
// return in;
// }
- __ FloorLD(out, in);
+ if (mode == kFloor) {
+ __ FloorLD(out, in);
+ } else if (mode == kCeil) {
+ __ CeilLD(out, in);
+ }
__ Dmfc1(AT, out);
__ MovD(out, in);
__ LoadConst64(TMP, kPrimLongMax);
@@ -832,53 +849,17 @@
// }
}
+void IntrinsicCodeGeneratorMIPS64::VisitMathFloor(HInvoke* invoke) {
+ GenRoundingMode(invoke->GetLocations(), kFloor, GetAssembler());
+}
+
// double java.lang.Math.ceil(double)
void IntrinsicLocationsBuilderMIPS64::VisitMathCeil(HInvoke* invoke) {
CreateFPToFP(arena_, invoke);
}
void IntrinsicCodeGeneratorMIPS64::VisitMathCeil(HInvoke* invoke) {
- LocationSummary* locations = invoke->GetLocations();
- Mips64Assembler* assembler = GetAssembler();
- FpuRegister in = locations->InAt(0).AsFpuRegister<FpuRegister>();
- FpuRegister out = locations->Out().AsFpuRegister<FpuRegister>();
-
- Label done;
-
- // double ceil(double in) {
- // if in.isNaN || in.isInfinite || in.isZero {
- // return in;
- // }
- __ ClassD(out, in);
- __ Dmfc1(AT, out);
- __ Andi(AT, AT, kFPLeaveUnchanged); // +0.0 | +Inf | -0.0 | -Inf | qNaN | sNaN
- __ MovD(out, in);
- __ Bnezc(AT, &done);
-
- // Long outLong = ceil(in);
- // if outLong == Long.MAX_VALUE {
- // // ceil() has almost certainly returned a value which
- // // can't be successfully represented as a signed 64-bit
- // // number. Java expects that the input value will be
- // // returned in these cases.
- // // There is also a small probability that ceil(in)
- // // correctly rounds up the input value to Long.MAX_VALUE. In
- // // that case, this exception handling code still does the
- // // correct thing.
- // return in;
- // }
- __ CeilLD(out, in);
- __ Dmfc1(AT, out);
- __ MovD(out, in);
- __ LoadConst64(TMP, kPrimLongMax);
- __ Beqc(AT, TMP, &done);
-
- // double out = outLong;
- // return out;
- __ Dmtc1(AT, out);
- __ Cvtdl(out, out);
- __ Bind(&done);
- // }
+ GenRoundingMode(invoke->GetLocations(), kCeil, GetAssembler());
}
// byte libcore.io.Memory.peekByte(long address)
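
A note on the refactoring above: GenRoundingMode writes out before it reads in again, which is why the new DCHECK_NE(in, out) and the kOutputOverlap default in CreateFPToFP belong together; only rint, which has no such hazard, opts back into kNoOutputOverlap. The guard itself can be sketched in standalone C++ (illustrative only, modelling the MIPS conversions that saturate out-of-range values):

#include <cmath>
#include <cstdint>
#include <iostream>

// Standalone sketch of the floor/ceil guard GenRoundingMode emits. This
// models the Java contract only; it is not the generated code.
double FloorOrCeil(double in, bool is_floor) {
  if (std::isnan(in) || std::isinf(in) || in == 0.0) {
    return in;  // NaN, infinities and +/-0.0 pass through unchanged.
  }
  double rounded = is_floor ? std::floor(in) : std::ceil(in);
  // FLOOR.L.D/CEIL.L.D produce Long.MAX_VALUE for out-of-range inputs;
  // emulate that so the C++ cast below stays well-defined.
  const double kTwo63 = 9223372036854775808.0;  // 2^63
  int64_t out_long = (rounded >= kTwo63 || rounded < -kTwo63)
      ? INT64_MAX
      : static_cast<int64_t>(rounded);
  if (out_long == INT64_MAX) {
    return in;  // Almost certainly unrepresentable as int64; Java returns the input.
  }
  return static_cast<double>(out_long);
}

int main() {
  std::cout << FloorOrCeil(2.5, true) << " " << FloorOrCeil(2.5, false) << "\n";  // 2 3
}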
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 68d02a4..90f28e5 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -695,12 +695,15 @@
} else {
redundant_store = true;
}
+ // TODO: eliminate the store if the singleton object is not finalizable.
+ redundant_store = false;
}
if (redundant_store) {
removed_instructions_.push_back(instruction);
substitute_instructions_.push_back(nullptr);
TryRemovingNullCheck(instruction);
}
+
heap_values[idx] = value;
// This store may kill values in other heap locations due to aliasing.
for (size_t i = 0; i < heap_values.size(); i++) {
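
Background for the TODO above: a finalizable singleton's finalizer can observe the stored value, so the store stays live even when the compiled code never reads it again. A standalone C++ analogy, with a destructor standing in for a Java finalizer (illustrative only):

#include <iostream>

struct Finalizable {
  int field = 0;
  ~Finalizable() { std::cout << field << "\n"; }  // "Finalizer" reads the field.
};

int main() {
  Finalizable f;
  f.field = 42;  // No later explicit read, yet eliminating this store would
                 // change what the destructor ("finalizer") observes.
}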
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 98c3096..3480265 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1911,8 +1911,8 @@
return !opt.GetDoesNotNeedEnvironment();
}
-bool HInvokeStaticOrDirect::NeedsDexCache() const {
- if (IsRecursive() || IsStringInit()) {
+bool HInvokeStaticOrDirect::NeedsDexCacheOfDeclaringClass() const {
+ if (GetMethodLoadKind() != MethodLoadKind::kDexCacheViaMethod) {
return false;
}
if (!IsIntrinsic()) {
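
The rename makes the contract explicit: only the kDexCacheViaMethod load kind reaches the dex cache through the declaring class, so the old IsRecursive()/IsStringInit() test was too narrow. A standalone restatement of the sharpened check (the enum mirrors ART's; the per-intrinsic handling that follows in the real function is elided):

enum class MethodLoadKind {
  kStringInit, kRecursive, kDirectAddress, kDirectAddressWithFixup,
  kDexCachePcRelative, kDexCacheViaMethod,
};

// Every load kind except kDexCacheViaMethod bypasses the declaring class's
// dex cache entirely.
bool NeedsDexCacheOfDeclaringClass(MethodLoadKind kind) {
  return kind == MethodLoadKind::kDexCacheViaMethod;
}

int main() {
  return NeedsDexCacheOfDeclaringClass(MethodLoadKind::kRecursive) ? 1 : 0;  // 0
}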
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 7aa933d..6028d4b 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1980,7 +1980,9 @@
return NeedsEnvironment() || IsLoadClass() || IsLoadString();
}
- virtual bool NeedsDexCache() const { return false; }
+ // Returns whether code generation for this instruction will require access to the
+ // dex cache of the current method's declaring class via the current method.
+ virtual bool NeedsDexCacheOfDeclaringClass() const { return false; }
// Does this instruction have any use in an environment before
// control flow hits 'other'?
@@ -3368,15 +3370,15 @@
};
struct DispatchInfo {
- const MethodLoadKind method_load_kind;
- const CodePtrLocation code_ptr_location;
+ MethodLoadKind method_load_kind;
+ CodePtrLocation code_ptr_location;
// The method load data holds
// - thread entrypoint offset for kStringInit method if this is a string init invoke.
// Note that there are multiple string init methods, each having its own offset.
// - the method address for kDirectAddress
// - the dex cache arrays offset for kDexCachePcRelative.
- const uint64_t method_load_data;
- const uint64_t direct_code_ptr;
+ uint64_t method_load_data;
+ uint64_t direct_code_ptr;
};
HInvokeStaticOrDirect(ArenaAllocator* arena,
@@ -3405,6 +3407,10 @@
target_method_(target_method),
dispatch_info_(dispatch_info) {}
+ void SetDispatchInfo(const DispatchInfo& dispatch_info) {
+ dispatch_info_ = dispatch_info;
+ }
+
bool CanDoImplicitNullCheckOn(HInstruction* obj ATTRIBUTE_UNUSED) const OVERRIDE {
// We access the method via the dex cache so we can't do an implicit null check.
// TODO: for intrinsics we can generate implicit null checks.
@@ -3419,11 +3425,13 @@
MethodLoadKind GetMethodLoadKind() const { return dispatch_info_.method_load_kind; }
CodePtrLocation GetCodePtrLocation() const { return dispatch_info_.code_ptr_location; }
bool IsRecursive() const { return GetMethodLoadKind() == MethodLoadKind::kRecursive; }
- bool NeedsDexCache() const OVERRIDE;
+ bool NeedsDexCacheOfDeclaringClass() const OVERRIDE;
bool IsStringInit() const { return GetMethodLoadKind() == MethodLoadKind::kStringInit; }
uint32_t GetCurrentMethodInputIndex() const { return GetNumberOfArguments(); }
bool HasMethodAddress() const { return GetMethodLoadKind() == MethodLoadKind::kDirectAddress; }
- bool HasPcRelDexCache() const { return GetMethodLoadKind() == MethodLoadKind::kDexCachePcRelative; }
+ bool HasPcRelDexCache() const {
+ return GetMethodLoadKind() == MethodLoadKind::kDexCachePcRelative;
+ }
bool HasDirectCodePtr() const { return GetCodePtrLocation() == CodePtrLocation::kCallDirect; }
MethodReference GetTargetMethod() const { return target_method_; }
@@ -4765,7 +4773,7 @@
const DexFile& GetDexFile() { return dex_file_; }
- bool NeedsDexCache() const OVERRIDE { return !is_referrers_class_; }
+ bool NeedsDexCacheOfDeclaringClass() const OVERRIDE { return !is_referrers_class_; }
static SideEffects SideEffectsForArchRuntimeCalls() {
return SideEffects::CanTriggerGC();
@@ -4807,7 +4815,7 @@
// TODO: Can we deopt or debug when we resolve a string?
bool NeedsEnvironment() const OVERRIDE { return false; }
- bool NeedsDexCache() const OVERRIDE { return true; }
+ bool NeedsDexCacheOfDeclaringClass() const OVERRIDE { return true; }
bool CanBeNull() const OVERRIDE { return false; }
static SideEffects SideEffectsForArchRuntimeCalls() {
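
Dropping the const qualifiers from DispatchInfo is what makes the new SetDispatchInfo() compile: a struct with const members has an implicitly deleted copy assignment, so dispatch_info_ = dispatch_info would be rejected. A standalone illustration:

#include <type_traits>

struct ConstMembers { const int kind; };
struct PlainMembers { int kind; };

static_assert(!std::is_copy_assignable<ConstMembers>::value,
              "const members implicitly delete operator=");
static_assert(std::is_copy_assignable<PlainMembers>::value,
              "plain members keep the struct assignable");

int main() { return 0; }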
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d6f2543..6632f95 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -63,6 +63,7 @@
#include "prepare_for_register_allocation.h"
#include "reference_type_propagation.h"
#include "register_allocator.h"
+#include "sharpening.h"
#include "side_effects_analysis.h"
#include "ssa_builder.h"
#include "ssa_phi_elimination.h"
@@ -378,6 +379,7 @@
}
static void MaybeRunInliner(HGraph* graph,
+ CodeGenerator* codegen,
CompilerDriver* driver,
OptimizingCompilerStats* stats,
const DexCompilationUnit& dex_compilation_unit,
@@ -392,7 +394,7 @@
ArenaAllocator* arena = graph->GetArena();
HInliner* inliner = new (arena) HInliner(
- graph, dex_compilation_unit, dex_compilation_unit, driver, handles, stats);
+ graph, codegen, dex_compilation_unit, dex_compilation_unit, driver, handles, stats);
ReferenceTypePropagation* type_propagation =
new (arena) ReferenceTypePropagation(graph, handles,
"reference_type_propagation_after_inlining");
@@ -445,6 +447,7 @@
}
static void RunOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
CompilerDriver* driver,
OptimizingCompilerStats* stats,
const DexCompilationUnit& dex_compilation_unit,
@@ -467,6 +470,7 @@
BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, induction);
ReferenceTypePropagation* type_propagation =
new (arena) ReferenceTypePropagation(graph, handles);
+ HSharpening* sharpening = new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
graph, stats, "instruction_simplifier_after_types");
InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
@@ -481,6 +485,7 @@
fold1,
simplify1,
type_propagation,
+ sharpening,
dce1,
simplify2
};
@@ -502,7 +507,7 @@
RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
} else {
- MaybeRunInliner(graph, driver, stats, dex_compilation_unit, pass_observer, handles);
+ MaybeRunInliner(graph, codegen, driver, stats, dex_compilation_unit, pass_observer, handles);
HOptimization* optimizations2[] = {
// BooleanSimplifier depends on the InstructionSimplifier removing
@@ -577,8 +582,13 @@
ScopedObjectAccess soa(Thread::Current());
StackHandleScopeCollection handles(soa.Self());
soa.Self()->TransitionFromRunnableToSuspended(kNative);
- RunOptimizations(graph, compiler_driver, compilation_stats_.get(),
- dex_compilation_unit, pass_observer, &handles);
+ RunOptimizations(graph,
+ codegen,
+ compiler_driver,
+ compilation_stats_.get(),
+ dex_compilation_unit,
+ pass_observer,
+ &handles);
AllocateRegisters(graph, codegen, pass_observer);
@@ -607,7 +617,7 @@
codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
codegen->GetCoreSpillMask(),
codegen->GetFpuSpillMask(),
- &src_mapping_table,
+ ArrayRef<const SrcMapElem>(src_mapping_table),
ArrayRef<const uint8_t>(), // mapping_table.
ArrayRef<const uint8_t>(stack_map),
ArrayRef<const uint8_t>(), // native_gc_map.
@@ -652,7 +662,7 @@
codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
codegen->GetCoreSpillMask(),
codegen->GetFpuSpillMask(),
- &src_mapping_table,
+ ArrayRef<const SrcMapElem>(src_mapping_table),
AlignVectorSize(mapping_table),
AlignVectorSize(vmap_table),
AlignVectorSize(gc_map),
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
new file mode 100644
index 0000000..6494964
--- /dev/null
+++ b/compiler/optimizing/sharpening.cc
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "sharpening.h"
+
+#include "code_generator.h"
+#include "utils/dex_cache_arrays_layout-inl.h"
+#include "driver/compiler_driver.h"
+#include "nodes.h"
+
+namespace art {
+
+void HSharpening::Run() {
+ // We don't care about the order of the blocks here.
+ for (HBasicBlock* block : graph_->GetReversePostOrder()) {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
+ HInstruction* instruction = it.Current();
+ if (instruction->IsInvokeStaticOrDirect()) {
+ ProcessInvokeStaticOrDirect(instruction->AsInvokeStaticOrDirect());
+ }
+ // TODO: Move the sharpening of invoke-virtual/-interface/-super from HGraphBuilder
+ // here. Rewrite it to avoid the CompilerDriver's reliance on verifier data
+ // because we know the type better when inlining.
+ // TODO: HLoadClass, HLoadString - select PC relative dex cache array access if
+ // available.
+ }
+ }
+}
+
+void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
+ if (invoke->IsStringInit()) {
+ // Not using the dex cache arrays. But we could still try to use a better dispatch...
+ // TODO: Use direct_method and direct_code for the appropriate StringFactory method.
+ return;
+ }
+
+ // TODO: Avoid CompilerDriver.
+ InvokeType invoke_type = invoke->GetOriginalInvokeType();
+ MethodReference target_method(&graph_->GetDexFile(), invoke->GetDexMethodIndex());
+ int vtable_idx;
+ uintptr_t direct_code, direct_method;
+ bool success = compiler_driver_->ComputeInvokeInfo(
+ &compilation_unit_,
+ invoke->GetDexPc(),
+ false /* update_stats: already updated in builder */,
+ true /* enable_devirtualization */,
+ &invoke_type,
+ &target_method,
+ &vtable_idx,
+ &direct_code,
+ &direct_method);
+ DCHECK(success);
+ DCHECK_EQ(invoke_type, invoke->GetInvokeType());
+ DCHECK_EQ(target_method.dex_file, invoke->GetTargetMethod().dex_file);
+ DCHECK_EQ(target_method.dex_method_index, invoke->GetTargetMethod().dex_method_index);
+
+ HInvokeStaticOrDirect::MethodLoadKind method_load_kind;
+ HInvokeStaticOrDirect::CodePtrLocation code_ptr_location;
+ uint64_t method_load_data = 0u;
+ uint64_t direct_code_ptr = 0u;
+
+ HGraph* outer_graph = codegen_->GetGraph();
+ if (target_method.dex_file == &outer_graph->GetDexFile() &&
+ target_method.dex_method_index == outer_graph->GetMethodIdx()) {
+ method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kRecursive;
+ code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallSelf;
+ } else {
+ if (direct_method != 0u) { // Should we use a direct pointer to the method?
+ if (direct_method != static_cast<uintptr_t>(-1)) { // Is the method pointer known now?
+ method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress;
+ method_load_data = direct_method;
+ } else { // The direct pointer will be known at link time.
+ method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup;
+ }
+ } else { // Use dex cache.
+ DCHECK_EQ(target_method.dex_file, &graph_->GetDexFile());
+ DexCacheArraysLayout layout =
+ compiler_driver_->GetDexCacheArraysLayout(target_method.dex_file);
+ if (layout.Valid()) { // Can we use PC-relative access to the dex cache arrays?
+ method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative;
+ method_load_data = layout.MethodOffset(target_method.dex_method_index);
+ } else { // We must go through the ArtMethod's pointer to resolved methods.
+ method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod;
+ }
+ }
+ if (direct_code != 0u) { // Should we use a direct pointer to the code?
+ if (direct_code != static_cast<uintptr_t>(-1)) { // Is the code pointer known now?
+ code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirect;
+ direct_code_ptr = direct_code;
+ } else if (compiler_driver_->IsImage() ||
+ target_method.dex_file == &graph_->GetDexFile()) {
+ // Use PC-relative calls for invokes within a multi-dex oat file.
+ // TODO: Recognize when the target dex file is within the current oat file for
+ // app compilation. At the moment we recognize only the boot image as multi-dex.
+ code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative;
+ } else { // The direct pointer will be known at link time.
+ // NOTE: This is used for app->boot calls when compiling an app against
+ // a relocatable but not yet relocated image.
+ code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup;
+ }
+ } else { // We must use the code pointer from the ArtMethod.
+ code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+ }
+ }
+
+ if (graph_->IsDebuggable()) {
+ // For debuggable apps always use the code pointer from ArtMethod
+ // so that we don't circumvent instrumentation stubs if installed.
+ code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
+ }
+
+ HInvokeStaticOrDirect::DispatchInfo desired_dispatch_info = {
+ method_load_kind, code_ptr_location, method_load_data, direct_code_ptr
+ };
+ HInvokeStaticOrDirect::DispatchInfo dispatch_info =
+ codegen_->GetSupportedInvokeStaticOrDirectDispatch(desired_dispatch_info,
+ invoke->GetTargetMethod());
+ invoke->SetDispatchInfo(dispatch_info);
+}
+
+} // namespace art
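
For reference, the method-load half of the decision tree above restated as standalone C++; the 0/-1 sentinels follow the ComputeInvokeInfo convention visible in the code, and the debuggable override is omitted:

#include <cstdint>
#include <iostream>

enum class MethodLoadKind {
  kRecursive, kDirectAddress, kDirectAddressWithFixup,
  kDexCachePcRelative, kDexCacheViaMethod,
};

// direct_method: 0 = no direct pointer, -1 = known only at link time,
// anything else = the pointer itself.
MethodLoadKind SelectMethodLoadKind(bool is_recursive,
                                    uintptr_t direct_method,
                                    bool dex_cache_layout_valid) {
  if (is_recursive) {
    return MethodLoadKind::kRecursive;
  }
  if (direct_method != 0u) {
    return (direct_method != static_cast<uintptr_t>(-1))
        ? MethodLoadKind::kDirectAddress
        : MethodLoadKind::kDirectAddressWithFixup;
  }
  return dex_cache_layout_valid ? MethodLoadKind::kDexCachePcRelative
                                : MethodLoadKind::kDexCacheViaMethod;
}

int main() {
  // Typical AOT case: no direct pointer, valid dex cache arrays layout.
  bool pc_rel = SelectMethodLoadKind(false, 0u, true)
      == MethodLoadKind::kDexCachePcRelative;
  std::cout << pc_rel << "\n";  // 1
}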
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
new file mode 100644
index 0000000..adae700
--- /dev/null
+++ b/compiler/optimizing/sharpening.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_SHARPENING_H_
+#define ART_COMPILER_OPTIMIZING_SHARPENING_H_
+
+#include "optimization.h"
+
+namespace art {
+
+class CodeGenerator;
+class CompilerDriver;
+class DexCompilationUnit;
+class HInvokeStaticOrDirect;
+
+// Optimization that tries to improve the way we dispatch methods and access types,
+// fields, etc. Besides actual method sharpening based on receiver type (for example
+// virtual->direct), this includes selecting the best available dispatch for
+// invoke-static/-direct based on code generator support.
+class HSharpening : public HOptimization {
+ public:
+ HSharpening(HGraph* graph,
+ CodeGenerator* codegen,
+ const DexCompilationUnit& compilation_unit,
+ CompilerDriver* compiler_driver)
+ : HOptimization(graph, kSharpeningPassName),
+ codegen_(codegen),
+ compilation_unit_(compilation_unit),
+ compiler_driver_(compiler_driver) { }
+
+ void Run() OVERRIDE;
+
+ static constexpr const char* kSharpeningPassName = "sharpening";
+
+ private:
+ void ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke);
+
+ CodeGenerator* codegen_;
+ const DexCompilationUnit& compilation_unit_;
+ CompilerDriver* compiler_driver_;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_OPTIMIZING_SHARPENING_H_
diff --git a/compiler/utils/dedupe_set-inl.h b/compiler/utils/dedupe_set-inl.h
new file mode 100644
index 0000000..ac54813
--- /dev/null
+++ b/compiler/utils/dedupe_set-inl.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_DEDUPE_SET_INL_H_
+#define ART_COMPILER_UTILS_DEDUPE_SET_INL_H_
+
+#include "dedupe_set.h"
+
+#include <algorithm>
+#include <inttypes.h>
+#include <unordered_map>
+
+#include "base/mutex.h"
+#include "base/hash_set.h"
+#include "base/stl_util.h"
+#include "base/stringprintf.h"
+#include "base/time_utils.h"
+
+namespace art {
+
+template <typename InKey,
+ typename StoreKey,
+ typename Alloc,
+ typename HashType,
+ typename HashFunc,
+ HashType kShard>
+struct DedupeSet<InKey, StoreKey, Alloc, HashType, HashFunc, kShard>::Stats {
+ size_t collision_sum = 0u;
+ size_t collision_max = 0u;
+ size_t total_probe_distance = 0u;
+ size_t total_size = 0u;
+};
+
+template <typename InKey,
+ typename StoreKey,
+ typename Alloc,
+ typename HashType,
+ typename HashFunc,
+ HashType kShard>
+class DedupeSet<InKey, StoreKey, Alloc, HashType, HashFunc, kShard>::Shard {
+ public:
+ Shard(const Alloc& alloc, const std::string& lock_name)
+ : alloc_(alloc),
+ lock_name_(lock_name),
+ lock_(lock_name_.c_str()),
+ keys_() {
+ }
+
+ ~Shard() {
+ for (const HashedKey<StoreKey>& key : keys_) {
+ DCHECK(key.Key() != nullptr);
+ alloc_.Destroy(key.Key());
+ }
+ }
+
+ const StoreKey* Add(Thread* self, size_t hash, const InKey& in_key) REQUIRES(!lock_) {
+ MutexLock lock(self, lock_);
+ HashedKey<InKey> hashed_in_key(hash, &in_key);
+ auto it = keys_.Find(hashed_in_key);
+ if (it != keys_.end()) {
+ DCHECK(it->Key() != nullptr);
+ return it->Key();
+ }
+ const StoreKey* store_key = alloc_.Copy(in_key);
+ keys_.Insert(HashedKey<StoreKey> { hash, store_key });
+ return store_key;
+ }
+
+ void UpdateStats(Thread* self, Stats* global_stats) REQUIRES(!lock_) {
+ // HashSet<> doesn't keep entries ordered by hash, so we actually allocate memory
+ // for bookkeeping while collecting the stats.
+ std::unordered_map<HashType, size_t> stats;
+ {
+ MutexLock lock(self, lock_);
+ // Note: The total_probe_distance will be updated with the current state.
+ // It may have been higher before a re-hash.
+ global_stats->total_probe_distance += keys_.TotalProbeDistance();
+ global_stats->total_size += keys_.Size();
+ for (const HashedKey<StoreKey>& key : keys_) {
+ auto it = stats.find(key.Hash());
+ if (it == stats.end()) {
+ stats.insert({key.Hash(), 1u});
+ } else {
+ ++it->second;
+ }
+ }
+ }
+ for (const auto& entry : stats) {
+ size_t number_of_entries = entry.second;
+ if (number_of_entries > 1u) {
+ global_stats->collision_sum += number_of_entries - 1u;
+ global_stats->collision_max = std::max(global_stats->collision_max, number_of_entries);
+ }
+ }
+ }
+
+ private:
+ template <typename T>
+ class HashedKey {
+ public:
+ HashedKey() : hash_(0u), key_(nullptr) { }
+ HashedKey(size_t hash, const T* key) : hash_(hash), key_(key) { }
+
+ size_t Hash() const {
+ return hash_;
+ }
+
+ const T* Key() const {
+ return key_;
+ }
+
+ bool IsEmpty() const {
+ return Key() == nullptr;
+ }
+
+ void MakeEmpty() {
+ key_ = nullptr;
+ }
+
+ private:
+ size_t hash_;
+ const T* key_;
+ };
+
+ class ShardEmptyFn {
+ public:
+ bool IsEmpty(const HashedKey<StoreKey>& key) const {
+ return key.IsEmpty();
+ }
+
+ void MakeEmpty(HashedKey<StoreKey>& key) {
+ key.MakeEmpty();
+ }
+ };
+
+ struct ShardHashFn {
+ template <typename T>
+ size_t operator()(const HashedKey<T>& key) const {
+ return key.Hash();
+ }
+ };
+
+ struct ShardPred {
+ typename std::enable_if<!std::is_same<StoreKey, InKey>::value, bool>::type
+ operator()(const HashedKey<StoreKey>& lhs, const HashedKey<StoreKey>& rhs) const {
+ DCHECK(lhs.Key() != nullptr);
+ DCHECK(rhs.Key() != nullptr);
+ // Rehashing: stored keys are already deduplicated, so we can simply compare key pointers.
+ return lhs.Key() == rhs.Key();
+ }
+
+ template <typename LeftT, typename RightT>
+ bool operator()(const HashedKey<LeftT>& lhs, const HashedKey<RightT>& rhs) const {
+ DCHECK(lhs.Key() != nullptr);
+ DCHECK(rhs.Key() != nullptr);
+ return lhs.Hash() == rhs.Hash() &&
+ lhs.Key()->size() == rhs.Key()->size() &&
+ std::equal(lhs.Key()->begin(), lhs.Key()->end(), rhs.Key()->begin());
+ }
+ };
+
+ Alloc alloc_;
+ const std::string lock_name_;
+ Mutex lock_;
+ HashSet<HashedKey<StoreKey>, ShardEmptyFn, ShardHashFn, ShardPred> keys_ GUARDED_BY(lock_);
+};
+
+template <typename InKey,
+ typename StoreKey,
+ typename Alloc,
+ typename HashType,
+ typename HashFunc,
+ HashType kShard>
+const StoreKey* DedupeSet<InKey, StoreKey, Alloc, HashType, HashFunc, kShard>::Add(
+ Thread* self, const InKey& key) {
+ uint64_t hash_start;
+ if (kIsDebugBuild) {
+ hash_start = NanoTime();
+ }
+ HashType raw_hash = HashFunc()(key);
+ if (kIsDebugBuild) {
+ uint64_t hash_end = NanoTime();
+ hash_time_ += hash_end - hash_start;
+ }
+ HashType shard_hash = raw_hash / kShard;
+ HashType shard_bin = raw_hash % kShard;
+ return shards_[shard_bin]->Add(self, shard_hash, key);
+}
+
+template <typename InKey,
+ typename StoreKey,
+ typename Alloc,
+ typename HashType,
+ typename HashFunc,
+ HashType kShard>
+DedupeSet<InKey, StoreKey, Alloc, HashType, HashFunc, kShard>::DedupeSet(const char* set_name,
+ const Alloc& alloc)
+ : hash_time_(0) {
+ for (HashType i = 0; i < kShard; ++i) {
+ std::ostringstream oss;
+ oss << set_name << " lock " << i;
+ shards_[i].reset(new Shard(alloc, oss.str()));
+ }
+}
+
+template <typename InKey,
+ typename StoreKey,
+ typename Alloc,
+ typename HashType,
+ typename HashFunc,
+ HashType kShard>
+DedupeSet<InKey, StoreKey, Alloc, HashType, HashFunc, kShard>::~DedupeSet() {
+ // Everything done by member destructors.
+}
+
+template <typename InKey,
+ typename StoreKey,
+ typename Alloc,
+ typename HashType,
+ typename HashFunc,
+ HashType kShard>
+std::string DedupeSet<InKey, StoreKey, Alloc, HashType, HashFunc, kShard>::DumpStats(
+ Thread* self) const {
+ Stats stats;
+ for (HashType shard = 0; shard < kShard; ++shard) {
+ shards_[shard]->UpdateStats(self, &stats);
+ }
+ return StringPrintf("%zu collisions, %zu max hash collisions, "
+ "%zu/%zu probe distance, %" PRIu64 " ns hash time",
+ stats.collision_sum,
+ stats.collision_max,
+ stats.total_probe_distance,
+ stats.total_size,
+ hash_time_);
+}
+
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_DEDUPE_SET_INL_H_
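
A quick standalone demo of the shard selection in Add(): the remainder picks which shard (and thus which lock) serves the key, and the quotient is kept as the hash used inside that shard. Values are illustrative:

#include <cstdint>
#include <iostream>

int main() {
  using HashType = uint64_t;
  constexpr HashType kShard = 4;            // Number of shards (kShard in DedupeSet).
  HashType raw_hash = 43u;                  // Whatever HashFunc returned.
  HashType shard_hash = raw_hash / kShard;  // 10: hash stored within the shard.
  HashType shard_bin = raw_hash % kShard;   // 3: index of the shard to lock.
  std::cout << "shard " << shard_bin << ", in-shard hash " << shard_hash << "\n";
}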
diff --git a/compiler/utils/dedupe_set.h b/compiler/utils/dedupe_set.h
index 2c4a689..b62f216 100644
--- a/compiler/utils/dedupe_set.h
+++ b/compiler/utils/dedupe_set.h
@@ -17,151 +17,41 @@
#ifndef ART_COMPILER_UTILS_DEDUPE_SET_H_
#define ART_COMPILER_UTILS_DEDUPE_SET_H_
-#include <algorithm>
-#include <inttypes.h>
#include <memory>
-#include <set>
+#include <stdint.h>
#include <string>
-#include "base/mutex.h"
-#include "base/stl_util.h"
-#include "base/stringprintf.h"
-#include "base/time_utils.h"
-#include "utils/swap_space.h"
+#include "base/macros.h"
namespace art {
+class Thread;
+
// A set of Keys that support a HashFunc returning HashType. Used to find duplicates of Key in the
// Add method. The data structure is thread-safe through the use of internal locks; it also
// supports the lock being sharded.
-template <typename InKey, typename StoreKey, typename HashType, typename HashFunc,
+template <typename InKey,
+ typename StoreKey,
+ typename Alloc,
+ typename HashType,
+ typename HashFunc,
HashType kShard = 1>
class DedupeSet {
- typedef std::pair<HashType, const InKey*> HashedInKey;
- struct HashedKey {
- StoreKey* store_ptr;
- union {
- HashType store_hash; // Valid if store_ptr != null.
- const HashedInKey* in_key; // Valid if store_ptr == null.
- };
- };
-
- class Comparator {
- public:
- bool operator()(const HashedKey& a, const HashedKey& b) const {
- HashType a_hash = (a.store_ptr != nullptr) ? a.store_hash : a.in_key->first;
- HashType b_hash = (b.store_ptr != nullptr) ? b.store_hash : b.in_key->first;
- if (a_hash != b_hash) {
- return a_hash < b_hash;
- }
- if (a.store_ptr != nullptr && b.store_ptr != nullptr) {
- return std::lexicographical_compare(a.store_ptr->begin(), a.store_ptr->end(),
- b.store_ptr->begin(), b.store_ptr->end());
- } else if (a.store_ptr != nullptr && b.store_ptr == nullptr) {
- return std::lexicographical_compare(a.store_ptr->begin(), a.store_ptr->end(),
- b.in_key->second->begin(), b.in_key->second->end());
- } else if (a.store_ptr == nullptr && b.store_ptr != nullptr) {
- return std::lexicographical_compare(a.in_key->second->begin(), a.in_key->second->end(),
- b.store_ptr->begin(), b.store_ptr->end());
- } else {
- return std::lexicographical_compare(a.in_key->second->begin(), a.in_key->second->end(),
- b.in_key->second->begin(), b.in_key->second->end());
- }
- }
- };
-
public:
- StoreKey* Add(Thread* self, const InKey& key) {
- uint64_t hash_start;
- if (kIsDebugBuild) {
- hash_start = NanoTime();
- }
- HashType raw_hash = HashFunc()(key);
- if (kIsDebugBuild) {
- uint64_t hash_end = NanoTime();
- hash_time_ += hash_end - hash_start;
- }
- HashType shard_hash = raw_hash / kShard;
- HashType shard_bin = raw_hash % kShard;
- HashedInKey hashed_in_key(shard_hash, &key);
- HashedKey hashed_key;
- hashed_key.store_ptr = nullptr;
- hashed_key.in_key = &hashed_in_key;
- MutexLock lock(self, *lock_[shard_bin]);
- auto it = keys_[shard_bin].find(hashed_key);
- if (it != keys_[shard_bin].end()) {
- DCHECK(it->store_ptr != nullptr);
- return it->store_ptr;
- }
- hashed_key.store_ptr = CreateStoreKey(key);
- hashed_key.store_hash = shard_hash;
- keys_[shard_bin].insert(hashed_key);
- return hashed_key.store_ptr;
- }
+ // Add a new key to the dedupe set if not present. Return the equivalent deduplicated stored key.
+ const StoreKey* Add(Thread* self, const InKey& key);
- DedupeSet(const char* set_name, SwapAllocator<void>& alloc)
- : allocator_(alloc), hash_time_(0) {
- for (HashType i = 0; i < kShard; ++i) {
- std::ostringstream oss;
- oss << set_name << " lock " << i;
- lock_name_[i] = oss.str();
- lock_[i].reset(new Mutex(lock_name_[i].c_str()));
- }
- }
+ DedupeSet(const char* set_name, const Alloc& alloc);
- ~DedupeSet() {
- // Have to manually free all pointers.
- for (auto& shard : keys_) {
- for (const auto& hashed_key : shard) {
- DCHECK(hashed_key.store_ptr != nullptr);
- DeleteStoreKey(hashed_key.store_ptr);
- }
- }
- }
+ ~DedupeSet();
- std::string DumpStats() const {
- size_t collision_sum = 0;
- size_t collision_max = 0;
- for (HashType shard = 0; shard < kShard; ++shard) {
- HashType last_hash = 0;
- size_t collision_cur_max = 0;
- for (const HashedKey& key : keys_[shard]) {
- DCHECK(key.store_ptr != nullptr);
- if (key.store_hash == last_hash) {
- collision_cur_max++;
- if (collision_cur_max > 1) {
- collision_sum++;
- if (collision_cur_max > collision_max) {
- collision_max = collision_cur_max;
- }
- }
- } else {
- collision_cur_max = 1;
- last_hash = key.store_hash;
- }
- }
- }
- return StringPrintf("%zu collisions, %zu max bucket size, %" PRIu64 " ns hash time",
- collision_sum, collision_max, hash_time_);
- }
+ std::string DumpStats(Thread* self) const;
private:
- StoreKey* CreateStoreKey(const InKey& key) {
- StoreKey* ret = allocator_.allocate(1);
- allocator_.construct(ret, key.begin(), key.end(), allocator_);
- return ret;
- }
+ struct Stats;
+ class Shard;
- void DeleteStoreKey(StoreKey* key) {
- SwapAllocator<StoreKey> alloc(allocator_);
- alloc.destroy(key);
- alloc.deallocate(key, 1);
- }
-
- std::string lock_name_[kShard];
- std::unique_ptr<Mutex> lock_[kShard];
- std::set<HashedKey, Comparator> keys_[kShard];
- SwapAllocator<StoreKey> allocator_;
+ std::unique_ptr<Shard> shards_[kShard];
uint64_t hash_time_;
DISALLOW_COPY_AND_ASSIGN(DedupeSet);
diff --git a/compiler/utils/dedupe_set_test.cc b/compiler/utils/dedupe_set_test.cc
index 637964e..60a891d 100644
--- a/compiler/utils/dedupe_set_test.cc
+++ b/compiler/utils/dedupe_set_test.cc
@@ -18,15 +18,18 @@
#include <algorithm>
#include <cstdio>
+#include <vector>
+#include "dedupe_set-inl.h"
#include "gtest/gtest.h"
#include "thread-inl.h"
+#include "utils/array_ref.h"
namespace art {
-class DedupeHashFunc {
+class DedupeSetTestHashFunc {
public:
- size_t operator()(const std::vector<uint8_t>& array) const {
+ size_t operator()(const ArrayRef<const uint8_t>& array) const {
size_t hash = 0;
for (uint8_t c : array) {
hash += c;
@@ -36,46 +39,52 @@
return hash;
}
};
+
+class DedupeSetTestAlloc {
+ public:
+ const std::vector<uint8_t>* Copy(const ArrayRef<const uint8_t>& src) {
+ return new std::vector<uint8_t>(src.begin(), src.end());
+ }
+
+ void Destroy(const std::vector<uint8_t>* key) {
+ delete key;
+ }
+};
+
TEST(DedupeSetTest, Test) {
Thread* self = Thread::Current();
- typedef std::vector<uint8_t> ByteArray;
- SwapAllocator<void> swap(nullptr);
- DedupeSet<ByteArray, SwapVector<uint8_t>, size_t, DedupeHashFunc> deduplicator("test", swap);
- SwapVector<uint8_t>* array1;
+ DedupeSetTestAlloc alloc;
+ DedupeSet<ArrayRef<const uint8_t>,
+ std::vector<uint8_t>,
+ DedupeSetTestAlloc,
+ size_t,
+ DedupeSetTestHashFunc> deduplicator("test", alloc);
+ const std::vector<uint8_t>* array1;
{
- ByteArray test1;
- test1.push_back(10);
- test1.push_back(20);
- test1.push_back(30);
- test1.push_back(45);
-
+ uint8_t raw_test1[] = { 10u, 20u, 30u, 45u };
+ ArrayRef<const uint8_t> test1(raw_test1);
array1 = deduplicator.Add(self, test1);
ASSERT_NE(array1, nullptr);
ASSERT_TRUE(std::equal(test1.begin(), test1.end(), array1->begin()));
}
- SwapVector<uint8_t>* array2;
+ const std::vector<uint8_t>* array2;
{
- ByteArray test1;
- test1.push_back(10);
- test1.push_back(20);
- test1.push_back(30);
- test1.push_back(45);
- array2 = deduplicator.Add(self, test1);
+ uint8_t raw_test2[] = { 10u, 20u, 30u, 45u };
+ ArrayRef<const uint8_t> test2(raw_test2);
+ array2 = deduplicator.Add(self, test2);
ASSERT_EQ(array2, array1);
- ASSERT_TRUE(std::equal(test1.begin(), test1.end(), array2->begin()));
+ ASSERT_TRUE(std::equal(test2.begin(), test2.end(), array2->begin()));
}
- SwapVector<uint8_t>* array3;
+ const std::vector<uint8_t>* array3;
{
- ByteArray test1;
- test1.push_back(10);
- test1.push_back(22);
- test1.push_back(30);
- test1.push_back(47);
- array3 = deduplicator.Add(self, test1);
+ uint8_t raw_test3[] = { 10u, 22u, 30u, 47u };
+ ArrayRef<const uint8_t> test3(raw_test3);
+ array3 = deduplicator.Add(self, test3);
ASSERT_NE(array3, nullptr);
- ASSERT_TRUE(std::equal(test1.begin(), test1.end(), array3->begin()));
+ ASSERT_NE(array3, array1);
+ ASSERT_TRUE(std::equal(test3.begin(), test3.end(), array3->begin()));
}
}
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 282db5d..52e6c02 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -775,7 +775,7 @@
// case Instruction::k35ms: // [opt] invoke-virtual+super
// case Instruction::k35mi: // [opt] inline invoke
{
- u4 arg[5];
+ u4 arg[Instruction::kMaxVarArgRegs];
pDecInsn->GetVarArgs(arg);
fputs(" {", gOutFile);
for (int i = 0, n = pDecInsn->VRegA(); i < n; i++) {
@@ -788,6 +788,21 @@
fprintf(gOutFile, "}, %s", indexBuf);
}
break;
+ case Instruction::k25x: // op vC, {vD, vE, vF, vG} (B: count)
+ {
+ u4 arg[Instruction::kMaxVarArgRegs25x];
+ pDecInsn->GetAllArgs25x(arg);
+ fprintf(gOutFile, " v%d, {", arg[0]);
+ for (int i = 0, n = pDecInsn->VRegB(); i < n; i++) {
+ if (i == 0) {
+ fprintf(gOutFile, "v%d", arg[Instruction::kLambdaVirtualRegisterWidth + i]);
+ } else {
+ fprintf(gOutFile, ", v%d", arg[Instruction::kLambdaVirtualRegisterWidth + i]);
+ }
+ } // for
+ fputc('}', gOutFile);
+ }
+ break;
case Instruction::k3rc: // op {vCCCC .. v(CCCC+AA-1)}, thing@BBBB
// NOT SUPPORTED:
// case Instruction::k3rms: // [opt] invoke-virtual+super/range
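
To make the new k25x branch concrete: assuming GetAllArgs25x() lays out the array as {vC, second closure register, vD, vE, ...}, which is what indexing from kLambdaVirtualRegisterWidth implies, an instruction with vC = 1, B = 2 and arguments v4, v5 prints as sketched below. The register values are invented for illustration:

#include <cstdio>

int main() {
  const unsigned kLambdaVirtualRegisterWidth = 2;  // Lambdas occupy 2 registers.
  unsigned arg[6] = {1, 2, 4, 5, 0, 0};  // arg[0] = vC; real args start at index 2.
  int vreg_b = 2;                        // B: number of argument registers.
  std::printf(" v%u, {", arg[0]);
  for (int i = 0; i < vreg_b; ++i) {
    if (i == 0) {
      std::printf("v%u", arg[kLambdaVirtualRegisterWidth + i]);
    } else {
      std::printf(", v%u", arg[kLambdaVirtualRegisterWidth + i]);
    }
  }
  std::printf("}\n");  // Prints: " v1, {v4, v5}"
}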
diff --git a/runtime/arch/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
index 4dedb33..375a03a 100644
--- a/runtime/arch/mips/context_mips.cc
+++ b/runtime/arch/mips/context_mips.cc
@@ -28,11 +28,11 @@
std::fill_n(gprs_, arraysize(gprs_), nullptr);
std::fill_n(fprs_, arraysize(fprs_), nullptr);
gprs_[SP] = &sp_;
- gprs_[RA] = &ra_;
+ gprs_[T9] = &t9_;
gprs_[A0] = &arg0_;
// Initialize registers with easy to spot debug values.
sp_ = MipsContext::kBadGprBase + SP;
- ra_ = MipsContext::kBadGprBase + RA;
+ t9_ = MipsContext::kBadGprBase + T9;
arg0_ = 0;
}
diff --git a/runtime/arch/mips/context_mips.h b/runtime/arch/mips/context_mips.h
index f1e2905..7dcff63 100644
--- a/runtime/arch/mips/context_mips.h
+++ b/runtime/arch/mips/context_mips.h
@@ -41,7 +41,7 @@
}
void SetPC(uintptr_t new_pc) OVERRIDE {
- SetGPR(RA, new_pc);
+ SetGPR(T9, new_pc);
}
bool IsAccessibleGPR(uint32_t reg) OVERRIDE {
@@ -86,9 +86,10 @@
// Pointers to registers in the stack, initialized to null except for the special cases below.
uintptr_t* gprs_[kNumberOfCoreRegisters];
uint32_t* fprs_[kNumberOfFRegisters];
- // Hold values for sp and ra (return address) if they are not located within a stack frame, as
- // well as the first argument.
- uintptr_t sp_, ra_, arg0_;
+ // Hold values for sp and t9 if they are not located within a stack frame. We use t9 for the
+ // PC (as ra is required to be valid for single-frame deopt and must not be clobbered). We
+ // also need the first argument for single-frame deopt.
+ uintptr_t sp_, t9_, arg0_;
};
} // namespace mips
} // namespace art
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index ba58c3f..0691f2a 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -374,7 +374,7 @@
lw $ra, 124($a0)
lw $a0, 16($a0)
move $v0, $zero # clear result registers r0 and r1
- jalr $zero, $ra # do long jump
+ jalr $zero, $t9 # do long jump
move $v1, $zero
END art_quick_do_long_jump
diff --git a/runtime/art_method.cc b/runtime/art_method.cc
index f5befdf..a10d7af 100644
--- a/runtime/art_method.cc
+++ b/runtime/art_method.cc
@@ -390,21 +390,70 @@
}
Runtime* runtime = Runtime::Current();
- const void* code = runtime->GetInstrumentation()->GetQuickCodeFor(this, sizeof(void*));
- DCHECK(code != nullptr);
+ const void* existing_entry_point = GetEntryPointFromQuickCompiledCode();
+ DCHECK(existing_entry_point != nullptr);
+ ClassLinker* class_linker = runtime->GetClassLinker();
- if (runtime->GetClassLinker()->IsQuickGenericJniStub(code)) {
+ if (class_linker->IsQuickGenericJniStub(existing_entry_point)) {
// The generic JNI does not have any method header.
return nullptr;
}
- code = EntryPointToCodePointer(code);
- OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>(
- reinterpret_cast<uintptr_t>(code) - sizeof(OatQuickMethodHeader));
+ // Check whether the current entry point contains this pc.
+ if (!class_linker->IsQuickResolutionStub(existing_entry_point) &&
+ !class_linker->IsQuickToInterpreterBridge(existing_entry_point)) {
+ OatQuickMethodHeader* method_header =
+ OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
- // TODO(ngeoffray): validate the pc. Note that unit tests can give unrelated pcs (for
- // example arch_test).
- UNUSED(pc);
+ if (method_header->Contains(pc)) {
+ return method_header;
+ }
+ }
+
+ // Check whether the pc is in the JIT code cache.
+ jit::Jit* jit = runtime->GetJit();
+ if (jit != nullptr) {
+ jit::JitCodeCache* code_cache = jit->GetCodeCache();
+ OatQuickMethodHeader* method_header = code_cache->LookupMethodHeader(pc, this);
+ if (method_header != nullptr) {
+ DCHECK(method_header->Contains(pc));
+ return method_header;
+ } else {
+ DCHECK(!code_cache->ContainsPc(reinterpret_cast<const void*>(pc))) << std::hex << pc;
+ }
+ }
+
+ // The code has to be in an oat file.
+ bool found;
+ OatFile::OatMethod oat_method = class_linker->FindOatMethodFor(this, &found);
+ if (!found) {
+ // Only for unit tests.
+ // TODO(ngeoffray): Update these tests to pass the right pc?
+ return OatQuickMethodHeader::FromEntryPoint(existing_entry_point);
+ }
+ const void* oat_entry_point = oat_method.GetQuickCode();
+ if (oat_entry_point == nullptr || class_linker->IsQuickGenericJniStub(oat_entry_point)) {
+ DCHECK(IsNative());
+ return nullptr;
+ }
+
+ OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromEntryPoint(oat_entry_point);
+ if (pc == 0) {
+ // This is a downcall, it can only happen for a native method.
+ DCHECK(IsNative());
+ return method_header;
+ }
+
+ if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
+ // If we're instrumenting, just return the compiled OAT code.
+ // TODO(ngeoffray): Avoid this call path.
+ return method_header;
+ }
+
+ DCHECK(method_header->Contains(pc))
+ << PrettyMethod(this)
+ << std::hex << pc << " " << oat_entry_point
+ << " " << (uintptr_t)(method_header->code_ + method_header->code_size_);
return method_header;
}
diff --git a/runtime/art_method.h b/runtime/art_method.h
index 9f1495c..bb9804e 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -433,6 +433,10 @@
return ++hotness_count_;
}
+ void ClearCounter() {
+ hotness_count_ = 0;
+ }
+
const uint8_t* GetQuickenedInfo() SHARED_REQUIRES(Locks::mutator_lock_);
// Returns the method header for the compiled code containing 'pc'. Note that runtime
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 1704688..71afa0f 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -94,6 +94,8 @@
"CodeGen ",
"ParallelMove ",
"GraphChecker ",
+ "LSE ",
+ "Verifier ",
};
template <bool kCount>
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 7747bdf..ace6c38 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -107,6 +107,7 @@
kArenaAllocParallelMoveResolver,
kArenaAllocGraphChecker,
kArenaAllocLSE,
+ kArenaAllocVerifier,
kNumArenaAllocKinds
};
diff --git a/runtime/base/dchecked_vector.h b/runtime/base/dchecked_vector.h
index 6ec573a..2bd12df 100644
--- a/runtime/base/dchecked_vector.h
+++ b/runtime/base/dchecked_vector.h
@@ -59,8 +59,10 @@
: Base() { }
explicit dchecked_vector(const allocator_type& alloc)
: Base(alloc) { }
+ // Note that we cannot forward to std::vector(size_type, const allocator_type&) because it is
+ // not available in C++11, the latest standard GCC can support. http://b/25022512
explicit dchecked_vector(size_type n, const allocator_type& alloc = allocator_type())
- : Base(n, alloc) { }
+ : Base(alloc) { resize(n); }
dchecked_vector(size_type n,
const value_type& value,
const allocator_type& alloc = allocator_type())
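
The constructor above uses the standard C++11 substitute for the C++14 vector(size_type, const allocator_type&) overload: delegate to the allocator-only constructor, then resize(n), which value-initializes n elements exactly as the missing overload would. A standalone sketch with a hypothetical wrapper class:

#include <memory>
#include <vector>

// Hypothetical stand-in for dchecked_vector, showing only the constructor trick.
template <typename T, typename Alloc = std::allocator<T>>
class demo_vector : public std::vector<T, Alloc> {
 public:
  using Base = std::vector<T, Alloc>;
  using size_type = typename Base::size_type;
  explicit demo_vector(size_type n, const Alloc& alloc = Alloc())
      : Base(alloc) { this->resize(n); }
};

int main() {
  demo_vector<int> v(3);           // Three value-initialized (zero) elements.
  return v.size() == 3u ? 0 : 1;   // Returns 0.
}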
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index 4819f06..95baa82 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -420,6 +420,19 @@
Resize(Size() / max_load_factor_);
}
+ // Reserve enough room to insert until Size() == num_elements without needing to grow the hash
+ // set. No-op if the hash set is already large enough to do this.
+ void Reserve(size_t num_elements) {
+ size_t num_buckets = num_elements / max_load_factor_;
+ // Deal with rounding errors. Add one for rounding.
+ while (static_cast<size_t>(num_buckets * max_load_factor_) <= num_elements + 1u) {
+ ++num_buckets;
+ }
+ if (num_buckets > NumBuckets()) {
+ Resize(num_buckets);
+ }
+ }
+
// Total distance that inserted elements were probed. Used for measuring how good hash
// functions are.
size_t TotalProbeDistance() const {
@@ -488,6 +501,15 @@
}
}
+ // The hash set expands when Size() reaches ElementsUntilExpand().
+ size_t ElementsUntilExpand() const {
+ return elements_until_expand_;
+ }
+
+ size_t NumBuckets() const {
+ return num_buckets_;
+ }
+
private:
T& ElementForIndex(size_t index) {
DCHECK_LT(index, NumBuckets());
@@ -543,10 +565,6 @@
return emptyfn_.IsEmpty(ElementForIndex(index));
}
- size_t NumBuckets() const {
- return num_buckets_;
- }
-
// Allocate a number of buckets.
void AllocateStorage(size_t num_buckets) {
num_buckets_ = num_buckets;
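
A worked example of the Reserve() rounding loop, using an illustrative max load factor of 0.7 (the real value comes from the HashSet instance):

#include <cstddef>
#include <iostream>

// Same arithmetic as HashSet::Reserve above, extracted for demonstration.
std::size_t BucketsFor(std::size_t num_elements, double max_load_factor) {
  std::size_t num_buckets = static_cast<std::size_t>(num_elements / max_load_factor);
  // Grow until the expansion threshold strictly exceeds num_elements + 1.
  while (static_cast<std::size_t>(num_buckets * max_load_factor) <= num_elements + 1u) {
    ++num_buckets;
  }
  return num_buckets;
}

int main() {
  // 10 / 0.7 = 14 buckets to start; the loop grows this to 18, the first
  // count where floor(n * 0.7) = 12 exceeds num_elements + 1 = 11.
  std::cout << BucketsFor(10, 0.7) << "\n";  // 18
}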
diff --git a/runtime/base/hash_set_test.cc b/runtime/base/hash_set_test.cc
index 743e98e..8254063 100644
--- a/runtime/base/hash_set_test.cc
+++ b/runtime/base/hash_set_test.cc
@@ -333,4 +333,25 @@
ASSERT_NE(hash_set.end(), hash_set.Find(std::forward_list<int>({1, 2, 3, 4})));
}
+TEST_F(HashSetTest, TestReserve) {
+ HashSet<std::string, IsEmptyFnString> hash_set;
+ std::vector<size_t> sizes = {1, 10, 25, 55, 128, 1024, 4096};
+ for (size_t size : sizes) {
+ hash_set.Reserve(size);
+ const size_t buckets_before = hash_set.NumBuckets();
+ // Check that we expanded enough.
+ CHECK_GE(hash_set.ElementsUntilExpand(), size);
+ // Try inserting elements until we are at our reserve size and ensure the hash set did not
+ // expand.
+ while (hash_set.Size() < size) {
+ hash_set.Insert(std::to_string(hash_set.Size()));
+ }
+ CHECK_EQ(hash_set.NumBuckets(), buckets_before);
+ }
+ // Check the behaviour for shrinking; it does not necessarily resize down.
+ constexpr size_t size = 100;
+ hash_set.Reserve(size);
+ CHECK_GE(hash_set.ElementsUntilExpand(), size);
+}
+
} // namespace art
diff --git a/runtime/base/scoped_arena_containers.h b/runtime/base/scoped_arena_containers.h
index b74aef1..1236585 100644
--- a/runtime/base/scoped_arena_containers.h
+++ b/runtime/base/scoped_arena_containers.h
@@ -20,6 +20,7 @@
#include <deque>
#include <queue>
#include <set>
+#include <type_traits>
#include <unordered_map>
#include <utility>
@@ -218,12 +219,19 @@
}
};
-// Declare but do not define a partial specialization for T[].
-// This is to prevent accidental use of this unsupported use case.
+// In general we lack support for arrays. We would need to call the destructor on each element,
+// which requires access to the array size. Support for that is future work.
+//
+// However, we can support trivially destructible component types, as then a destructor doesn't
+// need to be called.
template <typename T>
class ArenaDelete<T[]> {
public:
- void operator()(T* ptr) const = delete;
+ void operator()(T* ptr ATTRIBUTE_UNUSED) const {
+ static_assert(std::is_trivially_destructible<T>::value,
+ "ArenaUniquePtr does not support non-trivially-destructible arrays.");
+ // TODO: Implement debug checks, and MEMORY_TOOL support.
+ }
};
// Arena unique ptr that only calls the destructor of the element.
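
The relaxed specialization can be exercised standalone: a no-op array deleter gated by the same static_assert accepts trivially destructible element types and rejects all others at compile time. A minimal illustration (NoOpArrayDelete is a made-up name):

#include <string>
#include <type_traits>

template <typename T>
struct NoOpArrayDelete {
  void operator()(T* /* ptr */) const {
    static_assert(std::is_trivially_destructible<T>::value,
                  "arrays of non-trivially-destructible types are unsupported");
    // Nothing to do: the arena reclaims the memory wholesale.
  }
};

int main() {
  int buffer[4] = {};
  NoOpArrayDelete<int>()(buffer);             // OK: int needs no destructor calls.
  // NoOpArrayDelete<std::string>()(nullptr); // Would fail the static_assert.
  return 0;
}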
diff --git a/runtime/base/stringpiece.h b/runtime/base/stringpiece.h
index 9c83cf5..46743e9 100644
--- a/runtime/base/stringpiece.h
+++ b/runtime/base/stringpiece.h
@@ -165,7 +165,7 @@
// Pointer to char data, not necessarily zero terminated.
const char* ptr_;
// Length of data.
- size_type length_;
+ size_type length_;
};
// This large function is defined inline so that in a fairly common case where
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 81622e1..69d0799 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1174,15 +1174,26 @@
mirror::LongArray::ResetArrayClass();
mirror::ShortArray::ResetArrayClass();
Thread* const self = Thread::Current();
- JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
for (const ClassLoaderData& data : class_loaders_) {
- vm->DeleteWeakGlobalRef(self, data.weak_root);
- delete data.allocator;
- delete data.class_table;
+ DeleteClassLoader(self, data);
}
class_loaders_.clear();
}
+void ClassLinker::DeleteClassLoader(Thread* self, const ClassLoaderData& data) {
+ Runtime* const runtime = Runtime::Current();
+ JavaVMExt* const vm = runtime->GetJavaVM();
+ vm->DeleteWeakGlobalRef(self, data.weak_root);
+ if (runtime->GetJit() != nullptr) {
+ jit::JitCodeCache* code_cache = runtime->GetJit()->GetCodeCache();
+ if (code_cache != nullptr) {
+ code_cache->RemoveMethodsIn(self, *data.allocator);
+ }
+ }
+ delete data.allocator;
+ delete data.class_table;
+}
+
mirror::PointerArray* ClassLinker::AllocPointerArray(Thread* self, size_t length) {
return down_cast<mirror::PointerArray*>(image_pointer_size_ == 8u ?
static_cast<mirror::Array*>(mirror::LongArray::Alloc(self, length)) :
@@ -1833,13 +1844,6 @@
return code;
}
}
- jit::Jit* const jit = Runtime::Current()->GetJit();
- if (jit != nullptr) {
- auto* code = jit->GetCodeCache()->GetCodeFor(method);
- if (code != nullptr) {
- return code;
- }
- }
if (method->IsNative()) {
// No code and native? Use generic trampoline.
return GetQuickGenericJniStub();
@@ -1856,13 +1860,6 @@
if (found) {
return oat_method.GetQuickCode();
}
- jit::Jit* jit = Runtime::Current()->GetJit();
- if (jit != nullptr) {
- auto* code = jit->GetCodeCache()->GetCodeFor(method);
- if (code != nullptr) {
- return code;
- }
- }
return nullptr;
}
@@ -5325,7 +5322,7 @@
ScopedArenaUnorderedMap<ArtMethod*, ArtMethod*> move_table(allocator.Adapter());
if (virtuals != old_virtuals) {
// Maps from heap allocated miranda method to linear alloc miranda method.
- StrideIterator<ArtMethod> out = virtuals->Begin(method_size, method_alignment);
+ StrideIterator<ArtMethod> out = virtuals->begin(method_size, method_alignment);
// Copy over the old methods + miranda methods.
for (auto& m : klass->GetVirtualMethods(image_pointer_size_)) {
move_table.emplace(&m, &*out);
@@ -5335,7 +5332,7 @@
++out;
}
}
- StrideIterator<ArtMethod> out(virtuals->Begin(method_size, method_alignment)
+ StrideIterator<ArtMethod> out(virtuals->begin(method_size, method_alignment)
+ old_method_count);
// Copy over miranda methods before copying vtable since CopyOf may cause thread suspension and
// we want the roots of the miranda methods to get visited.
@@ -5367,7 +5364,7 @@
move_table.emplace(def_method, &new_method);
++out;
}
- virtuals->SetLength(new_method_count);
+ virtuals->SetSize(new_method_count);
UpdateClassVirtualMethods(klass.Get(), virtuals);
// Done copying methods, they are all roots in the class now, so we can end the no thread
// suspension assert.
@@ -5382,7 +5379,7 @@
self->AssertPendingOOMException();
return false;
}
- out = virtuals->Begin(method_size, method_alignment) + old_method_count;
+ out = virtuals->begin(method_size, method_alignment) + old_method_count;
size_t vtable_pos = old_vtable_count;
for (size_t i = old_method_count; i < new_method_count; ++i) {
// Leave the declaring class alone as type indices are relative to it
@@ -6387,7 +6384,6 @@
void ClassLinker::CleanupClassLoaders() {
Thread* const self = Thread::Current();
WriterMutexLock mu(self, *Locks::classlinker_classes_lock_);
- JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
for (auto it = class_loaders_.begin(); it != class_loaders_.end(); ) {
const ClassLoaderData& data = *it;
// Need to use DecodeJObject so that we get null for cleared JNI weak globals.
@@ -6395,10 +6391,7 @@
if (class_loader != nullptr) {
++it;
} else {
- // Weak reference was cleared, delete the data associated with this class loader.
- delete data.class_table;
- delete data.allocator;
- vm->DeleteWeakGlobalRef(self, data.weak_root);
+ DeleteClassLoader(self, data);
it = class_loaders_.erase(it);
}
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index a2d38ac..392efd2 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -551,6 +551,10 @@
LinearAlloc* allocator;
};
+ static void DeleteClassLoader(Thread* self, const ClassLoaderData& data)
+ REQUIRES(Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
void VisitClassLoaders(ClassLoaderVisitor* visitor) const
SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 48a12e5..035230e 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -185,6 +185,7 @@
static constexpr uint32_t kMaxVarArgRegs = 5;
static constexpr uint32_t kMaxVarArgRegs25x = 6; // lambdas are 2 registers.
+ static constexpr uint32_t kLambdaVirtualRegisterWidth = 2;
// Returns the size (in 2 byte code units) of this instruction.
size_t SizeInCodeUnits() const {
@@ -248,7 +249,7 @@
// VRegA
bool HasVRegA() const;
- int32_t VRegA() const;
+ ALWAYS_INLINE int32_t VRegA() const;
int8_t VRegA_10t() const {
return VRegA_10t(Fetch16(0));
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 4de8a8e..b1d4d35 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -93,7 +93,7 @@
fake_code_.begin(), fake_code_.end());
// NOTE: Don't align the code (it will not be executed) but check that the Thumb2
- // adjustment will be a NOP, see ArtMethod::EntryPointToCodePointer().
+ // adjustment will be a NOP, see EntryPointToCodePointer().
CHECK_ALIGNED(mapping_table_offset, 2);
const uint8_t* code_ptr = &fake_header_code_and_maps_[gc_map_offset];
diff --git a/runtime/gc/accounting/bitmap.cc b/runtime/gc/accounting/bitmap.cc
index fdded02..380cb8e 100644
--- a/runtime/gc/accounting/bitmap.cc
+++ b/runtime/gc/accounting/bitmap.cc
@@ -18,6 +18,7 @@
#include "base/bit_utils.h"
#include "card_table.h"
+#include "jit/jit_code_cache.h"
#include "mem_map.h"
namespace art {
@@ -91,6 +92,7 @@
}
template class MemoryRangeBitmap<CardTable::kCardSize>;
+template class MemoryRangeBitmap<jit::kJitCodeAlignment>;
} // namespace accounting
} // namespace gc
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 657fcb5..1d38525 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -26,6 +26,7 @@
#include "art_field-inl.h"
#include "base/allocator.h"
+#include "base/arena_allocator.h"
#include "base/dumpable.h"
#include "base/histogram-inl.h"
#include "base/stl_util.h"
@@ -1258,11 +1259,11 @@
}
void Heap::Trim(Thread* self) {
+ Runtime* const runtime = Runtime::Current();
if (!CareAboutPauseTimes()) {
ATRACE_BEGIN("Deflating monitors");
// Deflate the monitors, this can cause a pause but shouldn't matter since we don't care
// about pauses.
- Runtime* runtime = Runtime::Current();
{
ScopedSuspendAll ssa(__FUNCTION__);
uint64_t start_time = NanoTime();
@@ -1274,6 +1275,10 @@
}
TrimIndirectReferenceTables(self);
TrimSpaces(self);
+ // Trim arenas that may have been used by JIT or verifier.
+ ATRACE_BEGIN("Trimming arena maps");
+ runtime->GetArenaPool()->TrimMaps();
+ ATRACE_END();
}
class TrimIndirectReferenceTableClosure : public Closure {
diff --git a/runtime/image.cc b/runtime/image.cc
index 192371f..1bc19ff 100644
--- a/runtime/image.cc
+++ b/runtime/image.cc
@@ -150,10 +150,10 @@
void ImageSection::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const {
for (size_t pos = 0; pos < Size(); ) {
auto* array = reinterpret_cast<LengthPrefixedArray<ArtField>*>(base + Offset() + pos);
- for (size_t i = 0; i < array->Length(); ++i) {
+ for (size_t i = 0; i < array->size(); ++i) {
visitor->Visit(&array->At(i, sizeof(ArtField)));
}
- pos += array->ComputeSize(array->Length());
+ pos += array->ComputeSize(array->size());
}
}
@@ -164,10 +164,10 @@
const size_t method_size = ArtMethod::Size(pointer_size);
for (size_t pos = 0; pos < Size(); ) {
auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + Offset() + pos);
- for (size_t i = 0; i < array->Length(); ++i) {
+ for (size_t i = 0; i < array->size(); ++i) {
visitor->Visit(&array->At(i, method_size, method_alignment));
}
- pos += array->ComputeSize(array->Length(), method_size, method_alignment);
+ pos += array->ComputeSize(array->size(), method_size, method_alignment);
}
}
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index ed64d7e..4db37e6 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -97,16 +97,6 @@
static void UpdateEntrypoints(ArtMethod* method, const void* quick_code)
SHARED_REQUIRES(Locks::mutator_lock_) {
- Runtime* const runtime = Runtime::Current();
- jit::Jit* jit = runtime->GetJit();
- if (jit != nullptr) {
- const void* old_code_ptr = method->GetEntryPointFromQuickCompiledCode();
- jit::JitCodeCache* code_cache = jit->GetCodeCache();
- if (code_cache->ContainsCodePtr(old_code_ptr)) {
- // Save the old compiled code since we need it to implement ClassLinker::GetQuickOatCodeFor.
- code_cache->SaveCompiledCode(method, old_code_ptr);
- }
- }
method->SetEntryPointFromQuickCompiledCode(quick_code);
}
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 0607493..5afd28e 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -49,7 +49,7 @@
void Jit::DumpInfo(std::ostream& os) {
os << "Code cache size=" << PrettySize(code_cache_->CodeCacheSize())
<< " data cache size=" << PrettySize(code_cache_->DataCacheSize())
- << " num methods=" << code_cache_->NumMethods()
+ << " number of compiled code=" << code_cache_->NumberOfCompiledCode()
<< "\n";
cumulative_timings_.Dump(os);
}
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 4187358..2d0a2a5 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -19,8 +19,12 @@
#include <sstream>
#include "art_method-inl.h"
+#include "entrypoints/runtime_asm_entrypoints.h"
+#include "gc/accounting/bitmap-inl.h"
+#include "linear_alloc.h"
#include "mem_map.h"
#include "oat_file-inl.h"
+#include "thread_list.h"
namespace art {
namespace jit {
@@ -74,14 +78,10 @@
JitCodeCache::JitCodeCache(MemMap* code_map, MemMap* data_map)
: lock_("Jit code cache", kJitCodeCacheLock),
+ lock_cond_("Jit code cache variable", lock_),
+ collection_in_progress_(false),
code_map_(code_map),
- data_map_(data_map),
- num_methods_(0) {
-
- VLOG(jit) << "Created jit code cache: data size="
- << PrettySize(data_map_->Size())
- << ", code size="
- << PrettySize(code_map_->Size());
+ data_map_(data_map) {
code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_map_->Size(), false /*locked*/);
data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_map_->Size(), false /*locked*/);
@@ -96,13 +96,22 @@
CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);
+
+ live_bitmap_.reset(CodeCacheBitmap::Create("code-cache-bitmap",
+ reinterpret_cast<uintptr_t>(code_map_->Begin()),
+ reinterpret_cast<uintptr_t>(code_map_->End())));
+
+ if (live_bitmap_.get() == nullptr) {
+ PLOG(FATAL) << "creating bitmaps for the JIT code cache failed";
+ }
+
+ VLOG(jit) << "Created jit code cache: data size="
+ << PrettySize(data_map_->Size())
+ << ", code size="
+ << PrettySize(code_map_->Size());
}
-bool JitCodeCache::ContainsMethod(ArtMethod* method) const {
- return ContainsCodePtr(method->GetEntryPointFromQuickCompiledCode());
-}
-
-bool JitCodeCache::ContainsCodePtr(const void* ptr) const {
+bool JitCodeCache::ContainsPc(const void* ptr) const {
return code_map_->Begin() <= ptr && ptr < code_map_->End();
}
@@ -121,6 +130,7 @@
};
uint8_t* JitCodeCache::CommitCode(Thread* self,
+ ArtMethod* method,
const uint8_t* mapping_table,
const uint8_t* vmap_table,
const uint8_t* gc_map,
@@ -129,6 +139,93 @@
size_t fp_spill_mask,
const uint8_t* code,
size_t code_size) {
+ uint8_t* result = CommitCodeInternal(self,
+ method,
+ mapping_table,
+ vmap_table,
+ gc_map,
+ frame_size_in_bytes,
+ core_spill_mask,
+ fp_spill_mask,
+ code,
+ code_size);
+ if (result == nullptr) {
+ // Retry.
+ GarbageCollectCache(self);
+ result = CommitCodeInternal(self,
+ method,
+ mapping_table,
+ vmap_table,
+ gc_map,
+ frame_size_in_bytes,
+ core_spill_mask,
+ fp_spill_mask,
+ code,
+ code_size);
+ }
+ return result;
+}
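
CommitCode allocates, and on failure collects and retries exactly once, so a still-full cache surfaces as a null result rather than an unbounded loop. The shape of the pattern, as a generic sketch:

    // Sketch: allocate, collect on failure, retry once; null means truly full.
    template <typename AllocFn, typename CollectFn>
    void* AllocateWithOneRetry(AllocFn alloc, CollectFn collect) {
      void* result = alloc();
      if (result == nullptr) {
        collect();         // Reclaim space...
        result = alloc();  // ...and try once more; a second failure is returned as-is.
      }
      return result;
    }
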
+
+bool JitCodeCache::WaitForPotentialCollectionToComplete(Thread* self) {
+ bool in_collection = false;
+ while (collection_in_progress_) {
+ in_collection = true;
+ lock_cond_.Wait(self);
+ }
+ return in_collection;
+}
+
+static uintptr_t FromCodeToAllocation(const void* code) {
+ size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
+ return reinterpret_cast<uintptr_t>(code) - RoundUp(sizeof(OatQuickMethodHeader), alignment);
+}
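
FromCodeToAllocation relies on the layout CommitCodeInternal produces: the mspace allocation holds an OatQuickMethodHeader immediately before the instruction-aligned code, with any padding at the front. Sketched:

    // allocation                           code_ptr (instruction-aligned)
    // |                                    |
    // v                                    v
    // [ padding | OatQuickMethodHeader ] [ instructions ... ]
    // <- RoundUp(sizeof(OatQuickMethodHeader), alignment) ->
    //
    // Stepping back from the first instruction by the rounded header size
    // therefore recovers the start of the mspace allocation.
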
+
+void JitCodeCache::FreeCode(const void* code_ptr, ArtMethod* method ATTRIBUTE_UNUSED) {
+ uintptr_t allocation = FromCodeToAllocation(code_ptr);
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ const uint8_t* data = method_header->GetNativeGcMap();
+ if (data != nullptr) {
+ mspace_free(data_mspace_, const_cast<uint8_t*>(data));
+ }
+ data = method_header->GetMappingTable();
+ if (data != nullptr) {
+ mspace_free(data_mspace_, const_cast<uint8_t*>(data));
+ }
+ // Use the offset directly, to avoid the sanity check that the method was
+ // compiled with the optimizing compiler.
+ // TODO(ngeoffray): Clean up.
+ if (method_header->vmap_table_offset_ != 0) {
+ data = method_header->code_ - method_header->vmap_table_offset_;
+ mspace_free(data_mspace_, const_cast<uint8_t*>(data));
+ }
+ mspace_free(code_mspace_, reinterpret_cast<uint8_t*>(allocation));
+}
+
+void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) {
+ MutexLock mu(self, lock_);
+ // We do not check whether a code cache GC is in progress: this method is
+ // called with the classlinker_classes_lock_ held, and suspending ourselves
+ // could lead to a deadlock.
+ for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+ if (alloc.ContainsUnsafe(it->second)) {
+ FreeCode(it->first, it->second);
+ it = method_code_map_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+uint8_t* JitCodeCache::CommitCodeInternal(Thread* self,
+ ArtMethod* method,
+ const uint8_t* mapping_table,
+ const uint8_t* vmap_table,
+ const uint8_t* gc_map,
+ size_t frame_size_in_bytes,
+ size_t core_spill_mask,
+ size_t fp_spill_mask,
+ const uint8_t* code,
+ size_t code_size) {
size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
// Ensure the header ends up at expected instruction alignment.
size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
@@ -137,7 +234,9 @@
OatQuickMethodHeader* method_header = nullptr;
uint8_t* code_ptr = nullptr;
+ ScopedThreadSuspension sts(self, kSuspended);
MutexLock mu(self, lock_);
+ WaitForPotentialCollectionToComplete(self);
{
ScopedCodeCacheWrite scc(code_map_.get());
uint8_t* result = reinterpret_cast<uint8_t*>(
@@ -149,7 +248,7 @@
DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(code_ptr), alignment);
std::copy(code, code + code_size, code_ptr);
- method_header = reinterpret_cast<OatQuickMethodHeader*>(code_ptr) - 1;
+ method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
new (method_header) OatQuickMethodHeader(
(mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
(vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
@@ -162,8 +261,12 @@
__builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
reinterpret_cast<char*>(code_ptr + code_size));
-
- ++num_methods_; // TODO: This is hacky but works since each method has exactly one code region.
+ method_code_map_.Put(code_ptr, method);
+ // We checked earlier that no collection was in progress. If one were,
+ // setting the entry point of a method would be unsafe, as the collection
+ // could delete it.
+ DCHECK(!collection_in_progress_);
+ method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
return reinterpret_cast<uint8_t*>(method_header);
}
@@ -181,10 +284,32 @@
return bytes_allocated;
}
+size_t JitCodeCache::NumberOfCompiledCode() {
+ MutexLock mu(Thread::Current(), lock_);
+ return method_code_map_.size();
+}
+
uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
size = RoundUp(size, sizeof(void*));
- MutexLock mu(self, lock_);
- return reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
+ uint8_t* result = nullptr;
+
+ {
+ ScopedThreadSuspension sts(self, kSuspended);
+ MutexLock mu(self, lock_);
+ WaitForPotentialCollectionToComplete(self);
+ result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
+ }
+
+ if (result == nullptr) {
+ // Retry.
+ GarbageCollectCache(self);
+ ScopedThreadSuspension sts(self, kSuspended);
+ MutexLock mu(self, lock_);
+ WaitForPotentialCollectionToComplete(self);
+ result = reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
+ }
+
+ return result;
}
uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
@@ -196,29 +321,143 @@
return result;
}
-const void* JitCodeCache::GetCodeFor(ArtMethod* method) {
- const void* code = method->GetEntryPointFromQuickCompiledCode();
- if (ContainsCodePtr(code)) {
- return code;
+class MarkCodeVisitor FINAL : public StackVisitor {
+ public:
+ MarkCodeVisitor(Thread* thread_in, JitCodeCache* code_cache_in)
+ : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kSkipInlinedFrames),
+ code_cache_(code_cache_in),
+ bitmap_(code_cache_->GetLiveBitmap()) {}
+
+ bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
+ if (method_header == nullptr) {
+ return true;
+ }
+ const void* code = method_header->GetCode();
+ if (code_cache_->ContainsPc(code)) {
+ // Use the atomic set version, as multiple threads are executing this code.
+ bitmap_->AtomicTestAndSet(FromCodeToAllocation(code));
+ }
+ return true;
}
- MutexLock mu(Thread::Current(), lock_);
- auto it = method_code_map_.find(method);
- if (it != method_code_map_.end()) {
- return it->second;
+
+ private:
+ JitCodeCache* const code_cache_;
+ CodeCacheBitmap* const bitmap_;
+};
+
+class MarkCodeClosure FINAL : public Closure {
+ public:
+ MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
+ : code_cache_(code_cache), barrier_(barrier) {}
+
+ void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ DCHECK(thread == Thread::Current() || thread->IsSuspended());
+ MarkCodeVisitor visitor(thread, code_cache_);
+ visitor.WalkStack();
+ if (thread->GetState() == kRunnable) {
+ barrier_->Pass(Thread::Current());
+ }
}
- return nullptr;
+
+ private:
+ JitCodeCache* const code_cache_;
+ Barrier* const barrier_;
+};
+
+void JitCodeCache::GarbageCollectCache(Thread* self) {
+ if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
+ LOG(INFO) << "Clearing code cache, code="
+ << PrettySize(CodeCacheSize())
+ << ", data=" << PrettySize(DataCacheSize());
+ }
+
+ size_t map_size = 0;
+ ScopedThreadSuspension sts(self, kSuspended);
+
+ // Walk over all compiled methods and set the entry points of these
+ // methods to the interpreter.
+ {
+ MutexLock mu(self, lock_);
+ if (WaitForPotentialCollectionToComplete(self)) {
+ return;
+ }
+ collection_in_progress_ = true;
+ map_size = method_code_map_.size();
+ for (auto& it : method_code_map_) {
+ it.second->SetEntryPointFromQuickCompiledCode(GetQuickToInterpreterBridge());
+ }
+ }
+
+ // Run a checkpoint on all threads to mark the JIT compiled code they are running.
+ {
+ Barrier barrier(0);
+ MarkCodeClosure closure(this, &barrier);
+ size_t threads_running_checkpoint =
+ Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
+ if (threads_running_checkpoint != 0) {
+ barrier.Increment(self, threads_running_checkpoint);
+ }
+ }
+
+ // Free unused compiled code, and restore the entry point of used compiled code.
+ {
+ MutexLock mu(self, lock_);
+ DCHECK_EQ(map_size, method_code_map_.size());
+ ScopedCodeCacheWrite scc(code_map_.get());
+ for (auto it = method_code_map_.begin(); it != method_code_map_.end();) {
+ const void* code_ptr = it->first;
+ ArtMethod* method = it->second;
+ uintptr_t allocation = FromCodeToAllocation(code_ptr);
+ const OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ if (GetLiveBitmap()->Test(allocation)) {
+ method->SetEntryPointFromQuickCompiledCode(method_header->GetEntryPoint());
+ ++it;
+ } else {
+ method->ClearCounter();
+ DCHECK_NE(method->GetEntryPointFromQuickCompiledCode(), method_header->GetEntryPoint());
+ FreeCode(code_ptr, method);
+ it = method_code_map_.erase(it);
+ }
+ }
+ GetLiveBitmap()->Bitmap::Clear();
+ collection_in_progress_ = false;
+ lock_cond_.Broadcast(self);
+ }
+
+ if (!kIsDebugBuild || VLOG_IS_ON(jit)) {
+ LOG(INFO) << "After clearing code cache, code="
+ << PrettySize(CodeCacheSize())
+ << ", data=" << PrettySize(DataCacheSize());
+ }
}
-void JitCodeCache::SaveCompiledCode(ArtMethod* method, const void* old_code_ptr) {
- DCHECK_EQ(method->GetEntryPointFromQuickCompiledCode(), old_code_ptr);
- DCHECK(ContainsCodePtr(old_code_ptr)) << PrettyMethod(method) << " old_code_ptr="
- << old_code_ptr;
- MutexLock mu(Thread::Current(), lock_);
- auto it = method_code_map_.find(method);
- if (it != method_code_map_.end()) {
- return;
+
+OatQuickMethodHeader* JitCodeCache::LookupMethodHeader(uintptr_t pc, ArtMethod* method) {
+ static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
+ if (kRuntimeISA == kArm) {
+ // On Thumb-2, the pc is offset by one.
+ --pc;
}
- method_code_map_.Put(method, old_code_ptr);
+ if (!ContainsPc(reinterpret_cast<const void*>(pc))) {
+ return nullptr;
+ }
+
+ MutexLock mu(Thread::Current(), lock_);
+ if (method_code_map_.empty()) {
+ return nullptr;
+ }
+ auto it = method_code_map_.lower_bound(reinterpret_cast<const void*>(pc));
+ --it;
+
+ const void* code_ptr = it->first;
+ OatQuickMethodHeader* method_header = OatQuickMethodHeader::FromCodePointer(code_ptr);
+ if (!method_header->Contains(pc)) {
+ return nullptr;
+ }
+ DCHECK_EQ(it->second, method)
+ << PrettyMethod(method) << " " << PrettyMethod(it->second) << " " << std::hex << pc;
+ return method_header;
}
} // namespace jit
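
Since method_code_map_ is now keyed by code pointer, LookupMethodHeader finds the region covering a pc as the entry with the greatest key <= pc: lower_bound yields the first key >= pc, and stepping back one lands on the candidate, whose header bounds are then checked. The same idiom over a plain std::map, with the edge cases spelled out:

    #include <cstdint>
    #include <map>

    // Find the entry whose [key, key + size) range could contain addr:
    // the entry with the greatest key <= addr.
    template <typename V>
    const V* FindCoveringEntry(const std::map<uintptr_t, V>& m, uintptr_t addr) {
      if (m.empty()) return nullptr;
      auto it = m.lower_bound(addr);           // First key >= addr.
      if (it == m.end() || it->first != addr) {
        if (it == m.begin()) return nullptr;   // All keys lie above addr.
        --it;                                  // Greatest key < addr.
      }
      return &it->second;                      // Caller still checks the upper bound.
    }
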
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index fa90c18..4e415b8 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -22,6 +22,7 @@
#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "gc/accounting/bitmap.h"
#include "gc/allocator/dlmalloc.h"
#include "gc_root.h"
#include "jni.h"
@@ -33,32 +34,40 @@
namespace art {
class ArtMethod;
-class CompiledMethod;
-class CompilerCallbacks;
+class LinearAlloc;
namespace jit {
class JitInstrumentationCache;
+// Alignment that will suit all architectures.
+static constexpr int kJitCodeAlignment = 16;
+using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAlignment>;
+
class JitCodeCache {
public:
static constexpr size_t kMaxCapacity = 1 * GB;
- static constexpr size_t kDefaultCapacity = 2 * MB;
+ // Set the default to a very low amount for debug builds to stress the code
+ // cache collection.
+ static constexpr size_t kDefaultCapacity = kIsDebugBuild ? 20 * KB : 2 * MB;
// Create the code cache with a code + data capacity equal to "capacity", error message is passed
// in the out arg error_msg.
static JitCodeCache* Create(size_t capacity, std::string* error_msg);
- size_t NumMethods() const {
- return num_methods_;
- }
-
+ // Number of bytes allocated in the code cache.
size_t CodeCacheSize() REQUIRES(!lock_);
+ // Number of bytes allocated in the data cache.
size_t DataCacheSize() REQUIRES(!lock_);
+ // Number of compiled code entries in the code cache. Note that this is not
+ // the number of methods that got JIT compiled, as some may have been collected.
+ size_t NumberOfCompiledCode() REQUIRES(!lock_);
+
// Allocate and write code and its metadata to the code cache.
uint8_t* CommitCode(Thread* self,
+ ArtMethod* method,
const uint8_t* mapping_table,
const uint8_t* vmap_table,
const uint8_t* gc_map,
@@ -67,51 +76,89 @@
size_t fp_spill_mask,
const uint8_t* code,
size_t code_size)
+ SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!lock_);
- // Return true if the code cache contains the code pointer which si the entrypoint of the method.
- bool ContainsMethod(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
-
- // Return true if the code cache contains a code ptr.
- bool ContainsCodePtr(const void* ptr) const;
+ // Return true if the code cache contains this pc.
+ bool ContainsPc(const void* pc) const;
// Reserve a region of data of size at least "size". Returns null if there is no more room.
- uint8_t* ReserveData(Thread* self, size_t size) REQUIRES(!lock_);
+ uint8_t* ReserveData(Thread* self, size_t size)
+ SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES(!lock_);
// Add a data array of size (end - begin) with the associated contents, returns null if there
// is no more room.
uint8_t* AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end)
+ SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!lock_);
- // Get code for a method, returns null if it is not in the jit cache.
- const void* GetCodeFor(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ CodeCacheBitmap* GetLiveBitmap() const {
+ return live_bitmap_.get();
+ }
- // Save the compiled code for a method so that GetCodeFor(method) will return old_code_ptr if the
- // entrypoint isn't within the cache.
- void SaveCompiledCode(ArtMethod* method, const void* old_code_ptr)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ // Perform a collection on the code cache.
+ void GarbageCollectCache(Thread* self)
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Given the 'pc', try to find the JIT compiled code associated with it.
+ // Return null if 'pc' is not in the code cache. 'method' is passed for a
+ // sanity check.
+ OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
+ REQUIRES(!lock_)
+ REQUIRES(Locks::classlinker_classes_lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
private:
- // Takes ownership of code_mem_map.
+ // Take ownership of code_mem_map.
JitCodeCache(MemMap* code_map, MemMap* data_map);
- // Lock which guards.
+ // Internal version of 'CommitCode' that will not retry if the allocation
+ // fails; returns null in that case.
+ uint8_t* CommitCodeInternal(Thread* self,
+ ArtMethod* method,
+ const uint8_t* mapping_table,
+ const uint8_t* vmap_table,
+ const uint8_t* gc_map,
+ size_t frame_size_in_bytes,
+ size_t core_spill_mask,
+ size_t fp_spill_mask,
+ const uint8_t* code,
+ size_t code_size)
+ REQUIRES(!lock_)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // If a collection is in progress, wait for it to finish. Return
+ // whether the thread actually waited.
+ bool WaitForPotentialCollectionToComplete(Thread* self)
+ REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);
+
+ // Free in the mspace allocations taken by 'method'.
+ void FreeCode(const void* code_ptr, ArtMethod* method) REQUIRES(lock_);
+
+ // Lock for guarding allocations, collections, and the method_code_map_.
Mutex lock_;
+ // Condition to wait on during collection.
+ ConditionVariable lock_cond_ GUARDED_BY(lock_);
+ // Whether there is a code cache collection in progress.
+ bool collection_in_progress_ GUARDED_BY(lock_);
// Mem map which holds code.
std::unique_ptr<MemMap> code_map_;
// Mem map which holds data (stack maps and profiling info).
std::unique_ptr<MemMap> data_map_;
// The opaque mspace for allocating code.
- void* code_mspace_;
+ void* code_mspace_ GUARDED_BY(lock_);
// The opaque mspace for allocating data.
- void* data_mspace_;
- // Number of compiled methods.
- size_t num_methods_;
- // This map holds code for methods if they were deoptimized by the instrumentation stubs. This is
- // required since we have to implement ClassLinker::GetQuickOatCodeFor for walking stacks.
- SafeMap<ArtMethod*, const void*> method_code_map_ GUARDED_BY(lock_);
+ void* data_mspace_ GUARDED_BY(lock_);
+ // Bitmap for collecting code and data.
+ std::unique_ptr<CodeCacheBitmap> live_bitmap_;
+ // This map associates each compiled code pointer with its ArtMethod.
+ SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index 9b9c5d2..666b8e7 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -76,8 +76,7 @@
ScopedObjectAccessUnchecked soa(self);
// Since we don't have on-stack replacement, some methods can remain in the interpreter longer
// than we want resulting in samples even after the method is compiled.
- if (method->IsClassInitializer() || method->IsNative() ||
- Runtime::Current()->GetJit()->GetCodeCache()->ContainsMethod(method)) {
+ if (method->IsClassInitializer() || method->IsNative()) {
return;
}
if (thread_pool_.get() == nullptr) {
diff --git a/runtime/jit/profiling_info.cc b/runtime/jit/profiling_info.cc
index 0c039f2..7c5f78e 100644
--- a/runtime/jit/profiling_info.cc
+++ b/runtime/jit/profiling_info.cc
@@ -28,15 +28,10 @@
ProfilingInfo* ProfilingInfo::Create(ArtMethod* method) {
// Walk over the dex instructions of the method and keep track of
// instructions we are interested in profiling.
- const uint16_t* code_ptr = nullptr;
- const uint16_t* code_end = nullptr;
- {
- ScopedObjectAccess soa(Thread::Current());
- DCHECK(!method->IsNative());
- const DexFile::CodeItem& code_item = *method->GetCodeItem();
- code_ptr = code_item.insns_;
- code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
- }
+ DCHECK(!method->IsNative());
+ const DexFile::CodeItem& code_item = *method->GetCodeItem();
+ const uint16_t* code_ptr = code_item.insns_;
+ const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
uint32_t dex_pc = 0;
std::vector<uint32_t> entries;
@@ -91,7 +86,7 @@
ScopedObjectAccess soa(self);
for (size_t i = 0; i < InlineCache::kIndividualCacheSize; ++i) {
- mirror::Class* existing = cache->classes_[i].Read<kWithoutReadBarrier>();
+ mirror::Class* existing = cache->classes_[i].Read();
if (existing == cls) {
// Receiver type is already in the cache, nothing else to do.
return;
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index 73ca41a..7a2d1a8 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -36,7 +36,7 @@
*/
class ProfilingInfo {
public:
- static ProfilingInfo* Create(ArtMethod* method);
+ static ProfilingInfo* Create(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
// Add information from an executed INVOKE instruction to the profile.
void AddInvokeInfo(Thread* self, uint32_t dex_pc, mirror::Class* cls);
diff --git a/runtime/length_prefixed_array.h b/runtime/length_prefixed_array.h
index 0ff6d7a..e01b6cc 100644
--- a/runtime/length_prefixed_array.h
+++ b/runtime/length_prefixed_array.h
@@ -30,19 +30,34 @@
class LengthPrefixedArray {
public:
explicit LengthPrefixedArray(size_t length)
- : length_(dchecked_integral_cast<uint32_t>(length)) {}
+ : size_(dchecked_integral_cast<uint32_t>(length)) {}
T& At(size_t index, size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
- DCHECK_LT(index, length_);
+ DCHECK_LT(index, size_);
return AtUnchecked(index, element_size, alignment);
}
- StrideIterator<T> Begin(size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
+ const T& At(size_t index, size_t element_size = sizeof(T), size_t alignment = alignof(T)) const {
+ DCHECK_LT(index, size_);
+ return AtUnchecked(index, element_size, alignment);
+ }
+
+ StrideIterator<T> begin(size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
return StrideIterator<T>(&AtUnchecked(0, element_size, alignment), element_size);
}
- StrideIterator<T> End(size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
- return StrideIterator<T>(&AtUnchecked(length_, element_size, alignment), element_size);
+ StrideIterator<const T> begin(size_t element_size = sizeof(T),
+ size_t alignment = alignof(T)) const {
+ return StrideIterator<const T>(&AtUnchecked(0, element_size, alignment), element_size);
+ }
+
+ StrideIterator<T> end(size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
+ return StrideIterator<T>(&AtUnchecked(size_, element_size, alignment), element_size);
+ }
+
+ StrideIterator<const T> end(size_t element_size = sizeof(T),
+ size_t alignment = alignof(T)) const {
+ return StrideIterator<const T>(&AtUnchecked(size_, element_size, alignment), element_size);
}
static size_t OffsetOfElement(size_t index,
@@ -60,13 +75,13 @@
return result;
}
- uint64_t Length() const {
- return length_;
+ size_t size() const {
+ return size_;
}
// Updates the length but does not reallocate storage.
- void SetLength(size_t length) {
- length_ = dchecked_integral_cast<uint32_t>(length);
+ void SetSize(size_t length) {
+ size_ = dchecked_integral_cast<uint32_t>(length);
}
private:
@@ -75,7 +90,12 @@
reinterpret_cast<uintptr_t>(this) + OffsetOfElement(index, element_size, alignment));
}
- uint32_t length_;
+ const T& AtUnchecked(size_t index, size_t element_size, size_t alignment) const {
+ return *reinterpret_cast<T*>(
+ reinterpret_cast<uintptr_t>(this) + OffsetOfElement(index, element_size, alignment));
+ }
+
+ uint32_t size_;
uint8_t data[0];
};
@@ -84,7 +104,7 @@
IterationRange<StrideIterator<T>> MakeIterationRangeFromLengthPrefixedArray(
LengthPrefixedArray<T>* arr, size_t element_size = sizeof(T), size_t alignment = alignof(T)) {
return arr != nullptr ?
- MakeIterationRange(arr->Begin(element_size, alignment), arr->End(element_size, alignment)) :
+ MakeIterationRange(arr->begin(element_size, alignment), arr->end(element_size, alignment)) :
MakeEmptyIterationRange(StrideIterator<T>(nullptr, 0));
}
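
With the lowercase begin()/end() spellings, MakeIterationRangeFromLengthPrefixedArray composes directly with range-based for. A usage sketch (assumes a klass in scope; the stride arguments default to sizeof/alignof the element):

    // Iterate the length-prefixed static-field array of a class.
    LengthPrefixedArray<ArtField>* fields = klass->GetSFieldsPtr();
    for (ArtField& field : MakeIterationRangeFromLengthPrefixedArray(fields)) {
      // ... visit field ...
    }
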
diff --git a/runtime/linear_alloc.cc b/runtime/linear_alloc.cc
index 43e81d9..f91b0ed 100644
--- a/runtime/linear_alloc.cc
+++ b/runtime/linear_alloc.cc
@@ -48,4 +48,8 @@
return allocator_.Contains(ptr);
}
+bool LinearAlloc::ContainsUnsafe(void* ptr) const {
+ return allocator_.Contains(ptr);
+}
+
} // namespace art
diff --git a/runtime/linear_alloc.h b/runtime/linear_alloc.h
index 1b21527..df7f17d 100644
--- a/runtime/linear_alloc.h
+++ b/runtime/linear_alloc.h
@@ -47,6 +47,10 @@
// Return true if the linear alloc contains an address.
bool Contains(void* ptr) const REQUIRES(!lock_);
+ // Unsafe version of 'Contains' only to be used when the allocator is going
+ // to be deleted.
+ bool ContainsUnsafe(void* ptr) const NO_THREAD_SAFETY_ANALYSIS;
+
private:
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
ArenaAllocator allocator_ GUARDED_BY(lock_);
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index a528c3b..19ee7f4 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -928,22 +928,22 @@
inline uint32_t Class::NumDirectMethods() {
LengthPrefixedArray<ArtMethod>* arr = GetDirectMethodsPtrUnchecked();
- return arr != nullptr ? arr->Length() : 0u;
+ return arr != nullptr ? arr->size() : 0u;
}
inline uint32_t Class::NumVirtualMethods() {
LengthPrefixedArray<ArtMethod>* arr = GetVirtualMethodsPtrUnchecked();
- return arr != nullptr ? arr->Length() : 0u;
+ return arr != nullptr ? arr->size() : 0u;
}
inline uint32_t Class::NumInstanceFields() {
LengthPrefixedArray<ArtField>* arr = GetIFieldsPtrUnchecked();
- return arr != nullptr ? arr->Length() : 0u;
+ return arr != nullptr ? arr->size() : 0u;
}
inline uint32_t Class::NumStaticFields() {
LengthPrefixedArray<ArtField>* arr = GetSFieldsPtrUnchecked();
- return arr != nullptr ? arr->Length() : 0u;
+ return arr != nullptr ? arr->size() : 0u;
}
} // namespace mirror
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 53fedab..9d01a1d 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -574,7 +574,7 @@
return nullptr;
}
size_t low = 0;
- size_t high = fields->Length();
+ size_t high = fields->size();
ArtField* ret = nullptr;
while (low < high) {
size_t mid = (low + high) / 2;
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 3a73900..5e42392 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -190,7 +190,7 @@
return nullptr;
}
size_t low = 0;
- size_t high = fields->Length();
+ size_t high = fields->size();
const uint16_t* const data = name->GetValue();
const size_t length = name->GetLength();
while (low < high) {
diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h
index 6eadd87..c9a2cfb 100644
--- a/runtime/oat_quick_method_header.h
+++ b/runtime/oat_quick_method_header.h
@@ -21,6 +21,7 @@
#include "base/macros.h"
#include "quick/quick_method_frame_info.h"
#include "stack_map.h"
+#include "utils.h"
namespace art {
@@ -39,6 +40,16 @@
~OatQuickMethodHeader();
+ static OatQuickMethodHeader* FromCodePointer(const void* code_ptr) {
+ uintptr_t code = reinterpret_cast<uintptr_t>(code_ptr);
+ uintptr_t header = code - OFFSETOF_MEMBER(OatQuickMethodHeader, code_);
+ return reinterpret_cast<OatQuickMethodHeader*>(header);
+ }
+
+ static OatQuickMethodHeader* FromEntryPoint(const void* entry_point) {
+ return FromCodePointer(EntryPointToCodePointer(entry_point));
+ }
+
OatQuickMethodHeader& operator=(const OatQuickMethodHeader&) = default;
uintptr_t NativeQuickPcOffset(const uintptr_t pc) const {
@@ -74,6 +85,11 @@
bool Contains(uintptr_t pc) const {
uintptr_t code_start = reinterpret_cast<uintptr_t>(code_);
+ static_assert(kRuntimeISA != kThumb2, "kThumb2 cannot be a runtime ISA");
+ if (kRuntimeISA == kArm) {
+ // On Thumb-2, the pc is offset by one.
+ code_start++;
+ }
return code_start <= pc && pc <= (code_start + code_size_);
}
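
Both Contains() here and JitCodeCache::LookupMethodHeader() adjust by one because Thumb-2 entry points carry the instruction-set selection in bit 0; EntryPointToCodePointer() strips that tag before any header math. The mask, in isolation:

    #include <cstdint>

    // Thumb-2 code pointers tag the instruction set in bit 0.
    inline uintptr_t StripThumbBit(uintptr_t entry_point) {
      return entry_point & ~uintptr_t{1};
    }
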
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index bc9ba37..57472ad 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -216,10 +216,10 @@
LengthPrefixedArray<ArtField>* static_fields0 = proxyClass0->GetSFieldsPtr();
ASSERT_TRUE(static_fields0 != nullptr);
- ASSERT_EQ(2u, static_fields0->Length());
+ ASSERT_EQ(2u, static_fields0->size());
LengthPrefixedArray<ArtField>* static_fields1 = proxyClass1->GetSFieldsPtr();
ASSERT_TRUE(static_fields1 != nullptr);
- ASSERT_EQ(2u, static_fields1->Length());
+ ASSERT_EQ(2u, static_fields1->size());
EXPECT_EQ(static_fields0->At(0).GetDeclaringClass(), proxyClass0.Get());
EXPECT_EQ(static_fields0->At(1).GetDeclaringClass(), proxyClass0.Get());
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index 99e262e..6554394 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -73,7 +73,6 @@
bool InlineMethodAnalyser::AnalyseMethodCode(verifier::MethodVerifier* verifier,
InlineMethod* method) {
DCHECK(verifier != nullptr);
- DCHECK_EQ(Runtime::Current()->IsCompiler(), method != nullptr);
if (!Runtime::Current()->UseJit()) {
DCHECK_EQ(verifier->CanLoadClasses(), method != nullptr);
}
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 9359d27..b0727da 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -856,13 +856,11 @@
// If we are the JIT then we may have just compiled the method after the
// IsQuickToInterpreterBridge check.
jit::Jit* const jit = Runtime::Current()->GetJit();
- if (jit != nullptr &&
- jit->GetCodeCache()->ContainsCodePtr(reinterpret_cast<const void*>(code))) {
+ if (jit != nullptr && jit->GetCodeCache()->ContainsPc(code)) {
return;
}
- uint32_t code_size = reinterpret_cast<const OatQuickMethodHeader*>(
- EntryPointToCodePointer(code))[-1].code_size_;
+ uint32_t code_size = OatQuickMethodHeader::FromEntryPoint(code)->code_size_;
uintptr_t code_start = reinterpret_cast<uintptr_t>(code);
CHECK(code_start <= pc && pc <= (code_start + code_size))
<< PrettyMethod(method)
diff --git a/runtime/stride_iterator.h b/runtime/stride_iterator.h
index a9da51b..ac04c3b 100644
--- a/runtime/stride_iterator.h
+++ b/runtime/stride_iterator.h
@@ -19,6 +19,8 @@
#include <iterator>
+#include "base/logging.h"
+
namespace art {
template<typename T>
diff --git a/runtime/verifier/method_verifier-inl.h b/runtime/verifier/method_verifier-inl.h
index 2d9fd53..f52d011 100644
--- a/runtime/verifier/method_verifier-inl.h
+++ b/runtime/verifier/method_verifier-inl.h
@@ -38,6 +38,10 @@
return insn_flags_[index];
}
+inline InstructionFlags& MethodVerifier::GetInstructionFlags(size_t index) {
+ return insn_flags_[index];
+}
+
inline mirror::ClassLoader* MethodVerifier::GetClassLoader() {
return class_loader_.Get();
}
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 4051a1c..e1d4160 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -58,12 +58,14 @@
// On VLOG(verifier), should we dump the whole state when we run into a hard failure?
static constexpr bool kDumpRegLinesOnHardFailureIfVLOG = true;
+PcToRegisterLineTable::PcToRegisterLineTable(ScopedArenaAllocator& arena)
+ : register_lines_(arena.Adapter(kArenaAllocVerifier)) {}
+
void PcToRegisterLineTable::Init(RegisterTrackingMode mode, InstructionFlags* flags,
uint32_t insns_size, uint16_t registers_size,
MethodVerifier* verifier) {
DCHECK_GT(insns_size, 0U);
- register_lines_.reset(new RegisterLine*[insns_size]());
- size_ = insns_size;
+ register_lines_.resize(insns_size);
for (uint32_t i = 0; i < insns_size; i++) {
bool interesting = false;
switch (mode) {
@@ -80,19 +82,12 @@
break;
}
if (interesting) {
- register_lines_[i] = RegisterLine::Create(registers_size, verifier);
+ register_lines_[i].reset(RegisterLine::Create(registers_size, verifier));
}
}
}
-PcToRegisterLineTable::~PcToRegisterLineTable() {
- for (size_t i = 0; i < size_; i++) {
- delete register_lines_[i];
- if (kIsDebugBuild) {
- register_lines_[i] = nullptr;
- }
- }
-}
+PcToRegisterLineTable::~PcToRegisterLineTable() {}
// Note: returns true on failure.
ALWAYS_INLINE static inline bool FailOrAbort(MethodVerifier* verifier, bool condition,
@@ -398,7 +393,10 @@
bool need_precise_constants, bool verify_to_dump,
bool allow_thread_suspension)
: self_(self),
- reg_types_(can_load_classes),
+ arena_stack_(Runtime::Current()->GetArenaPool()),
+ arena_(&arena_stack_),
+ reg_types_(can_load_classes, arena_),
+ reg_table_(arena_),
work_insn_idx_(DexFile::kDexNoIndex),
dex_method_idx_(dex_method_idx),
mirror_method_(method),
@@ -702,7 +700,11 @@
}
// Allocate and initialize an array to hold instruction data.
- insn_flags_.reset(new InstructionFlags[code_item_->insns_size_in_code_units_]());
+ insn_flags_.reset(arena_.AllocArray<InstructionFlags>(code_item_->insns_size_in_code_units_));
+ DCHECK(insn_flags_ != nullptr);
+ std::uninitialized_fill_n(insn_flags_.get(),
+ code_item_->insns_size_in_code_units_,
+ InstructionFlags());
// Run through the instructions and see if the width checks out.
bool result = ComputeWidthsAndCountOps();
// Flag instructions guarded by a "try" block and check exception handlers.
@@ -848,7 +850,7 @@
break;
}
size_t inst_size = inst->SizeInCodeUnits();
- insn_flags_[dex_pc].SetIsOpcode();
+ GetInstructionFlags(dex_pc).SetIsOpcode();
dex_pc += inst_size;
inst = inst->RelativeAt(inst_size);
}
@@ -881,7 +883,7 @@
<< " endAddr=" << end << " (size=" << insns_size << ")";
return false;
}
- if (!insn_flags_[start].IsOpcode()) {
+ if (!GetInstructionFlags(start).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "'try' block starts inside an instruction (" << start << ")";
return false;
@@ -889,7 +891,7 @@
uint32_t dex_pc = start;
const Instruction* inst = Instruction::At(code_item_->insns_ + dex_pc);
while (dex_pc < end) {
- insn_flags_[dex_pc].SetInTry();
+ GetInstructionFlags(dex_pc).SetInTry();
size_t insn_size = inst->SizeInCodeUnits();
dex_pc += insn_size;
inst = inst->RelativeAt(insn_size);
@@ -903,7 +905,7 @@
CatchHandlerIterator iterator(handlers_ptr);
for (; iterator.HasNext(); iterator.Next()) {
uint32_t dex_pc= iterator.GetHandlerAddress();
- if (!insn_flags_[dex_pc].IsOpcode()) {
+ if (!GetInstructionFlags(dex_pc).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD)
<< "exception handler starts at bad address (" << dex_pc << ")";
return false;
@@ -913,7 +915,7 @@
<< "exception handler begins with move-result* (" << dex_pc << ")";
return false;
}
- insn_flags_[dex_pc].SetBranchTarget();
+ GetInstructionFlags(dex_pc).SetBranchTarget();
// Ensure exception types are resolved so that they don't need resolution to be delivered,
// unresolved exception types will be ignored by exception delivery
if (iterator.GetHandlerTypeIndex() != DexFile::kDexNoIndex16) {
@@ -935,8 +937,8 @@
const Instruction* inst = Instruction::At(code_item_->insns_);
/* Flag the start of the method as a branch target, and a GC point due to stack overflow errors */
- insn_flags_[0].SetBranchTarget();
- insn_flags_[0].SetCompileTimeInfoPoint();
+ GetInstructionFlags(0).SetBranchTarget();
+ GetInstructionFlags(0).SetCompileTimeInfoPoint();
uint32_t insns_size = code_item_->insns_size_in_code_units_;
for (uint32_t dex_pc = 0; dex_pc < insns_size;) {
@@ -948,18 +950,18 @@
// All invoke points are marked as "Throw" points already.
// We are relying on this to also count all the invokes as interesting.
if (inst->IsBranch()) {
- insn_flags_[dex_pc].SetCompileTimeInfoPoint();
+ GetInstructionFlags(dex_pc).SetCompileTimeInfoPoint();
// The compiler also needs safepoints for fall-through to loop heads.
// Such a loop head must be a target of a branch.
int32_t offset = 0;
bool cond, self_ok;
bool target_ok = GetBranchOffset(dex_pc, &offset, &cond, &self_ok);
DCHECK(target_ok);
- insn_flags_[dex_pc + offset].SetCompileTimeInfoPoint();
+ GetInstructionFlags(dex_pc + offset).SetCompileTimeInfoPoint();
} else if (inst->IsSwitch() || inst->IsThrow()) {
- insn_flags_[dex_pc].SetCompileTimeInfoPoint();
+ GetInstructionFlags(dex_pc).SetCompileTimeInfoPoint();
} else if (inst->IsReturn()) {
- insn_flags_[dex_pc].SetCompileTimeInfoPointAndReturn();
+ GetInstructionFlags(dex_pc).SetCompileTimeInfoPointAndReturn();
}
dex_pc += inst->SizeInCodeUnits();
inst = inst->Next();
@@ -1202,7 +1204,7 @@
}
// Make sure the array-data is marked as an opcode. This ensures that it was reached when
// traversing the code item linearly. It is an approximation for a by-spec padding value.
- if (!insn_flags_[cur_offset + array_data_offset].IsOpcode()) {
+ if (!GetInstructionFlags(cur_offset + array_data_offset).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "array data table at " << cur_offset
<< ", data offset " << array_data_offset
<< " not correctly visited, probably bad padding.";
@@ -1245,13 +1247,13 @@
int32_t abs_offset = cur_offset + offset;
if (abs_offset < 0 ||
(uint32_t) abs_offset >= insn_count ||
- !insn_flags_[abs_offset].IsOpcode()) {
+ !GetInstructionFlags(abs_offset).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid branch target " << offset << " (-> "
<< reinterpret_cast<void*>(abs_offset) << ") at "
<< reinterpret_cast<void*>(cur_offset);
return false;
}
- insn_flags_[abs_offset].SetBranchTarget();
+ GetInstructionFlags(abs_offset).SetBranchTarget();
return true;
}
@@ -1315,7 +1317,7 @@
}
// Make sure the switch data is marked as an opcode. This ensures that it was reached when
// traversing the code item linearly. It is an approximation for a by-spec padding value.
- if (!insn_flags_[cur_offset + switch_offset].IsOpcode()) {
+ if (!GetInstructionFlags(cur_offset + switch_offset).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "switch table at " << cur_offset
<< ", switch offset " << switch_offset
<< " not correctly visited, probably bad padding.";
@@ -1387,14 +1389,14 @@
int32_t abs_offset = cur_offset + offset;
if (abs_offset < 0 ||
abs_offset >= static_cast<int32_t>(insn_count) ||
- !insn_flags_[abs_offset].IsOpcode()) {
+ !GetInstructionFlags(abs_offset).IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch target " << offset
<< " (-> " << reinterpret_cast<void*>(abs_offset) << ") at "
<< reinterpret_cast<void*>(cur_offset)
<< "[" << targ << "]";
return false;
}
- insn_flags_[abs_offset].SetBranchTarget();
+ GetInstructionFlags(abs_offset).SetBranchTarget();
}
return true;
}
@@ -1435,7 +1437,6 @@
registers_size,
this);
-
work_line_.reset(RegisterLine::Create(registers_size, this));
saved_line_.reset(RegisterLine::Create(registers_size, this));
@@ -1491,7 +1492,7 @@
vios->Stream() << reg_line->Dump(this) << "\n";
}
vios->Stream()
- << StringPrintf("0x%04zx", dex_pc) << ": " << insn_flags_[dex_pc].ToString() << " ";
+ << StringPrintf("0x%04zx", dex_pc) << ": " << GetInstructionFlags(dex_pc).ToString() << " ";
const bool kDumpHexOfInstruction = false;
if (kDumpHexOfInstruction) {
vios->Stream() << inst->DumpHex(5) << " ";
@@ -1677,7 +1678,7 @@
const uint32_t insns_size = code_item_->insns_size_in_code_units_;
/* Begin by marking the first instruction as "changed". */
- insn_flags_[0].SetChanged();
+ GetInstructionFlags(0).SetChanged();
uint32_t start_guess = 0;
/* Continue until no instructions are marked "changed". */
@@ -1688,7 +1689,7 @@
// Find the first marked one. Use "start_guess" as a way to find one quickly.
uint32_t insn_idx = start_guess;
for (; insn_idx < insns_size; insn_idx++) {
- if (insn_flags_[insn_idx].IsChanged())
+ if (GetInstructionFlags(insn_idx).IsChanged())
break;
}
if (insn_idx == insns_size) {
@@ -1708,7 +1709,7 @@
// situation where we have a stray "changed" flag set on an instruction that isn't a branch
// target.
work_insn_idx_ = insn_idx;
- if (insn_flags_[insn_idx].IsBranchTarget()) {
+ if (GetInstructionFlags(insn_idx).IsBranchTarget()) {
work_line_->CopyFromLine(reg_table_.GetLine(insn_idx));
} else if (kIsDebugBuild) {
/*
@@ -1734,8 +1735,8 @@
return false;
}
/* Clear "changed" and mark as visited. */
- insn_flags_[insn_idx].SetVisited();
- insn_flags_[insn_idx].ClearChanged();
+ GetInstructionFlags(insn_idx).SetVisited();
+ GetInstructionFlags(insn_idx).ClearChanged();
}
if (gDebugVerify) {
@@ -1762,10 +1763,10 @@
(insns[insn_idx + 1] == Instruction::kPackedSwitchSignature ||
insns[insn_idx + 1] == Instruction::kSparseSwitchSignature ||
insns[insn_idx + 1] == Instruction::kArrayDataSignature))) {
- insn_flags_[insn_idx].SetVisited();
+ GetInstructionFlags(insn_idx).SetVisited();
}
- if (!insn_flags_[insn_idx].IsVisited()) {
+ if (!GetInstructionFlags(insn_idx).IsVisited()) {
if (dead_start < 0)
dead_start = insn_idx;
} else if (dead_start >= 0) {
@@ -1895,8 +1896,8 @@
// We need to ensure the work line is consistent while performing validation. When we spot a
// peephole pattern we compute a new line for either the fallthrough instruction or the
// branch target.
- std::unique_ptr<RegisterLine> branch_line;
- std::unique_ptr<RegisterLine> fallthrough_line;
+ ArenaUniquePtr<RegisterLine> branch_line;
+ ArenaUniquePtr<RegisterLine> fallthrough_line;
switch (inst->Opcode()) {
case Instruction::NOP:
@@ -2144,9 +2145,9 @@
work_line_->PushMonitor(this, inst->VRegA_11x(), work_insn_idx_);
// Check whether the previous instruction is a move-object with vAA as a source, creating
// untracked lock aliasing.
- if (0 != work_insn_idx_ && !insn_flags_[work_insn_idx_].IsBranchTarget()) {
+ if (0 != work_insn_idx_ && !GetInstructionFlags(work_insn_idx_).IsBranchTarget()) {
uint32_t prev_idx = work_insn_idx_ - 1;
- while (0 != prev_idx && !insn_flags_[prev_idx].IsOpcode()) {
+ while (0 != prev_idx && !GetInstructionFlags(prev_idx).IsOpcode()) {
prev_idx--;
}
const Instruction* prev_inst = Instruction::At(code_item_->insns_ + prev_idx);
@@ -2427,10 +2428,10 @@
uint32_t instance_of_idx = 0;
if (0 != work_insn_idx_) {
instance_of_idx = work_insn_idx_ - 1;
- while (0 != instance_of_idx && !insn_flags_[instance_of_idx].IsOpcode()) {
+ while (0 != instance_of_idx && !GetInstructionFlags(instance_of_idx).IsOpcode()) {
instance_of_idx--;
}
- if (FailOrAbort(this, insn_flags_[instance_of_idx].IsOpcode(),
+ if (FailOrAbort(this, GetInstructionFlags(instance_of_idx).IsOpcode(),
"Unable to get previous instruction of if-eqz/if-nez for work index ",
work_insn_idx_)) {
break;
@@ -2486,15 +2487,15 @@
update_line->SetRegisterType<LockOp::kKeep>(this,
instance_of_inst->VRegB_22c(),
cast_type);
- if (!insn_flags_[instance_of_idx].IsBranchTarget() && 0 != instance_of_idx) {
+ if (!GetInstructionFlags(instance_of_idx).IsBranchTarget() && 0 != instance_of_idx) {
// See if instance-of was preceded by a move-object operation, common due to the small
// register encoding space of instance-of, and propagate type information to the source
// of the move-object.
uint32_t move_idx = instance_of_idx - 1;
- while (0 != move_idx && !insn_flags_[move_idx].IsOpcode()) {
+ while (0 != move_idx && !GetInstructionFlags(move_idx).IsOpcode()) {
move_idx--;
}
- if (FailOrAbort(this, insn_flags_[move_idx].IsOpcode(),
+ if (FailOrAbort(this, GetInstructionFlags(move_idx).IsOpcode(),
"Unable to get previous instruction of if-eqz/if-nez for work index ",
work_insn_idx_)) {
break;
@@ -2786,8 +2787,7 @@
work_line_->MarkRefsAsInitialized(this, this_type, this_reg, work_insn_idx_);
}
if (return_type == nullptr) {
- return_type = ®_types_.FromDescriptor(GetClassLoader(), return_type_descriptor,
- false);
+ return_type = ®_types_.FromDescriptor(GetClassLoader(), return_type_descriptor, false);
}
if (!return_type->IsLowHalf()) {
work_line_->SetResultRegisterType(this, *return_type);
@@ -2860,7 +2860,7 @@
uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
- descriptor = dex_file_->StringByTypeIdx(return_type_idx);
+ descriptor = dex_file_->StringByTypeIdx(return_type_idx);
} else {
descriptor = abs_method->GetReturnTypeDescriptor();
}
@@ -3309,7 +3309,7 @@
return false;
}
/* update branch target, set "changed" if appropriate */
- if (nullptr != branch_line.get()) {
+ if (nullptr != branch_line) {
if (!UpdateRegisters(work_insn_idx_ + branch_target, branch_line.get(), false)) {
return false;
}
@@ -3364,7 +3364,7 @@
* Handle instructions that can throw and that are sitting in a "try" block. (If they're not in a
* "try" block when they throw, control transfers out of the method.)
*/
- if ((opcode_flags & Instruction::kThrow) != 0 && insn_flags_[work_insn_idx_].IsInTry()) {
+ if ((opcode_flags & Instruction::kThrow) != 0 && GetInstructionFlags(work_insn_idx_).IsInTry()) {
bool has_catch_all_handler = false;
CatchHandlerIterator iterator(*code_item_, work_insn_idx_);
@@ -3434,11 +3434,11 @@
if (!CheckNotMoveException(code_item_->insns_, next_insn_idx)) {
return false;
}
- if (nullptr != fallthrough_line.get()) {
+ if (nullptr != fallthrough_line) {
// Make workline consistent with fallthrough computed from peephole optimization.
work_line_->CopyFromLine(fallthrough_line.get());
}
- if (insn_flags_[next_insn_idx].IsReturn()) {
+ if (GetInstructionFlags(next_insn_idx).IsReturn()) {
// For returns we only care about the operand to the return, all other registers are dead.
const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn_idx);
AdjustReturnLine(this, ret_inst, work_line_.get());
@@ -3456,7 +3456,7 @@
* We're not recording register data for the next instruction, so we don't know what the
* prior state was. We have to assume that something has changed and re-evaluate it.
*/
- insn_flags_[next_insn_idx].SetChanged();
+ GetInstructionFlags(next_insn_idx).SetChanged();
}
}
@@ -3480,7 +3480,7 @@
}
DCHECK_LT(*start_guess, code_item_->insns_size_in_code_units_);
- DCHECK(insn_flags_[*start_guess].IsOpcode());
+ DCHECK(GetInstructionFlags(*start_guess).IsOpcode());
if (have_pending_runtime_throw_failure_) {
have_any_pending_runtime_throw_failure_ = true;
@@ -3491,30 +3491,55 @@
return true;
} // NOLINT(readability/fn_size)
+void MethodVerifier::UninstantiableError(const char* descriptor) {
+ Fail(VerifyError::VERIFY_ERROR_NO_CLASS) << "Could not create precise reference for "
+ << "non-instantiable klass " << descriptor;
+}
+
+inline bool MethodVerifier::IsInstantiableOrPrimitive(mirror::Class* klass) {
+ return klass->IsInstantiable() || klass->IsPrimitive();
+}
+
const RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
- const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
- const RegType& referrer = GetDeclaringClass();
mirror::Class* klass = dex_cache_->GetResolvedType(class_idx);
- const RegType& result = klass != nullptr ?
- FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes()) :
- reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
- if (result.IsConflict()) {
- Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "accessing broken descriptor '" << descriptor
- << "' in " << referrer;
- return result;
+ const RegType* result = nullptr;
+ if (klass != nullptr) {
+ bool precise = klass->CannotBeAssignedFromOtherTypes();
+ if (precise && !IsInstantiableOrPrimitive(klass)) {
+ const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+ UninstantiableError(descriptor);
+ precise = false;
+ }
+ result = reg_types_.FindClass(klass, precise);
+ if (result == nullptr) {
+ const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+ result = reg_types_.InsertClass(descriptor, klass, precise);
+ }
+ } else {
+ const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+ result = ®_types_.FromDescriptor(GetClassLoader(), descriptor, false);
}
- if (klass == nullptr && !result.IsUnresolvedTypes()) {
- dex_cache_->SetResolvedType(class_idx, result.GetClass());
+ DCHECK(result != nullptr);
+ if (result->IsConflict()) {
+ const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "accessing broken descriptor '" << descriptor
+ << "' in " << GetDeclaringClass();
+ return *result;
+ }
+ if (klass == nullptr && !result->IsUnresolvedTypes()) {
+ dex_cache_->SetResolvedType(class_idx, result->GetClass());
}
// Check if access is allowed. Unresolved types use xxxWithAccessCheck to
// check at runtime if access is allowed and so pass here. If result is
// primitive, skip the access check.
- if (result.IsNonZeroReferenceTypes() && !result.IsUnresolvedTypes() &&
- !referrer.IsUnresolvedTypes() && !referrer.CanAccess(result)) {
- Fail(VERIFY_ERROR_ACCESS_CLASS) << "illegal class access: '"
- << referrer << "' -> '" << result << "'";
+ if (result->IsNonZeroReferenceTypes() && !result->IsUnresolvedTypes()) {
+ const RegType& referrer = GetDeclaringClass();
+ if (!referrer.IsUnresolvedTypes() && !referrer.CanAccess(*result)) {
+ Fail(VERIFY_ERROR_ACCESS_CLASS) << "illegal class access: '"
+ << referrer << "' -> '" << *result << "'";
+ }
}
- return result;
+ return *result;
}
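
ResolveClassAndCheckAccess now touches the cache at most twice: FindClass is a cheap lookup, and InsertClass constructs a new entry only on a miss. The find-or-insert shape, reduced to a toy cache (hypothetical names):

    #include <string>
    #include <unordered_map>

    // Toy find-or-insert cache: look up first, construct only on a miss.
    struct TypeCacheSketch {
      std::unordered_map<std::string, int> cache;  // int stands in for a RegType.

      int* Find(const std::string& key) {
        auto it = cache.find(key);
        return it == cache.end() ? nullptr : &it->second;
      }

      int* FindOrInsert(const std::string& key, int value) {
        if (int* found = Find(key)) {
          return found;
        }
        return &cache.emplace(key, value).first->second;
      }
    };
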
const RegType& MethodVerifier::GetCaughtExceptionType() {
@@ -3720,9 +3745,10 @@
} else {
const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const uint16_t class_idx = dex_file_->GetMethodId(method_idx).class_idx_;
- res_method_class = ®_types_.FromDescriptor(GetClassLoader(),
- dex_file_->StringByTypeIdx(class_idx),
- false);
+ res_method_class = ®_types_.FromDescriptor(
+ GetClassLoader(),
+ dex_file_->StringByTypeIdx(class_idx),
+ false);
}
if (!res_method_class->IsAssignableFrom(actual_arg_type)) {
Fail(actual_arg_type.IsUnresolvedTypes() ? VERIFY_ERROR_NO_CLASS:
@@ -4476,14 +4502,16 @@
field->GetType<false>();
if (field_type_class != nullptr) {
- field_type = &FromClass(field->GetTypeDescriptor(), field_type_class,
+ field_type = &FromClass(field->GetTypeDescriptor(),
+ field_type_class,
field_type_class->CannotBeAssignedFromOtherTypes());
} else {
Thread* self = Thread::Current();
DCHECK(!can_load_classes_ || self->IsExceptionPending());
self->ClearException();
field_type = ®_types_.FromDescriptor(field->GetDeclaringClass()->GetClassLoader(),
- field->GetTypeDescriptor(), false);
+ field->GetTypeDescriptor(),
+ false);
}
if (field_type == nullptr) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field type from " << inst->Name();
@@ -4604,14 +4632,14 @@
bool update_merge_line) {
bool changed = true;
RegisterLine* target_line = reg_table_.GetLine(next_insn);
- if (!insn_flags_[next_insn].IsVisitedOrChanged()) {
+ if (!GetInstructionFlags(next_insn).IsVisitedOrChanged()) {
/*
* We haven't processed this instruction before, and we haven't touched the registers here, so
* there's nothing to "merge". Copy the registers over and mark it as changed. (This is the
* only way a register can transition out of "unknown", so this is not just an optimization.)
*/
target_line->CopyFromLine(merge_line);
- if (insn_flags_[next_insn].IsReturn()) {
+ if (GetInstructionFlags(next_insn).IsReturn()) {
// Verify that the monitor stack is empty on return.
merge_line->VerifyMonitorStackEmpty(this);
@@ -4621,10 +4649,9 @@
AdjustReturnLine(this, ret_inst, target_line);
}
} else {
- std::unique_ptr<RegisterLine> copy(gDebugVerify ?
- RegisterLine::Create(target_line->NumRegs(), this) :
- nullptr);
+ ArenaUniquePtr<RegisterLine> copy;
if (gDebugVerify) {
+ copy.reset(RegisterLine::Create(target_line->NumRegs(), this));
copy->CopyFromLine(target_line);
}
changed = target_line->MergeRegisters(this, merge_line);
@@ -4643,13 +4670,13 @@
}
}
if (changed) {
- insn_flags_[next_insn].SetChanged();
+ GetInstructionFlags(next_insn).SetChanged();
}
return true;
}
InstructionFlags* MethodVerifier::CurrentInsnFlags() {
- return &insn_flags_[work_insn_idx_];
+ return &GetInstructionFlags(work_insn_idx_);
}
const RegType& MethodVerifier::GetMethodReturnType() {
@@ -4685,8 +4712,7 @@
= dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
if (mirror_method_ != nullptr) {
mirror::Class* klass = mirror_method_->GetDeclaringClass();
- declaring_class_ = &FromClass(descriptor, klass,
- klass->CannotBeAssignedFromOtherTypes());
+ declaring_class_ = &FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes());
} else {
declaring_class_ = ®_types_.FromDescriptor(GetClassLoader(), descriptor, false);
}
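
The verifier's per-method state now comes from a scoped arena: AllocArray hands back raw bytes, std::uninitialized_fill_n runs the constructors in place, and there is no per-object delete since the arena releases everything wholesale, which is also why the hand-written delete loop in ~PcToRegisterLineTable() could go away. The idiom in miniature, with a stand-in bump allocator:

    #include <cstddef>
    #include <memory>
    #include <vector>

    // Stand-in for ScopedArenaAllocator: aligned raw blocks, freed wholesale.
    struct ArenaSketch {
      std::vector<std::unique_ptr<std::max_align_t[]>> blocks;
      void* Alloc(size_t bytes) {
        size_t units = (bytes + sizeof(std::max_align_t) - 1) / sizeof(std::max_align_t);
        blocks.emplace_back(new std::max_align_t[units]);
        return blocks.back().get();
      }
    };

    template <typename T>
    T* AllocArray(ArenaSketch& arena, size_t n) {
      T* raw = static_cast<T*>(arena.Alloc(n * sizeof(T)));  // Uninitialized memory.
      std::uninitialized_fill_n(raw, n, T());                // Construct each element.
      return raw;  // Never delete[]'d: ~ArenaSketch drops all blocks at once.
    }
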
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index ba694b7..7b51d6e 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -21,7 +21,10 @@
#include <sstream>
#include <vector>
+#include "base/arena_allocator.h"
#include "base/macros.h"
+#include "base/scoped_arena_containers.h"
+#include "base/stl_util.h"
#include "dex_file.h"
#include "handle.h"
#include "instruction_flags.h"
@@ -107,7 +110,7 @@
// execution of that instruction.
class PcToRegisterLineTable {
public:
- PcToRegisterLineTable() : size_(0) {}
+ explicit PcToRegisterLineTable(ScopedArenaAllocator& arena);
~PcToRegisterLineTable();
// Initialize the RegisterTable. Every instruction address can have a different set of information
@@ -116,14 +119,12 @@
void Init(RegisterTrackingMode mode, InstructionFlags* flags, uint32_t insns_size,
uint16_t registers_size, MethodVerifier* verifier);
- RegisterLine* GetLine(size_t idx) {
- DCHECK_LT(idx, size_);
- return register_lines_[idx];
+ RegisterLine* GetLine(size_t idx) const {
+ return register_lines_[idx].get();
}
private:
- std::unique_ptr<RegisterLine*[]> register_lines_;
- size_t size_;
+ ScopedArenaVector<ArenaUniquePtr<RegisterLine>> register_lines_;
DISALLOW_COPY_AND_ASSIGN(PcToRegisterLineTable);
};
@@ -240,7 +241,8 @@
// Accessors used by the compiler via CompilerCallback
const DexFile::CodeItem* CodeItem() const;
RegisterLine* GetRegLine(uint32_t dex_pc);
- const InstructionFlags& GetInstructionFlags(size_t index) const;
+ ALWAYS_INLINE const InstructionFlags& GetInstructionFlags(size_t index) const;
+ ALWAYS_INLINE InstructionFlags& GetInstructionFlags(size_t index);
mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
MethodReference GetMethodReference() const;
@@ -275,7 +277,14 @@
return IsConstructor() && !IsStatic();
}
+ ScopedArenaAllocator& GetArena() {
+ return arena_;
+ }
+
private:
+ void UninstantiableError(const char* descriptor);
+ static bool IsInstantiableOrPrimitive(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+
// Is the method being verified a constructor? See the comment on the field.
bool IsConstructor() const {
return is_constructor_;
@@ -687,19 +696,23 @@
// The thread we're verifying on.
Thread* const self_;
+ // Arena allocator.
+ ArenaStack arena_stack_;
+ ScopedArenaAllocator arena_;
+
RegTypeCache reg_types_;
PcToRegisterLineTable reg_table_;
// Storage for the register status we're currently working on.
- std::unique_ptr<RegisterLine> work_line_;
+ ArenaUniquePtr<RegisterLine> work_line_;
// The address of the instruction we're currently working on, note that this is in 2 byte
// quantities
uint32_t work_insn_idx_;
// Storage for the register status we're saving for later.
- std::unique_ptr<RegisterLine> saved_line_;
+ ArenaUniquePtr<RegisterLine> saved_line_;
const uint32_t dex_method_idx_; // The method we're working on.
// Its object representation if known.
@@ -715,7 +728,8 @@
const DexFile::CodeItem* const code_item_; // The code item containing the code for the method.
const RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
// Instruction widths and flags, one entry per code unit.
- std::unique_ptr<InstructionFlags[]> insn_flags_;
+ // Owned, but not a plain std::unique_ptr, since insn_flags_ is allocated in the verifier's arena.
+ ArenaUniquePtr<InstructionFlags[]> insn_flags_;
// The dex PC of a FindLocksAtDexPc request, -1 otherwise.
uint32_t interesting_dex_pc_;
// The container into which FindLocksAtDexPc should write the registers containing held locks,
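Note on the change above: the new ArenaStack/ScopedArenaAllocator members give the verifier a single arena that owns all of its scratch objects (register lines, instruction flags, reg type cache entries), so teardown becomes one bulk release instead of many individual deletes. A minimal standalone sketch of that ownership pattern follows; BumpArena and Verifier are illustrative stand-ins, not ART's ArenaStack/ScopedArenaAllocator.

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

class BumpArena {
 public:
  // Hand out a block; all blocks are released together with the arena.
  void* Alloc(std::size_t size) {
    storage_.emplace_back(size);
    return storage_.back().data();
  }

 private:
  std::vector<std::vector<uint8_t>> storage_;
};

class Verifier {
 public:
  void* AllocScratch(std::size_t size) { return arena_.Alloc(size); }

 private:
  BumpArena arena_;  // Owns every verifier-lifetime scratch allocation.
};

int main() {
  Verifier verifier;
  // Freed in bulk when `verifier` goes out of scope; no per-object delete.
  void* lines = verifier.AllocScratch(64);
  return lines != nullptr ? 0 : 1;
}
```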
diff --git a/runtime/verifier/reg_type-inl.h b/runtime/verifier/reg_type-inl.h
index f445132..11a53e5 100644
--- a/runtime/verifier/reg_type-inl.h
+++ b/runtime/verifier/reg_type-inl.h
@@ -20,6 +20,7 @@
#include "reg_type.h"
#include "base/casts.h"
+#include "base/scoped_arena_allocator.h"
#include "mirror/class.h"
namespace art {
@@ -180,6 +181,10 @@
return instance_;
}
+inline void* RegType::operator new(size_t size, ScopedArenaAllocator* arena) {
+ return arena->Alloc(size, kArenaAllocMisc);
+}
+
} // namespace verifier
} // namespace art
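The `operator new(size_t, ScopedArenaAllocator*)` overload defined above is what enables the `new (&arena_) SomeRegType(...)` expressions used throughout the cache changes below. A compilable sketch of the same placement-new-into-arena idiom, with a stand-in Arena type rather than ART's allocator:

```cpp
#include <cstddef>
#include <cstdio>
#include <new>
#include <vector>

// Stand-in for ScopedArenaAllocator; only Alloc() matters for the idiom.
struct Arena {
  void* Alloc(std::size_t size) {
    buffers_.emplace_back(size);
    return buffers_.back().data();
  }
  std::vector<std::vector<unsigned char>> buffers_;
};

struct Node {
  explicit Node(int id) : id_(id) {}
  // Route placement new through the arena, mirroring RegType::operator new.
  static void* operator new(std::size_t size, Arena* arena) {
    return arena->Alloc(size);
  }
  // Matching placement delete; called only if the constructor throws.
  static void operator delete(void*, Arena*) {}
  int id_;
};

int main() {
  Arena arena;
  Node* node = new (&arena) Node(42);  // No delete: the arena reclaims it.
  std::printf("%d\n", node->id_);
}
```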
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index b86a4c8..16cab03 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -16,6 +16,7 @@
#include "reg_type-inl.h"
+#include "base/arena_bit_vector.h"
#include "base/bit_vector-inl.h"
#include "base/casts.h"
#include "class_linker-inl.h"
@@ -46,20 +47,17 @@
const DoubleHiType* DoubleHiType::instance_ = nullptr;
const IntegerType* IntegerType::instance_ = nullptr;
-PrimitiveType::PrimitiveType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+PrimitiveType::PrimitiveType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
: RegType(klass, descriptor, cache_id) {
CHECK(klass != nullptr);
CHECK(!descriptor.empty());
}
-Cat1Type::Cat1Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+Cat1Type::Cat1Type(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
: PrimitiveType(klass, descriptor, cache_id) {
}
-Cat2Type::Cat2Type(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+Cat2Type::Cat2Type(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id)
: PrimitiveType(klass, descriptor, cache_id) {
}
@@ -121,11 +119,11 @@
}
std::string IntegerType::Dump() const {
- return "Integer";
+ return "Integer";
}
const DoubleHiType* DoubleHiType::CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new DoubleHiType(klass, descriptor, cache_id);
@@ -140,7 +138,7 @@
}
const DoubleLoType* DoubleLoType::CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new DoubleLoType(klass, descriptor, cache_id);
@@ -154,14 +152,14 @@
}
}
-const LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const LongLoType* LongLoType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new LongLoType(klass, descriptor, cache_id);
return instance_;
}
-const LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const LongHiType* LongHiType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new LongHiType(klass, descriptor, cache_id);
@@ -182,7 +180,7 @@
}
}
-const FloatType* FloatType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const FloatType* FloatType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new FloatType(klass, descriptor, cache_id);
@@ -196,7 +194,7 @@
}
}
-const CharType* CharType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const CharType* CharType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new CharType(klass, descriptor, cache_id);
@@ -210,7 +208,7 @@
}
}
-const ShortType* ShortType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const ShortType* ShortType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new ShortType(klass, descriptor, cache_id);
@@ -224,7 +222,7 @@
}
}
-const ByteType* ByteType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const ByteType* ByteType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new ByteType(klass, descriptor, cache_id);
@@ -238,7 +236,7 @@
}
}
-const IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const IntegerType* IntegerType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new IntegerType(klass, descriptor, cache_id);
@@ -253,7 +251,7 @@
}
const ConflictType* ConflictType::CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new ConflictType(klass, descriptor, cache_id);
@@ -267,7 +265,7 @@
}
}
-const BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const std::string& descriptor,
+const BooleanType* BooleanType::CreateInstance(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(BooleanType::instance_ == nullptr);
instance_ = new BooleanType(klass, descriptor, cache_id);
@@ -286,7 +284,7 @@
}
const UndefinedType* UndefinedType::CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id) {
CHECK(instance_ == nullptr);
instance_ = new UndefinedType(klass, descriptor, cache_id);
@@ -300,7 +298,7 @@
}
}
-PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
+PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id)
: RegType(klass, descriptor, cache_id) {
// Note: no check for IsInstantiable() here. We may produce this in case an InstantiationError
@@ -335,14 +333,14 @@
std::string UnresolvedReferenceType::Dump() const {
std::stringstream result;
- result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor().c_str());
+ result << "Unresolved Reference" << ": " << PrettyDescriptor(GetDescriptor().as_string().c_str());
return result.str();
}
std::string UnresolvedUninitializedRefType::Dump() const {
std::stringstream result;
result << "Unresolved And Uninitialized Reference" << ": "
- << PrettyDescriptor(GetDescriptor().c_str())
+ << PrettyDescriptor(GetDescriptor().as_string().c_str())
<< " Allocation PC: " << GetAllocationPc();
return result.str();
}
@@ -350,7 +348,7 @@
std::string UnresolvedUninitializedThisRefType::Dump() const {
std::stringstream result;
result << "Unresolved And Uninitialized This Reference"
- << PrettyDescriptor(GetDescriptor().c_str());
+ << PrettyDescriptor(GetDescriptor().as_string().c_str());
return result.str();
}
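The constructors above now take StringPiece descriptors, which is why the Dump() methods go through as_string(): a StringPiece is a (pointer, length) view with no NUL terminator of its own, so code that needs a C string must first materialize an owned copy. A small illustration of the same pitfall using std::string_view, the standard-library equivalent:

```cpp
#include <cstdio>
#include <string>
#include <string_view>

int main() {
  const char backing[] = "Ljava/lang/Object;Ljava/lang/String;";
  // A view of the first descriptor only: 18 bytes, no NUL at view.end().
  std::string_view view(backing, 18);  // "Ljava/lang/Object;"
  // Printing view.data() as a C string would run on into the next
  // descriptor, so make an owned, NUL-terminated copy first.
  std::string owned(view);
  std::printf("%s\n", owned.c_str());
}
```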
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 2834a9a..80b751c 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -22,9 +22,11 @@
#include <set>
#include <string>
+#include "base/arena_object.h"
#include "base/bit_vector.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "base/stringpiece.h"
#include "gc_root.h"
#include "handle_scope.h"
#include "object_callbacks.h"
@@ -35,6 +37,9 @@
class Class;
} // namespace mirror
+class ArenaBitVector;
+class ScopedArenaAllocator;
+
namespace verifier {
class RegTypeCache;
@@ -173,7 +178,7 @@
bool IsJavaLangObjectArray() const
SHARED_REQUIRES(Locks::mutator_lock_);
bool IsInstantiableTypes() const SHARED_REQUIRES(Locks::mutator_lock_);
- const std::string& GetDescriptor() const {
+ const StringPiece& GetDescriptor() const {
DCHECK(HasClass() ||
(IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
!IsUnresolvedSuperClass()));
@@ -274,10 +279,20 @@
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) const
SHARED_REQUIRES(Locks::mutator_lock_);
+ static void* operator new(size_t size) noexcept {
+ return ::operator new(size);
+ }
+
+ static void* operator new(size_t size, ArenaAllocator* arena) = delete;
+ static void* operator new(size_t size, ScopedArenaAllocator* arena);
+
protected:
- RegType(mirror::Class* klass, const std::string& descriptor,
+ RegType(mirror::Class* klass,
+ const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
- : descriptor_(descriptor), klass_(klass), cache_id_(cache_id) {
+ : descriptor_(descriptor),
+ klass_(klass),
+ cache_id_(cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
}
@@ -285,9 +300,8 @@
void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
- const std::string descriptor_;
- mutable GcRoot<mirror::Class>
- klass_; // Non-const only due to moving classes.
+ const StringPiece descriptor_;
+ mutable GcRoot<mirror::Class> klass_; // Non-const only due to moving classes.
const uint16_t cache_id_;
friend class RegTypeCache;
@@ -311,7 +325,7 @@
// Create the singleton instance.
static const ConflictType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -319,7 +333,7 @@
static void Destroy();
private:
- ConflictType(mirror::Class* klass, const std::string& descriptor,
+ ConflictType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {}
@@ -340,7 +354,7 @@
// Create the singleton instance.
static const UndefinedType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -348,7 +362,7 @@
static void Destroy();
private:
- UndefinedType(mirror::Class* klass, const std::string& descriptor,
+ UndefinedType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {}
@@ -357,7 +371,7 @@
class PrimitiveType : public RegType {
public:
- PrimitiveType(mirror::Class* klass, const std::string& descriptor,
+ PrimitiveType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
bool HasClassVirtual() const OVERRIDE { return true; }
@@ -365,7 +379,7 @@
class Cat1Type : public PrimitiveType {
public:
- Cat1Type(mirror::Class* klass, const std::string& descriptor,
+ Cat1Type(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
};
@@ -374,14 +388,14 @@
bool IsInteger() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const IntegerType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const IntegerType* GetInstance() PURE;
static void Destroy();
private:
- IntegerType(mirror::Class* klass, const std::string& descriptor,
+ IntegerType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const IntegerType* instance_;
@@ -392,14 +406,14 @@
bool IsBoolean() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const BooleanType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const BooleanType* GetInstance() PURE;
static void Destroy();
private:
- BooleanType(mirror::Class* klass, const std::string& descriptor,
+ BooleanType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
@@ -411,14 +425,14 @@
bool IsByte() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const ByteType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const ByteType* GetInstance() PURE;
static void Destroy();
private:
- ByteType(mirror::Class* klass, const std::string& descriptor,
+ ByteType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const ByteType* instance_;
@@ -429,14 +443,14 @@
bool IsShort() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const ShortType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const ShortType* GetInstance() PURE;
static void Destroy();
private:
- ShortType(mirror::Class* klass, const std::string& descriptor,
+ ShortType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const ShortType* instance_;
@@ -447,14 +461,14 @@
bool IsChar() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const CharType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const CharType* GetInstance() PURE;
static void Destroy();
private:
- CharType(mirror::Class* klass, const std::string& descriptor,
+ CharType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const CharType* instance_;
@@ -465,14 +479,14 @@
bool IsFloat() const OVERRIDE { return true; }
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
static const FloatType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const FloatType* GetInstance() PURE;
static void Destroy();
private:
- FloatType(mirror::Class* klass, const std::string& descriptor,
+ FloatType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const FloatType* instance_;
@@ -480,7 +494,7 @@
class Cat2Type : public PrimitiveType {
public:
- Cat2Type(mirror::Class* klass, const std::string& descriptor,
+ Cat2Type(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
};
@@ -490,14 +504,14 @@
bool IsLongLo() const OVERRIDE { return true; }
bool IsLong() const OVERRIDE { return true; }
static const LongLoType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const LongLoType* GetInstance() PURE;
static void Destroy();
private:
- LongLoType(mirror::Class* klass, const std::string& descriptor,
+ LongLoType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const LongLoType* instance_;
@@ -508,14 +522,14 @@
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
bool IsLongHi() const OVERRIDE { return true; }
static const LongHiType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const LongHiType* GetInstance() PURE;
static void Destroy();
private:
- LongHiType(mirror::Class* klass, const std::string& descriptor,
+ LongHiType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const LongHiType* instance_;
@@ -527,14 +541,14 @@
bool IsDoubleLo() const OVERRIDE { return true; }
bool IsDouble() const OVERRIDE { return true; }
static const DoubleLoType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const DoubleLoType* GetInstance() PURE;
static void Destroy();
private:
- DoubleLoType(mirror::Class* klass, const std::string& descriptor,
+ DoubleLoType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const DoubleLoType* instance_;
@@ -545,14 +559,14 @@
std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
virtual bool IsDoubleHi() const OVERRIDE { return true; }
static const DoubleHiType* CreateInstance(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
static const DoubleHiType* GetInstance() PURE;
static void Destroy();
private:
- DoubleHiType(mirror::Class* klass, const std::string& descriptor,
+ DoubleHiType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const DoubleHiType* instance_;
@@ -677,7 +691,7 @@
// instructions and must be passed to a constructor.
class UninitializedType : public RegType {
public:
- UninitializedType(mirror::Class* klass, const std::string& descriptor,
+ UninitializedType(mirror::Class* klass, const StringPiece& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
: RegType(klass, descriptor, cache_id), allocation_pc_(allocation_pc) {}
@@ -697,7 +711,7 @@
class UninitializedReferenceType FINAL : public UninitializedType {
public:
UninitializedReferenceType(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_)
: UninitializedType(klass, descriptor, allocation_pc, cache_id) {}
@@ -713,7 +727,7 @@
// constructor.
class UnresolvedUninitializedRefType FINAL : public UninitializedType {
public:
- UnresolvedUninitializedRefType(const std::string& descriptor,
+ UnresolvedUninitializedRefType(const StringPiece& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_)
: UninitializedType(nullptr, descriptor, allocation_pc, cache_id) {
@@ -737,7 +751,7 @@
class UninitializedThisReferenceType FINAL : public UninitializedType {
public:
UninitializedThisReferenceType(mirror::Class* klass,
- const std::string& descriptor,
+ const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_)
: UninitializedType(klass, descriptor, 0, cache_id) {
@@ -758,7 +772,7 @@
class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
public:
- UnresolvedUninitializedThisRefType(const std::string& descriptor,
+ UnresolvedUninitializedThisRefType(const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_)
: UninitializedType(nullptr, descriptor, 0, cache_id) {
@@ -781,7 +795,7 @@
// sub-class.
class ReferenceType FINAL : public RegType {
public:
- ReferenceType(mirror::Class* klass, const std::string& descriptor,
+ ReferenceType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {}
@@ -799,7 +813,7 @@
// type.
class PreciseReferenceType FINAL : public RegType {
public:
- PreciseReferenceType(mirror::Class* klass, const std::string& descriptor,
+ PreciseReferenceType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
@@ -815,7 +829,7 @@
// Common parent of unresolved types.
class UnresolvedType : public RegType {
public:
- UnresolvedType(const std::string& descriptor, uint16_t cache_id)
+ UnresolvedType(const StringPiece& descriptor, uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_)
: RegType(nullptr, descriptor, cache_id) {}
@@ -827,7 +841,7 @@
// of this type must be conservative.
class UnresolvedReferenceType FINAL : public UnresolvedType {
public:
- UnresolvedReferenceType(const std::string& descriptor, uint16_t cache_id)
+ UnresolvedReferenceType(const StringPiece& descriptor, uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_)
: UnresolvedType(descriptor, cache_id) {
if (kIsDebugBuild) {
@@ -882,8 +896,10 @@
class UnresolvedMergedType FINAL : public UnresolvedType {
public:
// Note: the constructor will copy the unresolved BitVector, not use it directly.
- UnresolvedMergedType(const RegType& resolved, const BitVector& unresolved,
- const RegTypeCache* reg_type_cache, uint16_t cache_id)
+ UnresolvedMergedType(const RegType& resolved,
+ const BitVector& unresolved,
+ const RegTypeCache* reg_type_cache,
+ uint16_t cache_id)
SHARED_REQUIRES(Locks::mutator_lock_);
// The resolved part. See description below.
diff --git a/runtime/verifier/reg_type_cache-inl.h b/runtime/verifier/reg_type_cache-inl.h
index b6f253b..68af62e 100644
--- a/runtime/verifier/reg_type_cache-inl.h
+++ b/runtime/verifier/reg_type_cache-inl.h
@@ -118,6 +118,18 @@
}
}
+template <class RegTypeType>
+inline RegTypeType& RegTypeCache::AddEntry(RegTypeType* new_entry) {
+ DCHECK(new_entry != nullptr);
+ entries_.push_back(new_entry);
+ if (new_entry->HasClass()) {
+ mirror::Class* klass = new_entry->GetClass();
+ DCHECK(!klass->IsPrimitive());
+ klass_entries_.push_back(std::make_pair(GcRoot<mirror::Class>(klass), new_entry));
+ }
+ return *new_entry;
+}
+
} // namespace verifier
} // namespace art
#endif // ART_RUNTIME_VERIFIER_REG_TYPE_CACHE_INL_H_
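AddEntry above does double duty: it appends to entries_ and, for entries with a resolved class, records a (class, entry) pair in klass_entries_ so class lookups scan a much smaller vector than entries_. A rough standalone model of that two-index scheme; Entry and Cache are stand-ins for RegType and RegTypeCache, pared down to just the indexing:

```cpp
#include <cstddef>
#include <utility>
#include <vector>

struct Entry {
  const void* klass;  // Null for unresolved types.
  bool precise;
};

class Cache {
 public:
  template <typename EntryType>
  EntryType& AddEntry(EntryType* entry) {
    entries_.push_back(entry);
    if (entry->klass != nullptr) {
      // Fast-path index: only entries with a resolved class land here.
      klass_entries_.emplace_back(entry->klass, entry);
    }
    return *entry;
  }

  const Entry* FindClass(const void* klass, bool precise) const {
    for (const auto& pair : klass_entries_) {  // Smaller than entries_.
      if (pair.first == klass && pair.second->precise == precise) {
        return pair.second;
      }
    }
    return nullptr;
  }

 private:
  std::vector<const Entry*> entries_;
  std::vector<std::pair<const void*, const Entry*>> klass_entries_;
};

int main() {
  Cache cache;
  int fake_klass = 0;
  Entry entry{&fake_klass, /*precise=*/false};
  cache.AddEntry(&entry);
  return cache.FindClass(&fake_klass, /*precise=*/false) != nullptr ? 0 : 1;
}
```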
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index bb756e9..71ed4a2 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -16,7 +16,9 @@
#include "reg_type_cache-inl.h"
+#include "base/arena_bit_vector.h"
#include "base/casts.h"
+#include "base/scoped_arena_allocator.h"
#include "base/stl_util.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
@@ -29,9 +31,10 @@
bool RegTypeCache::primitive_initialized_ = false;
uint16_t RegTypeCache::primitive_count_ = 0;
-const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
+const PreciseConstType* RegTypeCache::small_precise_constants_[kMaxSmallConstant -
+ kMinSmallConstant + 1];
-static bool MatchingPrecisionForClass(const RegType* entry, bool precise)
+ALWAYS_INLINE static inline bool MatchingPrecisionForClass(const RegType* entry, bool precise)
SHARED_REQUIRES(Locks::mutator_lock_) {
if (entry->IsPreciseReference() == precise) {
// We were or weren't looking for a precise reference and we found what we need.
@@ -67,7 +70,8 @@
DCHECK_EQ(entries_.size(), primitive_count_);
}
-const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader, const char* descriptor,
+const RegType& RegTypeCache::FromDescriptor(mirror::ClassLoader* loader,
+ const char* descriptor,
bool precise) {
DCHECK(RegTypeCache::primitive_initialized_);
if (descriptor[1] == '\0') {
@@ -159,13 +163,20 @@
return klass;
}
-const RegType& RegTypeCache::From(mirror::ClassLoader* loader, const char* descriptor,
+StringPiece RegTypeCache::AddString(const StringPiece& string_piece) {
+ char* ptr = arena_.AllocArray<char>(string_piece.length());
+ memcpy(ptr, string_piece.data(), string_piece.length());
+ return StringPiece(ptr, string_piece.length());
+}
+
+const RegType& RegTypeCache::From(mirror::ClassLoader* loader,
+ const char* descriptor,
bool precise) {
+ StringPiece sp_descriptor(descriptor);
// Try looking up the class in the cache first. We use a StringPiece to avoid continual strlen
// operations on the descriptor.
- StringPiece descriptor_sp(descriptor);
for (size_t i = primitive_count_; i < entries_.size(); i++) {
- if (MatchDescriptor(i, descriptor_sp, precise)) {
+ if (MatchDescriptor(i, sp_descriptor, precise)) {
return *(entries_[i]);
}
}
@@ -186,12 +197,11 @@
if (klass->CannotBeAssignedFromOtherTypes() || precise) {
DCHECK(!(klass->IsAbstract()) || klass->IsArrayClass());
DCHECK(!klass->IsInterface());
- entry = new PreciseReferenceType(klass, descriptor_sp.as_string(), entries_.size());
+ entry = new (&arena_) PreciseReferenceType(klass, AddString(sp_descriptor), entries_.size());
} else {
- entry = new ReferenceType(klass, descriptor_sp.as_string(), entries_.size());
+ entry = new (&arena_) ReferenceType(klass, AddString(sp_descriptor), entries_.size());
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
} else { // Class not resolved.
// We tried loading the class and failed, this might get an exception raised
// so we want to clear it before we go on.
@@ -202,9 +212,8 @@
DCHECK(!Thread::Current()->IsExceptionPending());
}
if (IsValidDescriptor(descriptor)) {
- RegType* entry = new UnresolvedReferenceType(descriptor_sp.as_string(), entries_.size());
- AddEntry(entry);
- return *entry;
+ return AddEntry(
+ new (&arena_) UnresolvedReferenceType(AddString(sp_descriptor), entries_.size()));
} else {
// The descriptor is broken: return the unknown type, as there's nothing sensible that
// could be done at runtime.
@@ -213,50 +222,65 @@
}
}
-const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+const RegType* RegTypeCache::FindClass(mirror::Class* klass, bool precise) const {
DCHECK(klass != nullptr);
if (klass->IsPrimitive()) {
// Note: precise isn't used for primitive classes. A char is assignable to an int. All
// primitive classes are final.
- return RegTypeFromPrimitiveType(klass->GetPrimitiveType());
- } else {
- // Look for the reference in the list of entries to have.
- for (size_t i = primitive_count_; i < entries_.size(); i++) {
- const RegType* cur_entry = entries_[i];
- if (cur_entry->klass_.Read() == klass && MatchingPrecisionForClass(cur_entry, precise)) {
- return *cur_entry;
+ return &RegTypeFromPrimitiveType(klass->GetPrimitiveType());
+ }
+ for (auto& pair : klass_entries_) {
+ mirror::Class* const reg_klass = pair.first.Read();
+ if (reg_klass == klass) {
+ const RegType* reg_type = pair.second;
+ if (MatchingPrecisionForClass(reg_type, precise)) {
+ return reg_type;
}
}
- // No reference to the class was found, create new reference.
- RegType* entry;
- if (precise) {
- entry = new PreciseReferenceType(klass, descriptor, entries_.size());
- } else {
- entry = new ReferenceType(klass, descriptor, entries_.size());
- }
- AddEntry(entry);
- return *entry;
}
+ return nullptr;
}
-RegTypeCache::RegTypeCache(bool can_load_classes) : can_load_classes_(can_load_classes) {
+const RegType* RegTypeCache::InsertClass(const StringPiece& descriptor,
+ mirror::Class* klass,
+ bool precise) {
+ // No reference to the class was found, create new reference.
+ DCHECK(FindClass(klass, precise) == nullptr);
+ RegType* const reg_type = precise
+ ? static_cast<RegType*>(
+ new (&arena_) PreciseReferenceType(klass, descriptor, entries_.size()))
+ : new (&arena_) ReferenceType(klass, descriptor, entries_.size());
+ return &AddEntry(reg_type);
+}
+
+const RegType& RegTypeCache::FromClass(const char* descriptor, mirror::Class* klass, bool precise) {
+ DCHECK(klass != nullptr);
+ const RegType* reg_type = FindClass(klass, precise);
+ if (reg_type == nullptr) {
+ reg_type = InsertClass(AddString(StringPiece(descriptor)), klass, precise);
+ }
+ return *reg_type;
+}
+
+RegTypeCache::RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena)
+ : entries_(arena.Adapter(kArenaAllocVerifier)),
+ klass_entries_(arena.Adapter(kArenaAllocVerifier)),
+ can_load_classes_(can_load_classes),
+ arena_(arena) {
if (kIsDebugBuild) {
Thread::Current()->AssertThreadSuspensionIsAllowable(gAborting == 0);
}
- entries_.reserve(64);
+ // The klass_entries_ array does not have primitives or small constants.
+ static constexpr size_t kNumReserveEntries = 32;
+ klass_entries_.reserve(kNumReserveEntries);
+ // We want to have room for additional entries after inserting primitives and small
+ // constants.
+ entries_.reserve(kNumReserveEntries + kNumPrimitivesAndSmallConstants);
FillPrimitiveAndSmallConstantTypes();
}
RegTypeCache::~RegTypeCache() {
- CHECK_LE(primitive_count_, entries_.size());
- // Delete only the non primitive types.
- if (entries_.size() == kNumPrimitivesAndSmallConstants) {
- // All entries are from the global pool, nothing to delete.
- return;
- }
- std::vector<const RegType*>::iterator non_primitive_begin = entries_.begin();
- std::advance(non_primitive_begin, kNumPrimitivesAndSmallConstants);
- STLDeleteContainerPointers(non_primitive_begin, entries_.end());
+ DCHECK_LE(primitive_count_, entries_.size());
}
void RegTypeCache::ShutDown() {
@@ -318,9 +342,9 @@
}
const RegType& RegTypeCache::FromUnresolvedMerge(const RegType& left, const RegType& right) {
- BitVector types(1, // Allocate at least a word.
- true, // Is expandable.
- Allocator::GetMallocAllocator()); // TODO: Arenas in the verifier.
+ ArenaBitVector types(&arena_,
+ kDefaultArenaBitVectorBytes * kBitsPerByte, // Allocate at least 8 bytes.
+ true); // Is expandable.
const RegType* left_resolved;
if (left.IsUnresolvedMergedReference()) {
const UnresolvedMergedType* left_merge = down_cast<const UnresolvedMergedType*>(&left);
@@ -361,20 +385,15 @@
const BitVector& unresolved_part = cmp_type->GetUnresolvedTypes();
// Use SameBitsSet. "types" is expandable to allow merging in the components, but the
// BitVector in the final RegType will be made non-expandable.
- if (&resolved_part == &resolved_parts_merged &&
- types.SameBitsSet(&unresolved_part)) {
+ if (&resolved_part == &resolved_parts_merged && types.SameBitsSet(&unresolved_part)) {
return *cur_entry;
}
}
}
-
- // Create entry.
- RegType* entry = new UnresolvedMergedType(resolved_parts_merged,
- types,
- this,
- entries_.size());
- AddEntry(entry);
- return *entry;
+ return AddEntry(new (&arena_) UnresolvedMergedType(resolved_parts_merged,
+ types,
+ this,
+ entries_.size()));
}
const RegType& RegTypeCache::FromUnresolvedSuperClass(const RegType& child) {
@@ -391,14 +410,12 @@
}
}
}
- RegType* entry = new UnresolvedSuperClass(child.GetId(), this, entries_.size());
- AddEntry(entry);
- return *entry;
+ return AddEntry(new (&arena_) UnresolvedSuperClass(child.GetId(), this, entries_.size()));
}
const UninitializedType& RegTypeCache::Uninitialized(const RegType& type, uint32_t allocation_pc) {
UninitializedType* entry = nullptr;
- const std::string& descriptor(type.GetDescriptor());
+ const StringPiece& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
const RegType* cur_entry = entries_[i];
@@ -409,7 +426,9 @@
return *down_cast<const UnresolvedUninitializedRefType*>(cur_entry);
}
}
- entry = new UnresolvedUninitializedRefType(descriptor, allocation_pc, entries_.size());
+ entry = new (&arena_) UnresolvedUninitializedRefType(descriptor,
+ allocation_pc,
+ entries_.size());
} else {
mirror::Class* klass = type.GetClass();
for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -421,17 +440,19 @@
return *down_cast<const UninitializedReferenceType*>(cur_entry);
}
}
- entry = new UninitializedReferenceType(klass, descriptor, allocation_pc, entries_.size());
+ entry = new (&arena_) UninitializedReferenceType(klass,
+ descriptor,
+ allocation_pc,
+ entries_.size());
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
}
const RegType& RegTypeCache::FromUninitialized(const RegType& uninit_type) {
RegType* entry;
if (uninit_type.IsUnresolvedTypes()) {
- const std::string& descriptor(uninit_type.GetDescriptor());
+ const StringPiece& descriptor(uninit_type.GetDescriptor());
for (size_t i = primitive_count_; i < entries_.size(); i++) {
const RegType* cur_entry = entries_[i];
if (cur_entry->IsUnresolvedReference() &&
@@ -439,7 +460,7 @@
return *cur_entry;
}
}
- entry = new UnresolvedReferenceType(descriptor, entries_.size());
+ entry = new (&arena_) UnresolvedReferenceType(descriptor, entries_.size());
} else {
mirror::Class* klass = uninit_type.GetClass();
if (uninit_type.IsUninitializedThisReference() && !klass->IsFinal()) {
@@ -450,7 +471,7 @@
return *cur_entry;
}
}
- entry = new ReferenceType(klass, "", entries_.size());
+ entry = new (&arena_) ReferenceType(klass, "", entries_.size());
} else if (!klass->IsPrimitive()) {
// We're uninitialized because of allocation; look up or create a precise type, as allocations
// may only create objects of that type.
@@ -469,18 +490,19 @@
return *cur_entry;
}
}
- entry = new PreciseReferenceType(klass, uninit_type.GetDescriptor(), entries_.size());
+ entry = new (&arena_) PreciseReferenceType(klass,
+ uninit_type.GetDescriptor(),
+ entries_.size());
} else {
return Conflict();
}
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
}
const UninitializedType& RegTypeCache::UninitializedThisArgument(const RegType& type) {
UninitializedType* entry;
- const std::string& descriptor(type.GetDescriptor());
+ const StringPiece& descriptor(type.GetDescriptor());
if (type.IsUnresolvedTypes()) {
for (size_t i = primitive_count_; i < entries_.size(); i++) {
const RegType* cur_entry = entries_[i];
@@ -489,7 +511,7 @@
return *down_cast<const UninitializedType*>(cur_entry);
}
}
- entry = new UnresolvedUninitializedThisRefType(descriptor, entries_.size());
+ entry = new (&arena_) UnresolvedUninitializedThisRefType(descriptor, entries_.size());
} else {
mirror::Class* klass = type.GetClass();
for (size_t i = primitive_count_; i < entries_.size(); i++) {
@@ -498,10 +520,9 @@
return *down_cast<const UninitializedType*>(cur_entry);
}
}
- entry = new UninitializedThisReferenceType(klass, descriptor, entries_.size());
+ entry = new (&arena_) UninitializedThisReferenceType(klass, descriptor, entries_.size());
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
}
const ConstantType& RegTypeCache::FromCat1NonSmallConstant(int32_t value, bool precise) {
@@ -515,12 +536,11 @@
}
ConstantType* entry;
if (precise) {
- entry = new PreciseConstType(value, entries_.size());
+ entry = new (&arena_) PreciseConstType(value, entries_.size());
} else {
- entry = new ImpreciseConstType(value, entries_.size());
+ entry = new (&arena_) ImpreciseConstType(value, entries_.size());
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
}
const ConstantType& RegTypeCache::FromCat2ConstLo(int32_t value, bool precise) {
@@ -533,12 +553,11 @@
}
ConstantType* entry;
if (precise) {
- entry = new PreciseConstLoType(value, entries_.size());
+ entry = new (&arena_) PreciseConstLoType(value, entries_.size());
} else {
- entry = new ImpreciseConstLoType(value, entries_.size());
+ entry = new (&arena_) ImpreciseConstLoType(value, entries_.size());
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
}
const ConstantType& RegTypeCache::FromCat2ConstHi(int32_t value, bool precise) {
@@ -551,32 +570,30 @@
}
ConstantType* entry;
if (precise) {
- entry = new PreciseConstHiType(value, entries_.size());
+ entry = new (&arena_) PreciseConstHiType(value, entries_.size());
} else {
- entry = new ImpreciseConstHiType(value, entries_.size());
+ entry = new (&arena_) ImpreciseConstHiType(value, entries_.size());
}
- AddEntry(entry);
- return *entry;
+ return AddEntry(entry);
}
const RegType& RegTypeCache::GetComponentType(const RegType& array, mirror::ClassLoader* loader) {
if (!array.IsArrayTypes()) {
return Conflict();
} else if (array.IsUnresolvedTypes()) {
- const std::string& descriptor(array.GetDescriptor());
- const std::string component(descriptor.substr(1, descriptor.size() - 1));
- return FromDescriptor(loader, component.c_str(), false);
+ const std::string descriptor(array.GetDescriptor().as_string());
+ return FromDescriptor(loader, descriptor.c_str() + 1, false);
} else {
mirror::Class* klass = array.GetClass()->GetComponentType();
std::string temp;
+ const char* descriptor = klass->GetDescriptor(&temp);
if (klass->IsErroneous()) {
// Arrays may have erroneous component types; use unresolved in that case.
// We assume that the primitive classes are not erroneous, so we know it is a
// reference type.
- return FromDescriptor(loader, klass->GetDescriptor(&temp), false);
+ return FromDescriptor(loader, descriptor, false);
} else {
- return FromClass(klass->GetDescriptor(&temp), klass,
- klass->CannotBeAssignedFromOtherTypes());
+ return FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes());
}
}
}
@@ -618,10 +635,10 @@
for (size_t i = primitive_count_; i < entries_.size(); ++i) {
entries_[i]->VisitRoots(visitor, root_info);
}
-}
-
-void RegTypeCache::AddEntry(RegType* new_entry) {
- entries_.push_back(new_entry);
+ for (auto& pair : klass_entries_) {
+ GcRoot<mirror::Class>& root = pair.first;
+ root.VisitRoot(visitor, root_info);
+ }
}
} // namespace verifier
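Because descriptors are now StringPiece views, AddString (above) copies each descriptor's bytes into the arena so the views outlive whatever transient buffer they came from. The shape of that interning step, sketched with a stand-in arena and std::string_view in place of ART's ScopedArenaAllocator and StringPiece:

```cpp
#include <cstddef>
#include <cstring>
#include <string>
#include <string_view>
#include <vector>

// Stand-in arena; ART uses ScopedArenaAllocator::AllocArray<char>.
struct Arena {
  char* AllocArray(std::size_t n) {
    buffers_.emplace_back(n);
    return buffers_.back().data();
  }
  std::vector<std::vector<char>> buffers_;
};

// Copy the view's bytes into arena-owned storage, return a view of the copy.
std::string_view AddString(Arena& arena, std::string_view piece) {
  char* ptr = arena.AllocArray(piece.length());
  std::memcpy(ptr, piece.data(), piece.length());
  return std::string_view(ptr, piece.length());
}

int main() {
  Arena arena;
  std::string_view durable;
  {
    std::string transient = "Ljava/lang/String;";  // Dies with this scope.
    durable = AddString(arena, transient);
  }
  // Still valid: the bytes now live in the arena, not in `transient`.
  return durable == "Ljava/lang/String;" ? 0 : 1;
}
```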
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 93948a1..6f9a04e 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -19,6 +19,7 @@
#include "base/casts.h"
#include "base/macros.h"
+#include "base/scoped_arena_containers.h"
#include "object_callbacks.h"
#include "reg_type.h"
#include "runtime.h"
@@ -31,15 +32,19 @@
class Class;
class ClassLoader;
} // namespace mirror
+class ScopedArenaAllocator;
class StringPiece;
namespace verifier {
class RegType;
+// Use 8 bytes since that is the default arena allocator alignment.
+static constexpr size_t kDefaultArenaBitVectorBytes = 8;
+
class RegTypeCache {
public:
- explicit RegTypeCache(bool can_load_classes);
+ explicit RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena);
~RegTypeCache();
static void Init() SHARED_REQUIRES(Locks::mutator_lock_) {
if (!RegTypeCache::primitive_initialized_) {
@@ -53,6 +58,13 @@
const art::verifier::RegType& GetFromId(uint16_t id) const;
const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Find the RegType for the given class and precision; returns null if not found.
+ const RegType* FindClass(mirror::Class* klass, bool precise) const
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ // Insert a new class with a specified descriptor, must not already be in the cache.
+ const RegType* InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool precise)
+ SHARED_REQUIRES(Locks::mutator_lock_);
+ // Get or insert a reg type for a descriptor, klass, and precision.
const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
SHARED_REQUIRES(Locks::mutator_lock_);
const ConstantType& FromCat1Const(int32_t value, bool precise)
@@ -150,7 +162,13 @@
const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
SHARED_REQUIRES(Locks::mutator_lock_);
- void AddEntry(RegType* new_entry);
+ // Returns the passed-in RegType.
+ template <class RegTypeType>
+ RegTypeType& AddEntry(RegTypeType* new_entry) SHARED_REQUIRES(Locks::mutator_lock_);
+
+ // Add a string piece to the arena allocator so that it stays live for the lifetime of the
+ // verifier.
+ StringPiece AddString(const StringPiece& string_piece);
template <class Type>
static const Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
@@ -160,7 +178,8 @@
// A quick look up for popular small constants.
static constexpr int32_t kMinSmallConstant = -1;
static constexpr int32_t kMaxSmallConstant = 4;
- static const PreciseConstType* small_precise_constants_[kMaxSmallConstant - kMinSmallConstant + 1];
+ static const PreciseConstType* small_precise_constants_[kMaxSmallConstant -
+ kMinSmallConstant + 1];
static constexpr size_t kNumPrimitivesAndSmallConstants =
12 + (kMaxSmallConstant - kMinSmallConstant + 1);
@@ -172,11 +191,17 @@
static uint16_t primitive_count_;
// The actual storage for the RegTypes.
- std::vector<const RegType*> entries_;
+ ScopedArenaVector<const RegType*> entries_;
+
+ // Fast lookup for quickly finding entries that have a matching class.
+ ScopedArenaVector<std::pair<GcRoot<mirror::Class>, const RegType*>> klass_entries_;
// Whether or not we're allowed to load classes.
const bool can_load_classes_;
+ // Arena allocator.
+ ScopedArenaAllocator& arena_;
+
DISALLOW_COPY_AND_ASSIGN(RegTypeCache);
};
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index 971b1f5..22ac7e4 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -20,6 +20,7 @@
#include "base/bit_vector.h"
#include "base/casts.h"
+#include "base/scoped_arena_allocator.h"
#include "common_runtime_test.h"
#include "reg_type_cache-inl.h"
#include "reg_type-inl.h"
@@ -29,12 +30,23 @@
namespace art {
namespace verifier {
-class RegTypeTest : public CommonRuntimeTest {};
+class BaseRegTypeTest : public CommonRuntimeTest {
+ public:
+ void PostRuntimeCreate() OVERRIDE {
+ stack.reset(new ArenaStack(Runtime::Current()->GetArenaPool()));
+ allocator.reset(new ScopedArenaAllocator(stack.get()));
+ }
+
+ std::unique_ptr<ArenaStack> stack;
+ std::unique_ptr<ScopedArenaAllocator> allocator;
+};
+
+class RegTypeTest : public BaseRegTypeTest {};
TEST_F(RegTypeTest, ConstLoHi) {
// Tests creating primitive types.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& ref_type_const_0 = cache.FromCat1Const(10, true);
const RegType& ref_type_const_1 = cache.FromCat1Const(10, true);
const RegType& ref_type_const_2 = cache.FromCat1Const(30, true);
@@ -56,7 +68,7 @@
TEST_F(RegTypeTest, Pairs) {
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
int64_t val = static_cast<int32_t>(1234);
const RegType& precise_lo = cache.FromCat2ConstLo(static_cast<int32_t>(val), true);
const RegType& precise_hi = cache.FromCat2ConstHi(static_cast<int32_t>(val >> 32), true);
@@ -80,7 +92,7 @@
TEST_F(RegTypeTest, Primitives) {
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& bool_reg_type = cache.Boolean();
EXPECT_FALSE(bool_reg_type.IsUndefined());
@@ -347,13 +359,13 @@
EXPECT_TRUE(double_reg_type.HasClass());
}
-class RegTypeReferenceTest : public CommonRuntimeTest {};
+class RegTypeReferenceTest : public BaseRegTypeTest {};
TEST_F(RegTypeReferenceTest, JavalangObjectImprecise) {
// Tests matching precisions. A reference type that was created precise doesn't
// match the one that is imprecise.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& imprecise_obj = cache.JavaLangObject(false);
const RegType& precise_obj = cache.JavaLangObject(true);
const RegType& precise_obj_2 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true);
@@ -368,7 +380,7 @@
// Tests creating unresolved types: a miss the first time we ask the cache,
// then a hit the second time.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
EXPECT_TRUE(ref_type_0.IsNonZeroReferenceTypes());
@@ -384,7 +396,7 @@
TEST_F(RegTypeReferenceTest, UnresolvedUnintializedType) {
// Tests creating types uninitialized types from unresolved types.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& ref_type_0 = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
EXPECT_TRUE(ref_type_0.IsUnresolvedReference());
const RegType& ref_type = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
@@ -406,7 +418,7 @@
TEST_F(RegTypeReferenceTest, Dump) {
// Tests types for proper Dump messages.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& unresolved_ref = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExist;", true);
const RegType& unresolved_ref_another = cache.FromDescriptor(nullptr, "Ljava/lang/DoesNotExistEither;", true);
const RegType& resolved_ref = cache.JavaLangString();
@@ -431,7 +443,7 @@
// Hit the second time. Then check for the same effect when using
// the JavaLangObject method instead of FromDescriptor. The String class is final.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& ref_type = cache.JavaLangString();
const RegType& ref_type_2 = cache.JavaLangString();
const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/String;", true);
@@ -451,7 +463,7 @@
// Hit the second time. Then check for the same effect when using
// the JavaLangObject method instead of FromDescriptor. The Object class is not final.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache(true);
+ RegTypeCache cache(true, *allocator);
const RegType& ref_type = cache.JavaLangObject(true);
const RegType& ref_type_2 = cache.JavaLangObject(true);
const RegType& ref_type_3 = cache.FromDescriptor(nullptr, "Ljava/lang/Object;", true);
@@ -464,7 +476,7 @@
// Tests merging logic.
// String and Object: the LUB is Object.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache_new(true);
+ RegTypeCache cache_new(true, *allocator);
const RegType& string = cache_new.JavaLangString();
const RegType& Object = cache_new.JavaLangObject(true);
EXPECT_TRUE(string.Merge(Object, &cache_new).IsJavaLangObject());
@@ -487,7 +499,7 @@
TEST_F(RegTypeTest, MergingFloat) {
// Testing merging logic with float and float constants.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache_new(true);
+ RegTypeCache cache_new(true, *allocator);
constexpr int32_t kTestConstantValue = 10;
const RegType& float_type = cache_new.Float();
@@ -518,7 +530,7 @@
TEST_F(RegTypeTest, MergingLong) {
// Testing merging logic with long and long constants.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache_new(true);
+ RegTypeCache cache_new(true, *allocator);
constexpr int32_t kTestConstantValue = 10;
const RegType& long_lo_type = cache_new.LongLo();
@@ -572,7 +584,7 @@
TEST_F(RegTypeTest, MergingDouble) {
// Testing merging logic with double and double constants.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache_new(true);
+ RegTypeCache cache_new(true, *allocator);
constexpr int32_t kTestConstantValue = 10;
const RegType& double_lo_type = cache_new.DoubleLo();
@@ -626,7 +638,7 @@
TEST_F(RegTypeTest, ConstPrecision) {
// Tests creating primitive types.
ScopedObjectAccess soa(Thread::Current());
- RegTypeCache cache_new(true);
+ RegTypeCache cache_new(true, *allocator);
const RegType& imprecise_const = cache_new.FromCat1Const(10, false);
const RegType& precise_const = cache_new.FromCat1Const(10, true);
diff --git a/runtime/verifier/register_line-inl.h b/runtime/verifier/register_line-inl.h
index 1df2428..57fb701 100644
--- a/runtime/verifier/register_line-inl.h
+++ b/runtime/verifier/register_line-inl.h
@@ -182,6 +182,21 @@
}
}
+inline RegisterLine* RegisterLine::Create(size_t num_regs, MethodVerifier* verifier) {
+ void* memory = verifier->GetArena().Alloc(OFFSETOF_MEMBER(RegisterLine, line_) +
+ (num_regs * sizeof(uint16_t)));
+ return new (memory) RegisterLine(num_regs, verifier);
+}
+
+inline RegisterLine::RegisterLine(size_t num_regs, MethodVerifier* verifier)
+ : num_regs_(num_regs),
+ monitors_(verifier->GetArena().Adapter(kArenaAllocVerifier)),
+ reg_to_lock_depths_(std::less<uint32_t>(), verifier->GetArena().Adapter(kArenaAllocVerifier)),
+ this_initialized_(false) {
+ std::uninitialized_fill_n(line_, num_regs_, 0u);
+ SetResultTypeToUnknown(verifier);
+}
+
} // namespace verifier
} // namespace art
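RegisterLine keeps its per-register type ids in a trailing array, so Create above allocates the header plus num_regs * sizeof(uint16_t) as one arena block and placement-constructs the object on top. The same trailing-array trick in standalone form, approximating OFFSETOF_MEMBER with offsetof, and accepting the same formally out-of-bounds writes past line_[1] that the original idiom relies on:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <new>

struct Line {
  // One allocation covers the header up to line_ plus num_regs slots.
  static Line* Create(std::size_t num_regs) {
    void* memory =
        std::malloc(offsetof(Line, line_) + num_regs * sizeof(uint16_t));
    return new (memory) Line(num_regs);
  }

  explicit Line(std::size_t num_regs) : num_regs_(num_regs) {
    // Mirrors the fill past line_[1] done by RegisterLine's constructor.
    std::uninitialized_fill_n(line_, num_regs_, uint16_t{0});
  }

  const std::size_t num_regs_;
  uint16_t line_[1];  // Trailing storage; real length fixed by Create().
};

int main() {
  Line* line = Line::Create(16);
  line->line_[15] = 7;  // In bounds: Create() reserved 16 slots.
  bool ok = (line->line_[15] == 7);
  std::free(line);  // ART instead lets the verifier's arena reclaim it.
  return ok ? 0 : 1;
}
```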
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index f48b1e1..37343b5 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -412,12 +412,9 @@
}
}
-// Check whether there is another register in the search map that is locked the same way as the
-// register in the src map. This establishes an alias.
-static bool FindLockAliasedRegister(
- uint32_t src,
- const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& src_map,
- const AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier>& search_map) {
+bool FindLockAliasedRegister(uint32_t src,
+ const RegisterLine::RegToLockDepthsMap& src_map,
+ const RegisterLine::RegToLockDepthsMap& search_map) {
auto it = src_map.find(src);
if (it == src_map.end()) {
// "Not locked" is trivially aliased.
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 46db1c6..b2f5555 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -20,6 +20,7 @@
#include <memory>
#include <vector>
+#include "base/scoped_arena_containers.h"
#include "safe_map.h"
namespace art {
@@ -58,11 +59,11 @@
// stack of entered monitors (identified by code unit offset).
class RegisterLine {
public:
- static RegisterLine* Create(size_t num_regs, MethodVerifier* verifier) {
- void* memory = operator new(sizeof(RegisterLine) + (num_regs * sizeof(uint16_t)));
- RegisterLine* rl = new (memory) RegisterLine(num_regs, verifier);
- return rl;
- }
+ // A map from register to a bit vector of indices into the monitors_ stack.
+ using RegToLockDepthsMap = ScopedArenaSafeMap<uint32_t, uint32_t>;
+
+ // Create a register line of num_regs registers.
+ static RegisterLine* Create(size_t num_regs, MethodVerifier* verifier);
// Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst".
void CopyRegister1(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc, TypeCategory cat)
@@ -311,11 +312,11 @@
// Write a bit at each register location that holds a reference.
void WriteReferenceBitMap(MethodVerifier* verifier, std::vector<uint8_t>* data, size_t max_bytes);
- size_t GetMonitorEnterCount() {
+ size_t GetMonitorEnterCount() const {
return monitors_.size();
}
- uint32_t GetMonitorEnterDexPc(size_t i) {
+ uint32_t GetMonitorEnterDexPc(size_t i) const {
return monitors_[i];
}
@@ -375,11 +376,7 @@
reg_to_lock_depths_.erase(reg);
}
- RegisterLine(size_t num_regs, MethodVerifier* verifier)
- : num_regs_(num_regs), this_initialized_(false) {
- memset(&line_, 0, num_regs_ * sizeof(uint16_t));
- SetResultTypeToUnknown(verifier);
- }
+ RegisterLine(size_t num_regs, MethodVerifier* verifier);
// Storage for the result register's type, valid after an invocation.
uint16_t result_[2];
@@ -388,17 +385,18 @@
const uint32_t num_regs_;
// A stack of monitor enter locations.
- std::vector<uint32_t, TrackingAllocator<uint32_t, kAllocatorTagVerifier>> monitors_;
+ ScopedArenaVector<uint32_t> monitors_;
+
// A map from register to a bit vector of indices into the monitors_ stack. As we pop the monitor
// stack we verify that monitor-enter/exit are correctly nested. That is, if there was a
// monitor-enter on v5 and then on v6, we expect the monitor-exit to be on v6 then on v5.
- AllocationTrackingSafeMap<uint32_t, uint32_t, kAllocatorTagVerifier> reg_to_lock_depths_;
+ RegToLockDepthsMap reg_to_lock_depths_;
// Whether "this" initialization (a constructor supercall) has happened.
bool this_initialized_;
// An array of RegType Ids associated with each dex register.
- uint16_t line_[0];
+ uint16_t line_[1];
DISALLOW_COPY_AND_ASSIGN(RegisterLine);
};
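For context on the RegToLockDepthsMap alias above: each mapped value is a bit set of depths in the monitors_ stack, which is what lets the verifier insist that monitor-enter/exit are properly nested (enter on v5 then v6 must unwind as exit v6 then v5). A toy model of that check, using stand-in types rather than ART's RegisterLine API:

```cpp
#include <cstdint>
#include <map>
#include <vector>

struct Locks {
  std::vector<uint32_t> monitors;              // Stack of monitor-enter dex pcs.
  std::map<uint32_t, uint32_t> reg_to_depths;  // reg -> bit set of stack depths.

  void Enter(uint32_t reg, uint32_t dex_pc) {
    reg_to_depths[reg] |= 1u << monitors.size();  // Mark depth before push.
    monitors.push_back(dex_pc);
  }

  // Exit succeeds only if `reg` holds the lock on top of the stack.
  bool Exit(uint32_t reg) {
    if (monitors.empty()) return false;
    uint32_t top = static_cast<uint32_t>(monitors.size()) - 1u;
    auto it = reg_to_depths.find(reg);
    if (it == reg_to_depths.end() || (it->second & (1u << top)) == 0) {
      return false;
    }
    it->second &= ~(1u << top);
    monitors.pop_back();
    return true;
  }
};

int main() {
  Locks locks;
  locks.Enter(/*reg=*/5, /*dex_pc=*/10);
  locks.Enter(/*reg=*/6, /*dex_pc=*/20);
  bool ok = locks.Exit(6) && locks.Exit(5);  // Properly nested.
  bool bad = locks.Exit(5);                  // Stack already empty.
  return (ok && !bad) ? 0 : 1;
}
```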
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
index ea35b37..c766aaa 100644
--- a/test/530-checker-lse/src/Main.java
+++ b/test/530-checker-lse/src/Main.java
@@ -56,7 +56,7 @@
/// CHECK-START: double Main.calcCircleArea(double) load_store_elimination (after)
/// CHECK: NewInstance
- /// CHECK-NOT: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
/// CHECK-NOT: InstanceFieldGet
static double calcCircleArea(double radius) {
@@ -117,8 +117,7 @@
/// CHECK: InstanceFieldGet
/// CHECK: InstanceFieldSet
/// CHECK: NewInstance
- /// CHECK-NOT: NullCheck
- /// CHECK-NOT: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
/// CHECK-NOT: InstanceFieldGet
// A new allocation shouldn't alias with pre-existing values.
@@ -224,7 +223,7 @@
/// CHECK-START: int Main.test8() load_store_elimination (after)
/// CHECK: NewInstance
- /// CHECK-NOT: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
/// CHECK: InvokeVirtual
/// CHECK-NOT: NullCheck
/// CHECK-NOT: InstanceFieldGet
diff --git a/test/955-lambda-smali/run b/test/955-lambda-smali/run
index b754680..2fb2f89 100755
--- a/test/955-lambda-smali/run
+++ b/test/955-lambda-smali/run
@@ -15,4 +15,4 @@
# limitations under the License.
# Ensure that the lambda experimental opcodes are turned on for dalvikvm and dex2oat
-${RUN} "$@" --runtime-option -Xexperimental:lambdas -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:lambdas
+${RUN} "$@" --experimental lambdas
diff --git a/test/960-default-smali/build b/test/960-default-smali/build
index c786687..06692f9 100755
--- a/test/960-default-smali/build
+++ b/test/960-default-smali/build
@@ -18,7 +18,7 @@
set -e
# Generate the smali Main.smali file or fail
-./util-src/generate_smali.py ./smali
+${ANDROID_BUILD_TOP}/art/test/utils/python/generate_smali_main.py ./smali
if [[ $@ == *"--jvm"* ]]; then
# Build the Java files if we are running a --jvm test
@@ -29,5 +29,5 @@
fi
# Build the smali files and make a dex
-${SMALI} -JXmx256m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx256m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
zip "$TEST_NAME.jar" classes.dex
diff --git a/test/960-default-smali/info.txt b/test/960-default-smali/info.txt
index eb596e2..9583abb 100644
--- a/test/960-default-smali/info.txt
+++ b/test/960-default-smali/info.txt
@@ -2,15 +2,16 @@
Obviously needs to run under ART or a Java 8 Language runtime and compiler.
-When run a Main.smali file will be generated by the util-src/generate_smali.py
-script. If we run with --jvm we will use the tools/extract-embedded-java script to
-turn the smali into equivalent Java using the embedded Java code.
+When run, a Main.smali file will be generated by the
+test/utils/python/generate_smali_main.py script. If we run with --jvm we will
+use the tools/extract-embedded-java script to turn the smali into equivalent
+Java using the embedded Java code.
When updating be sure to write the equivalent Java code in comments of the smali
files.
-Care should be taken when updating the generate_smali.py script. It must always
-return equivalent output when run multiple times.
+Care should be taken when updating the generate_smali_main.py script. It must
+always return equivalent output when run multiple times.
To update the test files do the following steps:
<Add new classes/interfaces>
diff --git a/test/960-default-smali/run b/test/960-default-smali/run
index e378b06..22f6800 100755
--- a/test/960-default-smali/run
+++ b/test/960-default-smali/run
@@ -14,8 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-if echo $@ | grep -q -- "--jvm"; then
- ${RUN} "$@"
-else
- ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/961-default-iface-resolution-generated/build b/test/961-default-iface-resolution-generated/build
index 707c17e..5eb851f 100755
--- a/test/961-default-iface-resolution-generated/build
+++ b/test/961-default-iface-resolution-generated/build
@@ -40,7 +40,7 @@
fi
# Build the smali files and make a dex
-${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
zip $TEST_NAME.jar classes.dex
# Reset the ulimit back to its initial value
diff --git a/test/961-default-iface-resolution-generated/run b/test/961-default-iface-resolution-generated/run
index e378b06..22f6800 100755
--- a/test/961-default-iface-resolution-generated/run
+++ b/test/961-default-iface-resolution-generated/run
@@ -14,8 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-if echo $@ | grep -q -- "--jvm"; then
- ${RUN} "$@"
-else
- ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/962-iface-static/build b/test/962-iface-static/build
index 5ad82f7..06bb3bd 100755
--- a/test/962-iface-static/build
+++ b/test/962-iface-static/build
@@ -26,5 +26,5 @@
fi
# Build the smali files and make a dex
-${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
zip $TEST_NAME.jar classes.dex
diff --git a/test/962-iface-static/run b/test/962-iface-static/run
index e713708..d37737f 100755
--- a/test/962-iface-static/run
+++ b/test/962-iface-static/run
@@ -14,8 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-if echo $@ | grep -q -- "--jvm"; then
- ${RUN} "$@"
-else
- ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/963-default-range-smali/build b/test/963-default-range-smali/build
index 5ad82f7..06bb3bd 100755
--- a/test/963-default-range-smali/build
+++ b/test/963-default-range-smali/build
@@ -26,5 +26,5 @@
fi
# Build the smali files and make a dex
-${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
zip $TEST_NAME.jar classes.dex
diff --git a/test/963-default-range-smali/run b/test/963-default-range-smali/run
index e713708..d37737f 100755
--- a/test/963-default-range-smali/run
+++ b/test/963-default-range-smali/run
@@ -14,8 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-if echo $@ | grep -q -- "--jvm"; then
- ${RUN} "$@"
-else
- ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/964-default-iface-init-generated/build b/test/964-default-iface-init-generated/build
index deef803..b0fbe4b 100755
--- a/test/964-default-iface-init-generated/build
+++ b/test/964-default-iface-init-generated/build
@@ -38,7 +38,7 @@
fi
# Build the smali files and make a dex
-${SMALI} -JXmx512m --experimental --api-level 23 --output classes.dex $(find smali -name '*.smali')
+${SMALI} -JXmx512m ${SMALI_ARGS} --output classes.dex $(find smali -name '*.smali')
zip $TEST_NAME.jar classes.dex
# Reset the ulimit back to its initial value
diff --git a/test/964-default-iface-init-generated/run b/test/964-default-iface-init-generated/run
index e378b06..22f6800 100755
--- a/test/964-default-iface-init-generated/run
+++ b/test/964-default-iface-init-generated/run
@@ -14,8 +14,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-if echo $@ | grep -q -- "--jvm"; then
- ${RUN} "$@"
-else
- ${RUN} "$@" --runtime-option -Xexperimental:default-methods -Xcompiler-option --runtime-arg -Xcompiler-option -Xexperimental:default-methods
-fi
+${RUN} --experimental default-methods "$@"
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 9c04135..6ce3d94 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -441,55 +441,9 @@
# Known broken tests for the mips32 optimizing compiler backend.
TEST_ART_BROKEN_OPTIMIZING_MIPS_RUN_TESTS := \
441-checker-inliner \
- 442-checker-constant-folding \
- 444-checker-nce \
- 445-checker-licm \
- 446-checker-inliner2 \
- 447-checker-inliner3 \
- 449-checker-bce \
- 450-checker-types \
- 455-checker-gvn \
- 458-checker-instruction-simplification \
- 462-checker-inlining-across-dex-files \
- 463-checker-boolean-simplifier \
- 464-checker-inline-sharpen-calls \
- 465-checker-clinit-gvn \
- 468-checker-bool-simplifier-regression \
- 473-checker-inliner-constants \
- 474-checker-boolean-input \
- 476-checker-ctor-memory-barrier \
- 477-checker-bound-type \
- 478-checker-clinit-check-pruning \
- 478-checker-inliner-nested-loop \
- 480-checker-dead-blocks \
- 482-checker-loop-back-edge-use \
- 484-checker-register-hints \
- 485-checker-dce-loop-update \
- 485-checker-dce-switch \
- 486-checker-must-do-null-check \
- 487-checker-inline-calls \
- 488-checker-inline-recursive-calls \
- 490-checker-inline \
- 492-checker-inline-invoke-interface \
- 493-checker-inline-invoke-interface \
- 494-checker-instanceof-tests \
- 495-checker-checkcast-tests \
- 496-checker-inlining-and-class-loader \
- 508-checker-disassembly \
510-checker-try-catch \
- 517-checker-builder-fallthrough \
521-checker-array-set-null \
- 522-checker-regression-monitor-exit \
- 523-checker-can-throw-regression \
- 525-checker-arrays-and-fields \
- 526-checker-caller-callee-regs \
- 529-checker-unresolved \
- 530-checker-loops \
- 530-checker-regression-reftype-final \
- 532-checker-nonnull-arrayset \
- 534-checker-bce-deoptimization \
536-checker-intrinsic-optimization \
- 537-checker-debuggable \
ifeq (mips,$(TARGET_ARCH))
ifneq (,$(filter optimizing,$(COMPILER_TYPES)))
diff --git a/test/etc/default-build b/test/etc/default-build
index c92402b..4743216 100755
--- a/test/etc/default-build
+++ b/test/etc/default-build
@@ -96,7 +96,7 @@
if [ -d smali ]; then
# Compile Smali classes
- ${SMALI} -JXmx256m --experimental --api-level 23 --output smali_classes.dex `find smali -name '*.smali'`
+ ${SMALI} -JXmx256m ${SMALI_ARGS} --output smali_classes.dex `find smali -name '*.smali'`
# Don't bother with dexmerger if we provide our own main function in a smali file.
if [ ${SKIP_DX_MERGER} = "false" ]; then
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index fbefa07..280b4bc 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -18,6 +18,7 @@
DEBUGGER="n"
DEV_MODE="n"
DEX2OAT=""
+EXPERIMENTAL=""
FALSE_BIN="/system/bin/false"
FLAGS=""
GDB=""
@@ -196,6 +197,13 @@
FLAGS="${FLAGS} -Xcompiler-option --compile-pic"
COMPILE_FLAGS="${COMPILE_FLAGS} --compile-pic"
shift
+ elif [ "x$1" = "x--experimental" ]; then
+ if [ "$#" -lt 2 ]; then
+ echo "missing --experimental option" 1>&2
+ exit 1
+ fi
+ EXPERIMENTAL="$EXPERIMENTAL $2"
+ shift 2
elif expr "x$1" : "x--" >/dev/null 2>&1; then
echo "unknown $0 option: $1" 1>&2
exit 1
@@ -204,6 +212,13 @@
fi
done
+if [ "$USE_JVM" = "n" ]; then
+ for feature in ${EXPERIMENTAL}; do
+ FLAGS="${FLAGS} -Xexperimental:${feature}"
+ COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xexperimental:${feature}"
+ done
+fi
+
if [ "x$1" = "x" ] ; then
MAIN="Main"
else
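To make the new control flow concrete, here is a minimal, self-contained shell sketch of how the option is parsed and later expanded; it mirrors the two hunks above (same variable names), with the trailing echo lines added purely for illustration:

    #!/bin/bash
    # Parse: collect features named by repeated --experimental options.
    EXPERIMENTAL=""
    USE_JVM="n"          # run-test-jar sets this to "y" under --jvm
    FLAGS=""
    COMPILE_FLAGS=""
    while [ $# -gt 0 ]; do
      if [ "x$1" = "x--experimental" ]; then
        if [ "$#" -lt 2 ]; then
          echo "missing argument to --experimental" 1>&2
          exit 1
        fi
        EXPERIMENTAL="$EXPERIMENTAL $2"
        shift 2
      else
        shift
      fi
    done
    # Expand: ART-only flags, skipped for reference-implementation (--jvm) runs.
    if [ "$USE_JVM" = "n" ]; then
      for feature in ${EXPERIMENTAL}; do
        FLAGS="${FLAGS} -Xexperimental:${feature}"
        COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xexperimental:${feature}"
      done
    fi
    echo "runtime flags:${FLAGS}"
    echo "dex2oat flags:${COMPILE_FLAGS}"

Running the sketch as ./sketch.sh --experimental default-methods --experimental lambdas would emit -Xexperimental:default-methods and -Xexperimental:lambdas in both flag sets, matching what the old run scripts spelled out by hand.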
diff --git a/test/run-test b/test/run-test
index 293779f..5a43fb0 100755
--- a/test/run-test
+++ b/test/run-test
@@ -46,6 +46,7 @@
export DEX_LOCATION=/data/run-test/${test_dir}
export NEED_DEX="true"
export USE_JACK="false"
+export SMALI_ARGS="--experimental --api-level 23"
# If dx was not set by the environment variable, assume it is in the path.
if [ -z "$DX" ]; then
diff --git a/test/960-default-smali/util-src/generate_smali.py b/test/utils/python/generate_smali_main.py
similarity index 98%
rename from test/960-default-smali/util-src/generate_smali.py
rename to test/utils/python/generate_smali_main.py
index b2bf1f0..d796d31 100755
--- a/test/960-default-smali/util-src/generate_smali.py
+++ b/test/utils/python/generate_smali_main.py
@@ -15,7 +15,7 @@
# limitations under the License.
"""
-Generate Smali Main file for test 960
+Generate Smali Main file from a classes.xml file.
"""
import os
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index 81ea79a..9a8b462 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -159,13 +159,6 @@
bug: 22786792
},
{
- description: "Formatting failures",
- result: EXEC_FAILED,
- names: ["libcore.java.text.NumberFormatTest#test_currencyFromLocale",
- "libcore.java.text.NumberFormatTest#test_currencyWithPatternDigits"],
- bug: 25136848
-},
-{
description: "Lack of IPv6 on some buildbot slaves",
result: EXEC_FAILED,
names: ["libcore.io.OsTest#test_byteBufferPositions_sendto_recvfrom_af_inet6",
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index edec362..9aed271 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -88,7 +88,8 @@
fi
done
-vm_args="--vm-arg $image"
+vm_args="--vm-arg $image --vm-arg -Xusejit:true"
+debuggee_args="$debuggee_args -Xusejit:true"
if [[ $debug == "yes" ]]; then
art="$art -d"
art_debugee="$art_debugee -d"
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 80f7a37..67a7983 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -102,4 +102,4 @@
# Run the tests using vogar.
echo "Running tests for the following test packages:"
echo ${working_packages[@]} | tr " " "\n"
-vogar $vogar_args --expectations art/tools/libcore_failures.txt --classpath $jsr166_test_jar --classpath $test_jar ${working_packages[@]}
+vogar $vogar_args --vm-arg -Xusejit:true --expectations art/tools/libcore_failures.txt --classpath $jsr166_test_jar --classpath $test_jar ${working_packages[@]}