Merge "Make test 146 run with secondary dex file in classpath."
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index ecc9e76..7be1894 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -93,11 +93,10 @@
HOST_CORE_DEX_FILES := $(foreach jar,$(HOST_CORE_JARS), $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
-ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
# Classpath for Jack compilation: we only need core-libart.
HOST_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj-hostdex,t,COMMON)/classes.jack $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack
HOST_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj-hostdex,t,COMMON)/classes.jack):$(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart-hostdex,t,COMMON)/classes.jack)
TARGET_JACK_CLASSPATH_DEPENDENCIES := $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj, ,COMMON)/classes.jack $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack
TARGET_JACK_CLASSPATH := $(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-oj, ,COMMON)/classes.jack):$(abspath $(call intermediates-dir-for,JAVA_LIBRARIES,core-libart, ,COMMON)/classes.jack)
-endif
+
endif # ART_ANDROID_COMMON_PATH_MK
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index cde41e0..df7df26 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -201,7 +201,6 @@
LOCAL_MODULE_PATH := $(3)
LOCAL_DEX_PREOPT_IMAGE_LOCATION := $(TARGET_CORE_IMG_OUT)
ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
- LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp
endif
include $(BUILD_JAVA_LIBRARY)
@@ -217,7 +216,6 @@
LOCAL_JAVA_LIBRARIES := $(HOST_CORE_JARS)
LOCAL_DEX_PREOPT_IMAGE := $(HOST_CORE_IMG_LOCATION)
ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
- LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp
endif
include $(BUILD_HOST_DALVIK_JAVA_LIBRARY)
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index a6e6f8b..e8e278d 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -180,8 +180,8 @@
std::vector<DexRegisterMap> dex_reg_maps;
if (mi->code_info != nullptr) {
const CodeInfo code_info(mi->code_info);
- StackMapEncoding encoding = code_info.ExtractEncoding();
- for (size_t s = 0; s < code_info.GetNumberOfStackMaps(); ++s) {
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ for (size_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); ++s) {
const StackMap& stack_map = code_info.GetStackMapAt(s, encoding);
dex_reg_maps.push_back(code_info.GetDexRegisterMapOf(
stack_map, encoding, dex_code->registers_size_));
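
Most of the compiler and runtime hunks in this patch are the same mechanical migration: whole-CodeInfo queries (number of stack maps, stack map lookup, dex register maps) now take the new CodeInfoEncoding, while per-field StackMap accessors take only its nested stack_map_encoding. A minimal sketch of the resulting read idiom, assuming the stack_map.h API exactly as exercised above:

    const CodeInfo code_info(mi->code_info);
    const CodeInfoEncoding encoding = code_info.ExtractEncoding();
    for (size_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); ++s) {
      StackMap stack_map = code_info.GetStackMapAt(s, encoding);
      // Per-stack-map fields only need the nested StackMapEncoding:
      const uint32_t native_pc = stack_map.GetNativePcOffset(encoding.stack_map_encoding);
      const uint32_t dex_pc = stack_map.GetDexPc(encoding.stack_map_encoding);
      // ... use native_pc / dex_pc ...
      // Dex register maps are still resolved through the enclosing CodeInfo:
      // code_info.GetDexRegisterMapOf(stack_map, encoding, num_dex_registers);
    }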
diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h
index 66e135f..3db7306 100644
--- a/compiler/debug/elf_debug_line_writer.h
+++ b/compiler/debug/elf_debug_line_writer.h
@@ -98,15 +98,15 @@
if (mi->code_info != nullptr) {
// Use stack maps to create mapping table from pc to dex.
const CodeInfo code_info(mi->code_info);
- const StackMapEncoding encoding = code_info.ExtractEncoding();
- pc2dex_map.reserve(code_info.GetNumberOfStackMaps());
- for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(); s++) {
+ const CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ pc2dex_map.reserve(code_info.GetNumberOfStackMaps(encoding));
+ for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); s++) {
StackMap stack_map = code_info.GetStackMapAt(s, encoding);
DCHECK(stack_map.IsValid());
- const uint32_t pc = stack_map.GetNativePcOffset(encoding);
- const int32_t dex = stack_map.GetDexPc(encoding);
+ const uint32_t pc = stack_map.GetNativePcOffset(encoding.stack_map_encoding);
+ const int32_t dex = stack_map.GetDexPc(encoding.stack_map_encoding);
pc2dex_map.push_back({pc, dex});
- if (stack_map.HasDexRegisterMap(encoding)) {
+ if (stack_map.HasDexRegisterMap(encoding.stack_map_encoding)) {
// Guess that the first map with local variables is the end of prologue.
prologue_end = std::min(prologue_end, pc);
}
diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h
index 4712d47..9645643 100644
--- a/compiler/debug/elf_debug_loc_writer.h
+++ b/compiler/debug/elf_debug_loc_writer.h
@@ -98,12 +98,12 @@
// Get stack maps sorted by pc (they might not be sorted internally).
// TODO(dsrbecky) Remove this once stackmaps get sorted by pc.
const CodeInfo code_info(method_info->code_info);
- const StackMapEncoding encoding = code_info.ExtractEncoding();
+ const CodeInfoEncoding encoding = code_info.ExtractEncoding();
std::map<uint32_t, uint32_t> stack_maps; // low_pc -> stack_map_index.
- for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(); s++) {
+ for (uint32_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); s++) {
StackMap stack_map = code_info.GetStackMapAt(s, encoding);
DCHECK(stack_map.IsValid());
- if (!stack_map.HasDexRegisterMap(encoding)) {
+ if (!stack_map.HasDexRegisterMap(encoding.stack_map_encoding)) {
// The compiler creates stackmaps without register maps at the start of
// basic blocks in order to keep instruction-accurate line number mapping.
// However, we never stop at those (breakpoint locations always have map).
@@ -111,7 +111,7 @@
// The main reason for this is to save space by avoiding undefined gaps.
continue;
}
- const uint32_t pc_offset = stack_map.GetNativePcOffset(encoding);
+ const uint32_t pc_offset = stack_map.GetNativePcOffset(encoding.stack_map_encoding);
DCHECK_LE(pc_offset, method_info->code_size);
DCHECK_LE(compilation_unit_code_address, method_info->code_address);
const uint32_t low_pc = dchecked_integral_cast<uint32_t>(
@@ -135,7 +135,7 @@
}
// Check that the stack map is in the requested range.
- uint32_t dex_pc = stack_map.GetDexPc(encoding);
+ uint32_t dex_pc = stack_map.GetDexPc(encoding.stack_map_encoding);
if (!(dex_pc_low <= dex_pc && dex_pc < dex_pc_high)) {
// The variable is not in scope at this PC. Therefore omit the entry.
// Note that this is different to None() entry which means in scope, but unknown location.
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index f113921..7cf9072 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -640,7 +640,7 @@
const CodeInfo& code_info,
const ArenaVector<HSuspendCheck*>& loop_headers,
ArenaVector<size_t>* covered) {
- StackMapEncoding encoding = code_info.ExtractEncoding();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
for (size_t i = 0; i < loop_headers.size(); ++i) {
if (loop_headers[i]->GetDexPc() == dex_pc) {
if (graph.IsCompilingOsr()) {
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 3f41e35..c571312 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -137,34 +137,41 @@
size_t StackMapStream::PrepareForFillIn() {
int stack_mask_number_of_bits = stack_mask_max_ + 1; // Need room for max element too.
- stack_mask_size_ = RoundUp(stack_mask_number_of_bits, kBitsPerByte) / kBitsPerByte;
inline_info_size_ = ComputeInlineInfoSize();
dex_register_maps_size_ = ComputeDexRegisterMapsSize();
uint32_t max_native_pc_offset = ComputeMaxNativePcOffset();
- stack_map_encoding_ = StackMapEncoding::CreateFromSizes(stack_mask_size_,
- inline_info_size_,
- dex_register_maps_size_,
- dex_pc_max_,
- max_native_pc_offset,
- register_mask_max_);
- stack_maps_size_ = stack_maps_.size() * stack_map_encoding_.ComputeStackMapSize();
+ size_t stack_map_size = stack_map_encoding_.SetFromSizes(max_native_pc_offset,
+ dex_pc_max_,
+ dex_register_maps_size_,
+ inline_info_size_,
+ register_mask_max_,
+ stack_mask_number_of_bits);
+ stack_maps_size_ = stack_maps_.size() * stack_map_size;
dex_register_location_catalog_size_ = ComputeDexRegisterLocationCatalogSize();
- // Note: use RoundUp to word-size here if you want CodeInfo objects to be word aligned.
- needed_size_ = CodeInfo::kFixedSize
- + stack_maps_size_
- + dex_register_location_catalog_size_
- + dex_register_maps_size_
- + inline_info_size_;
+ size_t non_header_size =
+ stack_maps_size_ +
+ dex_register_location_catalog_size_ +
+ dex_register_maps_size_ +
+ inline_info_size_;
- stack_maps_start_ = CodeInfo::kFixedSize;
+ // Prepare the CodeInfo variable-sized encoding.
+ CodeInfoEncoding code_info_encoding;
+ code_info_encoding.non_header_size = non_header_size;
+ code_info_encoding.stack_map_encoding = stack_map_encoding_;
+ code_info_encoding.number_of_stack_maps = stack_maps_.size();
+ code_info_encoding.stack_map_size_in_bytes = stack_map_size;
+ code_info_encoding.number_of_location_catalog_entries = location_catalog_entries_.size();
+ code_info_encoding.Compress(&code_info_encoding_);
+
// TODO: Move the catalog at the end. It is currently too expensive at runtime
// to compute its size (note that we do not encode that size in the CodeInfo).
- dex_register_location_catalog_start_ = stack_maps_start_ + stack_maps_size_;
+ dex_register_location_catalog_start_ = code_info_encoding_.size() + stack_maps_size_;
dex_register_maps_start_ =
dex_register_location_catalog_start_ + dex_register_location_catalog_size_;
inline_infos_start_ = dex_register_maps_start_ + dex_register_maps_size_;
+ needed_size_ = code_info_encoding_.size() + non_header_size;
return needed_size_;
}
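
For orientation, the layout this computes, with the old fixed-size CodeInfo header replaced by the compressed, variable-sized CodeInfoEncoding (all sizes are the values measured above, nothing new):

    // [CodeInfoEncoding header]  code_info_encoding_.size() bytes
    // [stack maps]               stack_maps_size_ = stack_maps_.size() * stack_map_size
    // [dex register catalog]     dex_register_location_catalog_size_
    // [dex register maps]        dex_register_maps_size_
    // [inline infos]             inline_info_size_
    //
    // needed_size_ = code_info_encoding_.size() + non_header_size, so e.g.
    // dex_register_location_catalog_start_ =
    //     code_info_encoding_.size() + stack_maps_size_.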
@@ -227,9 +234,13 @@
DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry";
DCHECK_NE(0u, needed_size_) << "PrepareForFillIn not called before FillIn";
- CodeInfo code_info(region);
DCHECK_EQ(region.size(), needed_size_);
- code_info.SetOverallSize(region.size());
+
+ // Note that the memory region does not have to be zeroed when we JIT code
+ // because we do not use the arena allocator there.
+
+ // Write the CodeInfo header.
+ region.CopyFrom(0, MemoryRegion(code_info_encoding_.data(), code_info_encoding_.size()));
MemoryRegion dex_register_locations_region = region.Subregion(
dex_register_maps_start_, dex_register_maps_size_);
@@ -237,12 +248,11 @@
MemoryRegion inline_infos_region = region.Subregion(
inline_infos_start_, inline_info_size_);
- code_info.SetEncoding(stack_map_encoding_);
- code_info.SetNumberOfStackMaps(stack_maps_.size());
- DCHECK_EQ(code_info.GetStackMapsSize(code_info.ExtractEncoding()), stack_maps_size_);
+ CodeInfo code_info(region);
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ DCHECK_EQ(code_info.GetStackMapsSize(encoding), stack_maps_size_);
// Set the Dex register location catalog.
- code_info.SetNumberOfLocationCatalogEntries(location_catalog_entries_.size());
MemoryRegion dex_register_location_catalog_region = region.Subregion(
dex_register_location_catalog_start_, dex_register_location_catalog_size_);
DexRegisterLocationCatalog dex_register_location_catalog(dex_register_location_catalog_region);
@@ -260,17 +270,22 @@
uintptr_t next_dex_register_map_offset = 0;
uintptr_t next_inline_info_offset = 0;
for (size_t i = 0, e = stack_maps_.size(); i < e; ++i) {
- StackMap stack_map = code_info.GetStackMapAt(i, stack_map_encoding_);
+ StackMap stack_map = code_info.GetStackMapAt(i, encoding);
StackMapEntry entry = stack_maps_[i];
stack_map.SetDexPc(stack_map_encoding_, entry.dex_pc);
stack_map.SetNativePcOffset(stack_map_encoding_, entry.native_pc_offset);
stack_map.SetRegisterMask(stack_map_encoding_, entry.register_mask);
+ size_t number_of_stack_mask_bits = stack_map.GetNumberOfStackMaskBits(stack_map_encoding_);
if (entry.sp_mask != nullptr) {
- stack_map.SetStackMask(stack_map_encoding_, *entry.sp_mask);
+ for (size_t bit = 0; bit < number_of_stack_mask_bits; bit++) {
+ stack_map.SetStackMaskBit(stack_map_encoding_, bit, entry.sp_mask->IsBitSet(bit));
+ }
} else {
// The MemoryRegion does not have to be zeroed, so make sure we clear the bits.
- stack_map.SetStackMask(stack_map_encoding_, empty_bitmask);
+ for (size_t bit = 0; bit < number_of_stack_mask_bits; bit++) {
+ stack_map.SetStackMaskBit(stack_map_encoding_, bit, false);
+ }
}
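
Both branches are the same loop with a different bit source; a condensed sketch of the new write path (per-bit stores through the encoding replace the old whole-MemoryRegion SetStackMask, and BitVector::IsBitSet returns false past the end of its storage):

    for (size_t bit = 0; bit < number_of_stack_mask_bits; ++bit) {
      const bool value = entry.sp_mask != nullptr && entry.sp_mask->IsBitSet(bit);
      stack_map.SetStackMaskBit(stack_map_encoding_, bit, value);
    }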
if (entry.num_dex_registers == 0 || (entry.live_dex_registers_mask->NumSetBits() == 0)) {
@@ -282,7 +297,7 @@
// If we have a hit reuse the offset.
stack_map.SetDexRegisterMapOffset(
stack_map_encoding_,
- code_info.GetStackMapAt(entry.same_dex_register_map_as_, stack_map_encoding_)
+ code_info.GetStackMapAt(entry.same_dex_register_map_as_, encoding)
.GetDexRegisterMapOffset(stack_map_encoding_));
} else {
// New dex registers maps should be added to the stack map.
@@ -437,7 +452,7 @@
size_t num_dex_registers,
BitVector* live_dex_registers_mask,
size_t dex_register_locations_index) const {
- StackMapEncoding encoding = code_info.ExtractEncoding();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
for (size_t reg = 0; reg < num_dex_registers; reg++) {
// Find the location we tried to encode.
DexRegisterLocation expected = DexRegisterLocation::None();
@@ -464,25 +479,26 @@
// Check that all StackMapStream inputs are correctly encoded by trying to read them back.
void StackMapStream::CheckCodeInfo(MemoryRegion region) const {
CodeInfo code_info(region);
- StackMapEncoding encoding = code_info.ExtractEncoding();
- DCHECK_EQ(code_info.GetNumberOfStackMaps(), stack_maps_.size());
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ DCHECK_EQ(code_info.GetNumberOfStackMaps(encoding), stack_maps_.size());
for (size_t s = 0; s < stack_maps_.size(); ++s) {
const StackMap stack_map = code_info.GetStackMapAt(s, encoding);
+ const StackMapEncoding& stack_map_encoding = encoding.stack_map_encoding;
StackMapEntry entry = stack_maps_[s];
// Check main stack map fields.
- DCHECK_EQ(stack_map.GetNativePcOffset(encoding), entry.native_pc_offset);
- DCHECK_EQ(stack_map.GetDexPc(encoding), entry.dex_pc);
- DCHECK_EQ(stack_map.GetRegisterMask(encoding), entry.register_mask);
- MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
+ DCHECK_EQ(stack_map.GetNativePcOffset(stack_map_encoding), entry.native_pc_offset);
+ DCHECK_EQ(stack_map.GetDexPc(stack_map_encoding), entry.dex_pc);
+ DCHECK_EQ(stack_map.GetRegisterMask(stack_map_encoding), entry.register_mask);
+ size_t num_stack_mask_bits = stack_map.GetNumberOfStackMaskBits(stack_map_encoding);
if (entry.sp_mask != nullptr) {
- DCHECK_GE(stack_mask.size_in_bits(), entry.sp_mask->GetNumberOfBits());
- for (size_t b = 0; b < stack_mask.size_in_bits(); b++) {
- DCHECK_EQ(stack_mask.LoadBit(b), entry.sp_mask->IsBitSet(b));
+ DCHECK_GE(num_stack_mask_bits, entry.sp_mask->GetNumberOfBits());
+ for (size_t b = 0; b < num_stack_mask_bits; b++) {
+ DCHECK_EQ(stack_map.GetStackMaskBit(stack_map_encoding, b), entry.sp_mask->IsBitSet(b));
}
} else {
- for (size_t b = 0; b < stack_mask.size_in_bits(); b++) {
- DCHECK_EQ(stack_mask.LoadBit(b), 0u);
+ for (size_t b = 0; b < num_stack_mask_bits; b++) {
+ DCHECK_EQ(stack_map.GetStackMaskBit(stack_map_encoding, b), 0u);
}
}
@@ -494,7 +510,7 @@
entry.dex_register_locations_start_index);
// Check inline info.
- DCHECK_EQ(stack_map.HasInlineInfo(encoding), (entry.inlining_depth != 0));
+ DCHECK_EQ(stack_map.HasInlineInfo(stack_map_encoding), (entry.inlining_depth != 0));
if (entry.inlining_depth != 0) {
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
DCHECK_EQ(inline_info.GetDepth(), entry.inlining_depth);
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 016a911..b686748 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -74,13 +74,12 @@
allocator->Adapter(kArenaAllocStackMapStream)),
current_entry_(),
current_inline_info_(),
- stack_mask_size_(0),
+ code_info_encoding_(allocator->Adapter(kArenaAllocStackMapStream)),
inline_info_size_(0),
dex_register_maps_size_(0),
stack_maps_size_(0),
dex_register_location_catalog_size_(0),
dex_register_location_catalog_start_(0),
- stack_maps_start_(0),
dex_register_maps_start_(0),
inline_infos_start_(0),
needed_size_(0),
@@ -90,6 +89,7 @@
location_catalog_entries_.reserve(4);
dex_register_locations_.reserve(10 * 4);
inline_infos_.reserve(2);
+ code_info_encoding_.reserve(16);
}
// See runtime/stack_map.h to know what these fields contain.
@@ -200,13 +200,12 @@
StackMapEntry current_entry_;
InlineInfoEntry current_inline_info_;
StackMapEncoding stack_map_encoding_;
- size_t stack_mask_size_;
+ ArenaVector<uint8_t> code_info_encoding_;
size_t inline_info_size_;
size_t dex_register_maps_size_;
size_t stack_maps_size_;
size_t dex_register_location_catalog_size_;
size_t dex_register_location_catalog_start_;
- size_t stack_maps_start_;
size_t dex_register_maps_start_;
size_t inline_infos_start_;
size_t needed_size_;
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 604787f..3552487 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -23,9 +23,18 @@
namespace art {
-static bool SameBits(MemoryRegion region, const BitVector& bit_vector) {
- for (size_t i = 0; i < region.size_in_bits(); ++i) {
- if (region.LoadBit(i) != bit_vector.IsBitSet(i)) {
+// Check that the stack mask of the given stack map is identical
+// to the given bit vector. Returns true if they are the same.
+static bool CheckStackMask(
+ const StackMap& stack_map,
+ StackMapEncoding& encoding,
+ const BitVector& bit_vector) {
+ int number_of_bits = stack_map.GetNumberOfStackMaskBits(encoding);
+ if (bit_vector.GetHighestBitSet() >= number_of_bits) {
+ return false;
+ }
+ for (int i = 0; i < number_of_bits; ++i) {
+ if (stack_map.GetStackMaskBit(encoding, i) != bit_vector.IsBitSet(i)) {
return false;
}
}
@@ -52,12 +61,11 @@
stream.FillIn(region);
CodeInfo code_info(region);
- StackMapEncoding encoding = code_info.ExtractEncoding();
- ASSERT_EQ(0u, encoding.NumberOfBytesForStackMask());
- ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ ASSERT_EQ(1u, code_info.GetNumberOfStackMaps(encoding));
- uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
- ASSERT_EQ(2u, number_of_location_catalog_entries);
+ uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+ ASSERT_EQ(2u, number_of_catalog_entries);
DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
// The Dex register location catalog contains:
// - one 1-byte short Dex register location, and
@@ -68,14 +76,13 @@
StackMap stack_map = code_info.GetStackMapAt(0, encoding);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
- ASSERT_EQ(0u, stack_map.GetDexPc(encoding));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding));
- ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding));
+ ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
- MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
- ASSERT_TRUE(SameBits(stack_mask, sp_mask));
+ ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask));
- ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
@@ -100,9 +107,9 @@
ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info, encoding));
size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
- 0, number_of_dex_registers, number_of_location_catalog_entries);
+ 0, number_of_dex_registers, number_of_catalog_entries);
size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(
- 1, number_of_dex_registers, number_of_location_catalog_entries);
+ 1, number_of_dex_registers, number_of_catalog_entries);
ASSERT_EQ(0u, index0);
ASSERT_EQ(1u, index1);
DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
@@ -114,7 +121,7 @@
ASSERT_EQ(0, location0.GetValue());
ASSERT_EQ(-2, location1.GetValue());
- ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
}
TEST(StackMapTest, Test2) {
@@ -166,12 +173,11 @@
stream.FillIn(region);
CodeInfo code_info(region);
- StackMapEncoding encoding = code_info.ExtractEncoding();
- ASSERT_EQ(2u, encoding.NumberOfBytesForStackMask());
- ASSERT_EQ(4u, code_info.GetNumberOfStackMaps());
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ ASSERT_EQ(4u, code_info.GetNumberOfStackMaps(encoding));
- uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
- ASSERT_EQ(7u, number_of_location_catalog_entries);
+ uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+ ASSERT_EQ(7u, number_of_catalog_entries);
DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
// The Dex register location catalog contains:
// - six 1-byte short Dex register locations, and
@@ -184,14 +190,13 @@
StackMap stack_map = code_info.GetStackMapAt(0, encoding);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
- ASSERT_EQ(0u, stack_map.GetDexPc(encoding));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding));
- ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding));
+ ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
- MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
- ASSERT_TRUE(SameBits(stack_mask, sp_mask1));
+ ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask1));
- ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
@@ -216,9 +221,9 @@
ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info, encoding));
size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
- 0, number_of_dex_registers, number_of_location_catalog_entries);
+ 0, number_of_dex_registers, number_of_catalog_entries);
size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(
- 1, number_of_dex_registers, number_of_location_catalog_entries);
+ 1, number_of_dex_registers, number_of_catalog_entries);
ASSERT_EQ(0u, index0);
ASSERT_EQ(1u, index1);
DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
@@ -230,7 +235,7 @@
ASSERT_EQ(0, location0.GetValue());
ASSERT_EQ(-2, location1.GetValue());
- ASSERT_TRUE(stack_map.HasInlineInfo(encoding));
+ ASSERT_TRUE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
ASSERT_EQ(2u, inline_info.GetDepth());
ASSERT_EQ(82u, inline_info.GetMethodIndexAtDepth(0));
@@ -246,14 +251,13 @@
StackMap stack_map = code_info.GetStackMapAt(1, encoding);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u, encoding)));
- ASSERT_EQ(1u, stack_map.GetDexPc(encoding));
- ASSERT_EQ(128u, stack_map.GetNativePcOffset(encoding));
- ASSERT_EQ(0xFFu, stack_map.GetRegisterMask(encoding));
+ ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map_encoding));
+ ASSERT_EQ(128u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(0xFFu, stack_map.GetRegisterMask(encoding.stack_map_encoding));
- MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
- ASSERT_TRUE(SameBits(stack_mask, sp_mask2));
+ ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask2));
- ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
@@ -279,9 +283,9 @@
1, number_of_dex_registers, code_info, encoding));
size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
- 0, number_of_dex_registers, number_of_location_catalog_entries);
+ 0, number_of_dex_registers, number_of_catalog_entries);
size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(
- 1, number_of_dex_registers, number_of_location_catalog_entries);
+ 1, number_of_dex_registers, number_of_catalog_entries);
ASSERT_EQ(2u, index0);
ASSERT_EQ(3u, index1);
DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
@@ -293,7 +297,7 @@
ASSERT_EQ(18, location0.GetValue());
ASSERT_EQ(3, location1.GetValue());
- ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
}
// Third stack map.
@@ -301,14 +305,13 @@
StackMap stack_map = code_info.GetStackMapAt(2, encoding);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(2u, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(192u, encoding)));
- ASSERT_EQ(2u, stack_map.GetDexPc(encoding));
- ASSERT_EQ(192u, stack_map.GetNativePcOffset(encoding));
- ASSERT_EQ(0xABu, stack_map.GetRegisterMask(encoding));
+ ASSERT_EQ(2u, stack_map.GetDexPc(encoding.stack_map_encoding));
+ ASSERT_EQ(192u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(0xABu, stack_map.GetRegisterMask(encoding.stack_map_encoding));
- MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
- ASSERT_TRUE(SameBits(stack_mask, sp_mask3));
+ ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask3));
- ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
@@ -334,9 +337,9 @@
1, number_of_dex_registers, code_info, encoding));
size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
- 0, number_of_dex_registers, number_of_location_catalog_entries);
+ 0, number_of_dex_registers, number_of_catalog_entries);
size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(
- 1, number_of_dex_registers, number_of_location_catalog_entries);
+ 1, number_of_dex_registers, number_of_catalog_entries);
ASSERT_EQ(4u, index0);
ASSERT_EQ(5u, index1);
DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
@@ -348,7 +351,7 @@
ASSERT_EQ(6, location0.GetValue());
ASSERT_EQ(8, location1.GetValue());
- ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
}
// Fourth stack map.
@@ -356,14 +359,13 @@
StackMap stack_map = code_info.GetStackMapAt(3, encoding);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(3u, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(256u, encoding)));
- ASSERT_EQ(3u, stack_map.GetDexPc(encoding));
- ASSERT_EQ(256u, stack_map.GetNativePcOffset(encoding));
- ASSERT_EQ(0xCDu, stack_map.GetRegisterMask(encoding));
+ ASSERT_EQ(3u, stack_map.GetDexPc(encoding.stack_map_encoding));
+ ASSERT_EQ(256u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(0xCDu, stack_map.GetRegisterMask(encoding.stack_map_encoding));
- MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
- ASSERT_TRUE(SameBits(stack_mask, sp_mask4));
+ ASSERT_TRUE(CheckStackMask(stack_map, encoding.stack_map_encoding, sp_mask4));
- ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
ASSERT_TRUE(dex_register_map.IsDexRegisterLive(0));
@@ -389,9 +391,9 @@
1, number_of_dex_registers, code_info, encoding));
size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
- 0, number_of_dex_registers, number_of_location_catalog_entries);
+ 0, number_of_dex_registers, number_of_catalog_entries);
size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(
- 1, number_of_dex_registers, number_of_location_catalog_entries);
+ 1, number_of_dex_registers, number_of_catalog_entries);
ASSERT_EQ(3u, index0); // Shared with second stack map.
ASSERT_EQ(6u, index1);
DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
@@ -403,7 +405,7 @@
ASSERT_EQ(3, location0.GetValue());
ASSERT_EQ(1, location1.GetValue());
- ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
}
}
@@ -425,12 +427,11 @@
stream.FillIn(region);
CodeInfo code_info(region);
- StackMapEncoding encoding = code_info.ExtractEncoding();
- ASSERT_EQ(0u, encoding.NumberOfBytesForStackMask());
- ASSERT_EQ(1u, code_info.GetNumberOfStackMaps());
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ ASSERT_EQ(1u, code_info.GetNumberOfStackMaps(encoding));
- uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
- ASSERT_EQ(1u, number_of_location_catalog_entries);
+ uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+ ASSERT_EQ(1u, number_of_catalog_entries);
DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
// The Dex register location catalog contains:
// - one 5-byte large Dex register location.
@@ -440,11 +441,11 @@
StackMap stack_map = code_info.GetStackMapAt(0, encoding);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
- ASSERT_EQ(0u, stack_map.GetDexPc(encoding));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding));
- ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding));
+ ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
- ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding));
+ ASSERT_TRUE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
ASSERT_FALSE(dex_register_map.IsDexRegisterLive(0));
@@ -467,9 +468,9 @@
ASSERT_EQ(-2, dex_register_map.GetConstant(1, number_of_dex_registers, code_info, encoding));
size_t index0 = dex_register_map.GetLocationCatalogEntryIndex(
- 0, number_of_dex_registers, number_of_location_catalog_entries);
+ 0, number_of_dex_registers, number_of_catalog_entries);
size_t index1 = dex_register_map.GetLocationCatalogEntryIndex(
- 1, number_of_dex_registers, number_of_location_catalog_entries);
+ 1, number_of_dex_registers, number_of_catalog_entries);
ASSERT_EQ(DexRegisterLocationCatalog::kNoLocationEntryIndex, index0);
ASSERT_EQ(0u, index1);
DexRegisterLocation location0 = location_catalog.GetDexRegisterLocation(index0);
@@ -481,7 +482,7 @@
ASSERT_EQ(0, location0.GetValue());
ASSERT_EQ(-2, location1.GetValue());
- ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
}
// Generate a stack map whose dex register offset is
@@ -518,13 +519,13 @@
stream.FillIn(region);
CodeInfo code_info(region);
- StackMapEncoding encoding = code_info.ExtractEncoding();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
// The location catalog contains two entries (DexRegisterLocation(kConstant, 0)
// and DexRegisterLocation(kConstant, 1)), therefore the location catalog index
// has a size of 1 bit.
- uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
- ASSERT_EQ(2u, number_of_location_catalog_entries);
- ASSERT_EQ(1u, DexRegisterMap::SingleEntrySizeInBits(number_of_location_catalog_entries));
+ uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+ ASSERT_EQ(2u, number_of_catalog_entries);
+ ASSERT_EQ(1u, DexRegisterMap::SingleEntrySizeInBits(number_of_catalog_entries));
// The first Dex register map contains:
// - a live register bit mask for 1024 registers (that is, 128 bytes of
@@ -537,16 +538,17 @@
DexRegisterMap dex_register_map0 =
code_info.GetDexRegisterMapOf(stack_map0, encoding, number_of_dex_registers);
ASSERT_EQ(127u, dex_register_map0.GetLocationMappingDataSize(number_of_dex_registers,
- number_of_location_catalog_entries));
+ number_of_catalog_entries));
ASSERT_EQ(255u, dex_register_map0.Size());
StackMap stack_map1 = code_info.GetStackMapAt(1, encoding);
- ASSERT_TRUE(stack_map1.HasDexRegisterMap(encoding));
+ ASSERT_TRUE(stack_map1.HasDexRegisterMap(encoding.stack_map_encoding));
// ...the offset of the second Dex register map (relative to the
// beginning of the Dex register maps region) is 255 (i.e.,
// kNoDexRegisterMapSmallEncoding).
- ASSERT_NE(stack_map1.GetDexRegisterMapOffset(encoding), StackMap::kNoDexRegisterMap);
- ASSERT_EQ(stack_map1.GetDexRegisterMapOffset(encoding), 0xFFu);
+ ASSERT_NE(stack_map1.GetDexRegisterMapOffset(encoding.stack_map_encoding),
+ StackMap::kNoDexRegisterMap);
+ ASSERT_EQ(stack_map1.GetDexRegisterMapOffset(encoding.stack_map_encoding), 0xFFu);
}
TEST(StackMapTest, TestShareDexRegisterMap) {
@@ -578,7 +580,7 @@
stream.FillIn(region);
CodeInfo ci(region);
- StackMapEncoding encoding = ci.ExtractEncoding();
+ CodeInfoEncoding encoding = ci.ExtractEncoding();
// Verify first stack map.
StackMap sm0 = ci.GetStackMapAt(0, encoding);
@@ -599,9 +601,12 @@
ASSERT_EQ(-2, dex_registers2.GetConstant(1, number_of_dex_registers, ci, encoding));
// Verify dex register map offsets.
- ASSERT_EQ(sm0.GetDexRegisterMapOffset(encoding), sm1.GetDexRegisterMapOffset(encoding));
- ASSERT_NE(sm0.GetDexRegisterMapOffset(encoding), sm2.GetDexRegisterMapOffset(encoding));
- ASSERT_NE(sm1.GetDexRegisterMapOffset(encoding), sm2.GetDexRegisterMapOffset(encoding));
+ ASSERT_EQ(sm0.GetDexRegisterMapOffset(encoding.stack_map_encoding),
+ sm1.GetDexRegisterMapOffset(encoding.stack_map_encoding));
+ ASSERT_NE(sm0.GetDexRegisterMapOffset(encoding.stack_map_encoding),
+ sm2.GetDexRegisterMapOffset(encoding.stack_map_encoding));
+ ASSERT_NE(sm1.GetDexRegisterMapOffset(encoding.stack_map_encoding),
+ sm2.GetDexRegisterMapOffset(encoding.stack_map_encoding));
}
TEST(StackMapTest, TestNoDexRegisterMap) {
@@ -624,34 +629,33 @@
stream.FillIn(region);
CodeInfo code_info(region);
- StackMapEncoding encoding = code_info.ExtractEncoding();
- ASSERT_EQ(0u, encoding.NumberOfBytesForStackMask());
- ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ ASSERT_EQ(2u, code_info.GetNumberOfStackMaps(encoding));
- uint32_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
- ASSERT_EQ(0u, number_of_location_catalog_entries);
+ uint32_t number_of_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
+ ASSERT_EQ(0u, number_of_catalog_entries);
DexRegisterLocationCatalog location_catalog = code_info.GetDexRegisterLocationCatalog(encoding);
ASSERT_EQ(0u, location_catalog.Size());
StackMap stack_map = code_info.GetStackMapAt(0, encoding);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64, encoding)));
- ASSERT_EQ(0u, stack_map.GetDexPc(encoding));
- ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding));
- ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding));
+ ASSERT_EQ(0u, stack_map.GetDexPc(encoding.stack_map_encoding));
+ ASSERT_EQ(64u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(0x3u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
- ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding));
- ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
+ ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
stack_map = code_info.GetStackMapAt(1, encoding);
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1, encoding)));
ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(67, encoding)));
- ASSERT_EQ(1u, stack_map.GetDexPc(encoding));
- ASSERT_EQ(67u, stack_map.GetNativePcOffset(encoding));
- ASSERT_EQ(0x4u, stack_map.GetRegisterMask(encoding));
+ ASSERT_EQ(1u, stack_map.GetDexPc(encoding.stack_map_encoding));
+ ASSERT_EQ(67u, stack_map.GetNativePcOffset(encoding.stack_map_encoding));
+ ASSERT_EQ(0x4u, stack_map.GetRegisterMask(encoding.stack_map_encoding));
- ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding));
- ASSERT_FALSE(stack_map.HasInlineInfo(encoding));
+ ASSERT_FALSE(stack_map.HasDexRegisterMap(encoding.stack_map_encoding));
+ ASSERT_FALSE(stack_map.HasInlineInfo(encoding.stack_map_encoding));
}
TEST(StackMapTest, InlineTest) {
@@ -726,7 +730,7 @@
stream.FillIn(region);
CodeInfo ci(region);
- StackMapEncoding encoding = ci.ExtractEncoding();
+ CodeInfoEncoding encoding = ci.ExtractEncoding();
{
// Verify first stack map.
@@ -792,7 +796,7 @@
DexRegisterMap dex_registers0 = ci.GetDexRegisterMapOf(sm2, encoding, 2);
ASSERT_FALSE(dex_registers0.IsDexRegisterLive(0));
ASSERT_EQ(4, dex_registers0.GetConstant(1, 2, ci, encoding));
- ASSERT_FALSE(sm2.HasInlineInfo(encoding));
+ ASSERT_FALSE(sm2.HasInlineInfo(encoding.stack_map_encoding));
}
{
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 335b6e1..9a3bb02 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -247,6 +247,10 @@
return;
}
+ uint32_t entry_point = oat_method.GetCodeOffset() - oat_header.GetExecutableOffset();
+ // Clear Thumb2 bit.
+ const void* code_address = EntryPointToCodePointer(reinterpret_cast<void*>(entry_point));
+
debug::MethodDebugInfo info = debug::MethodDebugInfo();
info.trampoline_name = nullptr;
info.dex_file = &dex_file;
@@ -259,7 +263,7 @@
info.is_native_debuggable = oat_header.IsNativeDebuggable();
info.is_optimized = method_header->IsOptimized();
info.is_code_address_text_relative = true;
- info.code_address = oat_method.GetCodeOffset() - oat_header.GetExecutableOffset();
+ info.code_address = reinterpret_cast<uintptr_t>(code_address);
info.code_size = method_header->GetCodeSize();
info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
info.code_info = info.is_optimized ? method_header->GetOptimizedCodeInfoPtr() : nullptr;
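
The helper above strips the instruction-set marker from the entry point before it is recorded as a code address. A hedged sketch of the behavior relied on here (not the runtime's actual implementation, which lives in the entrypoint utilities):

    // On Thumb2, bit 0 of an entry point is the Thumb interworking bit, not
    // part of the address; the code address is the entry point with it cleared.
    inline const void* ThumbEntryPointToCodePointer(const void* entry_point) {
      const uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
      return reinterpret_cast<const void*>(code & ~static_cast<uintptr_t>(1));
    }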
@@ -1373,7 +1377,7 @@
const void* raw_code_info = oat_method.GetVmapTable();
if (raw_code_info != nullptr) {
CodeInfo code_info(raw_code_info);
- StackMapEncoding encoding = code_info.ExtractEncoding();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(offset, encoding);
if (stack_map.IsValid()) {
stack_map.Dump(vios, code_info, encoding, oat_method.GetCodeOffset(),
diff --git a/oatdump/oatdump_test.cc b/oatdump/oatdump_test.cc
index 5407d3a..bf062ed 100644
--- a/oatdump/oatdump_test.cc
+++ b/oatdump/oatdump_test.cc
@@ -69,6 +69,7 @@
exec_argv.push_back("--output=" + core_oat_location_ + ".symbolize");
} else if (mode == kModeArt) {
exec_argv.push_back("--image=" + core_art_location_);
+ exec_argv.push_back("--instruction-set=" + std::string(GetInstructionSetString(kRuntimeISA)));
exec_argv.push_back("--output=/dev/null");
} else {
CHECK_EQ(static_cast<size_t>(mode), static_cast<size_t>(kModeOat));
diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h
index 8430d68..f279f45 100644
--- a/runtime/base/bit_utils.h
+++ b/runtime/base/bit_utils.h
@@ -269,7 +269,7 @@
template <typename T>
static constexpr T MaxInt(size_t bits) {
return
- DCHECK_CONSTEXPR(bits > 0, "bits cannot be zero", 0)
+ DCHECK_CONSTEXPR(std::is_unsigned<T>::value || bits > 0, "bits cannot be zero for signed", 0)
DCHECK_CONSTEXPR(bits <= BitSizeOf<T>(), "kBits must be < max.", 0)
bits == BitSizeOf<T>()
? std::numeric_limits<T>::max()
@@ -283,7 +283,7 @@
template <typename T>
static constexpr T MinInt(size_t bits) {
return
- DCHECK_CONSTEXPR(bits > 0, "bits cannot be zero", 0)
+ DCHECK_CONSTEXPR(std::is_unsigned<T>::value || bits > 0, "bits cannot be zero for signed", 0)
DCHECK_CONSTEXPR(bits <= BitSizeOf<T>(), "kBits must be < max.", 0)
bits == BitSizeOf<T>()
? std::numeric_limits<T>::min()
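
Worked examples of the relaxed precondition, assuming the usual formulas in the elided branches (unsigned MaxInt(bits) = 2^bits - 1, unsigned MinInt(bits) = 0):

    // Now legal: bits == 0 on an unsigned type denotes an empty value range.
    static_assert(MaxInt<uint32_t>(0) == 0u, "2^0 - 1 == 0");
    static_assert(MinInt<uint32_t>(0) == 0u, "unsigned minimum is always 0");
    // Still a debug-build failure: bits == 0 on a signed type, since a signed
    // range needs at least the sign bit.
    // MaxInt<int32_t>(0);  // DCHECK: "bits cannot be zero for signed"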
diff --git a/runtime/base/time_utils.cc b/runtime/base/time_utils.cc
index b7cf207..3e5bac8 100644
--- a/runtime/base/time_utils.cc
+++ b/runtime/base/time_utils.cc
@@ -15,6 +15,7 @@
*/
#include <inttypes.h>
+#include <limits>
#include <sstream>
#include "time_utils.h"
@@ -190,9 +191,16 @@
}
int64_t end_sec = ts->tv_sec + ms / 1000;
- if (UNLIKELY(end_sec >= 0x7fffffff)) {
- LOG(INFO) << "Note: end time exceeds INT32_MAX: " << end_sec;
- end_sec = 0x7ffffffe;
+ constexpr int32_t int32_max = std::numeric_limits<int32_t>::max();
+ if (UNLIKELY(end_sec >= int32_max)) {
+ // Either ms was intended to denote an infinite timeout, or we have a
+ // problem. The former generally uses the largest possible millisecond
+ // or nanosecond value. Log only in the latter case.
+ constexpr int64_t int64_max = std::numeric_limits<int64_t>::max();
+ if (ms != int64_max && ms != int64_max / (1000 * 1000)) {
+ LOG(INFO) << "Note: end time exceeds INT32_MAX: " << end_sec;
+ }
+ end_sec = int32_max - 1; // Allow for increment below.
}
ts->tv_sec = end_sec;
ts->tv_nsec = (ts->tv_nsec + (ms % 1000) * 1000000) + ns;
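
Concretely, the two sentinel values the new guard treats as intentional infinite timeouts (inferred only from the arithmetic above):

    constexpr int64_t int64_max = std::numeric_limits<int64_t>::max();
    // ms == int64_max               : caller passed "forever" in milliseconds.
    // ms == int64_max / (1000*1000) : caller converted a "forever" nanosecond
    //                                 timeout to milliseconds.
    // Every other end time past INT32_MAX seconds is still logged, then
    // clamped to int32_max - 1 so the tv_sec adjustment below cannot overflow
    // a 32-bit time_t.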
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index fcf3326..7595d14 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -66,13 +66,12 @@
SHARED_REQUIRES(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
CodeInfo code_info = GetCurrentOatQuickMethodHeader()->GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
uint16_t number_of_dex_registers = m->GetCodeItem()->registers_size_;
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);
- MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
- uint32_t register_mask = stack_map.GetRegisterMask(encoding);
+ uint32_t register_mask = stack_map.GetRegisterMask(encoding.stack_map_encoding);
for (int i = 0; i < number_of_references; ++i) {
int reg = registers[i];
CHECK(reg < m->GetCodeItem()->registers_size_);
@@ -85,7 +84,8 @@
break;
case DexRegisterLocation::Kind::kInStack:
DCHECK_EQ(location.GetValue() % kFrameSlotSize, 0);
- CHECK(stack_mask.LoadBit(location.GetValue() / kFrameSlotSize));
+ CHECK(stack_map.GetStackMaskBit(encoding.stack_map_encoding,
+ location.GetValue() / kFrameSlotSize));
break;
case DexRegisterLocation::Kind::kInRegister:
case DexRegisterLocation::Kind::kInRegisterHigh:
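
The invariant behind the replaced check, as a worked example (restating the code above, with 4-byte frame slots as on current targets):

    // A dex register recorded kInStack at byte offset 24 occupies frame slot
    // 24 / kFrameSlotSize == 6, so stack-mask bit 6 must be set: the GC finds
    // references in the frame through exactly these bits, now read via
    // stack_map.GetStackMaskBit(...) instead of a raw MemoryRegion load.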
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 32ad422..7060593 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -50,7 +50,7 @@
#include "experimental_flags.h"
#include "gc_root-inl.h"
#include "gc/accounting/card_table-inl.h"
-#include "gc/accounting/heap_bitmap.h"
+#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/image_space.h"
#include "handle_scope-inl.h"
@@ -96,6 +96,7 @@
namespace art {
static constexpr bool kSanityCheckObjects = kIsDebugBuild;
+static constexpr bool kVerifyArtMethodDeclaringClasses = kIsDebugBuild;
static void ThrowNoClassDefFoundError(const char* fmt, ...)
__attribute__((__format__(__printf__, 1, 2)))
@@ -1197,6 +1198,23 @@
ClassTable* const table_;
};
+class VerifyDeclaringClassVisitor : public ArtMethodVisitor {
+ public:
+ VerifyDeclaringClassVisitor() SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
+ : live_bitmap_(Runtime::Current()->GetHeap()->GetLiveBitmap()) {}
+
+ virtual void Visit(ArtMethod* method)
+ SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ mirror::Class* klass = method->GetDeclaringClassUnchecked();
+ if (klass != nullptr) {
+ CHECK(live_bitmap_->Test(klass)) << "Image method has unmarked declaring class";
+ }
+ }
+
+ private:
+ gc::accounting::HeapBitmap* const live_bitmap_;
+};
+
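A hedged usage sketch for the new visitor; it mirrors the call site added further down in this file (walking every ArtMethod packed into the image's method section, under the heap bitmap lock):

    ReaderMutexLock rmu(self, *Locks::heap_bitmap_lock_);
    VerifyDeclaringClassVisitor visitor;
    header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
        &visitor, space->Begin(), sizeof(void*));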
bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches(
gc::space::ImageSpace* space,
Handle<mirror::ClassLoader> class_loader,
@@ -1416,15 +1434,24 @@
}
}
}
- if (*out_forward_dex_cache_array) {
- ScopedTrace timing("Fixup ArtMethod dex cache arrays");
- FixupArtMethodArrayVisitor visitor(header);
- header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
- &visitor,
- space->Begin(),
- sizeof(void*));
- Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader.Get());
- }
+ }
+ if (*out_forward_dex_cache_array) {
+ ScopedTrace timing("Fixup ArtMethod dex cache arrays");
+ FixupArtMethodArrayVisitor visitor(header);
+ header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
+ &visitor,
+ space->Begin(),
+ sizeof(void*));
+ Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader.Get());
+ }
+ if (kVerifyArtMethodDeclaringClasses) {
+ ScopedTrace timing("Verify declaring classes");
+ ReaderMutexLock rmu(self, *Locks::heap_bitmap_lock_);
+ VerifyDeclaringClassVisitor visitor;
+ header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods(
+ &visitor,
+ space->Begin(),
+ sizeof(void*));
}
return true;
}
@@ -3715,10 +3742,6 @@
DCHECK(klass.Get() != nullptr);
DCHECK(supertype.Get() != nullptr);
- StackHandleScope<1> hs(self);
- // Acquire lock to prevent races on verifying the super class.
- ObjectLock<mirror::Class> super_lock(self, supertype);
-
if (!supertype->IsVerified() && !supertype->IsErroneous()) {
VerifyClass(self, supertype);
}
@@ -3732,6 +3755,7 @@
PrettyDescriptor(klass.Get()).c_str(),
PrettyDescriptor(supertype.Get()).c_str());
LOG(WARNING) << error_msg << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8();
+ StackHandleScope<1> hs(self);
Handle<mirror::Throwable> cause(hs.NewHandle(self->GetException()));
if (cause.Get() != nullptr) {
// Set during VerifyClass call (if at all).
@@ -3746,44 +3770,57 @@
if (Runtime::Current()->IsAotCompiler()) {
Runtime::Current()->GetCompilerCallbacks()->ClassRejected(ref);
}
+ // Need to grab the lock to change status.
+ ObjectLock<mirror::Class> super_lock(self, klass);
mirror::Class::SetStatus(klass, mirror::Class::kStatusError, self);
return false;
}
void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass, LogSeverity log_level) {
- // TODO: assert that the monitor on the Class is held
- ObjectLock<mirror::Class> lock(self, klass);
+ {
+ // TODO: assert that the monitor on the Class is held
+ ObjectLock<mirror::Class> lock(self, klass);
- // Don't attempt to re-verify if already sufficiently verified.
- if (klass->IsVerified()) {
- EnsureSkipAccessChecksMethods(klass);
- return;
- }
- if (klass->IsCompileTimeVerified() && Runtime::Current()->IsAotCompiler()) {
- return;
- }
+ // Is somebody verifying this now?
+ mirror::Class::Status old_status = klass->GetStatus();
+ while (old_status == mirror::Class::kStatusVerifying ||
+ old_status == mirror::Class::kStatusVerifyingAtRuntime) {
+ lock.WaitIgnoringInterrupts();
+ CHECK_GT(klass->GetStatus(), old_status);
+ old_status = klass->GetStatus();
+ }
- // The class might already be erroneous, for example at compile time if we attempted to verify
- // this class as a parent to another.
- if (klass->IsErroneous()) {
- ThrowEarlierClassFailure(klass.Get());
- return;
- }
+ // The class might already be erroneous, for example at compile time if we attempted to verify
+ // this class as a parent to another.
+ if (klass->IsErroneous()) {
+ ThrowEarlierClassFailure(klass.Get());
+ return;
+ }
- if (klass->GetStatus() == mirror::Class::kStatusResolved) {
- mirror::Class::SetStatus(klass, mirror::Class::kStatusVerifying, self);
- } else {
- CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime)
- << PrettyClass(klass.Get());
- CHECK(!Runtime::Current()->IsAotCompiler());
- mirror::Class::SetStatus(klass, mirror::Class::kStatusVerifyingAtRuntime, self);
- }
+ // Don't attempt to re-verify if already sufficiently verified.
+ if (klass->IsVerified()) {
+ EnsureSkipAccessChecksMethods(klass);
+ return;
+ }
+ if (klass->IsCompileTimeVerified() && Runtime::Current()->IsAotCompiler()) {
+ return;
+ }
- // Skip verification if disabled.
- if (!Runtime::Current()->IsVerificationEnabled()) {
- mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, self);
- EnsureSkipAccessChecksMethods(klass);
- return;
+ if (klass->GetStatus() == mirror::Class::kStatusResolved) {
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusVerifying, self);
+ } else {
+ CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusRetryVerificationAtRuntime)
+ << PrettyClass(klass.Get());
+ CHECK(!Runtime::Current()->IsAotCompiler());
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusVerifyingAtRuntime, self);
+ }
+
+ // Skip verification if disabled.
+ if (!Runtime::Current()->IsVerificationEnabled()) {
+ mirror::Class::SetStatus(klass, mirror::Class::kStatusVerified, self);
+ EnsureSkipAccessChecksMethods(klass);
+ return;
+ }
}
// Verify super class.
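
The restructured control flow, condensed (a sketch of scope only; every status transition shown comes from the hunks above and below): the class monitor is held to inspect and publish status, released across the verification work itself, and re-acquired afterwards:

    void VerifyClassSketch(Thread* self, Handle<mirror::Class> klass) {
      {
        ObjectLock<mirror::Class> lock(self, klass);
        // Wait while another thread is verifying; bail out early if the class
        // is already verified or erroneous; otherwise publish
        // kStatusVerifying / kStatusVerifyingAtRuntime.
      }  // Monitor released: verification may block or recurse into supertypes.
      // ... verify superclass and methods without holding the monitor ...
      ObjectLock<mirror::Class> lock(self, klass);  // Re-acquire to publish.
      // ... set kStatusVerified, kStatusRetryVerificationAtRuntime, or error ...
    }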
@@ -3856,6 +3893,10 @@
log_level,
&error_msg);
}
+
+ // Verification is done, grab the lock again.
+ ObjectLock<mirror::Class> lock(self, klass);
+
if (preverified || verifier_failure != verifier::MethodVerifier::kHardFailure) {
if (!preverified && verifier_failure != verifier::MethodVerifier::kNoFailure) {
VLOG(class_linker) << "Soft verification failure in class " << PrettyDescriptor(klass.Get())
diff --git a/runtime/compiler_filter.cc b/runtime/compiler_filter.cc
index 31a1bc1..d617caf 100644
--- a/runtime/compiler_filter.cc
+++ b/runtime/compiler_filter.cc
@@ -83,6 +83,34 @@
UNREACHABLE();
}
+CompilerFilter::Filter CompilerFilter::GetNonProfileDependentFilterFrom(Filter filter) {
+ switch (filter) {
+ case CompilerFilter::kVerifyNone:
+ case CompilerFilter::kVerifyAtRuntime:
+ case CompilerFilter::kInterpretOnly:
+ case CompilerFilter::kSpace:
+ case CompilerFilter::kBalanced:
+ case CompilerFilter::kTime:
+ case CompilerFilter::kSpeed:
+ case CompilerFilter::kEverything:
+ return filter;
+
+ case CompilerFilter::kVerifyProfile:
+ return CompilerFilter::kInterpretOnly;
+
+ case CompilerFilter::kSpaceProfile:
+ return CompilerFilter::kSpace;
+
+ case CompilerFilter::kSpeedProfile:
+ return CompilerFilter::kSpeed;
+
+ case CompilerFilter::kEverythingProfile:
+ return CompilerFilter::kEverything;
+ }
+ UNREACHABLE();
+}
+
+
bool CompilerFilter::IsAsGoodAs(Filter current, Filter target) {
return current >= target;
}
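
Usage sketch for the new helper (values straight from the switch above):

    // Strip the profile dependency before falling back when no profile data
    // is available; non-profile filters map to themselves.
    CHECK_EQ(CompilerFilter::GetNonProfileDependentFilterFrom(
                 CompilerFilter::kSpeedProfile),
             CompilerFilter::kSpeed);
    CHECK_EQ(CompilerFilter::GetNonProfileDependentFilterFrom(
                 CompilerFilter::kVerifyProfile),
             CompilerFilter::kInterpretOnly);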
diff --git a/runtime/compiler_filter.h b/runtime/compiler_filter.h
index 1bea8b4..6289d8a 100644
--- a/runtime/compiler_filter.h
+++ b/runtime/compiler_filter.h
@@ -59,6 +59,9 @@
// profile.
static bool DependsOnProfile(Filter filter);
+ // Returns a non-profile-guided version of the given filter.
+ static Filter GetNonProfileDependentFilterFrom(Filter filter);
+
// Returns true if the 'current' compiler filter is considered at least as
// good as the 'target' compilation type.
// For example: kSpeed is as good as kInterpretOnly, but kInterpretOnly is
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 109e03d..d832552 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -4818,7 +4818,7 @@
LOG(INFO) << "Tracked allocations, (count=" << count << ")";
for (auto it = records->RBegin(), end = records->REnd();
count > 0 && it != end; count--, it++) {
- const gc::AllocRecord* record = it->second;
+ const gc::AllocRecord* record = &it->second;
LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->GetTid(), record->ByteCount())
<< PrettyClass(record->GetClass());
@@ -4957,7 +4957,7 @@
uint16_t count = capped_count;
for (auto it = records->RBegin(), end = records->REnd();
count > 0 && it != end; count--, it++) {
- const gc::AllocRecord* record = it->second;
+ const gc::AllocRecord* record = &it->second;
std::string temp;
class_names.Add(record->GetClassDescriptor(&temp));
for (size_t i = 0, depth = record->GetDepth(); i < depth; i++) {
@@ -5008,7 +5008,7 @@
// (2b) thread id
// (2b) allocated object's class name index
// (1b) stack depth
- const gc::AllocRecord* record = it->second;
+ const gc::AllocRecord* record = &it->second;
size_t stack_depth = record->GetDepth();
size_t allocated_object_class_name_index =
class_names.IndexOf(record->GetClassDescriptor(&temp));
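
All three call sites change the same way because the record map now stores AllocRecord by value instead of by owning pointer; it->second is the record itself, so a pointer is taken with &. A hedged sketch (the exact container alias in allocation_record.h is not shown in this patch):

    // Before: std::list<std::pair<GcRoot<mirror::Object>, AllocRecord*>>
    // After:  std::list<std::pair<GcRoot<mirror::Object>, AllocRecord>>
    for (auto it = records->RBegin(), end = records->REnd(); it != end; ++it) {
      const gc::AllocRecord* record = &it->second;  // Was: it->second.
      // ... no delete needed on erase; the list owns the record now ...
    }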
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 5344cdd..116261b 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -39,6 +39,7 @@
namespace art {
+template <bool kResolve = true>
inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method,
const InlineInfo& inline_info,
uint8_t inlining_depth)
@@ -50,6 +51,9 @@
if (!caller->IsRuntimeMethod()) {
return caller;
}
+ if (!kResolve) {
+ return nullptr;
+ }
// The method in the dex cache can be the runtime method responsible for invoking
// the stub that will then update the dex cache. Therefore, we need to do the
@@ -64,7 +68,7 @@
if (inlining_depth == 0) {
class_loader.Assign(outer_method->GetClassLoader());
} else {
- caller = GetResolvedMethod(outer_method, inline_info, inlining_depth - 1);
+ caller = GetResolvedMethod<kResolve>(outer_method, inline_info, inlining_depth - 1);
class_loader.Assign(caller->GetClassLoader());
}
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 3dfad76..3368411 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -275,10 +275,10 @@
if (current_code->IsOptimized()) {
uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc);
CodeInfo code_info = current_code->GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(stack_map.IsValid());
- if (stack_map.HasInlineInfo(encoding)) {
+ if (stack_map.HasInlineInfo(encoding.stack_map_encoding)) {
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
caller = GetResolvedMethod(outer_method, inline_info, inline_info.GetDepth() - 1);
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 7375656..1a96d0f 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -327,14 +327,14 @@
if (current_code->IsOptimized()) {
CodeInfo code_info = current_code->GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(outer_pc_offset, encoding);
DCHECK(stack_map.IsValid());
- if (stack_map.HasInlineInfo(encoding)) {
+ if (stack_map.HasInlineInfo(encoding.stack_map_encoding)) {
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
return inline_info.GetDexPcAtDepth(inline_info.GetDepth() - 1);
} else {
- return stack_map.GetDexPc(encoding);
+ return stack_map.GetDexPc(encoding.stack_map_encoding);
}
} else {
return current_code->ToDexPc(*caller_sp, outer_pc);
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index e3714bb..bd023b3 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -102,15 +102,15 @@
// Only visit the last recent_record_max_ number of allocation records in entries_ and mark the
// klass_ fields as strong roots.
for (auto it = entries_.rbegin(), end = entries_.rend(); it != end; ++it) {
- AllocRecord* record = it->second;
+ AllocRecord& record = it->second;
if (count > 0) {
- buffered_visitor.VisitRootIfNonNull(record->GetClassGcRoot());
+ buffered_visitor.VisitRootIfNonNull(record.GetClassGcRoot());
--count;
}
// Visit all of the stack frames to make sure no methods in the stack traces get unloaded by
// class unloading.
- for (size_t i = 0, depth = record->GetDepth(); i < depth; ++i) {
- const AllocRecordStackTraceElement& element = record->StackElement(i);
+ for (size_t i = 0, depth = record.GetDepth(); i < depth; ++i) {
+ const AllocRecordStackTraceElement& element = record.StackElement(i);
DCHECK(element.GetMethod() != nullptr);
element.GetMethod()->VisitRoots(buffered_visitor, sizeof(void*));
}
@@ -143,15 +143,14 @@
++count;
// This does not need a read barrier because this is called by GC.
mirror::Object* old_object = it->first.Read<kWithoutReadBarrier>();
- AllocRecord* record = it->second;
+ AllocRecord& record = it->second;
mirror::Object* new_object = old_object == nullptr ? nullptr : visitor->IsMarked(old_object);
if (new_object == nullptr) {
if (count > delete_bound) {
it->first = GcRoot<mirror::Object>(nullptr);
- SweepClassObject(record, visitor);
+ SweepClassObject(&record, visitor);
++it;
} else {
- delete record;
it = entries_.erase(it);
++count_deleted;
}
@@ -160,7 +159,7 @@
it->first = GcRoot<mirror::Object>(new_object);
++count_moved;
}
- SweepClassObject(record, visitor);
+ SweepClassObject(&record, visitor);
++it;
}
}
@@ -184,34 +183,32 @@
new_record_condition_.Broadcast(Thread::Current());
}
-struct AllocRecordStackVisitor : public StackVisitor {
- AllocRecordStackVisitor(Thread* thread, AllocRecordStackTrace* trace_in, size_t max)
+class AllocRecordStackVisitor : public StackVisitor {
+ public:
+ AllocRecordStackVisitor(Thread* thread, size_t max_depth, AllocRecordStackTrace* trace_out)
SHARED_REQUIRES(Locks::mutator_lock_)
- : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
- trace(trace_in),
- max_depth(max) {}
+ : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFramesNoResolve),
+ max_depth_(max_depth),
+ trace_(trace_out) {}
// TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
// annotalysis.
bool VisitFrame() OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
- if (depth >= max_depth) {
+ if (trace_->GetDepth() >= max_depth_) {
return false;
}
ArtMethod* m = GetMethod();
- if (!m->IsRuntimeMethod()) {
- trace->SetStackElementAt(depth, m, GetDexPc());
- ++depth;
+ // m may be null if we have inlined methods of unresolved classes. b/27858645
+ if (m != nullptr && !m->IsRuntimeMethod()) {
+ m = m->GetInterfaceMethodIfProxy(sizeof(void*));
+ trace_->AddStackElement(AllocRecordStackTraceElement(m, GetDexPc()));
}
return true;
}
- ~AllocRecordStackVisitor() {
- trace->SetDepth(depth);
- }
-
- AllocRecordStackTrace* trace;
- size_t depth = 0u;
- const size_t max_depth;
+ private:
+ const size_t max_depth_;
+ AllocRecordStackTrace* const trace_;
};
void AllocRecordObjectMap::SetAllocTrackingEnabled(bool enable) {
@@ -235,7 +232,6 @@
if (self_name == "JDWP") {
records->alloc_ddm_thread_id_ = self->GetTid();
}
- records->scratch_trace_.SetDepth(records->max_stack_depth_);
size_t sz = sizeof(AllocRecordStackTraceElement) * records->max_stack_depth_ +
sizeof(AllocRecord) + sizeof(AllocRecordStackTrace);
LOG(INFO) << "Enabling alloc tracker (" << records->alloc_record_max_ << " entries of "
@@ -265,27 +261,35 @@
}
}
-void AllocRecordObjectMap::RecordAllocation(Thread* self, mirror::Object* obj, mirror::Class* klass,
+void AllocRecordObjectMap::RecordAllocation(Thread* self,
+ mirror::Object** obj,
size_t byte_count) {
+ // Get stack trace outside of lock in case there are allocations during the stack walk.
+ // b/27858645.
+ AllocRecordStackTrace trace;
+ AllocRecordStackVisitor visitor(self, max_stack_depth_, /*out*/ &trace);
+ {
+ StackHandleScope<1> hs(self);
+ auto obj_wrapper = hs.NewHandleWrapper(obj);
+ visitor.WalkStack();
+ }
+
MutexLock mu(self, *Locks::alloc_tracker_lock_);
- Heap* heap = Runtime::Current()->GetHeap();
+ Heap* const heap = Runtime::Current()->GetHeap();
if (!heap->IsAllocTrackingEnabled()) {
// In the process of shutting down recording, bail.
return;
}
- AllocRecordObjectMap* records = heap->GetAllocationRecords();
- DCHECK(records != nullptr);
-
- // Do not record for DDM thread
- if (records->alloc_ddm_thread_id_ == self->GetTid()) {
+ // Do not record for DDM thread.
+ if (alloc_ddm_thread_id_ == self->GetTid()) {
return;
}
// Wait for GC's sweeping to complete and allow new records
- while (UNLIKELY((!kUseReadBarrier && !records->allow_new_record_) ||
+ while (UNLIKELY((!kUseReadBarrier && !allow_new_record_) ||
(kUseReadBarrier && !self->GetWeakRefAccessEnabled()))) {
- records->new_record_condition_.WaitHoldingLocks(self);
+ new_record_condition_.WaitHoldingLocks(self);
}
if (!heap->IsAllocTrackingEnabled()) {
@@ -294,28 +298,22 @@
return;
}
- DCHECK_LE(records->Size(), records->alloc_record_max_);
+ DCHECK_LE(Size(), alloc_record_max_);
- // Get stack trace.
- // add scope to make "visitor" destroyed promptly, in order to set the scratch_trace_->depth_
- {
- AllocRecordStackVisitor visitor(self, &records->scratch_trace_, records->max_stack_depth_);
- visitor.WalkStack();
- }
- records->scratch_trace_.SetTid(self->GetTid());
- AllocRecordStackTrace* trace = new AllocRecordStackTrace(records->scratch_trace_);
+ // Attach the allocating thread's id to the completed trace.
+ trace.SetTid(self->GetTid());
- // Fill in the basics.
- AllocRecord* record = new AllocRecord(byte_count, klass, trace);
-
- records->Put(obj, record);
- DCHECK_LE(records->Size(), records->alloc_record_max_);
+ // Add the record.
+ Put(*obj, AllocRecord(byte_count, (*obj)->GetClass(), std::move(trace)));
+ DCHECK_LE(Size(), alloc_record_max_);
}
void AllocRecordObjectMap::Clear() {
- STLDeleteValues(&entries_);
entries_.clear();
}
+AllocRecordObjectMap::AllocRecordObjectMap()
+ : new_record_condition_("New allocation record condition", *Locks::alloc_tracker_lock_) {}
+
} // namespace gc
} // namespace art
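
With the map owning its records by value, insertion is a move and eviction is a plain pop_front(); the manual delete in Put(), the STLDeleteValues() in Clear(), and the per-record delete during sweeping all disappear. A self-contained sketch of the capacity-capped, move-in list (int key and the Record type are stand-ins for the GC root and AllocRecord):

  #include <cstddef>
  #include <list>
  #include <string>
  #include <utility>

  struct Record {
    size_t bytes;
    std::string trace;  // stands in for the moved-in stack trace
  };

  class RecordMap {
   public:
    explicit RecordMap(size_t max) : max_(max) {}

    // Takes the record by rvalue reference and moves it into the list;
    // when full, destroying the oldest entry is just pop_front().
    void Put(int key, Record&& record) {
      if (entries_.size() == max_) {
        entries_.pop_front();
      }
      entries_.emplace_back(key, std::move(record));
    }

    void Clear() { entries_.clear(); }  // no per-entry delete needed

   private:
    const size_t max_;
    std::list<std::pair<int, Record>> entries_;
  };
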
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index 18cce4d..a2d86cc 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_GC_ALLOCATION_RECORD_H_
#include <list>
+#include <memory>
#include "base/mutex.h"
#include "object_callbacks.h"
@@ -37,10 +38,13 @@
class AllocRecordStackTraceElement {
public:
- AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) {}
-
int32_t ComputeLineNumber() const SHARED_REQUIRES(Locks::mutator_lock_);
+ AllocRecordStackTraceElement() = default;
+ AllocRecordStackTraceElement(ArtMethod* method, uint32_t dex_pc)
+ : method_(method),
+ dex_pc_(dex_pc) {}
+
ArtMethod* GetMethod() const {
return method_;
}
@@ -58,32 +62,27 @@
}
bool operator==(const AllocRecordStackTraceElement& other) const {
- if (this == &other) return true;
return method_ == other.method_ && dex_pc_ == other.dex_pc_;
}
private:
- ArtMethod* method_;
- uint32_t dex_pc_;
+ ArtMethod* method_ = nullptr;
+ uint32_t dex_pc_ = 0;
};
class AllocRecordStackTrace {
public:
static constexpr size_t kHashMultiplier = 17;
- explicit AllocRecordStackTrace(size_t max_depth)
- : tid_(0), depth_(0), stack_(new AllocRecordStackTraceElement[max_depth]) {}
+ AllocRecordStackTrace() = default;
+
+ AllocRecordStackTrace(AllocRecordStackTrace&& r)
+ : tid_(r.tid_),
+ stack_(std::move(r.stack_)) {}
AllocRecordStackTrace(const AllocRecordStackTrace& r)
- : tid_(r.tid_), depth_(r.depth_), stack_(new AllocRecordStackTraceElement[r.depth_]) {
- for (size_t i = 0; i < depth_; ++i) {
- stack_[i] = r.stack_[i];
- }
- }
-
- ~AllocRecordStackTrace() {
- delete[] stack_;
- }
+ : tid_(r.tid_),
+ stack_(r.stack_) {}
pid_t GetTid() const {
return tid_;
@@ -94,37 +93,32 @@
}
size_t GetDepth() const {
- return depth_;
- }
-
- void SetDepth(size_t depth) {
- depth_ = depth;
+ return stack_.size();
}
const AllocRecordStackTraceElement& GetStackElement(size_t index) const {
- DCHECK_LT(index, depth_);
+ DCHECK_LT(index, GetDepth());
return stack_[index];
}
+ void AddStackElement(const AllocRecordStackTraceElement& element) {
+ stack_.push_back(element);
+ }
+
void SetStackElementAt(size_t index, ArtMethod* m, uint32_t dex_pc) {
+ DCHECK_LT(index, stack_.size());
stack_[index].SetMethod(m);
stack_[index].SetDexPc(dex_pc);
}
bool operator==(const AllocRecordStackTrace& other) const {
if (this == &other) return true;
- if (tid_ != other.tid_) return false;
- if (depth_ != other.depth_) return false;
- for (size_t i = 0; i < depth_; ++i) {
- if (!(stack_[i] == other.stack_[i])) return false;
- }
- return true;
+ return tid_ == other.tid_ && stack_ == other.stack_;
}
private:
- pid_t tid_;
- size_t depth_;
- AllocRecordStackTraceElement* const stack_;
+ pid_t tid_ = 0;
+ std::vector<AllocRecordStackTraceElement> stack_;
};
struct HashAllocRecordTypes {
@@ -161,19 +155,15 @@
class AllocRecord {
public:
// All instances of AllocRecord should be managed by an instance of AllocRecordObjectMap.
- AllocRecord(size_t count, mirror::Class* klass, AllocRecordStackTrace* trace)
- : byte_count_(count), klass_(klass), trace_(trace) {}
-
- ~AllocRecord() {
- delete trace_;
- }
+ AllocRecord(size_t count, mirror::Class* klass, AllocRecordStackTrace&& trace)
+ : byte_count_(count), klass_(klass), trace_(std::move(trace)) {}
size_t GetDepth() const {
- return trace_->GetDepth();
+ return trace_.GetDepth();
}
const AllocRecordStackTrace* GetStackTrace() const {
- return trace_;
+ return &trace_;
}
size_t ByteCount() const {
@@ -181,7 +171,7 @@
}
pid_t GetTid() const {
- return trace_->GetTid();
+ return trace_.GetTid();
}
mirror::Class* GetClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
@@ -196,16 +186,15 @@
}
const AllocRecordStackTraceElement& StackElement(size_t index) const {
- return trace_->GetStackElement(index);
+ return trace_.GetStackElement(index);
}
private:
const size_t byte_count_;
// The klass_ could be a strong or weak root for GC
GcRoot<mirror::Class> klass_;
- // TODO: Currently trace_ is like a std::unique_ptr,
- // but in future with deduplication it could be a std::shared_ptr.
- const AllocRecordStackTrace* const trace_;
+ // TODO: Share between alloc records with identical stack traces.
+ AllocRecordStackTrace trace_;
};
class AllocRecordObjectMap {
@@ -215,36 +204,29 @@
// weak roots). The last recent_record_max_ number of pairs in the list are always kept for DDMS's
// recent allocation tracking, but GcRoot<mirror::Object> pointers in these pairs can become null.
// Both types of pointers need read barriers, do not directly access them.
- typedef std::list<std::pair<GcRoot<mirror::Object>, AllocRecord*>> EntryList;
+ using EntryPair = std::pair<GcRoot<mirror::Object>, AllocRecord>;
+ typedef std::list<EntryPair> EntryList;
- // "static" because it is part of double-checked locking. It needs to check a bool first,
- // in order to make sure the AllocRecordObjectMap object is not null.
- static void RecordAllocation(Thread* self, mirror::Object* obj, mirror::Class* klass,
- size_t byte_count)
+ // Caller needs to check that it is enabled before calling since we read the stack trace before
+ // checking the enabled boolean.
+ void RecordAllocation(Thread* self,
+ mirror::Object** obj,
+ size_t byte_count)
REQUIRES(!Locks::alloc_tracker_lock_)
SHARED_REQUIRES(Locks::mutator_lock_);
static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_);
- AllocRecordObjectMap() REQUIRES(Locks::alloc_tracker_lock_)
- : alloc_record_max_(kDefaultNumAllocRecords),
- recent_record_max_(kDefaultNumRecentRecords),
- max_stack_depth_(kDefaultAllocStackDepth),
- scratch_trace_(kMaxSupportedStackDepth),
- alloc_ddm_thread_id_(0),
- allow_new_record_(true),
- new_record_condition_("New allocation record condition", *Locks::alloc_tracker_lock_) {}
-
+ AllocRecordObjectMap() REQUIRES(Locks::alloc_tracker_lock_);
~AllocRecordObjectMap();
- void Put(mirror::Object* obj, AllocRecord* record)
+ void Put(mirror::Object* obj, AllocRecord&& record)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_) {
if (entries_.size() == alloc_record_max_) {
- delete entries_.front().second;
entries_.pop_front();
}
- entries_.emplace_back(GcRoot<mirror::Object>(obj), record);
+ entries_.push_back(EntryPair(GcRoot<mirror::Object>(obj), std::move(record)));
}
size_t Size() const SHARED_REQUIRES(Locks::alloc_tracker_lock_) {
@@ -313,12 +295,11 @@
static constexpr size_t kDefaultNumRecentRecords = 64 * 1024 - 1;
static constexpr size_t kDefaultAllocStackDepth = 16;
static constexpr size_t kMaxSupportedStackDepth = 128;
- size_t alloc_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_);
- size_t recent_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_);
- size_t max_stack_depth_ GUARDED_BY(Locks::alloc_tracker_lock_);
- AllocRecordStackTrace scratch_trace_ GUARDED_BY(Locks::alloc_tracker_lock_);
- pid_t alloc_ddm_thread_id_ GUARDED_BY(Locks::alloc_tracker_lock_);
- bool allow_new_record_ GUARDED_BY(Locks::alloc_tracker_lock_);
+ size_t alloc_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_) = kDefaultNumAllocRecords;
+ size_t recent_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_) = kDefaultNumRecentRecords;
+ size_t max_stack_depth_ = kDefaultAllocStackDepth;
+ pid_t alloc_ddm_thread_id_ GUARDED_BY(Locks::alloc_tracker_lock_) = 0;
+ bool allow_new_record_ GUARDED_BY(Locks::alloc_tracker_lock_) = true;
ConditionVariable new_record_condition_ GUARDED_BY(Locks::alloc_tracker_lock_);
// See the comment in the typedef of EntryList.
EntryList entries_ GUARDED_BY(Locks::alloc_tracker_lock_);
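
Backing the trace with std::vector makes depth simply size(): the visitor appends elements as it walks, so the separate depth_ counter, SetDepth(), and the element-by-element operator== loop all go away. A minimal sketch of the append-only trace, using in-class member initializers the way the new element class does:

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  struct TraceElement {
    const void* method = nullptr;  // in-class initializers replace the
    uint32_t dex_pc = 0;           // hand-written default constructor
    bool operator==(const TraceElement& o) const {
      return method == o.method && dex_pc == o.dex_pc;
    }
  };

  class Trace {
   public:
    void Add(const TraceElement& e) { stack_.push_back(e); }
    size_t Depth() const { return stack_.size(); }  // was a tracked counter

    bool operator==(const Trace& other) const {
      return stack_ == other.stack_;  // vector compares element-wise
    }

   private:
    std::vector<TraceElement> stack_;
  };
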
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 59fd4a6..6aed61a 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -176,8 +176,10 @@
}
if (kInstrumented) {
if (IsAllocTrackingEnabled()) {
- // Use obj->GetClass() instead of klass, because PushOnAllocationStack() could move klass
- AllocRecordObjectMap::RecordAllocation(self, obj, obj->GetClass(), bytes_allocated);
+ // allocation_records_ is not null since it never becomes null after allocation tracking is
+ // enabled.
+ DCHECK(allocation_records_ != nullptr);
+ allocation_records_->RecordAllocation(self, &obj, bytes_allocated);
}
} else {
DCHECK(!IsAllocTrackingEnabled());
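
RecordAllocation() now takes mirror::Object** rather than the object and its class separately: the stack walk may suspend, and the HandleWrapper in the allocation_record.cc hunk lets a moving collector update the caller's pointer, after which (*obj)->GetClass() reads from the up-to-date location. A toy sketch of that double-pointer contract — MaybeMoveDuringWalk stands in for a relocating GC, not a real runtime call:

  struct Object { int data; };

  void MaybeMoveDuringWalk(Object** obj) {
    static Object moved_copy{42};
    *obj = &moved_copy;  // stands in for a GC relocating the object
  }

  void RecordAllocation(Object** obj) {
    MaybeMoveDuringWalk(obj);  // the stack walk could trigger a move
    // From here on, *obj is the object's current location.
    (*obj)->data += 1;
  }
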
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 2925591..fada1a2 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -1326,8 +1326,7 @@
// Allocation tracking support
Atomic<bool> alloc_tracking_enabled_;
- std::unique_ptr<AllocRecordObjectMap> allocation_records_
- GUARDED_BY(Locks::alloc_tracker_lock_);
+ std::unique_ptr<AllocRecordObjectMap> allocation_records_;
// GC stress related data structures.
Mutex* backtrace_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index bb35ec7..3885c60 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -828,7 +828,7 @@
continue;
}
++count;
- const gc::AllocRecordStackTrace* trace = it->second->GetStackTrace();
+ const gc::AllocRecordStackTrace* trace = it->second.GetStackTrace();
// Copy the pair into a real hash map to speed up look up.
auto records_result = allocation_records_.emplace(obj, trace);
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index d70a7c4..f365fd0 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -290,7 +290,6 @@
JValue result;
self->AllowThreadSuspension();
HANDLE_MONITOR_CHECKS();
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
@@ -305,7 +304,6 @@
JValue result;
self->AllowThreadSuspension();
HANDLE_MONITOR_CHECKS();
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
@@ -321,7 +319,6 @@
result.SetI(shadow_frame.GetVReg(inst->VRegA_11x(inst_data)));
self->AllowThreadSuspension();
HANDLE_MONITOR_CHECKS();
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
@@ -336,7 +333,6 @@
result.SetJ(shadow_frame.GetVRegLong(inst->VRegA_11x(inst_data)));
self->AllowThreadSuspension();
HANDLE_MONITOR_CHECKS();
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
@@ -372,7 +368,6 @@
}
}
result.SetL(obj_result);
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (UNLIKELY(instrumentation->HasMethodExitListeners())) {
instrumentation->MethodExitEvent(self, shadow_frame.GetThisObject(code_item->ins_size_),
shadow_frame.GetMethod(), dex_pc,
@@ -2588,7 +2583,6 @@
self->CheckSuspend();
UPDATE_HANDLER_TABLE();
}
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
uint32_t found_dex_pc = FindNextInstructionFollowingException(self, shadow_frame, dex_pc,
instrumentation);
if (found_dex_pc == DexFile::kDexNoIndex) {
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index eaea01d..81be959 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -353,28 +353,35 @@
jint src_pos = shadow_frame->GetVReg(arg_offset + 1);
jint dst_pos = shadow_frame->GetVReg(arg_offset + 3);
jint length = shadow_frame->GetVReg(arg_offset + 4);
- mirror::Array* src_array = shadow_frame->GetVRegReference(arg_offset)->AsArray();
- mirror::Array* dst_array = shadow_frame->GetVRegReference(arg_offset + 2)->AsArray();
- // Null checking.
- if (src_array == nullptr) {
+ mirror::Object* src_obj = shadow_frame->GetVRegReference(arg_offset);
+ mirror::Object* dst_obj = shadow_frame->GetVRegReference(arg_offset + 2);
+ // Null checking. For simplicity, abort transaction.
+ if (src_obj == nullptr) {
AbortTransactionOrFail(self, "src is null in arraycopy.");
return;
}
- if (dst_array == nullptr) {
+ if (dst_obj == nullptr) {
AbortTransactionOrFail(self, "dst is null in arraycopy.");
return;
}
+ // Test for arrayness. Throw ArrayStoreException.
+ if (!src_obj->IsArrayInstance() || !dst_obj->IsArrayInstance()) {
+ self->ThrowNewException("Ljava/lang/ArrayStoreException;", "src or trg is not an array");
+ return;
+ }
- // Bounds checking.
+ mirror::Array* src_array = src_obj->AsArray();
+ mirror::Array* dst_array = dst_obj->AsArray();
+
+ // Bounds checking. Throw IndexOutOfBoundsException.
if (UNLIKELY(src_pos < 0) || UNLIKELY(dst_pos < 0) || UNLIKELY(length < 0) ||
UNLIKELY(src_pos > src_array->GetLength() - length) ||
UNLIKELY(dst_pos > dst_array->GetLength() - length)) {
- self->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
+ self->ThrowNewExceptionF("Ljava/lang/IndexOutOfBoundsException;",
"src.length=%d srcPos=%d dst.length=%d dstPos=%d length=%d",
src_array->GetLength(), src_pos, dst_array->GetLength(), dst_pos,
length);
- AbortTransactionOrFail(self, "Index out of bounds.");
return;
}
@@ -393,19 +400,11 @@
return;
}
- // For simplicity only do this if the component types are the same. Otherwise we have to copy
- // even more code from the object-array functions.
- if (src_type != trg_type) {
- AbortTransactionOrFail(self, "Types not the same in arraycopy: %s vs %s",
- PrettyDescriptor(src_array->GetClass()->GetComponentType()).c_str(),
- PrettyDescriptor(dst_array->GetClass()->GetComponentType()).c_str());
- return;
- }
-
mirror::ObjectArray<mirror::Object>* src = src_array->AsObjectArray<mirror::Object>();
mirror::ObjectArray<mirror::Object>* dst = dst_array->AsObjectArray<mirror::Object>();
if (src == dst) {
// Can overlap, but not have type mismatches.
+ // We cannot use ObjectArray::MemMove here, as it doesn't support transactions.
const bool copy_forward = (dst_pos < src_pos) || (dst_pos - src_pos >= length);
if (copy_forward) {
for (int32_t i = 0; i < length; ++i) {
@@ -417,9 +416,15 @@
}
}
} else {
- // Can't overlap. Would need type checks, but we abort above.
- for (int32_t i = 0; i < length; ++i) {
- dst->Set(dst_pos + i, src->Get(src_pos + i));
+ // We're being lazy here. Optimally this could be a memcpy (if component types are
+ // assignable), but the ObjectArray implementation doesn't support transactions. The
+ // checking version, however, does.
+ if (Runtime::Current()->IsActiveTransaction()) {
+ dst->AssignableCheckingMemcpy<true>(
+ dst_pos, src, src_pos, length, true /* throw_exception */);
+ } else {
+ dst->AssignableCheckingMemcpy<false>(
+ dst_pos, src, src_pos, length, true /* throw_exception */);
}
}
} else if (src_type->IsPrimitiveChar()) {
@@ -444,46 +449,42 @@
UnstartedRuntime::UnstartedSystemArraycopy(self, shadow_frame, result, arg_offset);
}
+void UnstartedRuntime::UnstartedSystemGetSecurityManager(
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame ATTRIBUTE_UNUSED,
+ JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
+ result->SetL(nullptr);
+}
+
void UnstartedRuntime::UnstartedThreadLocalGet(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED) {
std::string caller(PrettyMethod(shadow_frame->GetLink()->GetMethod()));
bool ok = false;
- if (caller == "java.lang.String java.lang.IntegralToString.convertInt"
- "(java.lang.AbstractStringBuilder, int)") {
+ if (caller == "void java.lang.FloatingDecimal.developLongDigits(int, long, long)" ||
+ caller == "java.lang.String java.lang.FloatingDecimal.toJavaFormatString()") {
// Allocate non-threadlocal buffer.
- result->SetL(mirror::CharArray::Alloc(self, 11));
+ result->SetL(mirror::CharArray::Alloc(self, 26));
ok = true;
- } else if (caller == "java.lang.RealToString java.lang.RealToString.getInstance()") {
- // Note: RealToString is implemented and used in a different fashion than IntegralToString.
- // Conversion is done over an actual object of RealToString (the conversion method is an
- // instance method). This means it is not as clear whether it is correct to return a new
- // object each time. The caller needs to be inspected by hand to see whether it (incorrectly)
- // stores the object for later use.
- // See also b/19548084 for a possible rewrite and bringing it in line with IntegralToString.
- if (shadow_frame->GetLink()->GetLink() != nullptr) {
- std::string caller2(PrettyMethod(shadow_frame->GetLink()->GetLink()->GetMethod()));
- if (caller2 == "java.lang.String java.lang.Double.toString(double)") {
- // Allocate new object.
- StackHandleScope<2> hs(self);
- Handle<mirror::Class> h_real_to_string_class(hs.NewHandle(
- shadow_frame->GetLink()->GetMethod()->GetDeclaringClass()));
- Handle<mirror::Object> h_real_to_string_obj(hs.NewHandle(
- h_real_to_string_class->AllocObject(self)));
- if (h_real_to_string_obj.Get() != nullptr) {
- auto* cl = Runtime::Current()->GetClassLinker();
- ArtMethod* init_method = h_real_to_string_class->FindDirectMethod(
- "<init>", "()V", cl->GetImagePointerSize());
- if (init_method == nullptr) {
- h_real_to_string_class->DumpClass(LOG(FATAL), mirror::Class::kDumpClassFullDetail);
- } else {
- JValue invoke_result;
- EnterInterpreterFromInvoke(self, init_method, h_real_to_string_obj.Get(), nullptr,
- nullptr);
- if (!self->IsExceptionPending()) {
- result->SetL(h_real_to_string_obj.Get());
- ok = true;
- }
- }
+ } else if (caller ==
+ "java.lang.FloatingDecimal java.lang.FloatingDecimal.getThreadLocalInstance()") {
+ // Allocate new object.
+ StackHandleScope<2> hs(self);
+ Handle<mirror::Class> h_real_to_string_class(hs.NewHandle(
+ shadow_frame->GetLink()->GetMethod()->GetDeclaringClass()));
+ Handle<mirror::Object> h_real_to_string_obj(hs.NewHandle(
+ h_real_to_string_class->AllocObject(self)));
+ if (h_real_to_string_obj.Get() != nullptr) {
+ auto* cl = Runtime::Current()->GetClassLinker();
+ ArtMethod* init_method = h_real_to_string_class->FindDirectMethod(
+ "<init>", "()V", cl->GetImagePointerSize());
+ if (init_method == nullptr) {
+ h_real_to_string_class->DumpClass(LOG(FATAL), mirror::Class::kDumpClassFullDetail);
+ } else {
+ JValue invoke_result;
+ EnterInterpreterFromInvoke(self, init_method, h_real_to_string_obj.Get(), nullptr,
+ nullptr);
+ if (!self->IsExceptionPending()) {
+ result->SetL(h_real_to_string_obj.Get());
+ ok = true;
}
}
}
@@ -1234,6 +1235,19 @@
result->SetZ(success ? JNI_TRUE : JNI_FALSE);
}
+void UnstartedRuntime::UnstartedJNIUnsafeGetIntVolatile(
+ Thread* self, ArtMethod* method ATTRIBUTE_UNUSED, mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args, JValue* result) {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]);
+ if (obj == nullptr) {
+ AbortTransactionOrFail(self, "Cannot access null object, retry at runtime.");
+ return;
+ }
+
+ jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1];
+ result->SetI(obj->GetField32Volatile(MemberOffset(offset)));
+}
+
void UnstartedRuntime::UnstartedJNIUnsafePutObject(
Thread* self ATTRIBUTE_UNUSED, ArtMethod* method ATTRIBUTE_UNUSED,
mirror::Object* receiver ATTRIBUTE_UNUSED, uint32_t* args, JValue* result ATTRIBUTE_UNUSED) {
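
UnstartedJNIUnsafeGetIntVolatile rebuilds the 64-bit field offset from two 32-bit argument slots, low word in args[1] and high word in args[2]. The same reassembly in isolation, with a quick check of the arithmetic:

  #include <cassert>
  #include <cstdint>

  // Mirrors (static_cast<uint64_t>(args[2]) << 32) | args[1] above:
  // args[1] holds the low 32 bits, args[2] the high 32 bits.
  int64_t JoinWords(const uint32_t* args) {
    return static_cast<int64_t>(
        (static_cast<uint64_t>(args[2]) << 32) | args[1]);
  }

  int main() {
    uint32_t args[3] = {0, 0xDEADBEEFu, 0x1u};
    assert(JoinWords(args) == 0x1DEADBEEFLL);  // 0x00000001'DEADBEEF
    return 0;
  }
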
diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h
index 29f2197..d669b75 100644
--- a/runtime/interpreter/unstarted_runtime_list.h
+++ b/runtime/interpreter/unstarted_runtime_list.h
@@ -31,6 +31,7 @@
V(SystemArraycopy, "void java.lang.System.arraycopy(java.lang.Object, int, java.lang.Object, int, int)") \
V(SystemArraycopyChar, "void java.lang.System.arraycopy(char[], int, char[], int, int)") \
V(SystemArraycopyInt, "void java.lang.System.arraycopy(int[], int, int[], int, int)") \
+ V(SystemGetSecurityManager, "java.lang.SecurityManager java.lang.System.getSecurityManager()") \
V(ThreadLocalGet, "java.lang.Object java.lang.ThreadLocal.get()") \
V(MathCeil, "double java.lang.Math.ceil(double)") \
V(ObjectHashCode, "int java.lang.Object.hashCode()") \
@@ -79,6 +80,7 @@
V(SystemIdentityHashCode, "int java.lang.System.identityHashCode(java.lang.Object)") \
V(ByteOrderIsLittleEndian, "boolean java.nio.ByteOrder.isLittleEndian()") \
V(UnsafeCompareAndSwapInt, "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)") \
+ V(UnsafeGetIntVolatile, "int sun.misc.Unsafe.getIntVolatile(java.lang.Object, long)") \
V(UnsafePutObject, "void sun.misc.Unsafe.putObject(java.lang.Object, long, java.lang.Object)") \
V(UnsafeGetArrayBaseOffsetForComponentType, "int sun.misc.Unsafe.getArrayBaseOffsetForComponentType(java.lang.Class)") \
V(UnsafeGetArrayIndexScaleForComponentType, "int sun.misc.Unsafe.getArrayIndexScaleForComponentType(java.lang.Class)")
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index a1ae2aa..fb53b1d 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -66,6 +66,94 @@
#undef UNSTARTED_RUNTIME_DIRECT_LIST
#undef UNSTARTED_RUNTIME_JNI_LIST
#undef UNSTARTED_JNI
+
+ // Helpers for ArrayCopy.
+ //
+ // Note: as we have to use handles, we use StackHandleScope to transfer data. Hardcode a size
+ // of three everywhere. That is enough to test all cases.
+
+ static mirror::ObjectArray<mirror::Object>* CreateObjectArray(
+ Thread* self,
+ mirror::Class* component_type,
+ const StackHandleScope<3>& data)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ Runtime* runtime = Runtime::Current();
+ mirror::Class* array_type = runtime->GetClassLinker()->FindArrayClass(self, &component_type);
+ CHECK(array_type != nullptr);
+ mirror::ObjectArray<mirror::Object>* result =
+ mirror::ObjectArray<mirror::Object>::Alloc(self, array_type, 3);
+ CHECK(result != nullptr);
+ for (size_t i = 0; i < 3; ++i) {
+ result->Set(static_cast<int32_t>(i), data.GetReference(i));
+ CHECK(!self->IsExceptionPending());
+ }
+ return result;
+ }
+
+ static void CheckObjectArray(mirror::ObjectArray<mirror::Object>* array,
+ const StackHandleScope<3>& data)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ CHECK_EQ(array->GetLength(), 3);
+ CHECK_EQ(data.NumberOfReferences(), 3U);
+ for (size_t i = 0; i < 3; ++i) {
+ EXPECT_EQ(data.GetReference(i), array->Get(static_cast<int32_t>(i))) << i;
+ }
+ }
+
+ void RunArrayCopy(Thread* self,
+ ShadowFrame* tmp,
+ bool expect_exception,
+ mirror::ObjectArray<mirror::Object>* src,
+ int32_t src_pos,
+ mirror::ObjectArray<mirror::Object>* dst,
+ int32_t dst_pos,
+ int32_t length)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ JValue result;
+ tmp->SetVRegReference(0, src);
+ tmp->SetVReg(1, src_pos);
+ tmp->SetVRegReference(2, dst);
+ tmp->SetVReg(3, dst_pos);
+ tmp->SetVReg(4, length);
+ UnstartedSystemArraycopy(self, tmp, &result, 0);
+ bool exception_pending = self->IsExceptionPending();
+ EXPECT_EQ(exception_pending, expect_exception);
+ if (exception_pending) {
+ self->ClearException();
+ }
+ }
+
+ void RunArrayCopy(Thread* self,
+ ShadowFrame* tmp,
+ bool expect_exception,
+ mirror::Class* src_component_class,
+ mirror::Class* dst_component_class,
+ const StackHandleScope<3>& src_data,
+ int32_t src_pos,
+ const StackHandleScope<3>& dst_data,
+ int32_t dst_pos,
+ int32_t length,
+ const StackHandleScope<3>& expected_result)
+ SHARED_REQUIRES(Locks::mutator_lock_) {
+ StackHandleScope<3> hs_misc(self);
+ Handle<mirror::Class> dst_component_handle(hs_misc.NewHandle(dst_component_class));
+
+ Handle<mirror::ObjectArray<mirror::Object>> src_handle(
+ hs_misc.NewHandle(CreateObjectArray(self, src_component_class, src_data)));
+
+ Handle<mirror::ObjectArray<mirror::Object>> dst_handle(
+ hs_misc.NewHandle(CreateObjectArray(self, dst_component_handle.Get(), dst_data)));
+
+ RunArrayCopy(self,
+ tmp,
+ expect_exception,
+ src_handle.Get(),
+ src_pos,
+ dst_handle.Get(),
+ dst_pos,
+ length);
+ CheckObjectArray(dst_handle.Get(), expected_result);
+ }
};
TEST_F(UnstartedRuntimeTest, MemoryPeekByte) {
@@ -277,5 +365,148 @@
ShadowFrame::DeleteDeoptimizedFrame(shadow_frame);
}
+// Tests the exceptions that should be checked before modifying the destination.
+// (Doesn't check the object vs primitive case ATM.)
+TEST_F(UnstartedRuntimeTest, SystemArrayCopyObjectArrayTestExceptions) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ JValue result;
+ ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+ // Note: none of these tests are GC safe. Assume there's no GC running here with the few objects we
+ // allocate.
+ StackHandleScope<2> hs_misc(self);
+ Handle<mirror::Class> object_class(
+ hs_misc.NewHandle(mirror::Class::GetJavaLangClass()->GetSuperClass()));
+
+ StackHandleScope<3> hs_data(self);
+ hs_data.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "1"));
+ hs_data.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "2"));
+ hs_data.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "3"));
+
+ Handle<mirror::ObjectArray<mirror::Object>> array(
+ hs_misc.NewHandle(CreateObjectArray(self, object_class.Get(), hs_data)));
+
+ RunArrayCopy(self, tmp, true, array.Get(), -1, array.Get(), 0, 0);
+ RunArrayCopy(self, tmp, true, array.Get(), 0, array.Get(), -1, 0);
+ RunArrayCopy(self, tmp, true, array.Get(), 0, array.Get(), 0, -1);
+ RunArrayCopy(self, tmp, true, array.Get(), 0, array.Get(), 0, 4);
+ RunArrayCopy(self, tmp, true, array.Get(), 0, array.Get(), 1, 3);
+ RunArrayCopy(self, tmp, true, array.Get(), 1, array.Get(), 0, 3);
+
+ mirror::ObjectArray<mirror::Object>* class_as_array =
+ reinterpret_cast<mirror::ObjectArray<mirror::Object>*>(object_class.Get());
+ RunArrayCopy(self, tmp, true, class_as_array, 0, array.Get(), 0, 0);
+ RunArrayCopy(self, tmp, true, array.Get(), 0, class_as_array, 0, 0);
+
+ ShadowFrame::DeleteDeoptimizedFrame(tmp);
+}
+
+TEST_F(UnstartedRuntimeTest, SystemArrayCopyObjectArrayTest) {
+ Thread* self = Thread::Current();
+ ScopedObjectAccess soa(self);
+ JValue result;
+ ShadowFrame* tmp = ShadowFrame::CreateDeoptimizedFrame(10, nullptr, nullptr, 0);
+
+ StackHandleScope<1> hs_object(self);
+ Handle<mirror::Class> object_class(
+ hs_object.NewHandle(mirror::Class::GetJavaLangClass()->GetSuperClass()));
+
+ // Simple test:
+ // [1,2,3]{1 @ 2} into [4,5,6] = [4,5,2]
+ {
+ StackHandleScope<3> hs_src(self);
+ hs_src.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "1"));
+ hs_src.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "2"));
+ hs_src.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "3"));
+
+ StackHandleScope<3> hs_dst(self);
+ hs_dst.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "4"));
+ hs_dst.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "5"));
+ hs_dst.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "6"));
+
+ StackHandleScope<3> hs_expected(self);
+ hs_expected.NewHandle(hs_dst.GetReference(0));
+ hs_expected.NewHandle(hs_dst.GetReference(1));
+ hs_expected.NewHandle(hs_src.GetReference(1));
+
+ RunArrayCopy(self,
+ tmp,
+ false,
+ object_class.Get(),
+ object_class.Get(),
+ hs_src,
+ 1,
+ hs_dst,
+ 2,
+ 1,
+ hs_expected);
+ }
+
+ // Simple test:
+ // [1,2,3]{1 @ 1} into [4,5,6] = [4,2,6] (with dst String[])
+ {
+ StackHandleScope<3> hs_src(self);
+ hs_src.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "1"));
+ hs_src.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "2"));
+ hs_src.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "3"));
+
+ StackHandleScope<3> hs_dst(self);
+ hs_dst.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "4"));
+ hs_dst.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "5"));
+ hs_dst.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "6"));
+
+ StackHandleScope<3> hs_expected(self);
+ hs_expected.NewHandle(hs_dst.GetReference(0));
+ hs_expected.NewHandle(hs_src.GetReference(1));
+ hs_expected.NewHandle(hs_dst.GetReference(2));
+
+ RunArrayCopy(self,
+ tmp,
+ false,
+ object_class.Get(),
+ mirror::String::GetJavaLangString(),
+ hs_src,
+ 1,
+ hs_dst,
+ 1,
+ 1,
+ hs_expected);
+ }
+
+ // Simple test:
+ // [1,*,3] into [4,5,6] = [1,5,6] + exc
+ {
+ StackHandleScope<3> hs_src(self);
+ hs_src.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "1"));
+ hs_src.NewHandle(mirror::String::GetJavaLangString());
+ hs_src.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "3"));
+
+ StackHandleScope<3> hs_dst(self);
+ hs_dst.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "4"));
+ hs_dst.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "5"));
+ hs_dst.NewHandle(mirror::String::AllocFromModifiedUtf8(self, "6"));
+
+ StackHandleScope<3> hs_expected(self);
+ hs_expected.NewHandle(hs_src.GetReference(0));
+ hs_expected.NewHandle(hs_dst.GetReference(1));
+ hs_expected.NewHandle(hs_dst.GetReference(2));
+
+ RunArrayCopy(self,
+ tmp,
+ true,
+ object_class.Get(),
+ mirror::String::GetJavaLangString(),
+ hs_src,
+ 0,
+ hs_dst,
+ 0,
+ 3,
+ hs_expected);
+ }
+
+ ShadowFrame::DeleteDeoptimizedFrame(tmp);
+}
+
} // namespace interpreter
} // namespace art
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 6496afd..73aaf04 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -362,7 +362,7 @@
}
CodeInfo code_info = osr_method->GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
// Find stack map starting at the target dex_pc.
StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset, encoding);
@@ -421,7 +421,8 @@
}
}
- native_pc = stack_map.GetNativePcOffset(encoding) + osr_method->GetEntryPoint();
+ native_pc = stack_map.GetNativePcOffset(encoding.stack_map_encoding) +
+ osr_method->GetEntryPoint();
VLOG(jit) << "Jumping to "
<< method_name
<< "@"
diff --git a/runtime/memory_region.h b/runtime/memory_region.h
index 13c69ac..fbb0441 100644
--- a/runtime/memory_region.h
+++ b/runtime/memory_region.h
@@ -138,7 +138,7 @@
// bit of the stored `value`. `value` must not be larger than `length`
// bits.
void StoreBits(uintptr_t bit_offset, uint32_t value, size_t length) {
- CHECK_LT(value, 2u << length);
+ CHECK_LE(value, MaxInt<uint32_t>(length));
for (size_t i = 0; i < length; ++i) {
bool ith_bit = value & (1 << i);
StoreBit(bit_offset + i, ith_bit);
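
The old bound CHECK_LT(value, 2u << length) accepted one bit too many — for length = 4 it let any value below 32 through, though only values up to 15 fit in four bits — and 2u << length wraps to 0 once length reaches 31, rejecting everything. Checking value against the largest length-bit integer closes both holes. A sketch of what a MaxInt-style helper is assumed to compute:

  #include <cassert>
  #include <cstddef>
  #include <cstdint>

  // Largest value representable in `length` bits.
  constexpr uint32_t MaxValueForBits(size_t length) {
    return length >= 32 ? UINT32_MAX : (uint32_t{1} << length) - 1u;
  }

  int main() {
    assert(MaxValueForBits(4) == 15u);          // old check accepted up to 31
    assert(MaxValueForBits(1) == 1u);
    assert(MaxValueForBits(32) == UINT32_MAX);  // no shift overflow
    return 0;
  }
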
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 6f9d642..c3c5231 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -197,6 +197,7 @@
}
template<class T>
+template<bool kTransactionActive>
inline void ObjectArray<T>::AssignableCheckingMemcpy(int32_t dst_pos, ObjectArray<T>* src,
int32_t src_pos, int32_t count,
bool throw_exception) {
@@ -215,15 +216,15 @@
o = src->GetWithoutChecks(src_pos + i);
if (o == nullptr) {
// Null is always assignable.
- SetWithoutChecks<false>(dst_pos + i, nullptr);
+ SetWithoutChecks<kTransactionActive>(dst_pos + i, nullptr);
} else {
// TODO: use the underlying class reference to avoid uncompression when not necessary.
Class* o_class = o->GetClass();
if (LIKELY(lastAssignableElementClass == o_class)) {
- SetWithoutChecks<false>(dst_pos + i, o);
+ SetWithoutChecks<kTransactionActive>(dst_pos + i, o);
} else if (LIKELY(dst_class->IsAssignableFrom(o_class))) {
lastAssignableElementClass = o_class;
- SetWithoutChecks<false>(dst_pos + i, o);
+ SetWithoutChecks<kTransactionActive>(dst_pos + i, o);
} else {
// Can't put this element into the array, break to perform write-barrier and throw
// exception.
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index 1b1295c..4257396 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -78,6 +78,7 @@
int32_t count) SHARED_REQUIRES(Locks::mutator_lock_);
// Copy src into this array with assignability checks.
+ template<bool kTransactionActive>
void AssignableCheckingMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
int32_t count, bool throw_exception)
SHARED_REQUIRES(Locks::mutator_lock_);
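
Making kTransactionActive a template parameter lets each instantiation of AssignableCheckingMemcpy call the matching SetWithoutChecks<kTransactionActive> without a per-element branch; the unstarted_runtime.cc hunk earlier branches once on Runtime::Current()->IsActiveTransaction() and picks the instantiation. A standalone sketch of that dispatch pattern (Store and CopyElement are illustrative, not ART APIs):

  #include <iostream>

  // Hypothetical store with a compile-time transaction flag.
  template <bool kTransactionActive>
  void Store(int* dst, int value) {
    if (kTransactionActive) {
      // A real implementation would record the old value for rollback.
      std::cout << "recording old value " << *dst << " for rollback\n";
    }
    *dst = value;
  }

  void CopyElement(int* dst, int value, bool transaction_active) {
    // Branch once at the boundary; each instantiation's body is
    // branch-free on the flag.
    if (transaction_active) {
      Store<true>(dst, value);
    } else {
      Store<false>(dst, value);
    }
  }

  int main() {
    int x = 1;
    CopyElement(&x, 2, /*transaction_active=*/ true);
    return 0;
  }
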
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 1e4b35f..0abe39d 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -456,6 +456,31 @@
return CompilerFilter::DependsOnProfile(filter) ? JNI_TRUE : JNI_FALSE;
}
+static jstring DexFile_getNonProfileGuidedCompilerFilter(JNIEnv* env,
+ jclass javaDexFileClass ATTRIBUTE_UNUSED,
+ jstring javaCompilerFilter) {
+ ScopedUtfChars compiler_filter(env, javaCompilerFilter);
+ if (env->ExceptionCheck()) {
+ return nullptr;
+ }
+
+ CompilerFilter::Filter filter;
+ if (!CompilerFilter::ParseCompilerFilter(compiler_filter.c_str(), &filter)) {
+ return javaCompilerFilter;
+ }
+
+ CompilerFilter::Filter new_filter = CompilerFilter::GetNonProfileDependentFilterFrom(filter);
+
+ // Filter stayed the same, return input.
+ if (filter == new_filter) {
+ return javaCompilerFilter;
+ }
+
+ // Create a new string object and return.
+ std::string new_filter_str = CompilerFilter::NameOfFilter(new_filter);
+ return env->NewStringUTF(new_filter_str.c_str());
+}
+
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(DexFile, closeDexFile, "(Ljava/lang/Object;)Z"),
NATIVE_METHOD(DexFile,
@@ -478,6 +503,9 @@
")Ljava/lang/Object;"),
NATIVE_METHOD(DexFile, isValidCompilerFilter, "(Ljava/lang/String;)Z"),
NATIVE_METHOD(DexFile, isProfileGuidedCompilerFilter, "(Ljava/lang/String;)Z"),
+ NATIVE_METHOD(DexFile,
+ getNonProfileGuidedCompilerFilter,
+ "(Ljava/lang/String;)Ljava/lang/String;"),
};
void register_dalvik_system_DexFile(JNIEnv* env) {
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index d9863c5..9e2d68d 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -149,7 +149,9 @@
dstObjArray->AssignableMemcpy(dstPos, srcObjArray, srcPos, count);
return;
}
- dstObjArray->AssignableCheckingMemcpy(dstPos, srcObjArray, srcPos, count, true);
+ // This code is never run under a transaction.
+ DCHECK(!Runtime::Current()->IsActiveTransaction());
+ dstObjArray->AssignableCheckingMemcpy<false>(dstPos, srcObjArray, srcPos, count, true);
}
// Template to convert general array to that of its specific primitive type.
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index bb90d46..ce892f3 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -179,9 +179,10 @@
return HasOriginalDexFiles() ? kDex2OatNeeded : kNoDexOptNeeded;
}
-bool OatFileAssistant::MakeUpToDate(CompilerFilter::Filter target, std::string* error_msg) {
+OatFileAssistant::ResultOfAttemptToUpdate
+OatFileAssistant::MakeUpToDate(CompilerFilter::Filter target, std::string* error_msg) {
switch (GetDexOptNeeded(target)) {
- case kNoDexOptNeeded: return true;
+ case kNoDexOptNeeded: return kUpdateSucceeded;
case kDex2OatNeeded: return GenerateOatFile(target, error_msg);
case kPatchOatNeeded: return RelocateOatFile(OdexFileName(), error_msg);
case kSelfPatchOatNeeded: return RelocateOatFile(OatFileName(), error_msg);
@@ -569,21 +570,21 @@
return true;
}
-bool OatFileAssistant::RelocateOatFile(const std::string* input_file,
- std::string* error_msg) {
+OatFileAssistant::ResultOfAttemptToUpdate
+OatFileAssistant::RelocateOatFile(const std::string* input_file, std::string* error_msg) {
CHECK(error_msg != nullptr);
if (input_file == nullptr) {
*error_msg = "Patching of oat file for dex location " + dex_location_
+ " not attempted because the input file name could not be determined.";
- return false;
+ return kUpdateNotAttempted;
}
const std::string& input_file_name = *input_file;
if (OatFileName() == nullptr) {
*error_msg = "Patching of oat file for dex location " + dex_location_
+ " not attempted because the oat file name could not be determined.";
- return false;
+ return kUpdateNotAttempted;
}
const std::string& oat_file_name = *OatFileName();
@@ -592,13 +593,13 @@
if (image_info == nullptr) {
*error_msg = "Patching of oat file " + oat_file_name
+ " not attempted because no image location was found.";
- return false;
+ return kUpdateNotAttempted;
}
if (!runtime->IsDex2OatEnabled()) {
*error_msg = "Patching of oat file " + oat_file_name
+ " not attempted because dex2oat is disabled";
- return false;
+ return kUpdateNotAttempted;
}
std::vector<std::string> argv;
@@ -613,28 +614,29 @@
// Manually delete the file. This ensures there is no garbage left over if
// the process unexpectedly died.
TEMP_FAILURE_RETRY(unlink(oat_file_name.c_str()));
- return false;
+ return kUpdateFailed;
}
// Mark that the oat file has changed and we should try to reload.
ClearOatFileCache();
- return true;
+ return kUpdateSucceeded;
}
-bool OatFileAssistant::GenerateOatFile(CompilerFilter::Filter target, std::string* error_msg) {
+OatFileAssistant::ResultOfAttemptToUpdate
+OatFileAssistant::GenerateOatFile(CompilerFilter::Filter target, std::string* error_msg) {
CHECK(error_msg != nullptr);
Runtime* runtime = Runtime::Current();
if (!runtime->IsDex2OatEnabled()) {
*error_msg = "Generation of oat file for dex location " + dex_location_
+ " not attempted because dex2oat is disabled.";
- return false;
+ return kUpdateNotAttempted;
}
if (OatFileName() == nullptr) {
*error_msg = "Generation of oat file for dex location " + dex_location_
+ " not attempted because the oat file name could not be determined.";
- return false;
+ return kUpdateNotAttempted;
}
const std::string& oat_file_name = *OatFileName();
@@ -643,7 +645,7 @@
// TODO: Why does dex2oat behave that way?
if (!OS::FileExists(dex_location_.c_str())) {
*error_msg = "Dex location " + dex_location_ + " does not exists.";
- return false;
+ return kUpdateNotAttempted;
}
std::unique_ptr<File> oat_file;
@@ -651,14 +653,14 @@
if (oat_file.get() == nullptr) {
*error_msg = "Generation of oat file " + oat_file_name
+ " not attempted because the oat file could not be created.";
- return false;
+ return kUpdateNotAttempted;
}
if (fchmod(oat_file->Fd(), 0644) != 0) {
*error_msg = "Generation of oat file " + oat_file_name
+ " not attempted because the oat file could not be made world readable.";
oat_file->Erase();
- return false;
+ return kUpdateNotAttempted;
}
std::vector<std::string> args;
@@ -672,18 +674,18 @@
// the process unexpectedly died.
oat_file->Erase();
TEMP_FAILURE_RETRY(unlink(oat_file_name.c_str()));
- return false;
+ return kUpdateFailed;
}
if (oat_file->FlushCloseOrErase() != 0) {
*error_msg = "Unable to close oat file " + oat_file_name;
TEMP_FAILURE_RETRY(unlink(oat_file_name.c_str()));
- return false;
+ return kUpdateFailed;
}
// Mark that the oat file has changed and we should try to reload.
ClearOatFileCache();
- return true;
+ return kUpdateSucceeded;
}
bool OatFileAssistant::Dex2Oat(const std::vector<std::string>& args,
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index db754b9..17f72fe 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -148,14 +148,26 @@
// given compiler filter.
DexOptNeeded GetDexOptNeeded(CompilerFilter::Filter target_compiler_filter);
+ // Return code used when attempting to generate updated code.
+ enum ResultOfAttemptToUpdate {
+ kUpdateFailed, // We tried making the code up to date, but
+ // encountered an unexpected failure.
+ kUpdateNotAttempted, // We wanted to update the code, but determined we
+ // should not make the attempt.
+ kUpdateSucceeded // We successfully made the code up to date
+ // (possibly by doing nothing).
+ };
+
// Attempts to generate or relocate the oat file as needed to make it up to
// date in a way that is at least as good as an oat file generated with
// the given compiler filter.
- // Returns true on success.
+ // Returns the result of attempting to update the code.
//
- // If there is a failure, the value of error_msg will be set to a string
- // describing why there was failure. error_msg must not be null.
- bool MakeUpToDate(CompilerFilter::Filter target_compiler_filter, std::string* error_msg);
+ // If the result is not kUpdateSucceeded, the value of error_msg will be set
+ // to a string describing why there was a failure or the update was not
+ // attempted. error_msg must not be null.
+ ResultOfAttemptToUpdate MakeUpToDate(CompilerFilter::Filter target_compiler_filter,
+ std::string* error_msg);
// Returns an oat file that can be used for loading dex files.
// Returns null if no suitable oat file was found.
@@ -232,22 +244,20 @@
// Generates the oat file by relocation from the named input file.
// This does not check the current status before attempting to relocate the
// oat file.
- // Returns true on success.
- // This will fail if dex2oat is not enabled in the current runtime.
//
- // If there is a failure, the value of error_msg will be set to a string
- // describing why there was failure. error_msg must not be null.
- bool RelocateOatFile(const std::string* input_file, std::string* error_msg);
+ // If the result is not kUpdateSucceeded, the value of error_msg will be set
+ // to a string describing why there was a failure or the update was not
+ // attempted. error_msg must not be null.
+ ResultOfAttemptToUpdate RelocateOatFile(const std::string* input_file, std::string* error_msg);
// Generate the oat file from the dex file using the given compiler filter.
// This does not check the current status before attempting to generate the
// oat file.
- // Returns true on success.
- // This will fail if dex2oat is not enabled in the current runtime.
//
- // If there is a failure, the value of error_msg will be set to a string
- // describing why there was failure. error_msg must not be null.
- bool GenerateOatFile(CompilerFilter::Filter filter, std::string* error_msg);
+ // If the result is not kUpdateSucceeded, the value of error_msg will be set
+ // to a string describing why there was a failure or the update was not
+ // attempted. error_msg must not be null.
+ ResultOfAttemptToUpdate GenerateOatFile(CompilerFilter::Filter filter, std::string* error_msg);
// Executes dex2oat using the current runtime configuration overridden with
// the given arguments. This does not check to see if dex2oat is enabled in
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index c247812..bddfa4f 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -452,7 +452,8 @@
// Trying to make the oat file up to date should not fail or crash.
std::string error_msg;
- EXPECT_TRUE(oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg));
+ EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
+ oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg));
// Trying to get the best oat file should fail, but not crash.
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
@@ -703,7 +704,8 @@
// Make the oat file up to date.
std::string error_msg;
- ASSERT_TRUE(oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg;
+ ASSERT_EQ(OatFileAssistant::kUpdateSucceeded,
+ oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg;
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -765,7 +767,8 @@
// Make the oat file up to date.
std::string error_msg;
- ASSERT_TRUE(oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg;
+ ASSERT_EQ(OatFileAssistant::kUpdateSucceeded,
+ oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg;
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -821,7 +824,8 @@
// Make the oat file up to date. This should have no effect.
std::string error_msg;
- EXPECT_TRUE(oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg;
+ EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
+ oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg;
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -871,7 +875,8 @@
// Make the oat file up to date.
std::string error_msg;
- ASSERT_TRUE(oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg;
+ ASSERT_EQ(OatFileAssistant::kUpdateSucceeded,
+ oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg;
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -914,7 +919,8 @@
// Make the oat file up to date.
std::string error_msg;
- ASSERT_TRUE(oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg;
+ ASSERT_EQ(OatFileAssistant::kUpdateSucceeded,
+ oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg;
EXPECT_EQ(OatFileAssistant::kNoDexOptNeeded,
oat_file_assistant.GetDexOptNeeded(CompilerFilter::kSpeed));
@@ -1093,7 +1099,8 @@
OatFileAssistant oat_file_assistant(
dex_location.c_str(), oat_location.c_str(), kRuntimeISA, false, true);
std::string error_msg;
- ASSERT_TRUE(oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg;
+ ASSERT_EQ(OatFileAssistant::kUpdateSucceeded,
+ oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg)) << error_msg;
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_TRUE(oat_file.get() != nullptr);
@@ -1123,7 +1130,8 @@
OatFileAssistant oat_file_assistant(
dex_location.c_str(), oat_location.c_str(), kRuntimeISA, false, true);
std::string error_msg;
- ASSERT_FALSE(oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg));
+ ASSERT_EQ(OatFileAssistant::kUpdateNotAttempted,
+ oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg));
std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
ASSERT_TRUE(oat_file.get() == nullptr);
@@ -1138,7 +1146,8 @@
OatFileAssistant oat_file_assistant(
dex_location.c_str(), oat_location.c_str(), kRuntimeISA, false, true);
std::string error_msg;
- ASSERT_FALSE(oat_file_assistant.GenerateOatFile(CompilerFilter::kSpeed, &error_msg));
+ EXPECT_EQ(OatFileAssistant::kUpdateNotAttempted,
+ oat_file_assistant.GenerateOatFile(CompilerFilter::kSpeed, &error_msg));
}
// Turn an absolute path into a path relative to the current working
@@ -1217,7 +1226,8 @@
// Trying to make it up to date should have no effect.
std::string error_msg;
- EXPECT_TRUE(oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg));
+ EXPECT_EQ(OatFileAssistant::kUpdateSucceeded,
+ oat_file_assistant.MakeUpToDate(CompilerFilter::kSpeed, &error_msg));
EXPECT_TRUE(error_msg.empty());
}
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 2f13f55..94f6345 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -329,8 +329,20 @@
// Update the oat file on disk if we can. This may fail, but that's okay.
// Best effort is all that matters here.
- if (!oat_file_assistant.MakeUpToDate(filter_, /*out*/ &error_msg)) {
- LOG(INFO) << error_msg;
+ switch (oat_file_assistant.MakeUpToDate(filter_, /*out*/ &error_msg)) {
+ case OatFileAssistant::kUpdateFailed:
+ LOG(WARNING) << error_msg;
+ break;
+
+ case OatFileAssistant::kUpdateNotAttempted:
+ // Avoid spamming the logs if we decided not to attempt making the oat
+ // file up to date.
+ VLOG(oat) << error_msg;
+ break;
+
+ case OatFileAssistant::kUpdateSucceeded:
+ // Nothing to do.
+ break;
}
// Get the oat file on disk.
diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc
index 9786c05..07a112f 100644
--- a/runtime/oat_quick_method_header.cc
+++ b/runtime/oat_quick_method_header.cc
@@ -46,10 +46,10 @@
uint32_t sought_offset = pc - reinterpret_cast<uintptr_t>(entry_point);
if (IsOptimized()) {
CodeInfo code_info = GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(sought_offset, encoding);
if (stack_map.IsValid()) {
- return stack_map.GetDexPc(encoding);
+ return stack_map.GetDexPc(encoding.stack_map_encoding);
}
} else {
MappingTable table(GetMappingTable());
@@ -95,7 +95,7 @@
// Optimized code does not have a mapping table. Search for the dex-to-pc
// mapping in stack maps.
CodeInfo code_info = GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
// All stack maps are stored in the same CodeItem section, safepoint stack
// maps first, then catch stack maps. We use `is_for_catch_handler` to select
@@ -104,7 +104,8 @@
LIKELY(is_for_catch_handler) ? code_info.GetCatchStackMapForDexPc(dex_pc, encoding)
: code_info.GetStackMapForDexPc(dex_pc, encoding);
if (stack_map.IsValid()) {
- return reinterpret_cast<uintptr_t>(entry_point) + stack_map.GetNativePcOffset(encoding);
+ return reinterpret_cast<uintptr_t>(entry_point) +
+ stack_map.GetNativePcOffset(encoding.stack_map_encoding);
}
} else {
MappingTable table(GetMappingTable());
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 6d1403c..a785ecb 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -220,7 +220,7 @@
const size_t number_of_vregs = handler_method_->GetCodeItem()->registers_size_;
CodeInfo code_info = handler_method_header_->GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
// Find stack map of the catch block.
StackMap catch_stack_map = code_info.GetCatchStackMapForDexPc(GetHandlerDexPc(), encoding);
@@ -382,11 +382,10 @@
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
CodeInfo code_info = method_header->GetOptimizedCodeInfo();
uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
- StackMapEncoding encoding = code_info.ExtractEncoding();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
const size_t number_of_vregs = m->GetCodeItem()->registers_size_;
- MemoryRegion stack_mask = stack_map.GetStackMask(encoding);
- uint32_t register_mask = stack_map.GetRegisterMask(encoding);
+ uint32_t register_mask = stack_map.GetRegisterMask(encoding.stack_map_encoding);
DexRegisterMap vreg_map = IsInInlinedFrame()
? code_info.GetDexRegisterMapAtDepth(GetCurrentInliningDepth() - 1,
code_info.GetInlineInfoOf(stack_map, encoding),
@@ -419,7 +418,8 @@
const uint8_t* addr = reinterpret_cast<const uint8_t*>(GetCurrentQuickFrame()) + offset;
value = *reinterpret_cast<const uint32_t*>(addr);
uint32_t bit = (offset >> 2);
- if (stack_mask.size_in_bits() > bit && stack_mask.LoadBit(bit)) {
+ if (stack_map.GetNumberOfStackMaskBits(encoding.stack_map_encoding) > bit &&
+ stack_map.GetStackMaskBit(encoding.stack_map_encoding, bit)) {
is_reference = true;
}
break;
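The last hunk above drops the MemoryRegion view of the stack mask and tests bits through the encoding instead; the slot-to-bit mapping itself is unchanged. A sketch of that test, assuming 4-byte stack slots so that the slot's byte offset within the frame maps to mask bit (offset >> 2):

#include <cstddef>
#include <vector>

bool IsReferenceSlot(const std::vector<bool>& stack_mask, size_t byte_offset) {
  const size_t bit = byte_offset >> 2;  // One mask bit per 4-byte slot.
  // Bounds-check first: the mask only describes slots the compiler knows about.
  return bit < stack_mask.size() && stack_mask[bit];
}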
diff --git a/runtime/stack.cc b/runtime/stack.cc
index ee5da8e..2336365 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -117,7 +117,7 @@
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
CodeInfo code_info = method_header->GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(stack_map.IsValid());
return code_info.GetInlineInfoOf(stack_map, encoding);
@@ -130,7 +130,11 @@
if (IsInInlinedFrame()) {
size_t depth_in_stack_map = current_inlining_depth_ - 1;
InlineInfo inline_info = GetCurrentInlineInfo();
- return GetResolvedMethod(*GetCurrentQuickFrame(), inline_info, depth_in_stack_map);
+ DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
+ bool allow_resolve = walk_kind_ != StackWalkKind::kIncludeInlinedFramesNoResolve;
+ return allow_resolve
+ ? GetResolvedMethod<true>(*GetCurrentQuickFrame(), inline_info, depth_in_stack_map)
+ : GetResolvedMethod<false>(*GetCurrentQuickFrame(), inline_info, depth_in_stack_map);
} else {
return *cur_quick_frame_;
}
@@ -308,7 +312,7 @@
DCHECK_LT(vreg, code_item->registers_size_);
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
CodeInfo code_info = method_header->GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
@@ -859,15 +863,16 @@
cur_oat_quick_method_header_ = method->GetOatQuickMethodHeader(cur_quick_frame_pc_);
SanityCheckFrame();
- if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
+ if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames ||
+ walk_kind_ == StackWalkKind::kIncludeInlinedFramesNoResolve)
&& (cur_oat_quick_method_header_ != nullptr)
&& cur_oat_quick_method_header_->IsOptimized()) {
CodeInfo code_info = cur_oat_quick_method_header_->GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
uint32_t native_pc_offset =
cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
- if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding)) {
+ if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding.stack_map_encoding)) {
InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
DCHECK_EQ(current_inlining_depth_, 0u);
for (current_inlining_depth_ = inline_info.GetDepth();
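The GetMethod() change above turns a runtime flag into a template argument, so each instantiation of GetResolvedMethod is specialized at compile time. A minimal sketch of that dispatch idiom, with a hypothetical Process standing in for GetResolvedMethod:

#include <iostream>

template <bool kResolve>
int Process(int value) {
  // The template parameter picks the behavior at compile time; in the real
  // code it decides whether the stack walk may resolve methods.
  return kResolve ? value * 2 : value;
}

int Dispatch(int value, bool allow_resolve) {
  // Branch once at the call site; each instantiation is free of the flag.
  return allow_resolve ? Process<true>(value) : Process<false>(value);
}

int main() {
  std::cout << Dispatch(21, true) << ' ' << Dispatch(21, false) << '\n';  // 42 21
}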
diff --git a/runtime/stack.h b/runtime/stack.h
index ec653e7..a25874e 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -595,6 +595,7 @@
// when walking the stack.
enum class StackWalkKind {
kIncludeInlinedFrames,
+ kIncludeInlinedFramesNoResolve,
kSkipInlinedFrames,
};
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index 3093436..b51baf1 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -56,66 +56,29 @@
uint16_t dex_register_number,
uint16_t number_of_dex_registers,
const CodeInfo& code_info,
- const StackMapEncoding& enc) const {
+ const CodeInfoEncoding& enc) const {
DexRegisterLocationCatalog dex_register_location_catalog =
code_info.GetDexRegisterLocationCatalog(enc);
size_t location_catalog_entry_index = GetLocationCatalogEntryIndex(
dex_register_number,
number_of_dex_registers,
- code_info.GetNumberOfLocationCatalogEntries());
+ code_info.GetNumberOfLocationCatalogEntries(enc));
return dex_register_location_catalog.GetLocationInternalKind(location_catalog_entry_index);
}
DexRegisterLocation DexRegisterMap::GetDexRegisterLocation(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
const CodeInfo& code_info,
- const StackMapEncoding& enc) const {
+ const CodeInfoEncoding& enc) const {
DexRegisterLocationCatalog dex_register_location_catalog =
code_info.GetDexRegisterLocationCatalog(enc);
size_t location_catalog_entry_index = GetLocationCatalogEntryIndex(
dex_register_number,
number_of_dex_registers,
- code_info.GetNumberOfLocationCatalogEntries());
+ code_info.GetNumberOfLocationCatalogEntries(enc));
return dex_register_location_catalog.GetDexRegisterLocation(location_catalog_entry_index);
}
-uint32_t StackMap::LoadAt(size_t number_of_bytes, size_t offset, bool check_max) const {
- if (number_of_bytes == 0u) {
- DCHECK(!check_max);
- return 0;
- } else if (number_of_bytes == 1u) {
- uint8_t value = region_.LoadUnaligned<uint8_t>(offset);
- return (check_max && value == 0xFF) ? -1 : value;
- } else if (number_of_bytes == 2u) {
- uint16_t value = region_.LoadUnaligned<uint16_t>(offset);
- return (check_max && value == 0xFFFF) ? -1 : value;
- } else if (number_of_bytes == 3u) {
- uint16_t low = region_.LoadUnaligned<uint16_t>(offset);
- uint16_t high = region_.LoadUnaligned<uint8_t>(offset + sizeof(uint16_t));
- uint32_t value = (high << 16) + low;
- return (check_max && value == 0xFFFFFF) ? -1 : value;
- } else {
- DCHECK_EQ(number_of_bytes, 4u);
- return region_.LoadUnaligned<uint32_t>(offset);
- }
-}
-
-void StackMap::StoreAt(size_t number_of_bytes, size_t offset, uint32_t value) const {
- if (number_of_bytes == 0u) {
- DCHECK_EQ(value, 0u);
- } else if (number_of_bytes == 1u) {
- region_.StoreUnaligned<uint8_t>(offset, value);
- } else if (number_of_bytes == 2u) {
- region_.StoreUnaligned<uint16_t>(offset, value);
- } else if (number_of_bytes == 3u) {
- region_.StoreUnaligned<uint16_t>(offset, Low16Bits(value));
- region_.StoreUnaligned<uint8_t>(offset + sizeof(uint16_t), High16Bits(value));
- } else {
- region_.StoreUnaligned<uint32_t>(offset, value);
- DCHECK_EQ(number_of_bytes, 4u);
- }
-}
-
static void DumpRegisterMapping(std::ostream& os,
size_t dex_register_num,
DexRegisterLocation location,
@@ -126,25 +89,30 @@
<< " (" << location.GetValue() << ")" << suffix << '\n';
}
+void StackMapEncoding::Dump(VariableIndentationOutputStream* vios) const {
+ vios->Stream()
+ << "StackMapEncoding"
+ << " (native_pc_bit_offset=" << static_cast<uint32_t>(kNativePcBitOffset)
+ << ", dex_pc_bit_offset=" << static_cast<uint32_t>(dex_pc_bit_offset_)
+ << ", dex_register_map_bit_offset=" << static_cast<uint32_t>(dex_register_map_bit_offset_)
+ << ", inline_info_bit_offset=" << static_cast<uint32_t>(inline_info_bit_offset_)
+ << ", register_mask_bit_offset=" << static_cast<uint32_t>(register_mask_bit_offset_)
+ << ", stack_mask_bit_offset=" << static_cast<uint32_t>(stack_mask_bit_offset_)
+ << ")\n";
+}
+
void CodeInfo::Dump(VariableIndentationOutputStream* vios,
uint32_t code_offset,
uint16_t number_of_dex_registers,
bool dump_stack_maps) const {
- StackMapEncoding encoding = ExtractEncoding();
- uint32_t code_info_size = GetOverallSize();
- size_t number_of_stack_maps = GetNumberOfStackMaps();
+ CodeInfoEncoding encoding = ExtractEncoding();
+ size_t number_of_stack_maps = GetNumberOfStackMaps(encoding);
vios->Stream()
- << "Optimized CodeInfo (size=" << code_info_size
- << ", number_of_dex_registers=" << number_of_dex_registers
+ << "Optimized CodeInfo (number_of_dex_registers=" << number_of_dex_registers
<< ", number_of_stack_maps=" << number_of_stack_maps
- << ", has_inline_info=" << encoding.HasInlineInfo()
- << ", number_of_bytes_for_inline_info=" << encoding.NumberOfBytesForInlineInfo()
- << ", number_of_bytes_for_dex_register_map=" << encoding.NumberOfBytesForDexRegisterMap()
- << ", number_of_bytes_for_dex_pc=" << encoding.NumberOfBytesForDexPc()
- << ", number_of_bytes_for_native_pc=" << encoding.NumberOfBytesForNativePc()
- << ", number_of_bytes_for_register_mask=" << encoding.NumberOfBytesForRegisterMask()
<< ")\n";
ScopedIndentation indent1(vios);
+ encoding.stack_map_encoding.Dump(vios);
// Display the Dex register location catalog.
GetDexRegisterLocationCatalog(encoding).Dump(vios, *this);
// Display stack maps along with (live) Dex register maps.
@@ -165,8 +133,8 @@
void DexRegisterLocationCatalog::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info) {
- StackMapEncoding encoding = code_info.ExtractEncoding();
- size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
size_t location_catalog_size_in_bytes = code_info.GetDexRegisterLocationCatalogSize(encoding);
vios->Stream()
<< "DexRegisterLocationCatalog (number_of_entries=" << number_of_location_catalog_entries
@@ -181,8 +149,8 @@
void DexRegisterMap::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
uint16_t number_of_dex_registers) const {
- StackMapEncoding encoding = code_info.ExtractEncoding();
- size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
+ size_t number_of_location_catalog_entries = code_info.GetNumberOfLocationCatalogEntries(encoding);
// TODO: Display the bit mask of live Dex registers.
for (size_t j = 0; j < number_of_dex_registers; ++j) {
if (IsDexRegisterLive(j)) {
@@ -202,32 +170,32 @@
void StackMap::Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
- const StackMapEncoding& encoding,
+ const CodeInfoEncoding& encoding,
uint32_t code_offset,
uint16_t number_of_dex_registers,
const std::string& header_suffix) const {
+ StackMapEncoding stack_map_encoding = encoding.stack_map_encoding;
vios->Stream()
<< "StackMap" << header_suffix
<< std::hex
- << " [native_pc=0x" << code_offset + GetNativePcOffset(encoding) << "]"
- << " (dex_pc=0x" << GetDexPc(encoding)
- << ", native_pc_offset=0x" << GetNativePcOffset(encoding)
- << ", dex_register_map_offset=0x" << GetDexRegisterMapOffset(encoding)
- << ", inline_info_offset=0x" << GetInlineDescriptorOffset(encoding)
- << ", register_mask=0x" << GetRegisterMask(encoding)
+ << " [native_pc=0x" << code_offset + GetNativePcOffset(stack_map_encoding) << "]"
+ << " (dex_pc=0x" << GetDexPc(stack_map_encoding)
+ << ", native_pc_offset=0x" << GetNativePcOffset(stack_map_encoding)
+ << ", dex_register_map_offset=0x" << GetDexRegisterMapOffset(stack_map_encoding)
+ << ", inline_info_offset=0x" << GetInlineDescriptorOffset(stack_map_encoding)
+ << ", register_mask=0x" << GetRegisterMask(stack_map_encoding)
<< std::dec
<< ", stack_mask=0b";
- MemoryRegion stack_mask = GetStackMask(encoding);
- for (size_t i = 0, e = stack_mask.size_in_bits(); i < e; ++i) {
- vios->Stream() << stack_mask.LoadBit(e - i - 1);
+ for (size_t i = 0, e = GetNumberOfStackMaskBits(stack_map_encoding); i < e; ++i) {
+ vios->Stream() << GetStackMaskBit(stack_map_encoding, e - i - 1);
}
vios->Stream() << ")\n";
- if (HasDexRegisterMap(encoding)) {
+ if (HasDexRegisterMap(stack_map_encoding)) {
DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(
*this, encoding, number_of_dex_registers);
dex_register_map.Dump(vios, code_info, number_of_dex_registers);
}
- if (HasInlineInfo(encoding)) {
+ if (HasInlineInfo(stack_map_encoding)) {
InlineInfo inline_info = code_info.GetInlineInfoOf(*this, encoding);
// We do not know the length of the dex register maps of inlined frames
// at this level, so we just pass null to `InlineInfo::Dump` to tell
@@ -251,7 +219,7 @@
<< ", invoke_type=" << static_cast<InvokeType>(GetInvokeTypeAtDepth(i))
<< ")\n";
if (HasDexRegisterMapAtDepth(i) && (number_of_dex_registers != nullptr)) {
- StackMapEncoding encoding = code_info.ExtractEncoding();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
DexRegisterMap dex_register_map =
code_info.GetDexRegisterMapAtDepth(i, *this, encoding, number_of_dex_registers[i]);
ScopedIndentation indent1(vios);
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index dbf23aa..9e8884e 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -20,6 +20,7 @@
#include "base/bit_vector.h"
#include "base/bit_utils.h"
#include "memory_region.h"
+#include "leb128.h"
namespace art {
@@ -39,13 +40,9 @@
// Size of Dex virtual registers.
static constexpr size_t kVRegSize = 4;
-// We encode the number of bytes needed for writing a value on 3 bits
-// (i.e. up to 8 values), for values that we know are maximum 32-bit
-// long.
-static constexpr size_t kNumberOfBitForNumberOfBytesForEncoding = 3;
-
class CodeInfo;
class StackMapEncoding;
+struct CodeInfoEncoding;
/**
* Classes in the following file are wrapper on stack map information backed
@@ -459,7 +456,7 @@
DexRegisterLocation::Kind GetLocationKind(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
const CodeInfo& code_info,
- const StackMapEncoding& enc) const {
+ const CodeInfoEncoding& enc) const {
return DexRegisterLocation::ConvertToSurfaceKind(
GetLocationInternalKind(dex_register_number, number_of_dex_registers, code_info, enc));
}
@@ -468,18 +465,18 @@
DexRegisterLocation::Kind GetLocationInternalKind(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
const CodeInfo& code_info,
- const StackMapEncoding& enc) const;
+ const CodeInfoEncoding& enc) const;
// Get the Dex register location `dex_register_number`.
DexRegisterLocation GetDexRegisterLocation(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
const CodeInfo& code_info,
- const StackMapEncoding& enc) const;
+ const CodeInfoEncoding& enc) const;
int32_t GetStackOffsetInBytes(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
const CodeInfo& code_info,
- const StackMapEncoding& enc) const {
+ const CodeInfoEncoding& enc) const {
DexRegisterLocation location =
GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
DCHECK(location.GetKind() == DexRegisterLocation::Kind::kInStack);
@@ -490,7 +487,7 @@
int32_t GetConstant(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
const CodeInfo& code_info,
- const StackMapEncoding& enc) const {
+ const CodeInfoEncoding& enc) const {
DexRegisterLocation location =
GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
DCHECK_EQ(location.GetKind(), DexRegisterLocation::Kind::kConstant);
@@ -500,7 +497,7 @@
int32_t GetMachineRegister(uint16_t dex_register_number,
uint16_t number_of_dex_registers,
const CodeInfo& code_info,
- const StackMapEncoding& enc) const {
+ const CodeInfoEncoding& enc) const {
DexRegisterLocation location =
GetDexRegisterLocation(dex_register_number, number_of_dex_registers, code_info, enc);
DCHECK(location.GetInternalKind() == DexRegisterLocation::Kind::kInRegister ||
@@ -657,109 +654,131 @@
friend class StackMapStream;
};
+// Represents the bit range of a bit-packed integer field.
+// We reuse the idea from ULEB128p1 to support encoding of -1 (aka 0xFFFFFFFF):
+// if min_value is set to -1, we implicitly subtract one from any loaded value
+// and add one to any stored value. This generalizes to arbitrary negative values.
+// In other words, min_value acts as a base and the stored value is added to it.
+struct FieldEncoding {
+ FieldEncoding(size_t start_offset, size_t end_offset, int32_t min_value = 0)
+ : start_offset_(start_offset), end_offset_(end_offset), min_value_(min_value) {
+ DCHECK_LE(start_offset_, end_offset_);
+ DCHECK_LE(BitSize(), 32u);
+ }
+
+ ALWAYS_INLINE size_t BitSize() const { return end_offset_ - start_offset_; }
+
+ ALWAYS_INLINE int32_t Load(const MemoryRegion& region) const {
+ DCHECK_LE(end_offset_, region.size_in_bits());
+ const size_t bit_count = BitSize();
+ if (bit_count == 0) {
+ // Do not touch any memory if the range is empty.
+ return min_value_;
+ }
+ uint8_t* address = region.start() + start_offset_ / kBitsPerByte;
+ const uint32_t shift = start_offset_ & (kBitsPerByte - 1);
+ // Load the value (reading only the strictly needed bytes).
+ const uint32_t load_bit_count = shift + bit_count;
+ uint32_t value = *address++ >> shift;
+ if (load_bit_count > 8) {
+ value |= static_cast<uint32_t>(*address++) << (8 - shift);
+ if (load_bit_count > 16) {
+ value |= static_cast<uint32_t>(*address++) << (16 - shift);
+ if (load_bit_count > 24) {
+ value |= static_cast<uint32_t>(*address++) << (24 - shift);
+ if (load_bit_count > 32) {
+ value |= static_cast<uint32_t>(*address++) << (32 - shift);
+ }
+ }
+ }
+ }
+ // Clear unwanted most significant bits.
+ uint32_t clear_bit_count = 32 - bit_count;
+ value = (value << clear_bit_count) >> clear_bit_count;
+ return value + min_value_;
+ }
+
+ ALWAYS_INLINE void Store(MemoryRegion region, int32_t value) const {
+ region.StoreBits(start_offset_, value - min_value_, BitSize());
+ DCHECK_EQ(Load(region), value);
+ }
+
+ private:
+ size_t start_offset_;
+ size_t end_offset_;
+ int32_t min_value_;
+};
+
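A self-contained sketch of the FieldEncoding contract: a value lives in an arbitrary bit range of a byte buffer, and min_value shifts the stored range so that, for example, a -1 sentinel round-trips through an unsigned field. This version goes bit-at-a-time for clarity, unlike the byte-wise loads above:

#include <cassert>
#include <cstdint>
#include <vector>

int32_t LoadBits(const std::vector<uint8_t>& buf,
                 size_t start_bit, size_t bit_count, int32_t min_value) {
  uint32_t raw = 0;
  for (size_t i = 0; i < bit_count; ++i) {
    size_t bit = start_bit + i;
    raw |= static_cast<uint32_t>((buf[bit / 8] >> (bit % 8)) & 1u) << i;
  }
  return static_cast<int32_t>(raw) + min_value;  // min_value acts as the base.
}

void StoreBits(std::vector<uint8_t>* buf,
               size_t start_bit, size_t bit_count, int32_t value, int32_t min_value) {
  uint32_t raw = static_cast<uint32_t>(value - min_value);
  for (size_t i = 0; i < bit_count; ++i) {
    size_t bit = start_bit + i;
    uint8_t mask = static_cast<uint8_t>(1u << (bit % 8));
    if ((raw >> i) & 1u) {
      (*buf)[bit / 8] |= mask;
    } else {
      (*buf)[bit / 8] &= static_cast<uint8_t>(~mask);
    }
  }
}

int main() {
  std::vector<uint8_t> buf(4, 0);
  StoreBits(&buf, 3, 5, -1, /* min_value */ -1);  // -1 is stored as raw 0.
  assert(LoadBits(buf, 3, 5, -1) == -1);
}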
class StackMapEncoding {
public:
StackMapEncoding() {}
- StackMapEncoding(size_t stack_mask_size,
- size_t bytes_for_inline_info,
- size_t bytes_for_dex_register_map,
- size_t bytes_for_dex_pc,
- size_t bytes_for_native_pc,
- size_t bytes_for_register_mask)
- : bytes_for_stack_mask_(stack_mask_size),
- bytes_for_inline_info_(bytes_for_inline_info),
- bytes_for_dex_register_map_(bytes_for_dex_register_map),
- bytes_for_dex_pc_(bytes_for_dex_pc),
- bytes_for_native_pc_(bytes_for_native_pc),
- bytes_for_register_mask_(bytes_for_register_mask) {}
+ // Sets the stack map bit layout based on the given sizes.
+ // Returns the size of one stack map in bytes.
+ size_t SetFromSizes(size_t native_pc_max,
+ size_t dex_pc_max,
+ size_t dex_register_map_size,
+ size_t inline_info_size,
+ size_t register_mask_max,
+ size_t stack_mask_bit_size) {
+ size_t bit_offset = 0;
+ DCHECK_EQ(kNativePcBitOffset, bit_offset);
+ bit_offset += MinimumBitsToStore(native_pc_max);
- static StackMapEncoding CreateFromSizes(size_t stack_mask_size,
- size_t inline_info_size,
- size_t dex_register_map_size,
- size_t dex_pc_max,
- size_t native_pc_max,
- size_t register_mask_max) {
- return StackMapEncoding(
- stack_mask_size,
- // + 1 to also encode kNoInlineInfo: if an inline info offset
- // is at 0xFF, we want to overflow to a larger encoding, because it will
- // conflict with kNoInlineInfo.
- // The offset is relative to the dex register map. TODO: Change this.
- inline_info_size == 0
- ? 0
- : EncodingSizeInBytes(dex_register_map_size + inline_info_size + 1),
- // + 1 to also encode kNoDexRegisterMap: if a dex register map offset
- // is at 0xFF, we want to overflow to a larger encoding, because it will
- // conflict with kNoDexRegisterMap.
- EncodingSizeInBytes(dex_register_map_size + 1),
- EncodingSizeInBytes(dex_pc_max),
- EncodingSizeInBytes(native_pc_max),
- EncodingSizeInBytes(register_mask_max));
+ dex_pc_bit_offset_ = dchecked_integral_cast<uint8_t>(bit_offset);
+ bit_offset += MinimumBitsToStore(1 /* kNoDexPc */ + dex_pc_max);
+
+ // We also need +1 for kNoDexRegisterMap, but since dex_register_map_size is strictly
+ // greater than any offset we might try to encode, we already implicitly have it.
+ dex_register_map_bit_offset_ = dchecked_integral_cast<uint8_t>(bit_offset);
+ bit_offset += MinimumBitsToStore(dex_register_map_size);
+
+ // We also need +1 for kNoInlineInfo, but since inline_info_size is strictly
+ // greater than any offset we might try to encode, we already implicitly have it.
+ // If inline_info_size is zero, we can encode only kNoInlineInfo (in zero bits).
+ inline_info_bit_offset_ = dchecked_integral_cast<uint8_t>(bit_offset);
+ if (inline_info_size != 0) {
+ bit_offset += MinimumBitsToStore(dex_register_map_size + inline_info_size);
+ }
+
+ register_mask_bit_offset_ = dchecked_integral_cast<uint8_t>(bit_offset);
+ bit_offset += MinimumBitsToStore(register_mask_max);
+
+ stack_mask_bit_offset_ = dchecked_integral_cast<uint8_t>(bit_offset);
+ bit_offset += stack_mask_bit_size;
+
+ return RoundUp(bit_offset, kBitsPerByte) / kBitsPerByte;
}
- // Get the size of one stack map of this CodeInfo object, in bytes.
- // All stack maps of a CodeInfo have the same size.
- size_t ComputeStackMapSize() const {
- return bytes_for_register_mask_
- + bytes_for_stack_mask_
- + bytes_for_inline_info_
- + bytes_for_dex_register_map_
- + bytes_for_dex_pc_
- + bytes_for_native_pc_;
+ ALWAYS_INLINE FieldEncoding GetNativePcEncoding() const {
+ return FieldEncoding(kNativePcBitOffset, dex_pc_bit_offset_);
+ }
+ ALWAYS_INLINE FieldEncoding GetDexPcEncoding() const {
+ return FieldEncoding(dex_pc_bit_offset_, dex_register_map_bit_offset_, -1 /* min_value */);
+ }
+ ALWAYS_INLINE FieldEncoding GetDexRegisterMapEncoding() const {
+ return FieldEncoding(dex_register_map_bit_offset_, inline_info_bit_offset_, -1 /* min_value */);
+ }
+ ALWAYS_INLINE FieldEncoding GetInlineInfoEncoding() const {
+ return FieldEncoding(inline_info_bit_offset_, register_mask_bit_offset_, -1 /* min_value */);
+ }
+ ALWAYS_INLINE FieldEncoding GetRegisterMaskEncoding() const {
+ return FieldEncoding(register_mask_bit_offset_, stack_mask_bit_offset_);
+ }
+ ALWAYS_INLINE size_t GetStackMaskBitOffset() const {
+ // The end offset is not encoded; it is implicitly the end of the stack map entry.
+ return stack_mask_bit_offset_;
}
- bool HasInlineInfo() const { return bytes_for_inline_info_ > 0; }
-
- size_t NumberOfBytesForStackMask() const { return bytes_for_stack_mask_; }
- size_t NumberOfBytesForInlineInfo() const { return bytes_for_inline_info_; }
- size_t NumberOfBytesForDexRegisterMap() const { return bytes_for_dex_register_map_; }
- size_t NumberOfBytesForDexPc() const { return bytes_for_dex_pc_; }
- size_t NumberOfBytesForNativePc() const { return bytes_for_native_pc_; }
- size_t NumberOfBytesForRegisterMask() const { return bytes_for_register_mask_; }
-
- size_t ComputeStackMapRegisterMaskOffset() const {
- return kRegisterMaskOffset;
- }
-
- size_t ComputeStackMapStackMaskOffset() const {
- return ComputeStackMapRegisterMaskOffset() + bytes_for_register_mask_;
- }
-
- size_t ComputeStackMapDexPcOffset() const {
- return ComputeStackMapStackMaskOffset() + bytes_for_stack_mask_;
- }
-
- size_t ComputeStackMapNativePcOffset() const {
- return ComputeStackMapDexPcOffset() + bytes_for_dex_pc_;
- }
-
- size_t ComputeStackMapDexRegisterMapOffset() const {
- return ComputeStackMapNativePcOffset() + bytes_for_native_pc_;
- }
-
- size_t ComputeStackMapInlineInfoOffset() const {
- return ComputeStackMapDexRegisterMapOffset() + bytes_for_dex_register_map_;
- }
+ void Dump(VariableIndentationOutputStream* vios) const;
private:
- static size_t EncodingSizeInBytes(size_t max_element) {
- DCHECK(IsUint<32>(max_element));
- return (max_element == 0) ? 0
- : IsUint<8>(max_element) ? 1
- : IsUint<16>(max_element) ? 2
- : IsUint<24>(max_element) ? 3
- : 4;
- }
-
- static constexpr int kRegisterMaskOffset = 0;
-
- size_t bytes_for_stack_mask_;
- size_t bytes_for_inline_info_;
- size_t bytes_for_dex_register_map_;
- size_t bytes_for_dex_pc_;
- size_t bytes_for_native_pc_;
- size_t bytes_for_register_mask_;
+ static constexpr size_t kNativePcBitOffset = 0;
+ uint8_t dex_pc_bit_offset_;
+ uint8_t dex_register_map_bit_offset_;
+ uint8_t inline_info_bit_offset_;
+ uint8_t register_mask_bit_offset_;
+ uint8_t stack_mask_bit_offset_;
};
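A worked example of the SetFromSizes layout above, with made-up inputs. Suppose native_pc_max = 1000, dex_pc_max = 50, dex_register_map_size = 12, inline_info_size = 0, register_mask_max = 255, stack_mask_bit_size = 6:

  MinimumBitsToStore(1000)   = 10 -> native pc offset in bits [0, 10)
  MinimumBitsToStore(1 + 50) =  6 -> dex pc in bits [10, 16)
  MinimumBitsToStore(12)     =  4 -> dex register map offset in bits [16, 20)
  inline_info_size == 0      ->      inline info field is empty: bits [20, 20)
  MinimumBitsToStore(255)    =  8 -> register mask in bits [20, 28)
  stack mask                 ->      bits [28, 34)

Total: 34 bits, so SetFromSizes returns RoundUp(34, 8) / 8 = 5 bytes per stack map, versus at least one whole byte per field under the old byte-granular scheme.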
/**
@@ -772,7 +791,7 @@
*
* The information is of the form:
*
- * [dex_pc, native_pc_offset, dex_register_map_offset, inlining_info_offset, register_mask,
+ * [native_pc_offset, dex_pc, dex_register_map_offset, inlining_info_offset, register_mask,
* stack_mask].
*/
class StackMap {
@@ -780,89 +799,75 @@
StackMap() {}
explicit StackMap(MemoryRegion region) : region_(region) {}
- bool IsValid() const { return region_.pointer() != nullptr; }
+ ALWAYS_INLINE bool IsValid() const { return region_.pointer() != nullptr; }
- uint32_t GetDexPc(const StackMapEncoding& encoding) const {
- return LoadAt(encoding.NumberOfBytesForDexPc(), encoding.ComputeStackMapDexPcOffset());
+ ALWAYS_INLINE uint32_t GetDexPc(const StackMapEncoding& encoding) const {
+ return encoding.GetDexPcEncoding().Load(region_);
}
- void SetDexPc(const StackMapEncoding& encoding, uint32_t dex_pc) {
- StoreAt(encoding.NumberOfBytesForDexPc(), encoding.ComputeStackMapDexPcOffset(), dex_pc);
+ ALWAYS_INLINE void SetDexPc(const StackMapEncoding& encoding, uint32_t dex_pc) {
+ encoding.GetDexPcEncoding().Store(region_, dex_pc);
}
- uint32_t GetNativePcOffset(const StackMapEncoding& encoding) const {
- return LoadAt(encoding.NumberOfBytesForNativePc(), encoding.ComputeStackMapNativePcOffset());
+ ALWAYS_INLINE uint32_t GetNativePcOffset(const StackMapEncoding& encoding) const {
+ return encoding.GetNativePcEncoding().Load(region_);
}
- void SetNativePcOffset(const StackMapEncoding& encoding, uint32_t native_pc_offset) {
- StoreAt(encoding.NumberOfBytesForNativePc(),
- encoding.ComputeStackMapNativePcOffset(),
- native_pc_offset);
+ ALWAYS_INLINE void SetNativePcOffset(const StackMapEncoding& encoding, uint32_t native_pc_offset) {
+ encoding.GetNativePcEncoding().Store(region_, native_pc_offset);
}
- uint32_t GetDexRegisterMapOffset(const StackMapEncoding& encoding) const {
- return LoadAt(encoding.NumberOfBytesForDexRegisterMap(),
- encoding.ComputeStackMapDexRegisterMapOffset(),
- /* check_max */ true);
+ ALWAYS_INLINE uint32_t GetDexRegisterMapOffset(const StackMapEncoding& encoding) const {
+ return encoding.GetDexRegisterMapEncoding().Load(region_);
}
- void SetDexRegisterMapOffset(const StackMapEncoding& encoding, uint32_t offset) {
- StoreAt(encoding.NumberOfBytesForDexRegisterMap(),
- encoding.ComputeStackMapDexRegisterMapOffset(),
- offset);
+ ALWAYS_INLINE void SetDexRegisterMapOffset(const StackMapEncoding& encoding, uint32_t offset) {
+ encoding.GetDexRegisterMapEncoding().Store(region_, offset);
}
- uint32_t GetInlineDescriptorOffset(const StackMapEncoding& encoding) const {
- if (!encoding.HasInlineInfo()) return kNoInlineInfo;
- return LoadAt(encoding.NumberOfBytesForInlineInfo(),
- encoding.ComputeStackMapInlineInfoOffset(),
- /* check_max */ true);
+ ALWAYS_INLINE uint32_t GetInlineDescriptorOffset(const StackMapEncoding& encoding) const {
+ return encoding.GetInlineInfoEncoding().Load(region_);
}
- void SetInlineDescriptorOffset(const StackMapEncoding& encoding, uint32_t offset) {
- DCHECK(encoding.HasInlineInfo());
- StoreAt(encoding.NumberOfBytesForInlineInfo(),
- encoding.ComputeStackMapInlineInfoOffset(),
- offset);
+ ALWAYS_INLINE void SetInlineDescriptorOffset(const StackMapEncoding& encoding, uint32_t offset) {
+ encoding.GetInlineInfoEncoding().Store(region_, offset);
}
- uint32_t GetRegisterMask(const StackMapEncoding& encoding) const {
- return LoadAt(encoding.NumberOfBytesForRegisterMask(),
- encoding.ComputeStackMapRegisterMaskOffset());
+ ALWAYS_INLINE uint32_t GetRegisterMask(const StackMapEncoding& encoding) const {
+ return encoding.GetRegisterMaskEncoding().Load(region_);
}
- void SetRegisterMask(const StackMapEncoding& encoding, uint32_t mask) {
- StoreAt(encoding.NumberOfBytesForRegisterMask(),
- encoding.ComputeStackMapRegisterMaskOffset(),
- mask);
+ ALWAYS_INLINE void SetRegisterMask(const StackMapEncoding& encoding, uint32_t mask) {
+ encoding.GetRegisterMaskEncoding().Store(region_, mask);
}
- MemoryRegion GetStackMask(const StackMapEncoding& encoding) const {
- return region_.Subregion(encoding.ComputeStackMapStackMaskOffset(),
- encoding.NumberOfBytesForStackMask());
+ ALWAYS_INLINE size_t GetNumberOfStackMaskBits(const StackMapEncoding& encoding) const {
+ return region_.size_in_bits() - encoding.GetStackMaskBitOffset();
}
- void SetStackMask(const StackMapEncoding& encoding, const BitVector& sp_map) {
- MemoryRegion region = GetStackMask(encoding);
- sp_map.CopyTo(region.start(), region.size());
+ ALWAYS_INLINE bool GetStackMaskBit(const StackMapEncoding& encoding, size_t index) const {
+ return region_.LoadBit(encoding.GetStackMaskBitOffset() + index);
}
- bool HasDexRegisterMap(const StackMapEncoding& encoding) const {
+ ALWAYS_INLINE void SetStackMaskBit(const StackMapEncoding& encoding, size_t index, bool value) {
+ region_.StoreBit(encoding.GetStackMaskBitOffset() + index, value);
+ }
+
+ ALWAYS_INLINE bool HasDexRegisterMap(const StackMapEncoding& encoding) const {
return GetDexRegisterMapOffset(encoding) != kNoDexRegisterMap;
}
- bool HasInlineInfo(const StackMapEncoding& encoding) const {
+ ALWAYS_INLINE bool HasInlineInfo(const StackMapEncoding& encoding) const {
return GetInlineDescriptorOffset(encoding) != kNoInlineInfo;
}
- bool Equals(const StackMap& other) const {
- return region_.pointer() == other.region_.pointer()
- && region_.size() == other.region_.size();
+ ALWAYS_INLINE bool Equals(const StackMap& other) const {
+ return region_.pointer() == other.region_.pointer() && region_.size() == other.region_.size();
}
void Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
- const StackMapEncoding& encoding,
+ const CodeInfoEncoding& encoding,
uint32_t code_offset,
uint16_t number_of_dex_registers,
const std::string& header_suffix = "") const;
@@ -878,11 +883,6 @@
private:
static constexpr int kFixedSize = 0;
- // Loads `number_of_bytes` at the given `offset` and assemble a uint32_t. If `check_max` is true,
- // this method converts a maximum value of size `number_of_bytes` into a uint32_t 0xFFFFFFFF.
- uint32_t LoadAt(size_t number_of_bytes, size_t offset, bool check_max = false) const;
- void StoreAt(size_t number_of_bytes, size_t offset, uint32_t value) const;
-
MemoryRegion region_;
friend class StackMapStream;
@@ -986,142 +986,121 @@
friend class StackMapStream;
};
+// Most of the fields are encoded as ULEB128 to save space.
+struct CodeInfoEncoding {
+ uint32_t non_header_size;
+ uint32_t number_of_stack_maps;
+ uint32_t stack_map_size_in_bytes;
+ uint32_t number_of_location_catalog_entries;
+ StackMapEncoding stack_map_encoding;
+ uint8_t header_size;
+
+ CodeInfoEncoding() { }
+
+ explicit CodeInfoEncoding(const void* data) {
+ const uint8_t* ptr = reinterpret_cast<const uint8_t*>(data);
+ non_header_size = DecodeUnsignedLeb128(&ptr);
+ number_of_stack_maps = DecodeUnsignedLeb128(&ptr);
+ stack_map_size_in_bytes = DecodeUnsignedLeb128(&ptr);
+ number_of_location_catalog_entries = DecodeUnsignedLeb128(&ptr);
+ static_assert(alignof(StackMapEncoding) == 1, "StackMapEncoding should not require alignment");
+ stack_map_encoding = *reinterpret_cast<const StackMapEncoding*>(ptr);
+ ptr += sizeof(StackMapEncoding);
+ header_size = dchecked_integral_cast<uint8_t>(ptr - reinterpret_cast<const uint8_t*>(data));
+ }
+
+ template<typename Vector>
+ void Compress(Vector* dest) const {
+ EncodeUnsignedLeb128(dest, non_header_size);
+ EncodeUnsignedLeb128(dest, number_of_stack_maps);
+ EncodeUnsignedLeb128(dest, stack_map_size_in_bytes);
+ EncodeUnsignedLeb128(dest, number_of_location_catalog_entries);
+ const uint8_t* ptr = reinterpret_cast<const uint8_t*>(&stack_map_encoding);
+ dest->insert(dest->end(), ptr, ptr + sizeof(stack_map_encoding));
+ }
+};
+
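The header fields above go through ART's EncodeUnsignedLeb128/DecodeUnsignedLeb128 from leb128.h. For reference, a minimal ULEB128 sketch with its own names (7 payload bits per byte, high bit set while more bytes follow):

#include <cstdint>
#include <vector>

void EncodeUleb128(std::vector<uint8_t>* dest, uint32_t value) {
  do {
    uint8_t byte = value & 0x7Fu;
    value >>= 7;
    if (value != 0) byte |= 0x80u;  // Continuation bit: more bytes follow.
    dest->push_back(byte);
  } while (value != 0);
}

uint32_t DecodeUleb128(const uint8_t** ptr) {
  uint32_t result = 0;
  int shift = 0;
  uint8_t byte;
  do {
    byte = *(*ptr)++;
    result |= static_cast<uint32_t>(byte & 0x7Fu) << shift;
    shift += 7;
  } while ((byte & 0x80u) != 0);
  return result;
}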
/**
* Wrapper around all compiler information collected for a method.
* The information is of the form:
*
- * [overall_size, encoding_info, number_of_location_catalog_entries, number_of_stack_maps,
- * stack_mask_size, DexRegisterLocationCatalog+, StackMap+, DexRegisterMap+, InlineInfo*]
+ * [CodeInfoEncoding, StackMap+, DexRegisterLocationCatalog+, DexRegisterMap+, InlineInfo*]
*
- * where `encoding_info` is of the form:
+ * where CodeInfoEncoding is of the form:
*
- * [has_inline_info, inline_info_size_in_bytes, dex_register_map_size_in_bytes,
- * dex_pc_size_in_bytes, native_pc_size_in_bytes, register_mask_size_in_bytes].
+ * [non_header_size, number_of_stack_maps, stack_map_size_in_bytes,
+ * number_of_location_catalog_entries, StackMapEncoding]
*/
class CodeInfo {
public:
- // Memory layout: fixed contents.
- typedef uint32_t OverallSizeType;
- typedef uint16_t EncodingInfoType;
- typedef uint32_t NumberOfLocationCatalogEntriesType;
- typedef uint32_t NumberOfStackMapsType;
- typedef uint32_t StackMaskSizeType;
-
- // Memory (bit) layout: encoding info.
- static constexpr int HasInlineInfoBitSize = 1;
- static constexpr int InlineInfoBitSize = kNumberOfBitForNumberOfBytesForEncoding;
- static constexpr int DexRegisterMapBitSize = kNumberOfBitForNumberOfBytesForEncoding;
- static constexpr int DexPcBitSize = kNumberOfBitForNumberOfBytesForEncoding;
- static constexpr int NativePcBitSize = kNumberOfBitForNumberOfBytesForEncoding;
- static constexpr int RegisterMaskBitSize = kNumberOfBitForNumberOfBytesForEncoding;
-
- explicit CodeInfo(MemoryRegion region) : region_(region) {}
+ explicit CodeInfo(MemoryRegion region) : region_(region) {
+ }
explicit CodeInfo(const void* data) {
- uint32_t size = reinterpret_cast<const uint32_t*>(data)[0];
- region_ = MemoryRegion(const_cast<void*>(data), size);
+ CodeInfoEncoding encoding = CodeInfoEncoding(data);
+ region_ = MemoryRegion(const_cast<void*>(data),
+ encoding.header_size + encoding.non_header_size);
}
- StackMapEncoding ExtractEncoding() const {
- return StackMapEncoding(region_.LoadUnaligned<uint32_t>(kStackMaskSizeOffset),
- GetNumberOfBytesForEncoding(kInlineInfoBitOffset),
- GetNumberOfBytesForEncoding(kDexRegisterMapBitOffset),
- GetNumberOfBytesForEncoding(kDexPcBitOffset),
- GetNumberOfBytesForEncoding(kNativePcBitOffset),
- GetNumberOfBytesForEncoding(kRegisterMaskBitOffset));
+ CodeInfoEncoding ExtractEncoding() const {
+ return CodeInfoEncoding(region_.start());
}
- void SetEncoding(const StackMapEncoding& encoding) {
- region_.StoreUnaligned<uint32_t>(kStackMaskSizeOffset, encoding.NumberOfBytesForStackMask());
- region_.StoreBit(kHasInlineInfoBitOffset, encoding.NumberOfBytesForInlineInfo() != 0);
- SetEncodingAt(kInlineInfoBitOffset, encoding.NumberOfBytesForInlineInfo());
- SetEncodingAt(kDexRegisterMapBitOffset, encoding.NumberOfBytesForDexRegisterMap());
- SetEncodingAt(kDexPcBitOffset, encoding.NumberOfBytesForDexPc());
- SetEncodingAt(kNativePcBitOffset, encoding.NumberOfBytesForNativePc());
- SetEncodingAt(kRegisterMaskBitOffset, encoding.NumberOfBytesForRegisterMask());
+ bool HasInlineInfo(const CodeInfoEncoding& encoding) const {
+ return encoding.stack_map_encoding.GetInlineInfoEncoding().BitSize() > 0;
}
- void SetEncodingAt(size_t bit_offset, size_t number_of_bytes) {
- region_.StoreBits(bit_offset, number_of_bytes, kNumberOfBitForNumberOfBytesForEncoding);
- }
-
- size_t GetNumberOfBytesForEncoding(size_t bit_offset) const {
- return region_.LoadBits(bit_offset, kNumberOfBitForNumberOfBytesForEncoding);
- }
-
- bool HasInlineInfo() const {
- return region_.LoadBit(kHasInlineInfoBitOffset);
- }
-
- DexRegisterLocationCatalog GetDexRegisterLocationCatalog(const StackMapEncoding& encoding) const {
+ DexRegisterLocationCatalog GetDexRegisterLocationCatalog(const CodeInfoEncoding& encoding) const {
return DexRegisterLocationCatalog(region_.Subregion(
GetDexRegisterLocationCatalogOffset(encoding),
GetDexRegisterLocationCatalogSize(encoding)));
}
- StackMap GetStackMapAt(size_t i, const StackMapEncoding& encoding) const {
- size_t stack_map_size = encoding.ComputeStackMapSize();
+ StackMap GetStackMapAt(size_t i, const CodeInfoEncoding& encoding) const {
+ size_t stack_map_size = encoding.stack_map_size_in_bytes;
return StackMap(GetStackMaps(encoding).Subregion(i * stack_map_size, stack_map_size));
}
- OverallSizeType GetOverallSize() const {
- return region_.LoadUnaligned<OverallSizeType>(kOverallSizeOffset);
+ uint32_t GetNumberOfLocationCatalogEntries(const CodeInfoEncoding& encoding) const {
+ return encoding.number_of_location_catalog_entries;
}
- void SetOverallSize(OverallSizeType size) {
- region_.StoreUnaligned<OverallSizeType>(kOverallSizeOffset, size);
- }
-
- NumberOfLocationCatalogEntriesType GetNumberOfLocationCatalogEntries() const {
- return region_.LoadUnaligned<NumberOfLocationCatalogEntriesType>(
- kNumberOfLocationCatalogEntriesOffset);
- }
-
- void SetNumberOfLocationCatalogEntries(NumberOfLocationCatalogEntriesType num_entries) {
- region_.StoreUnaligned<NumberOfLocationCatalogEntriesType>(
- kNumberOfLocationCatalogEntriesOffset, num_entries);
- }
-
- uint32_t GetDexRegisterLocationCatalogSize(const StackMapEncoding& encoding) const {
+ uint32_t GetDexRegisterLocationCatalogSize(const CodeInfoEncoding& encoding) const {
return ComputeDexRegisterLocationCatalogSize(GetDexRegisterLocationCatalogOffset(encoding),
- GetNumberOfLocationCatalogEntries());
+ GetNumberOfLocationCatalogEntries(encoding));
}
- NumberOfStackMapsType GetNumberOfStackMaps() const {
- return region_.LoadUnaligned<NumberOfStackMapsType>(kNumberOfStackMapsOffset);
- }
-
- void SetNumberOfStackMaps(NumberOfStackMapsType number_of_stack_maps) {
- region_.StoreUnaligned<NumberOfStackMapsType>(kNumberOfStackMapsOffset, number_of_stack_maps);
+ uint32_t GetNumberOfStackMaps(const CodeInfoEncoding& encoding) const {
+ return encoding.number_of_stack_maps;
}
// Get the size of all the stack maps of this CodeInfo object, in bytes.
- size_t GetStackMapsSize(const StackMapEncoding& encoding) const {
- return encoding.ComputeStackMapSize() * GetNumberOfStackMaps();
+ size_t GetStackMapsSize(const CodeInfoEncoding& encoding) const {
+ return encoding.stack_map_size_in_bytes * GetNumberOfStackMaps(encoding);
}
- uint32_t GetDexRegisterLocationCatalogOffset(const StackMapEncoding& encoding) const {
- return GetStackMapsOffset() + GetStackMapsSize(encoding);
+ uint32_t GetDexRegisterLocationCatalogOffset(const CodeInfoEncoding& encoding) const {
+ return GetStackMapsOffset(encoding) + GetStackMapsSize(encoding);
}
- size_t GetDexRegisterMapsOffset(const StackMapEncoding& encoding) const {
+ size_t GetDexRegisterMapsOffset(const CodeInfoEncoding& encoding) const {
return GetDexRegisterLocationCatalogOffset(encoding)
+ GetDexRegisterLocationCatalogSize(encoding);
}
- uint32_t GetStackMapsOffset() const {
- return kFixedSize;
+ uint32_t GetStackMapsOffset(const CodeInfoEncoding& encoding) const {
+ return encoding.header_size;
}
DexRegisterMap GetDexRegisterMapOf(StackMap stack_map,
- const StackMapEncoding& encoding,
+ const CodeInfoEncoding& encoding,
uint32_t number_of_dex_registers) const {
- if (!stack_map.HasDexRegisterMap(encoding)) {
+ if (!stack_map.HasDexRegisterMap(encoding.stack_map_encoding)) {
return DexRegisterMap();
} else {
uint32_t offset = GetDexRegisterMapsOffset(encoding)
- + stack_map.GetDexRegisterMapOffset(encoding);
- size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers);
+ + stack_map.GetDexRegisterMapOffset(encoding.stack_map_encoding);
+ size_t size = ComputeDexRegisterMapSizeOf(encoding, offset, number_of_dex_registers);
return DexRegisterMap(region_.Subregion(offset, size));
}
}
@@ -1129,31 +1108,31 @@
// Return the `DexRegisterMap` pointed by `inline_info` at depth `depth`.
DexRegisterMap GetDexRegisterMapAtDepth(uint8_t depth,
InlineInfo inline_info,
- const StackMapEncoding& encoding,
+ const CodeInfoEncoding& encoding,
uint32_t number_of_dex_registers) const {
if (!inline_info.HasDexRegisterMapAtDepth(depth)) {
return DexRegisterMap();
} else {
uint32_t offset = GetDexRegisterMapsOffset(encoding)
+ inline_info.GetDexRegisterMapOffsetAtDepth(depth);
- size_t size = ComputeDexRegisterMapSizeOf(offset, number_of_dex_registers);
+ size_t size = ComputeDexRegisterMapSizeOf(encoding, offset, number_of_dex_registers);
return DexRegisterMap(region_.Subregion(offset, size));
}
}
- InlineInfo GetInlineInfoOf(StackMap stack_map, const StackMapEncoding& encoding) const {
- DCHECK(stack_map.HasInlineInfo(encoding));
- uint32_t offset = stack_map.GetInlineDescriptorOffset(encoding)
+ InlineInfo GetInlineInfoOf(StackMap stack_map, const CodeInfoEncoding& encoding) const {
+ DCHECK(stack_map.HasInlineInfo(encoding.stack_map_encoding));
+ uint32_t offset = stack_map.GetInlineDescriptorOffset(encoding.stack_map_encoding)
+ GetDexRegisterMapsOffset(encoding);
uint8_t depth = region_.LoadUnaligned<uint8_t>(offset);
return InlineInfo(region_.Subregion(offset,
InlineInfo::kFixedSize + depth * InlineInfo::SingleEntrySize()));
}
- StackMap GetStackMapForDexPc(uint32_t dex_pc, const StackMapEncoding& encoding) const {
- for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+ StackMap GetStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
+ for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
StackMap stack_map = GetStackMapAt(i, encoding);
- if (stack_map.GetDexPc(encoding) == dex_pc) {
+ if (stack_map.GetDexPc(encoding.stack_map_encoding) == dex_pc) {
return stack_map;
}
}
@@ -1162,37 +1141,39 @@
// Searches the stack map list backwards because catch stack maps are stored
// at the end.
- StackMap GetCatchStackMapForDexPc(uint32_t dex_pc, const StackMapEncoding& encoding) const {
- for (size_t i = GetNumberOfStackMaps(); i > 0; --i) {
+ StackMap GetCatchStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
+ for (size_t i = GetNumberOfStackMaps(encoding); i > 0; --i) {
StackMap stack_map = GetStackMapAt(i - 1, encoding);
- if (stack_map.GetDexPc(encoding) == dex_pc) {
+ if (stack_map.GetDexPc(encoding.stack_map_encoding) == dex_pc) {
return stack_map;
}
}
return StackMap();
}
- StackMap GetOsrStackMapForDexPc(uint32_t dex_pc, const StackMapEncoding& encoding) const {
- size_t e = GetNumberOfStackMaps();
+ StackMap GetOsrStackMapForDexPc(uint32_t dex_pc, const CodeInfoEncoding& encoding) const {
+ size_t e = GetNumberOfStackMaps(encoding);
if (e == 0) {
// There cannot be an OSR stack map if there are no stack maps.
return StackMap();
}
// Walk over all stack maps. If two consecutive stack maps are identical, then we
// have found a stack map suitable for OSR.
+ const StackMapEncoding& stack_map_encoding = encoding.stack_map_encoding;
for (size_t i = 0; i < e - 1; ++i) {
StackMap stack_map = GetStackMapAt(i, encoding);
- if (stack_map.GetDexPc(encoding) == dex_pc) {
+ if (stack_map.GetDexPc(stack_map_encoding) == dex_pc) {
StackMap other = GetStackMapAt(i + 1, encoding);
- if (other.GetDexPc(encoding) == dex_pc &&
- other.GetNativePcOffset(encoding) == stack_map.GetNativePcOffset(encoding)) {
- DCHECK_EQ(other.GetDexRegisterMapOffset(encoding),
- stack_map.GetDexRegisterMapOffset(encoding));
- DCHECK(!stack_map.HasInlineInfo(encoding));
+ if (other.GetDexPc(stack_map_encoding) == dex_pc &&
+ other.GetNativePcOffset(stack_map_encoding) ==
+ stack_map.GetNativePcOffset(stack_map_encoding)) {
+ DCHECK_EQ(other.GetDexRegisterMapOffset(stack_map_encoding),
+ stack_map.GetDexRegisterMapOffset(stack_map_encoding));
+ DCHECK(!stack_map.HasInlineInfo(stack_map_encoding));
if (i < e - 2) {
// Make sure there are not three identical stack maps following each other.
- DCHECK_NE(stack_map.GetNativePcOffset(encoding),
- GetStackMapAt(i + 2, encoding).GetNativePcOffset(encoding));
+ DCHECK_NE(stack_map.GetNativePcOffset(stack_map_encoding),
+ GetStackMapAt(i + 2, encoding).GetNativePcOffset(stack_map_encoding));
}
return stack_map;
}
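The OSR lookup above relies on an invariant the compiler establishes: an OSR entry is marked by exactly two consecutive stack maps with the same dex pc and the same native pc offset, never three. A simplified model of the scan:

#include <cstdint>
#include <vector>

struct Entry {
  uint32_t dex_pc;
  uint32_t native_pc_offset;
};

// Returns the index of the OSR stack map for dex_pc, or -1 if there is none.
int FindOsrMap(const std::vector<Entry>& maps, uint32_t dex_pc) {
  for (size_t i = 0; i + 1 < maps.size(); ++i) {
    if (maps[i].dex_pc == dex_pc &&
        maps[i + 1].dex_pc == dex_pc &&
        maps[i + 1].native_pc_offset == maps[i].native_pc_offset) {
      return static_cast<int>(i);
    }
  }
  return -1;
}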
@@ -1202,13 +1183,13 @@
}
StackMap GetStackMapForNativePcOffset(uint32_t native_pc_offset,
- const StackMapEncoding& encoding) const {
+ const CodeInfoEncoding& encoding) const {
// TODO: Safepoint stack maps are sorted by native_pc_offset but catch stack
// maps are not. If we knew that the method does not have try/catch,
// we could do binary search.
- for (size_t i = 0, e = GetNumberOfStackMaps(); i < e; ++i) {
+ for (size_t i = 0, e = GetNumberOfStackMaps(encoding); i < e; ++i) {
StackMap stack_map = GetStackMapAt(i, encoding);
- if (stack_map.GetNativePcOffset(encoding) == native_pc_offset) {
+ if (stack_map.GetNativePcOffset(encoding.stack_map_encoding) == native_pc_offset) {
return stack_map;
}
}
@@ -1226,38 +1207,16 @@
bool dump_stack_maps) const;
private:
- static constexpr int kOverallSizeOffset = 0;
- static constexpr int kEncodingInfoOffset = ELEMENT_BYTE_OFFSET_AFTER(OverallSize);
- static constexpr int kNumberOfLocationCatalogEntriesOffset =
- ELEMENT_BYTE_OFFSET_AFTER(EncodingInfo);
- static constexpr int kNumberOfStackMapsOffset =
- ELEMENT_BYTE_OFFSET_AFTER(NumberOfLocationCatalogEntries);
- static constexpr int kStackMaskSizeOffset = ELEMENT_BYTE_OFFSET_AFTER(NumberOfStackMaps);
- static constexpr int kFixedSize = ELEMENT_BYTE_OFFSET_AFTER(StackMaskSize);
-
- static constexpr int kHasInlineInfoBitOffset = kEncodingInfoOffset * kBitsPerByte;
- static constexpr int kInlineInfoBitOffset = ELEMENT_BIT_OFFSET_AFTER(HasInlineInfo);
- static constexpr int kDexRegisterMapBitOffset = ELEMENT_BIT_OFFSET_AFTER(InlineInfo);
- static constexpr int kDexPcBitOffset = ELEMENT_BIT_OFFSET_AFTER(DexRegisterMap);
- static constexpr int kNativePcBitOffset = ELEMENT_BIT_OFFSET_AFTER(DexPc);
- static constexpr int kRegisterMaskBitOffset = ELEMENT_BIT_OFFSET_AFTER(NativePc);
-
- static constexpr int kEncodingInfoPastTheEndBitOffset = ELEMENT_BIT_OFFSET_AFTER(RegisterMask);
- static constexpr int kEncodingInfoOverallBitSize =
- kEncodingInfoPastTheEndBitOffset - kHasInlineInfoBitOffset;
-
- static_assert(kEncodingInfoOverallBitSize <= (sizeof(EncodingInfoType) * kBitsPerByte),
- "art::CodeInfo::EncodingInfoType is too short to hold all encoding info elements.");
-
- MemoryRegion GetStackMaps(const StackMapEncoding& encoding) const {
+ MemoryRegion GetStackMaps(const CodeInfoEncoding& encoding) const {
return region_.size() == 0
? MemoryRegion()
- : region_.Subregion(GetStackMapsOffset(), GetStackMapsSize(encoding));
+ : region_.Subregion(GetStackMapsOffset(encoding), GetStackMapsSize(encoding));
}
// Compute the size of the Dex register map associated with the stack map at
// `dex_register_map_offset_in_code_info`.
- size_t ComputeDexRegisterMapSizeOf(uint32_t dex_register_map_offset_in_code_info,
+ size_t ComputeDexRegisterMapSizeOf(const CodeInfoEncoding& encoding,
+ uint32_t dex_register_map_offset_in_code_info,
uint16_t number_of_dex_registers) const {
// Offset where the actual mapping data starts within art::DexRegisterMap.
size_t location_mapping_data_offset_in_dex_register_map =
@@ -1270,7 +1229,7 @@
size_t number_of_live_dex_registers =
dex_register_map_without_locations.GetNumberOfLiveDexRegisters(number_of_dex_registers);
size_t location_mapping_data_size_in_bits =
- DexRegisterMap::SingleEntrySizeInBits(GetNumberOfLocationCatalogEntries())
+ DexRegisterMap::SingleEntrySizeInBits(GetNumberOfLocationCatalogEntries(encoding))
* number_of_live_dex_registers;
size_t location_mapping_data_size_in_bytes =
RoundUp(location_mapping_data_size_in_bits, kBitsPerByte) / kBitsPerByte;
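The size computation above boils down to: only live Dex registers get a location entry, each entry takes SingleEntrySizeInBits(number_of_catalog_entries) bits, and the bit total is rounded up to whole bytes. In sketch form:

#include <cstddef>

size_t ComputeLocationDataBytes(size_t live_registers, size_t bits_per_entry) {
  const size_t kBitsPerByte = 8;
  size_t bits = live_registers * bits_per_entry;
  return (bits + kBitsPerByte - 1) / kBitsPerByte;  // RoundUp(bits, 8) / 8.
}

For example, 10 live registers at 5 bits per entry take 50 bits, i.e. 7 bytes.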
diff --git a/runtime/thread.cc b/runtime/thread.cc
index e3adf9f..3ecb041 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -90,8 +90,7 @@
pthread_key_t Thread::pthread_key_self_;
ConditionVariable* Thread::resume_cond_ = nullptr;
const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
-// Enabled for b/27493510. TODO: disable when fixed.
-static constexpr bool kVerifyImageObjectsMarked = true;
+static constexpr bool kVerifyImageObjectsMarked = kIsDebugBuild;
// For implicit overflow checks we reserve an extra piece of memory at the bottom
// of the stack (lowest memory). The higher portion of the memory
@@ -2771,13 +2770,13 @@
reinterpret_cast<uintptr_t>(cur_quick_frame));
uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
CodeInfo code_info = method_header->GetOptimizedCodeInfo();
- StackMapEncoding encoding = code_info.ExtractEncoding();
+ CodeInfoEncoding encoding = code_info.ExtractEncoding();
StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
DCHECK(map.IsValid());
- MemoryRegion mask = map.GetStackMask(encoding);
// Visit stack entries that hold pointers.
- for (size_t i = 0; i < mask.size_in_bits(); ++i) {
- if (mask.LoadBit(i)) {
+ size_t number_of_bits = map.GetNumberOfStackMaskBits(encoding.stack_map_encoding);
+ for (size_t i = 0; i < number_of_bits; ++i) {
+ if (map.GetStackMaskBit(encoding.stack_map_encoding, i)) {
auto* ref_addr = vreg_base + i;
mirror::Object* ref = ref_addr->AsMirrorPtr();
if (ref != nullptr) {
@@ -2790,7 +2789,7 @@
}
}
// Visit callee-save registers that hold pointers.
- uint32_t register_mask = map.GetRegisterMask(encoding);
+ uint32_t register_mask = map.GetRegisterMask(encoding.stack_map_encoding);
for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
if (register_mask & (1 << i)) {
mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
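The two loops above are the GC-root walk in miniature: stack-mask bits select frame slots that hold references, register-mask bits select callee-save registers that do. A sketch with hypothetical stand-ins for the frame and register accessors:

#include <cstdint>
#include <vector>

void VisitFrameRoots(const std::vector<bool>& stack_mask,
                     uint32_t register_mask,
                     const std::vector<uintptr_t>& frame_slots,
                     const std::vector<uintptr_t>& callee_save_regs,
                     void (*visit)(uintptr_t)) {
  for (size_t i = 0; i < stack_mask.size() && i < frame_slots.size(); ++i) {
    if (stack_mask[i] && frame_slots[i] != 0) {
      visit(frame_slots[i]);  // Frame slot i holds a managed reference.
    }
  }
  for (size_t i = 0; i < 32 && i < callee_save_regs.size(); ++i) {
    if ((register_mask & (1u << i)) != 0) {
      visit(callee_save_regs[i]);  // Register i holds a managed reference.
    }
  }
}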
diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc
index b2934ed..167a575 100644
--- a/test/566-polymorphic-inlining/polymorphic_inline.cc
+++ b/test/566-polymorphic-inlining/polymorphic_inline.cc
@@ -43,7 +43,8 @@
}
CodeInfo info = header->GetOptimizedCodeInfo();
- CHECK(info.HasInlineInfo());
+ CodeInfoEncoding encoding = info.ExtractEncoding();
+ CHECK(info.HasInlineInfo(encoding));
}
extern "C" JNIEXPORT void JNICALL Java_Main_ensureJittedAndPolymorphicInline(JNIEnv*, jclass cls) {
diff --git a/test/593-checker-shift-and-simplifier/info.txt b/test/593-checker-shift-and-simplifier/info.txt
index 2fae678..2f4c7f5 100644
--- a/test/593-checker-shift-and-simplifier/info.txt
+++ b/test/593-checker-shift-and-simplifier/info.txt
@@ -1 +1 @@
-Regression test on pattern that caused double removal of AND by AMD64 simplifier.
+Regression test on pattern that caused double removal of AND by ARM64 simplifier.
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 11a38cb..1edc599 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -37,14 +37,10 @@
$(DX) \
$(HOST_OUT_EXECUTABLES)/jasmin \
$(HOST_OUT_EXECUTABLES)/smali \
- $(HOST_OUT_EXECUTABLES)/dexmerger
-TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES :=
+ $(HOST_OUT_EXECUTABLES)/dexmerger \
+ $(JACK)
-ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
- TEST_ART_RUN_TEST_DEPENDENCIES += \
- $(JACK)
- TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES += setup-jack-server
-endif
+TEST_ART_RUN_TEST_ORDERONLY_DEPENDENCIES := setup-jack-server
ifeq ($(ART_TEST_DEBUG_GC),true)
ART_TEST_WITH_STRACE := true
@@ -55,11 +51,6 @@
define define-build-art-run-test
dmart_target := $(art_run_tests_dir)/art-run-tests/$(1)/touch
run_test_options = --build-only
- ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
- run_test_options += --build-with-jack
- else
- run_test_options += --build-with-javac-dx
- endif
ifeq ($(ART_TEST_QUIET),true)
run_test_options += --quiet
endif
@@ -670,11 +661,6 @@
test_groups :=
uc_host_or_target :=
jack_classpath :=
- ifeq ($(ANDROID_COMPILE_WITH_JACK),true)
- run_test_options += --build-with-jack
- else
- run_test_options += --build-with-javac-dx
- endif
ifeq ($(ART_TEST_WITH_STRACE),true)
run_test_options += --strace
endif