Remove the CodeOffset helper class.

This reduces StackMapEntry to a POD type so that it can be used in
BitTableBuilder. Native pc offsets are now stored as plain uint32_t
values, packed and unpacked by the new static StackMap::PackNativePc
and StackMap::UnpackNativePc helpers.
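For context, packing divides a native pc by the instruction alignment
of the target ISA so that the always-zero low bits never reach the bit
table, and unpacking multiplies them back. Below is a minimal
standalone sketch of the scheme; the Isa enum and Alignment() helper
are simplified stand-ins for ART's InstructionSet and
GetInstructionSetInstructionAlignment():

  #include <cassert>
  #include <cstdint>

  enum class Isa { kThumb2, kArm64, kX86 };

  // Stand-in for GetInstructionSetInstructionAlignment().
  constexpr uint32_t Alignment(Isa isa) {
    switch (isa) {
      case Isa::kThumb2: return 2;  // Thumb2: 2-byte instruction alignment.
      case Isa::kArm64:  return 4;  // ARM64: 4-byte instruction alignment.
      case Isa::kX86:    return 1;  // x86: no alignment requirement.
    }
    return 1;
  }

  // Pack: drop the low bits that alignment guarantees to be zero.
  constexpr uint32_t PackNativePc(uint32_t native_pc, Isa isa) {
    return native_pc / Alignment(isa);
  }

  // Unpack: restore the byte offset; the round trip must be lossless.
  constexpr uint32_t UnpackNativePc(uint32_t packed_native_pc, Isa isa) {
    return packed_native_pc * Alignment(isa);
  }

  int main() {
    uint32_t pc = 0x40;
    uint32_t packed = PackNativePc(pc, Isa::kArm64);
    assert(packed == 0x10);                             // two low bits saved
    assert(UnpackNativePc(packed, Isa::kArm64) == pc);  // lossless round trip
    return 0;
  }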
Test: test-art-host-gtest-stack_map_test
Change-Id: I5f9ad7fdc9c9405f22669a11aea14f925ef06ef7
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index de1be5b..b358bfa 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1161,8 +1161,8 @@
// last emitted is different than the native pc of the stack map just emitted.
size_t number_of_stack_maps = stack_map_stream->GetNumberOfStackMaps();
if (number_of_stack_maps > 1) {
- DCHECK_NE(stack_map_stream->GetStackMap(number_of_stack_maps - 1).native_pc_code_offset,
- stack_map_stream->GetStackMap(number_of_stack_maps - 2).native_pc_code_offset);
+ DCHECK_NE(stack_map_stream->GetStackMapNativePcOffset(number_of_stack_maps - 1),
+ stack_map_stream->GetStackMapNativePcOffset(number_of_stack_maps - 2));
}
}
}
@@ -1174,8 +1174,7 @@
if (count == 0) {
return false;
}
- CodeOffset native_pc_offset = stack_map_stream->GetStackMap(count - 1).native_pc_code_offset;
- return (native_pc_offset.Uint32Value(GetInstructionSet()) == pc);
+ return stack_map_stream->GetStackMapNativePcOffset(count - 1) == pc;
}
void CodeGenerator::MaybeRecordNativeDebugInfo(HInstruction* instruction,
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 7f3441f..8be84a1 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1042,8 +1042,7 @@
// Adjust native pc offsets in stack maps.
StackMapStream* stack_map_stream = GetStackMapStream();
for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
- uint32_t old_position =
- stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(InstructionSet::kMips);
+ uint32_t old_position = stack_map_stream->GetStackMapNativePcOffset(i);
uint32_t new_position = __ GetAdjustedPosition(old_position);
DCHECK_GE(new_position, old_position);
stack_map_stream->SetStackMapNativePcOffset(i, new_position);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index ee32b96..cd9e0e5 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -988,8 +988,7 @@
// Adjust native pc offsets in stack maps.
StackMapStream* stack_map_stream = GetStackMapStream();
for (size_t i = 0, num = stack_map_stream->GetNumberOfStackMaps(); i != num; ++i) {
- uint32_t old_position =
- stack_map_stream->GetStackMap(i).native_pc_code_offset.Uint32Value(InstructionSet::kMips64);
+ uint32_t old_position = stack_map_stream->GetStackMapNativePcOffset(i);
uint32_t new_position = __ GetAdjustedPosition(old_position);
DCHECK_GE(new_position, old_position);
stack_map_stream->SetStackMapNativePcOffset(i, new_position);
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index b40ea37..5dc2acd 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -25,6 +25,14 @@
namespace art {
+uint32_t StackMapStream::GetStackMapNativePcOffset(size_t i) {
+ return StackMap::UnpackNativePc(stack_maps_[i].packed_native_pc, instruction_set_);
+}
+
+void StackMapStream::SetStackMapNativePcOffset(size_t i, uint32_t native_pc_offset) {
+ stack_maps_[i].packed_native_pc = StackMap::PackNativePc(native_pc_offset, instruction_set_);
+}
+
void StackMapStream::BeginStackMapEntry(uint32_t dex_pc,
uint32_t native_pc_offset,
uint32_t register_mask,
@@ -33,7 +41,7 @@
uint8_t inlining_depth) {
DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry";
current_entry_.dex_pc = dex_pc;
- current_entry_.native_pc_code_offset = CodeOffset::FromOffset(native_pc_offset, instruction_set_);
+ current_entry_.packed_native_pc = StackMap::PackNativePc(native_pc_offset, instruction_set_);
current_entry_.register_mask = register_mask;
current_entry_.sp_mask = sp_mask;
current_entry_.inlining_depth = inlining_depth;
@@ -278,7 +286,7 @@
for (const StackMapEntry& entry : stack_maps_) {
if (entry.dex_method_index != dex::kDexNoIndex) {
std::array<uint32_t, InvokeInfo::kCount> invoke_info_entry {
- entry.native_pc_code_offset.CompressedValue(),
+ entry.packed_native_pc,
entry.invoke_type,
entry.dex_method_index_idx
};
@@ -306,7 +314,7 @@
inline_info_builder.Add(inline_info_entry);
}
std::array<uint32_t, StackMap::kCount> stack_map_entry {
- entry.native_pc_code_offset.CompressedValue(),
+ entry.packed_native_pc,
entry.dex_pc,
dex_register_entries_[entry.dex_register_map_index].offset,
entry.inlining_depth != 0 ? inline_info_index : InlineInfo::kNoValue,
@@ -476,7 +484,7 @@
// Check main stack map fields.
DCHECK_EQ(stack_map.GetNativePcOffset(instruction_set_),
- entry.native_pc_code_offset.Uint32Value(instruction_set_));
+ StackMap::UnpackNativePc(entry.packed_native_pc, instruction_set_));
DCHECK_EQ(stack_map.GetDexPc(), entry.dex_pc);
DCHECK_EQ(stack_map.GetRegisterMaskIndex(), entry.register_mask_index);
DCHECK_EQ(code_info.GetRegisterMaskOf(stack_map), entry.register_mask);
@@ -493,7 +501,7 @@
if (entry.dex_method_index != dex::kDexNoIndex) {
InvokeInfo invoke_info = code_info.GetInvokeInfo(invoke_info_index);
DCHECK_EQ(invoke_info.GetNativePcOffset(instruction_set_),
- entry.native_pc_code_offset.Uint32Value(instruction_set_));
+ StackMap::UnpackNativePc(entry.packed_native_pc, instruction_set_));
DCHECK_EQ(invoke_info.GetInvokeType(), entry.invoke_type);
DCHECK_EQ(invoke_info.GetMethodIndexIdx(), entry.dex_method_index_idx);
invoke_info_index++;
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 19863d8..37a9bfc 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -103,7 +103,7 @@
// See runtime/stack_map.h to know what these fields contain.
struct StackMapEntry {
uint32_t dex_pc;
- CodeOffset native_pc_code_offset;
+ uint32_t packed_native_pc;
uint32_t register_mask;
BitVector* sp_mask;
uint32_t inlining_depth;
@@ -148,14 +148,8 @@
return stack_maps_.size();
}
- const StackMapEntry& GetStackMap(size_t i) const {
- return stack_maps_[i];
- }
-
- void SetStackMapNativePcOffset(size_t i, uint32_t native_pc_offset) {
- stack_maps_[i].native_pc_code_offset =
- CodeOffset::FromOffset(native_pc_offset, instruction_set_);
- }
+ uint32_t GetStackMapNativePcOffset(size_t i);
+ void SetStackMapNativePcOffset(size_t i, uint32_t native_pc_offset);
// Prepares the stream to fill in a memory region. Must be called before FillIn.
// Returns the size (in bytes) needed to store this stream.
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index c372bb9..45466d8 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -918,26 +918,31 @@
}
}
-TEST(StackMapTest, CodeOffsetTest) {
- // Test minimum alignments, and decoding.
- CodeOffset offset_thumb2 =
- CodeOffset::FromOffset(kThumb2InstructionAlignment, InstructionSet::kThumb2);
- CodeOffset offset_arm64 =
- CodeOffset::FromOffset(kArm64InstructionAlignment, InstructionSet::kArm64);
- CodeOffset offset_x86 =
- CodeOffset::FromOffset(kX86InstructionAlignment, InstructionSet::kX86);
- CodeOffset offset_x86_64 =
- CodeOffset::FromOffset(kX86_64InstructionAlignment, InstructionSet::kX86_64);
- CodeOffset offset_mips =
- CodeOffset::FromOffset(kMipsInstructionAlignment, InstructionSet::kMips);
- CodeOffset offset_mips64 =
- CodeOffset::FromOffset(kMips64InstructionAlignment, InstructionSet::kMips64);
- EXPECT_EQ(offset_thumb2.Uint32Value(InstructionSet::kThumb2), kThumb2InstructionAlignment);
- EXPECT_EQ(offset_arm64.Uint32Value(InstructionSet::kArm64), kArm64InstructionAlignment);
- EXPECT_EQ(offset_x86.Uint32Value(InstructionSet::kX86), kX86InstructionAlignment);
- EXPECT_EQ(offset_x86_64.Uint32Value(InstructionSet::kX86_64), kX86_64InstructionAlignment);
- EXPECT_EQ(offset_mips.Uint32Value(InstructionSet::kMips), kMipsInstructionAlignment);
- EXPECT_EQ(offset_mips64.Uint32Value(InstructionSet::kMips64), kMips64InstructionAlignment);
+TEST(StackMapTest, PackedNativePcTest) {
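+ // Test that packing and unpacking round-trip at each ISA's minimum alignment.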
+ uint32_t packed_thumb2 =
+ StackMap::PackNativePc(kThumb2InstructionAlignment, InstructionSet::kThumb2);
+ uint32_t packed_arm64 =
+ StackMap::PackNativePc(kArm64InstructionAlignment, InstructionSet::kArm64);
+ uint32_t packed_x86 =
+ StackMap::PackNativePc(kX86InstructionAlignment, InstructionSet::kX86);
+ uint32_t packed_x86_64 =
+ StackMap::PackNativePc(kX86_64InstructionAlignment, InstructionSet::kX86_64);
+ uint32_t packed_mips =
+ StackMap::PackNativePc(kMipsInstructionAlignment, InstructionSet::kMips);
+ uint32_t packed_mips64 =
+ StackMap::PackNativePc(kMips64InstructionAlignment, InstructionSet::kMips64);
+ EXPECT_EQ(StackMap::UnpackNativePc(packed_thumb2, InstructionSet::kThumb2),
+ kThumb2InstructionAlignment);
+ EXPECT_EQ(StackMap::UnpackNativePc(packed_arm64, InstructionSet::kArm64),
+ kArm64InstructionAlignment);
+ EXPECT_EQ(StackMap::UnpackNativePc(packed_x86, InstructionSet::kX86),
+ kX86InstructionAlignment);
+ EXPECT_EQ(StackMap::UnpackNativePc(packed_x86_64, InstructionSet::kX86_64),
+ kX86_64InstructionAlignment);
+ EXPECT_EQ(StackMap::UnpackNativePc(packed_mips, InstructionSet::kMips),
+ kMipsInstructionAlignment);
+ EXPECT_EQ(StackMap::UnpackNativePc(packed_mips64, InstructionSet::kMips64),
+ kMips64InstructionAlignment);
}
TEST(StackMapTest, TestDeduplicateStackMask) {
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 6688cc1..1f197b8 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1699,7 +1699,7 @@
// Stack maps
stats_.AddBits(
Stats::kByteKindStackMapNativePc,
- stack_maps.NumColumnBits(StackMap::kNativePcOffset) * num_stack_maps);
+ stack_maps.NumColumnBits(StackMap::kPackedNativePc) * num_stack_maps);
stats_.AddBits(
Stats::kByteKindStackMapDexPc,
stack_maps.NumColumnBits(StackMap::kDexPc) * num_stack_maps);
diff --git a/runtime/arch/code_offset.h b/runtime/arch/code_offset.h
deleted file mode 100644
index f0c6d22..0000000
--- a/runtime/arch/code_offset.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_ARCH_CODE_OFFSET_H_
-#define ART_RUNTIME_ARCH_CODE_OFFSET_H_
-
-#include <iosfwd>
-
-#include <android-base/logging.h>
-
-#include "arch/instruction_set.h"
-#include "base/bit_utils.h"
-#include "base/macros.h"
-
-namespace art {
-
-// CodeOffset is a holder for compressed code offsets. Since some architectures have alignment
-// requirements it is possible to compress code offsets to reduce stack map sizes.
-class CodeOffset {
- public:
- ALWAYS_INLINE static CodeOffset FromOffset(uint32_t offset, InstructionSet isa = kRuntimeISA) {
- return CodeOffset(offset / GetInstructionSetInstructionAlignment(isa));
- }
-
- ALWAYS_INLINE static CodeOffset FromCompressedOffset(uint32_t offset) {
- return CodeOffset(offset);
- }
-
- ALWAYS_INLINE uint32_t Uint32Value(InstructionSet isa = kRuntimeISA) const {
- uint32_t decoded = value_ * GetInstructionSetInstructionAlignment(isa);
- DCHECK_GE(decoded, value_) << "Integer overflow";
- return decoded;
- }
-
- // Return compressed internal value.
- ALWAYS_INLINE uint32_t CompressedValue() const {
- return value_;
- }
-
- ALWAYS_INLINE CodeOffset() = default;
- ALWAYS_INLINE CodeOffset(const CodeOffset&) = default;
- ALWAYS_INLINE CodeOffset& operator=(const CodeOffset&) = default;
- ALWAYS_INLINE CodeOffset& operator=(CodeOffset&&) = default;
-
- private:
- ALWAYS_INLINE explicit CodeOffset(uint32_t value) : value_(value) {}
-
- uint32_t value_ = 0u;
-};
-
-inline bool operator==(const CodeOffset& a, const CodeOffset& b) {
- return a.CompressedValue() == b.CompressedValue();
-}
-
-inline bool operator!=(const CodeOffset& a, const CodeOffset& b) {
- return !(a == b);
-}
-
-inline bool operator<(const CodeOffset& a, const CodeOffset& b) {
- return a.CompressedValue() < b.CompressedValue();
-}
-
-inline bool operator<=(const CodeOffset& a, const CodeOffset& b) {
- return a.CompressedValue() <= b.CompressedValue();
-}
-
-inline bool operator>(const CodeOffset& a, const CodeOffset& b) {
- return a.CompressedValue() > b.CompressedValue();
-}
-
-inline bool operator>=(const CodeOffset& a, const CodeOffset& b) {
- return a.CompressedValue() >= b.CompressedValue();
-}
-
-inline std::ostream& operator<<(std::ostream& os, const CodeOffset& offset) {
- return os << offset.Uint32Value();
-}
-
-} // namespace art
-
-#endif // ART_RUNTIME_ARCH_CODE_OFFSET_H_
diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc
index fd0e28d..4ed60bf 100644
--- a/runtime/stack_map.cc
+++ b/runtime/stack_map.cc
@@ -90,7 +90,7 @@
VariableIndentationOutputStream* vios) {
vios->Stream()
<< "StackMapEncoding"
- << " (NativePcOffsetBits=" << table.NumColumnBits(kNativePcOffset)
+ << " (PackedNativePcBits=" << table.NumColumnBits(kPackedNativePc)
<< ", DexPcBits=" << table.NumColumnBits(kDexPc)
<< ", DexRegisterMapOffsetBits=" << table.NumColumnBits(kDexRegisterMapOffset)
<< ", InlineInfoIndexBits=" << table.NumColumnBits(kInlineInfoIndex)
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 1cb9a39..363884a 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -19,7 +19,6 @@
#include <limits>
-#include "arch/code_offset.h"
#include "base/bit_memory_region.h"
#include "base/bit_table.h"
#include "base/bit_utils.h"
@@ -658,7 +657,7 @@
class StackMap : public BitTable<6>::Accessor {
public:
enum Field {
- kNativePcOffset,
+ kPackedNativePc,
kDexPc,
kDexRegisterMapOffset,
kInlineInfoIndex,
@@ -672,8 +671,7 @@
: BitTable<kCount>::Accessor(table, row) {}
ALWAYS_INLINE uint32_t GetNativePcOffset(InstructionSet instruction_set) const {
- CodeOffset offset(CodeOffset::FromCompressedOffset(Get<kNativePcOffset>()));
- return offset.Uint32Value(instruction_set);
+ return UnpackNativePc(Get<kPackedNativePc>(), instruction_set);
}
uint32_t GetDexPc() const { return Get<kDexPc>(); }
@@ -688,6 +686,17 @@
uint32_t GetStackMaskIndex() const { return Get<kStackMaskIndex>(); }
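+ // Native pcs are stored divided by the ISA's instruction alignment, so the
+ // always-zero low bits do not take up space in the BitTable.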
+ static uint32_t PackNativePc(uint32_t native_pc, InstructionSet isa) {
+ // TODO: DCHECK_ALIGNED_PARAM(native_pc, GetInstructionSetInstructionAlignment(isa));
+ return native_pc / GetInstructionSetInstructionAlignment(isa);
+ }
+
+ static uint32_t UnpackNativePc(uint32_t packed_native_pc, InstructionSet isa) {
+ uint32_t native_pc = packed_native_pc * GetInstructionSetInstructionAlignment(isa);
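+ // Packing must be lossless; detect uint32_t overflow of the multiplication.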
+ DCHECK_EQ(native_pc / GetInstructionSetInstructionAlignment(isa), packed_native_pc);
+ return native_pc;
+ }
+
static void DumpEncoding(const BitTable<6>& table, VariableIndentationOutputStream* vios);
void Dump(VariableIndentationOutputStream* vios,
const CodeInfo& code_info,
@@ -776,7 +785,7 @@
class InvokeInfo : public BitTable<3>::Accessor {
public:
enum Field {
- kNativePcOffset,
+ kPackedNativePc,
kInvokeType,
kMethodIndexIdx,
kCount,
@@ -786,8 +795,7 @@
: BitTable<kCount>::Accessor(table, row) {}
ALWAYS_INLINE uint32_t GetNativePcOffset(InstructionSet instruction_set) const {
- CodeOffset offset(CodeOffset::FromCompressedOffset(Get<kNativePcOffset>()));
- return offset.Uint32Value(instruction_set);
+ return StackMap::UnpackNativePc(Get<kPackedNativePc>(), instruction_set);
}
uint32_t GetInvokeType() const { return Get<kInvokeType>(); }