Optimize register mask and stack mask in stack maps.
Use BitTable to store the masks as well and move the
deduplication responsibility to the BitTable builders.
Don't generate entries for all-zero masks.
This saves 0.2% of .oat file size on both Arm64 and Arm.
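For illustration, a minimal sketch of builder-side deduplication under
these assumptions (MaskTableBuilder, Dedup and kNoEntry are hypothetical
names; the real BitTableBuilder interface differs):

  #include <cstdint>
  #include <unordered_map>
  #include <vector>

  class MaskTableBuilder {
   public:
    static constexpr uint32_t kNoEntry = ~0u;

    // Return the table index of `mask`, interning it on first sight.
    // All-zero masks are never stored; they map to the reserved
    // "no entry" index instead.
    uint32_t Dedup(uint32_t mask) {
      if (mask == 0u) {
        return kNoEntry;
      }
      auto it = index_of_.find(mask);
      if (it != index_of_.end()) {
        return it->second;
      }
      uint32_t index = static_cast<uint32_t>(masks_.size());
      masks_.push_back(mask);
      index_of_.emplace(mask, index);
      return index;
    }

   private:
    std::vector<uint32_t> masks_;                      // deduplicated rows
    std::unordered_map<uint32_t, uint32_t> index_of_;  // mask -> row index
  };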
Encode register masks as (value, shift) pairs due to trailing zeros.
This saves 1.0% of .oat file size on Arm64 and 0.2% on Arm.
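A minimal sketch of that encoding, assuming a 32-bit mask (EncodedRegMask,
EncodeRegMask and DecodeRegMask are hypothetical names, not the actual
BitTable column layout):

  #include <cstdint>

  struct EncodedRegMask {
    uint32_t value;  // mask with its trailing zeros stripped
    uint32_t shift;  // number of trailing zeros that were stripped
  };

  // Split the mask into (value, shift) so that mask == value << shift.
  // A mask like 0b1100000000 becomes value=0b11, shift=8, which packs
  // into far fewer bits than the raw mask.
  EncodedRegMask EncodeRegMask(uint32_t mask) {
    // __builtin_ctz is the GCC/Clang count-trailing-zeros builtin;
    // it is undefined for 0, so handle the empty mask explicitly.
    uint32_t shift =
        (mask == 0u) ? 0u : static_cast<uint32_t>(__builtin_ctz(mask));
    return EncodedRegMask{mask >> shift, shift};
  }

  uint32_t DecodeRegMask(EncodedRegMask e) {
    return e.value << e.shift;
  }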
Test: test-art-host-gtest
Change-Id: I636b7edd49e10e8afc9f2aa385b5980f7ee0e1f1
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 9db7588..c372bb9 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -32,10 +32,10 @@
const StackMap& stack_map,
const BitVector& bit_vector) {
BitMemoryRegion stack_mask = code_info.GetStackMaskOf(stack_map);
- if (bit_vector.GetNumberOfBits() > code_info.GetNumberOfStackMaskBits()) {
+ if (bit_vector.GetNumberOfBits() > stack_mask.size_in_bits()) {
return false;
}
- for (size_t i = 0; i < code_info.GetNumberOfStackMaskBits(); ++i) {
+ for (size_t i = 0; i < stack_mask.size_in_bits(); ++i) {
if (stack_mask.LoadBit(i) != bit_vector.IsBitSet(i)) {
return false;
}