Revert^2 "Stack maps: Interleave consecutive varints."
Reorder the layout of consecutive varints: store all of the 'headers'
that define each varint's size first, and then store any large values
after them. The encoded size is essentially unchanged (the bits mostly
just move), but reading the values from memory becomes faster because
the headers now sit at fixed positions. This speeds up CodeInfo by
10%, and maps startup by 0.1%.
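
As a purely illustrative sketch (hypothetical helpers, not ART's
actual BitMemoryWriter, which packs at bit rather than byte
granularity), assume a 4-bit header per value: headers 0..11 store the
value inline, and headers 12..15 mean the value follows out-of-line in
1..4 little-endian bytes:

  #include <array>
  #include <cstddef>
  #include <cstdint>
  #include <vector>

  // Headers 0..11 hold the value itself; 12..15 mean the value
  // follows out-of-line in (header - 11) bytes. (Assumed scheme,
  // simplified to whole bytes for illustration.)
  constexpr uint32_t kInlineMax = 11;

  inline uint32_t PayloadBytes(uint32_t v) {
    return (v <= 0xFF) ? 1u : (v <= 0xFFFF) ? 2u : (v <= 0xFFFFFF) ? 3u : 4u;
  }

  template <size_t N>
  std::vector<uint8_t> WriteInterleavedVarints(
      const std::array<uint32_t, N>& values) {
    std::vector<uint8_t> out;
    // Pass 1: all 4-bit headers first, packed two per byte, so a
    // reader can fetch every header with one small load.
    uint8_t pending = 0;
    for (size_t i = 0; i < N; ++i) {
      uint32_t header = (values[i] <= kInlineMax)
          ? values[i]
          : kInlineMax + PayloadBytes(values[i]);
      if (i % 2 == 0) {
        pending = static_cast<uint8_t>(header);
      } else {
        out.push_back(static_cast<uint8_t>(pending | (header << 4)));
      }
    }
    if (N % 2 != 0) {
      out.push_back(pending);  // Flush the final unpaired header.
    }
    // Pass 2: large values follow, little-endian, in header order.
    for (uint32_t v : values) {
      if (v > kInlineMax) {
        for (uint32_t b = 0; b < PayloadBytes(v); ++b) {
          out.push_back(static_cast<uint8_t>(v >> (8 * b)));
        }
      }
    }
    return out;
  }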
This reverts commit 1b2a49b7aba39ed6663a69dfdf63d0df069f0d42.
Test: test.py -b --host --64 --optimizing
Change-Id: Ica7b42180ef2bae637445c0ce44fd3833ec0ecfc
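
For completeness, the matching decoder for the hypothetical byte-level
layout sketched above (reusing kInlineMax and the same includes) shows
why reads get cheaper: every header sits at a fixed offset, so no value
has to be decoded just to locate the next one:

  template <size_t N>
  std::array<uint32_t, N> ReadInterleavedVarints(const uint8_t* data) {
    std::array<uint32_t, N> values;
    // The headers occupy the first (N + 1) / 2 bytes; the out-of-line
    // payloads follow, in the same order as their headers.
    const uint8_t* payload = data + (N + 1) / 2;
    for (size_t i = 0; i < N; ++i) {
      uint32_t header = (data[i / 2] >> ((i % 2) * 4)) & 0xF;
      if (header <= kInlineMax) {
        values[i] = header;  // Small value stored inline in the header.
      } else {
        uint32_t v = 0;
        for (uint32_t b = 0; b < header - kInlineMax; ++b) {
          v |= static_cast<uint32_t>(*payload++) << (8 * b);
        }
        values[i] = v;
      }
    }
    return values;
  }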
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 87702cc..87e15ba 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -307,12 +307,14 @@
   ScopedArenaVector<uint8_t> buffer(allocator_->Adapter(kArenaAllocStackMapStream));
   BitMemoryWriter<ScopedArenaVector<uint8_t>> out(&buffer);
-  out.WriteVarint(flags);
-  out.WriteVarint(packed_frame_size_);
-  out.WriteVarint(core_spill_mask_);
-  out.WriteVarint(fp_spill_mask_);
-  out.WriteVarint(num_dex_registers_);
-  out.WriteVarint(bit_table_flags);
+  out.WriteInterleavedVarints(std::array<uint32_t, CodeInfo::kNumHeaders>{
+      flags,
+      packed_frame_size_,
+      core_spill_mask_,
+      fp_spill_mask_,
+      num_dex_registers_,
+      bit_table_flags,
+  });
   ForEachBitTable([&out](size_t, auto bit_table) {
     if (bit_table->size() != 0) {  // Skip empty bit-tables.
       bit_table->Encode(out);