Use ScopedArenaAllocator for code generation.

Reuse the memory previously allocated on the ArenaStack by
optimization passes.
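
Roughly, the pattern being extended to codegen looks like this
(a simplified sketch; 'pool' stands for the compilation's
ArenaPool and is not spelled this way in the code):

  #include "base/arena_allocator.h"
  #include "base/scoped_arena_containers.h"

  ArenaStack arena_stack(pool);
  {
    ScopedArenaAllocator allocator(&arena_stack);  // Opens an arena frame.
    ScopedArenaVector<uint32_t> indices(allocator.Adapter(kArenaAllocStackMapStream));
    indices.push_back(0u);
  }  // Closing the frame returns the memory to the ArenaStack for reuse.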

This CL handles only the architecture-independent codegen
and slow paths; architecture-dependent codegen allocations
will be moved to the ScopedArenaAllocator in a follow-up.

Memory needed to compile the two most expensive methods for
the aosp_angler-userdebug boot image:
  BatteryStats.dumpCheckinLocked(): 19.6MiB -> 18.5MiB (-1189KiB)
  BatteryStats.dumpLocked(): 39.3MiB -> 37.0MiB (-2379KiB)

Also move the definitions of functions that use bit_vector-inl.h
from bit_vector.h to bit_vector-inl.h.

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Bug: 64312607
Change-Id: I84688c3a5a95bf90f56bd3a150bc31fedc95f29c
diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc
index 7eb2188..9bc8045 100644
--- a/compiler/optimizing/stack_map_stream.cc
+++ b/compiler/optimizing/stack_map_stream.cc
@@ -43,9 +43,13 @@
   current_entry_.dex_method_index = dex::kDexNoIndex;
   current_entry_.dex_register_entry.num_dex_registers = num_dex_registers;
   current_entry_.dex_register_entry.locations_start_index = dex_register_locations_.size();
-  current_entry_.dex_register_entry.live_dex_registers_mask = (num_dex_registers != 0)
-      ? ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream)
-      : nullptr;
+  current_entry_.dex_register_entry.live_dex_registers_mask = nullptr;
+  if (num_dex_registers != 0u) {
+    current_entry_.dex_register_entry.live_dex_registers_mask =
+        ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream);
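+    // ScopedArenaAllocator memory is not pre-zeroed, so clear the bits explicitly.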
+    current_entry_.dex_register_entry.live_dex_registers_mask->ClearAllBits();
+  }
   if (sp_mask != nullptr) {
     stack_mask_max_ = std::max(stack_mask_max_, sp_mask->GetHighestBitSet());
   }
@@ -121,9 +125,13 @@
   current_inline_info_.dex_pc = dex_pc;
   current_inline_info_.dex_register_entry.num_dex_registers = num_dex_registers;
   current_inline_info_.dex_register_entry.locations_start_index = dex_register_locations_.size();
-  current_inline_info_.dex_register_entry.live_dex_registers_mask = (num_dex_registers != 0)
-      ? ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream)
-      : nullptr;
+  current_inline_info_.dex_register_entry.live_dex_registers_mask = nullptr;
+  if (num_dex_registers != 0u) {
+    current_inline_info_.dex_register_entry.live_dex_registers_mask =
+        ArenaBitVector::Create(allocator_, num_dex_registers, true, kArenaAllocStackMapStream);
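+    // As above, clear the bits; ScopedArenaAllocator memory is not pre-zeroed.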
+    current_inline_info_.dex_register_entry.live_dex_registers_mask->ClearAllBits();
+  }
   current_dex_register_ = 0;
 }
 
@@ -468,7 +476,7 @@
   if (entries_it == dex_map_hash_to_stack_map_indices_.end()) {
     // We don't have a perfect hash functions so we need a list to collect all stack maps
     // which might have the same dex register map.
-    ArenaVector<uint32_t> stack_map_indices(allocator_->Adapter(kArenaAllocStackMapStream));
+    ScopedArenaVector<uint32_t> stack_map_indices(allocator_->Adapter(kArenaAllocStackMapStream));
     stack_map_indices.push_back(current_entry_index);
     dex_map_hash_to_stack_map_indices_.Put(entry.hash, std::move(stack_map_indices));
   } else {
@@ -546,7 +554,7 @@
 
 size_t StackMapStream::PrepareRegisterMasks() {
   register_masks_.resize(stack_maps_.size(), 0u);
-  ArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
+  ScopedArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
   for (StackMapEntry& stack_map : stack_maps_) {
     const size_t index = dedupe.size();
     stack_map.register_mask_index = dedupe.emplace(stack_map.register_mask, index).first->second;
@@ -558,7 +566,7 @@
 void StackMapStream::PrepareMethodIndices() {
   CHECK(method_indices_.empty());
   method_indices_.resize(stack_maps_.size() + inline_infos_.size());
-  ArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
+  ScopedArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream));
   for (StackMapEntry& stack_map : stack_maps_) {
     const size_t index = dedupe.size();
     const uint32_t method_index = stack_map.dex_method_index;
@@ -584,11 +592,11 @@
   stack_masks_.resize(byte_entry_size * stack_maps_.size(), 0u);
   // For deduplicating we store the stack masks as byte packed for simplicity. We can bit pack later
   // when copying out from stack_masks_.
-  ArenaUnorderedMap<MemoryRegion,
-                    size_t,
-                    FNVHash<MemoryRegion>,
-                    MemoryRegion::ContentEquals> dedup(
-                        stack_maps_.size(), allocator_->Adapter(kArenaAllocStackMapStream));
+  ScopedArenaUnorderedMap<MemoryRegion,
+                          size_t,
+                          FNVHash<MemoryRegion>,
+                          MemoryRegion::ContentEquals> dedup(
+                              stack_maps_.size(), allocator_->Adapter(kArenaAllocStackMapStream));
   for (StackMapEntry& stack_map : stack_maps_) {
     size_t index = dedup.size();
     MemoryRegion stack_mask(stack_masks_.data() + index * byte_entry_size, byte_entry_size);