Reduce memory lost by ArenaAllocator for large allocations.
When allocating from a new arena, check whether the old arena
has more space remaining than the new one will have after the
current allocation. If so, keep using the old arena to reduce
the amount of "lost" arena memory. This can happen when we try
to allocate more than half the default arena size; if the
allocation exceeds the default arena size, it is almost
guaranteed to happen unless the ArenaPool can provide a much
larger, previously allocated arena.
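
To make the policy concrete, below is a minimal standalone
sketch, not the ART code: the SketchArena type, the KeepOldArena
helper and the 128 KiB default size are made up for illustration,
and it assumes the pool hands back an arena of exactly
max(default, request) bytes.

    // Hypothetical sketch of the "keep whichever arena has more
    // space left" decision; names do not come from the ART sources.
    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    constexpr std::size_t kDefaultArenaSize = 128 * 1024;  // assumed default

    struct SketchArena {
      std::size_t size;       // total capacity of the arena
      std::size_t allocated;  // bytes already handed out
      std::size_t remaining() const { return size - allocated; }
    };

    // Returns true if the allocator should keep the old arena as the
    // "current" arena and treat the new arena as a one-off block that
    // only serves this single oversized allocation.
    bool KeepOldArena(const SketchArena& old_arena, std::size_t request) {
      // A new arena is sized max(kDefaultArenaSize, request), so after
      // serving the request it has this much space left over.
      std::size_t new_size = std::max(kDefaultArenaSize, request);
      std::size_t new_remaining = new_size - request;
      // Keep whichever arena leaves more usable space for future
      // allocations.
      return old_arena.remaining() > new_remaining;
    }

    int main() {
      // Old arena: 128 KiB with 40 KiB still free.
      SketchArena old_arena{kDefaultArenaSize, kDefaultArenaSize - 40 * 1024};
      // A 96 KiB request does not fit in the old arena, and a fresh
      // default-sized arena would keep only 32 KiB free, so the old
      // arena's 40 KiB wins and it stays current.
      std::printf("keep old: %d\n", KeepOldArena(old_arena, 96 * 1024));
      // A 200 KiB request gets an arena of exactly 200 KiB (nothing
      // left over), so the old arena is kept as well.
      std::printf("keep old: %d\n", KeepOldArena(old_arena, 200 * 1024));
      return 0;
    }

Previously, the new arena always became the current arena, so in
the example above the old arena's 40 KiB would have been "lost"
for the rest of the allocator's lifetime.
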
Also avoid arithmetic overflow when checking whether the
request can be satisfied from the current arena.
And abort immediately if calloc() fails.
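
For the overflow fix, here is a standalone illustration (the
CapacityCheck wrapper, the buffer and main() are made up; only
the ptr_/end_ field names and the two forms of the comparison
mirror the diff below):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct CapacityCheck {
      uint8_t* ptr_;  // next free byte in the current arena
      uint8_t* end_;  // one past the last byte of the current arena

      // Old form: for a huge `bytes`, `ptr_ + bytes` is undefined
      // behaviour (it points far outside the arena) and in practice
      // wraps around, so the comparison can report a fit that is
      // impossible.
      bool FitsUnsafe(size_t bytes) const {
        return !(ptr_ + bytes > end_);
      }

      // Fixed form: `end_ - ptr_` is always a small, valid difference
      // of two pointers into the same arena, so comparing sizes cannot
      // overflow.
      bool FitsSafe(size_t bytes) const {
        return bytes <= static_cast<size_t>(end_ - ptr_);
      }
    };

    int main() {
      uint8_t buffer[64];
      CapacityCheck check{buffer, buffer + sizeof(buffer)};
      std::printf("32 bytes fit: %d\n", check.FitsSafe(32));             // 1
      std::printf("SIZE_MAX bytes fit: %d\n", check.FitsSafe(SIZE_MAX)); // 0
      return 0;
    }
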
Bug: 28173563
Bug: 28256882
Change-Id: I1b4bda5d3f32ecd95fbd11addd1f0ca6dcc33e45
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index d951089..5b6801f 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -162,6 +162,7 @@
MallocArena::MallocArena(size_t size) {
memory_ = reinterpret_cast<uint8_t*>(calloc(1, size));
+ CHECK(memory_ != nullptr); // Abort on OOM.
size_ = size;
}
@@ -319,13 +320,23 @@
// mark only the actually allocated memory as defined. That leaves red zones
// and padding between allocations marked as inaccessible.
size_t rounded_bytes = RoundUp(bytes + kMemoryToolRedZoneBytes, 8);
- if (UNLIKELY(ptr_ + rounded_bytes > end_)) {
- // Obtain a new block.
- ObtainNewArenaForAllocation(rounded_bytes);
- CHECK(ptr_ != nullptr);
- MEMORY_TOOL_MAKE_NOACCESS(ptr_, end_ - ptr_);
- }
ArenaAllocatorStats::RecordAlloc(rounded_bytes, kind);
+ if (UNLIKELY(rounded_bytes > static_cast<size_t>(end_ - ptr_))) {
+ void* ret = AllocFromNewArena(rounded_bytes);
+ if (ret == arena_head_->Begin()) {
+ DCHECK(ptr_ - rounded_bytes == ret);
+ uint8_t* noaccess_begin = ptr_ - rounded_bytes + bytes;
+ MEMORY_TOOL_MAKE_NOACCESS(noaccess_begin, end_ - noaccess_begin);
+ } else {
+ // We're still using the old arena but `ret` comes from a new one just after it.
+ DCHECK(arena_head_->next_ != nullptr);
+ DCHECK(ret == arena_head_->next_->Begin());
+ DCHECK_EQ(rounded_bytes, arena_head_->next_->GetBytesAllocated());
+ uint8_t* noaccess_begin = arena_head_->next_->Begin() + bytes;
+ MEMORY_TOOL_MAKE_NOACCESS(noaccess_begin, arena_head_->next_->End() - noaccess_begin);
+ }
+ return ret;
+ }
uint8_t* ret = ptr_;
ptr_ += rounded_bytes;
MEMORY_TOOL_MAKE_DEFINED(ret, bytes);
@@ -340,14 +351,27 @@
pool_->FreeArenaChain(arena_head_);
}
-void ArenaAllocator::ObtainNewArenaForAllocation(size_t allocation_size) {
- UpdateBytesAllocated();
- Arena* new_arena = pool_->AllocArena(std::max(Arena::kDefaultSize, allocation_size));
- new_arena->next_ = arena_head_;
- arena_head_ = new_arena;
- // Update our internal data structures.
- ptr_ = begin_ = new_arena->Begin();
- end_ = new_arena->End();
+void* ArenaAllocator::AllocFromNewArena(size_t bytes) {
+ Arena* new_arena = pool_->AllocArena(std::max(Arena::kDefaultSize, bytes));
+ DCHECK(new_arena != nullptr);
+ DCHECK_LE(bytes, new_arena->Size());
+ if (static_cast<size_t>(end_ - ptr_) > new_arena->Size() - bytes) {
+ // The old arena has more space remaining than the new one, so keep using it.
+ // This can happen when the requested size is over half of the default size.
+ DCHECK(arena_head_ != nullptr);
+ new_arena->bytes_allocated_ = bytes; // UpdateBytesAllocated() on the new_arena.
+ new_arena->next_ = arena_head_->next_;
+ arena_head_->next_ = new_arena;
+ } else {
+ UpdateBytesAllocated();
+ new_arena->next_ = arena_head_;
+ arena_head_ = new_arena;
+ // Update our internal data structures.
+ begin_ = new_arena->Begin();
+ ptr_ = begin_ + bytes;
+ end_ = new_arena->End();
+ }
+ return new_arena->Begin();
}
bool ArenaAllocator::Contains(const void* ptr) const {