Remove unnecessary indirection from MemMap.
Avoid passing plain MemMap pointers around by making MemMap
moveable and returning MemMap objects by value. Previously,
a zero-size MemMap could still be valid; this is now
forbidden.
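
For illustration, the move-only value pattern this change adopts
looks roughly like the sketch below. This is a simplified
assumption, not the real base/mem_map.h; the actual MapAnonymous()
also takes name/addr/prot/low_4gb/reuse/error_msg parameters, as
the diff below shows.

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>
#include <utility>

class MemMap {
 public:
  MemMap() = default;  // Default-constructed maps are invalid.
  MemMap(MemMap&& other) noexcept { Swap(other); }
  MemMap& operator=(MemMap&& other) noexcept {
    Reset();      // Release our own mapping, if any.
    Swap(other);  // Steal the other mapping; `other` becomes invalid.
    return *this;
  }
  MemMap(const MemMap&) = delete;  // Move-only: exactly one owner.
  MemMap& operator=(const MemMap&) = delete;
  ~MemMap() { Reset(); }

  // Simplified stand-in for MemMap::MapAnonymous().
  static MemMap MapAnonymous(size_t byte_count) {
    MemMap result;
    if (byte_count == 0u) {
      return result;  // Zero-size maps are invalid, not "valid but empty".
    }
    void* addr = mmap(nullptr, byte_count, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (addr != MAP_FAILED) {
      result.begin_ = static_cast<uint8_t*>(addr);
      result.size_ = byte_count;
    }
    return result;  // Returned by value; moves, never copies.
  }

  bool IsValid() const { return begin_ != nullptr; }
  uint8_t* Begin() const { return begin_; }
  uint8_t* End() const { return begin_ + size_; }

  void Reset() {
    if (IsValid()) {
      munmap(begin_, size_);
      begin_ = nullptr;
      size_ = 0u;
    }
  }

 private:
  void Swap(MemMap& other) {
    std::swap(begin_, other.begin_);
    std::swap(size_, other.size_);
  }

  uint8_t* begin_ = nullptr;
  size_t size_ = 0u;
};

Holders such as rosalloc's page_map_mem_map_ can then embed the
MemMap directly instead of a std::unique_ptr<MemMap>, checking
IsValid() instead of comparing against nullptr.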
MemMap::RemapAtEnd() is changed to avoid the explicit call
to munmap(); mmap() with MAP_FIXED automatically removes
old mappings for overlapping regions.
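
To illustrate the MAP_FIXED behavior relied on here, a standalone
Linux sketch (not ART code): mapping over part of an existing
region atomically discards the old pages in that range, so a prior
munmap() is unnecessary and would only open a window for another
thread to grab the address.

#include <sys/mman.h>
#include <cassert>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kPageSize = 4096;  // Assumed page size for the sketch.

  // Reserve two anonymous pages.
  void* base = mmap(nullptr, 2 * kPageSize, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(base != MAP_FAILED);

  // Remap the tail page in place. MAP_FIXED atomically replaces the
  // old mapping for exactly this range; no explicit munmap() needed.
  void* tail = static_cast<char*>(base) + kPageSize;
  void* remapped = mmap(tail, kPageSize, PROT_READ,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  assert(remapped == tail);

  printf("tail page remapped at %p\n", remapped);
  munmap(base, 2 * kPageSize);
  return 0;
}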
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: Pixel 2 XL boots.
Test: m test-art-target-gtest
Test: testrunner.py --target --optimizing
Change-Id: I12bd453c26a396edc20eb141bfd4dad20923f170
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index a4095d8..1639a82 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -91,11 +91,15 @@
size_t num_of_pages = footprint_ / kPageSize;
size_t max_num_of_pages = max_capacity_ / kPageSize;
std::string error_msg;
- page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", nullptr,
- RoundUp(max_num_of_pages, kPageSize),
- PROT_READ | PROT_WRITE, false, false, &error_msg));
- CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
- page_map_ = page_map_mem_map_->Begin();
+ page_map_mem_map_ = MemMap::MapAnonymous("rosalloc page map",
+ /* addr */ nullptr,
+ RoundUp(max_num_of_pages, kPageSize),
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_msg);
+ CHECK(page_map_mem_map_.IsValid()) << "Couldn't allocate the page map : " << error_msg;
+ page_map_ = page_map_mem_map_.Begin();
page_map_size_ = num_of_pages;
max_page_map_size_ = max_num_of_pages;
free_page_run_size_map_.resize(num_of_pages);
@@ -1364,8 +1368,8 @@
// Zero out the tail of the page map.
uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
- DCHECK_LE(madvise_begin, page_map_mem_map_->End());
- size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
+ DCHECK_LE(madvise_begin, page_map_mem_map_.End());
+ size_t madvise_size = page_map_mem_map_.End() - madvise_begin;
if (madvise_size > 0) {
DCHECK_ALIGNED(madvise_begin, kPageSize);
DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 30213d5..0562167 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -31,13 +31,12 @@
#include "base/allocator.h"
#include "base/bit_utils.h"
#include "base/globals.h"
+#include "base/mem_map.h"
#include "base/mutex.h"
#include "thread.h"
namespace art {
-class MemMap;
-
namespace gc {
namespace allocator {

@@ -746,7 +745,7 @@
volatile uint8_t* page_map_; // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
size_t page_map_size_;
size_t max_page_map_size_;
- std::unique_ptr<MemMap> page_map_mem_map_;
+ MemMap page_map_mem_map_;

// The table that indicates the size of free page runs. These sizes
// are stored here to avoid storing in the free page header and