Remove unnecessary indirection from MemMap.
Avoid passing plain MemMap pointers around by making MemMap
moveable and returning MemMap objects by value. Previously a
valid zero-size MemMap was possible; this is now forbidden.
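
For illustration, a minimal sketch of the resulting pattern,
using a hypothetical Mapping type rather than the real
art::MemMap interface: a moveable, non-copyable wrapper that
is returned by value and rejects zero-size mappings up front.

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>
    #include <utility>

    // Hypothetical sketch, not art::MemMap: a moveable,
    // non-copyable mapping owned by value.
    class Mapping {
     public:
      static Mapping Create(size_t size) {
        assert(size != 0u);  // A valid zero-size mapping is forbidden.
        void* addr = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        assert(addr != MAP_FAILED);
        return Mapping(addr, size);  // By value: moved, never copied.
      }

      Mapping(Mapping&& other) noexcept
          : addr_(std::exchange(other.addr_, nullptr)),
            size_(std::exchange(other.size_, 0u)) {}

      Mapping(const Mapping&) = delete;
      Mapping& operator=(const Mapping&) = delete;

      ~Mapping() {
        if (addr_ != nullptr) {
          munmap(addr_, size_);  // Only the current owner unmaps.
        }
      }

     private:
      Mapping(void* addr, size_t size) : addr_(addr), size_(size) {}

      void* addr_;
      size_t size_;
    };

Storing such a type directly as a member (as in the
page_map_mem_map_ change below) removes one heap allocation
and one pointer indirection per access.
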
MemMap::RemapAtEnd() is changed to avoid the explicit call
to munmap(): mmap() with MAP_FIXED atomically discards any
overlapping part of the old mapping, so no separate unmap
step is needed.
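
The MAP_FIXED behavior relied on can be demonstrated with
plain POSIX calls; this is a standalone sketch, not the
RemapAtEnd() implementation:

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main() {
      const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));

      // One anonymous mapping covering two pages.
      void* raw = mmap(nullptr, 2 * page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(raw != MAP_FAILED);
      uint8_t* base = static_cast<uint8_t*>(raw);

      // Remap the second page in place. MAP_FIXED atomically discards
      // the overlapping part of the old mapping, so no explicit
      // munmap() is needed and the address range is never left
      // unmapped in between.
      void* tail = mmap(base + page, page, PROT_READ,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
      assert(tail == base + page);

      munmap(base, 2 * page);  // Cleanup; munmap() spans both pieces.
      return 0;
    }
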
Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: Pixel 2 XL boots.
Test: m test-art-target-gtest
Test: testrunner.py --target --optimizing
Change-Id: I12bd453c26a396edc20eb141bfd4dad20923f170
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 30213d5..0562167 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -31,13 +31,12 @@
#include "base/allocator.h"
#include "base/bit_utils.h"
#include "base/globals.h"
+#include "base/mem_map.h"
#include "base/mutex.h"
#include "thread.h"
namespace art {
-class MemMap;
-
namespace gc {
namespace allocator {
@@ -746,7 +745,7 @@
volatile uint8_t* page_map_; // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
size_t page_map_size_;
size_t max_page_map_size_;
- std::unique_ptr<MemMap> page_map_mem_map_;
+ MemMap page_map_mem_map_;
// The table that indicates the size of free page runs. These sizes
// are stored here to avoid storing in the free page header and