Add way to select arena type at runtime
We now use MemMap-backed arenas for the JIT and malloc-backed arenas
for everything else. This should help fix the reported compile-time
regression.
Change-Id: I6a6552738933f9d7ee3bd23f45e310818b19b70d
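For readers skimming the patch, here is a minimal standalone sketch of the pattern adopted
below: a base arena with a virtual Release(), one backing that uses calloc/free and one that
uses an anonymous mmap, and a factory that picks the backing from a runtime flag. The class
and member names are simplified stand-ins for illustration (Linux-only, error handling and
locking omitted), not the actual ART types changed in this diff.

// Illustrative sketch only: simplified stand-ins for the Arena split introduced below.
#include <sys/mman.h>
#include <cstdlib>
#include <cstring>
#include <cstddef>
#include <cstdint>

class Arena {
 public:
  Arena() = default;
  virtual ~Arena() {}
  // Zero the used part so a recycled arena starts clean.
  void Reset() {
    std::memset(memory_, 0, bytes_allocated_);
    bytes_allocated_ = 0;
  }
  // Return memory to the OS between uses; only meaningful for map-backed arenas.
  virtual void Release() {}
  uint8_t* memory_ = nullptr;
  size_t size_ = 0;
  size_t bytes_allocated_ = 0;
};

class MallocArena final : public Arena {
 public:
  explicit MallocArena(size_t size) {
    memory_ = static_cast<uint8_t*>(std::calloc(1, size));
    size_ = size;
  }
  ~MallocArena() override { std::free(memory_); }
};

class MemMapArena final : public Arena {
 public:
  explicit MemMapArena(size_t size) {
    memory_ = static_cast<uint8_t*>(
        mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
    size_ = size;
  }
  ~MemMapArena() override { munmap(memory_, size_); }
  void Release() override {
    if (bytes_allocated_ > 0) {
      // Drop the pages but keep the mapping; they come back zeroed on next touch.
      madvise(memory_, size_, MADV_DONTNEED);
      bytes_allocated_ = 0;
    }
  }
};

// The backing is chosen once, at runtime, from a flag.
Arena* AllocArena(bool use_malloc, size_t size) {
  return use_malloc ? static_cast<Arena*>(new MallocArena(size))
                    : static_cast<Arena*>(new MemMapArena(size));
}

int main() {
  // JIT-style usage: map-backed so pages can be returned to the OS between compilations.
  Arena* jit_arena = AllocArena(/*use_malloc=*/false, 128 * 1024);
  jit_arena->bytes_allocated_ = 64;  // pretend we carved out some space
  jit_arena->Release();              // madvise: free the pages, keep the mapping
  delete jit_arena;

  // Everything-else usage: malloc-backed, cheaper to create; Release() is a no-op.
  Arena* other_arena = AllocArena(/*use_malloc=*/true, 128 * 1024);
  other_arena->bytes_allocated_ = 64;
  other_arena->Reset();              // memset-based reset for recycling
  delete other_arena;
  return 0;
}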
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 70d138d..e37aca1 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -26,10 +26,6 @@
namespace art {
-// Memmap is a bit slower than malloc to allocate, but this is mitigated by the arena pool which
-// only allocates few arenas and recycles them afterwards.
-static constexpr bool kUseMemMap = true;
-static constexpr bool kUseMemSet = true && kUseMemMap;
static constexpr size_t kValgrindRedZoneBytes = 8;
constexpr size_t Arena::kDefaultSize;
@@ -124,33 +120,30 @@
// Explicitly instantiate the used implementation.
template class ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations>;
-Arena::Arena(size_t size)
- : bytes_allocated_(0),
- map_(nullptr),
- next_(nullptr) {
- if (kUseMemMap) {
- std::string error_msg;
- map_ = MemMap::MapAnonymous("dalvik-LinearAlloc", nullptr, size, PROT_READ | PROT_WRITE, false,
- false, &error_msg);
- CHECK(map_ != nullptr) << error_msg;
- memory_ = map_->Begin();
- size_ = map_->Size();
- } else {
- memory_ = reinterpret_cast<uint8_t*>(calloc(1, size));
- size_ = size;
- }
+Arena::Arena() : bytes_allocated_(0), next_(nullptr) {
}
-Arena::~Arena() {
- if (kUseMemMap) {
- delete map_;
- } else {
- free(reinterpret_cast<void*>(memory_));
- }
+MallocArena::MallocArena(size_t size) {
+ memory_ = reinterpret_cast<uint8_t*>(calloc(1, size));
+ size_ = size;
}
-void Arena::Release() {
- if (kUseMemMap && bytes_allocated_ > 0) {
+MallocArena::~MallocArena() {
+ free(reinterpret_cast<void*>(memory_));
+}
+
+MemMapArena::MemMapArena(size_t size) {
+ std::string error_msg;
+ map_.reset(
+ MemMap::MapAnonymous("dalvik-LinearAlloc", nullptr, size, PROT_READ | PROT_WRITE, false,
+ false, &error_msg));
+ CHECK(map_.get() != nullptr) << error_msg;
+ memory_ = map_->Begin();
+ size_ = map_->Size();
+}
+
+void MemMapArena::Release() {
+ if (bytes_allocated_ > 0) {
map_->MadviseDontNeedAndZero();
bytes_allocated_ = 0;
}
@@ -158,19 +151,14 @@
void Arena::Reset() {
if (bytes_allocated_ > 0) {
- if (kUseMemSet || !kUseMemMap) {
- memset(Begin(), 0, bytes_allocated_);
- } else {
- map_->MadviseDontNeedAndZero();
- }
+ memset(Begin(), 0, bytes_allocated_);
bytes_allocated_ = 0;
}
}
-ArenaPool::ArenaPool()
- : lock_("Arena pool lock"),
- free_arenas_(nullptr) {
- if (kUseMemMap) {
+ArenaPool::ArenaPool(bool use_malloc)
+ : use_malloc_(use_malloc), lock_("Arena pool lock"), free_arenas_(nullptr) {
+ if (!use_malloc) {
MemMap::Init();
}
}
@@ -194,16 +182,19 @@
}
}
if (ret == nullptr) {
- ret = new Arena(size);
+ ret = use_malloc_ ? static_cast<Arena*>(new MallocArena(size)) : new MemMapArena(size);
}
ret->Reset();
return ret;
}
void ArenaPool::TrimMaps() {
- MutexLock lock(Thread::Current(), lock_);
- for (auto* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
- arena->Release();
+ if (!use_malloc_) {
+ // Trimming via madvise only works for MemMap-backed arenas.
+ MutexLock lock(Thread::Current(), lock_);
+ for (auto* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
+ arena->Release();
+ }
}
}
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 04ca3ea..cc7b856 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -116,12 +116,12 @@
class Arena {
public:
static constexpr size_t kDefaultSize = 128 * KB;
- explicit Arena(size_t size = kDefaultSize);
- ~Arena();
+ Arena();
+ virtual ~Arena() { }
// Reset is for pre-use and uses memset for performance.
void Reset();
// Release is used inbetween uses and uses madvise for memory usage.
- void Release();
+ virtual void Release() { }
uint8_t* Begin() {
return memory_;
}
@@ -142,32 +142,50 @@
return bytes_allocated_;
}
- private:
+ protected:
size_t bytes_allocated_;
uint8_t* memory_;
size_t size_;
- MemMap* map_;
Arena* next_;
friend class ArenaPool;
friend class ArenaAllocator;
friend class ArenaStack;
friend class ScopedArenaAllocator;
template <bool kCount> friend class ArenaAllocatorStatsImpl;
+
+ private:
DISALLOW_COPY_AND_ASSIGN(Arena);
};
+class MallocArena FINAL : public Arena {
+ public:
+ explicit MallocArena(size_t size = Arena::kDefaultSize);
+ virtual ~MallocArena();
+};
+
+class MemMapArena FINAL : public Arena {
+ public:
+ explicit MemMapArena(size_t size = Arena::kDefaultSize);
+ virtual ~MemMapArena() { }
+ void Release() OVERRIDE;
+
+ private:
+ std::unique_ptr<MemMap> map_;
+};
+
class ArenaPool {
public:
- ArenaPool();
+ explicit ArenaPool(bool use_malloc = true);
~ArenaPool();
Arena* AllocArena(size_t size) LOCKS_EXCLUDED(lock_);
void FreeArenaChain(Arena* first) LOCKS_EXCLUDED(lock_);
size_t GetBytesAllocated() const LOCKS_EXCLUDED(lock_);
- // Trim the maps in arenas by madvising, used by JIT to reduce memory usage. This only works if
- // kUseMemMap is true.
+ // Trim the maps in arenas by madvising, used by JIT to reduce memory usage. This only works
+ // if use_malloc is false.
void TrimMaps() LOCKS_EXCLUDED(lock_);
private:
+ const bool use_malloc_;
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Arena* free_arenas_ GUARDED_BY(lock_);
DISALLOW_COPY_AND_ASSIGN(ArenaPool);
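As a hypothetical usage sketch of the API declared above (the include path and the wrapper
function are illustrative, and this only builds inside the ART tree), a JIT-style client
would construct the pool with use_malloc set to false so that TrimMaps() can actually
return memory:

// Hypothetical caller, assuming the declarations above (ART-internal headers required).
#include "base/arena_allocator.h"

void JitStyleUsage() {
  ArenaPool pool(/*use_malloc=*/false);          // MemMap-backed arenas, trimmable
  Arena* arena = pool.AllocArena(Arena::kDefaultSize);
  // ... allocate from the arena here, then hand it back ...
  pool.FreeArenaChain(arena);
  pool.TrimMaps();                               // madvises free arenas; no-op when use_malloc is true
}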
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 61798c3..0728646 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -788,7 +788,6 @@
max_spins_before_thin_lock_inflation_ =
runtime_options.GetOrDefault(Opt::MaxSpinsBeforeThinLockInflation);
- arena_pool_.reset(new ArenaPool);
monitor_list_ = new MonitorList;
monitor_pool_ = MonitorPool::Create();
thread_list_ = new ThreadList;
@@ -856,6 +855,11 @@
CreateJit();
}
+ // Use a MemMap-backed arena pool for the JIT, malloc otherwise. Malloc arenas are faster to
+ // allocate but can't be trimmed as easily.
+ const bool use_malloc = jit_options_.get() == nullptr;
+ arena_pool_.reset(new ArenaPool(use_malloc));
+
BlockSignals();
InitPlatformSignalHandlers();