Improve allocation speed.
- Improves the Ritz MemAllocTest benchmark result by ~500 ms (or ~5%) on Nexus 4.
- Move the memset() call that zeroes the allocated memory out of the lock region (see the sketch after this list).
- De-virtualize and inline the allocation call chains into Heap::AllocObject().
- Turn Heap::measure_allocation_time_ into a static const variable.
- Guard the VerifyObject() call with a kIsDebugBuild check.
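
As a rough illustration of the memset lock narrowing (generic names such as
Allocator and alloc_lock_, not the real ART types): the allocator lock is held
only for the allocation bookkeeping, and the memory is zeroed after the lock
is released so other threads can allocate concurrently.

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>
    #include <mutex>

    class Allocator {
     public:
      void* AllocZeroed(size_t num_bytes) {
        void* ptr = nullptr;
        {
          std::lock_guard<std::mutex> guard(alloc_lock_);  // lock held only for the allocation itself
          ptr = std::malloc(num_bytes);                    // stand-in for the locked bump/free-list path
        }
        if (ptr != nullptr) {
          std::memset(ptr, 0, num_bytes);  // zeroing happens outside the critical section
        }
        return ptr;
      }

     private:
      std::mutex alloc_lock_;
    };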
Bug: 9986565
Change-Id: Ib70b6d051a80ec329788b30256565561f031da2a
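
The diff below also threads a bytes_allocated out-parameter through the
large-object Alloc() paths so the caller receives the actual (rounded-up)
allocation size directly instead of querying it afterwards. A stand-alone
sketch of the out-parameter pattern (the function and rounding below are
illustrative, not ART's implementation):

    #include <cstddef>
    #include <cstdlib>

    // Illustrative Alloc(): return the object and report the rounded-up size
    // through an optional out-parameter so the caller needs no second lookup.
    void* Alloc(size_t num_bytes, size_t* bytes_allocated) {
      size_t rounded = (num_bytes + 7) & ~static_cast<size_t>(7);  // 8-byte alignment
      void* obj = std::malloc(rounded);
      if (obj != nullptr && bytes_allocated != nullptr) {
        *bytes_allocated = rounded;
      }
      return obj;
    }

    int main() {
      size_t bytes_allocated = 0;
      void* obj = Alloc(13, &bytes_allocated);  // bytes_allocated becomes 16
      std::free(obj);
      return (obj != nullptr && bytes_allocated == 16) ? 0 : 1;
    }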
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index d7db561..832c6fa 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -55,7 +55,7 @@
return new LargeObjectMapSpace(name);
}
-mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes) {
+mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", NULL, num_bytes,
PROT_READ | PROT_WRITE);
if (mem_map == NULL) {
@@ -66,6 +66,9 @@
large_objects_.push_back(obj);
mem_maps_.Put(obj, mem_map);
size_t allocation_size = mem_map->Size();
+ if (bytes_allocated != NULL) {
+ *bytes_allocated = allocation_size;
+ }
num_bytes_allocated_ += allocation_size;
total_bytes_allocated_ += allocation_size;
++num_objects_allocated_;
@@ -239,7 +242,7 @@
return chunk->GetSize();
}
-mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) {
+mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
MutexLock mu(self, lock_);
num_bytes = RoundUp(num_bytes, kAlignment);
Chunk temp;
@@ -262,6 +265,9 @@
AddFreeChunk(AddrFromChunk(new_chunk), chunk_size - num_bytes, chunk);
}
+ if (bytes_allocated != NULL) {
+ *bytes_allocated = num_bytes;
+ }
num_objects_allocated_++;
total_objects_allocated_++;
num_bytes_allocated_ += num_bytes;