X86_64: Add allocation entrypoint switching for CC is_marking

Only X86_64 is done so far. When the concurrent copying (CC) GC is not
marking, use the normal TLAB allocator entrypoints instead of the
region TLAB ones, which do read-barrier work that is only needed while
marking is in progress.

Allocation speed goes up by ~8% based on perf sampling:

Without change:
  1.19%: art_quick_alloc_object_region_tlab

With change:
  0.63%: art_quick_alloc_object_tlab
  0.47%: art_quick_alloc_object_region_tlab
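
(0.63% + 0.47% = 1.10% of samples in the allocation entrypoints, down
from 1.19%; (1.19 - 1.10) / 1.19 is roughly an 8% reduction.)
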
Bug: 31018974
Bug: 12687968
Test: test-art-host-run-test
Change-Id: I4c4d9eb229d4ad2f41b856ba5c2958a5eb3b7ffa
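
The entrypoint switching itself lives in the entrypoint/instrumentation
code and is not part of the hunks below. As a rough sketch of the idea
(the types and names here are illustrative, not ART's actual API):

    #include <vector>

    // Two entrypoint tables: the region TLAB one pays for read-barrier
    // work; the plain TLAB one does not.
    struct AllocEntrypoints {
      void* (*alloc_object)(void* klass, void* self);
    };

    AllocEntrypoints gTlabEntrypoints;        // no read-barrier work
    AllocEntrypoints gRegionTlabEntrypoints;  // read barrier on klass

    struct Thread {
      const AllocEntrypoints* entrypoints = &gRegionTlabEntrypoints;
    };

    std::vector<Thread*> gAllThreads;

    // Called when the CC collector starts or finishes marking: repoint
    // every thread's allocation entrypoints so the fast path only pays
    // the read-barrier cost while marking is actually in progress.
    void OnCcMarkingChanged(bool is_marking) {
      const AllocEntrypoints* eps =
          is_marking ? &gRegionTlabEntrypoints : &gTlabEntrypoints;
      for (Thread* t : gAllThreads) {
        t->entrypoints = eps;
      }
    }
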
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index ddc3852..3e1cbeb 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1815,7 +1815,7 @@
         break;
       }
       // Try to transition the heap if the allocation failure was due to the space being full.
-      if (!IsOutOfMemoryOnAllocation<false>(allocator, alloc_size)) {
+      if (!IsOutOfMemoryOnAllocation(allocator, alloc_size, /*grow*/ false)) {
         // If we aren't out of memory then the OOM was probably from the non moving space being
         // full. Attempt to disable compaction and turn the main space into a non moving space.
         DisableMovingGc();
@@ -4221,5 +4221,72 @@
   gc_pause_listener_.StoreRelaxed(nullptr);
 }
 
+mirror::Object* Heap::AllocWithNewTLAB(Thread* self,
+                                       size_t alloc_size,
+                                       bool grow,
+                                       size_t* bytes_allocated,
+                                       size_t* usable_size,
+                                       size_t* bytes_tl_bulk_allocated) {
+  const AllocatorType allocator_type = GetCurrentAllocator();
+  if (allocator_type == kAllocatorTypeTLAB) {
+    DCHECK(bump_pointer_space_ != nullptr);
+    const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
+    if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, new_tlab_size, grow))) {
+      return nullptr;
+    }
+    // Try allocating a new thread local buffer; if the allocation fails, the space must be
+    // full, so return null.
+    if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
+      return nullptr;
+    }
+    *bytes_tl_bulk_allocated = new_tlab_size;
+  } else {
+    DCHECK(allocator_type == kAllocatorTypeRegionTLAB);
+    DCHECK(region_space_ != nullptr);
+    if (space::RegionSpace::kRegionSize >= alloc_size) {
+      // Non-large. Check OOME for a tlab.
+      if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type,
+                                            space::RegionSpace::kRegionSize,
+                                            grow))) {
+        // Try to allocate a tlab.
+        if (!region_space_->AllocNewTlab(self)) {
+          // Failed to allocate a tlab. Try a non-tlab allocation instead.
+          return region_space_->AllocNonvirtual<false>(alloc_size,
+                                                       bytes_allocated,
+                                                       usable_size,
+                                                       bytes_tl_bulk_allocated);
+        }
+        *bytes_tl_bulk_allocated = space::RegionSpace::kRegionSize;
+        // Fall through to allocating from the new TLAB below.
+      } else {
+        // Check OOME for a non-tlab allocation.
+        if (!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow)) {
+          return region_space_->AllocNonvirtual<false>(alloc_size,
+                                                       bytes_allocated,
+                                                       usable_size,
+                                                       bytes_tl_bulk_allocated);
+        }
+        // Neither tlab nor non-tlab allocation works. Give up.
+        return nullptr;
+      }
+    } else {
+      // Large allocation. Check OOME.
+      if (LIKELY(!IsOutOfMemoryOnAllocation(allocator_type, alloc_size, grow))) {
+        return region_space_->AllocNonvirtual<false>(alloc_size,
+                                                     bytes_allocated,
+                                                     usable_size,
+                                                     bytes_tl_bulk_allocated);
+      }
+      return nullptr;
+    }
+  }
+  // The TLAB was refilled; bump-allocate the object from it.
+  mirror::Object* ret = self->AllocTlab(alloc_size);
+  DCHECK(ret != nullptr);
+  *bytes_allocated = alloc_size;
+  *usable_size = alloc_size;
+  return ret;
+}
+
 }  // namespace gc
 }  // namespace art
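
For readers unfamiliar with TLAB allocation, here is a simplified,
self-contained model of the slow path that AllocWithNewTLAB implements;
the Tlab/Space types and the kTlabSize constant are made up for
illustration and are not ART's:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kTlabSize = 32 * 1024;  // hypothetical default

    struct Tlab {
      uint8_t* pos = nullptr;
      uint8_t* end = nullptr;

      // Bump-pointer fast path: returns nullptr when the TLAB is full.
      void* Alloc(size_t n) {
        if (static_cast<size_t>(end - pos) < n) return nullptr;
        void* ret = pos;
        pos += n;
        return ret;
      }
    };

    struct Space {
      // Returns nullptr when the space itself is out of memory.
      virtual uint8_t* AllocBlock(size_t n) = 0;
    };

    void* AllocSlowPath(Space* space, Tlab* tlab, size_t n) {
      // Refill the TLAB with room for this allocation plus later ones.
      uint8_t* block = space->AllocBlock(n + kTlabSize);
      if (block == nullptr) {
        // Could not refill; fall back to a one-off allocation, which
        // mirrors the AllocNonvirtual fallback above.
        return space->AllocBlock(n);
      }
      tlab->pos = block;
      tlab->end = block + n + kTlabSize;
      return tlab->Alloc(n);  // cannot fail: the new TLAB fits n
    }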