/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "allocation_listener.h"
#include "base/quasi_atomic.h"
#include "base/time_utils.h"
#include "base/utils.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
#include "gc/collector/semi_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/region_space-inl.h"
#include "gc/space/rosalloc_space-inl.h"
#include "handle_scope-inl.h"
#include "obj_ptr-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "verify_object.h"
#include "write_barrier-inl.h"

namespace art {
namespace gc {

template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
                                                      ObjPtr<mirror::Class> klass,
                                                      size_t byte_count,
                                                      AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  if (kIsDebugBuild) {
    CheckPreconditionsForAllocObject(klass, byte_count);
    // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
    // done in the runnable state where suspension is expected.
    CHECK_EQ(self->GetState(), kRunnable);
    self->AssertThreadSuspensionIsAllowable();
    self->AssertNoPendingException();
    // Make sure to preserve klass.
    StackHandleScope<1> hs(self);
    HandleWrapperObjPtr<mirror::Class> h = hs.NewHandleWrapper(&klass);
    self->PoisonObjectPointers();
  }
  // Need to check that we aren't the large object allocator since the large object allocation
  // code path includes this function. If we didn't check, we would have an infinite loop.
  ObjPtr<mirror::Object> obj;
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
                                                           pre_fence_visitor);
    if (obj != nullptr) {
      return obj.Ptr();
    } else {
      // There should be an OOM exception; since we are retrying, clear it.
      self->ClearException();
    }
    // If the large object allocation failed, try to use the normal spaces (main space,
    // non-moving space). This can happen if there is significant virtual address space
    // fragmentation.
  }
  // Bytes allocated for the (individual) object.
  size_t bytes_allocated;
  size_t usable_size;
  size_t new_num_bytes_allocated = 0;
  if (IsTLABAllocator(allocator)) {
    byte_count = RoundUp(byte_count, space::BumpPointerSpace::kAlignment);
  }
  // If we have a thread local allocation we don't need to update bytes allocated.
  if (IsTLABAllocator(allocator) && byte_count <= self->TlabSize()) {
    obj = self->AllocTlab(byte_count);
    DCHECK(obj != nullptr) << "AllocTlab can't fail";
    obj->SetClass(klass);
    if (kUseBakerReadBarrier) {
      obj->AssertReadBarrierState();
    }
    bytes_allocated = byte_count;
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else if (
      !kInstrumented && allocator == kAllocatorTypeRosAlloc &&
      LIKELY((obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) !=
             nullptr)) {
    DCHECK(!is_running_on_memory_tool_);
    obj->SetClass(klass);
    if (kUseBakerReadBarrier) {
      obj->AssertReadBarrierState();
    }
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else {
    // Bytes allocated, including bulk thread-local buffer allocations in addition to direct
    // non-TLAB object allocations.
    size_t bytes_tl_bulk_allocated = 0u;
    obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                              &usable_size, &bytes_tl_bulk_allocated);
    if (UNLIKELY(obj == nullptr)) {
      // AllocateInternalWithGc can cause thread suspension; if someone instruments the entrypoints
      // or changes the allocator in a suspend point here, we need to retry the allocation.
      obj = AllocateInternalWithGc(self,
                                   allocator,
                                   kInstrumented,
                                   byte_count,
                                   &bytes_allocated,
                                   &usable_size,
                                   &bytes_tl_bulk_allocated,
                                   &klass);
      if (obj == nullptr) {
        // The only way that we can get a null return if there is no pending exception is if the
        // allocator or instrumentation changed.
        if (!self->IsExceptionPending()) {
          // AllocObject will pick up the new allocator type, and instrumented as true is the safe
          // default.
          return AllocObject</*kInstrumented=*/ true>(self,
                                                      klass,
                                                      byte_count,
                                                      pre_fence_visitor);
        }
        return nullptr;
      }
    }
    DCHECK_GT(bytes_allocated, 0u);
    DCHECK_GT(usable_size, 0u);
    obj->SetClass(klass);
    if (kUseBakerReadBarrier) {
      obj->AssertReadBarrierState();
    }
    if (collector::SemiSpace::kUseRememberedSet && UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
      // (Note this if statement will be constant folded away for the
      // fast-path quick entry points.) Because SetClass() has no write
      // barrier, for a non-moving space allocation we need a write
      // barrier as the class pointer may point to the bump pointer
      // space (where the class pointer is an "old-to-young" reference,
      // though rare) under the GSS collector with the remembered set
      // enabled. We don't need this for kAllocatorTypeRosAlloc/DlMalloc
      // cases because we don't directly allocate into the main alloc
      // space (besides promotions) under the SS/GSS collector.
      WriteBarrier::ForFieldWrite(obj, mirror::Object::ClassOffset(), klass);
    }
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
    if (bytes_tl_bulk_allocated > 0) {
      size_t num_bytes_allocated_before =
          num_bytes_allocated_.fetch_add(bytes_tl_bulk_allocated, std::memory_order_relaxed);
      new_num_bytes_allocated = num_bytes_allocated_before + bytes_tl_bulk_allocated;
      // Only trace when we get an increase in the number of bytes allocated. This happens when
      // obtaining a new TLAB and doesn't happen often enough to hurt performance, according to
      // Golem.
      TraceHeapSize(new_num_bytes_allocated);
    }
  }
  if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
    CHECK_LE(obj->SizeOf(), usable_size);
  }
  // TODO: Deprecate.
  if (kInstrumented) {
    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* thread_stats = self->GetStats();
      ++thread_stats->allocated_objects;
      thread_stats->allocated_bytes += bytes_allocated;
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      ++global_stats->allocated_objects;
      global_stats->allocated_bytes += bytes_allocated;
    }
  } else {
    DCHECK(!Runtime::Current()->HasStatsEnabled());
  }
  if (kInstrumented) {
    if (IsAllocTrackingEnabled()) {
      // allocation_records_ is not null since it never becomes null after allocation tracking is
      // enabled.
      DCHECK(allocation_records_ != nullptr);
      allocation_records_->RecordAllocation(self, &obj, bytes_allocated);
    }
    AllocationListener* l = alloc_listener_.load(std::memory_order_seq_cst);
    if (l != nullptr) {
      // Same as above. We assume that a listener that was once stored will never be deleted.
      // Otherwise we'd have to perform this under a lock.
      l->ObjectAllocated(self, &obj, bytes_allocated);
    }
  } else {
    DCHECK(!IsAllocTrackingEnabled());
  }
  if (AllocatorHasAllocationStack(allocator)) {
    PushOnAllocationStack(self, &obj);
  }
  if (kInstrumented) {
    if (gc_stress_mode_) {
      CheckGcStressMode(self, &obj);
    }
  } else {
    DCHECK(!gc_stress_mode_);
  }
  // IsGcConcurrent() isn't known at compile time, so we can optimize by not checking it for
  // the BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
  // optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
  // the allocator_type should be constant propagated.
  if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
    // new_num_bytes_allocated is zero if we didn't update num_bytes_allocated_.
    // That's fine.
    CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
  }
  VerifyObject(obj);
  self->VerifyStack();
  return obj.Ptr();
}
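
// A minimal usage sketch, shown as a comment (hypothetical caller for
// illustration; the real entry points are the quick allocation entrypoints and
// mirror::Class::Alloc). VoidFunctor is assumed here as a no-op pre-fence
// visitor:
//
//   ObjPtr<mirror::Class> klass = ...;  // Resolved and initialized class.
//   mirror::Object* obj =
//       heap->AllocObjectWithAllocator<true, /*kCheckLargeObject=*/ true>(
//           self, klass, klass->GetObjectSize(), heap->GetCurrentAllocator(), VoidFunctor());
//
// Array allocation instead passes a pre-fence visitor that stores the array
// length before the constructor fence is issued.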

// The size of a thread-local allocation stack, in number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;

inline void Heap::PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj) {
  if (kUseThreadLocalAllocationStack) {
    if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(obj->Ptr()))) {
      PushOnThreadLocalAllocationStackWithInternalGC(self, obj);
    }
  } else if (UNLIKELY(!allocation_stack_->AtomicPushBack(obj->Ptr()))) {
    PushOnAllocationStackWithInternalGC(self, obj);
  }
}
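
// Both slow paths above are defined in heap.cc. Roughly (a summary, not a
// contract): they carve a fresh thread-local segment out of the shared
// allocation_stack_, or push directly onto it, running a collection when the
// shared stack itself is full, and then retry the push; hence the
// "WithInternalGC" in their names.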

template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self,
                                              ObjPtr<mirror::Class>* klass,
                                              size_t byte_count,
                                              const PreFenceVisitor& pre_fence_visitor) {
  // Save and restore the class in case it moves.
  StackHandleScope<1> hs(self);
  auto klass_wrapper = hs.NewHandleWrapper(klass);
  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, *klass, byte_count,
                                                                         kAllocatorTypeLOS,
                                                                         pre_fence_visitor);
}
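
// Note that kCheckLargeObject is false in the call above: the re-entrant call
// into AllocObjectWithAllocator must not take the large-object branch again,
// which would recurse without bound (see the corresponding comment there).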

template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self,
                                           AllocatorType allocator_type,
                                           size_t alloc_size,
                                           size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  if (allocator_type != kAllocatorTypeTLAB &&
      allocator_type != kAllocatorTypeRegionTLAB &&
      allocator_type != kAllocatorTypeRosAlloc &&
      UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, alloc_size, kGrow))) {
    return nullptr;
  }
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
        *usable_size = alloc_size;
        *bytes_tl_bulk_allocated = alloc_size;
      }
      break;
    }
    case kAllocatorTypeRosAlloc: {
      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
        // If running on ASan, we should be using the instrumented path.
        size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
        if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type,
                                               max_bytes_tl_bulk_allocated,
                                               kGrow))) {
          return nullptr;
        }
        ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      } else {
        DCHECK(!is_running_on_memory_tool_);
        size_t max_bytes_tl_bulk_allocated =
            rosalloc_space_->MaxBytesBulkAllocatedForNonvirtual(alloc_size);
        if (UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type,
                                               max_bytes_tl_bulk_allocated,
                                               kGrow))) {
          return nullptr;
        }
        if (!kInstrumented) {
          DCHECK(!rosalloc_space_->CanAllocThreadLocal(self, alloc_size));
        }
        ret = rosalloc_space_->AllocNonvirtual(self,
                                               alloc_size,
                                               bytes_allocated,
                                               usable_size,
                                               bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeDlMalloc: {
      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
        // If running on ASan, we should be using the instrumented path.
        ret = dlmalloc_space_->Alloc(self,
                                     alloc_size,
                                     bytes_allocated,
                                     usable_size,
                                     bytes_tl_bulk_allocated);
      } else {
        DCHECK(!is_running_on_memory_tool_);
        ret = dlmalloc_space_->AllocNonvirtual(self,
                                               alloc_size,
                                               bytes_allocated,
                                               usable_size,
                                               bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeNonMoving: {
      ret = non_moving_space_->Alloc(self,
                                     alloc_size,
                                     bytes_allocated,
                                     usable_size,
                                     bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self,
                                       alloc_size,
                                       bytes_allocated,
                                       usable_size,
                                       bytes_tl_bulk_allocated);
      // Note that the bump pointer spaces aren't necessarily next to
      // the other continuous spaces like the non-moving alloc space or
      // the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    case kAllocatorTypeRegion: {
      DCHECK(region_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::RegionSpace::kAlignment);
      ret = region_space_->AllocNonvirtual<false>(alloc_size,
                                                  bytes_allocated,
                                                  usable_size,
                                                  bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeTLAB:
      FALLTHROUGH_INTENDED;
    case kAllocatorTypeRegionTLAB: {
      DCHECK_ALIGNED(alloc_size, kObjectAlignment);
      static_assert(space::RegionSpace::kAlignment == space::BumpPointerSpace::kAlignment,
                    "mismatched alignments");
      static_assert(kObjectAlignment == space::BumpPointerSpace::kAlignment,
                    "mismatched alignments");
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        // kAllocatorTypeTLAB may be the allocator for region space TLAB if the GC is not marking;
        // that is why the allocator is not passed down.
        return AllocWithNewTLAB(self,
                                alloc_size,
                                kGrow,
                                bytes_allocated,
                                usable_size,
                                bytes_tl_bulk_allocated);
      }
      // The allocation can't fail.
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *bytes_allocated = alloc_size;
      *bytes_tl_bulk_allocated = 0;  // Allocated in an existing buffer.
      *usable_size = alloc_size;
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}
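
// Illustrative fast-path contract (assumed sizes): for a 24-byte request with
// kAllocatorTypeTLAB and TlabSize() >= 24, the switch above bumps the existing
// thread-local buffer and reports *bytes_allocated == 24, *usable_size == 24
// and *bytes_tl_bulk_allocated == 0, the bulk bytes having been charged when
// the TLAB itself was carved out of the underlying space.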

inline bool Heap::ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const {
  // We need to have a zygote space, or else our newly allocated large object can end up in the
  // Zygote, resulting in it being prematurely freed.
  // We can only do this for primitive objects since large objects will not be within the card table
  // range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass());
}
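
// Example (assuming a default large_object_threshold_ of 12KB): a
// 100000-element int[] (~400KB, a primitive array) is routed to the large
// object space, while a 400KB instance of an ordinary class is not, since
// only primitive arrays and strings pass the class check above.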

inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
                                            size_t alloc_size,
                                            bool grow) {
  size_t new_footprint = num_bytes_allocated_.load(std::memory_order_relaxed) + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
      if (!grow) {
        return true;
      }
      // TODO: Grow for allocation is racy, fix it.
      VlogHeapGrowth(max_allowed_footprint_, new_footprint, alloc_size);
      max_allowed_footprint_ = new_footprint;
    }
  }
  return false;
}
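
// Worked example (assumed numbers): with num_bytes_allocated_ == 62MB,
// max_allowed_footprint_ == 64MB and growth_limit_ == 256MB, a 4MB request
// gives new_footprint == 66MB: above the soft footprint but below the hard
// growth limit. An allocator with a concurrent GC proceeds anyway (the
// collector is expected to catch up); otherwise the footprint is grown when
// grow is true, and the request is reported as out of memory when it is false.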

// Request a GC if new_num_bytes_allocated is sufficiently large.
// A call with new_num_bytes_allocated == 0 is a fast no-op.
inline void Heap::CheckConcurrentGC(Thread* self,
                                    size_t new_num_bytes_allocated,
                                    ObjPtr<mirror::Object>* obj) {
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    RequestConcurrentGCAndSaveObject(self, false, obj);
  }
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_