/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "allocation_listener.h"
#include "base/time_utils.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
#include "gc/collector/semi_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/region_space-inl.h"
#include "gc/space/rosalloc_space-inl.h"
#include "handle_scope-inl.h"
#include "obj_ptr-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "utils.h"
#include "verify_object-inl.h"

namespace art {
namespace gc {

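// Allocates an object of the given class and byte count with the given allocator, calling
// pre_fence_visitor on the object before it becomes visible. kInstrumented selects the
// instrumented path (allocation tracking, stats, listeners); kCheckLargeObject controls whether
// oversized requests are redirected to the large object space.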
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
                                                      ObjPtr<mirror::Class> klass,
                                                      size_t byte_count,
                                                      AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  if (kIsDebugBuild) {
    CheckPreconditionsForAllocObject(klass, byte_count);
    // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
    // done in the runnable state where suspension is expected.
    CHECK_EQ(self->GetState(), kRunnable);
    self->AssertThreadSuspensionIsAllowable();
    self->AssertNoPendingException();
    // Make sure to preserve klass.
    StackHandleScope<1> hs(self);
    HandleWrapperObjPtr<mirror::Class> h = hs.NewHandleWrapper(&klass);
    self->PoisonObjectPointers();
  }
  // Need to check that we aren't the large object allocator, since the large object allocation
  // code path goes through this function. If we didn't check, we would have an infinite loop.
  ObjPtr<mirror::Object> obj;
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
                                                           pre_fence_visitor);
    if (obj != nullptr) {
      return obj.Ptr();
    } else {
      // There should be an OOM exception; since we are retrying, clear it.
      self->ClearException();
    }
    // If the large object allocation failed, try to use the normal spaces (main space,
    // non moving space). This can happen if there is significant virtual address space
    // fragmentation.
  }
  // Bytes allocated for the (individual) object.
  size_t bytes_allocated;
  size_t usable_size;
  size_t new_num_bytes_allocated = 0;
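  // TLAB requests are rounded up to the bump-pointer space alignment below; the rounded size is
  // what gets compared against the remaining thread-local buffer capacity.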
  if (allocator == kAllocatorTypeTLAB || allocator == kAllocatorTypeRegionTLAB) {
    byte_count = RoundUp(byte_count, space::BumpPointerSpace::kAlignment);
  }
  // If we have a thread local allocation we don't need to update bytes allocated.
  if ((allocator == kAllocatorTypeTLAB || allocator == kAllocatorTypeRegionTLAB) &&
      byte_count <= self->TlabSize()) {
    obj = self->AllocTlab(byte_count);
    DCHECK(obj != nullptr) << "AllocTlab can't fail";
    obj->SetClass(klass);
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj.Ptr());
      }
      obj->AssertReadBarrierPointer();
    }
    bytes_allocated = byte_count;
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else if (
      !kInstrumented && allocator == kAllocatorTypeRosAlloc &&
      (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) != nullptr &&
      LIKELY(obj != nullptr)) {
    DCHECK(!is_running_on_memory_tool_);
    obj->SetClass(klass);
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj.Ptr());
      }
      obj->AssertReadBarrierPointer();
    }
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else {
    // Bytes allocated that takes bulk thread-local buffer allocations into account.
    size_t bytes_tl_bulk_allocated = 0;
    obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                              &usable_size, &bytes_tl_bulk_allocated);
    if (UNLIKELY(obj == nullptr)) {
      // AllocateInternalWithGc can cause thread suspension; if someone instruments the entrypoints
      // or changes the allocator in a suspend point here, we need to retry the allocation.
      obj = AllocateInternalWithGc(self,
                                   allocator,
                                   kInstrumented,
                                   byte_count,
                                   &bytes_allocated,
                                   &usable_size,
                                   &bytes_tl_bulk_allocated, &klass);
      if (obj == nullptr) {
        // The only way we can get a null return, if there is no pending exception, is if the
        // allocator or instrumentation changed.
        if (!self->IsExceptionPending()) {
          // AllocObject will pick up the new allocator type, and instrumented as true is the safe
          // default.
          return AllocObject</*kInstrumented*/true>(self,
                                                    klass,
                                                    byte_count,
                                                    pre_fence_visitor);
        }
        return nullptr;
      }
    }
    DCHECK_GT(bytes_allocated, 0u);
    DCHECK_GT(usable_size, 0u);
    obj->SetClass(klass);
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj.Ptr());
      }
      obj->AssertReadBarrierPointer();
    }
    if (collector::SemiSpace::kUseRememberedSet && UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
      // (Note this if statement will be constant folded away for the
      // fast-path quick entry points.) Because SetClass() has no write
      // barrier, if a non-moving space allocation, we need a write
      // barrier as the class pointer may point to the bump pointer
      // space (where the class pointer is an "old-to-young" reference,
      // though rare) under the GSS collector with the remembered set
      // enabled. We don't need this for kAllocatorTypeRosAlloc/DlMalloc
      // cases because we don't directly allocate into the main alloc
      // space (besides promotions) under the SS/GSS collector.
      WriteBarrierField(obj, mirror::Object::ClassOffset(), klass);
    }
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
    new_num_bytes_allocated = static_cast<size_t>(
        num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated)) + bytes_tl_bulk_allocated;
  }
  if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
    CHECK_LE(obj->SizeOf(), usable_size);
  }
  // TODO: Deprecate.
  if (kInstrumented) {
    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* thread_stats = self->GetStats();
      ++thread_stats->allocated_objects;
      thread_stats->allocated_bytes += bytes_allocated;
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      ++global_stats->allocated_objects;
      global_stats->allocated_bytes += bytes_allocated;
    }
  } else {
    DCHECK(!Runtime::Current()->HasStatsEnabled());
  }
  if (kInstrumented) {
    if (IsAllocTrackingEnabled()) {
      // allocation_records_ is not null since it never becomes null after allocation tracking is
      // enabled.
      DCHECK(allocation_records_ != nullptr);
      allocation_records_->RecordAllocation(self, &obj, bytes_allocated);
    }
    AllocationListener* l = alloc_listener_.LoadSequentiallyConsistent();
    if (l != nullptr) {
      // Same as above. We assume that a listener that was once stored will never be deleted.
      // Otherwise we'd have to perform this under a lock.
      l->ObjectAllocated(self, &obj, bytes_allocated);
    }
  } else {
    DCHECK(!IsAllocTrackingEnabled());
  }
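  // Freshly allocated objects are recorded on the allocation stack; the sticky (generational)
  // collector scans this stack to find the objects allocated since the last GC.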
  if (AllocatorHasAllocationStack(allocator)) {
    PushOnAllocationStack(self, &obj);
  }
  if (kInstrumented) {
    if (gc_stress_mode_) {
      CheckGcStressMode(self, &obj);
    }
  } else {
    DCHECK(!gc_stress_mode_);
  }
  // IsGcConcurrent() isn't known at compile time so we can optimize by not checking it for
  // the BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
  // optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
  // the allocator_type should be constant propagated.
  if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
    CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
  }
  VerifyObject(obj);
  self->VerifyStack();
  return obj.Ptr();
}

// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;

inline void Heap::PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj) {
  if (kUseThreadLocalAllocationStack) {
    if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(obj->Ptr()))) {
      PushOnThreadLocalAllocationStackWithInternalGC(self, obj);
    }
  } else if (UNLIKELY(!allocation_stack_->AtomicPushBack(obj->Ptr()))) {
    PushOnAllocationStackWithInternalGC(self, obj);
  }
}

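// Large-object slow path: retries the allocation with the large object space allocator and with
// kCheckLargeObject disabled, so the request cannot be redirected back here.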
template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self,
                                              ObjPtr<mirror::Class>* klass,
                                              size_t byte_count,
                                              const PreFenceVisitor& pre_fence_visitor) {
  // Save and restore the class in case it moves.
  StackHandleScope<1> hs(self);
  auto klass_wrapper = hs.NewHandleWrapper(klass);
  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, *klass, byte_count,
                                                                         kAllocatorTypeLOS,
                                                                         pre_fence_visitor);
}

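// Attempts one allocation from the given allocator without triggering a GC. On success,
// *bytes_allocated is the amount charged to this request, *usable_size the usable payload of the
// returned object, and *bytes_tl_bulk_allocated the bytes newly carved out of the heap (zero when
// the object fits in an already-allocated thread-local buffer).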
template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self,
                                           AllocatorType allocator_type,
                                           size_t alloc_size,
                                           size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  if (allocator_type != kAllocatorTypeTLAB &&
      allocator_type != kAllocatorTypeRegionTLAB &&
      allocator_type != kAllocatorTypeRosAlloc &&
      UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
    return nullptr;
  }
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
        *usable_size = alloc_size;
        *bytes_tl_bulk_allocated = alloc_size;
      }
      break;
    }
    case kAllocatorTypeRosAlloc: {
      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
        // If running on valgrind or asan, we should be using the instrumented path.
        size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
        if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
                                                      max_bytes_tl_bulk_allocated))) {
          return nullptr;
        }
        ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      } else {
        DCHECK(!is_running_on_memory_tool_);
        size_t max_bytes_tl_bulk_allocated =
            rosalloc_space_->MaxBytesBulkAllocatedForNonvirtual(alloc_size);
        if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
                                                      max_bytes_tl_bulk_allocated))) {
          return nullptr;
        }
        if (!kInstrumented) {
          DCHECK(!rosalloc_space_->CanAllocThreadLocal(self, alloc_size));
        }
        ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
                                               bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeDlMalloc: {
      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
        // If running on valgrind, we should be using the instrumented path.
        ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      } else {
        DCHECK(!is_running_on_memory_tool_);
        ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
                                               bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeNonMoving: {
      ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                       bytes_tl_bulk_allocated);
      // Note that the bump pointer spaces aren't necessarily next to
      // the other continuous spaces like the non-moving alloc space or
      // the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    case kAllocatorTypeTLAB: {
      DCHECK_ALIGNED(alloc_size, space::BumpPointerSpace::kAlignment);
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
        if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, new_tlab_size))) {
          return nullptr;
        }
        // Try allocating a new thread local buffer; if the allocation fails the space must be
        // full, so return null.
        if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
          return nullptr;
        }
        *bytes_tl_bulk_allocated = new_tlab_size;
      } else {
        *bytes_tl_bulk_allocated = 0;
      }
      // The allocation can't fail.
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *bytes_allocated = alloc_size;
      *usable_size = alloc_size;
      break;
    }
    case kAllocatorTypeRegion: {
      DCHECK(region_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::RegionSpace::kAlignment);
      ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
                                                  bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeRegionTLAB: {
      DCHECK(region_space_ != nullptr);
      DCHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        if (space::RegionSpace::kRegionSize >= alloc_size) {
          // Non-large. Check OOME for a tlab.
          if (LIKELY(!IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
                                                       space::RegionSpace::kRegionSize))) {
            // Try to allocate a tlab.
            if (!region_space_->AllocNewTlab(self)) {
              // Failed to allocate a tlab. Try non-tlab.
              ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
                                                          bytes_tl_bulk_allocated);
              return ret;
            }
            *bytes_tl_bulk_allocated = space::RegionSpace::kRegionSize;
            // Fall-through.
          } else {
            // Check OOME for a non-tlab allocation.
            if (!IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size)) {
              ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
                                                          bytes_tl_bulk_allocated);
              return ret;
            } else {
              // Neither tlab nor non-tlab works. Give up.
              return nullptr;
            }
          }
        } else {
          // Large. Check OOME.
          if (LIKELY(!IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
            ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
                                                        bytes_tl_bulk_allocated);
            return ret;
          } else {
            return nullptr;
          }
        }
      } else {
        *bytes_tl_bulk_allocated = 0;  // Allocated in an existing buffer.
      }
      // The allocation can't fail.
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *bytes_allocated = alloc_size;
      *usable_size = alloc_size;
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}

inline bool Heap::ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const {
  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote resulting in it being prematurely freed.
  // We can only do this for primitive objects since large objects will not be within the card table
  // range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass());
}

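// Returns true if allocating alloc_size more bytes would overflow the heap's limits. Exceeding
// the hard growth_limit_ always fails; exceeding only the soft max_allowed_footprint_ fails just
// for allocators without a concurrent GC, and then only if kGrow does not permit raising the
// soft limit.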
template <bool kGrow>
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size) {
  size_t new_footprint = num_bytes_allocated_.LoadSequentiallyConsistent() + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
      if (!kGrow) {
        return true;
      }
      // TODO: Grow for allocation is racy, fix it.
      VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint_) << " to "
                 << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
      max_allowed_footprint_ = new_footprint;
    }
  }
  return false;
}

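// Requests a concurrent GC once the running allocation total crosses concurrent_start_bytes_,
// i.e. before the heap is actually full. The obj pointer is threaded through so (presumably) the
// request can keep the new object's reference valid across any thread suspension.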
inline void Heap::CheckConcurrentGC(Thread* self,
                                    size_t new_num_bytes_allocated,
                                    ObjPtr<mirror::Object>* obj) {
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    RequestConcurrentGCAndSaveObject(self, false, obj);
  }
}

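// The barriers below implement card marking: they dirty the card covering the whole target
// object rather than recording the exact field or range, which is why the offset and length
// parameters are unused.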
inline void Heap::WriteBarrierField(ObjPtr<mirror::Object> dst,
                                    MemberOffset offset ATTRIBUTE_UNUSED,
                                    ObjPtr<mirror::Object> new_value ATTRIBUTE_UNUSED) {
  card_table_->MarkCard(dst.Ptr());
}

inline void Heap::WriteBarrierArray(ObjPtr<mirror::Object> dst,
                                    int start_offset ATTRIBUTE_UNUSED,
                                    size_t length ATTRIBUTE_UNUSED) {
  card_table_->MarkCard(dst.Ptr());
}

inline void Heap::WriteBarrierEveryFieldOf(ObjPtr<mirror::Object> obj) {
  card_table_->MarkCard(obj.Ptr());
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_