/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "debugger.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "object_utils.h"
#include "runtime.h"
#include "thread.h"
#include "thread-inl.h"
#include "verify_object-inl.h"

namespace art {
namespace gc {

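// Allocates an object of class 'klass' of size 'byte_count' with the requested allocator, falling
// back to AllocateInternalWithGc (which may collect and change allocators) when the fast path
// fails. kInstrumented selects the instrumented path (runtime stats, Valgrind, allocation
// tracking) and kCheckLargeObject controls whether the request may be redirected to the large
// object space. pre_fence_visitor is run on the new object right after its class is set.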
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Class* klass,
                                                      size_t byte_count, AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  DebugCheckPreconditionsForAllocObject(klass, byte_count);
  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
  // done in the runnable state where suspension is expected.
  DCHECK_EQ(self->GetState(), kRunnable);
  self->AssertThreadSuspensionIsAllowable();
  // Need to check that we aren't the large object allocator since the large object allocation
  // code path goes through this function. If we didn't check, we would have an infinite loop.
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    return AllocLargeObject<kInstrumented, PreFenceVisitor>(self, klass, byte_count,
                                                            pre_fence_visitor);
  }
  mirror::Object* obj;
  AllocationTimer alloc_timer(this, &obj);
  size_t bytes_allocated;
  obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated);
  if (UNLIKELY(obj == nullptr)) {
    bool is_current_allocator = allocator == GetCurrentAllocator();
    obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &klass);
    if (obj == nullptr) {
      bool after_is_current_allocator = allocator == GetCurrentAllocator();
      if (is_current_allocator && !after_is_current_allocator) {
        // If the allocator changed, we need to restart the allocation.
        return AllocObject<kInstrumented>(self, klass, byte_count);
      }
      return nullptr;
    }
  }
  obj->SetClass(klass);
  pre_fence_visitor(obj);
  DCHECK_GT(bytes_allocated, 0u);
  const size_t new_num_bytes_allocated =
      static_cast<size_t>(num_bytes_allocated_.FetchAndAdd(bytes_allocated)) + bytes_allocated;
  // TODO: Deprecate.
  if (kInstrumented) {
    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* thread_stats = self->GetStats();
      ++thread_stats->allocated_objects;
      thread_stats->allocated_bytes += bytes_allocated;
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      ++global_stats->allocated_objects;
      global_stats->allocated_bytes += bytes_allocated;
    }
  } else {
    DCHECK(!Runtime::Current()->HasStatsEnabled());
  }
  if (AllocatorHasAllocationStack(allocator)) {
    PushOnAllocationStack(self, obj);
  }
  if (kInstrumented) {
    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(klass, bytes_allocated);
    }
  } else {
    DCHECK(!Dbg::IsAllocTrackingEnabled());
  }
  // concurrent_gc_ isn't known at compile time, but AllocatorMayHaveConcurrentGC(allocator) is a
  // compile-time constant once allocator_type has been constant propagated. For the BumpPointer
  // and TLAB allocators it is false, so concurrent_gc_ never needs to be checked and the entire
  // if statement below can be optimized out.
  if (AllocatorMayHaveConcurrentGC(allocator) && concurrent_gc_) {
    CheckConcurrentGC(self, new_num_bytes_allocated, obj);
  }
  VerifyObject(obj);
  self->VerifyStack();
  return obj;
}

// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;

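// Records a newly allocated object on the allocation stack so that the GC treats it as live.
// With kUseThreadLocalAllocationStack, pushes go through a per-thread segment of the shared
// allocation stack (reserved via AtomicBumpBack), so most pushes avoid atomic operations on the
// shared stack. If the stack is full, a sticky GC is run until the push succeeds.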
inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object* obj) {
  if (kUseThreadLocalAllocationStack) {
    bool success = self->PushOnThreadLocalAllocationStack(obj);
    if (UNLIKELY(!success)) {
      // Slow path. Allocate a new thread-local allocation stack.
      mirror::Object** start_address;
      mirror::Object** end_address;
      while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize,
                                                &start_address, &end_address)) {
        CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
      }
      self->SetThreadLocalAllocationStack(start_address, end_address);
      // Retry on the new thread-local allocation stack.
      success = self->PushOnThreadLocalAllocationStack(obj);
      // Must succeed.
      CHECK(success);
    }
  } else {
    // This is safe to do since the GC will never free objects which are in neither the allocation
    // stack nor the live bitmap.
    while (!allocation_stack_->AtomicPushBack(obj)) {
      CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
    }
  }
}

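// Allocates in the large object space by re-entering AllocObjectWithAllocator with
// kCheckLargeObject disabled, which is why the large object check must be skipped on this path
// to avoid infinite recursion.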
template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class* klass,
                                              size_t byte_count,
                                              const PreFenceVisitor& pre_fence_visitor) {
  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, klass, byte_count,
                                                                         kAllocatorTypeLOS,
                                                                         pre_fence_visitor);
}

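// Attempts a single allocation with the given allocator. Returns nullptr on failure without
// triggering a GC; on success, *bytes_allocated is set to the number of bytes actually reserved,
// which may exceed alloc_size due to alignment rounding.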
template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type,
                                           size_t alloc_size, size_t* bytes_allocated) {
  if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
    return nullptr;
  }
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
      }
      break;
    }
    case kAllocatorTypeRosAlloc: {
      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
        // If running on valgrind, we should be using the instrumented path.
        ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated);
      } else {
        DCHECK(!running_on_valgrind_);
        ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated);
      }
      break;
    }
    case kAllocatorTypeDlMalloc: {
      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
        // If running on valgrind, we should be using the instrumented path.
        ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated);
      } else {
        DCHECK(!running_on_valgrind_);
        ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated);
      }
      break;
    }
    case kAllocatorTypeNonMoving: {
      ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated);
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated);
      // Note that the bump pointer spaces aren't necessarily next to
      // the other continuous spaces like the non-moving alloc space or
      // the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    case kAllocatorTypeTLAB: {
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        // Try allocating a new thread-local buffer; if the allocation fails, the space must be
        // full, so return nullptr.
        if (!bump_pointer_space_->AllocNewTlab(self, alloc_size + kDefaultTLABSize)) {
          return nullptr;
        }
      }
      // The allocation can't fail.
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *bytes_allocated = alloc_size;
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}

inline void Heap::DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
  DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
         (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
         strlen(ClassHelper(c).GetDescriptor()) == 0);
  DCHECK_GE(byte_count, sizeof(mirror::Object));
}

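// RAII helper that, when kMeasureAllocationTime is enabled, measures how long an allocation took
// and adds it to the heap's total allocation time in its destructor. The time is only recorded if
// the object pointed to by allocated_obj_ptr ended up non-null, i.e. the allocation succeeded.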
inline Heap::AllocationTimer::AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr)
    : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr) {
  if (kMeasureAllocationTime) {
    allocation_start_time_ = NanoTime() / kTimeAdjust;
  }
}

inline Heap::AllocationTimer::~AllocationTimer() {
  if (kMeasureAllocationTime) {
    mirror::Object* allocated_obj = *allocated_obj_ptr_;
    // Only record the time if the allocation succeeded.
    if (allocated_obj != nullptr) {
      uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
      heap_->total_allocation_time_.FetchAndAdd(allocation_end_time - allocation_start_time_);
    }
  }
}

inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote, resulting in it being prematurely freed.
  // We can only do this for primitive arrays since large objects will not be within the card table
  // range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= kLargeObjectThreshold && have_zygote_space_ && c->IsPrimitiveArray();
}

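// Returns true if an allocation of alloc_size bytes should be treated as out of memory. When the
// soft footprint limit is exceeded but the hard growth_limit_ is not, configurations without a
// concurrent GC may grow max_allowed_footprint_ instead of failing, provided kGrow is set.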
template <bool kGrow>
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size) {
  size_t new_footprint = num_bytes_allocated_ + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!AllocatorMayHaveConcurrentGC(allocator_type) || !concurrent_gc_) {
      if (!kGrow) {
        return true;
      }
      // TODO: Grow for allocation is racy, fix it.
      VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint_) << " to "
                 << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
      max_allowed_footprint_ = new_footprint;
    }
  }
  return false;
}

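// Requests a concurrent GC once the running total of allocated bytes crosses
// concurrent_start_bytes_, keeping obj rooted across the request since it may hit a safepoint.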
inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
                                    mirror::Object* obj) {
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
    SirtRef<mirror::Object> ref(self, obj);
    RequestConcurrentGC(self);
  }
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_