/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "debugger.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "object_utils.h"
#include "runtime.h"
#include "thread.h"
#include "thread-inl.h"

namespace art {
namespace gc {

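// Allocation entry point. Routes the request to the chosen allocator (large primitive arrays go
// to the large object space), falls back to a GC-driven slow path on failure, then installs the
// class pointer, runs the pre-fence visitor, and performs the instrumentation and GC bookkeeping
// the allocator requires.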
template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Class* klass,
                                                      size_t byte_count, AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  DebugCheckPreconditionsForAllocObject(klass, byte_count);
  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
  // done in the runnable state where suspension is expected.
  DCHECK_EQ(self->GetState(), kRunnable);
  self->AssertThreadSuspensionIsAllowable();
  mirror::Object* obj;
  size_t bytes_allocated;
  AllocationTimer alloc_timer(this, &obj);
  if (UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    obj = TryToAllocate<kInstrumented>(self, kAllocatorTypeLOS, byte_count, false,
                                       &bytes_allocated);
    allocator = kAllocatorTypeLOS;
  } else {
    obj = TryToAllocate<kInstrumented>(self, allocator, byte_count, false, &bytes_allocated);
  }

  if (UNLIKELY(obj == nullptr)) {
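    // The slow path below can trigger a GC, which may invalidate the raw klass pointer; root it
    // in a SirtRef across the call and reload it afterwards.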
    SirtRef<mirror::Class> sirt_c(self, klass);
    obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated);
    if (obj == nullptr) {
      return nullptr;
    } else {
      klass = sirt_c.get();
    }
  }
  obj->SetClass(klass);
  pre_fence_visitor(obj);
  DCHECK_GT(bytes_allocated, 0u);
  const size_t new_num_bytes_allocated =
      static_cast<size_t>(num_bytes_allocated_.fetch_add(bytes_allocated)) + bytes_allocated;
  // TODO: Deprecate.
  if (kInstrumented) {
    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* thread_stats = self->GetStats();
      ++thread_stats->allocated_objects;
      thread_stats->allocated_bytes += bytes_allocated;
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      ++global_stats->allocated_objects;
      global_stats->allocated_bytes += bytes_allocated;
    }
  } else {
    DCHECK(!Runtime::Current()->HasStatsEnabled());
  }
  if (AllocatorHasAllocationStack(allocator)) {
    // This is safe to do since the GC will never free objects which are neither in the allocation
    // stack nor the live bitmap.
    while (!allocation_stack_->AtomicPushBack(obj)) {
      CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
    }
  }
  if (kInstrumented) {
    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(klass, bytes_allocated);
    }
  } else {
    DCHECK(!Dbg::IsAllocTrackingEnabled());
  }
  if (AllocatorHasConcurrentGC(allocator)) {
    CheckConcurrentGC(self, new_num_bytes_allocated, obj);
  }
  if (kIsDebugBuild) {
    if (kDesiredHeapVerification > kNoHeapVerification) {
      VerifyObject(obj);
    }
    self->VerifyStack();
  }
  return obj;
}

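// Attempts a single allocation from the given allocator without triggering a GC. Returns null if
// the allocation would push the heap past its footprint limits or the space itself fails.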
template <bool kInstrumented>
inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type,
                                           size_t alloc_size, bool grow,
                                           size_t* bytes_allocated) {
  if (UNLIKELY(IsOutOfMemoryOnAllocation(alloc_size, grow))) {
    return nullptr;
  }
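  // When instrumented and running under Valgrind, free-list allocations take the virtual Alloc
  // entry point instead of the inlined non-virtual one.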
  if (kInstrumented) {
    if (UNLIKELY(running_on_valgrind_ && allocator_type == kAllocatorTypeFreeList)) {
      return non_moving_space_->Alloc(self, alloc_size, bytes_allocated);
    }
  }
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
      }
      break;
    }
    case kAllocatorTypeFreeList: {
      if (kUseRosAlloc) {
        ret = reinterpret_cast<space::RosAllocSpace*>(non_moving_space_)->AllocNonvirtual(
            self, alloc_size, bytes_allocated);
      } else {
        ret = reinterpret_cast<space::DlMallocSpace*>(non_moving_space_)->AllocNonvirtual(
            self, alloc_size, bytes_allocated);
      }
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated);
      // Note that the bump pointer spaces aren't necessarily next to the other continuous spaces
      // like the non-moving alloc space or the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}

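// Debug-build sanity checks on the class pointer and requested size of a pending allocation.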
inline void Heap::DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
  DCHECK(c == nullptr || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
         (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
         strlen(ClassHelper(c).GetDescriptor()) == 0);
  DCHECK_GE(byte_count, sizeof(mirror::Object));
}

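// RAII helper that, when kMeasureAllocationTime is enabled, measures the wall-clock time of an
// allocation; the destructor only records the sample if the allocation succeeded.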
inline Heap::AllocationTimer::AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr)
    : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr) {
  if (kMeasureAllocationTime) {
    allocation_start_time_ = NanoTime() / kTimeAdjust;
  }
}

inline Heap::AllocationTimer::~AllocationTimer() {
  if (kMeasureAllocationTime) {
    mirror::Object* allocated_obj = *allocated_obj_ptr_;
    // Only record the time if the allocation succeeded.
    if (allocated_obj != nullptr) {
      uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
      heap_->total_allocation_time_.fetch_add(allocation_end_time - allocation_start_time_);
    }
  }
}

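// Decides whether an allocation should be served by the large object space rather than the
// regular spaces; the constraints are described below.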
inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
  // We need to have a zygote space, or else our newly allocated large object can end up in the
  // zygote space, resulting in it being prematurely freed.
  // We can only do this for primitive arrays since large objects will not be within the card
  // table range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= kLargeObjectThreshold && have_zygote_space_ && c->IsPrimitiveArray();
}

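// Footprint check: returns true if allocating alloc_size bytes would exceed the hard growth
// limit, or the soft limit when growing is not permitted. With a concurrent GC, the soft limit
// may be exceeded, since the collector runs while mutators keep allocating.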
inline bool Heap::IsOutOfMemoryOnAllocation(size_t alloc_size, bool grow) {
  size_t new_footprint = num_bytes_allocated_ + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!concurrent_gc_) {
      if (!grow) {
        return true;
      } else {
        max_allowed_footprint_ = new_footprint;
      }
    }
  }
  return false;
}

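// Requests a concurrent GC once the allocated-byte count crosses concurrent_start_bytes_; obj is
// rooted across the request so it stays valid if a GC happens at the safepoint.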
inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
                                    mirror::Object* obj) {
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    // The SirtRef is necessary since the calls in RequestConcurrentGC can be safepoints.
    SirtRef<mirror::Object> ref(self, obj);
    RequestConcurrentGC(self);
  }
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_