/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "debugger.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "object_utils.h"
#include "runtime.h"
#include "thread.h"
#include "thread-inl.h"

namespace art {
namespace gc {

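// Allocates an object of the given class using the specified allocator: runs the pre-fence
// visitor on the new object, updates allocation statistics, pushes the object on the allocation
// stack when the allocator has one, and may trigger or request a GC along the way.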
template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Class* klass,
                                                      size_t byte_count, AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  DebugCheckPreconditionsForAllocObject(klass, byte_count);
  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
  // done in the runnable state where suspension is expected.
  DCHECK_EQ(self->GetState(), kRunnable);
  self->AssertThreadSuspensionIsAllowable();
  // We need to check that we aren't using the large object allocator, since the large object
  // allocation code path includes this function. Without that check we would recurse infinitely.
  if (allocator != kAllocatorTypeLOS && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    return AllocLargeObject<kInstrumented, PreFenceVisitor>(self, klass, byte_count,
                                                            pre_fence_visitor);
  }
  mirror::Object* obj;
  size_t bytes_allocated;
  AllocationTimer alloc_timer(this, &obj);
  obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated);
  if (UNLIKELY(obj == nullptr)) {
    obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &klass);
    if (obj == nullptr) {
      return nullptr;
    }
  }
  obj->SetClass(klass);
  pre_fence_visitor(obj);
  DCHECK_GT(bytes_allocated, 0u);
  const size_t new_num_bytes_allocated =
      static_cast<size_t>(num_bytes_allocated_.fetch_add(bytes_allocated)) + bytes_allocated;
  // TODO: Deprecate.
  if (kInstrumented) {
    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* thread_stats = self->GetStats();
      ++thread_stats->allocated_objects;
      thread_stats->allocated_bytes += bytes_allocated;
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      ++global_stats->allocated_objects;
      global_stats->allocated_bytes += bytes_allocated;
    }
  } else {
    DCHECK(!Runtime::Current()->HasStatsEnabled());
  }
  if (AllocatorHasAllocationStack(allocator)) {
    // This is safe to do since the GC will never free objects which are neither in the allocation
    // stack nor in the live bitmap.
    while (!allocation_stack_->AtomicPushBack(obj)) {
      CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
    }
  }
  if (kInstrumented) {
    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(klass, bytes_allocated);
    }
  } else {
    DCHECK(!Dbg::IsAllocTrackingEnabled());
  }
  if (concurrent_gc_) {
    CheckConcurrentGC(self, new_num_bytes_allocated, obj);
  }
  if (kIsDebugBuild) {
    if (kDesiredHeapVerification > kNoHeapVerification) {
      VerifyObject(obj);
    }
    self->VerifyStack();
  }
  return obj;
}

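// Thin wrapper that routes an allocation through the large object space allocator.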
template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class* klass,
                                              size_t byte_count,
                                              const PreFenceVisitor& pre_fence_visitor) {
  return AllocObjectWithAllocator<kInstrumented, PreFenceVisitor>(self, klass, byte_count,
                                                                  kAllocatorTypeLOS,
                                                                  pre_fence_visitor);
}

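// Attempts a single allocation with the given allocator without triggering a GC. Returns nullptr
// if the allocation would exceed the allowed footprint or the underlying space is exhausted;
// on success, *bytes_allocated is set to the number of bytes actually allocated.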
template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type,
                                           size_t alloc_size, size_t* bytes_allocated) {
  if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(alloc_size))) {
    return nullptr;
  }
  if (kInstrumented) {
    if (UNLIKELY(running_on_valgrind_ && allocator_type == kAllocatorTypeFreeList)) {
      return non_moving_space_->Alloc(self, alloc_size, bytes_allocated);
    }
  } else {
    // If running on valgrind, we should be using the instrumented path.
    DCHECK(!running_on_valgrind_);
  }
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
      }
      break;
    }
    case kAllocatorTypeFreeList: {
      if (kUseRosAlloc) {
        ret = reinterpret_cast<space::RosAllocSpace*>(non_moving_space_)->AllocNonvirtual(
            self, alloc_size, bytes_allocated);
      } else {
        ret = reinterpret_cast<space::DlMallocSpace*>(non_moving_space_)->AllocNonvirtual(
            self, alloc_size, bytes_allocated);
      }
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated);
      // Note that the bump pointer spaces aren't necessarily next to
      // the other continuous spaces like the non-moving alloc space or
      // the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}

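// Debug-build sanity check that the requested byte count is consistent with the class being
// allocated.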
inline void Heap::DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
  DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
         (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
         strlen(ClassHelper(c).GetDescriptor()) == 0);
  DCHECK_GE(byte_count, sizeof(mirror::Object));
}

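// RAII helper: when kMeasureAllocationTime is enabled, measures how long the enclosing allocation
// took and, if it succeeded, adds the duration to the heap's total allocation time.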
inline Heap::AllocationTimer::AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr)
    : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr) {
  if (kMeasureAllocationTime) {
    allocation_start_time_ = NanoTime() / kTimeAdjust;
  }
}

inline Heap::AllocationTimer::~AllocationTimer() {
  if (kMeasureAllocationTime) {
    mirror::Object* allocated_obj = *allocated_obj_ptr_;
    // Only record the time if the allocation succeeded.
    if (allocated_obj != nullptr) {
      uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
      heap_->total_allocation_time_.fetch_add(allocation_end_time - allocation_start_time_);
    }
  }
}

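// Returns true if an allocation of byte_count bytes for class c should go to the large object
// space.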
inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote resulting in it being prematurely freed.
  // We can only do this for primitive arrays since large objects will not be within the card table
  // range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= kLargeObjectThreshold && have_zygote_space_ && c->IsPrimitiveArray();
}

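// Returns true if allocating alloc_size additional bytes would push the heap past its limits.
// When kGrow is set and the GC is not concurrent, the allowed footprint may be grown instead.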
template <const bool kGrow>
inline bool Heap::IsOutOfMemoryOnAllocation(size_t alloc_size) {
  size_t new_footprint = num_bytes_allocated_ + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!concurrent_gc_) {
      if (!kGrow) {
        return true;
      }
      // TODO: Grow for allocation is racy, fix it.
      VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint_) << " to "
                 << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
      max_allowed_footprint_ = new_footprint;
    }
  }
  return false;
}

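// Requests a concurrent GC once the running allocation total crosses the concurrent-GC start
// threshold.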
inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
                                    mirror::Object* obj) {
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    // The SirtRef is necessary since the calls in RequestConcurrentGC can act as a safepoint,
    // during which obj must remain visible to the GC.
    SirtRef<mirror::Object> ref(self, obj);
    RequestConcurrentGC(self);
  }
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_