/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "base/time_utils.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
#include "gc/collector/semi_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/region_space-inl.h"
#include "gc/space/rosalloc_space-inl.h"
#include "handle_scope-inl.h"
#include "obj_ptr-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "utils.h"
#include "verify_object-inl.h"

namespace art {
namespace gc {

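// Allocates an object of class |klass| with |byte_count| bytes using the given allocator and runs
// |pre_fence_visitor| on the new object before publishing it. kCheckLargeObject controls whether
// the request may be redirected to the large object space; kInstrumented selects the instrumented
// path (runtime stats, allocation tracking, GC stress checks).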
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
                                                      mirror::Class* klass,
                                                      size_t byte_count,
                                                      AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  if (kIsDebugBuild) {
    CheckPreconditionsForAllocObject(klass, byte_count);
    // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
    // done in the runnable state where suspension is expected.
    CHECK_EQ(self->GetState(), kRunnable);
    self->AssertThreadSuspensionIsAllowable();
    self->AssertNoPendingException();
    self->PoisonObjectPointers();
  }
  // We need to check that we aren't using the large object allocator, since the large object
  // allocation code path calls this function. If we didn't check, we would have an infinite loop.
  mirror::Object* obj;
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
                                                           pre_fence_visitor);
    if (obj != nullptr) {
      return obj;
    } else {
      // There should be an OOM exception; since we are retrying, clear it.
      self->ClearException();
    }
    // If the large object allocation failed, try to use the normal spaces (main space,
    // non-moving space). This can happen if there is significant virtual address space
    // fragmentation.
  }
  // Bytes allocated for the (individual) object.
  size_t bytes_allocated;
  size_t usable_size;
  size_t new_num_bytes_allocated = 0;
  if (allocator == kAllocatorTypeTLAB || allocator == kAllocatorTypeRegionTLAB) {
    byte_count = RoundUp(byte_count, space::BumpPointerSpace::kAlignment);
  }
  // If we have a thread local allocation we don't need to update bytes allocated.
  if ((allocator == kAllocatorTypeTLAB || allocator == kAllocatorTypeRegionTLAB) &&
      byte_count <= self->TlabSize()) {
    obj = self->AllocTlab(byte_count);
    DCHECK(obj != nullptr) << "AllocTlab can't fail";
    obj->SetClass(klass);
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj);
      }
      obj->AssertReadBarrierPointer();
    }
    bytes_allocated = byte_count;
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else if (!kInstrumented && allocator == kAllocatorTypeRosAlloc &&
             (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) &&
             LIKELY(obj != nullptr)) {
    DCHECK(!is_running_on_memory_tool_);
    obj->SetClass(klass);
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj);
      }
      obj->AssertReadBarrierPointer();
    }
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else {
    // Bytes allocated that take bulk thread-local buffer allocations into account.
    size_t bytes_tl_bulk_allocated = 0;
    obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                              &usable_size, &bytes_tl_bulk_allocated);
    if (UNLIKELY(obj == nullptr)) {
      // AllocateInternalWithGc can cause thread suspension. If someone instruments the entrypoints
      // or changes the allocator in a suspend point here, we need to retry the allocation.
      obj = AllocateInternalWithGc(self,
                                   allocator,
                                   kInstrumented,
                                   byte_count,
                                   &bytes_allocated,
                                   &usable_size,
                                   &bytes_tl_bulk_allocated,
                                   &klass);
      if (obj == nullptr) {
        // If there is no pending exception, the only way we can get a null return is if the
        // allocator or instrumentation changed.
        if (!self->IsExceptionPending()) {
          // AllocObject will pick up the new allocator type, and instrumented as true is the safe
          // default.
          return AllocObject</*kInstrumented*/true>(self,
                                                    klass,
                                                    byte_count,
                                                    pre_fence_visitor);
        }
        return nullptr;
      }
    }
    DCHECK_GT(bytes_allocated, 0u);
    DCHECK_GT(usable_size, 0u);
    obj->SetClass(klass);
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj);
      }
      obj->AssertReadBarrierPointer();
    }
    if (collector::SemiSpace::kUseRememberedSet && UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
      // (Note this if statement will be constant folded away for the fast-path quick entry
      // points.) Because SetClass() has no write barrier, a non-moving space allocation needs a
      // write barrier, as the class pointer may point to the bump pointer space (where the class
      // pointer is an "old-to-young" reference, though rare) under the GSS collector with the
      // remembered set enabled. We don't need this for the kAllocatorTypeRosAlloc/DlMalloc cases
      // because we don't directly allocate into the main alloc space (besides promotions) under
      // the SS/GSS collector.
      WriteBarrierField(obj, mirror::Object::ClassOffset(), klass);
    }
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
    new_num_bytes_allocated = static_cast<size_t>(
        num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated)) + bytes_tl_bulk_allocated;
  }
  if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
    CHECK_LE(obj->SizeOf(), usable_size);
  }
  // TODO: Deprecate.
  if (kInstrumented) {
    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* thread_stats = self->GetStats();
      ++thread_stats->allocated_objects;
      thread_stats->allocated_bytes += bytes_allocated;
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      ++global_stats->allocated_objects;
      global_stats->allocated_bytes += bytes_allocated;
    }
  } else {
    DCHECK(!Runtime::Current()->HasStatsEnabled());
  }
  if (kInstrumented) {
    if (IsAllocTrackingEnabled()) {
      // allocation_records_ is not null since it never becomes null after allocation tracking is
      // enabled.
      DCHECK(allocation_records_ != nullptr);
      allocation_records_->RecordAllocation(self, &obj, bytes_allocated);
    }
  } else {
    DCHECK(!IsAllocTrackingEnabled());
  }
  if (AllocatorHasAllocationStack(allocator)) {
    PushOnAllocationStack(self, &obj);
  }
  if (kInstrumented) {
    if (gc_stress_mode_) {
      CheckGcStressMode(self, &obj);
    }
  } else {
    DCHECK(!gc_stress_mode_);
  }
  // IsGcConcurrent() isn't known at compile time, so we can optimize by not checking it for
  // the BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
  // optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
  // the allocator_type should be constant propagated.
  if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
    CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
  }
  VerifyObject(obj);
  self->VerifyStack();
  return obj;
}

// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;

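// Pushes a newly allocated object onto the allocation stack: the thread-local stack when
// kUseThreadLocalAllocationStack is enabled, otherwise the shared atomic stack. If the push fails
// because the stack is full, the slow path (which may trigger an internal GC) is used.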
inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
  if (kUseThreadLocalAllocationStack) {
    if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(*obj))) {
      PushOnThreadLocalAllocationStackWithInternalGC(self, obj);
    }
  } else if (UNLIKELY(!allocation_stack_->AtomicPushBack(*obj))) {
    PushOnAllocationStackWithInternalGC(self, obj);
  }
}

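// Allocates in the large object space. The class is passed by pointer and wrapped in a handle so
// that it stays valid if a GC moves it during the allocation.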
template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self,
                                              mirror::Class** klass,
                                              size_t byte_count,
                                              const PreFenceVisitor& pre_fence_visitor) {
  // Save and restore the class in case it moves.
  StackHandleScope<1> hs(self);
  auto klass_wrapper = hs.NewHandleWrapper(klass);
  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, *klass, byte_count,
                                                                         kAllocatorTypeLOS,
                                                                         pre_fence_visitor);
}

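// Attempts a single allocation of alloc_size bytes from the space selected by allocator_type,
// without triggering a GC. Returns null if the space is exhausted or the allocation would exceed
// the allowed footprint; the caller is responsible for falling back to the GC path.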
template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self,
                                           AllocatorType allocator_type,
                                           size_t alloc_size,
                                           size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  if (allocator_type != kAllocatorTypeTLAB &&
      allocator_type != kAllocatorTypeRegionTLAB &&
      allocator_type != kAllocatorTypeRosAlloc &&
      UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
    return nullptr;
  }
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
        *usable_size = alloc_size;
        *bytes_tl_bulk_allocated = alloc_size;
      }
      break;
    }
    case kAllocatorTypeRosAlloc: {
      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
        // If running on Valgrind or ASan, we should be using the instrumented path.
        size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
        if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
                                                      max_bytes_tl_bulk_allocated))) {
          return nullptr;
        }
        ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      } else {
        DCHECK(!is_running_on_memory_tool_);
        size_t max_bytes_tl_bulk_allocated =
            rosalloc_space_->MaxBytesBulkAllocatedForNonvirtual(alloc_size);
        if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
                                                      max_bytes_tl_bulk_allocated))) {
          return nullptr;
        }
        if (!kInstrumented) {
          DCHECK(!rosalloc_space_->CanAllocThreadLocal(self, alloc_size));
        }
        ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
                                               bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeDlMalloc: {
      if (kInstrumented && UNLIKELY(is_running_on_memory_tool_)) {
        // If running on Valgrind or ASan, we should be using the instrumented path.
        ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      } else {
        DCHECK(!is_running_on_memory_tool_);
        ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
                                               bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeNonMoving: {
      ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                       bytes_tl_bulk_allocated);
      // Note that the bump pointer spaces aren't necessarily next to the other continuous spaces
      // like the non-moving alloc space or the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    case kAllocatorTypeTLAB: {
      DCHECK_ALIGNED(alloc_size, space::BumpPointerSpace::kAlignment);
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
        if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, new_tlab_size))) {
          return nullptr;
        }
        // Try allocating a new thread local buffer; if the allocation fails the space must be
        // full, so return null.
        if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
          return nullptr;
        }
        *bytes_tl_bulk_allocated = new_tlab_size;
      } else {
        *bytes_tl_bulk_allocated = 0;
      }
      // The allocation can't fail.
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *bytes_allocated = alloc_size;
      *usable_size = alloc_size;
      break;
    }
    case kAllocatorTypeRegion: {
      DCHECK(region_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::RegionSpace::kAlignment);
      ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
                                                  bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeRegionTLAB: {
      DCHECK(region_space_ != nullptr);
      DCHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        if (space::RegionSpace::kRegionSize >= alloc_size) {
          // Non-large. Check OOME for a tlab.
          if (LIKELY(!IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
                                                       space::RegionSpace::kRegionSize))) {
            // Try to allocate a tlab.
            if (!region_space_->AllocNewTlab(self)) {
              // Failed to allocate a tlab. Try non-tlab.
              ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
                                                          bytes_tl_bulk_allocated);
              return ret;
            }
            *bytes_tl_bulk_allocated = space::RegionSpace::kRegionSize;
            // Fall-through.
          } else {
            // Check OOME for a non-tlab allocation.
            if (!IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size)) {
              ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
                                                          bytes_tl_bulk_allocated);
              return ret;
            } else {
              // Neither tlab nor non-tlab works. Give up.
              return nullptr;
            }
          }
        } else {
          // Large. Check OOME.
          if (LIKELY(!IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
            ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
                                                        bytes_tl_bulk_allocated);
            return ret;
          } else {
            return nullptr;
          }
        }
      } else {
        *bytes_tl_bulk_allocated = 0;  // Allocated in an existing buffer.
      }
      // The allocation can't fail.
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *bytes_allocated = alloc_size;
      *usable_size = alloc_size;
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}

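// Returns true if an allocation of byte_count bytes for class c should go to the large object
// space rather than one of the regular spaces.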
inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote, resulting in it being prematurely freed.
  // We can only do this for primitive arrays and strings, since large objects will not be within
  // the card table range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= large_object_threshold_ && (c->IsPrimitiveArray() || c->IsStringClass());
}

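// Returns true if allocating alloc_size more bytes would exceed the heap's limits and the request
// should fail with OOM. Allocators with a concurrent GC may temporarily exceed the target
// footprint (up to the growth limit); for the others, kGrow controls whether the allowed
// footprint is grown instead of failing.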
template <bool kGrow>
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size) {
  size_t new_footprint = num_bytes_allocated_.LoadSequentiallyConsistent() + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
      if (!kGrow) {
        return true;
      }
      // TODO: Grow for allocation is racy, fix it.
      VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint_) << " to "
                 << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
      max_allowed_footprint_ = new_footprint;
    }
  }
  return false;
}

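// Requests a concurrent GC once the running total of allocated bytes crosses the concurrent-start
// threshold. The freshly allocated object is passed along so that it can be updated if it moves
// while the request is being made.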
inline void Heap::CheckConcurrentGC(Thread* self,
                                    size_t new_num_bytes_allocated,
                                    mirror::Object** obj) {
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    RequestConcurrentGCAndSaveObject(self, false, obj);
  }
}

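// Write barrier for a reference-field store into dst: marks the card covering dst. The field
// offset and the new value are unused because card marking is coarse-grained (per card, not per
// field).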
inline void Heap::WriteBarrierField(ObjPtr<mirror::Object> dst,
                                    MemberOffset offset ATTRIBUTE_UNUSED,
                                    ObjPtr<mirror::Object> new_value ATTRIBUTE_UNUSED) {
  card_table_->MarkCard(dst.Ptr());
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_