/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dlmalloc_space-inl.h"

#include "base/time_utils.h"
#include "gc/accounting/card_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"
#include "valgrind_malloc_space-inl.h"

namespace art {
namespace gc {
namespace space {

static constexpr bool kPrefetchDuringDlMallocFreeList = true;

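// Constructs a space around an mspace that has already been created inside mem_map; the
// MallocSpace base class tracks the begin/end/limit bounds and the growth limit.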
DlMallocSpace::DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
                             void* mspace, uint8_t* begin, uint8_t* end, uint8_t* limit,
                             size_t growth_limit, bool can_move_objects, size_t starting_size)
    : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
                  starting_size, initial_size),
      mspace_(mspace) {
  CHECK(mspace != nullptr);
}

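// Builds a DlMallocSpace on top of an existing MemMap: creates the mspace over the first
// starting_size bytes, mprotects the rest of the map until morecore hands it out, and wraps
// the space for Valgrind when the runtime is running under it.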
DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
                                               size_t starting_size, size_t initial_size,
                                               size_t growth_limit, size_t capacity,
                                               bool can_move_objects) {
  DCHECK(mem_map != nullptr);
  void* mspace = CreateMspace(mem_map->Begin(), starting_size, initial_size);
  if (mspace == nullptr) {
    LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
    return nullptr;
  }

  // Protect memory beyond the starting size. morecore will add r/w permissions when necessary.
  uint8_t* end = mem_map->Begin() + starting_size;
  if (capacity - starting_size > 0) {
    CHECK_MEMORY_CALL(mprotect, (end, capacity - starting_size, PROT_NONE), name);
  }

  // Everything is set, so record it in the immutable structure and leave.
  uint8_t* begin = mem_map->Begin();
  if (Runtime::Current()->RunningOnValgrind()) {
    return new ValgrindMallocSpace<DlMallocSpace, kDefaultValgrindRedZoneBytes, true, false>(
        mem_map, initial_size, name, mspace, begin, end, begin + capacity, growth_limit,
        can_move_objects, starting_size);
  } else {
    return new DlMallocSpace(mem_map, initial_size, name, mspace, begin, end, begin + capacity,
                             growth_limit, can_move_objects, starting_size);
  }
}

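// Creates a fresh DlMallocSpace: reserves a MemMap of the requested capacity (optionally at
// requested_begin) and then delegates to CreateFromMemMap with a one-page starting footprint.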
DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size,
                                     size_t growth_limit, size_t capacity,
                                     uint8_t* requested_begin, bool can_move_objects) {
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    LOG(INFO) << "DlMallocSpace::Create entering " << name
              << " initial_size=" << PrettySize(initial_size)
              << " growth_limit=" << PrettySize(growth_limit)
              << " capacity=" << PrettySize(capacity)
              << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
  }

  // Memory we promise to dlmalloc before it asks for morecore.
  // Note: making this value large means that large allocations are unlikely to succeed as dlmalloc
  // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
  // size of the large allocation) will be greater than the footprint limit.
  size_t starting_size = kPageSize;
  MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
                                 requested_begin);
  if (mem_map == nullptr) {
    LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
               << PrettySize(capacity);
    return nullptr;
  }
  DlMallocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
                                          growth_limit, capacity, can_move_objects);
  // We start out with only the initial size possibly containing objects.
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "DlMallocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
              << " ) " << *space;
  }
  return space;
}

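// Creates the underlying dlmalloc mspace inside the reserved region. The footprint limit is
// pinned to initial_size so that dlmalloc cannot request memory past the initial heap size
// until the limit is raised explicitly.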
void* DlMallocSpace::CreateMspace(void* begin, size_t morecore_start, size_t initial_size) {
  // Clear errno to allow PLOG on error.
  errno = 0;
  // Create an mspace using our backing storage starting at begin and with a footprint of
  // morecore_start. Don't use an internal dlmalloc lock (as we already hold the heap lock). When
  // morecore_start bytes of memory are exhausted morecore will be called.
  void* msp = create_mspace_with_base(begin, morecore_start, false /*locked*/);
  if (msp != nullptr) {
    // Do not allow morecore requests to succeed beyond the initial size of the heap.
    mspace_set_footprint_limit(msp, initial_size);
  } else {
    PLOG(ERROR) << "create_mspace_with_base failed";
  }
  return msp;
}

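// Slow-path allocation: temporarily raises the footprint limit to the full capacity of the
// space, retries the allocation, then shrinks the limit back down to the current footprint.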
mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
                                               size_t* bytes_allocated, size_t* usable_size,
                                               size_t* bytes_tl_bulk_allocated) {
  mirror::Object* result;
  {
    MutexLock mu(self, lock_);
    // Grow as much as possible within the space.
    size_t max_allowed = Capacity();
    mspace_set_footprint_limit(mspace_, max_allowed);
    // Try the allocation.
    result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size,
                                      bytes_tl_bulk_allocated);
    // Shrink back down as small as possible.
    size_t footprint = mspace_footprint(mspace_);
    mspace_set_footprint_limit(mspace_, footprint);
  }
  if (result != nullptr) {
    // Zero freshly allocated memory, done while not holding the space's lock.
    memset(result, 0, num_bytes);
    // Check that the result is contained in the space.
    CHECK(!kDebugSpaces || Contains(result));
  }
  return result;
}

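// Builds a new space instance over the given map and allocator handle, mirroring the Valgrind
// check done in CreateFromMemMap.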
MallocSpace* DlMallocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
                                           void* allocator, uint8_t* begin, uint8_t* end,
                                           uint8_t* limit, size_t growth_limit,
                                           bool can_move_objects) {
  if (Runtime::Current()->RunningOnValgrind()) {
    return new ValgrindMallocSpace<DlMallocSpace, kDefaultValgrindRedZoneBytes, true, false>(
        mem_map, initial_size_, name, allocator, begin, end, limit, growth_limit,
        can_move_objects, starting_size_);
  } else {
    return new DlMallocSpace(mem_map, initial_size_, name, allocator, begin, end, limit,
                             growth_limit, can_move_objects, starting_size_);
  }
}

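// Frees a single object and returns the number of bytes released back to the mspace.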
size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  if (kDebugSpaces) {
    CHECK(ptr != nullptr);
    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
  }
  const size_t bytes_freed = AllocationSizeNonvirtual(ptr, nullptr);
  if (kRecentFreeCount > 0) {
    RegisterRecentFree(ptr);
  }
  mspace_free(mspace_, ptr);
  return bytes_freed;
}

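// Bulk free: sums the allocation sizes without the lock (prefetching upcoming chunk headers),
// optionally records recent frees and poisons the freed memory in debug builds, then hands the
// whole array to mspace_bulk_free under the lock.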
size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  DCHECK(ptrs != nullptr);

  // Don't need the lock to calculate the size of the freed pointers.
  size_t bytes_freed = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    mirror::Object* ptr = ptrs[i];
    const size_t look_ahead = 8;
    if (kPrefetchDuringDlMallocFreeList && i + look_ahead < num_ptrs) {
      // The head of chunk for the allocation is sizeof(size_t) behind the allocation.
      __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]) - sizeof(size_t));
    }
    bytes_freed += AllocationSizeNonvirtual(ptr, nullptr);
  }

  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    for (size_t i = 0; i < num_ptrs; i++) {
      RegisterRecentFree(ptrs[i]);
    }
  }

  if (kDebugSpaces) {
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        size_t size = mspace_usable_size(ptrs[i]);
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }

  {
    MutexLock mu(self, lock_);
    mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs);
    return bytes_freed;
  }
}

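// Releases unused memory back to the kernel by trimming the top of the mspace and madvising
// page-sized holes found while walking it; returns the number of bytes reclaimed.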
size_t DlMallocSpace::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  // Trim to release memory at the end of the space.
  mspace_trim(mspace_, 0);
  // Visit space looking for page-sized holes to advise the kernel we don't need.
  size_t reclaimed = 0;
  mspace_inspect_all(mspace_, DlmallocMadviseCallback, &reclaimed);
  return reclaimed;
}

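// Visits every chunk in the mspace under the space lock; a final callback with null arguments
// signals the end of the space to the caller.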
void DlMallocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                         void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  mspace_inspect_all(mspace_, callback, arg);
  callback(nullptr, nullptr, 0, arg);  // Indicate end of a space.
}

size_t DlMallocSpace::GetFootprint() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint(mspace_);
}

size_t DlMallocSpace::GetFootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint_limit(mspace_);
}

void DlMallocSpace::SetFootprintLimit(size_t new_size) {
  MutexLock mu(Thread::Current(), lock_);
  VLOG(heap) << "DlMallocSpace::SetFootprintLimit " << PrettySize(new_size);
  // Compare against the actual footprint, rather than the Size(), because the heap may not have
  // grown all the way to the allowed size yet.
  size_t current_space_size = mspace_footprint(mspace_);
  if (new_size < current_space_size) {
    // Don't let the space grow any more.
    new_size = current_space_size;
  }
  mspace_set_footprint_limit(mspace_, new_size);
}

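// Both accounting queries below walk the entire mspace, so they are linear in the number of
// chunks rather than constant time.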
uint64_t DlMallocSpace::GetBytesAllocated() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

uint64_t DlMallocSpace::GetObjectsAllocated() {
  MutexLock mu(Thread::Current(), lock_);
  size_t objects_allocated = 0;
  mspace_inspect_all(mspace_, DlmallocObjectsAllocatedCallback, &objects_allocated);
  return objects_allocated;
}

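// Resets the space to its starting state: drops the backing pages with madvise, clears both
// bitmaps, rewinds the end pointer, and rebuilds the mspace while preserving the old footprint
// limit.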
void DlMallocSpace::Clear() {
  size_t footprint_limit = GetFootprintLimit();
  madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
  live_bitmap_->Clear();
  mark_bitmap_->Clear();
  SetEnd(Begin() + starting_size_);
  mspace_ = CreateMspace(mem_map_->Begin(), starting_size_, initial_size_);
  SetFootprintLimit(footprint_limit);
}

#ifndef NDEBUG
void DlMallocSpace::CheckMoreCoreForPrecondition() {
  lock_.AssertHeld(Thread::Current());
}
#endif

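// mspace_inspect_all callback used by LogFragmentationAllocFailure: tracks the largest free
// run seen in any chunk, i.e. the largest contiguous allocation that could still succeed.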
static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
  if (used_bytes < chunk_size) {
    size_t chunk_free_bytes = chunk_size - used_bytes;
    size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
    max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
  }
}

void DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
  UNUSED(failed_alloc_bytes);
  Thread* self = Thread::Current();
  size_t max_contiguous_allocation = 0;
  // To allow the Walk/InspectAll() to exclusively-lock the mutator
  // lock, temporarily release the shared access to the mutator
  // lock here by transitioning to the suspended state.
  Locks::mutator_lock_->AssertSharedHeld(self);
  self->TransitionFromRunnableToSuspended(kSuspended);
  Walk(MSpaceChunkCallback, &max_contiguous_allocation);
  self->TransitionFromSuspendedToRunnable();
  Locks::mutator_lock_->AssertSharedHeld(self);
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
}

}  // namespace space

namespace allocator {

// Implement the dlmalloc morecore callback.
void* ArtDlMallocMoreCore(void* mspace, intptr_t increment) {
  Heap* heap = Runtime::Current()->GetHeap();
  ::art::gc::space::DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
  // Support for multiple DlMalloc spaces is provided by a slow path.
  if (UNLIKELY(dlmalloc_space == nullptr || dlmalloc_space->GetMspace() != mspace)) {
    dlmalloc_space = nullptr;
    for (space::ContinuousSpace* space : heap->GetContinuousSpaces()) {
      if (space->IsDlMallocSpace()) {
        ::art::gc::space::DlMallocSpace* cur_dlmalloc_space = space->AsDlMallocSpace();
        if (cur_dlmalloc_space->GetMspace() == mspace) {
          dlmalloc_space = cur_dlmalloc_space;
          break;
        }
      }
    }
    CHECK(dlmalloc_space != nullptr) << "Couldn't find DlMallocSpace with mspace=" << mspace;
  }
  return dlmalloc_space->MoreCore(increment);
}

}  // namespace allocator

}  // namespace gc
}  // namespace art