blob: eb1d5f456c16fdb489327cfebf5f4fff1c3c7589 [file] [log] [blame]
Hiroshi Yamauchi7cb7bbc2013-11-18 17:27:37 -08001
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07002/*
3 * Copyright (C) 2013 The Android Open Source Project
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070018#include "rosalloc_space-inl.h"
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -070019
Ian Rogerscf7f1912014-10-22 22:06:39 -070020#define ATRACE_TAG ATRACE_TAG_DALVIK
21#include "cutils/trace.h"
22
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070023#include "gc/accounting/card_table.h"
Mathieu Chartiera8e8f9c2014-04-09 14:51:05 -070024#include "gc/accounting/space_bitmap-inl.h"
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070025#include "gc/heap.h"
26#include "mirror/class-inl.h"
27#include "mirror/object-inl.h"
28#include "runtime.h"
29#include "thread.h"
30#include "thread_list.h"
31#include "utils.h"
Ian Rogers6fac4472014-02-25 17:01:10 -080032#include "valgrind_malloc_space-inl.h"
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070033
34namespace art {
35namespace gc {
36namespace space {
37
// If true, prefetch object pointers kPrefetchLookAhead entries ahead while walking the
// list in FreeList(). Disabled by default; enable only with profiling evidence.
static constexpr bool kPrefetchDuringRosAllocFreeList = false;
// Distance (in array slots) ahead of the current index to prefetch in FreeList().
static constexpr size_t kPrefetchLookAhead = 8;
// Use this only for verification, it is not safe to use since the class of the object may have
// been freed.
static constexpr bool kVerifyFreedBytes = false;

// TODO: Fix
// template class ValgrindMallocSpace<RosAllocSpace, allocator::RosAlloc*>;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070046
// Constructs a RosAllocSpace wrapping an already-created rosalloc allocator over mem_map.
// Takes ownership of |rosalloc| (deleted in the destructor). The MallocSpace base is
// initialized with create_bitmaps=true.
RosAllocSpace::RosAllocSpace(MemMap* mem_map, size_t initial_size, const std::string& name,
                             art::gc::allocator::RosAlloc* rosalloc, uint8_t* begin, uint8_t* end,
                             uint8_t* limit, size_t growth_limit, bool can_move_objects,
                             size_t starting_size, bool low_memory_mode)
    : MallocSpace(name, mem_map, begin, end, limit, growth_limit, true, can_move_objects,
                  starting_size, initial_size),
      rosalloc_(rosalloc), low_memory_mode_(low_memory_mode) {
  CHECK(rosalloc != nullptr);
}
56
Mathieu Chartiere6da9af2013-12-16 11:54:42 -080057RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
Ian Rogersa55cf412014-02-27 00:31:26 -080058 size_t starting_size, size_t initial_size,
59 size_t growth_limit, size_t capacity,
Mathieu Chartier31f44142014-04-08 14:40:03 -070060 bool low_memory_mode, bool can_move_objects) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -080061 DCHECK(mem_map != nullptr);
Andreas Gamped7576322014-10-24 22:13:45 -070062
63 bool running_on_valgrind = Runtime::Current()->RunningOnValgrind();
64
Mathieu Chartiere6da9af2013-12-16 11:54:42 -080065 allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
Andreas Gamped7576322014-10-24 22:13:45 -070066 capacity, low_memory_mode, running_on_valgrind);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -080067 if (rosalloc == NULL) {
68 LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
69 return NULL;
70 }
71
lzang1385de732014-02-21 14:15:01 +080072 // Protect memory beyond the starting size. MoreCore will add r/w permissions when necessory
Ian Rogers13735952014-10-08 12:43:28 -070073 uint8_t* end = mem_map->Begin() + starting_size;
lzang1385de732014-02-21 14:15:01 +080074 if (capacity - starting_size > 0) {
75 CHECK_MEMORY_CALL(mprotect, (end, capacity - starting_size, PROT_NONE), name);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -080076 }
77
78 // Everything is set so record in immutable structure and leave
Ian Rogers13735952014-10-08 12:43:28 -070079 uint8_t* begin = mem_map->Begin();
Mathieu Chartier661974a2014-01-09 11:23:53 -080080 // TODO: Fix RosAllocSpace to support valgrind. There is currently some issues with
81 // AllocationSize caused by redzones. b/12944686
Andreas Gamped7576322014-10-24 22:13:45 -070082 if (running_on_valgrind) {
83 return new ValgrindMallocSpace<RosAllocSpace, kDefaultValgrindRedZoneBytes, false, true>(
84 mem_map, initial_size, name, rosalloc, begin, end, begin + capacity, growth_limit,
85 can_move_objects, starting_size, low_memory_mode);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -080086 } else {
Andreas Gamped7576322014-10-24 22:13:45 -070087 return new RosAllocSpace(mem_map, initial_size, name, rosalloc, begin, end, begin + capacity,
88 growth_limit, can_move_objects, starting_size, low_memory_mode);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -080089 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -080090}
91
// Destroys the space and the owned rosalloc allocator created in CreateRosAlloc().
RosAllocSpace::~RosAllocSpace() {
  delete rosalloc_;
}
95
Ian Rogers6fac4472014-02-25 17:01:10 -080096RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size,
Ian Rogers13735952014-10-08 12:43:28 -070097 size_t growth_limit, size_t capacity, uint8_t* requested_begin,
Mathieu Chartier31f44142014-04-08 14:40:03 -070098 bool low_memory_mode, bool can_move_objects) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070099 uint64_t start_time = 0;
100 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
101 start_time = NanoTime();
102 VLOG(startup) << "RosAllocSpace::Create entering " << name
103 << " initial_size=" << PrettySize(initial_size)
104 << " growth_limit=" << PrettySize(growth_limit)
105 << " capacity=" << PrettySize(capacity)
106 << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
107 }
108
109 // Memory we promise to rosalloc before it asks for morecore.
110 // Note: making this value large means that large allocations are unlikely to succeed as rosalloc
111 // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
112 // size of the large allocation) will be greater than the footprint limit.
Hiroshi Yamauchi5ccd4982014-03-11 12:19:04 -0700113 size_t starting_size = Heap::kDefaultStartingSize;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700114 MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
115 requested_begin);
116 if (mem_map == NULL) {
117 LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
118 << PrettySize(capacity);
119 return NULL;
120 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700121
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800122 RosAllocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
Mathieu Chartier31f44142014-04-08 14:40:03 -0700123 growth_limit, capacity, low_memory_mode,
124 can_move_objects);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700125 // We start out with only the initial size possibly containing objects.
126 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
127 LOG(INFO) << "RosAllocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
128 << " ) " << *space;
129 }
130 return space;
131}
132
Mathieu Chartier31f44142014-04-08 14:40:03 -0700133allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_start,
134 size_t initial_size,
Andreas Gamped7576322014-10-24 22:13:45 -0700135 size_t maximum_size, bool low_memory_mode,
136 bool running_on_valgrind) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700137 // clear errno to allow PLOG on error
138 errno = 0;
139 // create rosalloc using our backing storage starting at begin and
140 // with a footprint of morecore_start. When morecore_start bytes of
141 // memory is exhaused morecore will be called.
Hiroshi Yamauchi573f7d22013-12-17 11:54:23 -0800142 allocator::RosAlloc* rosalloc = new art::gc::allocator::RosAlloc(
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -0800143 begin, morecore_start, maximum_size,
Hiroshi Yamauchi573f7d22013-12-17 11:54:23 -0800144 low_memory_mode ?
145 art::gc::allocator::RosAlloc::kPageReleaseModeAll :
Andreas Gamped7576322014-10-24 22:13:45 -0700146 art::gc::allocator::RosAlloc::kPageReleaseModeSizeAndEnd,
147 running_on_valgrind);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700148 if (rosalloc != NULL) {
149 rosalloc->SetFootprintLimit(initial_size);
150 } else {
151 PLOG(ERROR) << "RosAlloc::Create failed";
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800152 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700153 return rosalloc;
154}
155
// Slow-path allocation: temporarily raises the rosalloc footprint limit to the full space
// capacity, attempts the allocation, then shrinks the limit back down to the actual
// footprint so ordinary allocations keep triggering GC at the configured limit.
// All footprint manipulation happens under lock_ to keep limit changes atomic.
mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
                                               size_t* bytes_allocated, size_t* usable_size,
                                               size_t* bytes_tl_bulk_allocated) {
  mirror::Object* result;
  {
    MutexLock mu(self, lock_);
    // Grow as much as possible within the space.
    size_t max_allowed = Capacity();
    rosalloc_->SetFootprintLimit(max_allowed);
    // Try the allocation.
    result = AllocCommon(self, num_bytes, bytes_allocated, usable_size,
                         bytes_tl_bulk_allocated);
    // Shrink back down as small as possible.
    size_t footprint = rosalloc_->Footprint();
    rosalloc_->SetFootprintLimit(footprint);
  }
  // Note RosAlloc zeroes memory internally.
  // Return the new allocation or NULL.
  CHECK(!kDebugSpaces || result == nullptr || Contains(result));
  return result;
}
177
// Creates a sibling space sharing this space's configuration (used when splitting a
// malloc space, e.g. for zygote/non-zygote). |allocator| is an existing RosAlloc passed
// type-erased as void*. Mirrors CreateFromMemMap's valgrind/plain choice.
MallocSpace* RosAllocSpace::CreateInstance(MemMap* mem_map, const std::string& name,
                                           void* allocator, uint8_t* begin, uint8_t* end,
                                           uint8_t* limit, size_t growth_limit,
                                           bool can_move_objects) {
  if (Runtime::Current()->RunningOnValgrind()) {
    return new ValgrindMallocSpace<RosAllocSpace, kDefaultValgrindRedZoneBytes, false, true>(
        mem_map, initial_size_, name, reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end,
        limit, growth_limit, can_move_objects, starting_size_, low_memory_mode_);
  } else {
    return new RosAllocSpace(mem_map, initial_size_, name,
                             reinterpret_cast<allocator::RosAlloc*>(allocator), begin, end, limit,
                             growth_limit, can_move_objects, starting_size_, low_memory_mode_);
  }
}
192
193size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
194 if (kDebugSpaces) {
195 CHECK(ptr != NULL);
196 CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
197 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700198 if (kRecentFreeCount > 0) {
199 MutexLock mu(self, lock_);
200 RegisterRecentFree(ptr);
201 }
Mathieu Chartier8585bad2014-04-11 17:53:48 -0700202 return rosalloc_->Free(self, ptr);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700203}
204
// Bulk-frees num_ptrs objects and returns the total bytes released.
// Phases: (1) optional prefetch/size-verification pass over the array, (2) optional
// recent-free bookkeeping under lock_, (3) debug-only bounds check plus 0xEF poison fill
// of each object's usable bytes, (4) the actual rosalloc BulkFree, cross-checked against
// the sizes accumulated in phase 1 when kVerifyFreedBytes is set.
size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  DCHECK(ptrs != nullptr);

  size_t verify_bytes = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    if (kPrefetchDuringRosAllocFreeList && i + kPrefetchLookAhead < num_ptrs) {
      __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + kPrefetchLookAhead]));
    }
    if (kVerifyFreedBytes) {
      // Sizes must be read before BulkFree below invalidates the objects.
      verify_bytes += AllocationSizeNonvirtual<true>(ptrs[i], nullptr);
    }
  }

  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    for (size_t i = 0; i < num_ptrs; i++) {
      RegisterRecentFree(ptrs[i]);
    }
  }

  if (kDebugSpaces) {
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        // Poison the payload so use-after-free shows a recognizable pattern.
        size_t size = rosalloc_->UsableSize(ptrs[i]);
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }

  const size_t bytes_freed = rosalloc_->BulkFree(self, reinterpret_cast<void**>(ptrs), num_ptrs);
  if (kVerifyFreedBytes) {
    CHECK_EQ(verify_bytes, bytes_freed);
  }
  return bytes_freed;
}
245
// Trims unused memory at the end of the space (under lock_), then — unless rosalloc is in
// a page-release mode that already returns every empty page — releases remaining empty
// pages. Returns the number of bytes released by ReleasePages(), or 0.
size_t RosAllocSpace::Trim() {
  VLOG(heap) << "RosAllocSpace::Trim() ";
  {
    MutexLock mu(Thread::Current(), lock_);
    // Trim to release memory at the end of the space.
    rosalloc_->Trim();
  }
  // Attempt to release pages if it does not release all empty pages.
  if (!rosalloc_->DoesReleaseAllPages()) {
    return rosalloc_->ReleasePages();
  }
  return 0;
}
259
// Visits every allocated region in the space via |callback|; a final callback with
// null start/end marks the end of the space (do_null_callback_at_end == true).
void RosAllocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                         void* arg) {
  InspectAllRosAlloc(callback, arg, true);
}
264
// Returns the current rosalloc footprint (bytes of backing memory in use), under lock_.
size_t RosAllocSpace::GetFootprint() {
  MutexLock mu(Thread::Current(), lock_);
  return rosalloc_->Footprint();
}
269
// Returns the current rosalloc footprint limit (max bytes before morecore fails), under lock_.
size_t RosAllocSpace::GetFootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return rosalloc_->FootprintLimit();
}
274
// Sets the rosalloc footprint limit, clamped so the limit never drops below the memory
// already committed — the space cannot shrink below its current footprint here.
void RosAllocSpace::SetFootprintLimit(size_t new_size) {
  MutexLock mu(Thread::Current(), lock_);
  VLOG(heap) << "RosAllocSpace::SetFootprintLimit " << PrettySize(new_size);
  // Compare against the actual footprint, rather than the Size(), because the heap may not have
  // grown all the way to the allowed size yet.
  size_t current_space_size = rosalloc_->Footprint();
  if (new_size < current_space_size) {
    // Don't let the space grow any more.
    new_size = current_space_size;
  }
  rosalloc_->SetFootprintLimit(new_size);
}
287
// Sums allocated bytes by walking all rosalloc runs; suspends mutators if needed via
// InspectAllRosAlloc (no end-of-space null callback).
uint64_t RosAllocSpace::GetBytesAllocated() {
  size_t bytes_allocated = 0;
  InspectAllRosAlloc(art::gc::allocator::RosAlloc::BytesAllocatedCallback, &bytes_allocated, false);
  return bytes_allocated;
}
293
// Counts live allocations by walking all rosalloc runs; suspends mutators if needed via
// InspectAllRosAlloc (no end-of-space null callback).
uint64_t RosAllocSpace::GetObjectsAllocated() {
  size_t objects_allocated = 0;
  InspectAllRosAlloc(art::gc::allocator::RosAlloc::ObjectsAllocatedCallback, &objects_allocated, false);
  return objects_allocated;
}
299
Hiroshi Yamauchi1cd53db2014-03-28 15:26:48 -0700300void RosAllocSpace::InspectAllRosAllocWithSuspendAll(
301 void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
302 void* arg, bool do_null_callback_at_end) NO_THREAD_SAFETY_ANALYSIS {
303 // TODO: NO_THREAD_SAFETY_ANALYSIS.
304 Thread* self = Thread::Current();
305 ThreadList* tl = Runtime::Current()->GetThreadList();
Mathieu Chartierbf9fc582015-03-13 17:21:25 -0700306 tl->SuspendAll(__FUNCTION__);
Hiroshi Yamauchi1cd53db2014-03-28 15:26:48 -0700307 {
308 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
309 MutexLock mu2(self, *Locks::thread_list_lock_);
310 rosalloc_->InspectAll(callback, arg);
311 if (do_null_callback_at_end) {
312 callback(NULL, NULL, 0, arg); // Indicate end of a space.
313 }
314 }
315 tl->ResumeAll();
316}
317
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700318void RosAllocSpace::InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
Hiroshi Yamauchi1cd53db2014-03-28 15:26:48 -0700319 void* arg, bool do_null_callback_at_end) NO_THREAD_SAFETY_ANALYSIS {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700320 // TODO: NO_THREAD_SAFETY_ANALYSIS.
321 Thread* self = Thread::Current();
322 if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
323 // The mutators are already suspended. For example, a call path
324 // from SignalCatcher::HandleSigQuit().
325 rosalloc_->InspectAll(callback, arg);
Hiroshi Yamauchi1cd53db2014-03-28 15:26:48 -0700326 if (do_null_callback_at_end) {
327 callback(NULL, NULL, 0, arg); // Indicate end of a space.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700328 }
Hiroshi Yamauchi1cd53db2014-03-28 15:26:48 -0700329 } else if (Locks::mutator_lock_->IsSharedHeld(self)) {
330 // The mutators are not suspended yet and we have a shared access
331 // to the mutator lock. Temporarily release the shared access by
332 // transitioning to the suspend state, and suspend the mutators.
333 self->TransitionFromRunnableToSuspended(kSuspended);
334 InspectAllRosAllocWithSuspendAll(callback, arg, do_null_callback_at_end);
335 self->TransitionFromSuspendedToRunnable();
336 Locks::mutator_lock_->AssertSharedHeld(self);
337 } else {
338 // The mutators are not suspended yet. Suspend the mutators.
339 InspectAllRosAllocWithSuspendAll(callback, arg, do_null_callback_at_end);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700340 }
341}
342
// Revokes |thread|'s thread-local rosalloc runs; returns bytes returned to the shared pool.
size_t RosAllocSpace::RevokeThreadLocalBuffers(Thread* thread) {
  return rosalloc_->RevokeThreadLocalRuns(thread);
}
346
// Revokes every thread's thread-local rosalloc runs; returns total bytes returned.
size_t RosAllocSpace::RevokeAllThreadLocalBuffers() {
  return rosalloc_->RevokeAllThreadLocalRuns();
}
350
// Debug-build-only check that |thread| holds no thread-local rosalloc runs.
void RosAllocSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    rosalloc_->AssertThreadLocalRunsAreRevoked(thread);
  }
}
356
// Debug-build-only check that no thread holds thread-local rosalloc runs.
void RosAllocSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    rosalloc_->AssertAllThreadLocalRunsAreRevoked();
  }
}
362
// Resets the space to empty: drops the backing pages (madvise DONTNEED), clears both GC
// bitmaps, rewinds end_ to just past the starting size, and rebuilds the rosalloc allocator
// from scratch. The old footprint limit is captured first and re-applied at the end so the
// allocation ceiling survives the reset. Order matters: the old allocator must be deleted
// before a new one is constructed over the same memory.
void RosAllocSpace::Clear() {
  size_t footprint_limit = GetFootprintLimit();
  madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
  live_bitmap_->Clear();
  mark_bitmap_->Clear();
  SetEnd(begin_ + starting_size_);
  delete rosalloc_;
  rosalloc_ = CreateRosAlloc(mem_map_->Begin(), starting_size_, initial_size_,
                             NonGrowthLimitCapacity(), low_memory_mode_,
                             Runtime::Current()->RunningOnValgrind());
  SetFootprintLimit(footprint_limit);
}
375
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700376} // namespace space
Andreas Gampe277ccbd2014-11-03 21:36:10 -0800377
378namespace allocator {
379
// Callback from rosalloc when it needs to increase the footprint.
// Looks up the RosAllocSpace owning |rosalloc| through the heap and forwards the
// morecore request (increment may be negative to shrink).
void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment) {
  Heap* heap = Runtime::Current()->GetHeap();
  art::gc::space::RosAllocSpace* rosalloc_space = heap->GetRosAllocSpace(rosalloc);
  DCHECK(rosalloc_space != nullptr);
  DCHECK_EQ(rosalloc_space->GetRosAlloc(), rosalloc);
  return rosalloc_space->MoreCore(increment);
}
388
389} // namespace allocator
390
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700391} // namespace gc
392} // namespace art