/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dlmalloc_space.h"

#include "dlmalloc_space-inl.h"
#include "gc/accounting/card_table.h"
#include "gc/heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"

#include <valgrind.h>
#include <memcheck/memcheck.h>

namespace art {
namespace gc {
namespace space {

static const bool kPrefetchDuringDlMallocFreeList = true;

// Number of bytes to use as a red zone (rdz). A red zone of this size will be placed before and
// after each allocation. 8 bytes provides long/double alignment.
const size_t kValgrindRedZoneBytes = 8;

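// Under valgrind, each allocation carries a red zone on both sides:
//
//   [ red zone (8 bytes) | object payload (num_bytes) | red zone (8 bytes) ]
//
// Callers receive a pointer just past the leading red zone, so reads or writes that stray off
// either end of the payload land in NOACCESS memory and are reported by valgrind.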
// A specialization of DlMallocSpace that provides information to valgrind about allocations.
class ValgrindDlMallocSpace : public DlMallocSpace {
 public:
  virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
    void* obj_with_rdz = DlMallocSpace::AllocWithGrowth(self, num_bytes + 2 * kValgrindRedZoneBytes,
                                                        bytes_allocated);
    if (obj_with_rdz == NULL) {
      return NULL;
    }
    mirror::Object* result = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
    // Mark the red zones as no-access.
    VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
    return result;
  }

  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
    void* obj_with_rdz = DlMallocSpace::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes,
                                              bytes_allocated);
    if (obj_with_rdz == NULL) {
      return NULL;
    }
    mirror::Object* result = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<byte*>(obj_with_rdz) + kValgrindRedZoneBytes);
    // Mark the red zones as no-access.
    VALGRIND_MAKE_MEM_NOACCESS(obj_with_rdz, kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(result) + num_bytes, kValgrindRedZoneBytes);
    return result;
  }

  virtual size_t AllocationSize(const mirror::Object* obj) {
    size_t result = DlMallocSpace::AllocationSize(reinterpret_cast<const mirror::Object*>(
        reinterpret_cast<const byte*>(obj) - kValgrindRedZoneBytes));
    return result - 2 * kValgrindRedZoneBytes;
  }

  virtual size_t Free(Thread* self, mirror::Object* ptr) {
    void* obj_after_rdz = reinterpret_cast<void*>(ptr);
    void* obj_with_rdz = reinterpret_cast<byte*>(obj_after_rdz) - kValgrindRedZoneBytes;
    // Mark the whole allocation, red zones included, as undefined.
    size_t allocation_size = DlMallocSpace::AllocationSize(
        reinterpret_cast<mirror::Object*>(obj_with_rdz));
    VALGRIND_MAKE_MEM_UNDEFINED(obj_with_rdz, allocation_size);
    size_t freed = DlMallocSpace::Free(self, reinterpret_cast<mirror::Object*>(obj_with_rdz));
    return freed - 2 * kValgrindRedZoneBytes;
  }

  virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
    size_t freed = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      freed += Free(self, ptrs[i]);
    }
    return freed;
  }

  ValgrindDlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
                        byte* end, byte* limit, size_t growth_limit, size_t initial_size) :
      DlMallocSpace(name, mem_map, mspace, begin, end, limit, growth_limit) {
    VALGRIND_MAKE_MEM_UNDEFINED(mem_map->Begin() + initial_size, mem_map->Size() - initial_size);
  }

  virtual ~ValgrindDlMallocSpace() {
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(ValgrindDlMallocSpace);
};

DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
                             byte* end, byte* limit, size_t growth_limit)
    : MallocSpace(name, mem_map, begin, end, limit, growth_limit),
      total_bytes_freed_(0), total_objects_freed_(0), mspace_(mspace), mspace_for_alloc_(mspace) {
  CHECK(mspace != NULL);
}

DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size, size_t growth_limit,
                                     size_t capacity, byte* requested_begin) {
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    VLOG(startup) << "DlMallocSpace::Create entering " << name
                  << " initial_size=" << PrettySize(initial_size)
                  << " growth_limit=" << PrettySize(growth_limit)
                  << " capacity=" << PrettySize(capacity)
                  << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
  }

  // Memory we promise to dlmalloc before it asks for morecore.
  // Note: making this value large means that large allocations are unlikely to succeed as dlmalloc
  // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
  // size of the large allocation) will be greater than the footprint limit.
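  // Concretely (illustrative numbers): with an 8 MB footprint limit, an allocation of almost
  // 8 MB can succeed when this value is one page, but would be refused if it were, say, 1 MB,
  // since the footprint would then be 1 MB plus the allocation size, which exceeds the limit.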
  size_t starting_size = kPageSize;
  MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
                                 requested_begin);
  if (mem_map == NULL) {
    LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
               << PrettySize(capacity);
    return NULL;
  }
  void* mspace = CreateMspace(mem_map->Begin(), starting_size, initial_size);
  if (mspace == NULL) {
    LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
    return NULL;
  }

  // Protect memory beyond the initial size.
  byte* end = mem_map->Begin() + starting_size;
  if (capacity - initial_size > 0) {
    CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name);
  }

  // Everything is set up, so record the state in an immutable structure and return.
  DlMallocSpace* space;
  byte* begin = mem_map->Begin();
  if (RUNNING_ON_VALGRIND > 0) {
    space = new ValgrindDlMallocSpace(name, mem_map, mspace, begin, end, begin + capacity,
                                      growth_limit, initial_size);
  } else {
    space = new DlMallocSpace(name, mem_map, mspace, begin, end, begin + capacity, growth_limit);
  }
  // We start out with only the initial size possibly containing objects.
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "DlMallocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
              << " ) " << *space;
  }
  return space;
}
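// A sketch of the intended use (values illustrative): CreateMspace(mem_map->Begin(), kPageSize,
// 16 * MB) returns an mspace that owns its first page outright and whose footprint may grow,
// via morecore, only up to the 16 MB limit until SetFootprintLimit raises it.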
void* DlMallocSpace::CreateMspace(void* begin, size_t morecore_start, size_t initial_size) {
  // Clear errno to allow PLOG on error.
  errno = 0;
  // Create an mspace using our backing storage, starting at begin and with a footprint of
  // morecore_start. Don't use an internal dlmalloc lock (as we already hold the heap lock). When
  // morecore_start bytes of memory are exhausted, morecore will be called.
  void* msp = create_mspace_with_base(begin, morecore_start, false /*locked*/);
  if (msp != NULL) {
    // Do not allow morecore requests to succeed beyond the initial size of the heap.
    mspace_set_footprint_limit(msp, initial_size);
  } else {
    PLOG(ERROR) << "create_mspace_with_base failed";
  }
  return msp;
}

mirror::Object* DlMallocSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
  return AllocNonvirtual(self, num_bytes, bytes_allocated);
}

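// Unlike Alloc, AllocWithGrowth may temporarily raise the mspace footprint limit as far as
// Capacity() so the request can succeed, then shrinks the limit back down to the footprint
// actually in use.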
mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
  mirror::Object* result;
  {
    MutexLock mu(self, lock_);
    // Grow as much as possible within the space.
    size_t max_allowed = Capacity();
    mspace_set_footprint_limit(mspace_, max_allowed);
    // Try the allocation.
    result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated);
    // Shrink back down as small as possible.
    size_t footprint = mspace_footprint(mspace_);
    mspace_set_footprint_limit(mspace_, footprint);
  }
  if (result != NULL) {
    // Zero freshly allocated memory, done while not holding the space's lock.
    memset(result, 0, num_bytes);
  }
  // Return the new allocation or NULL.
  CHECK(!kDebugSpaces || result == NULL || Contains(result));
  return result;
}

MallocSpace* DlMallocSpace::CreateInstance(const std::string& name, MemMap* mem_map, void* allocator, byte* begin, byte* end,
                                           byte* limit, size_t growth_limit) {
  return new DlMallocSpace(name, mem_map, allocator, begin, end, limit, growth_limit);
}

size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  if (kDebugSpaces) {
    CHECK(ptr != NULL);
    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
  }
  const size_t bytes_freed = InternalAllocationSize(ptr);
  total_bytes_freed_ += bytes_freed;
  ++total_objects_freed_;
  if (kRecentFreeCount > 0) {
    RegisterRecentFree(ptr);
  }
  mspace_free(mspace_, ptr);
  return bytes_freed;
}

size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  DCHECK(ptrs != NULL);

  // Don't need the lock to calculate the size of the freed pointers.
  size_t bytes_freed = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    mirror::Object* ptr = ptrs[i];
    const size_t look_ahead = 8;
    if (kPrefetchDuringDlMallocFreeList && i + look_ahead < num_ptrs) {
      // The chunk header for the allocation is sizeof(size_t) bytes behind the allocation.
      __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]) - sizeof(size_t));
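      // By the time the loop reaches ptrs[i + look_ahead], its chunk header should already be
      // in cache for the InternalAllocationSize call below.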
    }
    bytes_freed += InternalAllocationSize(ptr);
  }

  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    for (size_t i = 0; i < num_ptrs; i++) {
      RegisterRecentFree(ptrs[i]);
    }
  }

  if (kDebugSpaces) {
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        size_t size = mspace_usable_size(ptrs[i]);
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }

  {
    MutexLock mu(self, lock_);
    total_bytes_freed_ += bytes_freed;
    total_objects_freed_ += num_ptrs;
    mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs);
    return bytes_freed;
  }
}

// Callback from dlmalloc when it needs to increase the footprint.
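// (The build is assumed to route dlmalloc's MORECORE hook to this function; the increment is
// then serviced from the space's own MemMap via MoreCore.)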
extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) {
  Heap* heap = Runtime::Current()->GetHeap();
  DCHECK(heap->GetNonMovingSpace()->IsDlMallocSpace());
  DCHECK_EQ(heap->GetNonMovingSpace()->AsDlMallocSpace()->GetMspace(), mspace);
  return heap->GetNonMovingSpace()->MoreCore(increment);
}

// Virtual functions can't get inlined, so this non-virtual helper is what the hot paths call.
inline size_t DlMallocSpace::InternalAllocationSize(const mirror::Object* obj) {
  return AllocationSizeNonvirtual(obj);
}

size_t DlMallocSpace::AllocationSize(const mirror::Object* obj) {
  return InternalAllocationSize(obj);
}

size_t DlMallocSpace::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  // Trim to release memory at the end of the space.
  mspace_trim(mspace_, 0);
  // Visit the space looking for page-sized holes and advise the kernel that we don't need them.
  size_t reclaimed = 0;
  mspace_inspect_all(mspace_, DlmallocMadviseCallback, &reclaimed);
  return reclaimed;
}

void DlMallocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                         void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  mspace_inspect_all(mspace_, callback, arg);
  callback(NULL, NULL, 0, arg);  // Indicate end of a space.
}

size_t DlMallocSpace::GetFootprint() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint(mspace_);
}

size_t DlMallocSpace::GetFootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint_limit(mspace_);
}

void DlMallocSpace::SetFootprintLimit(size_t new_size) {
  MutexLock mu(Thread::Current(), lock_);
  VLOG(heap) << "DlMallocSpace::SetFootprintLimit " << PrettySize(new_size);
  // Compare against the actual footprint, rather than the Size(), because the heap may not have
  // grown all the way to the allowed size yet.
  size_t current_space_size = mspace_footprint(mspace_);
  if (new_size < current_space_size) {
    // Don't let the space grow any more.
    new_size = current_space_size;
  }
  mspace_set_footprint_limit(mspace_, new_size);
}

uint64_t DlMallocSpace::GetBytesAllocated() {
  MutexLock mu(Thread::Current(), lock_);
  size_t bytes_allocated = 0;
  mspace_inspect_all(mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

uint64_t DlMallocSpace::GetObjectsAllocated() {
  MutexLock mu(Thread::Current(), lock_);
  size_t objects_allocated = 0;
  mspace_inspect_all(mspace_, DlmallocObjectsAllocatedCallback, &objects_allocated);
  return objects_allocated;
}

#ifndef NDEBUG
void DlMallocSpace::CheckMoreCoreForPrecondition() {
  lock_.AssertHeld(Thread::Current());
}
#endif

}  // namespace space
}  // namespace gc
}  // namespace art