/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rosalloc_space.h"

#include "rosalloc_space-inl.h"
#include "gc/accounting/card_table.h"
#include "gc/heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"

#include <valgrind.h>
#include <memcheck/memcheck.h>

namespace art {
namespace gc {
namespace space {

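// When true, FreeList() prefetches the objects a few entries ahead while it sums up allocation
// sizes, so each object's header is already in cache by the time it is read.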
static const bool kPrefetchDuringRosAllocFreeList = true;

RosAllocSpace::RosAllocSpace(const std::string& name, MemMap* mem_map,
                             art::gc::allocator::RosAlloc* rosalloc, byte* begin, byte* end,
                             byte* limit, size_t growth_limit)
    : MallocSpace(name, mem_map, begin, end, limit, growth_limit), rosalloc_(rosalloc),
      rosalloc_for_alloc_(rosalloc) {
  CHECK(rosalloc != NULL);
}

RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size, size_t growth_limit,
                                     size_t capacity, byte* requested_begin, bool low_memory_mode) {
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    VLOG(startup) << "RosAllocSpace::Create entering " << name
                  << " initial_size=" << PrettySize(initial_size)
                  << " growth_limit=" << PrettySize(growth_limit)
                  << " capacity=" << PrettySize(capacity)
                  << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
  }

  // Memory we promise to rosalloc before it asks for morecore.
  // Note: making this value large means that large allocations are unlikely to succeed, as
  // rosalloc will request the memory from sys_alloc, which will fail because the footprint (this
  // value plus the size of the large allocation) would exceed the footprint limit.
  size_t starting_size = kPageSize;
  MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
                                 requested_begin);
  if (mem_map == NULL) {
    LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
               << PrettySize(capacity);
    return NULL;
  }
  allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
                                                 low_memory_mode);
  if (rosalloc == NULL) {
    LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
    return NULL;
  }

  // Protect memory beyond the initial size.
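  // MoreCore() mprotects these pages back to readable/writable as the footprint later grows.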
  byte* end = mem_map->Begin() + starting_size;
  if (capacity - initial_size > 0) {
    CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name);
  }

  // Everything is set, so record it in an immutable structure and return.
  RosAllocSpace* space;
  byte* begin = mem_map->Begin();
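  // When running under Valgrind, wrap the space in a ValgrindMallocSpace, which pads each
  // allocation with red zones and registers it with memcheck so invalid accesses are reported.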
  if (RUNNING_ON_VALGRIND > 0) {
    space = new ValgrindMallocSpace<RosAllocSpace, art::gc::allocator::RosAlloc*>(
        name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit, initial_size);
  } else {
    space = new RosAllocSpace(name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit);
  }
  // We start out with only the initial size possibly containing objects.
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "RosAllocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
              << ") " << *space;
  }
  return space;
}

allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_start, size_t initial_size,
                                                   bool low_memory_mode) {
  // Clear errno to allow PLOG on error.
  errno = 0;
  // Create rosalloc using our backing storage starting at begin with an initial footprint of
  // morecore_start. When those morecore_start bytes are exhausted, morecore will be called.
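  // In low-memory mode, rosalloc releases every empty page run back to the OS as it is freed;
  // otherwise it releases only large page runs and those at the end of the space (see
  // RosAlloc::PageReleaseMode).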
  allocator::RosAlloc* rosalloc = new art::gc::allocator::RosAlloc(
      begin, morecore_start,
      low_memory_mode ?
          art::gc::allocator::RosAlloc::kPageReleaseModeAll :
          art::gc::allocator::RosAlloc::kPageReleaseModeSizeAndEnd);
  if (rosalloc != NULL) {
    rosalloc->SetFootprintLimit(initial_size);
  } else {
    PLOG(ERROR) << "RosAlloc::Create failed";
  }
  return rosalloc;
}

mirror::Object* RosAllocSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
  return AllocNonvirtual(self, num_bytes, bytes_allocated);
}

mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
  mirror::Object* result;
  {
    MutexLock mu(self, lock_);
    // Grow as much as possible within the space.
    size_t max_allowed = Capacity();
    rosalloc_->SetFootprintLimit(max_allowed);
    // Try the allocation.
    result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated);
    // Shrink back down as small as possible.
    size_t footprint = rosalloc_->Footprint();
    rosalloc_->SetFootprintLimit(footprint);
  }
  // Note: RosAlloc zeroes memory internally.
  // Return the new allocation or NULL.
  CHECK(!kDebugSpaces || result == NULL || Contains(result));
  return result;
}

MallocSpace* RosAllocSpace::CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
                                           byte* begin, byte* end, byte* limit, size_t growth_limit) {
  return new RosAllocSpace(name, mem_map, reinterpret_cast<allocator::RosAlloc*>(allocator),
                           begin, end, limit, growth_limit);
}

size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
  if (kDebugSpaces) {
    CHECK(ptr != NULL);
    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
  }
  const size_t bytes_freed = InternalAllocationSize(ptr);
  total_bytes_freed_atomic_.fetch_add(bytes_freed);
  ++total_objects_freed_atomic_;
  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    RegisterRecentFree(ptr);
  }
  rosalloc_->Free(self, ptr);
  return bytes_freed;
}

size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  DCHECK(ptrs != NULL);

  // Don't need the lock to calculate the size of the freed pointers.
  size_t bytes_freed = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    mirror::Object* ptr = ptrs[i];
    const size_t look_ahead = 8;
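    // Prefetch the object look_ahead entries ahead: InternalAllocationSize() below reads the
    // object header, and issuing the prefetch early hides that cache-miss latency.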
    if (kPrefetchDuringRosAllocFreeList && i + look_ahead < num_ptrs) {
      __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]));
    }
    bytes_freed += InternalAllocationSize(ptr);
  }

  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    for (size_t i = 0; i < num_ptrs; i++) {
      RegisterRecentFree(ptrs[i]);
    }
  }

  if (kDebugSpaces) {
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        size_t size = rosalloc_->UsableSize(ptrs[i]);
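        // Poison the freed memory so use-after-free bugs stand out in debug builds.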
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }

  rosalloc_->BulkFree(self, reinterpret_cast<void**>(ptrs), num_ptrs);
  total_bytes_freed_atomic_.fetch_add(bytes_freed);
  total_objects_freed_atomic_.fetch_add(num_ptrs);
  return bytes_freed;
}

// Callback from rosalloc when it needs to increase the footprint.
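// The increment may also be negative, in which case MoreCore() shrinks the footprint and
// releases the pages back to the OS.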
208extern "C" void* art_heap_rosalloc_morecore(allocator::RosAlloc* rosalloc, intptr_t increment) {
209 Heap* heap = Runtime::Current()->GetHeap();
210 DCHECK(heap->GetNonMovingSpace()->IsRosAllocSpace());
211 DCHECK_EQ(heap->GetNonMovingSpace()->AsRosAllocSpace()->GetRosAlloc(), rosalloc);
212 return heap->GetNonMovingSpace()->MoreCore(increment);
213}
214
215// Virtual functions can't get inlined.
216inline size_t RosAllocSpace::InternalAllocationSize(const mirror::Object* obj) {
217 return AllocationSizeNonvirtual(obj);
218}
219
220size_t RosAllocSpace::AllocationSize(const mirror::Object* obj) {
221 return InternalAllocationSize(obj);
222}
223
224size_t RosAllocSpace::Trim() {
Hiroshi Yamauchi573f7d22013-12-17 11:54:23 -0800225 {
226 MutexLock mu(Thread::Current(), lock_);
227 // Trim to release memory at the end of the space.
228 rosalloc_->Trim();
229 }
230 // Attempt to release pages if it does not release all empty pages.
231 if (!rosalloc_->DoesReleaseAllPages()) {
232 VLOG(heap) << "RosAllocSpace::Trim() ";
233 size_t reclaimed = 0;
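    // Despite its name, DlmallocMadviseCallback is allocator-agnostic: it madvises page ranges
    // that contain no used bytes, so it reclaims rosalloc's empty pages as well.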
    InspectAllRosAlloc(DlmallocMadviseCallback, &reclaimed);
    return reclaimed;
  }
  return 0;
}

void RosAllocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                         void* arg) {
  InspectAllRosAlloc(callback, arg);
  callback(NULL, NULL, 0, arg);  // Indicate end of a space.
}

size_t RosAllocSpace::GetFootprint() {
  MutexLock mu(Thread::Current(), lock_);
  return rosalloc_->Footprint();
}

size_t RosAllocSpace::GetFootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return rosalloc_->FootprintLimit();
}

void RosAllocSpace::SetFootprintLimit(size_t new_size) {
  MutexLock mu(Thread::Current(), lock_);
  VLOG(heap) << "RosAllocSpace::SetFootprintLimit " << PrettySize(new_size);
  // Compare against the actual footprint, rather than the Size(), because the heap may not have
  // grown all the way to the allowed size yet.
  size_t current_space_size = rosalloc_->Footprint();
  if (new_size < current_space_size) {
    // Don't let the space grow any more.
    new_size = current_space_size;
  }
  rosalloc_->SetFootprintLimit(new_size);
}

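// Note: rosalloc keeps no running totals, so the two accounting queries below walk every run via
// InspectAllRosAlloc, which may briefly suspend all mutator threads.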
uint64_t RosAllocSpace::GetBytesAllocated() {
  size_t bytes_allocated = 0;
  InspectAllRosAlloc(art::gc::allocator::RosAlloc::BytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

uint64_t RosAllocSpace::GetObjectsAllocated() {
  size_t objects_allocated = 0;
  InspectAllRosAlloc(art::gc::allocator::RosAlloc::ObjectsAllocatedCallback, &objects_allocated);
  return objects_allocated;
}

void RosAllocSpace::InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                                       void* arg) NO_THREAD_SAFETY_ANALYSIS {
  // TODO: NO_THREAD_SAFETY_ANALYSIS.
  Thread* self = Thread::Current();
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // The mutators are already suspended. For example, a call path
    // from SignalCatcher::HandleSigQuit().
    rosalloc_->InspectAll(callback, arg);
  } else {
    // The mutators are not suspended yet.
    DCHECK(!Locks::mutator_lock_->IsSharedHeld(self));
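    // Suspend all mutators so rosalloc's internal state cannot change underneath the walk.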
    ThreadList* tl = Runtime::Current()->GetThreadList();
    tl->SuspendAll();
    {
      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
      MutexLock mu2(self, *Locks::thread_list_lock_);
      rosalloc_->InspectAll(callback, arg);
    }
    tl->ResumeAll();
  }
}

void RosAllocSpace::RevokeThreadLocalBuffers(Thread* thread) {
  rosalloc_->RevokeThreadLocalRuns(thread);
}

void RosAllocSpace::RevokeAllThreadLocalBuffers() {
  rosalloc_->RevokeAllThreadLocalRuns();
}

}  // namespace space
}  // namespace gc
}  // namespace art