/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include "rosalloc_space.h"
18
19#include "rosalloc_space-inl.h"
20#include "gc/accounting/card_table.h"
21#include "gc/heap.h"
22#include "mirror/class-inl.h"
23#include "mirror/object-inl.h"
24#include "runtime.h"
25#include "thread.h"
26#include "thread_list.h"
27#include "utils.h"
28
29#include <valgrind.h>
30#include <memcheck/memcheck.h>
31
32namespace art {
33namespace gc {
34namespace space {
35
// When bulk-freeing (see FreeList), prefetch pointers a few iterations ahead
// to hide cache-miss latency while sizing the objects.
static const bool kPrefetchDuringRosAllocFreeList = true;
37
// Wraps an existing mapping and an already-constructed rosalloc allocator.
// The allocator must be non-NULL; bounds accounting is handled by MallocSpace.
RosAllocSpace::RosAllocSpace(const std::string& name, MemMap* mem_map,
                             art::gc::allocator::RosAlloc* rosalloc, byte* begin, byte* end,
                             byte* limit, size_t growth_limit)
    : MallocSpace(name, mem_map, begin, end, limit, growth_limit), rosalloc_(rosalloc) {
  CHECK(rosalloc != NULL);
}
44
// Creates a new RosAllocSpace named |name|: reserves a mapping of |capacity|
// bytes (optionally at |requested_begin|), builds a rosalloc allocator over
// it, and protects the not-yet-usable tail. Returns NULL on failure.
RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size, size_t growth_limit,
                                     size_t capacity, byte* requested_begin) {
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    VLOG(startup) << "RosAllocSpace::Create entering " << name
                  << " initial_size=" << PrettySize(initial_size)
                  << " growth_limit=" << PrettySize(growth_limit)
                  << " capacity=" << PrettySize(capacity)
                  << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
  }

  // Memory we promise to rosalloc before it asks for morecore.
  // Note: making this value large means that large allocations are unlikely to succeed as rosalloc
  // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
  // size of the large allocation) will be greater than the footprint limit.
  size_t starting_size = kPageSize;
  // CreateMemMap may round initial_size/growth_limit/capacity; they are in-out.
  MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
                                 requested_begin);
  if (mem_map == NULL) {
    LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
               << PrettySize(capacity);
    return NULL;
  }
  allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size);
  if (rosalloc == NULL) {
    LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
    return NULL;
  }

  // Protect memory beyond the initial size.
  // NOTE(review): the protected region starts at begin + starting_size but its
  // length is capacity - initial_size, so it ends short of begin + capacity
  // whenever initial_size > starting_size — confirm this is intended.
  byte* end = mem_map->Begin() + starting_size;
  if (capacity - initial_size > 0) {
    CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name);
  }

  // Everything is set so record in immutable structure and leave
  RosAllocSpace* space;
  byte* begin = mem_map->Begin();
  if (RUNNING_ON_VALGRIND > 0) {
    // TODO: support valgrind.
    // LOG(FATAL) aborts, so the NULL assignment below is never observed.
    LOG(FATAL) << "Unimplemented";
    space = NULL;
  } else {
    space = new RosAllocSpace(name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit);
  }
  // We start out with only the initial size possibly containing objects.
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "RosAllocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
              << " ) " << *space;
  }
  return space;
}
98
99allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_start, size_t initial_size) {
100 // clear errno to allow PLOG on error
101 errno = 0;
102 // create rosalloc using our backing storage starting at begin and
103 // with a footprint of morecore_start. When morecore_start bytes of
104 // memory is exhaused morecore will be called.
105 allocator::RosAlloc* rosalloc = new art::gc::allocator::RosAlloc(begin, morecore_start);
106 if (rosalloc != NULL) {
107 rosalloc->SetFootprintLimit(initial_size);
108 } else {
109 PLOG(ERROR) << "RosAlloc::Create failed";
110 }
111 return rosalloc;
112}
113
// Virtual allocation entry point; thin wrapper around the non-virtual
// implementation in rosalloc_space-inl.h.
mirror::Object* RosAllocSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
  return AllocNonvirtual(self, num_bytes, bytes_allocated);
}
117
118mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
119 mirror::Object* result;
120 {
121 MutexLock mu(self, lock_);
122 // Grow as much as possible within the space.
123 size_t max_allowed = Capacity();
124 rosalloc_->SetFootprintLimit(max_allowed);
125 // Try the allocation.
126 result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated);
127 // Shrink back down as small as possible.
128 size_t footprint = rosalloc_->Footprint();
129 rosalloc_->SetFootprintLimit(footprint);
130 }
131 // Note RosAlloc zeroes memory internally.
132 // Return the new allocation or NULL.
133 CHECK(!kDebugSpaces || result == NULL || Contains(result));
134 return result;
135}
136
137MallocSpace* RosAllocSpace::CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
138 byte* begin, byte* end, byte* limit, size_t growth_limit) {
139 return new RosAllocSpace(name, mem_map, reinterpret_cast<allocator::RosAlloc*>(allocator),
140 begin, end, limit, growth_limit);
141}
142
143size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
144 if (kDebugSpaces) {
145 CHECK(ptr != NULL);
146 CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
147 }
148 const size_t bytes_freed = InternalAllocationSize(ptr);
149 total_bytes_freed_atomic_.fetch_add(bytes_freed);
150 ++total_objects_freed_atomic_;
151 if (kRecentFreeCount > 0) {
152 MutexLock mu(self, lock_);
153 RegisterRecentFree(ptr);
154 }
155 rosalloc_->Free(self, ptr);
156 return bytes_freed;
157}
158
// Frees |num_ptrs| objects in one bulk operation and returns the total bytes
// released. Sizes are computed up front (lock-free), then the frees are
// recorded and handed to rosalloc's BulkFree.
size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  DCHECK(ptrs != NULL);

  // Don't need the lock to calculate the size of the freed pointers.
  size_t bytes_freed = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    mirror::Object* ptr = ptrs[i];
    const size_t look_ahead = 8;
    // Prefetch an object several iterations ahead to hide cache-miss latency
    // while sizing the list.
    if (kPrefetchDuringRosAllocFreeList && i + look_ahead < num_ptrs) {
      __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]));
    }
    bytes_freed += InternalAllocationSize(ptr);
  }

  // Record the frees in the recent-free ring buffer (debugging aid).
  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    for (size_t i = 0; i < num_ptrs; i++) {
      RegisterRecentFree(ptrs[i]);
    }
  }

  if (kDebugSpaces) {
    // Verify every pointer lies inside this space and scribble 0xEF over the
    // valid ones to help catch use-after-free.
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        size_t size = rosalloc_->UsableSize(ptrs[i]);
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }

  // Release everything back to rosalloc in one call, then update accounting.
  rosalloc_->BulkFree(self, reinterpret_cast<void**>(ptrs), num_ptrs);
  total_bytes_freed_atomic_.fetch_add(bytes_freed);
  total_objects_freed_atomic_.fetch_add(num_ptrs);
  return bytes_freed;
}
199
// Callback from rosalloc when it needs to increase the footprint
// extern "C" so rosalloc can reference it without C++ name mangling. Only the
// heap's non-moving space is expected to be backed by this rosalloc instance.
extern "C" void* art_heap_rosalloc_morecore(allocator::RosAlloc* rosalloc, intptr_t increment) {
  Heap* heap = Runtime::Current()->GetHeap();
  DCHECK(heap->GetNonMovingSpace()->IsRosAllocSpace());
  DCHECK_EQ(heap->GetNonMovingSpace()->AsRosAllocSpace()->GetRosAlloc(), rosalloc);
  // Delegate to the space's MoreCore to adjust the underlying mapping.
  return heap->GetNonMovingSpace()->MoreCore(increment);
}
207
// Virtual functions can't get inlined.
// Non-virtual sizing helper used on the free paths above; forwards to the
// inline implementation.
inline size_t RosAllocSpace::InternalAllocationSize(const mirror::Object* obj) {
  return AllocationSizeNonvirtual(obj);
}
212
// Virtual entry point for querying an object's allocation size.
size_t RosAllocSpace::AllocationSize(const mirror::Object* obj) {
  return InternalAllocationSize(obj);
}
216
// Releases unused memory at the end of the space back to the OS.
size_t RosAllocSpace::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  // Trim to release memory at the end of the space.
  rosalloc_->Trim();
  // No inspect_all necessary here as trimming of pages is built-in.
  // Reclaimed-byte count is not tracked here; callers always receive 0.
  return 0;
}
224
// Invokes |callback| over every allocated region in the space, then signals
// the end of the space with a final (NULL, NULL, 0) call.
void RosAllocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                         void* arg) {
  InspectAllRosAlloc(callback, arg);
  callback(NULL, NULL, 0, arg);  // Indicate end of a space.
}
230
// Returns rosalloc's current footprint (bytes of backing memory in use).
size_t RosAllocSpace::GetFootprint() {
  MutexLock mu(Thread::Current(), lock_);
  return rosalloc_->Footprint();
}
235
// Returns rosalloc's current footprint limit (maximum bytes it may request).
size_t RosAllocSpace::GetFootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return rosalloc_->FootprintLimit();
}
240
241void RosAllocSpace::SetFootprintLimit(size_t new_size) {
242 MutexLock mu(Thread::Current(), lock_);
243 VLOG(heap) << "RosAllocSpace::SetFootprintLimit " << PrettySize(new_size);
244 // Compare against the actual footprint, rather than the Size(), because the heap may not have
245 // grown all the way to the allowed size yet.
246 size_t current_space_size = rosalloc_->Footprint();
247 if (new_size < current_space_size) {
248 // Don't let the space grow any more.
249 new_size = current_space_size;
250 }
251 rosalloc_->SetFootprintLimit(new_size);
252}
253
254uint64_t RosAllocSpace::GetBytesAllocated() {
255 if (rosalloc_ != NULL) {
256 size_t bytes_allocated = 0;
257 InspectAllRosAlloc(art::gc::allocator::RosAlloc::BytesAllocatedCallback, &bytes_allocated);
258 return bytes_allocated;
259 } else {
260 return Size();
261 }
262}
263
264uint64_t RosAllocSpace::GetObjectsAllocated() {
265 if (rosalloc_ != NULL) {
266 size_t objects_allocated = 0;
267 InspectAllRosAlloc(art::gc::allocator::RosAlloc::ObjectsAllocatedCallback, &objects_allocated);
268 return objects_allocated;
269 } else {
270 return 0;
271 }
272}
273
// Runs |callback| over all of rosalloc's regions with mutators quiescent.
// If the caller already holds the mutator lock exclusively the walk happens
// directly; otherwise all threads are suspended around the walk.
void RosAllocSpace::InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                                       void* arg) NO_THREAD_SAFETY_ANALYSIS {
  // TODO: NO_THREAD_SAFETY_ANALYSIS.
  Thread* self = Thread::Current();
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // The mutators are already suspended. For example, a call path
    // from SignalCatcher::HandleSigQuit().
    rosalloc_->InspectAll(callback, arg);
  } else {
    // The mutators are not suspended yet.
    // Holding the lock shared here would deadlock with SuspendAll below.
    DCHECK(!Locks::mutator_lock_->IsSharedHeld(self));
    ThreadList* tl = Runtime::Current()->GetThreadList();
    tl->SuspendAll();
    {
      // Hold the shutdown and thread-list locks so threads cannot be created
      // or torn down while rosalloc walks the heap.
      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
      MutexLock mu2(self, *Locks::thread_list_lock_);
      rosalloc_->InspectAll(callback, arg);
    }
    tl->ResumeAll();
  }
}
295
// Returns |thread|'s thread-local allocation runs to the shared pool.
void RosAllocSpace::RevokeThreadLocalBuffers(Thread* thread) {
  rosalloc_->RevokeThreadLocalRuns(thread);
}
299
// Returns every thread's thread-local allocation runs to the shared pool.
void RosAllocSpace::RevokeAllThreadLocalBuffers() {
  rosalloc_->RevokeAllThreadLocalRuns();
}
303
304} // namespace space
305} // namespace gc
306} // namespace art