/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include "rosalloc_space.h"
18
19#include "rosalloc_space-inl.h"
20#include "gc/accounting/card_table.h"
21#include "gc/heap.h"
22#include "mirror/class-inl.h"
23#include "mirror/object-inl.h"
24#include "runtime.h"
25#include "thread.h"
26#include "thread_list.h"
27#include "utils.h"
28
29#include <valgrind.h>
30#include <memcheck/memcheck.h>
31
32namespace art {
33namespace gc {
34namespace space {
35
// If true, FreeList() prefetches pointers a few entries ahead while it walks
// the array summing allocation sizes, to hide cache-miss latency on bulk frees.
static const bool kPrefetchDuringRosAllocFreeList = true;
37
// Constructs a space backed by an already-created rosalloc allocator.
// The space covers [begin, end) within mem_map, may grow up to limit, and is
// soft-capped at growth_limit. Both rosalloc_ and rosalloc_for_alloc_ start
// out pointing at the same allocator.
RosAllocSpace::RosAllocSpace(const std::string& name, MemMap* mem_map,
                             art::gc::allocator::RosAlloc* rosalloc, byte* begin, byte* end,
                             byte* limit, size_t growth_limit)
    : MallocSpace(name, mem_map, begin, end, limit, growth_limit), rosalloc_(rosalloc),
      rosalloc_for_alloc_(rosalloc) {
  CHECK(rosalloc != NULL);
}
45
// Creates a new RosAllocSpace: reserves the backing memory map, builds the
// rosalloc allocator on top of it, and protects the pages beyond the initial
// size. Returns NULL if the memory map or the allocator cannot be created.
RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size, size_t growth_limit,
                                     size_t capacity, byte* requested_begin) {
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    VLOG(startup) << "RosAllocSpace::Create entering " << name
                  << " initial_size=" << PrettySize(initial_size)
                  << " growth_limit=" << PrettySize(growth_limit)
                  << " capacity=" << PrettySize(capacity)
                  << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
  }

  // Memory we promise to rosalloc before it asks for morecore.
  // Note: making this value large means that large allocations are unlikely to succeed as rosalloc
  // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
  // size of the large allocation) will be greater than the footprint limit.
  size_t starting_size = kPageSize;
  // CreateMemMap may round initial_size/growth_limit/capacity up to page multiples.
  MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
                                 requested_begin);
  if (mem_map == NULL) {
    LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
               << PrettySize(capacity);
    return NULL;
  }
  allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size);
  if (rosalloc == NULL) {
    LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
    return NULL;
  }

  // Protect memory beyond the initial size.
  byte* end = mem_map->Begin() + starting_size;
  if (capacity - initial_size > 0) {
    CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name);
  }

  // Everything is set so record in immutable structure and leave
  RosAllocSpace* space;
  byte* begin = mem_map->Begin();
  if (RUNNING_ON_VALGRIND > 0) {
    // TODO: support valgrind.
    // LOG(FATAL) aborts, so the NULL assignment below is never observed by callers.
    LOG(FATAL) << "Unimplemented";
    space = NULL;
  } else {
    space = new RosAllocSpace(name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit);
  }
  // We start out with only the initial size possibly containing objects.
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "RosAllocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
              << " ) " << *space;
  }
  return space;
}
99
100allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_start, size_t initial_size) {
101 // clear errno to allow PLOG on error
102 errno = 0;
103 // create rosalloc using our backing storage starting at begin and
104 // with a footprint of morecore_start. When morecore_start bytes of
105 // memory is exhaused morecore will be called.
106 allocator::RosAlloc* rosalloc = new art::gc::allocator::RosAlloc(begin, morecore_start);
107 if (rosalloc != NULL) {
108 rosalloc->SetFootprintLimit(initial_size);
109 } else {
110 PLOG(ERROR) << "RosAlloc::Create failed";
111 }
112 return rosalloc;
113}
114
115mirror::Object* RosAllocSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
116 return AllocNonvirtual(self, num_bytes, bytes_allocated);
117}
118
// Allocation slow path: temporarily raises the footprint limit to the space's
// full capacity, attempts the allocation, then clamps the limit back down to
// the actual footprint so the space does not keep the grown limit.
mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
  mirror::Object* result;
  {
    MutexLock mu(self, lock_);
    // Grow as much as possible within the space.
    size_t max_allowed = Capacity();
    rosalloc_->SetFootprintLimit(max_allowed);
    // Try the allocation.
    result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated);
    // Shrink back down as small as possible.
    size_t footprint = rosalloc_->Footprint();
    rosalloc_->SetFootprintLimit(footprint);
  }
  // Note RosAlloc zeroes memory internally.
  // Return the new allocation or NULL.
  CHECK(!kDebugSpaces || result == NULL || Contains(result));
  return result;
}
137
138MallocSpace* RosAllocSpace::CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
139 byte* begin, byte* end, byte* limit, size_t growth_limit) {
140 return new RosAllocSpace(name, mem_map, reinterpret_cast<allocator::RosAlloc*>(allocator),
141 begin, end, limit, growth_limit);
142}
143
// Frees a single object and returns the number of bytes released.
size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
  if (kDebugSpaces) {
    CHECK(ptr != NULL);
    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
  }
  // Query the size before freeing, while the allocation is still valid.
  const size_t bytes_freed = InternalAllocationSize(ptr);
  total_bytes_freed_atomic_.fetch_add(bytes_freed);
  ++total_objects_freed_atomic_;
  if (kRecentFreeCount > 0) {
    // lock_ is held while recording the free for later debugging/diagnostics.
    MutexLock mu(self, lock_);
    RegisterRecentFree(ptr);
  }
  rosalloc_->Free(self, ptr);
  return bytes_freed;
}
159
// Frees num_ptrs objects in bulk and returns the total bytes released.
// Phases: (1) sum sizes lock-free, prefetching ahead to hide cache misses;
// (2) record recent frees under lock_; (3) in debug builds, validate bounds
// and poison the freed memory; (4) hand the whole batch to rosalloc.
size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  DCHECK(ptrs != NULL);

  // Don't need the lock to calculate the size of the freed pointers.
  size_t bytes_freed = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    mirror::Object* ptr = ptrs[i];
    const size_t look_ahead = 8;
    if (kPrefetchDuringRosAllocFreeList && i + look_ahead < num_ptrs) {
      // Touch the object 8 entries ahead so its header is in cache when
      // InternalAllocationSize reads it.
      __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]));
    }
    bytes_freed += InternalAllocationSize(ptr);
  }

  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    for (size_t i = 0; i < num_ptrs; i++) {
      RegisterRecentFree(ptrs[i]);
    }
  }

  if (kDebugSpaces) {
    // Report every out-of-bounds pointer before aborting, not just the first.
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        // Poison freed memory with 0xEF so use-after-free shows up quickly.
        size_t size = rosalloc_->UsableSize(ptrs[i]);
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }

  rosalloc_->BulkFree(self, reinterpret_cast<void**>(ptrs), num_ptrs);
  total_bytes_freed_atomic_.fetch_add(bytes_freed);
  total_objects_freed_atomic_.fetch_add(num_ptrs);
  return bytes_freed;
}
200
// Callback from rosalloc when it needs to increase the footprint.
// C linkage so rosalloc can call it without depending on this class.
// Assumes the non-moving space is the RosAllocSpace owning this allocator
// (checked in debug builds), and forwards the request to MoreCore.
extern "C" void* art_heap_rosalloc_morecore(allocator::RosAlloc* rosalloc, intptr_t increment) {
  Heap* heap = Runtime::Current()->GetHeap();
  DCHECK(heap->GetNonMovingSpace()->IsRosAllocSpace());
  DCHECK_EQ(heap->GetNonMovingSpace()->AsRosAllocSpace()->GetRosAlloc(), rosalloc);
  return heap->GetNonMovingSpace()->MoreCore(increment);
}
208
// Virtual functions can't get inlined.
// Internal non-virtual hook so in-file callers (Free/FreeList) get the
// inlined size computation instead of a virtual dispatch.
inline size_t RosAllocSpace::InternalAllocationSize(const mirror::Object* obj) {
  return AllocationSizeNonvirtual(obj);
}
213
// Public virtual entry point; delegates to the inlined internal helper.
size_t RosAllocSpace::AllocationSize(const mirror::Object* obj) {
  return InternalAllocationSize(obj);
}
217
218size_t RosAllocSpace::Trim() {
219 MutexLock mu(Thread::Current(), lock_);
220 // Trim to release memory at the end of the space.
221 rosalloc_->Trim();
222 // No inspect_all necessary here as trimming of pages is built-in.
223 return 0;
224}
225
226void RosAllocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
227 void* arg) {
228 InspectAllRosAlloc(callback, arg);
229 callback(NULL, NULL, 0, arg); // Indicate end of a space.
230}
231
232size_t RosAllocSpace::GetFootprint() {
233 MutexLock mu(Thread::Current(), lock_);
234 return rosalloc_->Footprint();
235}
236
237size_t RosAllocSpace::GetFootprintLimit() {
238 MutexLock mu(Thread::Current(), lock_);
239 return rosalloc_->FootprintLimit();
240}
241
242void RosAllocSpace::SetFootprintLimit(size_t new_size) {
243 MutexLock mu(Thread::Current(), lock_);
244 VLOG(heap) << "RosAllocSpace::SetFootprintLimit " << PrettySize(new_size);
245 // Compare against the actual footprint, rather than the Size(), because the heap may not have
246 // grown all the way to the allowed size yet.
247 size_t current_space_size = rosalloc_->Footprint();
248 if (new_size < current_space_size) {
249 // Don't let the space grow any more.
250 new_size = current_space_size;
251 }
252 rosalloc_->SetFootprintLimit(new_size);
253}
254
255uint64_t RosAllocSpace::GetBytesAllocated() {
Hiroshi Yamauchi4ce1f002013-11-18 14:49:09 -0800256 size_t bytes_allocated = 0;
257 InspectAllRosAlloc(art::gc::allocator::RosAlloc::BytesAllocatedCallback, &bytes_allocated);
258 return bytes_allocated;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700259}
260
261uint64_t RosAllocSpace::GetObjectsAllocated() {
Hiroshi Yamauchi4ce1f002013-11-18 14:49:09 -0800262 size_t objects_allocated = 0;
263 InspectAllRosAlloc(art::gc::allocator::RosAlloc::ObjectsAllocatedCallback, &objects_allocated);
264 return objects_allocated;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700265}
266
// Runs callback over every chunk of the allocator with mutators quiescent.
// If the caller already holds the mutator lock exclusively (mutators
// suspended), inspect directly; otherwise suspend all threads first and take
// the shutdown/thread-list locks around the inspection, resuming afterwards.
void RosAllocSpace::InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                                       void* arg) NO_THREAD_SAFETY_ANALYSIS {
  // TODO: NO_THREAD_SAFETY_ANALYSIS.
  Thread* self = Thread::Current();
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // The mutators are already suspended. For example, a call path
    // from SignalCatcher::HandleSigQuit().
    rosalloc_->InspectAll(callback, arg);
  } else {
    // The mutators are not suspended yet.
    // Holding the mutator lock shared here would deadlock with SuspendAll.
    DCHECK(!Locks::mutator_lock_->IsSharedHeld(self));
    ThreadList* tl = Runtime::Current()->GetThreadList();
    tl->SuspendAll();
    {
      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
      MutexLock mu2(self, *Locks::thread_list_lock_);
      rosalloc_->InspectAll(callback, arg);
    }
    tl->ResumeAll();
  }
}
288
// Returns the given thread's thread-local rosalloc runs to the shared pool.
void RosAllocSpace::RevokeThreadLocalBuffers(Thread* thread) {
  rosalloc_->RevokeThreadLocalRuns(thread);
}
292
// Returns every thread's thread-local rosalloc runs to the shared pool.
void RosAllocSpace::RevokeAllThreadLocalBuffers() {
  rosalloc_->RevokeAllThreadLocalRuns();
}
296
297} // namespace space
298} // namespace gc
299} // namespace art