blob: 9ddc14b78db071497e7311e903dcfb6356545904 [file] [log] [blame]
Hiroshi Yamauchi7cb7bbc2013-11-18 17:27:37 -08001
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07002/*
3 * Copyright (C) 2013 The Android Open Source Project
4 *
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
8 *
9 * http://www.apache.org/licenses/LICENSE-2.0
10 *
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
16 */
17
18#include "rosalloc_space.h"
19
20#include "rosalloc_space-inl.h"
21#include "gc/accounting/card_table.h"
22#include "gc/heap.h"
23#include "mirror/class-inl.h"
24#include "mirror/object-inl.h"
25#include "runtime.h"
26#include "thread.h"
27#include "thread_list.h"
28#include "utils.h"
29
30#include <valgrind.h>
31#include <memcheck/memcheck.h>
32
33namespace art {
34namespace gc {
35namespace space {
36
// During FreeList(), prefetch the object 8 slots ahead (see look_ahead below)
// so its header is likely in cache when AllocationSizeNonvirtual reads it.
static const bool kPrefetchDuringRosAllocFreeList = true;
38
// Constructs a RosAllocSpace over an already-initialized rosalloc allocator.
// The MemMap and RosAlloc are produced by Create()/CreateRosAlloc(); this
// constructor only records them together with the space bounds.
RosAllocSpace::RosAllocSpace(const std::string& name, MemMap* mem_map,
                             art::gc::allocator::RosAlloc* rosalloc, byte* begin, byte* end,
                             byte* limit, size_t growth_limit)
    : MallocSpace(name, mem_map, begin, end, limit, growth_limit), rosalloc_(rosalloc),
      rosalloc_for_alloc_(rosalloc) {
  CHECK(rosalloc != NULL);
}
46
47RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size, size_t growth_limit,
48 size_t capacity, byte* requested_begin) {
49 uint64_t start_time = 0;
50 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
51 start_time = NanoTime();
52 VLOG(startup) << "RosAllocSpace::Create entering " << name
53 << " initial_size=" << PrettySize(initial_size)
54 << " growth_limit=" << PrettySize(growth_limit)
55 << " capacity=" << PrettySize(capacity)
56 << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
57 }
58
59 // Memory we promise to rosalloc before it asks for morecore.
60 // Note: making this value large means that large allocations are unlikely to succeed as rosalloc
61 // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
62 // size of the large allocation) will be greater than the footprint limit.
63 size_t starting_size = kPageSize;
64 MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
65 requested_begin);
66 if (mem_map == NULL) {
67 LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
68 << PrettySize(capacity);
69 return NULL;
70 }
71 allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size);
72 if (rosalloc == NULL) {
73 LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
74 return NULL;
75 }
76
77 // Protect memory beyond the initial size.
78 byte* end = mem_map->Begin() + starting_size;
79 if (capacity - initial_size > 0) {
80 CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name);
81 }
82
83 // Everything is set so record in immutable structure and leave
84 RosAllocSpace* space;
85 byte* begin = mem_map->Begin();
86 if (RUNNING_ON_VALGRIND > 0) {
Hiroshi Yamauchi7cb7bbc2013-11-18 17:27:37 -080087 space = new ValgrindMallocSpace<RosAllocSpace, art::gc::allocator::RosAlloc*>(
88 name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit, initial_size);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070089 } else {
90 space = new RosAllocSpace(name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit);
91 }
92 // We start out with only the initial size possibly containing objects.
93 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
94 LOG(INFO) << "RosAllocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
95 << " ) " << *space;
96 }
97 return space;
98}
99
100allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_start, size_t initial_size) {
101 // clear errno to allow PLOG on error
102 errno = 0;
103 // create rosalloc using our backing storage starting at begin and
104 // with a footprint of morecore_start. When morecore_start bytes of
105 // memory is exhaused morecore will be called.
106 allocator::RosAlloc* rosalloc = new art::gc::allocator::RosAlloc(begin, morecore_start);
107 if (rosalloc != NULL) {
108 rosalloc->SetFootprintLimit(initial_size);
109 } else {
110 PLOG(ERROR) << "RosAlloc::Create failed";
111 }
112 return rosalloc;
113}
114
// Virtual allocation entry point; forwards to the inlined non-virtual fast
// path (see rosalloc_space-inl.h). Does not grow the footprint limit.
mirror::Object* RosAllocSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
  return AllocNonvirtual(self, num_bytes, bytes_allocated);
}
118
119mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
120 mirror::Object* result;
121 {
122 MutexLock mu(self, lock_);
123 // Grow as much as possible within the space.
124 size_t max_allowed = Capacity();
125 rosalloc_->SetFootprintLimit(max_allowed);
126 // Try the allocation.
127 result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated);
128 // Shrink back down as small as possible.
129 size_t footprint = rosalloc_->Footprint();
130 rosalloc_->SetFootprintLimit(footprint);
131 }
132 // Note RosAlloc zeroes memory internally.
133 // Return the new allocation or NULL.
134 CHECK(!kDebugSpaces || result == NULL || Contains(result));
135 return result;
136}
137
138MallocSpace* RosAllocSpace::CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
139 byte* begin, byte* end, byte* limit, size_t growth_limit) {
140 return new RosAllocSpace(name, mem_map, reinterpret_cast<allocator::RosAlloc*>(allocator),
141 begin, end, limit, growth_limit);
142}
143
// Frees a single object and returns the number of bytes it occupied.
size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
  if (kDebugSpaces) {
    CHECK(ptr != NULL);
    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
  }
  // Query the size before the free below: afterwards ptr no longer refers to
  // a live allocation.
  const size_t bytes_freed = AllocationSizeNonvirtual(ptr);
  if (kRecentFreeCount > 0) {
    // Record the free for debugging of use-after-free; needs lock_.
    MutexLock mu(self, lock_);
    RegisterRecentFree(ptr);
  }
  rosalloc_->Free(self, ptr);
  return bytes_freed;
}
157
// Frees num_ptrs objects in bulk and returns the total bytes they occupied.
// Sizes are summed first (before anything is freed), then the frees are
// recorded, optionally poisoned in debug builds, and finally handed to
// rosalloc's BulkFree in one call.
size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  DCHECK(ptrs != NULL);

  // Don't need the lock to calculate the size of the freed pointers.
  size_t bytes_freed = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    mirror::Object* ptr = ptrs[i];
    const size_t look_ahead = 8;
    if (kPrefetchDuringRosAllocFreeList && i + look_ahead < num_ptrs) {
      // Warm the cache for the object we'll size a few iterations from now.
      __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]));
    }
    bytes_freed += AllocationSizeNonvirtual(ptr);
  }

  if (kRecentFreeCount > 0) {
    // Record the frees for debugging; requires lock_.
    MutexLock mu(self, lock_);
    for (size_t i = 0; i < num_ptrs; i++) {
      RegisterRecentFree(ptrs[i]);
    }
  }

  if (kDebugSpaces) {
    // Verify every pointer is in-bounds and poison its payload with 0xEF so
    // use-after-free is visible. Done before BulkFree while the allocations
    // are still valid.
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        size_t size = rosalloc_->UsableSize(ptrs[i]);
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }

  rosalloc_->BulkFree(self, reinterpret_cast<void**>(ptrs), num_ptrs);
  return bytes_freed;
}
196
// Callback from rosalloc when it needs to increase the footprint.
// Only the heap's non-moving space's rosalloc is expected to call back here
// (checked by the DCHECKs); the request is forwarded to the space's MoreCore.
extern "C" void* art_heap_rosalloc_morecore(allocator::RosAlloc* rosalloc, intptr_t increment) {
  Heap* heap = Runtime::Current()->GetHeap();
  DCHECK(heap->GetNonMovingSpace()->IsRosAllocSpace());
  DCHECK_EQ(heap->GetNonMovingSpace()->AsRosAllocSpace()->GetRosAlloc(), rosalloc);
  return heap->GetNonMovingSpace()->MoreCore(increment);
}
204
// Virtual entry point for querying an object's allocation size; forwards to
// the inlined non-virtual implementation.
size_t RosAllocSpace::AllocationSize(const mirror::Object* obj) {
  return AllocationSizeNonvirtual(obj);
}
208
209size_t RosAllocSpace::Trim() {
210 MutexLock mu(Thread::Current(), lock_);
211 // Trim to release memory at the end of the space.
212 rosalloc_->Trim();
213 // No inspect_all necessary here as trimming of pages is built-in.
214 return 0;
215}
216
217void RosAllocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
218 void* arg) {
219 InspectAllRosAlloc(callback, arg);
220 callback(NULL, NULL, 0, arg); // Indicate end of a space.
221}
222
223size_t RosAllocSpace::GetFootprint() {
224 MutexLock mu(Thread::Current(), lock_);
225 return rosalloc_->Footprint();
226}
227
228size_t RosAllocSpace::GetFootprintLimit() {
229 MutexLock mu(Thread::Current(), lock_);
230 return rosalloc_->FootprintLimit();
231}
232
233void RosAllocSpace::SetFootprintLimit(size_t new_size) {
234 MutexLock mu(Thread::Current(), lock_);
235 VLOG(heap) << "RosAllocSpace::SetFootprintLimit " << PrettySize(new_size);
236 // Compare against the actual footprint, rather than the Size(), because the heap may not have
237 // grown all the way to the allowed size yet.
238 size_t current_space_size = rosalloc_->Footprint();
239 if (new_size < current_space_size) {
240 // Don't let the space grow any more.
241 new_size = current_space_size;
242 }
243 rosalloc_->SetFootprintLimit(new_size);
244}
245
246uint64_t RosAllocSpace::GetBytesAllocated() {
Hiroshi Yamauchi4ce1f002013-11-18 14:49:09 -0800247 size_t bytes_allocated = 0;
248 InspectAllRosAlloc(art::gc::allocator::RosAlloc::BytesAllocatedCallback, &bytes_allocated);
249 return bytes_allocated;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700250}
251
252uint64_t RosAllocSpace::GetObjectsAllocated() {
Hiroshi Yamauchi4ce1f002013-11-18 14:49:09 -0800253 size_t objects_allocated = 0;
254 InspectAllRosAlloc(art::gc::allocator::RosAlloc::ObjectsAllocatedCallback, &objects_allocated);
255 return objects_allocated;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700256}
257
// Runs |callback| over every allocated region via rosalloc's InspectAll.
// The walk must not race with mutators: if the caller already holds the
// mutator lock exclusively, threads are suspended and we can walk directly;
// otherwise all threads are suspended around the walk.
void RosAllocSpace::InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                                       void* arg) NO_THREAD_SAFETY_ANALYSIS {
  // TODO: NO_THREAD_SAFETY_ANALYSIS.
  Thread* self = Thread::Current();
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // The mutators are already suspended. For example, a call path
    // from SignalCatcher::HandleSigQuit().
    rosalloc_->InspectAll(callback, arg);
  } else {
    // The mutators are not suspended yet.
    DCHECK(!Locks::mutator_lock_->IsSharedHeld(self));
    ThreadList* tl = Runtime::Current()->GetThreadList();
    tl->SuspendAll();
    {
      // Hold the shutdown and thread-list locks during the walk so the
      // runtime cannot shut down and threads cannot come or go mid-inspect.
      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
      MutexLock mu2(self, *Locks::thread_list_lock_);
      rosalloc_->InspectAll(callback, arg);
    }
    tl->ResumeAll();
  }
}
279
// Returns |thread|'s thread-local rosalloc runs to the shared pool; forwards
// to the allocator.
void RosAllocSpace::RevokeThreadLocalBuffers(Thread* thread) {
  rosalloc_->RevokeThreadLocalRuns(thread);
}
283
// Returns every thread's thread-local rosalloc runs to the shared pool;
// forwards to the allocator.
void RosAllocSpace::RevokeAllThreadLocalBuffers() {
  rosalloc_->RevokeAllThreadLocalRuns();
}
287
288} // namespace space
289} // namespace gc
290} // namespace art