/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rosalloc_space.h"

#include "rosalloc_space-inl.h"
#include "gc/accounting/card_table.h"
#include "gc/heap.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
#include "utils.h"

#include <valgrind.h>
#include <memcheck/memcheck.h>

namespace art {
namespace gc {
namespace space {

static const bool kPrefetchDuringRosAllocFreeList = true;

RosAllocSpace::RosAllocSpace(const std::string& name, MemMap* mem_map,
                             art::gc::allocator::RosAlloc* rosalloc, byte* begin, byte* end,
                             byte* limit, size_t growth_limit)
    : MallocSpace(name, mem_map, begin, end, limit, growth_limit), rosalloc_(rosalloc),
      rosalloc_for_alloc_(rosalloc) {
  CHECK(rosalloc != NULL);
}

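// Builds a RosAllocSpace on top of an existing MemMap: it creates the rosalloc allocator at the
// start of the mapping, mprotects everything beyond the initial size so those pages stay
// inaccessible until the space grows, and wraps the space in a ValgrindMallocSpace when running
// under Valgrind.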
RosAllocSpace* RosAllocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
                                               size_t starting_size,
                                               size_t initial_size, size_t growth_limit,
                                               size_t capacity, bool low_memory_mode) {
  DCHECK(mem_map != nullptr);
  allocator::RosAlloc* rosalloc = CreateRosAlloc(mem_map->Begin(), starting_size, initial_size,
                                                 low_memory_mode);
  if (rosalloc == NULL) {
    LOG(ERROR) << "Failed to initialize rosalloc for alloc space (" << name << ")";
    return NULL;
  }

  // Protect memory beyond the initial size.
  byte* end = mem_map->Begin() + starting_size;
  if (capacity - initial_size > 0) {
    CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name);
  }

  // Everything is set up, so record it in the immutable structure and return.
  RosAllocSpace* space;
  byte* begin = mem_map->Begin();
  if (RUNNING_ON_VALGRIND > 0) {
    space = new ValgrindMallocSpace<RosAllocSpace, art::gc::allocator::RosAlloc*>(
        name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit, initial_size);
  } else {
    space = new RosAllocSpace(name, mem_map, rosalloc, begin, end, begin + capacity, growth_limit);
  }
  return space;
}

RosAllocSpace* RosAllocSpace::Create(const std::string& name, size_t initial_size, size_t growth_limit,
                                     size_t capacity, byte* requested_begin, bool low_memory_mode) {
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    VLOG(startup) << "RosAllocSpace::Create entering " << name
                  << " initial_size=" << PrettySize(initial_size)
                  << " growth_limit=" << PrettySize(growth_limit)
                  << " capacity=" << PrettySize(capacity)
                  << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
  }

  // Memory we promise to rosalloc before it asks for morecore.
  // Note: making this value large means that large allocations are unlikely to succeed, as
  // rosalloc will ask sys_alloc for that memory and the request will fail because the footprint
  // (this value plus the size of the large allocation) would exceed the footprint limit.
  size_t starting_size = kPageSize;
  MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
                                 requested_begin);
  if (mem_map == NULL) {
    LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
               << PrettySize(capacity);
    return NULL;
  }

  RosAllocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
                                          growth_limit, capacity, low_memory_mode);
  // We start out with only the initial size possibly containing objects.
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "RosAllocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
              << " ) " << *space;
  }
  return space;
}
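
// Illustrative usage sketch only: the real call site lives in the heap setup code and passes its
// own name and sizes; the values below are placeholders, not what ART actually uses.
//
//   space::RosAllocSpace* space = space::RosAllocSpace::Create(
//       "example rosalloc space", /* initial_size */ 2 * MB, /* growth_limit */ 64 * MB,
//       /* capacity */ 64 * MB, /* requested_begin */ nullptr, /* low_memory_mode */ false);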

allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_start, size_t initial_size,
                                                   bool low_memory_mode) {
  // Clear errno to allow PLOG on error.
  errno = 0;
  // Create rosalloc using our backing storage, starting at begin and with a footprint of
  // morecore_start. When morecore_start bytes of memory are exhausted, morecore will be called.
  allocator::RosAlloc* rosalloc = new art::gc::allocator::RosAlloc(
      begin, morecore_start,
      low_memory_mode ?
          art::gc::allocator::RosAlloc::kPageReleaseModeAll :
          art::gc::allocator::RosAlloc::kPageReleaseModeSizeAndEnd);
  if (rosalloc != NULL) {
    rosalloc->SetFootprintLimit(initial_size);
  } else {
    PLOG(ERROR) << "RosAlloc::Create failed";
  }
  return rosalloc;
}

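// Fast-path allocation entry point; simply forwards to the non-virtual implementation from
// rosalloc_space-inl.h.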
mirror::Object* RosAllocSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
  return AllocNonvirtual(self, num_bytes, bytes_allocated);
}

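// Slow-path allocation: temporarily raise the footprint limit to the full capacity of the space,
// retry the allocation, then shrink the limit back down to the footprint actually in use.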
mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
  mirror::Object* result;
  {
    MutexLock mu(self, lock_);
    // Grow as much as possible within the space.
    size_t max_allowed = Capacity();
    rosalloc_->SetFootprintLimit(max_allowed);
    // Try the allocation.
    result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated);
    // Shrink back down as small as possible.
    size_t footprint = rosalloc_->Footprint();
    rosalloc_->SetFootprintLimit(footprint);
  }
  // Note RosAlloc zeroes memory internally.
  // Return the new allocation or NULL.
  CHECK(!kDebugSpaces || result == NULL || Contains(result));
  return result;
}

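// Factory used by MallocSpace to wrap an existing allocator and mapping in a fresh space object,
// for example when the space is split (as when a zygote space is carved off).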
MallocSpace* RosAllocSpace::CreateInstance(const std::string& name, MemMap* mem_map, void* allocator,
                                           byte* begin, byte* end, byte* limit, size_t growth_limit) {
  return new RosAllocSpace(name, mem_map, reinterpret_cast<allocator::RosAlloc*>(allocator),
                           begin, end, limit, growth_limit);
}

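// Frees a single object and returns the number of bytes that had been allocated for it.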
size_t RosAllocSpace::Free(Thread* self, mirror::Object* ptr) {
  if (kDebugSpaces) {
    CHECK(ptr != NULL);
    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
  }
  const size_t bytes_freed = AllocationSizeNonvirtual(ptr);
  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    RegisterRecentFree(ptr);
  }
  rosalloc_->Free(self, ptr);
  return bytes_freed;
}

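// Bulk-frees an array of objects with a single rosalloc BulkFree call. The freed sizes are summed
// up front without holding the lock, prefetching a few entries ahead to hide cache misses, and
// the total number of bytes freed is returned.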
size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  DCHECK(ptrs != NULL);

  // Don't need the lock to calculate the size of the freed pointers.
  size_t bytes_freed = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    mirror::Object* ptr = ptrs[i];
    const size_t look_ahead = 8;
    if (kPrefetchDuringRosAllocFreeList && i + look_ahead < num_ptrs) {
      __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]));
    }
    bytes_freed += AllocationSizeNonvirtual(ptr);
  }

  if (kRecentFreeCount > 0) {
    MutexLock mu(self, lock_);
    for (size_t i = 0; i < num_ptrs; i++) {
      RegisterRecentFree(ptrs[i]);
    }
  }

  if (kDebugSpaces) {
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        size_t size = rosalloc_->UsableSize(ptrs[i]);
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }

  rosalloc_->BulkFree(self, reinterpret_cast<void**>(ptrs), num_ptrs);
  return bytes_freed;
}

// Callback from rosalloc when it needs to increase the footprint.
extern "C" void* art_heap_rosalloc_morecore(allocator::RosAlloc* rosalloc, intptr_t increment) {
  Heap* heap = Runtime::Current()->GetHeap();
  RosAllocSpace* rosalloc_space = heap->GetRosAllocSpace();
  DCHECK(rosalloc_space != nullptr);
  DCHECK_EQ(rosalloc_space->GetRosAlloc(), rosalloc);
  return rosalloc_space->MoreCore(increment);
}

size_t RosAllocSpace::AllocationSize(mirror::Object* obj) {
  return AllocationSizeNonvirtual(obj);
}

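// Gives memory at the end of the space back via rosalloc's own Trim and, if the configured page
// release mode does not already return every empty page, walks the space and madvises the empty
// pages away, returning the number of bytes reclaimed by that pass.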
size_t RosAllocSpace::Trim() {
  {
    MutexLock mu(Thread::Current(), lock_);
    // Trim to release memory at the end of the space.
    rosalloc_->Trim();
  }
  // If rosalloc does not release all empty pages itself, attempt to release them here.
  if (!rosalloc_->DoesReleaseAllPages()) {
    VLOG(heap) << "RosAllocSpace::Trim() ";
    size_t reclaimed = 0;
    InspectAllRosAlloc(DlmallocMadviseCallback, &reclaimed);
    return reclaimed;
  }
  return 0;
}

void RosAllocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                         void* arg) {
  InspectAllRosAlloc(callback, arg);
  callback(NULL, NULL, 0, arg);  // Indicate end of a space.
}

size_t RosAllocSpace::GetFootprint() {
  MutexLock mu(Thread::Current(), lock_);
  return rosalloc_->Footprint();
}

size_t RosAllocSpace::GetFootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return rosalloc_->FootprintLimit();
}

void RosAllocSpace::SetFootprintLimit(size_t new_size) {
  MutexLock mu(Thread::Current(), lock_);
  VLOG(heap) << "RosAllocSpace::SetFootprintLimit " << PrettySize(new_size);
  // Compare against the actual footprint, rather than the Size(), because the heap may not have
  // grown all the way to the allowed size yet.
  size_t current_space_size = rosalloc_->Footprint();
  if (new_size < current_space_size) {
    // Don't let the space grow any more.
    new_size = current_space_size;
  }
  rosalloc_->SetFootprintLimit(new_size);
}

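// Note: GetBytesAllocated() and GetObjectsAllocated() below are computed by walking the entire
// allocator through InspectAllRosAlloc, so they are comparatively expensive and may briefly
// suspend all mutator threads.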
uint64_t RosAllocSpace::GetBytesAllocated() {
  size_t bytes_allocated = 0;
  InspectAllRosAlloc(art::gc::allocator::RosAlloc::BytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

uint64_t RosAllocSpace::GetObjectsAllocated() {
  size_t objects_allocated = 0;
  InspectAllRosAlloc(art::gc::allocator::RosAlloc::ObjectsAllocatedCallback, &objects_allocated);
  return objects_allocated;
}

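// Runs the callback over everything rosalloc has allocated. The walk must not race with mutators,
// so unless the caller already holds the mutator lock exclusively, all threads are suspended for
// the duration of the inspection.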
void RosAllocSpace::InspectAllRosAlloc(void (*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                                       void* arg) NO_THREAD_SAFETY_ANALYSIS {
  // TODO: NO_THREAD_SAFETY_ANALYSIS.
  Thread* self = Thread::Current();
  if (Locks::mutator_lock_->IsExclusiveHeld(self)) {
    // The mutators are already suspended. For example, a call path
    // from SignalCatcher::HandleSigQuit().
    rosalloc_->InspectAll(callback, arg);
  } else {
    // The mutators are not suspended yet.
    DCHECK(!Locks::mutator_lock_->IsSharedHeld(self));
    ThreadList* tl = Runtime::Current()->GetThreadList();
    tl->SuspendAll();
    {
      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
      MutexLock mu2(self, *Locks::thread_list_lock_);
      rosalloc_->InspectAll(callback, arg);
    }
    tl->ResumeAll();
  }
}

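// Thread-local buffer revocation: hands a thread's thread-local rosalloc runs back to the shared
// pool so their slots can be reused by other threads and show up in global accounting.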
void RosAllocSpace::RevokeThreadLocalBuffers(Thread* thread) {
  rosalloc_->RevokeThreadLocalRuns(thread);
}

void RosAllocSpace::RevokeAllThreadLocalBuffers() {
  rosalloc_->RevokeAllThreadLocalRuns();
}

void RosAllocSpace::Clear() {
  // TODO: Delete and create new mspace here.
  madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
  GetLiveBitmap()->Clear();
  GetMarkBitmap()->Clear();
}

}  // namespace space
}  // namespace gc
}  // namespace art