/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_
#define ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_

#include <stdint.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <memory>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>

#include <android-base/logging.h>

#include "base/allocator.h"
#include "base/bit_utils.h"
#include "base/globals.h"
#include "base/mutex.h"
#include "thread.h"

namespace art {

class MemMap;

namespace gc {
namespace allocator {

// A runs-of-slots memory allocator.
class RosAlloc {
 private:
  // Represents a run of free pages.
  class FreePageRun {
   public:
    uint8_t magic_num_;  // The magic number used for debugging only.

    bool IsFree() const {
      return !kIsDebugBuild || magic_num_ == kMagicNumFree;
    }
    size_t ByteSize(RosAlloc* rosalloc) const REQUIRES(rosalloc->lock_) {
      const uint8_t* fpr_base = reinterpret_cast<const uint8_t*>(this);
      size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
      size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
      DCHECK_GE(byte_size, static_cast<size_t>(0));
      DCHECK_ALIGNED(byte_size, kPageSize);
      return byte_size;
    }
    void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
        REQUIRES(rosalloc->lock_) {
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
      size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
      rosalloc->free_page_run_size_map_[pm_idx] = byte_size;
    }
    void* Begin() {
      return reinterpret_cast<void*>(this);
    }
    void* End(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
      uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
      uint8_t* end = fpr_base + ByteSize(rosalloc);
      return end;
    }
    bool IsLargerThanPageReleaseThreshold(RosAlloc* rosalloc)
        REQUIRES(rosalloc->lock_) {
      return ByteSize(rosalloc) >= rosalloc->page_release_size_threshold_;
    }
    bool IsAtEndOfSpace(RosAlloc* rosalloc)
        REQUIRES(rosalloc->lock_) {
      return reinterpret_cast<uint8_t*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
    }
    bool ShouldReleasePages(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
      switch (rosalloc->page_release_mode_) {
        case kPageReleaseModeNone:
          return false;
        case kPageReleaseModeEnd:
          return IsAtEndOfSpace(rosalloc);
        case kPageReleaseModeSize:
          return IsLargerThanPageReleaseThreshold(rosalloc);
        case kPageReleaseModeSizeAndEnd:
          return IsLargerThanPageReleaseThreshold(rosalloc) && IsAtEndOfSpace(rosalloc);
        case kPageReleaseModeAll:
          return true;
        default:
          LOG(FATAL) << "Unexpected page release mode";
          return false;
      }
    }
    void ReleasePages(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
      uint8_t* start = reinterpret_cast<uint8_t*>(this);
      size_t byte_size = ByteSize(rosalloc);
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      if (ShouldReleasePages(rosalloc)) {
        rosalloc->ReleasePageRange(start, start + byte_size);
      }
    }

   private:
    DISALLOW_COPY_AND_ASSIGN(FreePageRun);
  };

  // The slot header.
  class Slot {
   public:
    Slot* Next() const {
      return next_;
    }
    void SetNext(Slot* next) {
      next_ = next;
    }
    // The slot right before this slot in terms of the address.
    Slot* Left(size_t bracket_size) {
      return reinterpret_cast<Slot*>(reinterpret_cast<uintptr_t>(this) - bracket_size);
    }
    void Clear() {
      next_ = nullptr;
    }

   private:
    Slot* next_;  // Next slot in the list.
    friend class RosAlloc;
  };

  // We use the tail (kUseTail == true) for the bulk and thread-local free lists to avoid the need
  // to traverse the list from the head to the tail when merging free lists.
  // We don't use the tail (kUseTail == false) for the free list to avoid the need to manage the
  // tail in the allocation fast path, for performance reasons.
  template<bool kUseTail = true>
  class SlotFreeList {
   public:
    SlotFreeList() : head_(0U), tail_(0U), size_(0), padding_(0) {}
    Slot* Head() const {
      return reinterpret_cast<Slot*>(head_);
    }
    Slot* Tail() const {
      CHECK(kUseTail);
      return reinterpret_cast<Slot*>(tail_);
    }
    size_t Size() const {
      return size_;
    }
    // Removes from the head of the free list.
    Slot* Remove() {
      Slot* slot;
      if (kIsDebugBuild) {
        Verify();
      }
      Slot** headp = reinterpret_cast<Slot**>(&head_);
      Slot** tailp = kUseTail ? reinterpret_cast<Slot**>(&tail_) : nullptr;
      Slot* old_head = *headp;
      if (old_head == nullptr) {
        // List was empty.
        if (kUseTail) {
          DCHECK(*tailp == nullptr);
        }
        return nullptr;
      } else {
        // List wasn't empty.
        if (kUseTail) {
          DCHECK(*tailp != nullptr);
        }
        Slot* old_head_next = old_head->Next();
        slot = old_head;
        *headp = old_head_next;
        if (kUseTail && old_head_next == nullptr) {
          // List becomes empty.
          *tailp = nullptr;
        }
      }
      slot->Clear();
      --size_;
      if (kIsDebugBuild) {
        Verify();
      }
      return slot;
    }
    void Add(Slot* slot) {
      if (kIsDebugBuild) {
        Verify();
      }
      DCHECK(slot != nullptr);
      DCHECK(slot->Next() == nullptr);
      Slot** headp = reinterpret_cast<Slot**>(&head_);
      Slot** tailp = kUseTail ? reinterpret_cast<Slot**>(&tail_) : nullptr;
      Slot* old_head = *headp;
      if (old_head == nullptr) {
        // List was empty.
        if (kUseTail) {
          DCHECK(*tailp == nullptr);
        }
        *headp = slot;
        if (kUseTail) {
          *tailp = slot;
        }
      } else {
        // List wasn't empty.
        if (kUseTail) {
          DCHECK(*tailp != nullptr);
        }
        *headp = slot;
        slot->SetNext(old_head);
      }
      ++size_;
      if (kIsDebugBuild) {
        Verify();
      }
    }
    // Merge the given list into this list. Empties the given list.
    // Deliberately support only a kUseTail == true SlotFreeList parameter because 1) we don't
    // currently have a situation where we need a kUseTail == false SlotFreeList parameter, and 2)
    // supporting the kUseTail == false parameter would require an O(n) linked list traversal to
    // do the merge if 'this' SlotFreeList has kUseTail == false, which we'd like to avoid.
    void Merge(SlotFreeList<true>* list) {
      if (kIsDebugBuild) {
        Verify();
        CHECK(list != nullptr);
        list->Verify();
      }
      if (list->Size() == 0) {
        return;
      }
      Slot** headp = reinterpret_cast<Slot**>(&head_);
      Slot** tailp = kUseTail ? reinterpret_cast<Slot**>(&tail_) : nullptr;
      Slot* old_head = *headp;
      if (old_head == nullptr) {
        // List was empty.
        *headp = list->Head();
        if (kUseTail) {
          *tailp = list->Tail();
        }
        size_ = list->Size();
      } else {
        // List wasn't empty.
        DCHECK(list->Head() != nullptr);
        *headp = list->Head();
        DCHECK(list->Tail() != nullptr);
        list->Tail()->SetNext(old_head);
        // If kUseTail, no change to tailp.
        size_ += list->Size();
      }
      list->Reset();
      if (kIsDebugBuild) {
        Verify();
      }
    }

    void Reset() {
      head_ = 0;
      if (kUseTail) {
        tail_ = 0;
      }
      size_ = 0;
    }

    void Verify() {
      Slot* head = reinterpret_cast<Slot*>(head_);
      Slot* tail = kUseTail ? reinterpret_cast<Slot*>(tail_) : nullptr;
      if (size_ == 0) {
        CHECK(head == nullptr);
        if (kUseTail) {
          CHECK(tail == nullptr);
        }
      } else {
        CHECK(head != nullptr);
        if (kUseTail) {
          CHECK(tail != nullptr);
        }
        size_t count = 0;
        for (Slot* slot = head; slot != nullptr; slot = slot->Next()) {
          ++count;
          if (kUseTail && slot->Next() == nullptr) {
            CHECK_EQ(slot, tail);
          }
        }
        CHECK_EQ(size_, count);
      }
    }

   private:
    // A pointer (Slot*) to the head of the list. Always 8 bytes so that we have the same layout
    // between 32-bit and 64-bit builds. This is not strictly necessary, but we do it for 1)
    // uniformity, 2) so this code won't need to change if we move to a non-low-4G heap in the
    // future, and 3) because the space savings from using 32-bit fields on 32-bit would be lost
    // in the noise (it wouldn't open up enough space for an extra slot).
    uint64_t head_;
    // A pointer (Slot*) to the tail of the list. Always 8 bytes so that we have the same layout
    // between 32-bit and 64-bit builds. The tail is stored to speed up merging of lists.
    // Unused if kUseTail is false.
    uint64_t tail_;
    // The number of slots in the list. This is used to make it fast to check if a free list is
    // all free without traversing the whole free list.
    uint32_t size_;
    uint32_t padding_ ATTRIBUTE_UNUSED;
    friend class RosAlloc;
  };
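
  // Illustrative sketch (not part of the allocator): how these lists combine during a bulk free.
  // Slots are first collected into a tailed list without holding the run's lock, then spliced
  // into the run's tailless allocation free list in O(1):
  //
  //   SlotFreeList<true> pending;        // tail tracked, like bulk_free_list_
  //   pending.Add(slot_a);               // head == slot_a, tail == slot_a
  //   pending.Add(slot_b);               // head == slot_b, tail == slot_a
  //   run->FreeList()->Merge(&pending);  // splice in front of the run's list; 'pending' is reset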

  // Represents a run of memory slots of the same size.
  //
  // A run's memory layout:
  //
  // +-------------------+
  // | magic_num         |
  // +-------------------+
  // | size_bracket_idx  |
  // +-------------------+
  // | is_thread_local   |
  // +-------------------+
  // | to_be_bulk_freed  |
  // +-------------------+
  // |                   |
  // | free list         |
  // |                   |
  // +-------------------+
  // |                   |
  // | bulk free list    |
  // |                   |
  // +-------------------+
  // |                   |
  // | thread-local free |
  // | list              |
  // |                   |
  // +-------------------+
  // | padding due to    |
  // | alignment         |
  // +-------------------+
  // | slot 0            |
  // +-------------------+
  // | slot 1            |
  // +-------------------+
  // | slot 2            |
  // +-------------------+
  // ...
  // +-------------------+
  // | last slot         |
  // +-------------------+
  //
  class Run {
   public:
    uint8_t magic_num_;         // The magic number used for debugging.
    uint8_t size_bracket_idx_;  // The index of the size bracket of this run.
    uint8_t is_thread_local_;   // True if this run is used as a thread-local run.
    uint8_t to_be_bulk_freed_;  // Used within BulkFree() to flag a run that's involved with
                                // a bulk free.
    uint32_t padding_ ATTRIBUTE_UNUSED;
    // Use a tailless free list for free_list_ so that the alloc fast path does not manage the
    // tail.
    SlotFreeList<false> free_list_;
    SlotFreeList<true> bulk_free_list_;
    SlotFreeList<true> thread_local_free_list_;
    // Padding due to alignment
    // Slot 0
    // Slot 1
    // ...

    // Returns the byte size of the header.
    static size_t fixed_header_size() {
      return sizeof(Run);
    }
    Slot* FirstSlot() const {
      const uint8_t idx = size_bracket_idx_;
      return reinterpret_cast<Slot*>(reinterpret_cast<uintptr_t>(this) + headerSizes[idx]);
    }
    Slot* LastSlot() {
      const uint8_t idx = size_bracket_idx_;
      const size_t bracket_size = bracketSizes[idx];
      uintptr_t end = reinterpret_cast<uintptr_t>(End());
      Slot* last_slot = reinterpret_cast<Slot*>(end - bracket_size);
      DCHECK_LE(FirstSlot(), last_slot);
      return last_slot;
    }
    SlotFreeList<false>* FreeList() {
      return &free_list_;
    }
    SlotFreeList<true>* BulkFreeList() {
      return &bulk_free_list_;
    }
    SlotFreeList<true>* ThreadLocalFreeList() {
      return &thread_local_free_list_;
    }
    void* End() {
      return reinterpret_cast<uint8_t*>(this) + kPageSize * numOfPages[size_bracket_idx_];
    }
    void SetIsThreadLocal(bool is_thread_local) {
      is_thread_local_ = is_thread_local ? 1 : 0;
    }
    bool IsThreadLocal() const {
      return is_thread_local_ != 0;
    }
    // Set up the free list for a new/empty run.
    void InitFreeList() {
      const uint8_t idx = size_bracket_idx_;
      const size_t bracket_size = bracketSizes[idx];
      Slot* first_slot = FirstSlot();
      // Add backwards so the first slot is at the head of the list.
      for (Slot* slot = LastSlot(); slot >= first_slot; slot = slot->Left(bracket_size)) {
        free_list_.Add(slot);
      }
    }
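    // For example, a fresh run whose slots are S0 < S1 < S2 adds S2, then S1, then S0, leaving
    // S0 at the head, so AllocSlot() hands out slots in ascending address order.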
    // Merge the thread-local free list into the free list. Used when a thread-local run becomes
    // full.
    bool MergeThreadLocalFreeListToFreeList(bool* is_all_free_after_out);
    // Merge the bulk free list into the free list. Used in a bulk free.
    void MergeBulkFreeListToFreeList();
    // Merge the bulk free list into the thread-local free list. In a bulk free, as a two-step
    // process, GC will first record all the slots to free in a run in the bulk free list, where
    // it can write without a lock, and later acquire a lock once per run to merge the bulk free
    // list into the thread-local free list.
    void MergeBulkFreeListToThreadLocalFreeList();
    // Allocates a slot in a run.
    ALWAYS_INLINE void* AllocSlot();
    // Frees a slot in a run. This is used in a non-bulk free.
    void FreeSlot(void* ptr);
    // Adds the given slot to the bulk free list. Returns the bracket size.
    size_t AddToBulkFreeList(void* ptr);
    // Adds the given slot to the thread-local free list.
    void AddToThreadLocalFreeList(void* ptr);
    // Returns true if none of the slots in the run are in use.
    bool IsAllFree() const {
      return free_list_.Size() == numOfSlots[size_bracket_idx_];
    }
    // Returns the number of free slots.
    size_t NumberOfFreeSlots() {
      return free_list_.Size();
    }
    // Returns true if all the slots in the run are in use.
    ALWAYS_INLINE bool IsFull();
    // Returns true if the bulk free list is empty.
    bool IsBulkFreeListEmpty() const {
      return bulk_free_list_.Size() == 0;
    }
    // Returns true if the thread-local free list is empty.
    bool IsThreadLocalFreeListEmpty() const {
      return thread_local_free_list_.Size() == 0;
    }
    // Zero the run's data.
    void ZeroData();
    // Zero the run's header and the slot headers.
    void ZeroHeaderAndSlotHeaders();
    // Iterate over all the slots and apply the given function.
    void InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg), void* arg);
    // Dump the run metadata for debugging.
    std::string Dump();
    // Verify for debugging.
    void Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool)
        REQUIRES(Locks::mutator_lock_)
        REQUIRES(Locks::thread_list_lock_);

   private:
    // The common part of AddToBulkFreeList() and AddToThreadLocalFreeList(). Returns the bracket
    // size.
    size_t AddToFreeListShared(void* ptr, SlotFreeList<true>* free_list, const char* caller_name);
    // Turns a FreeList into a string for debugging.
    template<bool kUseTail>
    std::string FreeListToStr(SlotFreeList<kUseTail>* free_list);
    // Checks that a given pointer is a valid slot address and returns it as a Slot*.
    Slot* ToSlot(void* ptr) {
      const uint8_t idx = size_bracket_idx_;
      const size_t bracket_size = bracketSizes[idx];
      const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
          - reinterpret_cast<uint8_t*>(FirstSlot());
      DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
      size_t slot_idx = offset_from_slot_base / bracket_size;
      DCHECK_LT(slot_idx, numOfSlots[idx]);
      return reinterpret_cast<Slot*>(ptr);
    }
    size_t SlotIndex(Slot* slot) const {
      const uint8_t idx = size_bracket_idx_;
      const size_t bracket_size = bracketSizes[idx];
      const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(slot)
          - reinterpret_cast<uint8_t*>(FirstSlot());
      DCHECK_EQ(offset_from_slot_base % bracket_size, 0U);
      size_t slot_idx = offset_from_slot_base / bracket_size;
      DCHECK_LT(slot_idx, numOfSlots[idx]);
      return slot_idx;
    }

    // TODO: DISALLOW_COPY_AND_ASSIGN(Run);
  };

  // The magic number for a run.
  static constexpr uint8_t kMagicNum = 42;
  // The magic number for free pages.
  static constexpr uint8_t kMagicNumFree = 43;
  // The number of size brackets.
  static constexpr size_t kNumOfSizeBrackets = 42;
  // The sizes (the slot sizes, in bytes) of the size brackets.
  static size_t bracketSizes[kNumOfSizeBrackets];
  // The numbers of pages that are used for runs for each size bracket.
  static size_t numOfPages[kNumOfSizeBrackets];
  // The numbers of slots of the runs for each size bracket.
  static size_t numOfSlots[kNumOfSizeBrackets];
  // The header sizes in bytes of the runs for each size bracket.
  static size_t headerSizes[kNumOfSizeBrackets];

  // Initialize the run specs (the above arrays).
  static void Initialize();
  static bool initialized_;

  // Returns the byte size of the bracket size from the index.
  static size_t IndexToBracketSize(size_t idx) {
    DCHECK_LT(idx, kNumOfSizeBrackets);
    return bracketSizes[idx];
  }
  // Returns the index of the size bracket from the bracket size.
  static size_t BracketSizeToIndex(size_t size) {
    DCHECK(8 <= size &&
           ((size <= kMaxThreadLocalBracketSize && size % kThreadLocalBracketQuantumSize == 0) ||
            (size <= kMaxRegularBracketSize && size % kBracketQuantumSize == 0) ||
            size == 1 * KB || size == 2 * KB));
    size_t idx;
    if (UNLIKELY(size == 1 * KB)) {
      idx = kNumOfSizeBrackets - 2;
    } else if (UNLIKELY(size == 2 * KB)) {
      idx = kNumOfSizeBrackets - 1;
    } else if (LIKELY(size <= kMaxThreadLocalBracketSize)) {
      DCHECK_EQ(size % kThreadLocalBracketQuantumSize, 0U);
      idx = size / kThreadLocalBracketQuantumSize - 1;
    } else {
      DCHECK(size <= kMaxRegularBracketSize);
      DCHECK_EQ((size - kMaxThreadLocalBracketSize) % kBracketQuantumSize, 0U);
      idx = ((size - kMaxThreadLocalBracketSize) / kBracketQuantumSize - 1)
          + kNumThreadLocalSizeBrackets;
    }
    DCHECK(bracketSizes[idx] == size);
    return idx;
  }
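  // For example (with the constants defined below): bracket size 8 maps to index 0, 128 to
  // index 15 (the last thread-local bracket), 144 to index 16 (the first 16-byte-quantum
  // bracket), 512 to index 39, 1 KB to index 40, and 2 KB to index 41.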
  // Returns true if the given allocation size is for a thread local allocation.
  static bool IsSizeForThreadLocal(size_t size) {
    bool is_size_for_thread_local = size <= kMaxThreadLocalBracketSize;
    DCHECK(size > kLargeSizeThreshold ||
           (is_size_for_thread_local == (SizeToIndex(size) < kNumThreadLocalSizeBrackets)));
    return is_size_for_thread_local;
  }
  // Rounds up the size to the nearest bracket size.
  static size_t RoundToBracketSize(size_t size) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= kMaxThreadLocalBracketSize)) {
      return RoundUp(size, kThreadLocalBracketQuantumSize);
    } else if (size <= kMaxRegularBracketSize) {
      return RoundUp(size, kBracketQuantumSize);
    } else if (UNLIKELY(size <= 1 * KB)) {
      return 1 * KB;
    } else {
      DCHECK_LE(size, 2 * KB);
      return 2 * KB;
    }
  }
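  // For example: RoundToBracketSize(17) == 24 (8-byte quantum), RoundToBracketSize(200) == 208
  // (16-byte quantum), RoundToBracketSize(600) == 1 KB, and RoundToBracketSize(1500) == 2 KB.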
  // Returns the size bracket index from the byte size with rounding.
  static size_t SizeToIndex(size_t size) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= kMaxThreadLocalBracketSize)) {
      return RoundUp(size, kThreadLocalBracketQuantumSize) / kThreadLocalBracketQuantumSize - 1;
    } else if (size <= kMaxRegularBracketSize) {
      return (RoundUp(size, kBracketQuantumSize) - kMaxThreadLocalBracketSize) / kBracketQuantumSize
          - 1 + kNumThreadLocalSizeBrackets;
    } else if (size <= 1 * KB) {
      return kNumOfSizeBrackets - 2;
    } else {
      DCHECK_LE(size, 2 * KB);
      return kNumOfSizeBrackets - 1;
    }
  }
  // A combination of SizeToIndex() and RoundToBracketSize().
  static size_t SizeToIndexAndBracketSize(size_t size, size_t* bracket_size_out) {
    DCHECK(size <= kLargeSizeThreshold);
    size_t idx;
    size_t bracket_size;
    if (LIKELY(size <= kMaxThreadLocalBracketSize)) {
      bracket_size = RoundUp(size, kThreadLocalBracketQuantumSize);
      idx = bracket_size / kThreadLocalBracketQuantumSize - 1;
    } else if (size <= kMaxRegularBracketSize) {
      bracket_size = RoundUp(size, kBracketQuantumSize);
      idx = ((bracket_size - kMaxThreadLocalBracketSize) / kBracketQuantumSize - 1)
          + kNumThreadLocalSizeBrackets;
    } else if (size <= 1 * KB) {
      bracket_size = 1 * KB;
      idx = kNumOfSizeBrackets - 2;
    } else {
      DCHECK(size <= 2 * KB);
      bracket_size = 2 * KB;
      idx = kNumOfSizeBrackets - 1;
    }
    DCHECK_EQ(idx, SizeToIndex(size)) << idx;
    DCHECK_EQ(bracket_size, IndexToBracketSize(idx)) << idx;
    DCHECK_EQ(bracket_size, bracketSizes[idx]) << idx;
    DCHECK_LE(size, bracket_size) << idx;
    DCHECK(size > kMaxRegularBracketSize ||
           (size <= kMaxThreadLocalBracketSize &&
            bracket_size - size < kThreadLocalBracketQuantumSize) ||
           (size <= kMaxRegularBracketSize && bracket_size - size < kBracketQuantumSize)) << idx;
    *bracket_size_out = bracket_size;
    return idx;
  }

  // Returns the page map index from an address. Requires that the
  // address is page size aligned.
  size_t ToPageMapIndex(const void* addr) const {
    DCHECK_LE(base_, addr);
    DCHECK_LT(addr, base_ + capacity_);
    size_t byte_offset = reinterpret_cast<const uint8_t*>(addr) - base_;
    DCHECK_EQ(byte_offset % static_cast<size_t>(kPageSize), static_cast<size_t>(0));
    return byte_offset / kPageSize;
  }
  // Returns the page map index from an address with rounding.
  size_t RoundDownToPageMapIndex(const void* addr) const {
    DCHECK(base_ <= addr && addr < reinterpret_cast<uint8_t*>(base_) + capacity_);
    return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / kPageSize;
  }
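  // For example, an address base_ + 3 * kPageSize + 42 maps to page map index 3 via
  // RoundDownToPageMapIndex(); ToPageMapIndex() requires the exact page-aligned address.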

  // A memory allocation request larger than this size is treated as a large object and allocated
  // at page granularity.
  static const size_t kLargeSizeThreshold = 2048;

  // If true, check that the returned memory is actually zero.
  static constexpr bool kCheckZeroMemory = kIsDebugBuild;
  // Do not check memory when running under a memory tool. In a normal
  // build with kCheckZeroMemory the whole test should be optimized away.
  // TODO: Unprotect before checks.
  ALWAYS_INLINE bool ShouldCheckZeroMemory();

  // If true, log verbose details of operations.
  static constexpr bool kTraceRosAlloc = false;

  struct hash_run {
    size_t operator()(const RosAlloc::Run* r) const {
      return reinterpret_cast<size_t>(r);
    }
  };

  struct eq_run {
    bool operator()(const RosAlloc::Run* r1, const RosAlloc::Run* r2) const {
      return r1 == r2;
    }
  };

 public:
  // Different page release modes.
  enum PageReleaseMode {
    kPageReleaseModeNone,        // Release no empty pages.
    kPageReleaseModeEnd,         // Release empty pages at the end of the space.
    kPageReleaseModeSize,        // Release empty pages that are larger than the threshold.
    kPageReleaseModeSizeAndEnd,  // Release empty pages that are larger than the threshold or
                                 // at the end of the space.
    kPageReleaseModeAll,         // Release all empty pages.
  };
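  // For example, under kPageReleaseModeSizeAndEnd with the default 4 MB threshold, a 5 MB free
  // page run is released only if it also ends at the current footprint; a 5 MB run in the middle
  // of the space stays mapped (see FreePageRun::ShouldReleasePages()).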

  // The default value for page_release_size_threshold_.
  static constexpr size_t kDefaultPageReleaseSizeThreshold = 4 * MB;

  // We use thread-local runs for the size brackets whose indexes
  // are less than this index. We use shared (current) runs for the rest.
  // Sync this with the length of Thread::rosalloc_runs_.
  static const size_t kNumThreadLocalSizeBrackets = 16;
  static_assert(kNumThreadLocalSizeBrackets == kNumRosAllocThreadLocalSizeBracketsInThread,
                "Mismatch between kNumThreadLocalSizeBrackets and "
                "kNumRosAllocThreadLocalSizeBracketsInThread");

  // The size of the largest bracket we use thread-local runs for.
  // This should be equal to bracketSizes[kNumThreadLocalSizeBrackets - 1].
  static const size_t kMaxThreadLocalBracketSize = 128;

  // We use regular (8- or 16-byte increment) runs for the size brackets whose indexes are less
  // than this index.
  static const size_t kNumRegularSizeBrackets = 40;

  // The size of the largest regular (8 or 16-byte increment) bracket. Non-regular brackets are
  // the 1 KB and the 2 KB brackets. This should be equal to
  // bracketSizes[kNumRegularSizeBrackets - 1].
  static const size_t kMaxRegularBracketSize = 512;

  // The bracket size increment for the thread-local brackets (<= kMaxThreadLocalBracketSize bytes).
  static constexpr size_t kThreadLocalBracketQuantumSize = 8;

  // Equal to Log2(kThreadLocalBracketQuantumSize).
  static constexpr size_t kThreadLocalBracketQuantumSizeShift = 3;

  // The bracket size increment for the non-thread-local, regular brackets (of size <=
  // kMaxRegularBracketSize bytes and > kMaxThreadLocalBracketSize bytes).
  static constexpr size_t kBracketQuantumSize = 16;

  // Equal to Log2(kBracketQuantumSize).
  static constexpr size_t kBracketQuantumSizeShift = 4;

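  // Consistency checks (added for illustration) relating the bracket constants above: the
  // 16 thread-local brackets cover sizes 8..128 in 8-byte steps, and the remaining regular
  // brackets cover 144..512 in 16-byte steps.
  static_assert(kMaxThreadLocalBracketSize ==
                    kNumThreadLocalSizeBrackets * kThreadLocalBracketQuantumSize,
                "Thread-local bracket constants are inconsistent");
  static_assert(kMaxRegularBracketSize ==
                    kMaxThreadLocalBracketSize +
                        (kNumRegularSizeBrackets - kNumThreadLocalSizeBrackets) *
                            kBracketQuantumSize,
                "Regular bracket constants are inconsistent");
  static_assert((1u << kThreadLocalBracketQuantumSizeShift) == kThreadLocalBracketQuantumSize,
                "kThreadLocalBracketQuantumSizeShift is wrong");
  static_assert((1u << kBracketQuantumSizeShift) == kBracketQuantumSize,
                "kBracketQuantumSizeShift is wrong");
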
 private:
  // The base address of the memory region that's managed by this allocator.
  uint8_t* base_;

  // The footprint in bytes of the currently allocated portion of the
  // memory region.
  size_t footprint_;

  // The maximum footprint. The address, base_ + capacity_, indicates
  // the end of the memory region that's currently managed by this allocator.
  size_t capacity_;

  // The maximum capacity. The address, base_ + max_capacity_, indicates
  // the end of the memory region that's ever managed by this allocator.
  size_t max_capacity_;

  template<class Key, AllocatorTag kTag, class Compare = std::less<Key>>
  using AllocationTrackingSet = std::set<Key, Compare, TrackingAllocator<Key, kTag>>;

  // The run sets that hold the runs whose slots are not all
  // full. non_full_runs_[i] is guarded by size_bracket_locks_[i].
  AllocationTrackingSet<Run*, kAllocatorTagRosAlloc> non_full_runs_[kNumOfSizeBrackets];
  // The run sets that hold the runs whose slots are all full. This is
  // debug only. full_runs_[i] is guarded by size_bracket_locks_[i].
  std::unordered_set<Run*, hash_run, eq_run, TrackingAllocator<Run*, kAllocatorTagRosAlloc>>
      full_runs_[kNumOfSizeBrackets];
  // The set of free pages.
  AllocationTrackingSet<FreePageRun*, kAllocatorTagRosAlloc> free_page_runs_ GUARDED_BY(lock_);
  // The dedicated full run, which is always full and shared by all threads when revoking happens.
  // This is an optimization since it enables us to avoid a null check for revoked runs.
  static Run* dedicated_full_run_;
  // Using size_t to ensure that it is at least word aligned.
  static size_t dedicated_full_run_storage_[];
  // The current runs where the allocations are first attempted for
  // the size brackets that do not use thread-local
  // runs. current_runs_[i] is guarded by size_bracket_locks_[i].
  Run* current_runs_[kNumOfSizeBrackets];
  // The mutexes, one per size bracket.
  Mutex* size_bracket_locks_[kNumOfSizeBrackets];
  // Bracket lock names (since locks only have char* names).
  std::string size_bracket_lock_names_[kNumOfSizeBrackets];
  // The types of page map entries.
  enum PageMapKind {
    kPageMapReleased = 0,     // Zero and released back to the OS.
    kPageMapEmpty,            // Zero but probably dirty.
    kPageMapRun,              // The beginning of a run.
    kPageMapRunPart,          // The non-beginning part of a run.
    kPageMapLargeObject,      // The beginning of a large object.
    kPageMapLargeObjectPart,  // The non-beginning part of a large object.
  };
  // The table that indicates what pages are currently used for.
  volatile uint8_t* page_map_;  // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
  size_t page_map_size_;
  size_t max_page_map_size_;
  std::unique_ptr<MemMap> page_map_mem_map_;

  // The table that indicates the size of free page runs. These sizes
  // are stored here to avoid storing in the free page header and
  // release backing pages.
  std::vector<size_t, TrackingAllocator<size_t, kAllocatorTagRosAlloc>> free_page_run_size_map_
      GUARDED_BY(lock_);
  // The global lock. Used to guard the page map, the free page set,
  // and the footprint.
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // The reader-writer lock to allow one bulk free at a time while
  // allowing multiple individual frees at the same time. Also, this
  // is used to avoid race conditions between BulkFree() and
  // RevokeThreadLocalRuns() on the bulk free list.
  ReaderWriterMutex bulk_free_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // The page release mode.
  const PageReleaseMode page_release_mode_;
  // Under kPageReleaseModeSize(AndEnd), if the free page run size is
  // greater than or equal to this value, release pages.
  const size_t page_release_size_threshold_;

  // Whether this allocator is running on a memory tool.
  bool is_running_on_memory_tool_;

  // The base address of the memory region that's managed by this allocator.
  uint8_t* Begin() { return base_; }
  // The end address of the memory region that's managed by this allocator.
  uint8_t* End() { return base_ + capacity_; }

  // Page-granularity alloc/free
  void* AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type)
      REQUIRES(lock_);
  // Returns how many bytes were freed.
  size_t FreePages(Thread* self, void* ptr, bool already_zero) REQUIRES(lock_);

  // Allocate/free a run slot.
  void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size,
                     size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);
  // Allocate/free a run slot without acquiring locks.
  // TODO: REQUIRES(Locks::mutator_lock_)
  void* AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated,
                                 size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);
  void* AllocFromCurrentRunUnlocked(Thread* self, size_t idx) REQUIRES(!lock_);

  // Returns the bracket size.
  size_t FreeFromRun(Thread* self, void* ptr, Run* run)
      REQUIRES(!lock_);

  // Used to allocate a new thread-local run for a size bracket.
  Run* AllocRun(Thread* self, size_t idx) REQUIRES(!lock_);

  // Used to acquire a new/reused run for a size bracket. Used when a
  // thread-local or current run gets full.
  Run* RefillRun(Thread* self, size_t idx) REQUIRES(!lock_);

  // The internals of the non-bulk Free().
  size_t FreeInternal(Thread* self, void* ptr) REQUIRES(!lock_);

  // Allocates large objects.
  void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated,
                         size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);

  // Revoke a run by adding it to non_full_runs_ or freeing the pages.
  void RevokeRun(Thread* self, size_t idx, Run* run) REQUIRES(!lock_);

  // Revoke the current runs which share an index with the thread-local runs.
  void RevokeThreadUnsafeCurrentRuns() REQUIRES(!lock_);

  // Release a range of pages.
  size_t ReleasePageRange(uint8_t* start, uint8_t* end) REQUIRES(lock_);

  // Dumps the page map for debugging.
  std::string DumpPageMap() REQUIRES(lock_);

 public:
  RosAlloc(void* base, size_t capacity, size_t max_capacity,
           PageReleaseMode page_release_mode,
           bool running_on_memory_tool,
           size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
  ~RosAlloc();

  static size_t RunFreeListOffset() {
    return OFFSETOF_MEMBER(Run, free_list_);
  }
  static size_t RunFreeListHeadOffset() {
    return OFFSETOF_MEMBER(SlotFreeList<false>, head_);
  }
  static size_t RunFreeListSizeOffset() {
    return OFFSETOF_MEMBER(SlotFreeList<false>, size_);
  }
  static size_t RunSlotNextOffset() {
    return OFFSETOF_MEMBER(Slot, next_);
  }

  // If kThreadSafe is false then the allocator may avoid acquiring some locks as an optimization.
  // If used, this may cause race conditions if multiple threads are allocating at the same time.
  template<bool kThreadSafe = true>
Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 850 | void* Alloc(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size, |
| 851 | size_t* bytes_tl_bulk_allocated) |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 852 | REQUIRES(!lock_); |
Mathieu Chartier | 8585bad | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 853 | size_t Free(Thread* self, void* ptr) |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 854 | REQUIRES(!bulk_free_lock_, !lock_); |
Mathieu Chartier | 8585bad | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 855 | size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs) |
Mathieu Chartier | 9044347 | 2015-07-16 20:32:27 -0700 | [diff] [blame] | 856 | REQUIRES(!bulk_free_lock_, !lock_); |
Ian Rogers | 6a3c1fc | 2014-10-31 00:33:20 -0700 | [diff] [blame] | 857 | |
  // Returns true if the given allocation request can be satisfied by
  // an existing thread local run without allocating a new run.
  ALWAYS_INLINE bool CanAllocFromThreadLocalRun(Thread* self, size_t size);
  // Satisfies the given allocation request from an existing thread local
  // run without allocating a new run.
  ALWAYS_INLINE void* AllocFromThreadLocalRun(Thread* self, size_t size, size_t* bytes_allocated);

  // Returns the maximum bytes that could be allocated for the given
  // size in bulk, that is, the maximum value of the
  // bytes_tl_bulk_allocated out param returned by RosAlloc::Alloc().
  ALWAYS_INLINE size_t MaxBytesBulkAllocatedFor(size_t size);

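  // Together, the two helpers above form a lock-free fast path: probe the
  // current thread's local run first, and fall back to Alloc() (which may take
  // lock_) only when the run cannot satisfy the request. A sketch, assuming a
  // hypothetical caller holding a RosAlloc* `rosalloc`:
  //
  //   size_t bytes_allocated = 0;
  //   void* p = nullptr;
  //   if (rosalloc->CanAllocFromThreadLocalRun(self, size)) {
  //     p = rosalloc->AllocFromThreadLocalRun(self, size, &bytes_allocated);
  //   } else {
  //     size_t usable_size, bytes_tl_bulk_allocated;
  //     p = rosalloc->Alloc<true>(self, size, &bytes_allocated, &usable_size,
  //                               &bytes_tl_bulk_allocated);
  //   }
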
  // Returns the size of the allocated slot for a given allocated memory chunk.
  size_t UsableSize(const void* ptr) REQUIRES(!lock_);
  // Returns the size of the allocated slot for a given size.
  size_t UsableSize(size_t bytes) {
    if (UNLIKELY(bytes > kLargeSizeThreshold)) {
      return RoundUp(bytes, kPageSize);
    } else {
      return RoundToBracketSize(bytes);
    }
  }
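
  // Worked example of the rounding above (the 64-byte bracket is an
  // assumption; real bracket sizes come from the size bracket configuration):
  //   UsableSize(50) -> RoundToBracketSize(50), e.g. the 64-byte bracket.
  //   UsableSize(n) for n > kLargeSizeThreshold -> RoundUp(n, kPageSize),
  //   i.e. a whole number of pages for a large allocation.
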
  // Tries to reduce the current footprint by releasing the free page
  // run at the end of the memory region, if any.
  bool Trim() REQUIRES(!lock_);
  // Iterates over all the memory slots and applies the given function.
  void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                  void* arg)
      REQUIRES(!lock_);

  // Releases empty pages.
  size_t ReleasePages() REQUIRES(!lock_);
  // Returns the current footprint.
  size_t Footprint() REQUIRES(!lock_);
  // Returns the current capacity, i.e., the maximum footprint.
  size_t FootprintLimit() REQUIRES(!lock_);
  // Updates the current capacity.
  void SetFootprintLimit(size_t bytes) REQUIRES(!lock_);

  // Releases the thread-local runs assigned to the given thread back to the common set of runs.
  // Returns the total bytes of free slots in the revoked thread-local runs. This is to be
  // subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting.
  size_t RevokeThreadLocalRuns(Thread* thread) REQUIRES(!lock_, !bulk_free_lock_);
  // Releases the thread-local runs assigned to all the threads back to the common set of runs.
  // Returns the total bytes of free slots in the revoked thread-local runs. This is to be
  // subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting.
  size_t RevokeAllThreadLocalRuns() REQUIRES(!Locks::thread_list_lock_, !lock_, !bulk_free_lock_);
  // Asserts that the thread-local runs of the given thread are revoked.
  void AssertThreadLocalRunsAreRevoked(Thread* thread) REQUIRES(!bulk_free_lock_);
  // Asserts that all the thread-local runs are revoked.
  void AssertAllThreadLocalRunsAreRevoked() REQUIRES(!Locks::thread_list_lock_, !bulk_free_lock_);

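  // A sketch of the revocation flow described above (illustrative only; the
  // Heap-side bookkeeping is paraphrased, not the exact Heap code):
  //
  //   size_t revoked_free_bytes = rosalloc->RevokeAllThreadLocalRuns();
  //   // Free slots in thread-local runs were pre-counted as allocated, so
  //   // cancel that out now that the runs are back in the common set:
  //   num_bytes_allocated_ -= revoked_free_bytes;
  //   if (kIsDebugBuild) {
  //     rosalloc->AssertAllThreadLocalRunsAreRevoked();
  //   }
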
  static Run* GetDedicatedFullRun() {
    return dedicated_full_run_;
  }
  bool IsFreePage(size_t idx) const {
    DCHECK_LT(idx, capacity_ / kPageSize);
    uint8_t pm_type = page_map_[idx];
    return pm_type == kPageMapReleased || pm_type == kPageMapEmpty;
  }

  // Callbacks for InspectAll that will count the number of bytes
  // allocated and objects allocated, respectively.
  static void BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
  static void ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);

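  // A custom handler in the same shape as the callbacks above (a sketch;
  // `arg` carries the accumulator through InspectAll):
  //
  //   static void CountUsedBytes(void* start, void* end, size_t used_bytes,
  //                              void* arg) {
  //     *reinterpret_cast<size_t*>(arg) += used_bytes;
  //   }
  //   ...
  //   size_t total = 0;
  //   rosalloc->InspectAll(CountUsedBytes, &total);
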
  bool DoesReleaseAllPages() const {
    return page_release_mode_ == kPageReleaseModeAll;
  }

  // Verifies the internal state of the allocator, for debugging.
  void Verify() REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !bulk_free_lock_,
                         !lock_);

  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes)
      REQUIRES(!bulk_free_lock_, !lock_);

  void DumpStats(std::ostream& os)
      REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_) REQUIRES(!bulk_free_lock_);

 private:
  friend std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);

  DISALLOW_COPY_AND_ASSIGN(RosAlloc);
};
std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);

// Callback from rosalloc when it needs to increase the footprint. Must be implemented
// elsewhere (currently in rosalloc_space.cc).
void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment);

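// A sketch of the shape such an implementation takes (illustrative only; the
// space lookup below is hypothetical, and the real definition lives in
// rosalloc_space.cc):
//
//   void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment) {
//     // Grow (or shrink, for a negative increment) the mapping backing this
//     // RosAlloc, sbrk-style, and return the old footprint end.
//     space::RosAllocSpace* space = GetSpaceForRosAlloc(rosalloc);  // hypothetical
//     return space->MoreCore(increment);
//   }
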
}  // namespace allocator
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_