/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_
#define ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_

#include <set>
#include <stdint.h>
#include <stdlib.h>
#include <string>
#include <sys/mman.h>
#include <vector>

#include "base/mutex.h"
#include "base/logging.h"
#include "globals.h"
#include "mem_map.h"
#include "UniquePtr.h"
#include "utils.h"

// Boilerplate to use hash_map/hash_set on both host and device.
#ifdef HAVE_ANDROID_OS
#include <hash_map>
#include <hash_set>
using std::hash_map;
using std::hash_set;
#else  // HAVE_ANDROID_OS
#ifdef __DEPRECATED
#define ROSALLOC_OLD__DEPRECATED __DEPRECATED
#undef __DEPRECATED
#endif
#include <ext/hash_map>
#include <ext/hash_set>
#ifdef ROSALLOC_OLD__DEPRECATED
#define __DEPRECATED ROSALLOC_OLD__DEPRECATED
#undef ROSALLOC_OLD__DEPRECATED
#endif
using __gnu_cxx::hash_map;
using __gnu_cxx::hash_set;
#endif  // HAVE_ANDROID_OS

namespace art {

namespace gc {
namespace allocator {

// A runs-of-slots memory allocator.
class RosAlloc {
 private:
  // Represents a run of free pages.
  class FreePageRun {
   public:
    byte magic_num_;  // The magic number used for debugging only.

    bool IsFree() const {
      if (kIsDebugBuild) {
        return magic_num_ == kMagicNumFree;
      }
      return true;
    }
    size_t ByteSize(RosAlloc* rosalloc) const EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      const byte* fpr_base = reinterpret_cast<const byte*>(this);
      size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
      size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
      DCHECK_GE(byte_size, static_cast<size_t>(0));
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      return byte_size;
    }
    void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
        EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      byte* fpr_base = reinterpret_cast<byte*>(this);
      size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
      rosalloc->free_page_run_size_map_[pm_idx] = byte_size;
    }
    void* Begin() {
      return reinterpret_cast<void*>(this);
    }
    void* End(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      byte* fpr_base = reinterpret_cast<byte*>(this);
      byte* end = fpr_base + ByteSize(rosalloc);
      return end;
    }
    bool IsLargerThanPageReleaseThreshold(RosAlloc* rosalloc)
        EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      return ByteSize(rosalloc) >= rosalloc->page_release_size_threshold_;
    }
    bool IsAtEndOfSpace(RosAlloc* rosalloc)
        EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      return reinterpret_cast<byte*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
    }
    bool ShouldReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      switch (rosalloc->page_release_mode_) {
        case kPageReleaseModeNone:
          return false;
        case kPageReleaseModeEnd:
          return IsAtEndOfSpace(rosalloc);
        case kPageReleaseModeSize:
          return IsLargerThanPageReleaseThreshold(rosalloc);
        case kPageReleaseModeSizeAndEnd:
          return IsLargerThanPageReleaseThreshold(rosalloc) && IsAtEndOfSpace(rosalloc);
        case kPageReleaseModeAll:
          return true;
        default:
          LOG(FATAL) << "Unexpected page release mode";
          return false;
      }
    }
    void ReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      byte* start = reinterpret_cast<byte*>(this);
      size_t byte_size = ByteSize(rosalloc);
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      bool release_pages = ShouldReleasePages(rosalloc);
      if (kIsDebugBuild) {
        // Exclude the first page that stores the magic number.
        DCHECK_GE(byte_size, static_cast<size_t>(kPageSize));
        start += kPageSize;
        byte_size -= kPageSize;
        if (byte_size > 0) {
          if (release_pages) {
            madvise(start, byte_size, MADV_DONTNEED);
          }
        }
      } else {
        if (release_pages) {
          madvise(start, byte_size, MADV_DONTNEED);
        }
      }
    }
  };

  // Represents a run of memory slots of the same size.
  //
  // A run's memory layout:
  //
  // +-------------------+
  // | magic_num         |
  // +-------------------+
  // | size_bracket_idx  |
  // +-------------------+
  // | is_thread_local   |
  // +-------------------+
  // | to_be_bulk_freed  |
  // +-------------------+
  // | top_bitmap_idx    |
  // +-------------------+
  // |                   |
  // | alloc bit map     |
  // |                   |
  // +-------------------+
  // |                   |
  // | bulk free bit map |
  // |                   |
  // +-------------------+
  // |                   |
  // | thread-local free |
  // | bit map           |
  // |                   |
  // +-------------------+
  // | padding due to    |
  // | alignment         |
  // +-------------------+
  // | slot 0            |
  // +-------------------+
  // | slot 1            |
  // +-------------------+
  // | slot 2            |
  // +-------------------+
  // ...
  // +-------------------+
  // | last slot         |
  // +-------------------+
  //
  class Run {
   public:
    byte magic_num_;                 // The magic number used for debugging.
    byte size_bracket_idx_;          // The index of the size bracket of this run.
    byte is_thread_local_;           // True if this run is used as a thread-local run.
    byte to_be_bulk_freed_;          // Used within BulkFree() to flag a run that's involved with a bulk free.
    uint32_t first_search_vec_idx_;  // The index of the first bitmap vector which may contain an available slot.
    uint32_t alloc_bit_map_[0];      // The bit map that indicates whether each slot is in use.

    // bulk_free_bit_map_[] : The bit map that is used for GC to
    // temporarily mark the slots to free without using a lock. After
    // all the slots to be freed in a run are marked, all those slots
    // get freed in bulk with one lock acquisition per run, as opposed
    // to one per slot, to minimize lock contention. This is used
    // within BulkFree().

    // thread_local_free_bit_map_[] : The bit map that is used for GC
    // to temporarily mark the slots to free in a thread-local run
    // without using a lock (without synchronizing the thread that
    // owns the thread-local run.) When the thread-local run becomes
    // full, the thread will check this bit map and update the
    // allocation bit map of the run (that is, the slots get freed.)

    // Returns the byte size of the header except for the bit maps.
    static size_t fixed_header_size() {
      Run temp;
      size_t size = reinterpret_cast<byte*>(&temp.alloc_bit_map_) - reinterpret_cast<byte*>(&temp);
      DCHECK_EQ(size, static_cast<size_t>(8));
      return size;
    }
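    // A minimal sketch of the arithmetic behind the DCHECK above, assuming
    // the compiler inserts no padding between the four byte fields and the
    // uint32_t: magic_num_ (1) + size_bracket_idx_ (1) + is_thread_local_ (1)
    // + to_be_bulk_freed_ (1) + first_search_vec_idx_ (4) = 8 bytes.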
    // Returns the base address of the bulk free bit map.
    uint32_t* BulkFreeBitMap() {
      return reinterpret_cast<uint32_t*>(reinterpret_cast<byte*>(this) + bulkFreeBitMapOffsets[size_bracket_idx_]);
    }
    // Returns the base address of the thread local free bit map.
    uint32_t* ThreadLocalFreeBitMap() {
      return reinterpret_cast<uint32_t*>(reinterpret_cast<byte*>(this) + threadLocalFreeBitMapOffsets[size_bracket_idx_]);
    }
    void* End() {
      return reinterpret_cast<byte*>(this) + kPageSize * numOfPages[size_bracket_idx_];
    }
    // Returns the number of bitmap words per run.
    size_t NumberOfBitmapVectors() const {
      return RoundUp(numOfSlots[size_bracket_idx_], 32) / 32;
    }
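    // For example, with a hypothetical bracket whose runs hold 100 slots,
    // this returns RoundUp(100, 32) / 32 = 4 32-bit bitmap vectors.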
    void SetIsThreadLocal(bool is_thread_local) {
      is_thread_local_ = is_thread_local ? 1 : 0;
    }
    bool IsThreadLocal() const {
      return is_thread_local_ != 0;
    }
    // Frees slots in the allocation bit map with regard to the
    // thread-local free bit map. Used when a thread-local run becomes
    // full.
    bool MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out);
    // Frees slots in the allocation bit map with regard to the bulk
    // free bit map. Used in a bulk free.
    void MergeBulkFreeBitMapIntoAllocBitMap();
    // Unions the slots to be freed in the bulk free bit map into the
    // thread-local free bit map. In a bulk free, as a two-step
    // process, GC will first record all the slots to free in a run in
    // the bulk free bit map where it can write without a lock, and
    // later acquire a lock once per run to union the bits of the bulk
    // free bit map into the thread-local free bit map.
    void UnionBulkFreeBitMapToThreadLocalFreeBitMap();
    // Allocates a slot in a run.
    void* AllocSlot();
    // Frees a slot in a run. This is used in a non-bulk free.
    void FreeSlot(void* ptr);
    // Marks the slots to free in the bulk free bit map. Returns the bracket size.
    size_t MarkBulkFreeBitMap(void* ptr);
    // Marks the slots to free in the thread-local free bit map.
    void MarkThreadLocalFreeBitMap(void* ptr);
    // Returns the last-word mask: all of the bits in the last word that don't
    // correspond to valid slots are set, to optimize the allocation path.
    static uint32_t GetBitmapLastVectorMask(size_t num_slots, size_t num_vec);
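    // Illustrative sketch (hypothetical values, assuming slots map to bits
    // starting at the least significant bit): with num_slots == 100 and
    // num_vec == 4, the last vector covers slots 96..99, so the returned mask
    // has its low 4 bits clear and its high 28 bits set.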
    // Returns true if all the slots in the run are not in use.
    bool IsAllFree();
    // Returns true if all the slots in the run are in use.
    bool IsFull();
    // Returns true if the bulk free bit map is clean.
    bool IsBulkFreeBitmapClean();
    // Returns true if the thread local free bit map is clean.
    bool IsThreadLocalFreeBitmapClean();
    // Set the alloc_bit_map_ bits for slots that are past the end of the run.
    void SetAllocBitMapBitsForInvalidSlots();
    // Zero the run's data.
    void ZeroData();
    // Zero the run's header.
    void ZeroHeader();
    // Fill the alloc bitmap with 1s.
    void FillAllocBitMap();
    // Iterate over all the slots and apply the given function.
    void InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg), void* arg);
    // Dump the run metadata for debugging.
    std::string Dump();
    // Verify for debugging.
    void Verify(Thread* self, RosAlloc* rosalloc)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);

   private:
    // The common part of MarkBulkFreeBitMap() and MarkThreadLocalFreeBitMap().
    // Returns the bracket size.
    size_t MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base, const char* caller_name);
    // Turns the bit map into a string for debugging.
    static std::string BitMapToStr(uint32_t* bit_map_base, size_t num_vec);
  };

  // The magic number for a run.
  static const byte kMagicNum = 42;
  // The magic number for free pages.
  static const byte kMagicNumFree = 43;
  // The number of size brackets. Sync this with the length of Thread::rosalloc_runs_.
  static const size_t kNumOfSizeBrackets = 34;
  // The number of smaller size brackets that are 16 bytes apart.
  static const size_t kNumOfQuantumSizeBrackets = 32;
  // The sizes (the slot sizes, in bytes) of the size brackets.
  static size_t bracketSizes[kNumOfSizeBrackets];
  // The numbers of pages that are used for runs for each size bracket.
  static size_t numOfPages[kNumOfSizeBrackets];
  // The numbers of slots of the runs for each size bracket.
  static size_t numOfSlots[kNumOfSizeBrackets];
  // The header sizes in bytes of the runs for each size bracket.
  static size_t headerSizes[kNumOfSizeBrackets];
  // The byte offsets of the bulk free bit maps of the runs for each size bracket.
  static size_t bulkFreeBitMapOffsets[kNumOfSizeBrackets];
  // The byte offsets of the thread-local free bit maps of the runs for each size bracket.
  static size_t threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
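  // With the constants above, the brackets are 16, 32, ..., 512 bytes (the 32
  // quantum brackets, 16 bytes apart) followed by the 1 KB and 2 KB brackets;
  // the rounding helpers below assume exactly this layout.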

  // Initialize the run specs (the above arrays).
  static void Initialize();
  static bool initialized_;

  // Returns the byte size of the bracket size from the index.
  static size_t IndexToBracketSize(size_t idx) {
    DCHECK(idx < kNumOfSizeBrackets);
    return bracketSizes[idx];
  }
  // Returns the index of the size bracket from the bracket size.
  static size_t BracketSizeToIndex(size_t size) {
    DCHECK(16 <= size && ((size < 1 * KB && size % 16 == 0) || size == 1 * KB || size == 2 * KB));
    size_t idx;
    if (UNLIKELY(size == 1 * KB)) {
      idx = kNumOfSizeBrackets - 2;
    } else if (UNLIKELY(size == 2 * KB)) {
      idx = kNumOfSizeBrackets - 1;
    } else {
      DCHECK(size < 1 * KB);
      DCHECK_EQ(size % 16, static_cast<size_t>(0));
      idx = size / 16 - 1;
    }
    DCHECK(bracketSizes[idx] == size);
    return idx;
  }
  // Rounds up the size to the nearest bracket size.
  static size_t RoundToBracketSize(size_t size) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= 512)) {
      return RoundUp(size, 16);
    } else if (512 < size && size <= 1 * KB) {
      return 1 * KB;
    } else {
      DCHECK(1 * KB < size && size <= 2 * KB);
      return 2 * KB;
    }
  }
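  // For example: RoundToBracketSize(33) == 48, RoundToBracketSize(600) == 1 * KB,
  // and RoundToBracketSize(1500) == 2 * KB (sizes chosen for illustration only).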
  // Returns the size bracket index from the byte size with rounding.
  static size_t SizeToIndex(size_t size) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= 512)) {
      return RoundUp(size, 16) / 16 - 1;
    } else if (512 < size && size <= 1 * KB) {
      return kNumOfSizeBrackets - 2;
    } else {
      DCHECK(1 * KB < size && size <= 2 * KB);
      return kNumOfSizeBrackets - 1;
    }
  }
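  // For the same illustrative sizes: SizeToIndex(33) == 2 (the 48-byte
  // bracket), SizeToIndex(600) == 32 (1 KB), and SizeToIndex(1500) == 33 (2 KB).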
  // A combination of SizeToIndex() and RoundToBracketSize().
  static size_t SizeToIndexAndBracketSize(size_t size, size_t* bracket_size_out) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= 512)) {
      size_t bracket_size = RoundUp(size, 16);
      *bracket_size_out = bracket_size;
      size_t idx = bracket_size / 16 - 1;
      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
      return idx;
    } else if (512 < size && size <= 1 * KB) {
      size_t bracket_size = 1024;
      *bracket_size_out = bracket_size;
      size_t idx = kNumOfSizeBrackets - 2;
      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
      return idx;
    } else {
      DCHECK(1 * KB < size && size <= 2 * KB);
      size_t bracket_size = 2048;
      *bracket_size_out = bracket_size;
      size_t idx = kNumOfSizeBrackets - 1;
      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
      return idx;
    }
  }
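  // A sketch of a typical call site (variable names are hypothetical):
  //   size_t bracket_size;
  //   size_t idx = SizeToIndexAndBracketSize(33, &bracket_size);
  //   // now idx == 2 and bracket_size == 48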
  // Returns the page map index from an address. Requires that the
  // address is page size aligned.
  size_t ToPageMapIndex(const void* addr) const {
    DCHECK(base_ <= addr && addr < base_ + capacity_);
    size_t byte_offset = reinterpret_cast<const byte*>(addr) - base_;
    DCHECK_EQ(byte_offset % static_cast<size_t>(kPageSize), static_cast<size_t>(0));
    return byte_offset / kPageSize;
  }
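  // For example, an address exactly three pages past base_ maps to page map
  // index 3, whatever kPageSize is on the target.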
  // Returns the page map index from an address, rounding down. Unlike
  // ToPageMapIndex(), the address does not need to be page size aligned.
  size_t RoundDownToPageMapIndex(void* addr) const {
    DCHECK(base_ <= addr && addr < reinterpret_cast<byte*>(base_) + capacity_);
    return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / kPageSize;
  }

  // A memory allocation request larger than this size is treated as a large object and allocated
  // at page granularity.
  static const size_t kLargeSizeThreshold = 2048;

  // If true, check that the returned memory is actually zero.
  static constexpr bool kCheckZeroMemory = kIsDebugBuild;

  // If true, log verbose details of operations.
  static constexpr bool kTraceRosAlloc = false;

  struct hash_run {
    size_t operator()(const RosAlloc::Run* r) const {
      return reinterpret_cast<size_t>(r);
    }
  };

  struct eq_run {
    bool operator()(const RosAlloc::Run* r1, const RosAlloc::Run* r2) const {
      return r1 == r2;
    }
  };

 public:
  // Different page release modes.
  enum PageReleaseMode {
    kPageReleaseModeNone,        // Release no empty pages.
    kPageReleaseModeEnd,         // Release empty pages at the end of the space.
    kPageReleaseModeSize,        // Release empty pages that are larger than the threshold.
    kPageReleaseModeSizeAndEnd,  // Release empty pages that are larger than the threshold or
                                 // at the end of the space.
    kPageReleaseModeAll,         // Release all empty pages.
  };

  // The default value for page_release_size_threshold_.
  static constexpr size_t kDefaultPageReleaseSizeThreshold = 4 * MB;

  // We use thread-local runs for the size brackets whose indexes
  // are less than this index. We use shared (current) runs for the rest.
  static const size_t kNumThreadLocalSizeBrackets = 11;

 private:
  // The base address of the memory region that's managed by this allocator.
  byte* base_;

  // The footprint in bytes of the currently allocated portion of the
  // memory region.
  size_t footprint_;

  // The maximum footprint. The address, base_ + capacity_, indicates
  // the end of the memory region that's currently managed by this allocator.
  size_t capacity_;

  // The maximum capacity. The address, base_ + max_capacity_, indicates
  // the end of the memory region that's ever managed by this allocator.
  size_t max_capacity_;

  // The run sets that hold the runs whose slots are not all
  // full. non_full_runs_[i] is guarded by size_bracket_locks_[i].
  std::set<Run*> non_full_runs_[kNumOfSizeBrackets];
  // The run sets that hold the runs whose slots are all full. This is
  // debug only. full_runs_[i] is guarded by size_bracket_locks_[i].
  hash_set<Run*, hash_run, eq_run> full_runs_[kNumOfSizeBrackets];
  // The set of free pages.
  std::set<FreePageRun*> free_page_runs_ GUARDED_BY(lock_);
  // The dedicated full run is always full and is shared by all threads when
  // revoking happens. This is an optimization, since it enables us to avoid
  // a null check for revoked runs.
  static Run* dedicated_full_run_;
  // Using size_t to ensure that it is at least word aligned.
  static size_t dedicated_full_run_storage_[];
  // The current runs where the allocations are first attempted for
  // the size brackets that do not use thread-local
  // runs. current_runs_[i] is guarded by size_bracket_locks_[i].
  Run* current_runs_[kNumOfSizeBrackets];
  // The mutexes, one per size bracket.
  Mutex* size_bracket_locks_[kNumOfSizeBrackets];
  // Bracket lock names (since locks only have char* names).
  std::string size_bracket_lock_names[kNumOfSizeBrackets];
  // The types of page map entries.
  enum {
    kPageMapEmpty = 0,            // Not allocated.
    kPageMapRun = 1,              // The beginning of a run.
    kPageMapRunPart = 2,          // The non-beginning part of a run.
    kPageMapLargeObject = 3,      // The beginning of a large object.
    kPageMapLargeObjectPart = 4,  // The non-beginning part of a large object.
  };
  // The table that indicates what pages are currently used for.
  byte* page_map_;  // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
  size_t page_map_size_;
  size_t max_page_map_size_;
  UniquePtr<MemMap> page_map_mem_map_;

  // The table that indicates the size of free page runs. These sizes
  // are stored here rather than in the free page headers so that the
  // backing pages can be released.
  std::vector<size_t> free_page_run_size_map_ GUARDED_BY(lock_);
  // The global lock. Used to guard the page map, the free page set,
  // and the footprint.
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // The reader-writer lock to allow one bulk free at a time while
  // allowing multiple individual frees at the same time. Also, this
  // is used to avoid race conditions between BulkFree() and
  // RevokeThreadLocalRuns() on the bulk free bitmaps.
  ReaderWriterMutex bulk_free_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // The page release mode.
  const PageReleaseMode page_release_mode_;
  // Under kPageReleaseModeSize(AndEnd), if the free page run size is
  // greater than or equal to this value, release pages.
  const size_t page_release_size_threshold_;

  // The base address of the memory region that's managed by this allocator.
  byte* Begin() { return base_; }
  // The end address of the memory region that's managed by this allocator.
  byte* End() { return base_ + capacity_; }

  // Page-granularity alloc/free.
  void* AllocPages(Thread* self, size_t num_pages, byte page_map_type)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);
  // Returns how many bytes were freed.
  size_t FreePages(Thread* self, void* ptr, bool already_zero) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Allocate/free a run slot.
  void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated)
      LOCKS_EXCLUDED(lock_);
  // Allocate/free a run slot without acquiring locks.
  // TODO: EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
  void* AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated)
      LOCKS_EXCLUDED(lock_);
  void* AllocFromCurrentRunUnlocked(Thread* self, size_t idx);

  // Returns the bracket size.
  size_t FreeFromRun(Thread* self, void* ptr, Run* run)
      LOCKS_EXCLUDED(lock_);

  // Used to allocate a new thread-local run for a size bracket.
  Run* AllocRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_);

  // Used to acquire a new/reused run for a size bracket. Used when a
  // thread-local or current run gets full.
  Run* RefillRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_);

  // The internal implementation of non-bulk Free().
  size_t FreeInternal(Thread* self, void* ptr) LOCKS_EXCLUDED(lock_);

  // Allocates large objects.
  void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) LOCKS_EXCLUDED(lock_);

  // Revoke a run by adding it to non_full_runs_ or freeing the pages.
  void RevokeRun(Thread* self, size_t idx, Run* run);

  // Revoke the current runs which share an index with the thread local runs.
  void RevokeThreadUnsafeCurrentRuns();

 public:
  RosAlloc(void* base, size_t capacity, size_t max_capacity,
           PageReleaseMode page_release_mode,
           size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
  ~RosAlloc();
  // If kThreadSafe is false then the allocator may avoid acquiring some locks
  // as an optimization. If used, this may cause race conditions if multiple
  // threads are allocating at the same time.
  template<bool kThreadSafe = true>
  void* Alloc(Thread* self, size_t size, size_t* bytes_allocated)
      LOCKS_EXCLUDED(lock_);
  size_t Free(Thread* self, void* ptr)
      LOCKS_EXCLUDED(bulk_free_lock_);
  size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
      LOCKS_EXCLUDED(bulk_free_lock_);
  // Returns the size of the allocated slot for a given allocated memory chunk.
  size_t UsableSize(void* ptr);
  // Returns the size of the allocated slot for a given size.
  size_t UsableSize(size_t bytes) {
    if (UNLIKELY(bytes > kLargeSizeThreshold)) {
      return RoundUp(bytes, kPageSize);
    } else {
      return RoundToBracketSize(bytes);
    }
  }
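  // For example (assuming a 4 KB kPageSize): UsableSize(33) == 48 via the
  // bracket rounding above, while UsableSize(3000) == 4096, since 3000 is
  // above kLargeSizeThreshold and is rounded up to a whole page.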
  // Try to reduce the current footprint by releasing the free page
  // run at the end of the memory region, if any.
  bool Trim();
  // Iterates over all the memory slots and applies the given function.
  void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                  void* arg)
      LOCKS_EXCLUDED(lock_);
  // Release empty pages.
  size_t ReleasePages() LOCKS_EXCLUDED(lock_);
  // Returns the current footprint.
  size_t Footprint() LOCKS_EXCLUDED(lock_);
  // Returns the current capacity, maximum footprint.
  size_t FootprintLimit() LOCKS_EXCLUDED(lock_);
  // Update the current capacity.
  void SetFootprintLimit(size_t bytes) LOCKS_EXCLUDED(lock_);
  // Releases the thread-local runs assigned to the given thread back to the common set of runs.
  void RevokeThreadLocalRuns(Thread* thread);
  // Releases the thread-local runs assigned to all the threads back to the common set of runs.
  void RevokeAllThreadLocalRuns() LOCKS_EXCLUDED(Locks::thread_list_lock_);
  // Assert that the thread-local runs of a thread are revoked.
  void AssertThreadLocalRunsAreRevoked(Thread* thread);
  // Assert that all the thread-local runs are revoked.
  void AssertAllThreadLocalRunsAreRevoked() LOCKS_EXCLUDED(Locks::thread_list_lock_);
  // Dumps the page map for debugging.
  std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_);
  static Run* GetDedicatedFullRun() {
    return dedicated_full_run_;
  }

  // Callbacks for InspectAll that will count the number of bytes
  // allocated and objects allocated, respectively.
  static void BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
  static void ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);

  bool DoesReleaseAllPages() const {
    return page_release_mode_ == kPageReleaseModeAll;
  }

  // Verify for debugging.
  void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
};

}  // namespace allocator
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_