/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_
#define ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_

#include <stdint.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <memory>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>

#include <android-base/logging.h>

#include "base/allocator.h"
#include "base/bit_utils.h"
#include "base/globals.h"
#include "base/mutex.h"
#include "thread.h"

namespace art {

class MemMap;

namespace gc {
namespace allocator {

// A runs-of-slots memory allocator.
class RosAlloc {
 private:
  // Represents a run of free pages.
  class FreePageRun {
   public:
    uint8_t magic_num_;  // The magic number used for debugging only.

    bool IsFree() const {
      return !kIsDebugBuild || magic_num_ == kMagicNumFree;
    }
    size_t ByteSize(RosAlloc* rosalloc) const REQUIRES(rosalloc->lock_) {
      const uint8_t* fpr_base = reinterpret_cast<const uint8_t*>(this);
      size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
      size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
      DCHECK_GE(byte_size, static_cast<size_t>(0));
      DCHECK_ALIGNED(byte_size, kPageSize);
      return byte_size;
    }
    void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
        REQUIRES(rosalloc->lock_) {
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
      size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
      rosalloc->free_page_run_size_map_[pm_idx] = byte_size;
    }
    void* Begin() {
      return reinterpret_cast<void*>(this);
    }
    void* End(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
      uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
      uint8_t* end = fpr_base + ByteSize(rosalloc);
      return end;
    }
    bool IsLargerThanPageReleaseThreshold(RosAlloc* rosalloc)
        REQUIRES(rosalloc->lock_) {
      return ByteSize(rosalloc) >= rosalloc->page_release_size_threshold_;
    }
    bool IsAtEndOfSpace(RosAlloc* rosalloc)
        REQUIRES(rosalloc->lock_) {
      return reinterpret_cast<uint8_t*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
    }
    bool ShouldReleasePages(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
      switch (rosalloc->page_release_mode_) {
        case kPageReleaseModeNone:
          return false;
        case kPageReleaseModeEnd:
          return IsAtEndOfSpace(rosalloc);
        case kPageReleaseModeSize:
          return IsLargerThanPageReleaseThreshold(rosalloc);
        case kPageReleaseModeSizeAndEnd:
          return IsLargerThanPageReleaseThreshold(rosalloc) && IsAtEndOfSpace(rosalloc);
        case kPageReleaseModeAll:
          return true;
        default:
          LOG(FATAL) << "Unexpected page release mode ";
          return false;
      }
    }
    void ReleasePages(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
      uint8_t* start = reinterpret_cast<uint8_t*>(this);
      size_t byte_size = ByteSize(rosalloc);
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      if (ShouldReleasePages(rosalloc)) {
        rosalloc->ReleasePageRange(start, start + byte_size);
      }
    }

   private:
    DISALLOW_COPY_AND_ASSIGN(FreePageRun);
  };

  // The slot header.
  class Slot {
   public:
    Slot* Next() const {
      return next_;
    }
    void SetNext(Slot* next) {
      next_ = next;
    }
    // The slot right before this slot in terms of the address.
    Slot* Left(size_t bracket_size) {
      return reinterpret_cast<Slot*>(reinterpret_cast<uintptr_t>(this) - bracket_size);
    }
    void Clear() {
      next_ = nullptr;
    }

   private:
    Slot* next_;  // Next slot in the list.
    friend class RosAlloc;
  };

  // We use the tail (kUseTail == true) for the bulk or thread-local free lists to avoid the need
  // to traverse the list from the head to the tail when merging free lists.
  // We don't use the tail (kUseTail == false) for the free list to avoid the need to manage the
  // tail in the allocation fast path for a performance reason.
  template<bool kUseTail = true>
  class SlotFreeList {
   public:
    SlotFreeList() : head_(0U), tail_(0), size_(0), padding_(0) {}
    Slot* Head() const {
      return reinterpret_cast<Slot*>(head_);
    }
    Slot* Tail() const {
      CHECK(kUseTail);
      return reinterpret_cast<Slot*>(tail_);
    }
    size_t Size() const {
      return size_;
    }
    // Removes from the head of the free list.
    Slot* Remove() {
      Slot* slot;
      if (kIsDebugBuild) {
        Verify();
      }
      Slot** headp = reinterpret_cast<Slot**>(&head_);
      Slot** tailp = kUseTail ? reinterpret_cast<Slot**>(&tail_) : nullptr;
      Slot* old_head = *headp;
      if (old_head == nullptr) {
        // List was empty.
        if (kUseTail) {
          DCHECK(*tailp == nullptr);
        }
        return nullptr;
      } else {
        // List wasn't empty.
        if (kUseTail) {
          DCHECK(*tailp != nullptr);
        }
        Slot* old_head_next = old_head->Next();
        slot = old_head;
        *headp = old_head_next;
        if (kUseTail && old_head_next == nullptr) {
          // List becomes empty.
          *tailp = nullptr;
        }
      }
      slot->Clear();
      --size_;
      if (kIsDebugBuild) {
        Verify();
      }
      return slot;
    }
    void Add(Slot* slot) {
      if (kIsDebugBuild) {
        Verify();
      }
      DCHECK(slot != nullptr);
      DCHECK(slot->Next() == nullptr);
      Slot** headp = reinterpret_cast<Slot**>(&head_);
      Slot** tailp = kUseTail ? reinterpret_cast<Slot**>(&tail_) : nullptr;
      Slot* old_head = *headp;
      if (old_head == nullptr) {
        // List was empty.
        if (kUseTail) {
          DCHECK(*tailp == nullptr);
        }
        *headp = slot;
        if (kUseTail) {
          *tailp = slot;
        }
      } else {
        // List wasn't empty.
        if (kUseTail) {
          DCHECK(*tailp != nullptr);
        }
        *headp = slot;
        slot->SetNext(old_head);
      }
      ++size_;
      if (kIsDebugBuild) {
        Verify();
      }
    }
    // Merge the given list into this list. Empty the given list.
    // Deliberately support only a kUseTail == true SlotFreeList parameter because 1) we don't
    // currently have a situation where we need a kUseTail == false SlotFreeList parameter, and 2)
    // supporting the kUseTail == false parameter would require an O(n) linked list traversal to do
    // the merge if 'this' SlotFreeList has kUseTail == false, which we'd like to avoid.
    void Merge(SlotFreeList<true>* list) {
      if (kIsDebugBuild) {
        Verify();
        CHECK(list != nullptr);
        list->Verify();
      }
      if (list->Size() == 0) {
        return;
      }
      Slot** headp = reinterpret_cast<Slot**>(&head_);
      Slot** tailp = kUseTail ? reinterpret_cast<Slot**>(&tail_) : nullptr;
      Slot* old_head = *headp;
      if (old_head == nullptr) {
        // List was empty.
        *headp = list->Head();
        if (kUseTail) {
          *tailp = list->Tail();
        }
        size_ = list->Size();
      } else {
        // List wasn't empty.
        DCHECK(list->Head() != nullptr);
        *headp = list->Head();
        DCHECK(list->Tail() != nullptr);
        list->Tail()->SetNext(old_head);
        // if kUseTail, no change to tailp.
        size_ += list->Size();
      }
      list->Reset();
      if (kIsDebugBuild) {
        Verify();
      }
    }

    void Reset() {
      head_ = 0;
      if (kUseTail) {
        tail_ = 0;
      }
      size_ = 0;
    }

    void Verify() {
      Slot* head = reinterpret_cast<Slot*>(head_);
      Slot* tail = kUseTail ? reinterpret_cast<Slot*>(tail_) : nullptr;
      if (size_ == 0) {
        CHECK(head == nullptr);
        if (kUseTail) {
          CHECK(tail == nullptr);
        }
      } else {
        CHECK(head != nullptr);
        if (kUseTail) {
          CHECK(tail != nullptr);
        }
        size_t count = 0;
        for (Slot* slot = head; slot != nullptr; slot = slot->Next()) {
          ++count;
          if (kUseTail && slot->Next() == nullptr) {
            CHECK_EQ(slot, tail);
          }
        }
        CHECK_EQ(size_, count);
      }
    }

   private:
    // A pointer (Slot*) to the head of the list. Always 8 bytes so that we will have the same
    // layout between 32 bit and 64 bit, which is not strictly necessary, but we do so for 1)
    // uniformity, 2) we won't need to change this code if we move to a non-low 4G heap in the
    // future, and 3) the space savings by using 32 bit fields in 32 bit would be lost in noise
    // (won't open up enough space to cause an extra slot to be available).
    uint64_t head_;
    // A pointer (Slot*) to the tail of the list. Always 8 bytes so that we will have the same
    // layout between 32 bit and 64 bit. The tail is stored to speed up merging of lists.
    // Unused if kUseTail is false.
    uint64_t tail_;
    // The number of slots in the list. This is used to make it fast to check if a free list is all
    // free without traversing the whole free list.
    uint32_t size_;
    uint32_t padding_ ATTRIBUTE_UNUSED;
    friend class RosAlloc;
  };

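  // Note that the free lists are intrusive: a free slot's own memory holds the Slot::next_
  // pointer, so tracking free slots needs no storage beyond the head_/tail_/size_ fields kept in
  // the run header.
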
  // Represents a run of memory slots of the same size.
  //
  // A run's memory layout:
  //
  // +-------------------+
  // | magic_num         |
  // +-------------------+
  // | size_bracket_idx  |
  // +-------------------+
  // | is_thread_local   |
  // +-------------------+
  // | to_be_bulk_freed  |
  // +-------------------+
  // |                   |
  // | free list         |
  // |                   |
  // +-------------------+
  // |                   |
  // | bulk free list    |
  // |                   |
  // +-------------------+
  // |                   |
  // | thread-local free |
  // | list              |
  // |                   |
  // +-------------------+
  // | padding due to    |
  // | alignment         |
  // +-------------------+
  // | slot 0            |
  // +-------------------+
  // | slot 1            |
  // +-------------------+
  // | slot 2            |
  // +-------------------+
  // ...
  // +-------------------+
  // | last slot         |
  // +-------------------+
  //
  class Run {
   public:
    uint8_t magic_num_;                 // The magic number used for debugging.
    uint8_t size_bracket_idx_;          // The index of the size bracket of this run.
    uint8_t is_thread_local_;           // True if this run is used as a thread-local run.
    uint8_t to_be_bulk_freed_;          // Used within BulkFree() to flag a run that's involved with a bulk free.
    uint32_t padding_ ATTRIBUTE_UNUSED;
    // Use a tailless free list for free_list_ so that the alloc fast path does not manage the tail.
    SlotFreeList<false> free_list_;
    SlotFreeList<true> bulk_free_list_;
    SlotFreeList<true> thread_local_free_list_;
    // Padding due to alignment
    // Slot 0
    // Slot 1
    // ...

    // Returns the byte size of the header.
    static size_t fixed_header_size() {
      return sizeof(Run);
    }
    Slot* FirstSlot() const {
      const uint8_t idx = size_bracket_idx_;
      return reinterpret_cast<Slot*>(reinterpret_cast<uintptr_t>(this) + headerSizes[idx]);
    }
    Slot* LastSlot() {
      const uint8_t idx = size_bracket_idx_;
      const size_t bracket_size = bracketSizes[idx];
      uintptr_t end = reinterpret_cast<uintptr_t>(End());
      Slot* last_slot = reinterpret_cast<Slot*>(end - bracket_size);
      DCHECK_LE(FirstSlot(), last_slot);
      return last_slot;
    }
    SlotFreeList<false>* FreeList() {
      return &free_list_;
    }
    SlotFreeList<true>* BulkFreeList() {
      return &bulk_free_list_;
    }
    SlotFreeList<true>* ThreadLocalFreeList() {
      return &thread_local_free_list_;
    }
    void* End() {
      return reinterpret_cast<uint8_t*>(this) + kPageSize * numOfPages[size_bracket_idx_];
    }
    void SetIsThreadLocal(bool is_thread_local) {
      is_thread_local_ = is_thread_local ? 1 : 0;
    }
    bool IsThreadLocal() const {
      return is_thread_local_ != 0;
    }
    // Set up the free list for a new/empty run.
    void InitFreeList() {
      const uint8_t idx = size_bracket_idx_;
      const size_t bracket_size = bracketSizes[idx];
      Slot* first_slot = FirstSlot();
      // Add backwards so the first slot is at the head of the list.
      for (Slot* slot = LastSlot(); slot >= first_slot; slot = slot->Left(bracket_size)) {
        free_list_.Add(slot);
      }
    }
    // Merge the thread local free list to the free list. Used when a thread-local run becomes
    // full.
    bool MergeThreadLocalFreeListToFreeList(bool* is_all_free_after_out);
    // Merge the bulk free list to the free list. Used in a bulk free.
    void MergeBulkFreeListToFreeList();
    // Merge the bulk free list to the thread local free list. In a bulk free, as a two-step
    // process, GC will first record all the slots to free in a run in the bulk free list where it
    // can write without a lock, and later acquire a lock once per run to merge the bulk free list
    // to the thread-local free list.
    void MergeBulkFreeListToThreadLocalFreeList();
    // Allocates a slot in a run.
    ALWAYS_INLINE void* AllocSlot();
    // Frees a slot in a run. This is used in a non-bulk free.
    void FreeSlot(void* ptr);
    // Add the given slot to the bulk free list. Returns the bracket size.
    size_t AddToBulkFreeList(void* ptr);
    // Add the given slot to the thread-local free list.
    void AddToThreadLocalFreeList(void* ptr);
    // Returns true if none of the slots in the run are in use.
    bool IsAllFree() const {
      return free_list_.Size() == numOfSlots[size_bracket_idx_];
    }
    // Returns the number of free slots.
    size_t NumberOfFreeSlots() {
      return free_list_.Size();
    }
    // Returns true if all the slots in the run are in use.
    ALWAYS_INLINE bool IsFull();
    // Returns true if the bulk free list is empty.
    bool IsBulkFreeListEmpty() const {
      return bulk_free_list_.Size() == 0;
    }
    // Returns true if the thread local free list is empty.
    bool IsThreadLocalFreeListEmpty() const {
      return thread_local_free_list_.Size() == 0;
    }
    // Zero the run's data.
    void ZeroData();
    // Zero the run's header and the slot headers.
    void ZeroHeaderAndSlotHeaders();
    // Iterate over all the slots and apply the given function.
    void InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg), void* arg);
    // Dump the run metadata for debugging.
    std::string Dump();
    // Verify for debugging.
    void Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool)
        REQUIRES(Locks::mutator_lock_)
        REQUIRES(Locks::thread_list_lock_);

   private:
    // The common part of AddToBulkFreeList() and AddToThreadLocalFreeList(). Returns the bracket
    // size.
    size_t AddToFreeListShared(void* ptr, SlotFreeList<true>* free_list, const char* caller_name);
    // Turns a FreeList into a string for debugging.
    template<bool kUseTail>
    std::string FreeListToStr(SlotFreeList<kUseTail>* free_list);
    // Checks that a given pointer is a valid slot address and returns it as a Slot*.
    Slot* ToSlot(void* ptr) {
      const uint8_t idx = size_bracket_idx_;
      const size_t bracket_size = bracketSizes[idx];
      const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
          - reinterpret_cast<uint8_t*>(FirstSlot());
      DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
      size_t slot_idx = offset_from_slot_base / bracket_size;
      DCHECK_LT(slot_idx, numOfSlots[idx]);
      return reinterpret_cast<Slot*>(ptr);
    }
    size_t SlotIndex(Slot* slot) const {
      const uint8_t idx = size_bracket_idx_;
      const size_t bracket_size = bracketSizes[idx];
      const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(slot)
          - reinterpret_cast<uint8_t*>(FirstSlot());
      DCHECK_EQ(offset_from_slot_base % bracket_size, 0U);
      size_t slot_idx = offset_from_slot_base / bracket_size;
      DCHECK_LT(slot_idx, numOfSlots[idx]);
      return slot_idx;
    }

    // TODO: DISALLOW_COPY_AND_ASSIGN(Run);
  };

  // The magic number for a run.
  static constexpr uint8_t kMagicNum = 42;
  // The magic number for free pages.
  static constexpr uint8_t kMagicNumFree = 43;
  // The number of size brackets.
  static constexpr size_t kNumOfSizeBrackets = 42;
  // The sizes (the slot sizes, in bytes) of the size brackets.
  static size_t bracketSizes[kNumOfSizeBrackets];
  // The numbers of pages that are used for runs for each size bracket.
  static size_t numOfPages[kNumOfSizeBrackets];
  // The numbers of slots of the runs for each size bracket.
  static size_t numOfSlots[kNumOfSizeBrackets];
  // The header sizes in bytes of the runs for each size bracket.
  static size_t headerSizes[kNumOfSizeBrackets];
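  // A hedged sketch of the invariant set up by Initialize() in rosalloc.cc (assuming that setup):
  // for each bracket index idx, the header (including alignment padding) and the slots exactly
  // tile the run, i.e.
  //   headerSizes[idx] + numOfSlots[idx] * bracketSizes[idx] == numOfPages[idx] * kPageSize.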

  // Initialize the run specs (the above arrays).
  static void Initialize();
  static bool initialized_;

  // Returns the bracket size in bytes for the given bracket index.
  static size_t IndexToBracketSize(size_t idx) {
    DCHECK_LT(idx, kNumOfSizeBrackets);
    return bracketSizes[idx];
  }
  // Returns the index of the size bracket from the bracket size.
  static size_t BracketSizeToIndex(size_t size) {
    DCHECK(8 <= size &&
           ((size <= kMaxThreadLocalBracketSize && size % kThreadLocalBracketQuantumSize == 0) ||
            (size <= kMaxRegularBracketSize && size % kBracketQuantumSize == 0) ||
            size == 1 * KB || size == 2 * KB));
    size_t idx;
    if (UNLIKELY(size == 1 * KB)) {
      idx = kNumOfSizeBrackets - 2;
    } else if (UNLIKELY(size == 2 * KB)) {
      idx = kNumOfSizeBrackets - 1;
    } else if (LIKELY(size <= kMaxThreadLocalBracketSize)) {
      DCHECK_EQ(size % kThreadLocalBracketQuantumSize, 0U);
      idx = size / kThreadLocalBracketQuantumSize - 1;
    } else {
      DCHECK(size <= kMaxRegularBracketSize);
      DCHECK_EQ((size - kMaxThreadLocalBracketSize) % kBracketQuantumSize, 0U);
      idx = ((size - kMaxThreadLocalBracketSize) / kBracketQuantumSize - 1)
          + kNumThreadLocalSizeBrackets;
    }
    DCHECK(bracketSizes[idx] == size);
    return idx;
  }
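  // Worked examples: BracketSizeToIndex(8) == 0 and BracketSizeToIndex(128) == 15 (thread-local
  // brackets), BracketSizeToIndex(144) == 16 and BracketSizeToIndex(512) == 39 (regular brackets),
  // BracketSizeToIndex(1 * KB) == 40, BracketSizeToIndex(2 * KB) == 41.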
  // Returns true if the given allocation size is for a thread local allocation.
  static bool IsSizeForThreadLocal(size_t size) {
    bool is_size_for_thread_local = size <= kMaxThreadLocalBracketSize;
    DCHECK(size > kLargeSizeThreshold ||
           (is_size_for_thread_local == (SizeToIndex(size) < kNumThreadLocalSizeBrackets)));
    return is_size_for_thread_local;
  }
  // Rounds up the size to the nearest bracket size.
  static size_t RoundToBracketSize(size_t size) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= kMaxThreadLocalBracketSize)) {
      return RoundUp(size, kThreadLocalBracketQuantumSize);
    } else if (size <= kMaxRegularBracketSize) {
      return RoundUp(size, kBracketQuantumSize);
    } else if (UNLIKELY(size <= 1 * KB)) {
      return 1 * KB;
    } else {
      DCHECK_LE(size, 2 * KB);
      return 2 * KB;
    }
  }
  // Returns the size bracket index from the byte size with rounding.
  static size_t SizeToIndex(size_t size) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= kMaxThreadLocalBracketSize)) {
      return RoundUp(size, kThreadLocalBracketQuantumSize) / kThreadLocalBracketQuantumSize - 1;
    } else if (size <= kMaxRegularBracketSize) {
      return (RoundUp(size, kBracketQuantumSize) - kMaxThreadLocalBracketSize) / kBracketQuantumSize
          - 1 + kNumThreadLocalSizeBrackets;
    } else if (size <= 1 * KB) {
      return kNumOfSizeBrackets - 2;
    } else {
      DCHECK_LE(size, 2 * KB);
      return kNumOfSizeBrackets - 1;
    }
  }
  // A combination of SizeToIndex() and RoundToBracketSize().
  static size_t SizeToIndexAndBracketSize(size_t size, size_t* bracket_size_out) {
    DCHECK(size <= kLargeSizeThreshold);
    size_t idx;
    size_t bracket_size;
    if (LIKELY(size <= kMaxThreadLocalBracketSize)) {
      bracket_size = RoundUp(size, kThreadLocalBracketQuantumSize);
      idx = bracket_size / kThreadLocalBracketQuantumSize - 1;
    } else if (size <= kMaxRegularBracketSize) {
      bracket_size = RoundUp(size, kBracketQuantumSize);
      idx = ((bracket_size - kMaxThreadLocalBracketSize) / kBracketQuantumSize - 1)
          + kNumThreadLocalSizeBrackets;
    } else if (size <= 1 * KB) {
      bracket_size = 1 * KB;
      idx = kNumOfSizeBrackets - 2;
    } else {
      DCHECK(size <= 2 * KB);
      bracket_size = 2 * KB;
      idx = kNumOfSizeBrackets - 1;
    }
    DCHECK_EQ(idx, SizeToIndex(size)) << idx;
    DCHECK_EQ(bracket_size, IndexToBracketSize(idx)) << idx;
    DCHECK_EQ(bracket_size, bracketSizes[idx]) << idx;
    DCHECK_LE(size, bracket_size) << idx;
    DCHECK(size > kMaxRegularBracketSize ||
           (size <= kMaxThreadLocalBracketSize &&
            bracket_size - size < kThreadLocalBracketQuantumSize) ||
           (size <= kMaxRegularBracketSize && bracket_size - size < kBracketQuantumSize)) << idx;
    *bracket_size_out = bracket_size;
    return idx;
  }
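  // Worked example: a 37-byte request rounds up to the 40-byte thread-local bracket (idx 4), a
  // 200-byte request rounds up to the 208-byte regular bracket (idx 20), and a 1500-byte request
  // uses the 2 KB bracket (idx 41).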

  // Returns the page map index from an address. Requires that the
  // address is page size aligned.
  size_t ToPageMapIndex(const void* addr) const {
    DCHECK_LE(base_, addr);
    DCHECK_LT(addr, base_ + capacity_);
    size_t byte_offset = reinterpret_cast<const uint8_t*>(addr) - base_;
    DCHECK_EQ(byte_offset % static_cast<size_t>(kPageSize), static_cast<size_t>(0));
    return byte_offset / kPageSize;
  }
  // Returns the page map index from an address, rounding down to the containing page.
  size_t RoundDownToPageMapIndex(const void* addr) const {
    DCHECK(base_ <= addr && addr < reinterpret_cast<uint8_t*>(base_) + capacity_);
    return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / kPageSize;
  }
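  // For illustration: base_ + 5 * kPageSize maps to page map index 5, and an unaligned interior
  // address such as base_ + 5 * kPageSize + 16 also rounds down to index 5 via
  // RoundDownToPageMapIndex().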

  // A memory allocation request larger than this size is treated as a large object and allocated
  // at page granularity.
  static const size_t kLargeSizeThreshold = 2048;

  // If true, check that the returned memory is actually zero.
  static constexpr bool kCheckZeroMemory = kIsDebugBuild;
  // Valgrind protects memory, so do not check memory when running under Valgrind. In a normal
  // build with kCheckZeroMemory the whole test should be optimized away.
  // TODO: Unprotect before checks.
  ALWAYS_INLINE bool ShouldCheckZeroMemory();

  // If true, log verbose details of operations.
  static constexpr bool kTraceRosAlloc = false;

  struct hash_run {
    size_t operator()(const RosAlloc::Run* r) const {
      return reinterpret_cast<size_t>(r);
    }
  };

  struct eq_run {
    bool operator()(const RosAlloc::Run* r1, const RosAlloc::Run* r2) const {
      return r1 == r2;
    }
  };

 public:
  // Different page release modes.
  enum PageReleaseMode {
    kPageReleaseModeNone,        // Release no empty pages.
    kPageReleaseModeEnd,         // Release empty pages at the end of the space.
    kPageReleaseModeSize,        // Release empty pages that are larger than the threshold.
    kPageReleaseModeSizeAndEnd,  // Release empty pages that are larger than the threshold or
                                 // at the end of the space.
    kPageReleaseModeAll,         // Release all empty pages.
  };

  // The default value for page_release_size_threshold_.
  static constexpr size_t kDefaultPageReleaseSizeThreshold = 4 * MB;

  // We use thread-local runs for the size brackets whose indexes
  // are less than this index. We use shared (current) runs for the rest.
  // Sync this with the length of Thread::rosalloc_runs_.
  static const size_t kNumThreadLocalSizeBrackets = 16;
  static_assert(kNumThreadLocalSizeBrackets == kNumRosAllocThreadLocalSizeBracketsInThread,
                "Mismatch between kNumThreadLocalSizeBrackets and "
                "kNumRosAllocThreadLocalSizeBracketsInThread");

  // The size of the largest bracket we use thread-local runs for.
  // This should be equal to bracketSizes[kNumThreadLocalSizeBrackets - 1].
  static const size_t kMaxThreadLocalBracketSize = 128;

  // We use regular (8- or 16-byte increment) runs for the size brackets whose indexes are less
  // than this index.
  static const size_t kNumRegularSizeBrackets = 40;

  // The size of the largest regular (8- or 16-byte increment) bracket. Non-regular brackets are
  // the 1 KB and the 2 KB brackets. This should be equal to
  // bracketSizes[kNumRegularSizeBrackets - 1].
  static const size_t kMaxRegularBracketSize = 512;

  // The bracket size increment for the thread-local brackets (<= kMaxThreadLocalBracketSize bytes).
  static constexpr size_t kThreadLocalBracketQuantumSize = 8;

  // Equal to Log2(kThreadLocalBracketQuantumSize).
  static constexpr size_t kThreadLocalBracketQuantumSizeShift = 3;

  // The bracket size increment for the non-thread-local, regular brackets (of size <=
  // kMaxRegularBracketSize bytes and > kMaxThreadLocalBracketSize bytes).
  static constexpr size_t kBracketQuantumSize = 16;

  // Equal to Log2(kBracketQuantumSize).
  static constexpr size_t kBracketQuantumSizeShift = 4;
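
  // An illustrative consistency check of how the brackets decompose, based on the constants
  // above: 16 thread-local brackets (8, 16, ..., 128 bytes in 8-byte steps), 24 further regular
  // brackets (144, 160, ..., 512 bytes in 16-byte steps), plus the 1 KB and 2 KB brackets.
  static_assert(kNumThreadLocalSizeBrackets +
                    (kMaxRegularBracketSize - kMaxThreadLocalBracketSize) / kBracketQuantumSize ==
                kNumRegularSizeBrackets,
                "Regular bracket count mismatch");
  static_assert(kNumRegularSizeBrackets + 2 == kNumOfSizeBrackets,
                "Total bracket count mismatch");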

 private:
  // The base address of the memory region that's managed by this allocator.
  uint8_t* base_;

  // The footprint in bytes of the currently allocated portion of the
  // memory region.
  size_t footprint_;

  // The maximum footprint. The address, base_ + capacity_, indicates
  // the end of the memory region that's currently managed by this allocator.
  size_t capacity_;

  // The maximum capacity. The address, base_ + max_capacity_, indicates
  // the end of the memory region that's ever managed by this allocator.
  size_t max_capacity_;

  template<class Key, AllocatorTag kTag, class Compare = std::less<Key>>
  using AllocationTrackingSet = std::set<Key, Compare, TrackingAllocator<Key, kTag>>;

  // The run sets that hold the runs whose slots are not all
  // full. non_full_runs_[i] is guarded by size_bracket_locks_[i].
  AllocationTrackingSet<Run*, kAllocatorTagRosAlloc> non_full_runs_[kNumOfSizeBrackets];
  // The run sets that hold the runs whose slots are all full. This is
  // debug only. full_runs_[i] is guarded by size_bracket_locks_[i].
  std::unordered_set<Run*, hash_run, eq_run, TrackingAllocator<Run*, kAllocatorTagRosAlloc>>
      full_runs_[kNumOfSizeBrackets];
  // The set of free pages.
  AllocationTrackingSet<FreePageRun*, kAllocatorTagRosAlloc> free_page_runs_ GUARDED_BY(lock_);
  // The dedicated full run. It is always full and shared by all threads when revoking happens.
  // This is an optimization since it enables us to avoid a null check for revoked runs.
  static Run* dedicated_full_run_;
  // Using size_t to ensure that it is at least word aligned.
  static size_t dedicated_full_run_storage_[];
  // The current runs where the allocations are first attempted for
  // the size brackets that do not use thread-local
  // runs. current_runs_[i] is guarded by size_bracket_locks_[i].
  Run* current_runs_[kNumOfSizeBrackets];
  // The mutexes, one per size bracket.
  Mutex* size_bracket_locks_[kNumOfSizeBrackets];
  // Bracket lock names (since locks only have char* names).
  std::string size_bracket_lock_names_[kNumOfSizeBrackets];
  // The types of page map entries.
  enum PageMapKind {
    kPageMapReleased = 0,     // Zero and released back to the OS.
    kPageMapEmpty,            // Zero but probably dirty.
    kPageMapRun,              // The beginning of a run.
    kPageMapRunPart,          // The non-beginning part of a run.
    kPageMapLargeObject,      // The beginning of a large object.
    kPageMapLargeObjectPart,  // The non-beginning part of a large object.
  };
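  // For illustration: a run spanning three pages starting at page map index i is recorded as
  // kPageMapRun at i followed by kPageMapRunPart at i + 1 and i + 2; a large object spanning n
  // pages is recorded as kPageMapLargeObject followed by n - 1 kPageMapLargeObjectPart entries.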
  // The table that indicates what pages are currently used for.
  volatile uint8_t* page_map_;  // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
  size_t page_map_size_;
  size_t max_page_map_size_;
  std::unique_ptr<MemMap> page_map_mem_map_;

  // The table that indicates the size of free page runs. These sizes
  // are stored here instead of in the free page header so that the
  // backing pages can be released.
  std::vector<size_t, TrackingAllocator<size_t, kAllocatorTagRosAlloc>> free_page_run_size_map_
      GUARDED_BY(lock_);
  // The global lock. Used to guard the page map, the free page set,
  // and the footprint.
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // The reader-writer lock to allow one bulk free at a time while
  // allowing multiple individual frees at the same time. Also, this
  // is used to avoid race conditions between BulkFree() and
  // RevokeThreadLocalRuns() on the bulk free list.
  ReaderWriterMutex bulk_free_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // The page release mode.
  const PageReleaseMode page_release_mode_;
  // Under kPageReleaseModeSize(AndEnd), if the free page run size is
  // greater than or equal to this value, release pages.
  const size_t page_release_size_threshold_;

  // Whether this allocator is running under Valgrind or a similar memory tool.
  bool is_running_on_memory_tool_;

  // The base address of the memory region that's managed by this allocator.
  uint8_t* Begin() { return base_; }
  // The end address of the memory region that's managed by this allocator.
  uint8_t* End() { return base_ + capacity_; }

  // Page-granularity alloc/free.
  void* AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type)
      REQUIRES(lock_);
  // Returns how many bytes were freed.
  size_t FreePages(Thread* self, void* ptr, bool already_zero) REQUIRES(lock_);

  // Allocate/free a run slot.
  void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size,
                     size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);
  // Allocate/free a run slot without acquiring locks.
  // TODO: REQUIRES(Locks::mutator_lock_)
  void* AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated,
                                 size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);
  void* AllocFromCurrentRunUnlocked(Thread* self, size_t idx) REQUIRES(!lock_);

  // Returns the bracket size.
  size_t FreeFromRun(Thread* self, void* ptr, Run* run)
      REQUIRES(!lock_);

  // Used to allocate a new thread local run for a size bracket.
  Run* AllocRun(Thread* self, size_t idx) REQUIRES(!lock_);

  // Used to acquire a new/reused run for a size bracket. Used when a
  // thread-local or current run gets full.
  Run* RefillRun(Thread* self, size_t idx) REQUIRES(!lock_);

  // The internal implementation of non-bulk Free().
  size_t FreeInternal(Thread* self, void* ptr) REQUIRES(!lock_);

  // Allocates large objects.
  void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated,
                         size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);

  // Revoke a run by adding it to non_full_runs_ or freeing the pages.
  void RevokeRun(Thread* self, size_t idx, Run* run) REQUIRES(!lock_);

  // Revoke the current runs which share an index with the thread local runs.
  void RevokeThreadUnsafeCurrentRuns() REQUIRES(!lock_);

  // Release a range of pages.
  size_t ReleasePageRange(uint8_t* start, uint8_t* end) REQUIRES(lock_);

  // Dumps the page map for debugging.
  std::string DumpPageMap() REQUIRES(lock_);

 public:
  RosAlloc(void* base, size_t capacity, size_t max_capacity,
           PageReleaseMode page_release_mode,
           bool running_on_memory_tool,
           size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
  ~RosAlloc();

  static size_t RunFreeListOffset() {
    return OFFSETOF_MEMBER(Run, free_list_);
  }
  static size_t RunFreeListHeadOffset() {
    return OFFSETOF_MEMBER(SlotFreeList<false>, head_);
  }
  static size_t RunFreeListSizeOffset() {
    return OFFSETOF_MEMBER(SlotFreeList<false>, size_);
  }
  static size_t RunSlotNextOffset() {
    return OFFSETOF_MEMBER(Slot, next_);
  }

  // If kThreadSafe is false then the allocator may avoid acquiring some locks as an optimization.
  // If used, this may cause race conditions if multiple threads are allocating at the same time.
  template<bool kThreadSafe = true>
  void* Alloc(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size,
              size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);
  size_t Free(Thread* self, void* ptr)
      REQUIRES(!bulk_free_lock_, !lock_);
  size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
      REQUIRES(!bulk_free_lock_, !lock_);

  // Returns true if the given allocation request can be allocated in
  // an existing thread local run without allocating a new run.
  ALWAYS_INLINE bool CanAllocFromThreadLocalRun(Thread* self, size_t size);
  // Allocate the given allocation request in an existing thread local
  // run without allocating a new run.
  ALWAYS_INLINE void* AllocFromThreadLocalRun(Thread* self, size_t size, size_t* bytes_allocated);

  // Returns the maximum bytes that could be allocated for the given
  // size in bulk, that is the maximum value for the
  // bytes_tl_bulk_allocated out param returned by RosAlloc::Alloc().
  ALWAYS_INLINE size_t MaxBytesBulkAllocatedFor(size_t size);

  // Returns the size of the allocated slot for a given allocated memory chunk.
  size_t UsableSize(const void* ptr) REQUIRES(!lock_);
  // Returns the size of the allocated slot for a given size.
  size_t UsableSize(size_t bytes) {
    if (UNLIKELY(bytes > kLargeSizeThreshold)) {
      return RoundUp(bytes, kPageSize);
    } else {
      return RoundToBracketSize(bytes);
    }
  }
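  // For illustration (assuming 4 KiB pages): UsableSize(100) returns 104, the size of the
  // 104-byte thread-local bracket, while UsableSize(3000) exceeds kLargeSizeThreshold and
  // returns 4096.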
  // Try to reduce the current footprint by releasing the free page
  // run at the end of the memory region, if any.
  bool Trim() REQUIRES(!lock_);
  // Iterates over all the memory slots and applies the given function.
  void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                  void* arg)
      REQUIRES(!lock_);

  // Release empty pages.
  size_t ReleasePages() REQUIRES(!lock_);
  // Returns the current footprint.
  size_t Footprint() REQUIRES(!lock_);
  // Returns the current capacity, i.e. the maximum footprint.
  size_t FootprintLimit() REQUIRES(!lock_);
  // Update the current capacity.
  void SetFootprintLimit(size_t bytes) REQUIRES(!lock_);

  // Releases the thread-local runs assigned to the given thread back to the common set of runs.
  // Returns the total bytes of free slots in the revoked thread local runs. This is to be
  // subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting.
  size_t RevokeThreadLocalRuns(Thread* thread) REQUIRES(!lock_, !bulk_free_lock_);
  // Releases the thread-local runs assigned to all the threads back to the common set of runs.
  // Returns the total bytes of free slots in the revoked thread local runs. This is to be
  // subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting.
  size_t RevokeAllThreadLocalRuns() REQUIRES(!Locks::thread_list_lock_, !lock_, !bulk_free_lock_);
  // Assert the thread local runs of a thread are revoked.
  void AssertThreadLocalRunsAreRevoked(Thread* thread) REQUIRES(!bulk_free_lock_);
  // Assert all the thread local runs are revoked.
  void AssertAllThreadLocalRunsAreRevoked() REQUIRES(!Locks::thread_list_lock_, !bulk_free_lock_);

  static Run* GetDedicatedFullRun() {
    return dedicated_full_run_;
  }
  bool IsFreePage(size_t idx) const {
    DCHECK_LT(idx, capacity_ / kPageSize);
    uint8_t pm_type = page_map_[idx];
    return pm_type == kPageMapReleased || pm_type == kPageMapEmpty;
  }

  // Callbacks for InspectAll that will count the number of bytes
  // allocated and objects allocated, respectively.
  static void BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
  static void ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);

  bool DoesReleaseAllPages() const {
    return page_release_mode_ == kPageReleaseModeAll;
  }

  // Verify for debugging.
  void Verify() REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !bulk_free_lock_,
                         !lock_);

  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes)
      REQUIRES(!bulk_free_lock_, !lock_);

  void DumpStats(std::ostream& os)
      REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_) REQUIRES(!bulk_free_lock_);

 private:
  friend std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);

  DISALLOW_COPY_AND_ASSIGN(RosAlloc);
};
std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);

// Callback from rosalloc when it needs to increase the footprint. Must be implemented somewhere
// else (currently rosalloc_space.cc).
void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment);

}  // namespace allocator
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_