/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_
#define ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_

#include <stdint.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <memory>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>

#include "base/allocator.h"
#include "base/bit_utils.h"
#include "base/mutex.h"
#include "base/logging.h"
#include "globals.h"
#include "thread.h"

namespace art {

class MemMap;

namespace gc {
namespace allocator {

// A runs-of-slots memory allocator.
class RosAlloc {
 private:
  // Represents a run of free pages.
  class FreePageRun {
   public:
    uint8_t magic_num_;  // The magic number used for debugging only.

    bool IsFree() const {
      return !kIsDebugBuild || magic_num_ == kMagicNumFree;
    }
    size_t ByteSize(RosAlloc* rosalloc) const REQUIRES(rosalloc->lock_) {
      const uint8_t* fpr_base = reinterpret_cast<const uint8_t*>(this);
      size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
      size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
      DCHECK_GE(byte_size, static_cast<size_t>(0));
      DCHECK_ALIGNED(byte_size, kPageSize);
      return byte_size;
    }
    void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
        REQUIRES(rosalloc->lock_) {
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
      size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
      rosalloc->free_page_run_size_map_[pm_idx] = byte_size;
    }
    void* Begin() {
      return reinterpret_cast<void*>(this);
    }
    void* End(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
      uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
      uint8_t* end = fpr_base + ByteSize(rosalloc);
      return end;
    }
    bool IsLargerThanPageReleaseThreshold(RosAlloc* rosalloc)
        REQUIRES(rosalloc->lock_) {
      return ByteSize(rosalloc) >= rosalloc->page_release_size_threshold_;
    }
    bool IsAtEndOfSpace(RosAlloc* rosalloc)
        REQUIRES(rosalloc->lock_) {
      return reinterpret_cast<uint8_t*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
    }
    bool ShouldReleasePages(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
      switch (rosalloc->page_release_mode_) {
        case kPageReleaseModeNone:
          return false;
        case kPageReleaseModeEnd:
          return IsAtEndOfSpace(rosalloc);
        case kPageReleaseModeSize:
          return IsLargerThanPageReleaseThreshold(rosalloc);
        case kPageReleaseModeSizeAndEnd:
          return IsLargerThanPageReleaseThreshold(rosalloc) && IsAtEndOfSpace(rosalloc);
        case kPageReleaseModeAll:
          return true;
        default:
          LOG(FATAL) << "Unexpected page release mode ";
          return false;
      }
    }
    void ReleasePages(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
      uint8_t* start = reinterpret_cast<uint8_t*>(this);
      size_t byte_size = ByteSize(rosalloc);
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      if (ShouldReleasePages(rosalloc)) {
        rosalloc->ReleasePageRange(start, start + byte_size);
      }
    }

   private:
    DISALLOW_COPY_AND_ASSIGN(FreePageRun);
  };

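  // A hedged illustration (not from the source): if a FreePageRun starts at base_ + 8 * kPageSize
  // and spans three pages, its page map index is 8, free_page_run_size_map_[8] holds
  // 3 * kPageSize, ByteSize() returns that value, and End() returns base_ + 11 * kPageSize.
  // ReleasePages() would then hand that whole range to ReleasePageRange() if
  // ShouldReleasePages() agrees.
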
  // The slot header.
  class Slot {
   public:
    Slot* Next() const {
      return next_;
    }
    void SetNext(Slot* next) {
      next_ = next;
    }
    // The slot right before this slot in terms of the address.
    Slot* Left(size_t bracket_size) {
      return reinterpret_cast<Slot*>(reinterpret_cast<uintptr_t>(this) - bracket_size);
    }
    void Clear() {
      next_ = nullptr;
    }

   private:
    Slot* next_;  // Next slot in the list.
    friend class RosAlloc;
  };

  // We use the tail (kUseTail == true) for the bulk or thread-local free lists to avoid the need to
  // traverse the list from the head to the tail when merging free lists.
  // We don't use the tail (kUseTail == false) for the free list to avoid the need to manage the
  // tail in the allocation fast path for a performance reason.
  template<bool kUseTail = true>
  class SlotFreeList {
   public:
    SlotFreeList() : head_(0U), tail_(0), size_(0) {}
    Slot* Head() const {
      return reinterpret_cast<Slot*>(head_);
    }
    Slot* Tail() const {
      CHECK(kUseTail);
      return reinterpret_cast<Slot*>(tail_);
    }
    size_t Size() const {
      return size_;
    }
    // Removes from the head of the free list.
    Slot* Remove() {
      Slot* slot;
      if (kIsDebugBuild) {
        Verify();
      }
      Slot** headp = reinterpret_cast<Slot**>(&head_);
      Slot** tailp = kUseTail ? reinterpret_cast<Slot**>(&tail_) : nullptr;
      Slot* old_head = *headp;
      if (old_head == nullptr) {
        // List was empty.
        if (kUseTail) {
          DCHECK(*tailp == nullptr);
        }
        return nullptr;
      } else {
        // List wasn't empty.
        if (kUseTail) {
          DCHECK(*tailp != nullptr);
        }
        Slot* old_head_next = old_head->Next();
        slot = old_head;
        *headp = old_head_next;
        if (kUseTail && old_head_next == nullptr) {
          // List becomes empty.
          *tailp = nullptr;
        }
      }
      slot->Clear();
      --size_;
      if (kIsDebugBuild) {
        Verify();
      }
      return slot;
    }
    void Add(Slot* slot) {
      if (kIsDebugBuild) {
        Verify();
      }
      DCHECK(slot != nullptr);
      Slot** headp = reinterpret_cast<Slot**>(&head_);
      Slot** tailp = kUseTail ? reinterpret_cast<Slot**>(&tail_) : nullptr;
      Slot* old_head = *headp;
      if (old_head == nullptr) {
        // List was empty.
        if (kUseTail) {
          DCHECK(*tailp == nullptr);
        }
        *headp = slot;
        if (kUseTail) {
          *tailp = slot;
        }
      } else {
        // List wasn't empty.
        if (kUseTail) {
          DCHECK(*tailp != nullptr);
        }
        *headp = slot;
        slot->SetNext(old_head);
      }
      ++size_;
      if (kIsDebugBuild) {
        Verify();
      }
    }
    // Merge the given list into this list. Empty the given list.
    // Deliberately support only a kUseTail == true SlotFreeList parameter because 1) we don't
    // currently have a situation where we need a kUseTail == false SlotFreeList parameter, and 2)
    // supporting the kUseTail == false parameter would require an O(n) linked list traversal to do
    // the merge if 'this' SlotFreeList has kUseTail == false, which we'd like to avoid.
    void Merge(SlotFreeList<true>* list) {
      if (kIsDebugBuild) {
        Verify();
        CHECK(list != nullptr);
        list->Verify();
      }
      if (list->Size() == 0) {
        return;
      }
      Slot** headp = reinterpret_cast<Slot**>(&head_);
      Slot** tailp = kUseTail ? reinterpret_cast<Slot**>(&tail_) : nullptr;
      Slot* old_head = *headp;
      if (old_head == nullptr) {
        // List was empty.
        *headp = list->Head();
        if (kUseTail) {
          *tailp = list->Tail();
        }
        size_ = list->Size();
      } else {
        // List wasn't empty.
        DCHECK(list->Head() != nullptr);
        *headp = list->Head();
        DCHECK(list->Tail() != nullptr);
        list->Tail()->SetNext(old_head);
        // If kUseTail, no change to tailp.
        size_ += list->Size();
      }
      list->Reset();
      if (kIsDebugBuild) {
        Verify();
      }
    }

    void Reset() {
      head_ = 0;
      if (kUseTail) {
        tail_ = 0;
      }
      size_ = 0;
    }

    void Verify() {
      Slot* head = reinterpret_cast<Slot*>(head_);
      Slot* tail = kUseTail ? reinterpret_cast<Slot*>(tail_) : nullptr;
      if (size_ == 0) {
        CHECK(head == nullptr);
        if (kUseTail) {
          CHECK(tail == nullptr);
        }
      } else {
        CHECK(head != nullptr);
        if (kUseTail) {
          CHECK(tail != nullptr);
        }
        size_t count = 0;
        for (Slot* slot = head; slot != nullptr; slot = slot->Next()) {
          ++count;
          if (kUseTail && slot->Next() == nullptr) {
            CHECK_EQ(slot, tail);
          }
        }
        CHECK_EQ(size_, count);
      }
    }

   private:
    // A pointer (Slot*) to the head of the list. Always 8 bytes so that we will have the same
    // layout between 32 bit and 64 bit, which is not strictly necessary, but we do so for
    // 1) uniformity, 2) so we won't need to change this code if we move to a non-low 4G heap in
    // the future, and 3) because the space savings from using 32 bit fields on 32 bit targets
    // would be lost in the noise (they wouldn't open up enough space for an extra slot).
    uint64_t head_;
    // A pointer (Slot*) to the tail of the list. Always 8 bytes so that we will have the same
    // layout between 32 bit and 64 bit. The tail is stored to speed up merging of lists.
    // Unused if kUseTail is false.
    uint64_t tail_;
    // The number of slots in the list. This is used to make it fast to check if a free list is all
    // free without traversing the whole free list.
    uint32_t size_;
    uint32_t padding_ ATTRIBUTE_UNUSED;
    friend class RosAlloc;
  };

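  // A hedged usage sketch (not from the source; slot_a, slot_b and slot_c are hypothetical Slot
  // pointers) of how the free list above behaves: Add() pushes at the head, Remove() pops from
  // the head, and Merge() prepends the donor list in O(1) by linking the donor's tail to the
  // current head.
  //
  //   SlotFreeList<true> bulk;        // e.g. a bulk free list (tracks its tail)
  //   bulk.Add(slot_a);               // bulk: a
  //   bulk.Add(slot_b);               // bulk: b -> a
  //   SlotFreeList<false> free_list;  // e.g. a run's allocation free list (tailless)
  //   free_list.Add(slot_c);          // free_list: c
  //   free_list.Merge(&bulk);         // free_list: b -> a -> c; bulk is now empty
  //   Slot* s = free_list.Remove();   // returns slot_b; free_list: a -> c
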
  // Represents a run of memory slots of the same size.
  //
  // A run's memory layout:
  //
  // +-------------------+
  // | magic_num         |
  // +-------------------+
  // | size_bracket_idx  |
  // +-------------------+
  // | is_thread_local   |
  // +-------------------+
  // | to_be_bulk_freed  |
  // +-------------------+
  // |                   |
  // | free list         |
  // |                   |
  // +-------------------+
  // |                   |
  // | bulk free list    |
  // |                   |
  // +-------------------+
  // |                   |
  // | thread-local free |
  // | list              |
  // |                   |
  // +-------------------+
  // | padding due to    |
  // | alignment         |
  // +-------------------+
  // | slot 0            |
  // +-------------------+
  // | slot 1            |
  // +-------------------+
  // | slot 2            |
  // +-------------------+
  // ...
  // +-------------------+
  // | last slot         |
  // +-------------------+
  //
  class Run {
   public:
    uint8_t magic_num_;                 // The magic number used for debugging.
    uint8_t size_bracket_idx_;          // The index of the size bracket of this run.
    uint8_t is_thread_local_;           // True if this run is used as a thread-local run.
    uint8_t to_be_bulk_freed_;          // Used within BulkFree() to flag a run that's involved with a bulk free.
    uint32_t padding_ ATTRIBUTE_UNUSED;
    // Use a tailless free list for free_list_ so that the alloc fast path does not manage the tail.
    SlotFreeList<false> free_list_;
    SlotFreeList<true> bulk_free_list_;
    SlotFreeList<true> thread_local_free_list_;
    // Padding due to alignment
    // Slot 0
    // Slot 1
    // ...

    // Returns the byte size of the header.
    static size_t fixed_header_size() {
      return sizeof(Run);
    }
    Slot* FirstSlot() const {
      const uint8_t idx = size_bracket_idx_;
      return reinterpret_cast<Slot*>(reinterpret_cast<uintptr_t>(this) + headerSizes[idx]);
    }
    Slot* LastSlot() {
      const uint8_t idx = size_bracket_idx_;
      const size_t bracket_size = bracketSizes[idx];
      uintptr_t end = reinterpret_cast<uintptr_t>(End());
      Slot* last_slot = reinterpret_cast<Slot*>(end - bracket_size);
      DCHECK_LE(FirstSlot(), last_slot);
      return last_slot;
    }
    SlotFreeList<false>* FreeList() {
      return &free_list_;
    }
    SlotFreeList<true>* BulkFreeList() {
      return &bulk_free_list_;
    }
    SlotFreeList<true>* ThreadLocalFreeList() {
      return &thread_local_free_list_;
    }
    void* End() {
      return reinterpret_cast<uint8_t*>(this) + kPageSize * numOfPages[size_bracket_idx_];
    }
    void SetIsThreadLocal(bool is_thread_local) {
      is_thread_local_ = is_thread_local ? 1 : 0;
    }
    bool IsThreadLocal() const {
      return is_thread_local_ != 0;
    }
    // Set up the free list for a new/empty run.
    void InitFreeList() {
      const uint8_t idx = size_bracket_idx_;
      const size_t bracket_size = bracketSizes[idx];
      Slot* first_slot = FirstSlot();
      // Add backwards so the first slot is at the head of the list.
      for (Slot* slot = LastSlot(); slot >= first_slot; slot = slot->Left(bracket_size)) {
        free_list_.Add(slot);
      }
    }
    // Merge the thread-local free list into the free list. Used when a thread-local run becomes
    // full.
    bool MergeThreadLocalFreeListToFreeList(bool* is_all_free_after_out);
    // Merge the bulk free list into the free list. Used in a bulk free.
    void MergeBulkFreeListToFreeList();
    // Merge the bulk free list into the thread-local free list. In a bulk free, as a two-step
    // process, GC will first record all the slots to free in a run in the bulk free list where it
    // can write without a lock, and later acquire a lock once per run to merge the bulk free list
    // into the thread-local free list.
    void MergeBulkFreeListToThreadLocalFreeList();
    // Allocates a slot in a run.
    ALWAYS_INLINE void* AllocSlot();
    // Frees a slot in a run. This is used in a non-bulk free.
    void FreeSlot(void* ptr);
    // Add the given slot to the bulk free list. Returns the bracket size.
    size_t AddToBulkFreeList(void* ptr);
    // Add the given slot to the thread-local free list.
    void AddToThreadLocalFreeList(void* ptr);
    // Returns true if none of the slots in the run are in use.
    bool IsAllFree() const {
      return free_list_.Size() == numOfSlots[size_bracket_idx_];
    }
    // Returns the number of free slots.
    size_t NumberOfFreeSlots() {
      return free_list_.Size();
    }
    // Returns true if all the slots in the run are in use.
    ALWAYS_INLINE bool IsFull();
    // Returns true if the bulk free list is empty.
    bool IsBulkFreeListEmpty() const {
      return bulk_free_list_.Size() == 0;
    }
    // Returns true if the thread-local free list is empty.
    bool IsThreadLocalFreeListEmpty() const {
      return thread_local_free_list_.Size() == 0;
    }
    // Zero the run's data.
    void ZeroData();
    // Zero the run's header and the slot headers.
    void ZeroHeaderAndSlotHeaders();
    // Iterate over all the slots and apply the given function.
    void InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg), void* arg);
    // Dump the run metadata for debugging.
    std::string Dump();
    // Verify for debugging.
    void Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool)
        REQUIRES(Locks::mutator_lock_)
        REQUIRES(Locks::thread_list_lock_);

   private:
    // The common part of AddToBulkFreeList() and AddToThreadLocalFreeList(). Returns the bracket
    // size.
    size_t AddToFreeListShared(void* ptr, SlotFreeList<true>* free_list, const char* caller_name);
    // Turns a FreeList into a string for debugging.
    template<bool kUseTail>
    std::string FreeListToStr(SlotFreeList<kUseTail>* free_list);
    // Check that a given pointer is a valid slot address and return it as a Slot*.
    Slot* ToSlot(void* ptr) {
      const uint8_t idx = size_bracket_idx_;
      const size_t bracket_size = bracketSizes[idx];
      const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
          - reinterpret_cast<uint8_t*>(FirstSlot());
      DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
      size_t slot_idx = offset_from_slot_base / bracket_size;
      DCHECK_LT(slot_idx, numOfSlots[idx]);
      return reinterpret_cast<Slot*>(ptr);
    }
    size_t SlotIndex(Slot* slot) const {
      const uint8_t idx = size_bracket_idx_;
      const size_t bracket_size = bracketSizes[idx];
      const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(slot)
          - reinterpret_cast<uint8_t*>(FirstSlot());
      DCHECK_EQ(offset_from_slot_base % bracket_size, 0U);
      size_t slot_idx = offset_from_slot_base / bracket_size;
      DCHECK_LT(slot_idx, numOfSlots[idx]);
      return slot_idx;
    }

    // TODO: DISALLOW_COPY_AND_ASSIGN(Run);
  };

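  // A hedged summary, inferred from the comments above rather than stated as a single list in
  // the source, of how the three free lists in a Run cooperate:
  //   - free_list_:              popped by AllocSlot() on the allocation fast path (tailless).
  //   - thread_local_free_list_: collects frees on a thread-local run and is merged back into
  //                              free_list_ by MergeThreadLocalFreeListToFreeList() when the
  //                              thread-local run becomes full.
  //   - bulk_free_list_:         written without a lock by the GC during BulkFree(), then merged
  //                              under a per-run lock by MergeBulkFreeListToThreadLocalFreeList()
  //                              or MergeBulkFreeListToFreeList().
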
  // The magic number for a run.
  static constexpr uint8_t kMagicNum = 42;
  // The magic number for free pages.
  static constexpr uint8_t kMagicNumFree = 43;
  // The number of size brackets.
  static constexpr size_t kNumOfSizeBrackets = 42;
  // The sizes (the slot sizes, in bytes) of the size brackets.
  static size_t bracketSizes[kNumOfSizeBrackets];
  // The numbers of pages that are used for runs for each size bracket.
  static size_t numOfPages[kNumOfSizeBrackets];
  // The numbers of slots of the runs for each size bracket.
  static size_t numOfSlots[kNumOfSizeBrackets];
  // The header sizes in bytes of the runs for each size bracket.
  static size_t headerSizes[kNumOfSizeBrackets];

  // Initialize the run specs (the above arrays).
  static void Initialize();
  static bool initialized_;

  // Returns the byte size of the bracket size from the index.
  static size_t IndexToBracketSize(size_t idx) {
    DCHECK_LT(idx, kNumOfSizeBrackets);
    return bracketSizes[idx];
  }
  // Returns the index of the size bracket from the bracket size.
  static size_t BracketSizeToIndex(size_t size) {
    DCHECK(8 <= size &&
           ((size <= kMaxThreadLocalBracketSize && size % kThreadLocalBracketQuantumSize == 0) ||
            (size <= kMaxRegularBracketSize && size % kBracketQuantumSize == 0) ||
            size == 1 * KB || size == 2 * KB));
    size_t idx;
    if (UNLIKELY(size == 1 * KB)) {
      idx = kNumOfSizeBrackets - 2;
    } else if (UNLIKELY(size == 2 * KB)) {
      idx = kNumOfSizeBrackets - 1;
    } else if (LIKELY(size <= kMaxThreadLocalBracketSize)) {
      DCHECK_EQ(size % kThreadLocalBracketQuantumSize, 0U);
      idx = size / kThreadLocalBracketQuantumSize - 1;
    } else {
      DCHECK(size <= kMaxRegularBracketSize);
      DCHECK_EQ((size - kMaxThreadLocalBracketSize) % kBracketQuantumSize, 0U);
      idx = ((size - kMaxThreadLocalBracketSize) / kBracketQuantumSize - 1)
          + kNumThreadLocalSizeBrackets;
    }
    DCHECK(bracketSizes[idx] == size);
    return idx;
  }
  // Returns true if the given allocation size is for a thread-local allocation.
  static bool IsSizeForThreadLocal(size_t size) {
    bool is_size_for_thread_local = size <= kMaxThreadLocalBracketSize;
    DCHECK(size > kLargeSizeThreshold ||
           (is_size_for_thread_local == (SizeToIndex(size) < kNumThreadLocalSizeBrackets)));
    return is_size_for_thread_local;
  }
  // Rounds up the size to the nearest bracket size.
  static size_t RoundToBracketSize(size_t size) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= kMaxThreadLocalBracketSize)) {
      return RoundUp(size, kThreadLocalBracketQuantumSize);
    } else if (size <= kMaxRegularBracketSize) {
      return RoundUp(size, kBracketQuantumSize);
    } else if (UNLIKELY(size <= 1 * KB)) {
      return 1 * KB;
    } else {
      DCHECK_LE(size, 2 * KB);
      return 2 * KB;
    }
  }
  // Returns the size bracket index from the byte size with rounding.
  static size_t SizeToIndex(size_t size) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= kMaxThreadLocalBracketSize)) {
      return RoundUp(size, kThreadLocalBracketQuantumSize) / kThreadLocalBracketQuantumSize - 1;
    } else if (size <= kMaxRegularBracketSize) {
      return (RoundUp(size, kBracketQuantumSize) - kMaxThreadLocalBracketSize) / kBracketQuantumSize
          - 1 + kNumThreadLocalSizeBrackets;
    } else if (size <= 1 * KB) {
      return kNumOfSizeBrackets - 2;
    } else {
      DCHECK_LE(size, 2 * KB);
      return kNumOfSizeBrackets - 1;
    }
  }
  // A combination of SizeToIndex() and RoundToBracketSize().
  static size_t SizeToIndexAndBracketSize(size_t size, size_t* bracket_size_out) {
    DCHECK(size <= kLargeSizeThreshold);
    size_t idx;
    size_t bracket_size;
    if (LIKELY(size <= kMaxThreadLocalBracketSize)) {
      bracket_size = RoundUp(size, kThreadLocalBracketQuantumSize);
      idx = bracket_size / kThreadLocalBracketQuantumSize - 1;
    } else if (size <= kMaxRegularBracketSize) {
      bracket_size = RoundUp(size, kBracketQuantumSize);
      idx = ((bracket_size - kMaxThreadLocalBracketSize) / kBracketQuantumSize - 1)
          + kNumThreadLocalSizeBrackets;
    } else if (size <= 1 * KB) {
      bracket_size = 1 * KB;
      idx = kNumOfSizeBrackets - 2;
    } else {
      DCHECK(size <= 2 * KB);
      bracket_size = 2 * KB;
      idx = kNumOfSizeBrackets - 1;
    }
    DCHECK_EQ(idx, SizeToIndex(size)) << idx;
    DCHECK_EQ(bracket_size, IndexToBracketSize(idx)) << idx;
    DCHECK_EQ(bracket_size, bracketSizes[idx]) << idx;
    DCHECK_LE(size, bracket_size) << idx;
    DCHECK(size > kMaxRegularBracketSize ||
           (size <= kMaxThreadLocalBracketSize &&
            bracket_size - size < kThreadLocalBracketQuantumSize) ||
           (size <= kMaxRegularBracketSize && bracket_size - size < kBracketQuantumSize)) << idx;
    *bracket_size_out = bracket_size;
    return idx;
  }

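  // A hedged worked example, derived from the functions above and the bracket constants declared
  // below (8-byte quantum up to 128 bytes, 16-byte quantum up to 512 bytes, then 1 KB and 2 KB):
  //   size 100  -> bracket_size = RoundUp(100, 8)  = 104,  idx = 104 / 8 - 1 = 12
  //   size 200  -> bracket_size = RoundUp(200, 16) = 208,  idx = (208 - 128) / 16 - 1 + 16 = 20
  //   size 700  -> bracket_size = 1 KB,                    idx = kNumOfSizeBrackets - 2 = 40
  //   size 1500 -> bracket_size = 2 KB,                    idx = kNumOfSizeBrackets - 1 = 41
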
  // Returns the page map index from an address. Requires that the
  // address is page size aligned.
  size_t ToPageMapIndex(const void* addr) const {
    DCHECK_LE(base_, addr);
    DCHECK_LT(addr, base_ + capacity_);
    size_t byte_offset = reinterpret_cast<const uint8_t*>(addr) - base_;
    DCHECK_EQ(byte_offset % static_cast<size_t>(kPageSize), static_cast<size_t>(0));
    return byte_offset / kPageSize;
  }
  // Returns the page map index from an address with rounding down.
  size_t RoundDownToPageMapIndex(const void* addr) const {
    DCHECK(base_ <= addr && addr < reinterpret_cast<uint8_t*>(base_) + capacity_);
    return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / kPageSize;
  }

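  // A small hedged illustration (not from the source): for addr == base_ + 3 * kPageSize + 100,
  // RoundDownToPageMapIndex(addr) returns 3, whereas ToPageMapIndex() may only be given
  // page-aligned addresses such as base_ + 3 * kPageSize, for which it also returns 3.
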
  // A memory allocation request larger than this size is treated as a large object and allocated
  // at page granularity.
  static const size_t kLargeSizeThreshold = 2048;

  // If true, check that the returned memory is actually zero.
  static constexpr bool kCheckZeroMemory = kIsDebugBuild;
  // Valgrind protects memory, so do not check memory when running under valgrind. In a normal
  // build with kCheckZeroMemory the whole test should be optimized away.
  // TODO: Unprotect before checks.
  ALWAYS_INLINE bool ShouldCheckZeroMemory();

  // If true, log verbose details of operations.
  static constexpr bool kTraceRosAlloc = false;

  struct hash_run {
    size_t operator()(const RosAlloc::Run* r) const {
      return reinterpret_cast<size_t>(r);
    }
  };

  struct eq_run {
    bool operator()(const RosAlloc::Run* r1, const RosAlloc::Run* r2) const {
      return r1 == r2;
    }
  };

 public:
  // Different page release modes.
  enum PageReleaseMode {
    kPageReleaseModeNone,         // Release no empty pages.
    kPageReleaseModeEnd,          // Release empty pages at the end of the space.
    kPageReleaseModeSize,         // Release empty pages that are larger than the threshold.
    kPageReleaseModeSizeAndEnd,   // Release empty pages that are larger than the threshold or
                                  // at the end of the space.
    kPageReleaseModeAll,          // Release all empty pages.
  };

  // The default value for page_release_size_threshold_.
  static constexpr size_t kDefaultPageReleaseSizeThreshold = 4 * MB;

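  // A hedged illustration (not from the source) of how the mode and threshold interact in
  // FreePageRun::ShouldReleasePages(): with kPageReleaseModeSizeAndEnd and the default 4 MB
  // threshold, a 5 MB free page run is released only if it also ends at base_ + footprint_;
  // with kPageReleaseModeSize it would be released regardless of its position, and with
  // kPageReleaseModeNone it would never be released.
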
  // We use thread-local runs for the size brackets whose indexes
  // are less than this index. We use shared (current) runs for the rest.
  // Sync this with the length of Thread::rosalloc_runs_.
  static const size_t kNumThreadLocalSizeBrackets = 16;
  static_assert(kNumThreadLocalSizeBrackets == kNumRosAllocThreadLocalSizeBracketsInThread,
                "Mismatch between kNumThreadLocalSizeBrackets and "
                "kNumRosAllocThreadLocalSizeBracketsInThread");

  // The size of the largest bracket we use thread-local runs for.
  // This should be equal to bracketSizes[kNumThreadLocalSizeBrackets - 1].
  static const size_t kMaxThreadLocalBracketSize = 128;

  // We use regular (8 or 16-byte increment) runs for the size brackets whose indexes are less than
  // this index.
  static const size_t kNumRegularSizeBrackets = 40;

  // The size of the largest regular (8 or 16-byte increment) bracket. Non-regular brackets are the
  // 1 KB and the 2 KB brackets. This should be equal to bracketSizes[kNumRegularSizeBrackets - 1].
  static const size_t kMaxRegularBracketSize = 512;

  // The bracket size increment for the thread-local brackets (<= kMaxThreadLocalBracketSize bytes).
  static constexpr size_t kThreadLocalBracketQuantumSize = 8;

  // Equal to Log2(kThreadLocalBracketQuantumSize).
  static constexpr size_t kThreadLocalBracketQuantumSizeShift = 3;

  // The bracket size increment for the non-thread-local, regular brackets (of size <=
  // kMaxRegularBracketSize bytes and > kMaxThreadLocalBracketSize bytes).
  static constexpr size_t kBracketQuantumSize = 16;

  // Equal to Log2(kBracketQuantumSize).
  static constexpr size_t kBracketQuantumSizeShift = 4;

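  // Putting the constants above together, the bracket table works out as follows (a derived
  // summary, not a table from the source):
  //   brackets  0-15: thread-local, slot sizes 8, 16, ..., 128 (8-byte steps)
  //   brackets 16-39: regular,      slot sizes 144, 160, ..., 512 (16-byte steps)
  //   bracket     40: 1 KB slots
  //   bracket     41: 2 KB slots
  // for a total of kNumOfSizeBrackets == 42; requests larger than kLargeSizeThreshold (2048
  // bytes) bypass runs entirely and are allocated at page granularity.
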
 private:
  // The base address of the memory region that's managed by this allocator.
  uint8_t* base_;

  // The footprint in bytes of the currently allocated portion of the
  // memory region.
  size_t footprint_;

  // The maximum footprint. The address, base_ + capacity_, indicates
  // the end of the memory region that's currently managed by this allocator.
  size_t capacity_;

  // The maximum capacity. The address, base_ + max_capacity_, indicates
  // the end of the memory region that's ever managed by this allocator.
  size_t max_capacity_;

  // The run sets that hold the runs whose slots are not all
  // full. non_full_runs_[i] is guarded by size_bracket_locks_[i].
  AllocationTrackingSet<Run*, kAllocatorTagRosAlloc> non_full_runs_[kNumOfSizeBrackets];
  // The run sets that hold the runs whose slots are all full. This is
  // debug only. full_runs_[i] is guarded by size_bracket_locks_[i].
  std::unordered_set<Run*, hash_run, eq_run, TrackingAllocator<Run*, kAllocatorTagRosAlloc>>
      full_runs_[kNumOfSizeBrackets];
  // The set of free pages.
  AllocationTrackingSet<FreePageRun*, kAllocatorTagRosAlloc> free_page_runs_ GUARDED_BY(lock_);
  // The dedicated full run. It is always full and shared by all threads when revoking happens.
  // This is an optimization since it enables us to avoid a null check for revoked runs.
  static Run* dedicated_full_run_;
  // Using size_t to ensure that it is at least word aligned.
  static size_t dedicated_full_run_storage_[];
  // The current runs where the allocations are first attempted for
  // the size brackets that do not use thread-local
  // runs. current_runs_[i] is guarded by size_bracket_locks_[i].
  Run* current_runs_[kNumOfSizeBrackets];
  // The mutexes, one per size bracket.
  Mutex* size_bracket_locks_[kNumOfSizeBrackets];
  // Bracket lock names (since locks only have char* names).
  std::string size_bracket_lock_names_[kNumOfSizeBrackets];
  // The types of page map entries.
  enum PageMapKind {
    kPageMapReleased = 0,     // Zero and released back to the OS.
    kPageMapEmpty,            // Zero but probably dirty.
    kPageMapRun,              // The beginning of a run.
    kPageMapRunPart,          // The non-beginning part of a run.
    kPageMapLargeObject,      // The beginning of a large object.
    kPageMapLargeObjectPart,  // The non-beginning part of a large object.
  };
  // The table that indicates what pages are currently used for.
  volatile uint8_t* page_map_;  // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
  size_t page_map_size_;
  size_t max_page_map_size_;
  std::unique_ptr<MemMap> page_map_mem_map_;

  // The table that indicates the size of free page runs. These sizes
  // are stored here, rather than in the free page headers themselves, so that
  // the backing pages can be released.
  std::vector<size_t, TrackingAllocator<size_t, kAllocatorTagRosAlloc>> free_page_run_size_map_
      GUARDED_BY(lock_);
  // The global lock. Used to guard the page map, the free page set,
  // and the footprint.
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // The reader-writer lock to allow one bulk free at a time while
  // allowing multiple individual frees at the same time. Also, this
  // is used to avoid race conditions between BulkFree() and
  // RevokeThreadLocalRuns() on the bulk free list.
  ReaderWriterMutex bulk_free_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // The page release mode.
  const PageReleaseMode page_release_mode_;
  // Under kPageReleaseModeSize(AndEnd), if the free page run size is
  // greater than or equal to this value, release pages.
  const size_t page_release_size_threshold_;

  // Whether this allocator is running under a memory tool such as Valgrind.
  bool is_running_on_memory_tool_;

  // The base address of the memory region that's managed by this allocator.
  uint8_t* Begin() { return base_; }
  // The end address of the memory region that's managed by this allocator.
  uint8_t* End() { return base_ + capacity_; }

  // Page-granularity alloc/free
  void* AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type)
      REQUIRES(lock_);
  // Returns how many bytes were freed.
  size_t FreePages(Thread* self, void* ptr, bool already_zero) REQUIRES(lock_);

  // Allocate/free a run slot.
  void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size,
                     size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);
  // Allocate/free a run slot without acquiring locks.
  // TODO: REQUIRES(Locks::mutator_lock_)
  void* AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated,
                                 size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);
  void* AllocFromCurrentRunUnlocked(Thread* self, size_t idx) REQUIRES(!lock_);

  // Returns the bracket size.
  size_t FreeFromRun(Thread* self, void* ptr, Run* run)
      REQUIRES(!lock_);

  // Used to allocate a new thread-local run for a size bracket.
  Run* AllocRun(Thread* self, size_t idx) REQUIRES(!lock_);

  // Used to acquire a new/reused run for a size bracket. Used when a
  // thread-local or current run gets full.
  Run* RefillRun(Thread* self, size_t idx) REQUIRES(!lock_);

  // The internal of non-bulk Free().
  size_t FreeInternal(Thread* self, void* ptr) REQUIRES(!lock_);

  // Allocates large objects.
  void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated,
                         size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);

  // Revoke a run by adding it to non_full_runs_ or freeing the pages.
  void RevokeRun(Thread* self, size_t idx, Run* run) REQUIRES(!lock_);

  // Revoke the current runs which share an index with the thread local runs.
  void RevokeThreadUnsafeCurrentRuns() REQUIRES(!lock_);

  // Release a range of pages.
  size_t ReleasePageRange(uint8_t* start, uint8_t* end) REQUIRES(lock_);

  // Dumps the page map for debugging.
  std::string DumpPageMap() REQUIRES(lock_);

 public:
  RosAlloc(void* base, size_t capacity, size_t max_capacity,
           PageReleaseMode page_release_mode,
           bool running_on_memory_tool,
           size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
  ~RosAlloc();

  static size_t RunFreeListOffset() {
    return OFFSETOF_MEMBER(Run, free_list_);
  }
  static size_t RunFreeListHeadOffset() {
    return OFFSETOF_MEMBER(SlotFreeList<false>, head_);
  }
  static size_t RunFreeListSizeOffset() {
    return OFFSETOF_MEMBER(SlotFreeList<false>, size_);
  }
  static size_t RunSlotNextOffset() {
    return OFFSETOF_MEMBER(Slot, next_);
  }

  // If kThreadSafe is false then the allocator may avoid acquiring some locks as an optimization.
  // If used, this may cause race conditions if multiple threads are allocating at the same time.
  template<bool kThreadSafe = true>
  void* Alloc(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size,
              size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);
  size_t Free(Thread* self, void* ptr)
      REQUIRES(!bulk_free_lock_, !lock_);
  size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
      REQUIRES(!bulk_free_lock_, !lock_);

  // Returns true if the given allocation request can be allocated in
  // an existing thread local run without allocating a new run.
  ALWAYS_INLINE bool CanAllocFromThreadLocalRun(Thread* self, size_t size);
  // Allocate the given allocation request in an existing thread local
  // run without allocating a new run.
  ALWAYS_INLINE void* AllocFromThreadLocalRun(Thread* self, size_t size, size_t* bytes_allocated);

  // Returns the maximum bytes that could be allocated for the given
  // size in bulk, that is the maximum value for the
  // bytes_allocated_bulk out param returned by RosAlloc::Alloc().
  ALWAYS_INLINE size_t MaxBytesBulkAllocatedFor(size_t size);

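  // A hedged usage sketch (not from the source) of the allocation entry points above, roughly as
  // a rosalloc-backed space might drive them; rosalloc, self and request_size are hypothetical:
  //
  //   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
  //   void* obj = rosalloc->Alloc<true>(self, request_size, &bytes_allocated, &usable_size,
  //                                     &bytes_tl_bulk_allocated);
  //   if (obj != nullptr) {
  //     // Use up to usable_size bytes, then eventually:
  //     rosalloc->Free(self, obj);
  //   }
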
  // Returns the size of the allocated slot for a given allocated memory chunk.
  size_t UsableSize(const void* ptr) REQUIRES(!lock_);
  // Returns the size of the allocated slot for a given size.
  size_t UsableSize(size_t bytes) {
    if (UNLIKELY(bytes > kLargeSizeThreshold)) {
      return RoundUp(bytes, kPageSize);
    } else {
      return RoundToBracketSize(bytes);
    }
  }
  // Try to reduce the current footprint by releasing the free page
  // run at the end of the memory region, if any.
  bool Trim() REQUIRES(!lock_);
  // Iterates over all the memory slots and applies the given function.
  void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                  void* arg)
      REQUIRES(!lock_);

  // Release empty pages.
  size_t ReleasePages() REQUIRES(!lock_);
  // Returns the current footprint.
  size_t Footprint() REQUIRES(!lock_);
  // Returns the current capacity, maximum footprint.
  size_t FootprintLimit() REQUIRES(!lock_);
  // Update the current capacity.
  void SetFootprintLimit(size_t bytes) REQUIRES(!lock_);

  // Releases the thread-local runs assigned to the given thread back to the common set of runs.
  // Returns the total bytes of free slots in the revoked thread local runs. This is to be
  // subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting.
  size_t RevokeThreadLocalRuns(Thread* thread) REQUIRES(!lock_, !bulk_free_lock_);
  // Releases the thread-local runs assigned to all the threads back to the common set of runs.
  // Returns the total bytes of free slots in the revoked thread local runs. This is to be
  // subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting.
  size_t RevokeAllThreadLocalRuns() REQUIRES(!Locks::thread_list_lock_, !lock_, !bulk_free_lock_);
  // Assert the thread local runs of a thread are revoked.
  void AssertThreadLocalRunsAreRevoked(Thread* thread) REQUIRES(!bulk_free_lock_);
  // Assert all the thread local runs are revoked.
  void AssertAllThreadLocalRunsAreRevoked() REQUIRES(!Locks::thread_list_lock_, !bulk_free_lock_);

  static Run* GetDedicatedFullRun() {
    return dedicated_full_run_;
  }
  bool IsFreePage(size_t idx) const {
    DCHECK_LT(idx, capacity_ / kPageSize);
    uint8_t pm_type = page_map_[idx];
    return pm_type == kPageMapReleased || pm_type == kPageMapEmpty;
  }

  // Callbacks for InspectAll that will count the number of bytes
  // allocated and objects allocated, respectively.
  static void BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
  static void ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);

  bool DoesReleaseAllPages() const {
    return page_release_mode_ == kPageReleaseModeAll;
  }

  // Verify for debugging.
  void Verify() REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !bulk_free_lock_,
                         !lock_);

  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes)
      REQUIRES(!bulk_free_lock_, !lock_);

 private:
  friend std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);

  DISALLOW_COPY_AND_ASSIGN(RosAlloc);
};
std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);

// Callback from rosalloc when it needs to increase the footprint. Must be implemented somewhere
// else (currently rosalloc_space.cc).
void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment);

}  // namespace allocator
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_