/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_
#define ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_

#include <stdint.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <memory>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>

#include "base/allocator.h"
#include "base/bit_utils.h"
#include "base/mutex.h"
#include "base/logging.h"
#include "globals.h"
#include "thread.h"

namespace art {

class MemMap;

namespace gc {
namespace allocator {

// A runs-of-slots memory allocator.
class RosAlloc {
 private:
  // Represents a run of free pages.
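  // The FreePageRun header is laid over the first bytes of the free page range itself; the
  // range's byte size is kept in free_page_run_size_map_ (not in the header) so the backing
  // pages can be released without losing the size information.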
  class FreePageRun {
   public:
    uint8_t magic_num_;  // The magic number used for debugging only.

    bool IsFree() const {
      return !kIsDebugBuild || magic_num_ == kMagicNumFree;
    }
    size_t ByteSize(RosAlloc* rosalloc) const REQUIRES(rosalloc->lock_) {
      const uint8_t* fpr_base = reinterpret_cast<const uint8_t*>(this);
      size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
      size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
      DCHECK_GE(byte_size, static_cast<size_t>(0));
      DCHECK_ALIGNED(byte_size, kPageSize);
      return byte_size;
    }
    void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
        REQUIRES(rosalloc->lock_) {
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
      size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
      rosalloc->free_page_run_size_map_[pm_idx] = byte_size;
    }
    void* Begin() {
      return reinterpret_cast<void*>(this);
    }
    void* End(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
      uint8_t* fpr_base = reinterpret_cast<uint8_t*>(this);
      uint8_t* end = fpr_base + ByteSize(rosalloc);
      return end;
    }
    bool IsLargerThanPageReleaseThreshold(RosAlloc* rosalloc)
        REQUIRES(rosalloc->lock_) {
      return ByteSize(rosalloc) >= rosalloc->page_release_size_threshold_;
    }
    bool IsAtEndOfSpace(RosAlloc* rosalloc)
        REQUIRES(rosalloc->lock_) {
      return reinterpret_cast<uint8_t*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
    }
    bool ShouldReleasePages(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
      switch (rosalloc->page_release_mode_) {
        case kPageReleaseModeNone:
          return false;
        case kPageReleaseModeEnd:
          return IsAtEndOfSpace(rosalloc);
        case kPageReleaseModeSize:
          return IsLargerThanPageReleaseThreshold(rosalloc);
        case kPageReleaseModeSizeAndEnd:
          return IsLargerThanPageReleaseThreshold(rosalloc) && IsAtEndOfSpace(rosalloc);
        case kPageReleaseModeAll:
          return true;
        default:
          LOG(FATAL) << "Unexpected page release mode ";
          return false;
      }
    }
    void ReleasePages(RosAlloc* rosalloc) REQUIRES(rosalloc->lock_) {
      uint8_t* start = reinterpret_cast<uint8_t*>(this);
      size_t byte_size = ByteSize(rosalloc);
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      if (ShouldReleasePages(rosalloc)) {
        rosalloc->ReleasePageRange(start, start + byte_size);
      }
    }

   private:
    DISALLOW_COPY_AND_ASSIGN(FreePageRun);
  };

  // The slot header.
  class Slot {
   public:
    Slot* Next() const {
      return next_;
    }
    void SetNext(Slot* next) {
      next_ = next;
    }
    // The slot right before this slot in terms of the address.
    Slot* Left(size_t bracket_size) {
      return reinterpret_cast<Slot*>(reinterpret_cast<uintptr_t>(this) - bracket_size);
    }
    void Clear() {
      next_ = nullptr;
    }

   private:
    Slot* next_;  // Next slot in the list.
  };

  // We use the tail (kUseTail == true) for the bulk or thread-local free lists to avoid the need to
  // traverse the list from the head to the tail when merging free lists.
  // We don't use the tail (kUseTail == false) for the free list to avoid the need to manage the
  // tail in the allocation fast path for a performance reason.
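  // Merging a kUseTail == true list into another list is O(1): Merge() splices the incoming list
  // in front of the current head via the incoming list's tail pointer instead of walking it.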
  template<bool kUseTail = true>
  class SlotFreeList {
   public:
    SlotFreeList() : head_(0U), tail_(0), size_(0) {}
    Slot* Head() const {
      return reinterpret_cast<Slot*>(head_);
    }
    Slot* Tail() const {
      CHECK(kUseTail);
      return reinterpret_cast<Slot*>(tail_);
    }
    size_t Size() const {
      return size_;
    }
    // Removes from the head of the free list.
    Slot* Remove() {
      Slot* slot;
      if (kIsDebugBuild) {
        Verify();
      }
      Slot** headp = reinterpret_cast<Slot**>(&head_);
      Slot** tailp = kUseTail ? reinterpret_cast<Slot**>(&tail_) : nullptr;
      Slot* old_head = *headp;
      if (old_head == nullptr) {
        // List was empty.
        if (kUseTail) {
          DCHECK(*tailp == nullptr);
        }
        return nullptr;
      } else {
        // List wasn't empty.
        if (kUseTail) {
          DCHECK(*tailp != nullptr);
        }
        Slot* old_head_next = old_head->Next();
        slot = old_head;
        *headp = old_head_next;
        if (kUseTail && old_head_next == nullptr) {
          // List becomes empty.
          *tailp = nullptr;
        }
      }
      slot->Clear();
      --size_;
      if (kIsDebugBuild) {
        Verify();
      }
      return slot;
    }
    void Add(Slot* slot) {
      if (kIsDebugBuild) {
        Verify();
      }
      DCHECK(slot != nullptr);
      Slot** headp = reinterpret_cast<Slot**>(&head_);
      Slot** tailp = kUseTail ? reinterpret_cast<Slot**>(&tail_) : nullptr;
      Slot* old_head = *headp;
      if (old_head == nullptr) {
        // List was empty.
        if (kUseTail) {
          DCHECK(*tailp == nullptr);
        }
        *headp = slot;
        if (kUseTail) {
          *tailp = slot;
        }
      } else {
        // List wasn't empty.
        if (kUseTail) {
          DCHECK(*tailp != nullptr);
        }
        *headp = slot;
        slot->SetNext(old_head);
      }
      ++size_;
      if (kIsDebugBuild) {
        Verify();
      }
    }
    // Merge the given list into this list. Empty the given list.
    // Deliberately support only a kUseTail == true SlotFreeList parameter because 1) we don't
    // currently have a situation where we need a kUseTail == false SlotFreeList parameter, and 2)
    // supporting the kUseTail == false parameter would require an O(n) linked list traversal to do
    // the merge if 'this' SlotFreeList has kUseTail == false, which we'd like to avoid.
    void Merge(SlotFreeList<true>* list) {
      if (kIsDebugBuild) {
        Verify();
        CHECK(list != nullptr);
        list->Verify();
      }
      if (list->Size() == 0) {
        return;
      }
      Slot** headp = reinterpret_cast<Slot**>(&head_);
      Slot** tailp = kUseTail ? reinterpret_cast<Slot**>(&tail_) : nullptr;
      Slot* old_head = *headp;
      if (old_head == nullptr) {
        // List was empty.
        *headp = list->Head();
        if (kUseTail) {
          *tailp = list->Tail();
        }
        size_ = list->Size();
      } else {
        // List wasn't empty.
        DCHECK(list->Head() != nullptr);
        *headp = list->Head();
        DCHECK(list->Tail() != nullptr);
        list->Tail()->SetNext(old_head);
        // if kUseTail, no change to tailp.
        size_ += list->Size();
      }
      list->Reset();
      if (kIsDebugBuild) {
        Verify();
      }
    }

    void Reset() {
      head_ = 0;
      if (kUseTail) {
        tail_ = 0;
      }
      size_ = 0;
    }

    void Verify() {
      Slot* head = reinterpret_cast<Slot*>(head_);
      Slot* tail = kUseTail ? reinterpret_cast<Slot*>(tail_) : nullptr;
      if (size_ == 0) {
        CHECK(head == nullptr);
        if (kUseTail) {
          CHECK(tail == nullptr);
        }
      } else {
        CHECK(head != nullptr);
        if (kUseTail) {
          CHECK(tail != nullptr);
        }
        size_t count = 0;
        for (Slot* slot = head; slot != nullptr; slot = slot->Next()) {
          ++count;
          if (kUseTail && slot->Next() == nullptr) {
            CHECK_EQ(slot, tail);
          }
        }
        CHECK_EQ(size_, count);
      }
    }

   private:
    // A pointer (Slot*) to the head of the list. Always 8 bytes so that we will have the same
    // layout between 32 bit and 64 bit, which is not strictly necessary, but we do so for 1)
    // uniformity, 2) we won't need to change this code if we move to a non-low 4G heap in the
    // future, and 3) the space savings by using 32 bit fields in 32 bit would be lost in noise
    // (won't open up enough space to cause an extra slot to be available).
    uint64_t head_;
    // A pointer (Slot*) to the tail of the list. Always 8 bytes so that we will have the same
    // layout between 32 bit and 64 bit. The tail is stored to speed up merging of lists.
    // Unused if kUseTail is false.
    uint64_t tail_;
    // The number of slots in the list. This is used to make it fast to check if a free list is all
    // free without traversing the whole free list.
    uint32_t size_;
    uint32_t padding_ ATTRIBUTE_UNUSED;
  };

  // Represents a run of memory slots of the same size.
  //
  // A run's memory layout:
  //
  // +-------------------+
  // | magic_num         |
  // +-------------------+
  // | size_bracket_idx  |
  // +-------------------+
  // | is_thread_local   |
  // +-------------------+
  // | to_be_bulk_freed  |
  // +-------------------+
  // |                   |
  // | free list         |
  // |                   |
  // +-------------------+
  // |                   |
  // | bulk free list    |
  // |                   |
  // +-------------------+
  // |                   |
  // | thread-local free |
  // | list              |
  // |                   |
  // +-------------------+
  // | padding due to    |
  // | alignment         |
  // +-------------------+
  // | slot 0            |
  // +-------------------+
  // | slot 1            |
  // +-------------------+
  // | slot 2            |
  // +-------------------+
  // ...
  // +-------------------+
  // | last slot         |
  // +-------------------+
  //
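  // The header (and its trailing alignment padding) is followed by numOfSlots[idx] slots of
  // bracketSizes[idx] bytes each; the whole run occupies numOfPages[idx] pages.
  //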
  class Run {
   public:
    uint8_t magic_num_;                 // The magic number used for debugging.
    uint8_t size_bracket_idx_;          // The index of the size bracket of this run.
    uint8_t is_thread_local_;           // True if this run is used as a thread-local run.
    uint8_t to_be_bulk_freed_;          // Used within BulkFree() to flag a run that's involved with a bulk free.
    uint32_t padding_ ATTRIBUTE_UNUSED;
    // Use a tailless free list for free_list_ so that the alloc fast path does not manage the tail.
    SlotFreeList<false> free_list_;
    SlotFreeList<true> bulk_free_list_;
    SlotFreeList<true> thread_local_free_list_;
    // Padding due to alignment
    // Slot 0
    // Slot 1
    // ...

    // Returns the byte size of the header.
    static size_t fixed_header_size() {
      return sizeof(Run);
    }
    Slot* FirstSlot() {
      const uint8_t idx = size_bracket_idx_;
      return reinterpret_cast<Slot*>(reinterpret_cast<uintptr_t>(this) + headerSizes[idx]);
    }
    Slot* LastSlot() {
      const uint8_t idx = size_bracket_idx_;
      const size_t bracket_size = bracketSizes[idx];
      uintptr_t end = reinterpret_cast<uintptr_t>(End());
      Slot* last_slot = reinterpret_cast<Slot*>(end - bracket_size);
      DCHECK_LE(FirstSlot(), last_slot);
      return last_slot;
    }
    SlotFreeList<false>* FreeList() {
      return &free_list_;
    }
    SlotFreeList<true>* BulkFreeList() {
      return &bulk_free_list_;
    }
    SlotFreeList<true>* ThreadLocalFreeList() {
      return &thread_local_free_list_;
    }
    void* End() {
      return reinterpret_cast<uint8_t*>(this) + kPageSize * numOfPages[size_bracket_idx_];
    }
    void SetIsThreadLocal(bool is_thread_local) {
      is_thread_local_ = is_thread_local ? 1 : 0;
    }
    bool IsThreadLocal() const {
      return is_thread_local_ != 0;
    }
    // Set up the free list for a new/empty run.
    void InitFreeList() {
      const uint8_t idx = size_bracket_idx_;
      const size_t bracket_size = bracketSizes[idx];
      Slot* first_slot = FirstSlot();
      // Add backwards so the first slot is at the head of the list.
      for (Slot* slot = LastSlot(); slot >= first_slot; slot = slot->Left(bracket_size)) {
        free_list_.Add(slot);
      }
    }
    // Merge the thread-local free list to the free list. Used when a thread-local run becomes
    // full.
    bool MergeThreadLocalFreeListToFreeList(bool* is_all_free_after_out);
    // Merge the bulk free list to the free list. Used in a bulk free.
    void MergeBulkFreeListToFreeList();
    // Merge the bulk free list to the thread-local free list. In a bulk free, as a two-step
    // process, GC will first record all the slots to free in a run in the bulk free list where it
    // can write without a lock, and later acquire a lock once per run to merge the bulk free list
    // to the thread-local free list.
    void MergeBulkFreeListToThreadLocalFreeList();
    // Allocates a slot in a run.
    ALWAYS_INLINE void* AllocSlot();
    // Frees a slot in a run. This is used in a non-bulk free.
    void FreeSlot(void* ptr);
    // Add the given slot to the bulk free list. Returns the bracket size.
    size_t AddToBulkFreeList(void* ptr);
    // Add the given slot to the thread-local free list.
    void AddToThreadLocalFreeList(void* ptr);
    // Returns true if none of the slots in the run are in use.
    bool IsAllFree() const {
      return free_list_.Size() == numOfSlots[size_bracket_idx_];
    }
    // Returns the number of free slots.
    size_t NumberOfFreeSlots() {
      return free_list_.Size();
    }
    // Returns true if all the slots in the run are in use.
    ALWAYS_INLINE bool IsFull();
    // Returns true if the bulk free list is empty.
    bool IsBulkFreeListEmpty() const {
      return bulk_free_list_.Size() == 0;
    }
    // Returns true if the thread-local free list is empty.
    bool IsThreadLocalFreeListEmpty() const {
      return thread_local_free_list_.Size() == 0;
    }
    // Zero the run's data.
    void ZeroData();
    // Zero the run's header and the slot headers.
    void ZeroHeaderAndSlotHeaders();
    // Iterate over all the slots and apply the given function.
    void InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg), void* arg);
    // Dump the run metadata for debugging.
    std::string Dump();
    // Verify for debugging.
    void Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool)
        REQUIRES(Locks::mutator_lock_)
        REQUIRES(Locks::thread_list_lock_);

   private:
    // The common part of AddToBulkFreeList() and AddToThreadLocalFreeList(). Returns the bracket
    // size.
    size_t AddToFreeListShared(void* ptr, SlotFreeList<true>* free_list, const char* caller_name);
    // Turns a FreeList into a string for debugging.
    template<bool kUseTail>
    std::string FreeListToStr(SlotFreeList<kUseTail>* free_list);
    // Checks that a given pointer is a valid slot address and returns it as a Slot*.
    Slot* ToSlot(void* ptr) {
      const uint8_t idx = size_bracket_idx_;
      const size_t bracket_size = bracketSizes[idx];
      const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
          - reinterpret_cast<uint8_t*>(FirstSlot());
      DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
      size_t slot_idx = offset_from_slot_base / bracket_size;
      DCHECK_LT(slot_idx, numOfSlots[idx]);
      return reinterpret_cast<Slot*>(ptr);
    }
    size_t SlotIndex(Slot* slot);

    // TODO: DISALLOW_COPY_AND_ASSIGN(Run);
  };

  // The magic number for a run.
  static constexpr uint8_t kMagicNum = 42;
  // The magic number for free pages.
  static constexpr uint8_t kMagicNumFree = 43;
  // The number of size brackets. Sync this with the length of Thread::rosalloc_runs_.
  static constexpr size_t kNumOfSizeBrackets = kNumRosAllocThreadLocalSizeBrackets;
  // The number of smaller size brackets that are 16 bytes apart.
  static constexpr size_t kNumOfQuantumSizeBrackets = 32;
  // The sizes (the slot sizes, in bytes) of the size brackets.
  static size_t bracketSizes[kNumOfSizeBrackets];
  // The numbers of pages that are used for runs for each size bracket.
  static size_t numOfPages[kNumOfSizeBrackets];
  // The numbers of slots of the runs for each size bracket.
  static size_t numOfSlots[kNumOfSizeBrackets];
  // The header sizes in bytes of the runs for each size bracket.
  static size_t headerSizes[kNumOfSizeBrackets];
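  // With 32 quantum brackets spaced 16 bytes apart plus the 1 KB and 2 KB brackets (see
  // SizeToIndex() below), the bracket sizes are 16, 32, ..., 512, 1024, 2048.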

  // Initialize the run specs (the above arrays).
  static void Initialize();
  static bool initialized_;

  // Returns the byte size of the bracket size from the index.
  static size_t IndexToBracketSize(size_t idx) {
    DCHECK_LT(idx, kNumOfSizeBrackets);
    return bracketSizes[idx];
  }
  // Returns the index of the size bracket from the bracket size.
  static size_t BracketSizeToIndex(size_t size) {
    DCHECK(16 <= size && ((size < 1 * KB && size % 16 == 0) || size == 1 * KB || size == 2 * KB));
    size_t idx;
    if (UNLIKELY(size == 1 * KB)) {
      idx = kNumOfSizeBrackets - 2;
    } else if (UNLIKELY(size == 2 * KB)) {
      idx = kNumOfSizeBrackets - 1;
    } else {
      DCHECK(size < 1 * KB);
      DCHECK_EQ(size % 16, static_cast<size_t>(0));
      idx = size / 16 - 1;
    }
    DCHECK(bracketSizes[idx] == size);
    return idx;
  }
  // Returns true if the given allocation size is for a thread-local allocation.
  static bool IsSizeForThreadLocal(size_t size) {
    DCHECK_GT(kNumThreadLocalSizeBrackets, 0U);
    size_t max_thread_local_bracket_idx = kNumThreadLocalSizeBrackets - 1;
    bool is_size_for_thread_local = size <= bracketSizes[max_thread_local_bracket_idx];
    DCHECK(size > kLargeSizeThreshold ||
           (is_size_for_thread_local == (SizeToIndex(size) < kNumThreadLocalSizeBrackets)));
    return is_size_for_thread_local;
  }
  // Rounds up the size to the nearest bracket size.
  static size_t RoundToBracketSize(size_t size) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= 512)) {
      return RoundUp(size, 16);
    } else if (512 < size && size <= 1 * KB) {
      return 1 * KB;
    } else {
      DCHECK(1 * KB < size && size <= 2 * KB);
      return 2 * KB;
    }
  }
  // Returns the size bracket index from the byte size with rounding.
  static size_t SizeToIndex(size_t size) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= 512)) {
      return RoundUp(size, 16) / 16 - 1;
    } else if (512 < size && size <= 1 * KB) {
      return kNumOfSizeBrackets - 2;
    } else {
      DCHECK(1 * KB < size && size <= 2 * KB);
      return kNumOfSizeBrackets - 1;
    }
  }
  // A combination of SizeToIndex() and RoundToBracketSize().
  static size_t SizeToIndexAndBracketSize(size_t size, size_t* bracket_size_out) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= 512)) {
      size_t bracket_size = RoundUp(size, 16);
      *bracket_size_out = bracket_size;
      size_t idx = bracket_size / 16 - 1;
      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
      return idx;
    } else if (512 < size && size <= 1 * KB) {
      size_t bracket_size = 1024;
      *bracket_size_out = bracket_size;
      size_t idx = kNumOfSizeBrackets - 2;
      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
      return idx;
    } else {
      DCHECK(1 * KB < size && size <= 2 * KB);
      size_t bracket_size = 2048;
      *bracket_size_out = bracket_size;
      size_t idx = kNumOfSizeBrackets - 1;
      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
      return idx;
    }
  }
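  // For example, a 37-byte request maps to the 48-byte bracket (index 2), a 600-byte request to
  // the 1 KB bracket (index kNumOfSizeBrackets - 2), and a 1500-byte request to the 2 KB bracket
  // (index kNumOfSizeBrackets - 1).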
  // Returns the page map index from an address. Requires that the
  // address is page size aligned.
  size_t ToPageMapIndex(const void* addr) const {
    DCHECK_LE(base_, addr);
    DCHECK_LT(addr, base_ + capacity_);
    size_t byte_offset = reinterpret_cast<const uint8_t*>(addr) - base_;
    DCHECK_EQ(byte_offset % static_cast<size_t>(kPageSize), static_cast<size_t>(0));
    return byte_offset / kPageSize;
  }
  // Returns the page map index from an address with rounding.
  size_t RoundDownToPageMapIndex(const void* addr) const {
    DCHECK(base_ <= addr && addr < reinterpret_cast<uint8_t*>(base_) + capacity_);
    return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / kPageSize;
  }

  // A memory allocation request larger than this size is treated as a large object and allocated
  // at a page-granularity.
  static const size_t kLargeSizeThreshold = 2048;

  // If true, check that the returned memory is actually zero.
  static constexpr bool kCheckZeroMemory = kIsDebugBuild;
  // Valgrind protects memory, so do not check memory when running under Valgrind. In a normal
  // build with kCheckZeroMemory the whole test should be optimized away.
  // TODO: Unprotect before checks.
  ALWAYS_INLINE bool ShouldCheckZeroMemory();

  // If true, log verbose details of operations.
  static constexpr bool kTraceRosAlloc = false;

  struct hash_run {
    size_t operator()(const RosAlloc::Run* r) const {
      return reinterpret_cast<size_t>(r);
    }
  };

  struct eq_run {
    bool operator()(const RosAlloc::Run* r1, const RosAlloc::Run* r2) const {
      return r1 == r2;
    }
  };

 public:
  // Different page release modes.
  enum PageReleaseMode {
    kPageReleaseModeNone,        // Release no empty pages.
    kPageReleaseModeEnd,         // Release empty pages at the end of the space.
    kPageReleaseModeSize,        // Release empty pages that are larger than the threshold.
    kPageReleaseModeSizeAndEnd,  // Release empty pages that are larger than the threshold or
                                 // at the end of the space.
    kPageReleaseModeAll,         // Release all empty pages.
  };

  // The default value for page_release_size_threshold_.
  static constexpr size_t kDefaultPageReleaseSizeThreshold = 4 * MB;

  // We use thread-local runs for the size brackets whose indexes
  // are less than this index. We use shared (current) runs for the rest.
  static const size_t kNumThreadLocalSizeBrackets = 8;
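  // With the 16-byte quantum brackets above, this means allocation requests of up to
  // bracketSizes[7] (128 bytes) are served from the per-thread runs.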

 private:
  // The base address of the memory region that's managed by this allocator.
  uint8_t* base_;

  // The footprint in bytes of the currently allocated portion of the
  // memory region.
  size_t footprint_;

  // The maximum footprint. The address, base_ + capacity_, indicates
  // the end of the memory region that's currently managed by this allocator.
  size_t capacity_;

  // The maximum capacity. The address, base_ + max_capacity_, indicates
  // the end of the memory region that's ever managed by this allocator.
  size_t max_capacity_;

  // The run sets that hold the runs whose slots are not all
  // full. non_full_runs_[i] is guarded by size_bracket_locks_[i].
  AllocationTrackingSet<Run*, kAllocatorTagRosAlloc> non_full_runs_[kNumOfSizeBrackets];
  // The run sets that hold the runs whose slots are all full. This is
  // debug only. full_runs_[i] is guarded by size_bracket_locks_[i].
  std::unordered_set<Run*, hash_run, eq_run, TrackingAllocator<Run*, kAllocatorTagRosAlloc>>
      full_runs_[kNumOfSizeBrackets];
  // The set of free pages.
  AllocationTrackingSet<FreePageRun*, kAllocatorTagRosAlloc> free_page_runs_ GUARDED_BY(lock_);
  // The dedicated full run is always full and is shared by all threads when revoking happens.
  // This is an optimization since it enables us to avoid a null check for revoked runs.
  static Run* dedicated_full_run_;
  // Using size_t to ensure that it is at least word aligned.
  static size_t dedicated_full_run_storage_[];
  // The current runs where the allocations are first attempted for
  // the size brackets that do not use thread-local
  // runs. current_runs_[i] is guarded by size_bracket_locks_[i].
  Run* current_runs_[kNumOfSizeBrackets];
  // The mutexes, one per size bracket.
  Mutex* size_bracket_locks_[kNumOfSizeBrackets];
  // Bracket lock names (since locks only have char* names).
  std::string size_bracket_lock_names_[kNumOfSizeBrackets];
  // The types of page map entries.
  enum PageMapKind {
    kPageMapReleased = 0,     // Zero and released back to the OS.
    kPageMapEmpty,            // Zero but probably dirty.
    kPageMapRun,              // The beginning of a run.
    kPageMapRunPart,          // The non-beginning part of a run.
    kPageMapLargeObject,      // The beginning of a large object.
    kPageMapLargeObjectPart,  // The non-beginning part of a large object.
  };
  // The table that indicates what pages are currently used for.
  volatile uint8_t* page_map_;  // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
  size_t page_map_size_;
  size_t max_page_map_size_;
  std::unique_ptr<MemMap> page_map_mem_map_;

  // The table that indicates the size of free page runs. These sizes
  // are stored here rather than in the free page headers so that the
  // backing pages can be released.
  std::vector<size_t, TrackingAllocator<size_t, kAllocatorTagRosAlloc>> free_page_run_size_map_
      GUARDED_BY(lock_);
  // The global lock. Used to guard the page map, the free page set,
  // and the footprint.
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // The reader-writer lock to allow one bulk free at a time while
  // allowing multiple individual frees at the same time. Also, this
  // is used to avoid race conditions between BulkFree() and
  // RevokeThreadLocalRuns() on the bulk free list.
  ReaderWriterMutex bulk_free_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // The page release mode.
  const PageReleaseMode page_release_mode_;
  // Under kPageReleaseModeSize(AndEnd), if the free page run size is
  // greater than or equal to this value, release pages.
  const size_t page_release_size_threshold_;

  // Whether this allocator is running under a memory tool such as Valgrind.
  bool is_running_on_memory_tool_;

  // The base address of the memory region that's managed by this allocator.
  uint8_t* Begin() { return base_; }
  // The end address of the memory region that's managed by this allocator.
  uint8_t* End() { return base_ + capacity_; }

  // Page-granularity alloc/free
  void* AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type)
      REQUIRES(lock_);
  // Returns how many bytes were freed.
  size_t FreePages(Thread* self, void* ptr, bool already_zero) REQUIRES(lock_);

  // Allocate/free a run slot.
  void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size,
                     size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);
  // Allocate/free a run slot without acquiring locks.
  // TODO: REQUIRES(Locks::mutator_lock_)
  void* AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated,
                                 size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);
  void* AllocFromCurrentRunUnlocked(Thread* self, size_t idx) REQUIRES(!lock_);

  // Returns the bracket size.
  size_t FreeFromRun(Thread* self, void* ptr, Run* run)
      REQUIRES(!lock_);

  // Used to allocate a new thread-local run for a size bracket.
  Run* AllocRun(Thread* self, size_t idx) REQUIRES(!lock_);

  // Used to acquire a new/reused run for a size bracket. Used when a
  // thread-local or current run gets full.
  Run* RefillRun(Thread* self, size_t idx) REQUIRES(!lock_);

  // The internal implementation of the non-bulk Free().
  size_t FreeInternal(Thread* self, void* ptr) REQUIRES(!lock_);

  // Allocates large objects.
  void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated,
                         size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);

  // Revoke a run by adding it to non_full_runs_ or freeing the pages.
  void RevokeRun(Thread* self, size_t idx, Run* run) REQUIRES(!lock_);

  // Revoke the current runs which share an index with the thread-local runs.
  void RevokeThreadUnsafeCurrentRuns() REQUIRES(!lock_);

  // Release a range of pages.
  size_t ReleasePageRange(uint8_t* start, uint8_t* end) REQUIRES(lock_);

  // Dumps the page map for debugging.
  std::string DumpPageMap() REQUIRES(lock_);

 public:
  RosAlloc(void* base, size_t capacity, size_t max_capacity,
           PageReleaseMode page_release_mode,
           bool running_on_memory_tool,
           size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
  ~RosAlloc();

  // If kThreadSafe is false, then the allocator may avoid acquiring some locks as an optimization.
  // If used, this may cause race conditions if multiple threads are allocating at the same time.
  template<bool kThreadSafe = true>
  void* Alloc(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size,
              size_t* bytes_tl_bulk_allocated)
      REQUIRES(!lock_);
  size_t Free(Thread* self, void* ptr)
      REQUIRES(!bulk_free_lock_, !lock_);
  size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
      REQUIRES(!bulk_free_lock_, !lock_);

  // Returns true if the given allocation request can be allocated in
  // an existing thread-local run without allocating a new run.
  ALWAYS_INLINE bool CanAllocFromThreadLocalRun(Thread* self, size_t size);
  // Allocate the given allocation request in an existing thread-local
  // run without allocating a new run.
  ALWAYS_INLINE void* AllocFromThreadLocalRun(Thread* self, size_t size, size_t* bytes_allocated);

  // Returns the maximum bytes that could be allocated for the given
  // size in bulk, that is the maximum value for the
  // bytes_allocated_bulk out param returned by RosAlloc::Alloc().
  ALWAYS_INLINE size_t MaxBytesBulkAllocatedFor(size_t size);

  // Returns the size of the allocated slot for a given allocated memory chunk.
  size_t UsableSize(const void* ptr) REQUIRES(!lock_);
  // Returns the size of the allocated slot for a given size.
  size_t UsableSize(size_t bytes) {
    if (UNLIKELY(bytes > kLargeSizeThreshold)) {
      return RoundUp(bytes, kPageSize);
    } else {
      return RoundToBracketSize(bytes);
    }
  }
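  // For example, UsableSize(37) is 48 (the rounded-up bracket size) and, with 4 KB pages,
  // UsableSize(5000) is 8192 (rounded up to whole pages).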
  // Try to reduce the current footprint by releasing the free page
  // run at the end of the memory region, if any.
  bool Trim() REQUIRES(!lock_);
  // Iterates over all the memory slots and applies the given function.
  void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                  void* arg)
      REQUIRES(!lock_);

  // Release empty pages.
  size_t ReleasePages() REQUIRES(!lock_);
  // Returns the current footprint.
  size_t Footprint() REQUIRES(!lock_);
  // Returns the current capacity, maximum footprint.
  size_t FootprintLimit() REQUIRES(!lock_);
  // Update the current capacity.
  void SetFootprintLimit(size_t bytes) REQUIRES(!lock_);

  // Releases the thread-local runs assigned to the given thread back to the common set of runs.
  // Returns the total bytes of free slots in the revoked thread-local runs. This is to be
  // subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting.
  size_t RevokeThreadLocalRuns(Thread* thread) REQUIRES(!lock_, !bulk_free_lock_);
  // Releases the thread-local runs assigned to all the threads back to the common set of runs.
  // Returns the total bytes of free slots in the revoked thread-local runs. This is to be
  // subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting.
  size_t RevokeAllThreadLocalRuns() REQUIRES(!Locks::thread_list_lock_, !lock_, !bulk_free_lock_);
  // Assert the thread-local runs of a thread are revoked.
  void AssertThreadLocalRunsAreRevoked(Thread* thread) REQUIRES(!bulk_free_lock_);
  // Assert all the thread-local runs are revoked.
  void AssertAllThreadLocalRunsAreRevoked() REQUIRES(!Locks::thread_list_lock_, !bulk_free_lock_);

  static Run* GetDedicatedFullRun() {
    return dedicated_full_run_;
  }
  bool IsFreePage(size_t idx) const {
    DCHECK_LT(idx, capacity_ / kPageSize);
    uint8_t pm_type = page_map_[idx];
    return pm_type == kPageMapReleased || pm_type == kPageMapEmpty;
  }

  // Callbacks for InspectAll that will count the number of bytes
  // allocated and objects allocated, respectively.
  static void BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
  static void ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);

  bool DoesReleaseAllPages() const {
    return page_release_mode_ == kPageReleaseModeAll;
  }

  // Verify for debugging.
  void Verify() REQUIRES(Locks::mutator_lock_, !Locks::thread_list_lock_, !bulk_free_lock_,
                         !lock_);

  void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes)
      REQUIRES(!bulk_free_lock_, !lock_);

 private:
  friend std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);

  DISALLOW_COPY_AND_ASSIGN(RosAlloc);
};
std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);

// Callback from rosalloc when it needs to increase the footprint. Must be implemented somewhere
// else (currently rosalloc_space.cc).
void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment);

}  // namespace allocator
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_