/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_
#define ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_

#include <set>
#include <stdint.h>
#include <stdlib.h>
#include <string>
#include <sys/mman.h>
#include <vector>

#include "base/mutex.h"
#include "base/logging.h"
#include "globals.h"
#include "utils.h"

// Boilerplate to use hash_map/hash_set on both host and device.
#ifdef HAVE_ANDROID_OS
#include <hash_map>
#include <hash_set>
using std::hash_map;
using std::hash_set;
#else  // HAVE_ANDROID_OS
#ifdef __DEPRECATED
#define ROSALLOC_OLD__DEPRECATED __DEPRECATED
#undef __DEPRECATED
#endif
#include <ext/hash_map>
#include <ext/hash_set>
#ifdef ROSALLOC_OLD__DEPRECATED
#define __DEPRECATED ROSALLOC_OLD__DEPRECATED
#undef ROSALLOC_OLD__DEPRECATED
#endif
using __gnu_cxx::hash_map;
using __gnu_cxx::hash_set;
#endif  // HAVE_ANDROID_OS

namespace art {
namespace gc {
namespace allocator {

// A Runs-of-slots memory allocator.
class RosAlloc {
 private:
  // Represents a run of free pages.
  class FreePageRun {
   public:
    byte magic_num_;  // The magic number used for debugging only.

    bool IsFree() const {
      if (kIsDebugBuild) {
        return magic_num_ == kMagicNumFree;
      }
      return true;
    }
    size_t ByteSize(RosAlloc* rosalloc) const EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      const byte* fpr_base = reinterpret_cast<const byte*>(this);
      size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
      size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
      DCHECK_GE(byte_size, static_cast<size_t>(0));
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      return byte_size;
    }
    void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
        EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      byte* fpr_base = reinterpret_cast<byte*>(this);
      size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
      rosalloc->free_page_run_size_map_[pm_idx] = byte_size;
    }
    void* Begin() {
      return reinterpret_cast<void*>(this);
    }
    void* End(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      byte* fpr_base = reinterpret_cast<byte*>(this);
      byte* end = fpr_base + ByteSize(rosalloc);
      return end;
    }
    bool IsLargerThanPageReleaseThreshold(RosAlloc* rosalloc)
        EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      return ByteSize(rosalloc) >= rosalloc->page_release_size_threshold_;
    }
    bool IsAtEndOfSpace(RosAlloc* rosalloc)
        EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      return reinterpret_cast<byte*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
    }
    bool ShouldReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      switch (rosalloc->page_release_mode_) {
        case kPageReleaseModeNone:
          return false;
        case kPageReleaseModeEnd:
          return IsAtEndOfSpace(rosalloc);
        case kPageReleaseModeSize:
          return IsLargerThanPageReleaseThreshold(rosalloc);
        case kPageReleaseModeSizeAndEnd:
          return IsLargerThanPageReleaseThreshold(rosalloc) && IsAtEndOfSpace(rosalloc);
        case kPageReleaseModeAll:
          return true;
        default:
          LOG(FATAL) << "Unexpected page release mode";
          return false;
      }
    }
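    // Releases the backing pages of this free page run to the OS via
    // madvise(MADV_DONTNEED): the virtual range stays mapped, but the kernel
    // may reclaim the physical pages, which then read back as zero-filled.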
    void ReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      byte* start = reinterpret_cast<byte*>(this);
      size_t byte_size = ByteSize(rosalloc);
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      bool release_pages = ShouldReleasePages(rosalloc);
      if (kIsDebugBuild) {
        // Exclude the first page that stores the magic number.
        DCHECK_GE(byte_size, static_cast<size_t>(kPageSize));
        start += kPageSize;
        byte_size -= kPageSize;
        if (byte_size > 0) {
          if (release_pages) {
            madvise(start, byte_size, MADV_DONTNEED);
          }
        }
      } else {
        if (release_pages) {
          madvise(start, byte_size, MADV_DONTNEED);
        }
      }
    }
  };

  // Represents a run of memory slots of the same size.
  //
  // A run's memory layout:
  //
  // +-------------------+
  // | magic_num         |
  // +-------------------+
  // | size_bracket_idx  |
  // +-------------------+
  // | is_thread_local   |
  // +-------------------+
  // | to_be_bulk_freed  |
  // +-------------------+
  // | top_slot_idx      |
  // +-------------------+
  // |                   |
  // | alloc bit map     |
  // |                   |
  // +-------------------+
  // |                   |
  // | bulk free bit map |
  // |                   |
  // +-------------------+
  // |                   |
  // | thread-local free |
  // | bit map           |
  // |                   |
  // +-------------------+
  // | padding due to    |
  // | alignment         |
  // +-------------------+
  // | slot 0            |
  // +-------------------+
  // | slot 1            |
  // +-------------------+
  // | slot 2            |
  // +-------------------+
  // ...
  // +-------------------+
  // | last slot         |
  // +-------------------+
  //
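  // The exact header size and the offsets of the three bit maps vary per size
  // bracket; they are precomputed in Initialize() and recorded in
  // headerSizes[], bulkFreeBitMapOffsets[], and threadLocalFreeBitMapOffsets[]
  // below.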
  class Run {
   public:
    byte magic_num_;             // The magic number used for debugging.
    byte size_bracket_idx_;      // The index of the size bracket of this run.
    byte is_thread_local_;       // True if this run is used as a thread-local run.
    byte to_be_bulk_freed_;      // Used within BulkFree() to flag a run that's involved with a bulk free.
    uint32_t top_slot_idx_;      // The top slot index when this run is in bump index mode.
    uint32_t alloc_bit_map_[0];  // The bit map that tracks whether each slot is in use.

    // bulk_free_bit_map_[] : The bit map that is used for GC to
    // temporarily mark the slots to free without using a lock. After
    // all the slots to be freed in a run are marked, all those slots
    // get freed in bulk with one locking per run, as opposed to one
    // locking per slot to minimize the lock contention. This is used
    // within BulkFree().

    // thread_local_free_bit_map_[] : The bit map that is used for GC
    // to temporarily mark the slots to free in a thread-local run
    // without using a lock (without synchronizing the thread that
    // owns the thread-local run.) When the thread-local run becomes
    // full, the thread will check this bit map and update the
    // allocation bit map of the run (that is, the slots get freed.)

    // Returns the byte size of the header except for the bit maps.
    static size_t fixed_header_size() {
      Run temp;
      size_t size = reinterpret_cast<byte*>(&temp.alloc_bit_map_) - reinterpret_cast<byte*>(&temp);
      DCHECK_EQ(size, static_cast<size_t>(8));
      return size;
    }
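    // The fixed header is 8 bytes: the four one-byte fields above plus the
    // 4-byte top_slot_idx_, which is what the DCHECK above verifies.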
    // Returns the base address of the bulk free bit map.
    uint32_t* bulk_free_bit_map() {
      return reinterpret_cast<uint32_t*>(reinterpret_cast<byte*>(this) +
                                         bulkFreeBitMapOffsets[size_bracket_idx_]);
    }
    // Returns the base address of the thread-local free bit map.
    uint32_t* thread_local_free_bit_map() {
      return reinterpret_cast<uint32_t*>(reinterpret_cast<byte*>(this) +
                                         threadLocalFreeBitMapOffsets[size_bracket_idx_]);
    }
    void* End() {
      return reinterpret_cast<byte*>(this) + kPageSize * numOfPages[size_bracket_idx_];
    }
    // Frees slots in the allocation bit map with regard to the
    // thread-local free bit map. Used when a thread-local run becomes
    // full.
    bool MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out);
    // Frees slots in the allocation bit map with regard to the bulk
    // free bit map. Used in a bulk free.
    void MergeBulkFreeBitMapIntoAllocBitMap();
    // Unions the slots to be freed in the bulk free bit map into the
    // thread-local free bit map. In a bulk free, as a two-step
    // process, GC will first record all the slots to free in a run in
    // the bulk free bit map where it can write without a lock, and
    // later acquire a lock once per run to union the bits of the bulk
    // free bit map into the thread-local free bit map.
    void UnionBulkFreeBitMapToThreadLocalFreeBitMap();
    // Allocates a slot in a run.
    void* AllocSlot();
    // Frees a slot in a run. This is used in a non-bulk free.
    void FreeSlot(void* ptr);
    // Marks the slots to free in the bulk free bit map.
    void MarkBulkFreeBitMap(void* ptr);
    // Marks the slots to free in the thread-local free bit map.
    void MarkThreadLocalFreeBitMap(void* ptr);
    // Returns true if none of the slots in the run are in use.
    bool IsAllFree();
    // Returns true if all the slots in the run are in use.
    bool IsFull();
    // Clears all the bit maps.
    void ClearBitMaps();
    // Iterates over all the slots and applies the given function.
    void InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                         void* arg);
    // Dumps the run metadata for debugging.
    void Dump();

   private:
    // The common part of MarkBulkFreeBitMap() and MarkThreadLocalFreeBitMap().
    void MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base, const char* caller_name);
  };
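  // An illustrative sketch, based on the comments above, of how the Run
  // methods compose during a bulk free (the exact sequencing is in
  // BulkFree()):
  //   run->MarkBulkFreeBitMap(ptr);  // Step 1: mark each slot, lock-free.
  //   // Step 2, once per run, under a lock:
  //   run->UnionBulkFreeBitMapToThreadLocalFreeBitMap();  // thread-local run
  //   run->MergeBulkFreeBitMapIntoAllocBitMap();          // shared run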

  // The magic number for a run.
  static const byte kMagicNum = 42;
  // The magic number for free pages.
  static const byte kMagicNumFree = 43;
  // The number of size brackets. Sync this with the length of Thread::rosalloc_runs_.
  static const size_t kNumOfSizeBrackets = 34;
  // The number of smaller size brackets that are 16 bytes apart.
  static const size_t kNumOfQuantumSizeBrackets = 32;
  // The sizes (the slot sizes, in bytes) of the size brackets.
  static size_t bracketSizes[kNumOfSizeBrackets];
  // The numbers of pages that are used for runs for each size bracket.
  static size_t numOfPages[kNumOfSizeBrackets];
  // The numbers of slots of the runs for each size bracket.
  static size_t numOfSlots[kNumOfSizeBrackets];
  // The header sizes in bytes of the runs for each size bracket.
  static size_t headerSizes[kNumOfSizeBrackets];
  // The byte offsets of the bulk free bit maps of the runs for each size bracket.
  static size_t bulkFreeBitMapOffsets[kNumOfSizeBrackets];
  // The byte offsets of the thread-local free bit maps of the runs for each size bracket.
  static size_t threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];

  // Initialize the run specs (the above arrays).
  static void Initialize();
  static bool initialized_;

  // Returns the bracket size (in bytes) for the given bracket index.
  static size_t IndexToBracketSize(size_t idx) {
    DCHECK(idx < kNumOfSizeBrackets);
    return bracketSizes[idx];
  }
  // Returns the index of the size bracket from the bracket size.
  static size_t BracketSizeToIndex(size_t size) {
    DCHECK(16 <= size && ((size < 1 * KB && size % 16 == 0) || size == 1 * KB || size == 2 * KB));
    size_t idx;
    if (UNLIKELY(size == 1 * KB)) {
      idx = kNumOfSizeBrackets - 2;
    } else if (UNLIKELY(size == 2 * KB)) {
      idx = kNumOfSizeBrackets - 1;
    } else {
      DCHECK(size < 1 * KB);
      DCHECK_EQ(size % 16, static_cast<size_t>(0));
      idx = size / 16 - 1;
    }
    DCHECK(bracketSizes[idx] == size);
    return idx;
  }
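  // For example, BracketSizeToIndex(48) == 48 / 16 - 1 == 2,
  // BracketSizeToIndex(1 * KB) == 32, and BracketSizeToIndex(2 * KB) == 33.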
  // Rounds up the size to the nearest bracket size.
  static size_t RoundToBracketSize(size_t size) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= 512)) {
      return RoundUp(size, 16);
    } else if (512 < size && size <= 1 * KB) {
      return 1 * KB;
    } else {
      DCHECK(1 * KB < size && size <= 2 * KB);
      return 2 * KB;
    }
  }
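  // For example, RoundToBracketSize(40) == 48, RoundToBracketSize(600) == 1 * KB,
  // and RoundToBracketSize(1500) == 2 * KB.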
  // Returns the size bracket index from the byte size with rounding.
  static size_t SizeToIndex(size_t size) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= 512)) {
      return RoundUp(size, 16) / 16 - 1;
    } else if (512 < size && size <= 1 * KB) {
      return kNumOfSizeBrackets - 2;
    } else {
      DCHECK(1 * KB < size && size <= 2 * KB);
      return kNumOfSizeBrackets - 1;
    }
  }
  // A combination of SizeToIndex() and RoundToBracketSize().
  static size_t SizeToIndexAndBracketSize(size_t size, size_t* bracket_size_out) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= 512)) {
      size_t bracket_size = RoundUp(size, 16);
      *bracket_size_out = bracket_size;
      size_t idx = bracket_size / 16 - 1;
      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
      return idx;
    } else if (512 < size && size <= 1 * KB) {
      size_t bracket_size = 1024;
      *bracket_size_out = bracket_size;
      size_t idx = kNumOfSizeBrackets - 2;
      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
      return idx;
    } else {
      DCHECK(1 * KB < size && size <= 2 * KB);
      size_t bracket_size = 2048;
      *bracket_size_out = bracket_size;
      size_t idx = kNumOfSizeBrackets - 1;
      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
      return idx;
    }
  }
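  // For example, SizeToIndexAndBracketSize(40, &bracket_size) returns index 2
  // and sets bracket_size to 48, matching SizeToIndex(40) == 2 and
  // RoundToBracketSize(40) == 48.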
  // Returns the page map index from an address. Requires that the
  // address is page size aligned.
  size_t ToPageMapIndex(const void* addr) const {
    DCHECK(base_ <= addr && addr < base_ + capacity_);
    size_t byte_offset = reinterpret_cast<const byte*>(addr) - base_;
    DCHECK_EQ(byte_offset % static_cast<size_t>(kPageSize), static_cast<size_t>(0));
    return byte_offset / kPageSize;
  }
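  // For example, ToPageMapIndex(base_ + 3 * kPageSize) == 3.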
  // Returns the page map index from an address with rounding.
  size_t RoundDownToPageMapIndex(void* addr) {
    DCHECK(base_ <= addr && addr < reinterpret_cast<byte*>(base_) + capacity_);
    return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / kPageSize;
  }

  // A memory allocation request larger than this size is treated as a large
  // object and allocated at page granularity.
  static const size_t kLargeSizeThreshold = 2048;

  // We use thread-local runs for the size brackets whose indexes
  // are less than or equal to this index. We use shared (current)
  // runs for the rest.
  static const size_t kMaxThreadLocalSizeBracketIdx = 10;
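  // Given the 16-byte quantum spacing above, bracket index 10 corresponds to
  // a bracket size of (10 + 1) * 16 == 176 bytes, so allocations up to 176
  // bytes are served from thread-local runs.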

  // If true, check that the returned memory is actually zero.
  static constexpr bool kCheckZeroMemory = kIsDebugBuild;

  // If true, log verbose details of operations.
  static constexpr bool kTraceRosAlloc = false;

  struct hash_run {
    size_t operator()(const RosAlloc::Run* r) const {
      return reinterpret_cast<size_t>(r);
    }
  };

  struct eq_run {
    bool operator()(const RosAlloc::Run* r1, const RosAlloc::Run* r2) const {
      return r1 == r2;
    }
  };

 public:
  // Different page release modes.
  enum PageReleaseMode {
    kPageReleaseModeNone,        // Release no empty pages.
    kPageReleaseModeEnd,         // Release empty pages at the end of the space.
    kPageReleaseModeSize,        // Release empty pages that are larger than the threshold.
    kPageReleaseModeSizeAndEnd,  // Release empty pages that are larger than the threshold or
                                 // at the end of the space.
    kPageReleaseModeAll,         // Release all empty pages.
  };

  // The default value for page_release_size_threshold_.
  static constexpr size_t kDefaultPageReleaseSizeThreshold = 4 * MB;
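  // Under kPageReleaseModeSize, this default means a free page run must span
  // at least 4 * MB (1024 pages, assuming 4 KB pages) before its pages are
  // released back to the OS.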

 private:
  // The base address of the memory region that's managed by this allocator.
  byte* base_;

  // The footprint in bytes of the currently allocated portion of the
  // memory region.
  size_t footprint_;

  // The maximum footprint. The address, base_ + capacity_, indicates
  // the end of the memory region that's managed by this allocator.
  size_t capacity_;

  // The run sets that hold the runs whose slots are not all
  // full. non_full_runs_[i] is guarded by size_bracket_locks_[i].
  std::set<Run*> non_full_runs_[kNumOfSizeBrackets];
  // The run sets that hold the runs whose slots are all full. This is
  // debug only. full_runs_[i] is guarded by size_bracket_locks_[i].
  hash_set<Run*, hash_run, eq_run> full_runs_[kNumOfSizeBrackets];
  // The set of free pages.
  std::set<FreePageRun*> free_page_runs_ GUARDED_BY(lock_);
  // The free page run whose end address is the end of the memory
  // region that's managed by this allocator, if any.
  FreePageRun* last_free_page_run_;
  // The current runs where the allocations are first attempted for
  // the size brackets that do not use thread-local
  // runs. current_runs_[i] is guarded by size_bracket_locks_[i].
  Run* current_runs_[kNumOfSizeBrackets];
  // The mutexes, one per size bracket.
  Mutex* size_bracket_locks_[kNumOfSizeBrackets];
  // The types of page map entries.
  enum {
    kPageMapEmpty = 0,            // Not allocated.
    kPageMapRun = 1,              // The beginning of a run.
    kPageMapRunPart = 2,          // The non-beginning part of a run.
    kPageMapLargeObject = 3,      // The beginning of a large object.
    kPageMapLargeObjectPart = 4,  // The non-beginning part of a large object.
  };
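  // For example, a large object spanning three pages is recorded as the
  // entries { kPageMapLargeObject, kPageMapLargeObjectPart,
  // kPageMapLargeObjectPart } in the page map below.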
  // The table that indicates what pages are currently used for.
  std::vector<byte> page_map_ GUARDED_BY(lock_);
  // The table that indicates the size of free page runs. These sizes
  // are stored here rather than in the free page header so that the
  // backing pages can be released.
  std::vector<size_t> free_page_run_size_map_ GUARDED_BY(lock_);
  // The global lock. Used to guard the page map, the free page set,
  // and the footprint.
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // The reader-writer lock to allow one bulk free at a time while
  // allowing multiple individual frees at the same time.
  ReaderWriterMutex bulk_free_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // The page release mode.
  const PageReleaseMode page_release_mode_;
  // Under kPageReleaseModeSize(AndEnd), if the free page run size is
  // greater than or equal to this value, release pages.
  const size_t page_release_size_threshold_;

  // The base address of the memory region that's managed by this allocator.
  byte* Begin() { return base_; }
  // The end address of the memory region that's managed by this allocator.
  byte* End() { return base_ + capacity_; }

  // Page-granularity alloc/free.
  void* AllocPages(Thread* self, size_t num_pages, byte page_map_type)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);
  void FreePages(Thread* self, void* ptr) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Allocate/free a run slot.
  void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated)
      LOCKS_EXCLUDED(lock_);
  void FreeFromRun(Thread* self, void* ptr, Run* run)
      LOCKS_EXCLUDED(lock_);

  // Used to acquire a new/reused run for a size bracket. Used when a
  // thread-local or current run gets full.
  Run* RefillRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_);

  // The internal implementation of the non-bulk Free().
  void FreeInternal(Thread* self, void* ptr) LOCKS_EXCLUDED(lock_);

  // Allocates large objects.
  void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) LOCKS_EXCLUDED(lock_);

 public:
  RosAlloc(void* base, size_t capacity,
           PageReleaseMode page_release_mode,
           size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
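  // A usage sketch; map_base, self, and the capacity are hypothetical
  // caller-side values (in ART the allocator is backed by a memory mapping):
  //   RosAlloc rosalloc(map_base, 64 * MB, RosAlloc::kPageReleaseModeSizeAndEnd);
  //   size_t bytes_allocated;
  //   void* obj = rosalloc.Alloc(self, 24, &bytes_allocated);
  //   // bytes_allocated is expected to be the bracket size, here 32.
  //   rosalloc.Free(self, obj);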
  void* Alloc(Thread* self, size_t size, size_t* bytes_allocated)
      LOCKS_EXCLUDED(lock_);
  void Free(Thread* self, void* ptr)
      LOCKS_EXCLUDED(bulk_free_lock_);
  void BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
      LOCKS_EXCLUDED(bulk_free_lock_);
  // Returns the size of the allocated slot for a given allocated memory chunk.
  size_t UsableSize(void* ptr);
  // Returns the size of the allocated slot for a given size.
  size_t UsableSize(size_t bytes) {
    if (UNLIKELY(bytes > kLargeSizeThreshold)) {
      return RoundUp(bytes, kPageSize);
    } else {
      return RoundToBracketSize(bytes);
    }
  }
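  // For example, UsableSize(100) == 112 (rounded up to a bracket size) and,
  // assuming 4 KB pages, UsableSize(5000) == 8192 (rounded up to whole pages).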
  // Tries to reduce the current footprint by releasing the free page
  // run at the end of the memory region, if any.
  bool Trim();
  // Iterates over all the memory slots and applies the given function.
  void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                  void* arg)
      LOCKS_EXCLUDED(lock_);
  // Returns the current footprint.
  size_t Footprint() LOCKS_EXCLUDED(lock_);
  // Returns the current capacity, i.e. the maximum footprint.
  size_t FootprintLimit() LOCKS_EXCLUDED(lock_);
  // Updates the current capacity.
  void SetFootprintLimit(size_t bytes) LOCKS_EXCLUDED(lock_);
  // Releases the thread-local runs assigned to the given thread back to the common set of runs.
  void RevokeThreadLocalRuns(Thread* thread);
  // Releases the thread-local runs assigned to all the threads back to the common set of runs.
  void RevokeAllThreadLocalRuns() LOCKS_EXCLUDED(Locks::thread_list_lock_);
  // Dumps the page map for debugging.
  void DumpPageMap(Thread* self);

  // Callbacks for InspectAll that will count the number of bytes
  // allocated and objects allocated, respectively.
  static void BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
  static void ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);

  bool DoesReleaseAllPages() const {
    return page_release_mode_ == kPageReleaseModeAll;
  }
};

}  // namespace allocator
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_