/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_
#define ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_

#include <set>
#include <stdint.h>
#include <stdlib.h>
#include <string>
#include <sys/mman.h>
#include <vector>

#include "base/mutex.h"
#include "base/logging.h"
#include "globals.h"
#include "utils.h"

// Boilerplate to use hash_map/hash_set on both host and device.
#ifdef HAVE_ANDROID_OS
#include <hash_map>
#include <hash_set>
using std::hash_map;
using std::hash_set;
#else  // HAVE_ANDROID_OS
#ifdef __DEPRECATED
#define ROSALLOC_OLD__DEPRECATED __DEPRECATED
#undef __DEPRECATED
#endif
#include <ext/hash_map>
#include <ext/hash_set>
#ifdef ROSALLOC_OLD__DEPRECATED
#define __DEPRECATED ROSALLOC_OLD__DEPRECATED
#undef ROSALLOC_OLD__DEPRECATED
#endif
using __gnu_cxx::hash_map;
using __gnu_cxx::hash_set;
#endif  // HAVE_ANDROID_OS

namespace art {
namespace gc {
namespace allocator {

// A runs-of-slots memory allocator.
class RosAlloc {
 private:
  // Represents a run of free pages.
  class FreePageRun {
   public:
    byte magic_num_;  // The magic number used for debugging only.

    bool IsFree() const {
      if (kIsDebugBuild) {
        return magic_num_ == kMagicNumFree;
      }
      return true;
    }
    size_t ByteSize(RosAlloc* rosalloc) const EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      const byte* fpr_base = reinterpret_cast<const byte*>(this);
      size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
      size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
      DCHECK_GE(byte_size, static_cast<size_t>(0));
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      return byte_size;
    }
    void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
        EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      byte* fpr_base = reinterpret_cast<byte*>(this);
      size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
      rosalloc->free_page_run_size_map_[pm_idx] = byte_size;
    }
    void* Begin() {
      return reinterpret_cast<void*>(this);
    }
    void* End(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      byte* fpr_base = reinterpret_cast<byte*>(this);
      byte* end = fpr_base + ByteSize(rosalloc);
      return end;
    }
    bool IsLargerThanPageReleaseThreshold(RosAlloc* rosalloc)
        EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      return ByteSize(rosalloc) >= rosalloc->page_release_size_threshold_;
    }
    bool IsAtEndOfSpace(RosAlloc* rosalloc)
        EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      return reinterpret_cast<byte*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
    }
    bool ShouldReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      switch (rosalloc->page_release_mode_) {
        case kPageReleaseModeNone:
          return false;
        case kPageReleaseModeEnd:
          return IsAtEndOfSpace(rosalloc);
        case kPageReleaseModeSize:
          return IsLargerThanPageReleaseThreshold(rosalloc);
        case kPageReleaseModeSizeAndEnd:
          return IsLargerThanPageReleaseThreshold(rosalloc) && IsAtEndOfSpace(rosalloc);
        case kPageReleaseModeAll:
          return true;
        default:
          LOG(FATAL) << "Unexpected page release mode";
          return false;
      }
    }
    void ReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      byte* start = reinterpret_cast<byte*>(this);
      size_t byte_size = ByteSize(rosalloc);
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      bool release_pages = ShouldReleasePages(rosalloc);
      if (kIsDebugBuild) {
        // Exclude the first page that stores the magic number.
        DCHECK_GE(byte_size, static_cast<size_t>(kPageSize));
        start += kPageSize;
        byte_size -= kPageSize;
        if (byte_size > 0) {
          if (release_pages) {
            madvise(start, byte_size, MADV_DONTNEED);
          }
        }
      } else {
        if (release_pages) {
          madvise(start, byte_size, MADV_DONTNEED);
        }
      }
    }
  };

  // Represents a run of memory slots of the same size.
  //
  // A run's memory layout:
  //
  // +-------------------+
  // | magic_num         |
  // +-------------------+
  // | size_bracket_idx  |
  // +-------------------+
  // | is_thread_local   |
  // +-------------------+
  // | to_be_bulk_freed  |
  // +-------------------+
  // | top_slot_idx      |
  // +-------------------+
  // |                   |
  // | alloc bit map     |
  // |                   |
  // +-------------------+
  // |                   |
  // | bulk free bit map |
  // |                   |
  // +-------------------+
  // |                   |
  // | thread-local free |
  // | bit map           |
  // |                   |
  // +-------------------+
  // | padding due to    |
  // | alignment         |
  // +-------------------+
  // | slot 0            |
  // +-------------------+
  // | slot 1            |
  // +-------------------+
  // | slot 2            |
  // +-------------------+
  // ...
  // +-------------------+
  // | last slot         |
  // +-------------------+
  //
  class Run {
   public:
    byte magic_num_;             // The magic number used for debugging.
    byte size_bracket_idx_;      // The index of the size bracket of this run.
    byte is_thread_local_;       // True if this run is used as a thread-local run.
    byte to_be_bulk_freed_;      // Used within BulkFree() to flag a run that's involved with a bulk free.
    uint32_t top_slot_idx_;      // The top slot index when this run is in bump index mode.
    uint32_t alloc_bit_map_[0];  // The bit map that indicates whether each slot is in use.

    // bulk_free_bit_map_[] : The bit map that is used for GC to
    // temporarily mark the slots to free without using a lock. After
    // all the slots to be freed in a run are marked, all those slots
    // get freed in bulk with one lock acquisition per run, as opposed
    // to one per slot, to minimize lock contention. This is used
    // within BulkFree().

    // thread_local_free_bit_map_[] : The bit map that is used for GC
    // to temporarily mark the slots to free in a thread-local run
    // without using a lock (without synchronizing with the thread that
    // owns the thread-local run.) When the thread-local run becomes
    // full, the thread will check this bit map and update the
    // allocation bit map of the run (that is, the slots get freed.)

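    // A rough sketch (illustrative only, not part of the API) of how
    // these two bit maps cooperate with the methods declared below
    // during a bulk free:
    //
    //   // Step 1: for each pointer, mark its slot without taking the
    //   // run's bracket lock.
    //   run->MarkBulkFreeBitMap(ptr);
    //   // Step 2: once per run, fold the marks in under the lock.
    //   if (run->is_thread_local_) {
    //     run->UnionBulkFreeBitMapToThreadLocalFreeBitMap();
    //   } else {
    //     run->MergeBulkFreeBitMapIntoAllocBitMap();
    //   }
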
    // Returns the byte size of the header except for the bit maps.
    static size_t fixed_header_size() {
      Run temp;
      size_t size = reinterpret_cast<byte*>(&temp.alloc_bit_map_) - reinterpret_cast<byte*>(&temp);
      DCHECK_EQ(size, static_cast<size_t>(8));
      return size;
    }
    // Returns the base address of the bulk free bit map.
    uint32_t* BulkFreeBitMap() {
      return reinterpret_cast<uint32_t*>(reinterpret_cast<byte*>(this) + bulkFreeBitMapOffsets[size_bracket_idx_]);
    }
    // Returns the base address of the thread-local free bit map.
    uint32_t* ThreadLocalFreeBitMap() {
      return reinterpret_cast<uint32_t*>(reinterpret_cast<byte*>(this) + threadLocalFreeBitMapOffsets[size_bracket_idx_]);
    }
    void* End() {
      return reinterpret_cast<byte*>(this) + kPageSize * numOfPages[size_bracket_idx_];
    }
    // Frees slots in the allocation bit map with regard to the
    // thread-local free bit map. Used when a thread-local run becomes
    // full.
    bool MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out);
    // Frees slots in the allocation bit map with regard to the bulk
    // free bit map. Used in a bulk free.
    void MergeBulkFreeBitMapIntoAllocBitMap();
    // Unions the slots to be freed in the bulk free bit map into the
    // thread-local free bit map. In a bulk free, as a two-step
    // process, GC will first record all the slots to free in a run in
    // the bulk free bit map where it can write without a lock, and
    // later acquire a lock once per run to union the bits of the bulk
    // free bit map into the thread-local free bit map.
    void UnionBulkFreeBitMapToThreadLocalFreeBitMap();
    // Allocates a slot in a run.
    void* AllocSlot();
    // Frees a slot in a run. This is used in a non-bulk free.
    void FreeSlot(void* ptr);
    // Marks the slots to free in the bulk free bit map.
    void MarkBulkFreeBitMap(void* ptr);
    // Marks the slots to free in the thread-local free bit map.
    void MarkThreadLocalFreeBitMap(void* ptr);
    // Returns true if none of the slots in the run are in use.
    bool IsAllFree();
    // Returns true if all the slots in the run are in use.
    bool IsFull();
    // Returns true if the bulk free bit map is clean.
    bool IsBulkFreeBitmapClean();
    // Returns true if the thread-local free bit map is clean.
    bool IsThreadLocalFreeBitmapClean();
    // Clears all the bit maps.
    void ClearBitMaps();
    // Iterates over all the slots and applies the given function.
    void InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg), void* arg);
    // Dumps the run metadata for debugging.
    std::string Dump();
    // Verifies the run for debugging.
    void Verify(Thread* self, RosAlloc* rosalloc)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);

   private:
    // The common part of MarkBulkFreeBitMap() and MarkThreadLocalFreeBitMap().
    void MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base, const char* caller_name);
    // Turns the bit map into a string for debugging.
    static std::string BitMapToStr(uint32_t* bit_map_base, size_t num_vec);
  };

  // The magic number for a run.
  static const byte kMagicNum = 42;
  // The magic number for free pages.
  static const byte kMagicNumFree = 43;
  // The number of size brackets. Sync this with the length of Thread::rosalloc_runs_.
  static const size_t kNumOfSizeBrackets = 34;
  // The number of smaller size brackets that are 16 bytes apart.
  static const size_t kNumOfQuantumSizeBrackets = 32;
  // The sizes (the slot sizes, in bytes) of the size brackets.
  static size_t bracketSizes[kNumOfSizeBrackets];
  // The numbers of pages that are used for runs for each size bracket.
  static size_t numOfPages[kNumOfSizeBrackets];
  // The numbers of slots of the runs for each size bracket.
  static size_t numOfSlots[kNumOfSizeBrackets];
  // The header sizes in bytes of the runs for each size bracket.
  static size_t headerSizes[kNumOfSizeBrackets];
  // The byte offsets of the bulk free bit maps of the runs for each size bracket.
  static size_t bulkFreeBitMapOffsets[kNumOfSizeBrackets];
  // The byte offsets of the thread-local free bit maps of the runs for each size bracket.
  static size_t threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];

  // Initialize the run specs (the above arrays).
  static void Initialize();
  static bool initialized_;

  // Returns the byte size of the bracket size from the index.
  static size_t IndexToBracketSize(size_t idx) {
    DCHECK(idx < kNumOfSizeBrackets);
    return bracketSizes[idx];
  }
  // Returns the index of the size bracket from the bracket size.
  static size_t BracketSizeToIndex(size_t size) {
    DCHECK(16 <= size && ((size < 1 * KB && size % 16 == 0) || size == 1 * KB || size == 2 * KB));
    size_t idx;
    if (UNLIKELY(size == 1 * KB)) {
      idx = kNumOfSizeBrackets - 2;
    } else if (UNLIKELY(size == 2 * KB)) {
      idx = kNumOfSizeBrackets - 1;
    } else {
      DCHECK(size < 1 * KB);
      DCHECK_EQ(size % 16, static_cast<size_t>(0));
      idx = size / 16 - 1;
    }
    DCHECK(bracketSizes[idx] == size);
    return idx;
  }
  // Rounds up the size to the nearest bracket size.
  static size_t RoundToBracketSize(size_t size) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= 512)) {
      return RoundUp(size, 16);
    } else if (512 < size && size <= 1 * KB) {
      return 1 * KB;
    } else {
      DCHECK(1 * KB < size && size <= 2 * KB);
      return 2 * KB;
    }
  }
  // Returns the size bracket index from the byte size with rounding.
  static size_t SizeToIndex(size_t size) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= 512)) {
      return RoundUp(size, 16) / 16 - 1;
    } else if (512 < size && size <= 1 * KB) {
      return kNumOfSizeBrackets - 2;
    } else {
      DCHECK(1 * KB < size && size <= 2 * KB);
      return kNumOfSizeBrackets - 1;
    }
  }
  // A combination of SizeToIndex() and RoundToBracketSize().
  static size_t SizeToIndexAndBracketSize(size_t size, size_t* bracket_size_out) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= 512)) {
      size_t bracket_size = RoundUp(size, 16);
      *bracket_size_out = bracket_size;
      size_t idx = bracket_size / 16 - 1;
      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
      return idx;
    } else if (512 < size && size <= 1 * KB) {
      size_t bracket_size = 1024;
      *bracket_size_out = bracket_size;
      size_t idx = kNumOfSizeBrackets - 2;
      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
      return idx;
    } else {
      DCHECK(1 * KB < size && size <= 2 * KB);
      size_t bracket_size = 2048;
      *bracket_size_out = bracket_size;
      size_t idx = kNumOfSizeBrackets - 1;
      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
      return idx;
    }
  }
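  // A worked example of the bracket mapping above (illustrative only;
  // the values follow directly from the code):
  //
  //   size_t bracket_size;
  //   SizeToIndexAndBracketSize(37, &bracket_size);    // idx 2,  bracket_size == 48
  //   SizeToIndexAndBracketSize(600, &bracket_size);   // idx 32, bracket_size == 1024
  //   SizeToIndexAndBracketSize(1500, &bracket_size);  // idx 33, bracket_size == 2048
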
  // Returns the page map index from an address. Requires that the
  // address is page size aligned.
  size_t ToPageMapIndex(const void* addr) const {
    DCHECK(base_ <= addr && addr < base_ + capacity_);
    size_t byte_offset = reinterpret_cast<const byte*>(addr) - base_;
    DCHECK_EQ(byte_offset % static_cast<size_t>(kPageSize), static_cast<size_t>(0));
    return byte_offset / kPageSize;
  }
  // Returns the page map index from an address, rounding the address down to a page boundary.
  size_t RoundDownToPageMapIndex(void* addr) {
    DCHECK(base_ <= addr && addr < reinterpret_cast<byte*>(base_) + capacity_);
    return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / kPageSize;
  }
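  // For example (illustrative only): an address at
  // base_ + 2 * kPageSize + 128 lies on page 2 of the space, so
  // RoundDownToPageMapIndex() returns 2 and page_map_[2] (declared
  // below) records what that page is currently used for.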

  // A memory allocation request larger than this size is treated as a
  // large object and allocated at page granularity.
  static const size_t kLargeSizeThreshold = 2048;

  // We use thread-local runs for the size brackets whose indexes
  // are less than or equal to this index. We use shared (current)
  // runs for the rest.
  static const size_t kMaxThreadLocalSizeBracketIdx = 10;
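  // (With the 16-byte quantum brackets above, bracket index 10
  // corresponds to a 176-byte slot size, so thread-local runs serve
  // allocations of up to 176 bytes.)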

  // If true, check that the returned memory is actually zero.
  static constexpr bool kCheckZeroMemory = kIsDebugBuild;

  // If true, log verbose details of operations.
  static constexpr bool kTraceRosAlloc = false;

  struct hash_run {
    size_t operator()(const RosAlloc::Run* r) const {
      return reinterpret_cast<size_t>(r);
    }
  };

  struct eq_run {
    bool operator()(const RosAlloc::Run* r1, const RosAlloc::Run* r2) const {
      return r1 == r2;
    }
  };

 public:
  // Different page release modes.
  enum PageReleaseMode {
    kPageReleaseModeNone,        // Release no empty pages.
    kPageReleaseModeEnd,         // Release empty pages at the end of the space.
    kPageReleaseModeSize,        // Release empty page runs that are larger than the threshold.
    kPageReleaseModeSizeAndEnd,  // Release empty page runs that are larger than the threshold and
                                 // at the end of the space.
    kPageReleaseModeAll,         // Release all empty pages.
  };
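  // For example (illustrative only): under kPageReleaseModeSizeAndEnd
  // with the default 4 MB threshold, a 5 MB free page run that reaches
  // the end of the space is madvised away by FreePageRun::ReleasePages(),
  // while a 1 MB run, or a 5 MB run in the middle of the space, stays
  // mapped.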

  // The default value for page_release_size_threshold_.
  static constexpr size_t kDefaultPageReleaseSizeThreshold = 4 * MB;

 private:
  // The base address of the memory region that's managed by this allocator.
  byte* base_;

  // The footprint in bytes of the currently allocated portion of the
  // memory region.
  size_t footprint_;

  // The maximum footprint. The address, base_ + capacity_, indicates
  // the end of the memory region that's managed by this allocator.
  size_t capacity_;

  // The run sets that hold the runs whose slots are not all
  // full. non_full_runs_[i] is guarded by size_bracket_locks_[i].
  std::set<Run*> non_full_runs_[kNumOfSizeBrackets];
  // The run sets that hold the runs whose slots are all full. This is
  // debug only. full_runs_[i] is guarded by size_bracket_locks_[i].
  hash_set<Run*, hash_run, eq_run> full_runs_[kNumOfSizeBrackets];
  // The set of free pages.
  std::set<FreePageRun*> free_page_runs_ GUARDED_BY(lock_);
  // The free page run whose end address is the end of the memory
  // region that's managed by this allocator, if any.
  FreePageRun* last_free_page_run_;
  // The current runs where the allocations are first attempted for
  // the size brackets that do not use thread-local
  // runs. current_runs_[i] is guarded by size_bracket_locks_[i].
  Run* current_runs_[kNumOfSizeBrackets];
  // The mutexes, one per size bracket.
  Mutex* size_bracket_locks_[kNumOfSizeBrackets];
  // The types of page map entries.
  enum {
    kPageMapEmpty = 0,            // Not allocated.
    kPageMapRun = 1,              // The beginning of a run.
    kPageMapRunPart = 2,          // The non-beginning part of a run.
    kPageMapLargeObject = 3,      // The beginning of a large object.
    kPageMapLargeObjectPart = 4,  // The non-beginning part of a large object.
  };
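  // For example (illustrative only): a three-page run followed by a
  // free page and a two-page large object shows up in page_map_
  // (declared below) as
  //   ..., kPageMapRun, kPageMapRunPart, kPageMapRunPart, kPageMapEmpty,
  //   kPageMapLargeObject, kPageMapLargeObjectPart, ...
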
  // The table that indicates what each page is currently used for.
  std::vector<byte> page_map_ GUARDED_BY(lock_);
  // The table that indicates the size of free page runs. These sizes
  // are stored here rather than in the free page headers so that the
  // backing pages can be released.
  std::vector<size_t> free_page_run_size_map_ GUARDED_BY(lock_);
  // The global lock. Used to guard the page map, the free page set,
  // and the footprint.
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // The reader-writer lock to allow one bulk free at a time while
  // allowing multiple individual frees at the same time. Also, this
  // is used to avoid race conditions between BulkFree() and
  // RevokeThreadLocalRuns() on the bulk free bit maps.
  ReaderWriterMutex bulk_free_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // The page release mode.
  const PageReleaseMode page_release_mode_;
  // Under kPageReleaseModeSize(AndEnd), if the free page run size is
  // greater than or equal to this value, release pages.
  const size_t page_release_size_threshold_;

  // The base address of the memory region that's managed by this allocator.
  byte* Begin() { return base_; }
  // The end address of the memory region that's managed by this allocator.
  byte* End() { return base_ + capacity_; }

  // Page-granularity alloc/free.
  void* AllocPages(Thread* self, size_t num_pages, byte page_map_type)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);
  void FreePages(Thread* self, void* ptr) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Allocate/free a run slot.
  void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated)
      LOCKS_EXCLUDED(lock_);
  void FreeFromRun(Thread* self, void* ptr, Run* run)
      LOCKS_EXCLUDED(lock_);

  // Used to acquire a new/reused run for a size bracket. Used when a
  // thread-local or current run gets full.
  Run* RefillRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_);

  // The internal implementation of the non-bulk Free().
  void FreeInternal(Thread* self, void* ptr) LOCKS_EXCLUDED(lock_);

  // Allocates large objects.
  void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) LOCKS_EXCLUDED(lock_);

 public:
  RosAlloc(void* base, size_t capacity,
           PageReleaseMode page_release_mode,
           size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
  void* Alloc(Thread* self, size_t size, size_t* bytes_allocated)
      LOCKS_EXCLUDED(lock_);
  void Free(Thread* self, void* ptr)
      LOCKS_EXCLUDED(bulk_free_lock_);
  void BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
      LOCKS_EXCLUDED(bulk_free_lock_);
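
  // A minimal usage sketch (illustrative only; in practice the
  // allocator is driven by the GC and space code):
  //
  //   size_t bytes_allocated;
  //   void* obj = rosalloc->Alloc(Thread::Current(), 40, &bytes_allocated);
  //   // bytes_allocated is the rounded-up bracket size, 48 here.
  //   rosalloc->Free(Thread::Current(), obj);
  //   // Or, for a batch of pointers gathered by the GC:
  //   rosalloc->BulkFree(Thread::Current(), ptrs, num_ptrs);
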
  // Returns the size of the allocated slot for a given allocated memory chunk.
  size_t UsableSize(void* ptr);
  // Returns the size of the allocated slot for a given request size.
  size_t UsableSize(size_t bytes) {
    if (UNLIKELY(bytes > kLargeSizeThreshold)) {
      return RoundUp(bytes, kPageSize);
    } else {
      return RoundToBracketSize(bytes);
    }
  }
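  // For example (illustrative only): UsableSize(40) == 48 (the bracket
  // size), while UsableSize(3000) == RoundUp(3000, kPageSize), i.e.
  // 4096 with 4 KB pages, since requests above kLargeSizeThreshold are
  // allocated at page granularity.
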
  // Tries to reduce the current footprint by releasing the free page
  // run at the end of the memory region, if any.
  bool Trim();
  // Iterates over all the memory slots and applies the given function.
  void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                  void* arg)
      LOCKS_EXCLUDED(lock_);
  // Returns the current footprint.
  size_t Footprint() LOCKS_EXCLUDED(lock_);
  // Returns the current capacity, i.e. the maximum footprint.
  size_t FootprintLimit() LOCKS_EXCLUDED(lock_);
  // Updates the current capacity.
  void SetFootprintLimit(size_t bytes) LOCKS_EXCLUDED(lock_);
  // Releases the thread-local runs assigned to the given thread back to the common set of runs.
  void RevokeThreadLocalRuns(Thread* thread);
  // Releases the thread-local runs assigned to all the threads back to the common set of runs.
  void RevokeAllThreadLocalRuns() LOCKS_EXCLUDED(Locks::thread_list_lock_);
  // Dumps the page map for debugging.
  std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Callbacks for InspectAll that will count the number of bytes
  // allocated and objects allocated, respectively.
  static void BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
  static void ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);

  bool DoesReleaseAllPages() const {
    return page_release_mode_ == kPageReleaseModeAll;
  }

  // Verifies the allocator state for debugging.
  void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
};

}  // namespace allocator
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_