/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_
#define ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_

#include <set>
#include <stdint.h>
#include <stdlib.h>
#include <string>
#include <sys/mman.h>
#include <vector>

#include "base/mutex.h"
#include "base/logging.h"
#include "globals.h"
#include "mem_map.h"
#include "UniquePtr.h"
#include "utils.h"

// Boilerplate to use hash_map/hash_set on both host and device.
#ifdef HAVE_ANDROID_OS
#include <hash_map>
#include <hash_set>
using std::hash_map;
using std::hash_set;
#else  // HAVE_ANDROID_OS
#ifdef __DEPRECATED
#define ROSALLOC_OLD__DEPRECATED __DEPRECATED
#undef __DEPRECATED
#endif
#include <ext/hash_map>
#include <ext/hash_set>
#ifdef ROSALLOC_OLD__DEPRECATED
#define __DEPRECATED ROSALLOC_OLD__DEPRECATED
#undef ROSALLOC_OLD__DEPRECATED
#endif
using __gnu_cxx::hash_map;
using __gnu_cxx::hash_set;
#endif  // HAVE_ANDROID_OS

namespace art {

namespace gc {
namespace allocator {

// A runs-of-slots memory allocator.
class RosAlloc {
 private:
  // Represents a run of free pages.
  class FreePageRun {
   public:
    byte magic_num_;  // The magic number used for debugging only.

    bool IsFree() const {
      if (kIsDebugBuild) {
        return magic_num_ == kMagicNumFree;
      }
      return true;
    }
    size_t ByteSize(RosAlloc* rosalloc) const EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      const byte* fpr_base = reinterpret_cast<const byte*>(this);
      size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
      size_t byte_size = rosalloc->free_page_run_size_map_[pm_idx];
      DCHECK_GE(byte_size, static_cast<size_t>(0));
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      return byte_size;
    }
    void SetByteSize(RosAlloc* rosalloc, size_t byte_size)
        EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      byte* fpr_base = reinterpret_cast<byte*>(this);
      size_t pm_idx = rosalloc->ToPageMapIndex(fpr_base);
      rosalloc->free_page_run_size_map_[pm_idx] = byte_size;
    }
    void* Begin() {
      return reinterpret_cast<void*>(this);
    }
    void* End(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      byte* fpr_base = reinterpret_cast<byte*>(this);
      byte* end = fpr_base + ByteSize(rosalloc);
      return end;
    }
    bool IsLargerThanPageReleaseThreshold(RosAlloc* rosalloc)
        EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      return ByteSize(rosalloc) >= rosalloc->page_release_size_threshold_;
    }
    bool IsAtEndOfSpace(RosAlloc* rosalloc)
        EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      return reinterpret_cast<byte*>(this) + ByteSize(rosalloc) == rosalloc->base_ + rosalloc->footprint_;
    }
    bool ShouldReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      switch (rosalloc->page_release_mode_) {
        case kPageReleaseModeNone:
          return false;
        case kPageReleaseModeEnd:
          return IsAtEndOfSpace(rosalloc);
        case kPageReleaseModeSize:
          return IsLargerThanPageReleaseThreshold(rosalloc);
        case kPageReleaseModeSizeAndEnd:
          return IsLargerThanPageReleaseThreshold(rosalloc) && IsAtEndOfSpace(rosalloc);
        case kPageReleaseModeAll:
          return true;
        default:
          LOG(FATAL) << "Unexpected page release mode";
          return false;
      }
    }
    void ReleasePages(RosAlloc* rosalloc) EXCLUSIVE_LOCKS_REQUIRED(rosalloc->lock_) {
      byte* start = reinterpret_cast<byte*>(this);
      size_t byte_size = ByteSize(rosalloc);
      DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));
      bool release_pages = ShouldReleasePages(rosalloc);
      if (kIsDebugBuild) {
        // Exclude the first page that stores the magic number.
        DCHECK_GE(byte_size, static_cast<size_t>(kPageSize));
        start += kPageSize;
        byte_size -= kPageSize;
        if (byte_size > 0) {
          if (release_pages) {
            madvise(start, byte_size, MADV_DONTNEED);
          }
        }
      } else {
        if (release_pages) {
          madvise(start, byte_size, MADV_DONTNEED);
        }
      }
    }
  };

  // Represents a run of memory slots of the same size.
  //
  // A run's memory layout:
  //
  // +-------------------+
  // | magic_num         |
  // +-------------------+
  // | size_bracket_idx  |
  // +-------------------+
  // | is_thread_local   |
  // +-------------------+
  // | to_be_bulk_freed  |
  // +-------------------+
  // | top_bitmap_idx    |
  // +-------------------+
  // |                   |
  // | alloc bit map     |
  // |                   |
  // +-------------------+
  // |                   |
  // | bulk free bit map |
  // |                   |
  // +-------------------+
  // |                   |
  // | thread-local free |
  // | bit map           |
  // |                   |
  // +-------------------+
  // | padding due to    |
  // | alignment         |
  // +-------------------+
  // | slot 0            |
  // +-------------------+
  // | slot 1            |
  // +-------------------+
  // | slot 2            |
  // +-------------------+
  // ...
  // +-------------------+
  // | last slot         |
  // +-------------------+
  //
  class Run {
   public:
    byte magic_num_;                 // The magic number used for debugging.
    byte size_bracket_idx_;          // The index of the size bracket of this run.
    byte is_thread_local_;           // True if this run is used as a thread-local run.
    byte to_be_bulk_freed_;          // Used within BulkFree() to flag a run that's involved with a bulk free.
    uint32_t first_search_vec_idx_;  // The index of the first bitmap vector which may contain an available slot.
    uint32_t alloc_bit_map_[0];      // The bit map that indicates whether each slot is in use.

    // bulk_free_bit_map_[] : The bit map that is used for GC to
    // temporarily mark the slots to free without using a lock. After
    // all the slots to be freed in a run are marked, all those slots
    // get freed in bulk with one lock acquisition per run, as opposed
    // to one per slot, to minimize lock contention. This is used
    // within BulkFree().

    // thread_local_free_bit_map_[] : The bit map that is used for GC
    // to temporarily mark the slots to free in a thread-local run
    // without using a lock (without synchronizing with the thread that
    // owns the thread-local run). When the thread-local run becomes
    // full, the thread will check this bit map and update the
    // allocation bit map of the run (that is, the slots get freed).
    // Returns the byte size of the header except for the bit maps.
    static size_t fixed_header_size() {
      Run temp;
      size_t size = reinterpret_cast<byte*>(&temp.alloc_bit_map_) - reinterpret_cast<byte*>(&temp);
      DCHECK_EQ(size, static_cast<size_t>(8));
      return size;
    }
    // Returns the base address of the bulk free bit map.
    uint32_t* BulkFreeBitMap() {
      return reinterpret_cast<uint32_t*>(reinterpret_cast<byte*>(this) + bulkFreeBitMapOffsets[size_bracket_idx_]);
    }
    // Returns the base address of the thread-local free bit map.
    uint32_t* ThreadLocalFreeBitMap() {
      return reinterpret_cast<uint32_t*>(reinterpret_cast<byte*>(this) + threadLocalFreeBitMapOffsets[size_bracket_idx_]);
    }
    void* End() {
      return reinterpret_cast<byte*>(this) + kPageSize * numOfPages[size_bracket_idx_];
    }
    // Returns the number of bitmap words per run.
    size_t NumberOfBitmapVectors() const {
      return RoundUp(numOfSlots[size_bracket_idx_], 32) / 32;
    }
    void SetIsThreadLocal(bool is_thread_local) {
      is_thread_local_ = is_thread_local ? 1 : 0;
    }
    bool IsThreadLocal() const {
      return is_thread_local_ != 0;
    }
    // Frees slots in the allocation bit map according to the
    // thread-local free bit map. Used when a thread-local run becomes
    // full.
    bool MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out);
    // Frees slots in the allocation bit map according to the bulk
    // free bit map. Used in a bulk free.
    void MergeBulkFreeBitMapIntoAllocBitMap();
    // Unions the slots to be freed in the bulk free bit map into the
    // thread-local free bit map. In a bulk free, as a two-step
    // process, GC will first record all the slots to free in a run in
    // the bulk free bit map where it can write without a lock, and
    // later acquire a lock once per run to union the bits of the bulk
    // free bit map into the thread-local free bit map.
    void UnionBulkFreeBitMapToThreadLocalFreeBitMap();
    // Allocates a slot in a run.
    void* AllocSlot();
    // Frees a slot in a run. This is used in a non-bulk free.
    void FreeSlot(void* ptr);
    // Marks the slots to free in the bulk free bit map. Returns the bracket size.
    size_t MarkBulkFreeBitMap(void* ptr);
    // Marks the slots to free in the thread-local free bit map.
    void MarkThreadLocalFreeBitMap(void* ptr);
    // Returns the last-word mask: all of the bits in the last bitmap word that don't
    // correspond to valid slots are set, to optimize the allocation path.
    static uint32_t GetBitmapLastVectorMask(size_t num_slots, size_t num_vec);
    // Returns true if all the slots in the run are not in use.
    bool IsAllFree();
    // Returns true if all the slots in the run are in use.
    bool IsFull();
    // Returns true if the bulk free bit map is clean.
    bool IsBulkFreeBitmapClean();
    // Returns true if the thread local free bit map is clean.
    bool IsThreadLocalFreeBitmapClean();
    // Set the alloc_bit_map_ bits for slots that are past the end of the run.
    void SetAllocBitMapBitsForInvalidSlots();
    // Zero the run's data.
    void ZeroData();
    // Zero the run's header.
    void ZeroHeader();
    // Fill the alloc bitmap with 1s.
    void FillAllocBitMap();
    // Iterate over all the slots and apply the given function.
    void InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg), void* arg);
    // Dump the run metadata for debugging.
    std::string Dump();
    // Verify for debugging.
    void Verify(Thread* self, RosAlloc* rosalloc)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
        EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);

   private:
    // The common part of MarkBulkFreeBitMap() and MarkThreadLocalFreeBitMap(). Returns the
    // bracket size.
    size_t MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base, const char* caller_name);
    // Turns the bit map into a string for debugging.
    static std::string BitMapToStr(uint32_t* bit_map_base, size_t num_vec);
  };

  // The magic number for a run.
  static const byte kMagicNum = 42;
  // The magic number for free pages.
  static const byte kMagicNumFree = 43;
  // The number of size brackets. Sync this with the length of Thread::rosalloc_runs_.
  static const size_t kNumOfSizeBrackets = 34;
  // The number of smaller size brackets that are 16 bytes apart.
  static const size_t kNumOfQuantumSizeBrackets = 32;
  // The sizes (the slot sizes, in bytes) of the size brackets.
  static size_t bracketSizes[kNumOfSizeBrackets];
  // The numbers of pages that are used for runs for each size bracket.
  static size_t numOfPages[kNumOfSizeBrackets];
  // The numbers of slots of the runs for each size bracket.
  static size_t numOfSlots[kNumOfSizeBrackets];
  // The header sizes in bytes of the runs for each size bracket.
  static size_t headerSizes[kNumOfSizeBrackets];
  // The byte offsets of the bulk free bit maps of the runs for each size bracket.
  static size_t bulkFreeBitMapOffsets[kNumOfSizeBrackets];
  // The byte offsets of the thread-local free bit maps of the runs for each size bracket.
  static size_t threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];

  // Initialize the run specs (the above arrays).
  static void Initialize();
  static bool initialized_;

  // Returns the byte size of the bracket size from the index.
  static size_t IndexToBracketSize(size_t idx) {
    DCHECK(idx < kNumOfSizeBrackets);
    return bracketSizes[idx];
  }
  // Returns the index of the size bracket from the bracket size.
  static size_t BracketSizeToIndex(size_t size) {
    DCHECK(16 <= size && ((size < 1 * KB && size % 16 == 0) || size == 1 * KB || size == 2 * KB));
    size_t idx;
    if (UNLIKELY(size == 1 * KB)) {
      idx = kNumOfSizeBrackets - 2;
    } else if (UNLIKELY(size == 2 * KB)) {
      idx = kNumOfSizeBrackets - 1;
    } else {
      DCHECK(size < 1 * KB);
      DCHECK_EQ(size % 16, static_cast<size_t>(0));
      idx = size / 16 - 1;
    }
    DCHECK(bracketSizes[idx] == size);
    return idx;
  }
  // Rounds up the size to the nearest bracket size.
  static size_t RoundToBracketSize(size_t size) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= 512)) {
      return RoundUp(size, 16);
    } else if (512 < size && size <= 1 * KB) {
      return 1 * KB;
    } else {
      DCHECK(1 * KB < size && size <= 2 * KB);
      return 2 * KB;
    }
  }
  // Returns the size bracket index from the byte size with rounding.
  static size_t SizeToIndex(size_t size) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= 512)) {
      return RoundUp(size, 16) / 16 - 1;
    } else if (512 < size && size <= 1 * KB) {
      return kNumOfSizeBrackets - 2;
    } else {
      DCHECK(1 * KB < size && size <= 2 * KB);
      return kNumOfSizeBrackets - 1;
    }
  }
  // A combination of SizeToIndex() and RoundToBracketSize().
  static size_t SizeToIndexAndBracketSize(size_t size, size_t* bracket_size_out) {
    DCHECK(size <= kLargeSizeThreshold);
    if (LIKELY(size <= 512)) {
      size_t bracket_size = RoundUp(size, 16);
      *bracket_size_out = bracket_size;
      size_t idx = bracket_size / 16 - 1;
      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
      return idx;
    } else if (512 < size && size <= 1 * KB) {
      size_t bracket_size = 1024;
      *bracket_size_out = bracket_size;
      size_t idx = kNumOfSizeBrackets - 2;
      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
      return idx;
    } else {
      DCHECK(1 * KB < size && size <= 2 * KB);
      size_t bracket_size = 2048;
      *bracket_size_out = bracket_size;
      size_t idx = kNumOfSizeBrackets - 1;
      DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
      return idx;
    }
  }
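  // Worked examples of the mapping above (illustrative only):
  //   size 35   -> bracket size 48 (RoundUp(35, 16)), index 48 / 16 - 1 = 2
  //   size 512  -> bracket size 512,                  index 31
  //   size 600  -> bracket size 1 KB,                 index kNumOfSizeBrackets - 2 = 32
  //   size 1500 -> bracket size 2 KB,                 index kNumOfSizeBrackets - 1 = 33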
  // Returns the page map index from an address. Requires that the
  // address is page size aligned.
  size_t ToPageMapIndex(const void* addr) const {
    DCHECK(base_ <= addr && addr < base_ + capacity_);
    size_t byte_offset = reinterpret_cast<const byte*>(addr) - base_;
    DCHECK_EQ(byte_offset % static_cast<size_t>(kPageSize), static_cast<size_t>(0));
    return byte_offset / kPageSize;
  }
  // Returns the page map index from an address, rounding down (the address need not be
  // page size aligned).
  size_t RoundDownToPageMapIndex(void* addr) const {
    DCHECK(base_ <= addr && addr < reinterpret_cast<byte*>(base_) + capacity_);
    return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / kPageSize;
  }
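  // For example (illustrative only), an address equal to base_ + 5 * kPageSize maps to page map
  // index 5 with either function; RoundDownToPageMapIndex() additionally accepts unaligned
  // addresses, e.g. base_ + 5 * kPageSize + 100 also maps to index 5.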

  // A memory allocation request larger than this size is treated as a large object and allocated
  // at a page-granularity.
  static const size_t kLargeSizeThreshold = 2048;

  // We use thread-local runs for the size brackets whose indexes
  // are less than or equal to this index. We use shared (current)
  // runs for the rest.
  static const size_t kMaxThreadLocalSizeBracketIdx = 10;

  // If true, check that the returned memory is actually zero.
  static constexpr bool kCheckZeroMemory = kIsDebugBuild;

  // If true, log verbose details of operations.
  static constexpr bool kTraceRosAlloc = false;

  struct hash_run {
    size_t operator()(const RosAlloc::Run* r) const {
      return reinterpret_cast<size_t>(r);
    }
  };

  struct eq_run {
    bool operator()(const RosAlloc::Run* r1, const RosAlloc::Run* r2) const {
      return r1 == r2;
    }
  };

 public:
  // Different page release modes.
  enum PageReleaseMode {
    kPageReleaseModeNone,        // Release no empty pages.
    kPageReleaseModeEnd,         // Release empty pages at the end of the space.
    kPageReleaseModeSize,        // Release empty pages that are larger than the threshold.
    kPageReleaseModeSizeAndEnd,  // Release empty pages that are larger than the threshold and
                                 // at the end of the space.
    kPageReleaseModeAll,         // Release all empty pages.
  };

  // The default value for page_release_size_threshold_.
  static constexpr size_t kDefaultPageReleaseSizeThreshold = 4 * MB;
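  // For example (illustrative only), with the default 4 MB threshold: under kPageReleaseModeSize
  // a 6 MB free page run is released (madvised) wherever it sits in the space, under
  // kPageReleaseModeSizeAndEnd it is released only if it also reaches the current footprint
  // (the end of the allocated region), and under kPageReleaseModeEnd only its position matters.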

 private:
  // The base address of the memory region that's managed by this allocator.
  byte* base_;

  // The footprint in bytes of the currently allocated portion of the
  // memory region.
  size_t footprint_;

  // The maximum footprint. The address, base_ + capacity_, indicates
  // the end of the memory region that's currently managed by this allocator.
  size_t capacity_;

  // The maximum capacity. The address, base_ + max_capacity_, indicates
  // the end of the memory region that's ever managed by this allocator.
  size_t max_capacity_;

  // The run sets that hold the runs whose slots are not all
  // full. non_full_runs_[i] is guarded by size_bracket_locks_[i].
  std::set<Run*> non_full_runs_[kNumOfSizeBrackets];
  // The run sets that hold the runs whose slots are all full. This is
  // debug only. full_runs_[i] is guarded by size_bracket_locks_[i].
  hash_set<Run*, hash_run, eq_run> full_runs_[kNumOfSizeBrackets];
  // The set of free pages.
  std::set<FreePageRun*> free_page_runs_ GUARDED_BY(lock_);
  // The dedicated full run; it is always full and shared by all threads when revoking happens.
  // This is an optimization since it enables us to avoid a null check for revoked runs.
  static Run* dedicated_full_run_;
  // Using size_t to ensure that it is at least word aligned.
  static size_t dedicated_full_run_storage_[];
  // The current runs where the allocations are first attempted for
  // the size brackets that do not use thread-local
  // runs. current_runs_[i] is guarded by size_bracket_locks_[i].
  Run* current_runs_[kNumOfSizeBrackets];
  // The mutexes, one per size bracket.
  Mutex* size_bracket_locks_[kNumOfSizeBrackets];
  // Bracket lock names (since locks only have char* names).
  std::string size_bracket_lock_names[kNumOfSizeBrackets];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700482 // The types of page map entries.
483 enum {
484 kPageMapEmpty = 0, // Not allocated.
485 kPageMapRun = 1, // The beginning of a run.
486 kPageMapRunPart = 2, // The non-beginning part of a run.
487 kPageMapLargeObject = 3, // The beginning of a large object.
488 kPageMapLargeObjectPart = 4, // The non-beginning part of a large object.
489 };
490 // The table that indicates what pages are currently used for.
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -0800491 byte* page_map_; // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
492 size_t page_map_size_;
493 size_t max_page_map_size_;
494 UniquePtr<MemMap> page_map_mem_map_;
495
  // The table that indicates the byte sizes of free page runs. The sizes
  // are stored here, rather than in the free page run headers, so that the
  // backing pages can be released.
  std::vector<size_t> free_page_run_size_map_ GUARDED_BY(lock_);
  // The global lock. Used to guard the page map, the free page set,
  // and the footprint.
  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  // The reader-writer lock to allow one bulk free at a time while
  // allowing multiple individual frees at the same time. Also, this
  // is used to avoid race conditions between BulkFree() and
  // RevokeThreadLocalRuns() on the bulk free bitmaps.
  ReaderWriterMutex bulk_free_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // The page release mode.
  const PageReleaseMode page_release_mode_;
  // Under kPageReleaseModeSize(AndEnd), if the free page run size is
  // greater than or equal to this value, release pages.
  const size_t page_release_size_threshold_;

  // The base address of the memory region that's managed by this allocator.
  byte* Begin() { return base_; }
  // The end address of the memory region that's managed by this allocator.
  byte* End() { return base_ + capacity_; }

  // Page-granularity alloc/free.
  void* AllocPages(Thread* self, size_t num_pages, byte page_map_type)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);
  // Returns how many bytes were freed.
  size_t FreePages(Thread* self, void* ptr, bool already_zero) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Allocate/free a run slot.
  void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated)
      LOCKS_EXCLUDED(lock_);
  // Returns the bracket size.
  size_t FreeFromRun(Thread* self, void* ptr, Run* run)
      LOCKS_EXCLUDED(lock_);

  // Used to allocate a new thread-local run for a size bracket.
  Run* AllocRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_);

  // Used to acquire a new/reused run for a size bracket. Used when a
  // thread-local or current run gets full.
  Run* RefillRun(Thread* self, size_t idx) LOCKS_EXCLUDED(lock_);

  // The internal implementation of non-bulk Free().
  size_t FreeInternal(Thread* self, void* ptr) LOCKS_EXCLUDED(lock_);

  // Allocates large objects.
  void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) LOCKS_EXCLUDED(lock_);

 public:
  RosAlloc(void* base, size_t capacity, size_t max_capacity,
           PageReleaseMode page_release_mode,
           size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
  ~RosAlloc();
  void* Alloc(Thread* self, size_t size, size_t* bytes_allocated)
      LOCKS_EXCLUDED(lock_);
  size_t Free(Thread* self, void* ptr)
      LOCKS_EXCLUDED(bulk_free_lock_);
  size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
      LOCKS_EXCLUDED(bulk_free_lock_);
  // Returns the size of the allocated slot for a given allocated memory chunk.
  size_t UsableSize(void* ptr);
  // Returns the size of the allocated slot for a given size.
  size_t UsableSize(size_t bytes) {
    if (UNLIKELY(bytes > kLargeSizeThreshold)) {
      return RoundUp(bytes, kPageSize);
    } else {
      return RoundToBracketSize(bytes);
    }
  }
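  // A minimal usage sketch (illustrative only; in ART, RosAlloc is normally driven through
  // gc::space::RosAllocSpace rather than called directly):
  //
  //   size_t bytes_allocated = 0;
  //   void* obj = rosalloc->Alloc(self, 24, &bytes_allocated);
  //   // A 24-byte request falls into the 32-byte bracket, so bytes_allocated and
  //   // UsableSize(obj) are both 32.
  //   rosalloc->Free(self, obj);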
  // Try to reduce the current footprint by releasing the free page
  // run at the end of the memory region, if any.
  bool Trim();
  // Iterates over all the memory slots and applies the given function.
  void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                  void* arg)
      LOCKS_EXCLUDED(lock_);
  // Release empty pages.
  size_t ReleasePages() LOCKS_EXCLUDED(lock_);
  // Returns the current footprint.
  size_t Footprint() LOCKS_EXCLUDED(lock_);
  // Returns the current capacity, maximum footprint.
  size_t FootprintLimit() LOCKS_EXCLUDED(lock_);
  // Update the current capacity.
  void SetFootprintLimit(size_t bytes) LOCKS_EXCLUDED(lock_);
  // Releases the thread-local runs assigned to the given thread back to the common set of runs.
  void RevokeThreadLocalRuns(Thread* thread);
  // Releases the thread-local runs assigned to all the threads back to the common set of runs.
  void RevokeAllThreadLocalRuns() LOCKS_EXCLUDED(Locks::thread_list_lock_);
  // Assert the thread local runs of a thread are revoked.
  void AssertThreadLocalRunsAreRevoked(Thread* thread);
  // Assert all the thread local runs are revoked.
  void AssertAllThreadLocalRunsAreRevoked() LOCKS_EXCLUDED(Locks::thread_list_lock_);
  // Dumps the page map for debugging.
  std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_);
  static Run* GetDedicatedFullRun() {
    return dedicated_full_run_;
  }

  // Callbacks for InspectAll that will count the number of bytes
  // allocated and objects allocated, respectively.
  static void BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);
  static void ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg);

  bool DoesReleaseAllPages() const {
    return page_release_mode_ == kPageReleaseModeAll;
  }

  // Verify for debugging.
  void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
};

}  // namespace allocator
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_