/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base/mutex-inl.h"
#include "thread.h"
#include "thread_list.h"
#include "rosalloc.h"

#include <map>
#include <list>
#include <vector>

namespace art {
namespace gc {
namespace allocator {

// If true, log verbose details of operations.
static const bool kTraceRosAlloc = false;
// If true, check that the returned memory is actually zero.
static const bool kCheckZeroMemory = kIsDebugBuild;

extern "C" void* art_heap_rosalloc_morecore(RosAlloc* rosalloc, intptr_t increment);

size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
size_t RosAlloc::bulkFreeBitMapOffsets[kNumOfSizeBrackets];
size_t RosAlloc::threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;

RosAlloc::RosAlloc(void* base, size_t capacity)
    : base_(reinterpret_cast<byte*>(base)), footprint_(capacity),
      capacity_(capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock) {
  DCHECK(RoundUp(capacity, kPageSize) == capacity);
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << (intptr_t)base_ << ", end="
             << std::hex << (intptr_t)(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_;
  memset(current_runs_, 0, sizeof(current_runs_));
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_locks_[i] = new Mutex("a rosalloc size bracket lock",
                                       kRosAllocBracketLock);
  }
  size_t num_of_pages = capacity_ / kPageSize;
  page_map_.resize(num_of_pages);
  free_page_run_size_map_.resize(num_of_pages);

  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  free_pages->ReleasePages(this);
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}

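// Illustrative sketch (not part of the allocator): the constructor above
// expects a page-aligned, page-multiple region obtained elsewhere. A
// hypothetical caller owning a mapping `mem_map` might do:
//
//   void* base = mem_map->Begin();    // page-aligned base.
//   size_t capacity = 8 * MB;         // a multiple of kPageSize.
//   RosAlloc* rosalloc = new RosAlloc(base, capacity);
//
// On construction, the whole [base, base + capacity) range becomes a single
// FreePageRun in free_page_runs_, and footprint_ starts out equal to
// capacity_.
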
void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
  lock_.AssertHeld(self);
  DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
  FreePageRun* res = NULL;
  size_t req_byte_size = num_pages * kPageSize;
  // Find the lowest address free page run that's large enough.
  for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
    FreePageRun* fpr = *it;
    DCHECK(fpr->IsFree());
    size_t fpr_byte_size = fpr->ByteSize(this);
    DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
    if (req_byte_size <= fpr_byte_size) {
      // Found one.
      free_page_runs_.erase(it++);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Don't need to call madvise on remainder here.
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
      break;
    } else {
      ++it;
    }
  }

  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == NULL && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = NULL;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK(page_map_[ToPageMapIndex(last_free_page_run)] == kPageMapEmpty);
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_.size(), new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_.resize(new_num_of_pages);
      free_page_run_size_map_.resize(new_num_of_pages);
      art_heap_rosalloc_morecore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK(last_free_page_run->End(this) == base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK(*free_page_runs_.rbegin() == new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != NULL);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != NULL)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK(page_map_[page_map_idx + i] == kPageMapEmpty);
    }
    switch (page_map_type) {
    case kPageMapRun:
      page_map_[page_map_idx] = kPageMapRun;
      for (size_t i = 1; i < num_pages; i++) {
        page_map_[page_map_idx + i] = kPageMapRunPart;
      }
      break;
    case kPageMapLargeObject:
      page_map_[page_map_idx] = kPageMapLargeObject;
      for (size_t i = 1; i < num_pages; i++) {
        page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
      }
      break;
    default:
      LOG(FATAL) << "Unreachable - page map type: " << page_map_type;
      break;
    }
    if (kIsDebugBuild) {
      // Clear the first page which isn't madvised away in the debug
      // build for the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : NULL";
  }
  return nullptr;
}

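// Worked example of the first-fit path above (a sketch with hypothetical
// numbers): with kPageSize == 4096, a request for num_pages == 3 against a
// free_page_runs_ holding one 8-page run erases that run, splits it into a
// 3-page run (returned as `res`) and a 5-page remainder that is reinserted
// into free_page_runs_, then marks the page map entries as
// {kPageMapRun, kPageMapRunPart, kPageMapRunPart} for a run allocation.
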
void RosAlloc::FreePages(Thread* self, void* ptr) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK(pm_idx < page_map_.size());
  byte pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  byte pm_part_type;
  switch (pm_type) {
  case kPageMapRun:
    pm_part_type = kPageMapRunPart;
    break;
  case kPageMapLargeObject:
    pm_part_type = kPageMapLargeObjectPart;
    break;
  default:
    pm_part_type = kPageMapEmpty;
    LOG(FATAL) << "Unreachable - RosAlloc::FreePages() : " << "pm_idx=" << pm_idx << ", pm_type="
               << static_cast<int>(pm_type) << ", ptr=" << std::hex
               << reinterpret_cast<intptr_t>(ptr);
    return;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_.size();
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, num_pages * kPageSize);
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_.size() : ToPageMapIndex(fpr->End(this))) << "]";
    }
    auto higher_it = free_page_runs_.upper_bound(fpr);
    if (higher_it != free_page_runs_.end()) {
      for (auto it = higher_it; it != free_page_runs_.end(); ) {
        FreePageRun* h = *it;
        DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                    << (h->End(this) == End() ? page_map_.size() : ToPageMapIndex(h->End(this))) << "]";
        }
        if (fpr->End(this) == h->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it++);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(h)
                      << " from free_page_runs_";
          }
          fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
          DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
      }
    }
    // Try to coalesce in the lower address direction.
    auto lower_it = free_page_runs_.upper_bound(fpr);
    if (lower_it != free_page_runs_.begin()) {
      --lower_it;
      for (auto it = lower_it; ; ) {
        // We want to try to coalesce with the first element but
        // there's no "<=" operator for the iterator.
        bool to_exit_loop = it == free_page_runs_.begin();

        FreePageRun* l = *it;
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                    << (l->End(this) == End() ? page_map_.size() : ToPageMapIndex(l->End(this))) << "]";
        }
        if (l->End(this) == fpr->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it--);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(l)
                      << " from free_page_runs_";
          }
          l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
          DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
          fpr = l;
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
        if (to_exit_loop) {
          break;
        }
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  fpr->ReleasePages(this);
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
}

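// Coalescing example for FreePages() (illustrative only): freeing a 2-page
// run at pages [5, 6] when free_page_runs_ already holds runs covering
// pages [3, 4] and [7, 8] first merges [7, 8] into the freed run (higher
// address direction), then merges the result into [3, 4] (lower address
// direction), leaving one 6-page FreePageRun covering pages [3, 8].
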
void* RosAlloc::Alloc(Thread* self, size_t size, size_t* bytes_allocated) {
  if (UNLIKELY(size > kLargeSizeThreshold)) {
    size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
    void* r;
    {
      MutexLock mu(self, lock_);
      r = AllocPages(self, num_pages, kPageMapLargeObject);
    }
    if (bytes_allocated != NULL) {
      *bytes_allocated = num_pages * kPageSize;
    }
    if (kTraceRosAlloc) {
      if (r != NULL) {
        LOG(INFO) << "RosAlloc::Alloc() : (large obj) 0x" << std::hex << reinterpret_cast<intptr_t>(r)
                  << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
                  << "(" << std::dec << (num_pages * kPageSize) << ")";
      } else {
        LOG(INFO) << "RosAlloc::Alloc() : (large obj) NULL";
      }
    }
    // Check if the returned memory is really all zero.
    if (kCheckZeroMemory && r != NULL) {
      byte* bytes = reinterpret_cast<byte*>(r);
      for (size_t i = 0; i < size; ++i) {
        DCHECK_EQ(bytes[i], 0);
      }
    }
    return r;
  }
  void* m = AllocFromRun(self, size, bytes_allocated);
  // Check if the returned memory is really all zero.
  if (kCheckZeroMemory && m != NULL) {
    byte* bytes = reinterpret_cast<byte*>(m);
    for (size_t i = 0; i < size; ++i) {
      DCHECK_EQ(bytes[i], 0);
    }
  }
  return m;
}

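// Usage sketch for the two paths above (hedged; `self` and `rosalloc` are
// assumed to exist in the caller):
//
//   size_t bytes_allocated = 0;
//   void* obj = rosalloc->Alloc(self, 40, &bytes_allocated);
//   // 40 <= kLargeSizeThreshold, so this goes through AllocFromRun() and
//   // rounds up to the 48-byte bracket: bytes_allocated == 48.
//   rosalloc->Free(self, obj);
//
// A request larger than kLargeSizeThreshold instead takes the AllocPages()
// path and reports a page-multiple bytes_allocated.
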
void RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK(base_ <= ptr && ptr < base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  bool free_from_run = false;
  Run* run = NULL;
  {
    MutexLock mu(self, lock_);
    DCHECK(pm_idx < page_map_.size());
    byte page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_[pm_idx]) {
    case kPageMapEmpty:
      LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
      return;
    case kPageMapLargeObject:
      FreePages(self, ptr);
      return;
    case kPageMapLargeObjectPart:
      LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
      return;
    case kPageMapRun:
    case kPageMapRunPart: {
      free_from_run = true;
      size_t pi = pm_idx;
      DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
      // Find the beginning of the run.
      while (page_map_[pi] != kPageMapRun) {
        pi--;
        DCHECK(pi < capacity_ / kPageSize);
      }
      DCHECK(page_map_[pi] == kPageMapRun);
      run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
      DCHECK(run->magic_num_ == kMagicNum);
      break;
    }
    default:
      LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
      return;
    }
  }
  if (LIKELY(free_from_run)) {
    DCHECK(run != NULL);
    FreeFromRun(self, ptr, run);
  }
}

void RosAlloc::Free(Thread* self, void* ptr) {
  ReaderMutexLock rmu(self, bulk_free_lock_);
  FreeInternal(self, ptr);
}

RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  Run* new_run;
  size_t num_pages = numOfPages[idx];
  // Get the lowest address non-full run from the binary tree.
  Run* temp = NULL;
  std::set<Run*>* bt = &non_full_runs_[idx];
  std::set<Run*>::iterator found = bt->lower_bound(temp);
  if (found != bt->end()) {
    // If there's one, use it as the current run.
    Run* non_full_run = *found;
    DCHECK(non_full_run != NULL);
    new_run = non_full_run;
    DCHECK_EQ(new_run->is_thread_local_, 0);
    bt->erase(found);
    DCHECK_EQ(non_full_run->is_thread_local_, 0);
  } else {
    // If there's none, allocate a new run and use it as the
    // current run.
    {
      MutexLock mu(self, lock_);
      new_run = reinterpret_cast<Run*>(AllocPages(self, num_pages, kPageMapRun));
    }
    if (new_run == NULL) {
      return NULL;
    }
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    new_run->top_slot_idx_ = 0;
    new_run->ClearBitMaps();
    new_run->to_be_bulk_freed_ = false;
  }
  return new_run;
}

void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK(size <= kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK(size <= bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);

  void* slot_addr;

  if (LIKELY(idx <= kMaxThreadLocalSizeBracketIdx)) {
    // Use a thread-local run.
    Run* thread_local_run = reinterpret_cast<Run*>(self->rosalloc_runs_[idx]);
    if (UNLIKELY(thread_local_run == NULL)) {
      MutexLock mu(self, *size_bracket_locks_[idx]);
      thread_local_run = RefillRun(self, idx);
      if (UNLIKELY(thread_local_run == NULL)) {
        return NULL;
      }
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      thread_local_run->is_thread_local_ = 1;
      self->rosalloc_runs_[idx] = thread_local_run;
      DCHECK(!thread_local_run->IsFull());
    }

    DCHECK(thread_local_run != NULL);
    DCHECK_NE(thread_local_run->is_thread_local_, 0);
    slot_addr = thread_local_run->AllocSlot();

    if (UNLIKELY(slot_addr == NULL)) {
      // The run got full. Try to free slots.
      DCHECK(thread_local_run->IsFull());
      MutexLock mu(self, *size_bracket_locks_[idx]);
      bool is_all_free_after_merge;
      if (thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&is_all_free_after_merge)) {
        // Some slot got freed. Keep it.
        DCHECK(!thread_local_run->IsFull());
        DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
        if (is_all_free_after_merge) {
          // Reinstate the bump index mode if it's all free.
          DCHECK_EQ(thread_local_run->top_slot_idx_, numOfSlots[idx]);
          thread_local_run->top_slot_idx_ = 0;
        }
      } else {
        // No slots got freed. Try to refill the thread-local run.
        DCHECK(thread_local_run->IsFull());
        self->rosalloc_runs_[idx] = NULL;
        thread_local_run->is_thread_local_ = 0;
        if (kIsDebugBuild) {
          full_runs_[idx].insert(thread_local_run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(thread_local_run)
                      << " into full_runs_[" << std::dec << idx << "]";
          }
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
        thread_local_run = RefillRun(self, idx);
        if (UNLIKELY(thread_local_run == NULL)) {
          return NULL;
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
        thread_local_run->is_thread_local_ = 1;
        self->rosalloc_runs_[idx] = thread_local_run;
        DCHECK(!thread_local_run->IsFull());
      }

      DCHECK(thread_local_run != NULL);
      DCHECK(!thread_local_run->IsFull());
      DCHECK_NE(thread_local_run->is_thread_local_, 0);
      slot_addr = thread_local_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != NULL);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  } else {
    // Use the (shared) current run.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* current_run = current_runs_[idx];
    if (UNLIKELY(current_run == NULL)) {
      current_run = RefillRun(self, idx);
      if (UNLIKELY(current_run == NULL)) {
        return NULL;
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
      current_run->is_thread_local_ = 0;
      current_runs_[idx] = current_run;
      DCHECK(!current_run->IsFull());
    }
    DCHECK(current_run != NULL);
    slot_addr = current_run->AllocSlot();
    if (UNLIKELY(slot_addr == NULL)) {
      // The current run got full. Try to refill it.
      DCHECK(current_run->IsFull());
      current_runs_[idx] = NULL;
      if (kIsDebugBuild) {
        // Insert it into full_runs and set the current run to NULL.
        full_runs_[idx].insert(current_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(current_run)
                    << " into full_runs_[" << std::dec << idx << "]";
        }
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
      current_run = RefillRun(self, idx);
      if (UNLIKELY(current_run == NULL)) {
        return NULL;
      }
      DCHECK(current_run != NULL);
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
      current_run->is_thread_local_ = 0;
      current_runs_[idx] = current_run;
      DCHECK(!current_run->IsFull());
      slot_addr = current_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != NULL);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  }
  if (LIKELY(bytes_allocated != NULL)) {
    *bytes_allocated = bracket_size;
  }
  memset(slot_addr, 0, size);
  return slot_addr;
}

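// Bracket mapping example for AllocFromRun() (illustrative): a 40-byte
// request maps to idx == 2, since bracketSizes[i] == 16 * (i + 1) for the
// small brackets, so bracket_size == 48. When
// idx <= kMaxThreadLocalSizeBracketIdx the slot comes from
// self->rosalloc_runs_[2] with no locking on the fast path; otherwise it
// comes from the shared current_runs_[idx] under size_bracket_locks_[idx].
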
void RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
  DCHECK(run->magic_num_ == kMagicNum);
  DCHECK(run < ptr && ptr < run->End());
  size_t idx = run->size_bracket_idx_;
  MutexLock mu(self, *size_bracket_locks_[idx]);
  bool run_was_full = false;
  if (kIsDebugBuild) {
    run_was_full = run->IsFull();
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
  }
  if (LIKELY(run->is_thread_local_ != 0)) {
    // It's a thread-local run. Just mark the thread-local free bit map and return.
    DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->MarkThreadLocalFreeBitMap(ptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run);
    }
    // A thread local run will be kept as a thread local even if it's become all free.
    return;
  }
  // Free the slot in the run.
  run->FreeSlot(ptr);
  std::set<Run*>* non_full_runs = &non_full_runs_[idx];
  if (run->IsAllFree()) {
    // It has just become completely free. Free the pages of this run.
    std::set<Run*>::iterator pos = non_full_runs->find(run);
    if (pos != non_full_runs->end()) {
      non_full_runs->erase(pos);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
      }
    }
    if (run == current_runs_[idx]) {
      current_runs_[idx] = NULL;
    }
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    {
      MutexLock mu(self, lock_);
      FreePages(self, run);
    }
  } else {
    // It is not completely free. If it wasn't the current run or
    // already in the non-full run set (i.e., it was full) insert it
    // into the non-full run set.
    if (run != current_runs_[idx]) {
      hash_set<Run*, hash_run, eq_run>* full_runs =
          kIsDebugBuild ? &full_runs_[idx] : NULL;
      std::set<Run*>::iterator pos = non_full_runs->find(run);
      if (pos == non_full_runs->end()) {
        DCHECK(run_was_full);
        DCHECK(full_runs->find(run) != full_runs->end());
        if (kIsDebugBuild) {
          full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run) << " from full_runs_";
          }
        }
        non_full_runs->insert(run);
        DCHECK(!run->IsFull());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
}

void RosAlloc::Run::Dump() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  std::string bit_map_str;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = alloc_bit_map_[v];
    if (v != num_vec - 1) {
      bit_map_str.append(StringPrintf("%x-", vec));
    } else {
      bit_map_str.append(StringPrintf("%x", vec));
    }
  }
  LOG(INFO) << "Run : " << std::hex << reinterpret_cast<intptr_t>(this)
            << std::dec << ", idx=" << idx << ", bit_map=" << bit_map_str;
}

void* RosAlloc::Run::AllocSlot() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  DCHECK_LE(top_slot_idx_, num_slots);
  if (LIKELY(top_slot_idx_ < num_slots)) {
    // If it's in bump index mode, grab the top slot and increment the top index.
    size_t slot_idx = top_slot_idx_;
    byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
    }
    top_slot_idx_++;
    size_t vec_idx = slot_idx / 32;
    size_t vec_off = slot_idx % 32;
    uint32_t* vec = &alloc_bit_map_[vec_idx];
    DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
    *vec |= 1 << vec_off;
    DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
    return slot_addr;
  }
  // Not in bump index mode. Search the alloc bit map for an empty slot.
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slot_idx = 0;
  bool found_slot = false;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t* vecp = &alloc_bit_map_[v];
    uint32_t ffz1 = __builtin_ffs(~*vecp);
    uint32_t ffz;
    // TODO: Use LIKELY or UNLIKELY here?
    if (LIKELY(ffz1 > 0 && (ffz = ffz1 - 1) + v * 32 < num_slots)) {
      // Found an empty slot. Set the bit.
      DCHECK_EQ((*vecp & (1 << ffz)), static_cast<uint32_t>(0));
      *vecp |= (1 << ffz);
      DCHECK_NE((*vecp & (1 << ffz)), static_cast<uint32_t>(0));
      slot_idx = ffz + v * 32;
      found_slot = true;
      break;
    }
  }
  if (LIKELY(found_slot)) {
    byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
    }
    return slot_addr;
  }
  return NULL;
}

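// Bit map index arithmetic used above, by example: slot_idx == 37 gives
// vec_idx == 37 / 32 == 1 and vec_off == 37 % 32 == 5, so the slot is
// tracked by bit (1 << 5) of alloc_bit_map_[1]. Note that __builtin_ffs()
// is 1-based, hence the ffz1 - 1 adjustment when scanning for a zero bit.
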
inline void RosAlloc::Run::FreeSlot(void* ptr) {
  DCHECK_EQ(is_thread_local_, 0);
  byte idx = size_bracket_idx_;
  size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
  DCHECK(slot_idx < numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK(vec_idx < num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &alloc_bit_map_[vec_idx];
  DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  *vec &= ~(1 << vec_off);
  DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::FreeSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out) {
  DCHECK_NE(is_thread_local_, 0);
  // Free slots in the alloc bit map based on the thread local free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  bool changed = false;
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* tl_free_vecp = &thread_local_free_bit_map()[0];
  bool is_all_free_after = true;
  for (size_t v = 0; v < num_vec; v++, vecp++, tl_free_vecp++) {
    uint32_t tl_free_vec = *tl_free_vecp;
    uint32_t vec_before = *vecp;
    uint32_t vec_after;
    if (tl_free_vec != 0) {
      vec_after = vec_before & ~tl_free_vec;
      *vecp = vec_after;
      changed = true;
      *tl_free_vecp = 0;  // clear the thread local free bit map.
    } else {
      vec_after = vec_before;
    }
    if (vec_after != 0) {
      is_all_free_after = false;
    }
    DCHECK_EQ(*tl_free_vecp, static_cast<uint32_t>(0));
  }
  *is_all_free_after_out = is_all_free_after;
  // Return true if at least one bit was set in the thread-local free
  // bit map, that is, at least one bit in the alloc bit map changed.
  return changed;
}

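// Merge example (illustrative): for a vector with vec_before == 0b1111 and
// tl_free_vec == 0b0101, the merge stores vec_after == 0b1010 in the alloc
// bit map, clears the thread-local free vector, and reports changed == true;
// is_all_free_after ends up true only if every resulting vector is zero.
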
inline void RosAlloc::Run::MergeBulkFreeBitMapIntoAllocBitMap() {
  DCHECK_EQ(is_thread_local_, 0);
  // Free slots in the alloc bit map based on the bulk free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* free_vecp = &bulk_free_bit_map()[0];
  for (size_t v = 0; v < num_vec; v++, vecp++, free_vecp++) {
    uint32_t free_vec = *free_vecp;
    if (free_vec != 0) {
      *vecp &= ~free_vec;
      *free_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*free_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::UnionBulkFreeBitMapToThreadLocalFreeBitMap() {
  DCHECK_NE(is_thread_local_, 0);
  // Union the thread local bit map with the bulk free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  uint32_t* to_vecp = &thread_local_free_bit_map()[0];
  uint32_t* from_vecp = &bulk_free_bit_map()[0];
  for (size_t v = 0; v < num_vec; v++, to_vecp++, from_vecp++) {
    uint32_t from_vec = *from_vecp;
    if (from_vec != 0) {
      *to_vecp |= from_vec;
      *from_vecp = 0;  // clear the from free bit map.
    }
    DCHECK_EQ(*from_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::MarkThreadLocalFreeBitMap(void* ptr) {
  DCHECK_NE(is_thread_local_, 0);
  MarkFreeBitMapShared(ptr, thread_local_free_bit_map(), "MarkThreadLocalFreeBitMap");
}

inline void RosAlloc::Run::MarkBulkFreeBitMap(void* ptr) {
  MarkFreeBitMapShared(ptr, bulk_free_bit_map(), "MarkBulkFreeBitMap");
}

inline void RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
                                                const char* caller_name) {
  byte idx = size_bracket_idx_;
  size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
  DCHECK(slot_idx < numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK(vec_idx < num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &free_bit_map_base[vec_idx];
  DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  *vec |= 1 << vec_off;
  DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : 0x" << std::hex
              << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

inline bool RosAlloc::Run::IsAllFree() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = alloc_bit_map_[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsFull() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK(num_slots >= slots);
    uint32_t vec = alloc_bit_map_[v];
    uint32_t mask = (num_slots - slots >= 32) ? static_cast<uint32_t>(-1)
        : (1 << (num_slots - slots)) - 1;
    DCHECK(num_slots - slots >= 32 ? mask == static_cast<uint32_t>(-1) : true);
    if (vec != mask) {
      return false;
    }
  }
  return true;
}

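// Mask construction in IsFull(), by example: with num_slots == 40, the
// first vector must equal 0xffffffff (32 in-use slots) and the second must
// equal (1 << 8) - 1 == 0xff (the remaining 8 slots); any other value means
// some slot is free, so the run is not full.
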
inline void RosAlloc::Run::ClearBitMaps() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  // The alloc bit map, bulk free bit map and thread-local free bit map are
  // laid out contiguously in the header, so one memset clears all three.
  memset(alloc_bit_map_, 0, sizeof(uint32_t) * num_vec * 3);
}

void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                                    void* arg) {
  size_t idx = size_bracket_idx_;
  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  DCHECK_EQ(slot_base + num_slots * bracket_size, reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize);
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK(num_slots >= slots);
    uint32_t vec = alloc_bit_map_[v];
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      byte* slot_addr = slot_base + (slots + i) * bracket_size;
      if (is_allocated) {
        handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
      } else {
        handler(slot_addr, slot_addr + bracket_size, 0, arg);
      }
    }
  }
}

void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
  if (false) {
    // Used only to test Free() as GC uses only BulkFree().
    for (size_t i = 0; i < num_ptrs; ++i) {
      FreeInternal(self, ptrs[i]);
    }
    return;
  }

  WriterMutexLock wmu(self, bulk_free_lock_);

  // First mark slots to free in the bulk free bit map without locking the
  // size bracket locks. On host, hash_set is faster than vector + flag.
#ifdef HAVE_ANDROID_OS
  std::vector<Run*> runs;
#else
  hash_set<Run*, hash_run, eq_run> runs;
#endif
  {
    for (size_t i = 0; i < num_ptrs; i++) {
      void* ptr = ptrs[i];
      ptrs[i] = NULL;
      DCHECK(base_ <= ptr && ptr < base_ + footprint_);
      size_t pm_idx = RoundDownToPageMapIndex(ptr);
      bool free_from_run = false;
      Run* run = NULL;
      {
        MutexLock mu(self, lock_);
        DCHECK(pm_idx < page_map_.size());
        byte page_map_entry = page_map_[pm_idx];
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                    << std::dec << pm_idx
                    << ", page_map_entry=" << static_cast<int>(page_map_entry);
        }
        if (LIKELY(page_map_entry == kPageMapRun)) {
          free_from_run = true;
          run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
          DCHECK(run->magic_num_ == kMagicNum);
        } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
          free_from_run = true;
          size_t pi = pm_idx;
          DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
          // Find the beginning of the run.
          while (page_map_[pi] != kPageMapRun) {
            pi--;
            DCHECK(pi < capacity_ / kPageSize);
          }
          DCHECK(page_map_[pi] == kPageMapRun);
          run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
          DCHECK(run->magic_num_ == kMagicNum);
        } else if (page_map_entry == kPageMapLargeObject) {
          FreePages(self, ptr);
        } else {
          LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
        }
      }
      if (LIKELY(free_from_run)) {
        DCHECK(run != NULL);
        // Set the bit in the bulk free bit map.
        run->MarkBulkFreeBitMap(ptr);
#ifdef HAVE_ANDROID_OS
        if (!run->to_be_bulk_freed_) {
          run->to_be_bulk_freed_ = true;
          runs.push_back(run);
        }
#else
        runs.insert(run);
#endif
      }
    }
  }

  // Now, iterate over the affected runs and update the alloc bit map
  // based on the bulk free bit map (for non-thread-local runs) and
  // union the bulk free bit map into the thread-local free bit map
  // (for thread-local runs.)
#ifdef HAVE_ANDROID_OS
  typedef std::vector<Run*>::iterator It;
#else
  typedef hash_set<Run*, hash_run, eq_run>::iterator It;
#endif
  for (It it = runs.begin(); it != runs.end(); ++it) {
    Run* run = *it;
#ifdef HAVE_ANDROID_OS
    DCHECK(run->to_be_bulk_freed_);
    run->to_be_bulk_freed_ = false;
#endif
    size_t idx = run->size_bracket_idx_;
    MutexLock mu(self, *size_bracket_locks_[idx]);
    if (run->is_thread_local_ != 0) {
      DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
      DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
      run->UnionBulkFreeBitMapToThreadLocalFreeBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(run);
      }
      DCHECK_NE(run->is_thread_local_, 0);
      // A thread local run will be kept as a thread local even if
      // it's become all free.
    } else {
      bool run_was_full = run->IsFull();
      run->MergeBulkFreeBitMapIntoAllocBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run);
      }
      // Check if the run should be moved to non_full_runs_ or
      // free_page_runs_.
      std::set<Run*>* non_full_runs = &non_full_runs_[idx];
      hash_set<Run*, hash_run, eq_run>* full_runs =
          kIsDebugBuild ? &full_runs_[idx] : NULL;
      if (run->IsAllFree()) {
        // It has just become completely free. Free the pages of the
        // run.
        bool run_was_current = run == current_runs_[idx];
        if (run_was_current) {
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          // If it was a current run, reuse it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only.)
          if (kIsDebugBuild) {
            hash_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
            DCHECK(pos != full_runs->end());
            full_runs->erase(pos);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
            DCHECK(full_runs->find(run) == full_runs->end());
          }
        } else {
          // If it was in a non full run set, remove it from the set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
          non_full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " from non_full_runs_";
          }
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
        }
        if (!run_was_current) {
          MutexLock mu(self, lock_);
          FreePages(self, run);
        }
      } else {
        // It is not completely free. If it wasn't the current run or
        // already in the non-full run set (i.e., it was full) insert
        // it into the non-full run set.
        if (run == current_runs_[idx]) {
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          DCHECK(full_runs->find(run) == full_runs->end());
          // If it was a current run, keep it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only) and insert into the non-full run set.
          DCHECK(full_runs->find(run) != full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          if (kIsDebugBuild) {
            full_runs->erase(run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
          }
1185 if (kTraceRosAlloc) {
1186 LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
1187 << reinterpret_cast<intptr_t>(run)
1188 << " into non_full_runs_[" << std::dec << idx;
1189 }
1190 } else {
1191 // If it was not full, so leave it in the non full run set.
1192 DCHECK(full_runs->find(run) == full_runs->end());
1193 DCHECK(non_full_runs->find(run) != non_full_runs->end());
1194 }
1195 }
1196 }
1197 }
1198}
1199
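// BulkFree() protocol sketch: phase one marks each pointer in its run's
// bulk free bit map while holding only the global lock_ (to read the page
// map), collecting the affected runs. Phase two visits each run once under
// its size bracket lock and either merges the bulk free bits into the
// alloc bit map (shared runs) or unions them into the thread-local free
// bit map (thread-local runs). The writer side of bulk_free_lock_ keeps
// concurrent Free() calls, which take the reader side, out of this
// two-phase window.
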
void RosAlloc::DumpPageMap(Thread* self) {
  MutexLock mu(self, lock_);
  size_t end = page_map_.size();
  FreePageRun* curr_fpr = NULL;
  size_t curr_fpr_size = 0;
  size_t remaining_curr_fpr_size = 0;
  size_t num_running_empty_pages = 0;
  for (size_t i = 0; i < end; ++i) {
    byte pm = page_map_[i];
    switch (pm) {
    case kPageMapEmpty: {
      FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
      if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
        // Encountered a fresh free page run.
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        DCHECK(fpr->IsFree());
        DCHECK(curr_fpr == NULL);
        DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
        curr_fpr = fpr;
        curr_fpr_size = fpr->ByteSize(this);
        DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
        remaining_curr_fpr_size = curr_fpr_size - kPageSize;
        LOG(INFO) << "[" << i << "]=Empty (FPR start)"
                  << " fpr_size=" << curr_fpr_size
                  << " remaining_fpr_size=" << remaining_curr_fpr_size;
        if (remaining_curr_fpr_size == 0) {
          // Reset at the end of the current free page run.
          curr_fpr = NULL;
          curr_fpr_size = 0;
        }
        LOG(INFO) << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr);
        DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
      } else {
        // Still part of the current free page run.
        DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
        DCHECK(curr_fpr != NULL && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
        DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
        DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
        remaining_curr_fpr_size -= kPageSize;
        LOG(INFO) << "[" << i << "]=Empty (FPR part)"
                  << " remaining_fpr_size=" << remaining_curr_fpr_size;
        if (remaining_curr_fpr_size == 0) {
          // Reset at the end of the current free page run.
          curr_fpr = NULL;
          curr_fpr_size = 0;
        }
      }
      num_running_empty_pages++;
      break;
    }
    case kPageMapLargeObject: {
      DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
      num_running_empty_pages = 0;
      LOG(INFO) << "[" << i << "]=Large (start)";
      break;
    }
    case kPageMapLargeObjectPart:
      DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
      num_running_empty_pages = 0;
      LOG(INFO) << "[" << i << "]=Large (part)";
      break;
    case kPageMapRun: {
      DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
      num_running_empty_pages = 0;
      Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
      size_t idx = run->size_bracket_idx_;
      LOG(INFO) << "[" << i << "]=Run (start)"
                << " idx=" << idx
                << " numOfPages=" << numOfPages[idx]
                << " thread_local=" << static_cast<int>(run->is_thread_local_)
                << " is_all_free=" << (run->IsAllFree() ? 1 : 0);
      break;
    }
    case kPageMapRunPart:
      DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
      num_running_empty_pages = 0;
      LOG(INFO) << "[" << i << "]=Run (part)";
      break;
    default:
      LOG(FATAL) << "Unreachable - page map type: " << pm;
      break;
    }
  }
}

size_t RosAlloc::UsableSize(void* ptr) {
  DCHECK(base_ <= ptr && ptr < base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  MutexLock mu(Thread::Current(), lock_);
  switch (page_map_[pm_idx]) {
  case kPageMapEmpty:
    LOG(FATAL) << "Unreachable - RosAlloc::UsableSize(): pm_idx=" << pm_idx << ", ptr=" << std::hex
               << reinterpret_cast<intptr_t>(ptr);
    break;
  case kPageMapLargeObject: {
    size_t num_pages = 1;
    size_t idx = pm_idx + 1;
    size_t end = page_map_.size();
    while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
      num_pages++;
      idx++;
    }
    return num_pages * kPageSize;
  }
  case kPageMapLargeObjectPart:
    LOG(FATAL) << "Unreachable - RosAlloc::UsableSize(): pm_idx=" << pm_idx << ", ptr=" << std::hex
               << reinterpret_cast<intptr_t>(ptr);
    break;
  case kPageMapRun:
  case kPageMapRunPart: {
    // Find the beginning of the run.
    while (page_map_[pm_idx] != kPageMapRun) {
      pm_idx--;
      DCHECK(pm_idx < capacity_ / kPageSize);
    }
    DCHECK(page_map_[pm_idx] == kPageMapRun);
    Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
    DCHECK(run->magic_num_ == kMagicNum);
    size_t idx = run->size_bracket_idx_;
    size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
        - (reinterpret_cast<byte*>(run) + headerSizes[idx]);
    DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
    return IndexToBracketSize(idx);
  }
  default:
    LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
    break;
  }
  return 0;
}

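// UsableSize() example (illustrative): for a pointer into a 3-page large
// object, the page map walk returns 3 * kPageSize; for a pointer into a
// run of bracket index 2, it returns IndexToBracketSize(2) == 48 whatever
// the originally requested size was.
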
bool RosAlloc::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  FreePageRun* last_free_page_run;
  DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
  auto it = free_page_runs_.rbegin();
  if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
    // Remove the last free page run, if any.
    DCHECK(last_free_page_run->IsFree());
    DCHECK(page_map_[ToPageMapIndex(last_free_page_run)] == kPageMapEmpty);
    DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
    DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
    free_page_runs_.erase(last_free_page_run);
    size_t decrement = last_free_page_run->ByteSize(this);
    size_t new_footprint = footprint_ - decrement;
    DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
    size_t new_num_of_pages = new_footprint / kPageSize;
    DCHECK_GE(page_map_.size(), new_num_of_pages);
    page_map_.resize(new_num_of_pages);
    DCHECK_EQ(page_map_.size(), new_num_of_pages);
    free_page_run_size_map_.resize(new_num_of_pages);
    DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
    art_heap_rosalloc_morecore(this, -(static_cast<intptr_t>(decrement)));
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
                << footprint_ << " to " << new_footprint;
    }
    DCHECK_LT(new_footprint, footprint_);
    DCHECK_LT(new_footprint, capacity_);
    footprint_ = new_footprint;
    return true;
  }
  return false;
}

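// Trim example (hypothetical numbers): with footprint_ == 8 * MB and a
// trailing 2 MB FreePageRun ending exactly at base_ + footprint_, Trim()
// shrinks page_map_ and free_page_run_size_map_, calls
// art_heap_rosalloc_morecore() with a -2 MB increment, and lowers
// footprint_ to 6 * MB. A heap whose last pages are in use is not trimmed
// at all.
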
void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                          void* arg) {
  // Note: no need to use this to release pages as we already do so in FreePages().
  if (handler == NULL) {
    return;
  }
  MutexLock mu(Thread::Current(), lock_);
  size_t pm_end = page_map_.size();
  size_t i = 0;
  while (i < pm_end) {
    byte pm = page_map_[i];
    switch (pm) {
    case kPageMapEmpty: {
      // The start of a free page run.
      FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
      DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
      size_t fpr_size = fpr->ByteSize(this);
      DCHECK(IsAligned<kPageSize>(fpr_size));
      void* start = fpr;
      void* end = reinterpret_cast<byte*>(start) + fpr_size;
      handler(start, end, 0, arg);
      size_t num_pages = fpr_size / kPageSize;
      if (kIsDebugBuild) {
        for (size_t j = i + 1; j < i + num_pages; ++j) {
          DCHECK_EQ(page_map_[j], kPageMapEmpty);
        }
      }
      i += fpr_size / kPageSize;
      DCHECK_LE(i, pm_end);
      break;
    }
    case kPageMapLargeObject: {
      // The start of a large object.
      size_t num_pages = 1;
      size_t idx = i + 1;
      while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
        num_pages++;
        idx++;
      }
      void* start = base_ + i * kPageSize;
      void* end = base_ + (i + num_pages) * kPageSize;
      size_t used_bytes = num_pages * kPageSize;
      handler(start, end, used_bytes, arg);
      if (kIsDebugBuild) {
        for (size_t j = i + 1; j < i + num_pages; ++j) {
          DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
        }
      }
      i += num_pages;
      DCHECK_LE(i, pm_end);
      break;
    }
    case kPageMapLargeObjectPart:
      LOG(FATAL) << "Unreachable - page map type: " << pm;
      break;
    case kPageMapRun: {
      // The start of a run.
      Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
      DCHECK(run->magic_num_ == kMagicNum);
      run->InspectAllSlots(handler, arg);
      size_t num_pages = numOfPages[run->size_bracket_idx_];
      if (kIsDebugBuild) {
        for (size_t j = i + 1; j < i + num_pages; ++j) {
          DCHECK_EQ(page_map_[j], kPageMapRunPart);
        }
      }
      i += num_pages;
      DCHECK_LE(i, pm_end);
      break;
    }
    case kPageMapRunPart:
      LOG(FATAL) << "Unreachable - page map type: " << pm;
      break;
    default:
      LOG(FATAL) << "Unreachable - page map type: " << pm;
      break;
    }
  }
}

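// Usage sketch for InspectAll() with the callbacks defined at the end of
// this file (assuming a rosalloc instance exists):
//
//   size_t bytes_allocated = 0;
//   rosalloc->InspectAll(RosAlloc::BytesAllocatedCallback, &bytes_allocated);
//
// Each allocated slot, large object range, and free page run is reported
// exactly once, so this sums the used bytes across the whole heap.
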
size_t RosAlloc::Footprint() {
  MutexLock mu(Thread::Current(), lock_);
  return footprint_;
}

size_t RosAlloc::FootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return capacity_;
}

void RosAlloc::SetFootprintLimit(size_t new_capacity) {
  MutexLock mu(Thread::Current(), lock_);
  DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
  // Only growing is supported here; shrinking is done via Trim().
  if (capacity_ < new_capacity) {
    capacity_ = new_capacity;
    VLOG(heap) << "new capacity=" << capacity_;
  }
}

void RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
  Thread* self = Thread::Current();
  for (size_t idx = 0; idx < kNumOfSizeBrackets; idx++) {
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* thread_local_run = reinterpret_cast<Run*>(thread->rosalloc_runs_[idx]);
    if (thread_local_run != NULL) {
      DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
      DCHECK_NE(thread_local_run->is_thread_local_, 0);
      thread->rosalloc_runs_[idx] = NULL;
      // Note the thread local run may not be full here.
      bool dont_care;
      thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&dont_care);
      thread_local_run->is_thread_local_ = 0;
      thread_local_run->MergeBulkFreeBitMapIntoAllocBitMap();
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      if (thread_local_run->IsFull()) {
        if (kIsDebugBuild) {
          full_runs_[idx].insert(thread_local_run);
          DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::RevokeThreadLocalRuns() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(thread_local_run)
                      << " into full_runs_[" << std::dec << idx << "]";
          }
        }
      } else if (thread_local_run->IsAllFree()) {
        MutexLock mu(self, lock_);
        FreePages(self, thread_local_run);
      } else {
        non_full_runs_[idx].insert(thread_local_run);
        DCHECK(non_full_runs_[idx].find(thread_local_run) != non_full_runs_[idx].end());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::RevokeThreadLocalRuns() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(thread_local_run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
}

void RosAlloc::RevokeAllThreadLocalRuns() {
  // This is called when no mutator thread is allocating, such as at
  // Zygote creation time or during a GC pause.
  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
    Thread* t = *it;
    RevokeThreadLocalRuns(t);
  }
}

void RosAlloc::Initialize() {
  // Check the consistency of the number of size brackets.
  DCHECK_EQ(Thread::kRosAllocNumOfSizeBrackets, kNumOfSizeBrackets);
  // bracketSizes.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 16 * (i + 1);
    } else if (i == kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 1 * KB;
    } else {
      DCHECK(i == kNumOfSizeBrackets - 1);
      bracketSizes[i] = 2 * KB;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
    }
  }
  // numOfPages.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < 4) {
      numOfPages[i] = 1;
    } else if (i < 8) {
      numOfPages[i] = 2;
    } else if (i < 16) {
      numOfPages[i] = 4;
    } else if (i < 32) {
      numOfPages[i] = 8;
    } else if (i == 32) {
      DCHECK(i == kNumOfSizeBrackets - 2);
      numOfPages[i] = 16;
    } else {
      DCHECK(i == kNumOfSizeBrackets - 1);
      numOfPages[i] = 32;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
    }
  }
  // Compute numOfSlots and slotOffsets.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_t bracket_size = bracketSizes[i];
    size_t run_size = kPageSize * numOfPages[i];
    size_t max_num_of_slots = run_size / bracket_size;
    // Compute the actual number of slots by taking the header and
    // alignment into account.
    size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint32_t));
    DCHECK_EQ(fixed_header_size, static_cast<size_t>(8));
    size_t header_size = 0;
    size_t bulk_free_bit_map_offset = 0;
    size_t thread_local_free_bit_map_offset = 0;
    size_t num_of_slots = 0;
    // Search for the maximum number of slots that allows enough space
    // for the header (including the bit maps.)
    for (int s = max_num_of_slots; s >= 0; s--) {
      size_t tmp_slots_size = bracket_size * s;
      size_t tmp_bit_map_size = RoundUp(s, sizeof(uint32_t) * kBitsPerByte) / kBitsPerByte;
      size_t tmp_bulk_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_bulk_free_bit_map_off = fixed_header_size + tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_off = tmp_bulk_free_bit_map_off + tmp_bulk_free_bit_map_size;
      size_t tmp_unaligned_header_size = tmp_thread_local_free_bit_map_off + tmp_thread_local_free_bit_map_size;
      // Align up the unaligned header size. bracket_size may not be a power of two.
      size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
          tmp_unaligned_header_size :
          tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
      DCHECK_EQ(tmp_header_size % bracket_size, static_cast<size_t>(0));
      DCHECK_EQ(tmp_header_size % 8, static_cast<size_t>(0));
      if (tmp_slots_size + tmp_header_size <= run_size) {
        // Found the right number of slots, that is, there was enough
        // space for the header (including the bit maps.)
        num_of_slots = s;
        header_size = tmp_header_size;
        bulk_free_bit_map_offset = tmp_bulk_free_bit_map_off;
        thread_local_free_bit_map_offset = tmp_thread_local_free_bit_map_off;
        break;
      }
    }
    DCHECK(num_of_slots > 0 && header_size > 0 && bulk_free_bit_map_offset > 0);
    // Add the padding for the alignment remainder.
    header_size += run_size % bracket_size;
    DCHECK(header_size + num_of_slots * bracket_size == run_size);
    numOfSlots[i] = num_of_slots;
    headerSizes[i] = header_size;
    bulkFreeBitMapOffsets[i] = bulk_free_bit_map_offset;
    threadLocalFreeBitMapOffsets[i] = thread_local_free_bit_map_offset;
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
                << ", headerSizes[" << i << "]=" << headerSizes[i]
                << ", bulkFreeBitMapOffsets[" << i << "]=" << bulkFreeBitMapOffsets[i]
                << ", threadLocalFreeBitMapOffsets[" << i << "]=" << threadLocalFreeBitMapOffsets[i];
    }
  }
}

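// Worked header computation for bracket 0 (16-byte slots, one 4 KB page),
// following the search above: for s == 249 each bit map needs
// RoundUp(249, 32) / 8 == 32 bytes, so the unaligned header is
// 8 + 3 * 32 == 104 bytes, which rounds up to 112 (a multiple of 16), and
// 249 * 16 + 112 == 4096 == run_size. Hence numOfSlots[0] == 249,
// headerSizes[0] == 112, and the bulk free and thread-local free bit maps
// start at offsets 40 and 72.
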
void RosAlloc::BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
  *bytes_allocated += used_bytes;
}

void RosAlloc::ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
  ++(*objects_allocated);
}

}  // namespace allocator
}  // namespace gc
}  // namespace art