/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base/mutex-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "rosalloc.h"

#include <map>
#include <list>
#include <vector>

namespace art {
namespace gc {
namespace allocator {

extern "C" void* art_heap_rosalloc_morecore(RosAlloc* rosalloc, intptr_t increment);

size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
size_t RosAlloc::bulkFreeBitMapOffsets[kNumOfSizeBrackets];
size_t RosAlloc::threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;

RosAlloc::RosAlloc(void* base, size_t capacity,
                   PageReleaseMode page_release_mode, size_t page_release_size_threshold)
    : base_(reinterpret_cast<byte*>(base)), footprint_(capacity),
      capacity_(capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
      page_release_mode_(page_release_mode),
      page_release_size_threshold_(page_release_size_threshold) {
  DCHECK(RoundUp(capacity, kPageSize) == capacity);
  CHECK(IsAligned<kPageSize>(page_release_size_threshold_));
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << (intptr_t)base_ << ", end="
             << std::hex << (intptr_t)(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_;
  memset(current_runs_, 0, sizeof(current_runs_));
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_locks_[i] = new Mutex("a rosalloc size bracket lock",
                                       kRosAllocBracketLock);
  }
  size_t num_of_pages = capacity_ / kPageSize;
  page_map_.resize(num_of_pages);
  free_page_run_size_map_.resize(num_of_pages);

  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  DCHECK(free_pages->IsFree());
  free_pages->ReleasePages(this);
  DCHECK(free_pages->IsFree());
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}

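// Page-level allocation. free_page_runs_ is a std::set of FreePageRun*
// ordered by address, so iterating from begin() visits the
// lowest-address free run first; AllocPages() below is therefore a
// first-fit search. Worked example (illustrative, assuming 4 KB
// pages): a request for 3 pages scans the runs in address order, takes
// the first run of >= 12288 bytes, splits off any remainder back into
// the set, and returns the front portion.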
void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
  lock_.AssertHeld(self);
  DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
  FreePageRun* res = NULL;
  size_t req_byte_size = num_pages * kPageSize;
  // Find the lowest address free page run that's large enough.
  for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
    FreePageRun* fpr = *it;
    DCHECK(fpr->IsFree());
    size_t fpr_byte_size = fpr->ByteSize(this);
    DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
    if (req_byte_size <= fpr_byte_size) {
      // Found one.
      free_page_runs_.erase(it++);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split.
        FreePageRun* remainder =
            reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Don't need to call madvise on remainder here.
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
      break;
    } else {
      ++it;
    }
  }

  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == NULL && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = NULL;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK(page_map_[ToPageMapIndex(last_free_page_run)] == kPageMapEmpty);
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_.size(), new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_.resize(new_num_of_pages);
      free_page_run_size_map_.resize(new_num_of_pages);
      art_heap_rosalloc_morecore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK(last_free_page_run->End(this) == base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK(*free_page_runs_.rbegin() == new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != NULL);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder =
            reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != NULL)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK(page_map_[page_map_idx + i] == kPageMapEmpty);
    }
    switch (page_map_type) {
      case kPageMapRun:
        page_map_[page_map_idx] = kPageMapRun;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapRunPart;
        }
        break;
      case kPageMapLargeObject:
        page_map_[page_map_idx] = kPageMapLargeObject;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
        }
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_type;
        break;
    }
    if (kIsDebugBuild) {
      // Clear the first page which isn't madvised away in the debug
      // build for the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : NULL";
  }
  return nullptr;
}

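// Returns the pages of a run or large object to free_page_runs_,
// coalescing with adjacent free runs in both directions so the set
// never holds two abutting entries. Illustrative example (assuming
// 4 KB pages): if pages [5..6] are freed while [3..4] and [7..9] are
// already free, the three runs merge into one 7-page run starting at
// page 3 before insertion.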
void RosAlloc::FreePages(Thread* self, void* ptr) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK(pm_idx < page_map_.size());
  byte pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  byte pm_part_type;
  switch (pm_type) {
    case kPageMapRun:
      pm_part_type = kPageMapRunPart;
      break;
    case kPageMapLargeObject:
      pm_part_type = kPageMapLargeObjectPart;
      break;
    default:
      pm_part_type = kPageMapEmpty;
      LOG(FATAL) << "Unreachable - RosAlloc::FreePages() : " << "pm_idx=" << pm_idx << ", pm_type="
                 << static_cast<int>(pm_type) << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      return;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_.size();
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, num_pages * kPageSize);
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_.size() : ToPageMapIndex(fpr->End(this))) << "]";
    }
    auto higher_it = free_page_runs_.upper_bound(fpr);
    if (higher_it != free_page_runs_.end()) {
      for (auto it = higher_it; it != free_page_runs_.end(); ) {
        FreePageRun* h = *it;
        DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                    << (h->End(this) == End() ? page_map_.size() : ToPageMapIndex(h->End(this))) << "]";
        }
        if (fpr->End(this) == h->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it++);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(h)
                      << " from free_page_runs_";
          }
          fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
          DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
      }
    }
    // Try to coalesce in the lower address direction.
    auto lower_it = free_page_runs_.upper_bound(fpr);
    if (lower_it != free_page_runs_.begin()) {
      --lower_it;
      for (auto it = lower_it; ; ) {
        // We want to try to coalesce with the first element but
        // there's no "<=" operator for the iterator.
        bool to_exit_loop = it == free_page_runs_.begin();

        FreePageRun* l = *it;
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                    << (l->End(this) == End() ? page_map_.size() : ToPageMapIndex(l->End(this))) << "]";
        }
        if (l->End(this) == fpr->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it--);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(l)
                      << " from free_page_runs_";
          }
          l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
          DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
          fpr = l;
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
        if (to_exit_loop) {
          break;
        }
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  DCHECK(fpr->IsFree());
  fpr->ReleasePages(this);
  DCHECK(fpr->IsFree());
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
}

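// Allocations above kLargeSizeThreshold bypass runs entirely and are
// backed directly by whole pages, so the reported bytes_allocated is
// always the page-multiple round-up of the requested size.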
void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK(size > kLargeSizeThreshold);
  size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
  void* r;
  {
    MutexLock mu(self, lock_);
    r = AllocPages(self, num_pages, kPageMapLargeObject);
  }
  if (UNLIKELY(r == nullptr)) {
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
    }
    return nullptr;
  }
  if (bytes_allocated != NULL) {
    *bytes_allocated = num_pages * kPageSize;
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
              << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }
  if (!DoesReleaseAllPages()) {
    // If it does not release all pages, pages may not be zeroed out.
    memset(r, 0, size);
  }
  // Check if the returned memory is really all zero.
  if (kCheckZeroMemory) {
    byte* bytes = reinterpret_cast<byte*>(r);
    for (size_t i = 0; i < size; ++i) {
      DCHECK_EQ(bytes[i], 0);
    }
  }
  return r;
}

void RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK(base_ <= ptr && ptr < base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  bool free_from_run = false;
  Run* run = NULL;
  {
    MutexLock mu(self, lock_);
    DCHECK(pm_idx < page_map_.size());
    byte page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_[pm_idx]) {
      case kPageMapEmpty:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return;
      case kPageMapLargeObject:
        FreePages(self, ptr);
        return;
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return;
      case kPageMapRun:
      case kPageMapRunPart: {
        free_from_run = true;
        size_t pi = pm_idx;
        DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
        // Find the beginning of the run.
        while (page_map_[pi] != kPageMapRun) {
          pi--;
          DCHECK(pi < capacity_ / kPageSize);
        }
        DCHECK(page_map_[pi] == kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
        DCHECK(run->magic_num_ == kMagicNum);
        break;
      }
      default:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return;
    }
  }
  if (LIKELY(free_from_run)) {
    DCHECK(run != NULL);
    FreeFromRun(self, ptr, run);
  }
}

void RosAlloc::Free(Thread* self, void* ptr) {
  ReaderMutexLock rmu(self, bulk_free_lock_);
  FreeInternal(self, ptr);
}

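// Picks the lowest-address non-full run for bracket idx, or allocates
// and initializes a fresh run of numOfPages[idx] pages if none exists.
// Note that lower_bound(NULL) on the address-ordered set simply lands
// on the first, i.e. lowest-address, element.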
RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  Run* new_run;
  size_t num_pages = numOfPages[idx];
  // Get the lowest address non-full run from the binary tree.
  Run* temp = NULL;
  std::set<Run*>* bt = &non_full_runs_[idx];
  std::set<Run*>::iterator found = bt->lower_bound(temp);
  if (found != bt->end()) {
    // If there's one, use it as the current run.
    Run* non_full_run = *found;
    DCHECK(non_full_run != NULL);
    new_run = non_full_run;
    DCHECK_EQ(new_run->is_thread_local_, 0);
    bt->erase(found);
    DCHECK_EQ(non_full_run->is_thread_local_, 0);
  } else {
    // If there's none, allocate a new run and use it as the
    // current run.
    {
      MutexLock mu(self, lock_);
      new_run = reinterpret_cast<Run*>(AllocPages(self, num_pages, kPageMapRun));
    }
    if (new_run == NULL) {
      return NULL;
    }
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    new_run->top_slot_idx_ = 0;
    new_run->ClearBitMaps();
    new_run->to_be_bulk_freed_ = false;
  }
  return new_run;
}

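// Small allocations are rounded up to a size bracket and served from a
// run of equal-sized slots. The smallest brackets
// (idx <= kMaxThreadLocalSizeBracketIdx) use a per-thread run with no
// locking on the fast path; larger brackets share current_runs_[idx]
// under the per-bracket mutex. Illustrative example (the actual
// bracket sizes are set up in Initialize(), which is not shown here):
// with 16-byte quanta, a 24-byte request would round up to a 32-byte
// slot and bytes_allocated would report 32.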
void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK(size <= kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK(size <= bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);

  void* slot_addr;

  if (LIKELY(idx <= kMaxThreadLocalSizeBracketIdx)) {
    // Use a thread-local run.
    Run* thread_local_run = reinterpret_cast<Run*>(self->rosalloc_runs_[idx]);
    if (UNLIKELY(thread_local_run == NULL)) {
      MutexLock mu(self, *size_bracket_locks_[idx]);
      thread_local_run = RefillRun(self, idx);
      if (UNLIKELY(thread_local_run == NULL)) {
        return NULL;
      }
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      thread_local_run->is_thread_local_ = 1;
      self->rosalloc_runs_[idx] = thread_local_run;
      DCHECK(!thread_local_run->IsFull());
    }

    DCHECK(thread_local_run != NULL);
    DCHECK_NE(thread_local_run->is_thread_local_, 0);
    slot_addr = thread_local_run->AllocSlot();

    if (UNLIKELY(slot_addr == NULL)) {
      // The run got full. Try to free slots.
      DCHECK(thread_local_run->IsFull());
      MutexLock mu(self, *size_bracket_locks_[idx]);
      bool is_all_free_after_merge;
      if (thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&is_all_free_after_merge)) {
        // Some slot got freed. Keep it.
        DCHECK(!thread_local_run->IsFull());
        DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
        if (is_all_free_after_merge) {
          // Reinstate the bump index mode if it's all free.
          DCHECK_EQ(thread_local_run->top_slot_idx_, numOfSlots[idx]);
          thread_local_run->top_slot_idx_ = 0;
        }
      } else {
        // No slots got freed. Try to refill the thread-local run.
        DCHECK(thread_local_run->IsFull());
        self->rosalloc_runs_[idx] = NULL;
        thread_local_run->is_thread_local_ = 0;
        if (kIsDebugBuild) {
          full_runs_[idx].insert(thread_local_run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(thread_local_run)
                      << " into full_runs_[" << std::dec << idx << "]";
          }
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
        thread_local_run = RefillRun(self, idx);
        if (UNLIKELY(thread_local_run == NULL)) {
          return NULL;
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
        thread_local_run->is_thread_local_ = 1;
        self->rosalloc_runs_[idx] = thread_local_run;
        DCHECK(!thread_local_run->IsFull());
      }

      DCHECK(thread_local_run != NULL);
      DCHECK(!thread_local_run->IsFull());
      DCHECK_NE(thread_local_run->is_thread_local_, 0);
      slot_addr = thread_local_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != NULL);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  } else {
    // Use the (shared) current run.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* current_run = current_runs_[idx];
    if (UNLIKELY(current_run == NULL)) {
      current_run = RefillRun(self, idx);
      if (UNLIKELY(current_run == NULL)) {
        return NULL;
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
      current_run->is_thread_local_ = 0;
      current_runs_[idx] = current_run;
      DCHECK(!current_run->IsFull());
    }
    DCHECK(current_run != NULL);
    slot_addr = current_run->AllocSlot();
    if (UNLIKELY(slot_addr == NULL)) {
      // The current run got full. Try to refill it.
      DCHECK(current_run->IsFull());
      current_runs_[idx] = NULL;
      if (kIsDebugBuild) {
        // Insert it into full_runs and set the current run to NULL.
        full_runs_[idx].insert(current_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(current_run)
                    << " into full_runs_[" << std::dec << idx << "]";
        }
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
      current_run = RefillRun(self, idx);
      if (UNLIKELY(current_run == NULL)) {
        return NULL;
      }
      DCHECK(current_run != NULL);
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
      current_run->is_thread_local_ = 0;
      current_runs_[idx] = current_run;
      DCHECK(!current_run->IsFull());
      slot_addr = current_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != NULL);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  }
  if (LIKELY(bytes_allocated != NULL)) {
    *bytes_allocated = bracket_size;
  }
  memset(slot_addr, 0, size);
  return slot_addr;
}

void RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
  DCHECK(run->magic_num_ == kMagicNum);
  DCHECK(run < ptr && ptr < run->End());
  size_t idx = run->size_bracket_idx_;
  MutexLock mu(self, *size_bracket_locks_[idx]);
  bool run_was_full = false;
  if (kIsDebugBuild) {
    run_was_full = run->IsFull();
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
  }
  if (LIKELY(run->is_thread_local_ != 0)) {
    // It's a thread-local run. Just mark the thread-local free bit map and return.
    DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->MarkThreadLocalFreeBitMap(ptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run);
    }
    // A thread local run will be kept as a thread local even if it's become all free.
    return;
  }
  // Free the slot in the run.
  run->FreeSlot(ptr);
  std::set<Run*>* non_full_runs = &non_full_runs_[idx];
  if (run->IsAllFree()) {
    // It has just become completely free. Free the pages of this run.
    std::set<Run*>::iterator pos = non_full_runs->find(run);
    if (pos != non_full_runs->end()) {
      non_full_runs->erase(pos);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
      }
    }
    if (run == current_runs_[idx]) {
      current_runs_[idx] = NULL;
    }
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    {
      MutexLock mu(self, lock_);
      FreePages(self, run);
    }
  } else {
    // It is not completely free. If it wasn't the current run or
    // already in the non-full run set (i.e., it was full) insert it
    // into the non-full run set.
    if (run != current_runs_[idx]) {
      hash_set<Run*, hash_run, eq_run>* full_runs =
          kIsDebugBuild ? &full_runs_[idx] : NULL;
      std::set<Run*>::iterator pos = non_full_runs->find(run);
      if (pos == non_full_runs->end()) {
        DCHECK(run_was_full);
        DCHECK(full_runs->find(run) != full_runs->end());
        if (kIsDebugBuild) {
          full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run) << " from full_runs_";
          }
        }
        non_full_runs->insert(run);
        DCHECK(!run->IsFull());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
}

std::string RosAlloc::Run::BitMapToStr(uint32_t* bit_map_base, size_t num_vec) {
  std::string bit_map_str;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = bit_map_base[v];
    if (v != num_vec - 1) {
      bit_map_str.append(StringPrintf("%x-", vec));
    } else {
      bit_map_str.append(StringPrintf("%x", vec));
    }
  }
  return bit_map_str;
}

std::string RosAlloc::Run::Dump() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  std::ostringstream stream;
  stream << "RosAlloc Run = " << reinterpret_cast<void*>(this)
         << "{ magic_num=" << static_cast<int>(magic_num_)
         << " size_bracket_idx=" << idx
         << " is_thread_local=" << static_cast<int>(is_thread_local_)
         << " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_)
         << " top_slot_idx=" << top_slot_idx_
         << " alloc_bit_map=" << BitMapToStr(alloc_bit_map_, num_vec)
         << " bulk_free_bit_map=" << BitMapToStr(BulkFreeBitMap(), num_vec)
         << " thread_local_bit_map=" << BitMapToStr(ThreadLocalFreeBitMap(), num_vec)
         << " }" << std::endl;
  return stream.str();
}

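// A run packs its metadata and three bit maps (alloc, bulk free, and
// thread-local free, laid out contiguously after the header; see
// ClearBitMaps() below) ahead of the slot area. AllocSlot() first runs
// in "bump index" mode, handing out slots 0, 1, 2, ... without
// scanning; once top_slot_idx_ reaches numOfSlots[idx], it falls back
// to scanning the alloc bit map with __builtin_ffs for a zero bit.
// Worked example (illustrative): for slot_idx 37, vec_idx = 37 / 32 = 1
// and vec_off = 37 % 32 = 5, so the slot is bit 5 of word 1, and
// slot_addr = this + headerSizes[idx] + 37 * bracketSizes[idx].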
void* RosAlloc::Run::AllocSlot() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  DCHECK_LE(top_slot_idx_, num_slots);
  if (LIKELY(top_slot_idx_ < num_slots)) {
    // If it's in bump index mode, grab the top slot and increment the top index.
    size_t slot_idx = top_slot_idx_;
    byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
    }
    top_slot_idx_++;
    size_t vec_idx = slot_idx / 32;
    size_t vec_off = slot_idx % 32;
    uint32_t* vec = &alloc_bit_map_[vec_idx];
    DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
    *vec |= 1 << vec_off;
    DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
    return slot_addr;
  }
  // Not in bump index mode. Search the alloc bit map for an empty slot.
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slot_idx = 0;
  bool found_slot = false;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t* vecp = &alloc_bit_map_[v];
    uint32_t ffz1 = __builtin_ffs(~*vecp);
    uint32_t ffz;
    // TODO: Use LIKELY or UNLIKELY here?
    if (LIKELY(ffz1 > 0 && (ffz = ffz1 - 1) + v * 32 < num_slots)) {
      // Found an empty slot. Set the bit.
      DCHECK_EQ((*vecp & (1 << ffz)), static_cast<uint32_t>(0));
      *vecp |= (1 << ffz);
      DCHECK_NE((*vecp & (1 << ffz)), static_cast<uint32_t>(0));
      slot_idx = ffz + v * 32;
      found_slot = true;
      break;
    }
  }
  if (LIKELY(found_slot)) {
    byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
    }
    return slot_addr;
  }
  return NULL;
}

inline void RosAlloc::Run::FreeSlot(void* ptr) {
  DCHECK_EQ(is_thread_local_, 0);
  byte idx = size_bracket_idx_;
  size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
  DCHECK(slot_idx < numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK(vec_idx < num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &alloc_bit_map_[vec_idx];
  DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  *vec &= ~(1 << vec_off);
  DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::FreeSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

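// Thread-local frees are only recorded in the thread-local free bit
// map; the merge below applies them to the alloc bit map in one pass
// (alloc &= ~tl_free for each 32-slot word) when the owning thread's
// run fills up. The return value distinguishes "some slots were
// reclaimed" from "the run is genuinely full".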
inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out) {
  DCHECK_NE(is_thread_local_, 0);
  // Free slots in the alloc bit map based on the thread local free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  bool changed = false;
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* tl_free_vecp = &ThreadLocalFreeBitMap()[0];
  bool is_all_free_after = true;
  for (size_t v = 0; v < num_vec; v++, vecp++, tl_free_vecp++) {
    uint32_t tl_free_vec = *tl_free_vecp;
    uint32_t vec_before = *vecp;
    uint32_t vec_after;
    if (tl_free_vec != 0) {
      vec_after = vec_before & ~tl_free_vec;
      *vecp = vec_after;
      changed = true;
      *tl_free_vecp = 0;  // clear the thread local free bit map.
    } else {
      vec_after = vec_before;
    }
    if (vec_after != 0) {
      is_all_free_after = false;
    }
    DCHECK_EQ(*tl_free_vecp, static_cast<uint32_t>(0));
  }
  *is_all_free_after_out = is_all_free_after;
  // Return true if there was at least a bit set in the thread-local
  // free bit map and at least a bit in the alloc bit map changed.
  return changed;
}

inline void RosAlloc::Run::MergeBulkFreeBitMapIntoAllocBitMap() {
  DCHECK_EQ(is_thread_local_, 0);
  // Free slots in the alloc bit map based on the bulk free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* free_vecp = &BulkFreeBitMap()[0];
  for (size_t v = 0; v < num_vec; v++, vecp++, free_vecp++) {
    uint32_t free_vec = *free_vecp;
    if (free_vec != 0) {
      *vecp &= ~free_vec;
      *free_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*free_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::UnionBulkFreeBitMapToThreadLocalFreeBitMap() {
  DCHECK_NE(is_thread_local_, 0);
  // Union the thread local bit map with the bulk free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  uint32_t* to_vecp = &ThreadLocalFreeBitMap()[0];
  uint32_t* from_vecp = &BulkFreeBitMap()[0];
  for (size_t v = 0; v < num_vec; v++, to_vecp++, from_vecp++) {
    uint32_t from_vec = *from_vecp;
    if (from_vec != 0) {
      *to_vecp |= from_vec;
      *from_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*from_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::MarkThreadLocalFreeBitMap(void* ptr) {
  DCHECK_NE(is_thread_local_, 0);
  MarkFreeBitMapShared(ptr, ThreadLocalFreeBitMap(), "MarkThreadLocalFreeBitMap");
}

inline void RosAlloc::Run::MarkBulkFreeBitMap(void* ptr) {
  MarkFreeBitMapShared(ptr, BulkFreeBitMap(), "MarkBulkFreeBitMap");
}

inline void RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
                                                const char* caller_name) {
  byte idx = size_bracket_idx_;
  size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
  DCHECK(slot_idx < numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK(vec_idx < num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &free_bit_map_base[vec_idx];
  DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  *vec |= 1 << vec_off;
  DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : 0x" << std::hex
              << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

inline bool RosAlloc::Run::IsAllFree() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = alloc_bit_map_[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsFull() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK(num_slots >= slots);
    uint32_t vec = alloc_bit_map_[v];
    uint32_t mask = (num_slots - slots >= 32) ? static_cast<uint32_t>(-1)
        : (1 << (num_slots - slots)) - 1;
    DCHECK(num_slots - slots >= 32 ? mask == static_cast<uint32_t>(-1) : true);
    if (vec != mask) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsBulkFreeBitmapClean() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = BulkFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsThreadLocalFreeBitmapClean() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = ThreadLocalFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

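// The three bit maps (alloc, bulk free, thread-local free) are
// allocated back-to-back, num_vec words each, which is why the single
// memset of 3 * num_vec words below clears all of them at once.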
inline void RosAlloc::Run::ClearBitMaps() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  memset(alloc_bit_map_, 0, sizeof(uint32_t) * num_vec * 3);
}

void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                                    void* arg) {
  size_t idx = size_bracket_idx_;
  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  DCHECK_EQ(slot_base + num_slots * bracket_size,
            reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize);
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK(num_slots >= slots);
    uint32_t vec = alloc_bit_map_[v];
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      byte* slot_addr = slot_base + (slots + i) * bracket_size;
      if (is_allocated) {
        handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
      } else {
        handler(slot_addr, slot_addr + bracket_size, 0, arg);
      }
    }
  }
}

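// BulkFree() is the GC's free path. It runs in two phases: first mark
// every pointer's slot in its run's bulk free bit map (taking only the
// global lock briefly per pointer, plus the bulk_free_lock_ writer side
// to exclude concurrent Free() calls), then visit each affected run
// once under its bracket lock and apply the accumulated bits. This
// touches each run's metadata once per batch rather than once per
// pointer.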
void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
  if (false) {
    // Used only to test Free() as GC uses only BulkFree().
    for (size_t i = 0; i < num_ptrs; ++i) {
      FreeInternal(self, ptrs[i]);
    }
    return;
  }

  WriterMutexLock wmu(self, bulk_free_lock_);

  // First mark slots to free in the bulk free bit map without locking the
  // size bracket locks. On host, hash_set is faster than vector + flag.
#ifdef HAVE_ANDROID_OS
  std::vector<Run*> runs;
#else
  hash_set<Run*, hash_run, eq_run> runs;
#endif
  {
    for (size_t i = 0; i < num_ptrs; i++) {
      void* ptr = ptrs[i];
      ptrs[i] = NULL;
      DCHECK(base_ <= ptr && ptr < base_ + footprint_);
      size_t pm_idx = RoundDownToPageMapIndex(ptr);
      bool free_from_run = false;
      Run* run = NULL;
      {
        MutexLock mu(self, lock_);
        DCHECK(pm_idx < page_map_.size());
        byte page_map_entry = page_map_[pm_idx];
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                    << std::dec << pm_idx
                    << ", page_map_entry=" << static_cast<int>(page_map_entry);
        }
        if (LIKELY(page_map_entry == kPageMapRun)) {
          free_from_run = true;
          run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
          DCHECK(run->magic_num_ == kMagicNum);
        } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
          free_from_run = true;
          size_t pi = pm_idx;
          DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
          // Find the beginning of the run.
          while (page_map_[pi] != kPageMapRun) {
            pi--;
            DCHECK(pi < capacity_ / kPageSize);
          }
          DCHECK(page_map_[pi] == kPageMapRun);
          run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
          DCHECK(run->magic_num_ == kMagicNum);
        } else if (page_map_entry == kPageMapLargeObject) {
          FreePages(self, ptr);
        } else {
          LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
        }
      }
      if (LIKELY(free_from_run)) {
        DCHECK(run != NULL);
        // Set the bit in the bulk free bit map.
        run->MarkBulkFreeBitMap(ptr);
#ifdef HAVE_ANDROID_OS
        if (!run->to_be_bulk_freed_) {
          run->to_be_bulk_freed_ = true;
          runs.push_back(run);
        }
#else
        runs.insert(run);
#endif
      }
    }
  }

  // Now, iterate over the affected runs and update the alloc bit map
  // based on the bulk free bit map (for non-thread-local runs) and
  // union the bulk free bit map into the thread-local free bit map
  // (for thread-local runs.)
#ifdef HAVE_ANDROID_OS
  typedef std::vector<Run*>::iterator It;
#else
  typedef hash_set<Run*, hash_run, eq_run>::iterator It;
#endif
  for (It it = runs.begin(); it != runs.end(); ++it) {
    Run* run = *it;
#ifdef HAVE_ANDROID_OS
    DCHECK(run->to_be_bulk_freed_);
    run->to_be_bulk_freed_ = false;
#endif
    size_t idx = run->size_bracket_idx_;
    MutexLock mu(self, *size_bracket_locks_[idx]);
    if (run->is_thread_local_ != 0) {
      DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
      DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
      run->UnionBulkFreeBitMapToThreadLocalFreeBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(run);
      }
      DCHECK_NE(run->is_thread_local_, 0);
      // A thread local run will be kept as a thread local even if
      // it's become all free.
    } else {
      bool run_was_full = run->IsFull();
      run->MergeBulkFreeBitMapIntoAllocBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run);
      }
      // Check if the run should be moved to non_full_runs_ or
      // free_page_runs_.
      std::set<Run*>* non_full_runs = &non_full_runs_[idx];
      hash_set<Run*, hash_run, eq_run>* full_runs =
          kIsDebugBuild ? &full_runs_[idx] : NULL;
      if (run->IsAllFree()) {
        // It has just become completely free. Free the pages of the
        // run.
        bool run_was_current = run == current_runs_[idx];
        if (run_was_current) {
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          // If it was a current run, reuse it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only.)
          if (kIsDebugBuild) {
            hash_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
            DCHECK(pos != full_runs->end());
            full_runs->erase(pos);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
            DCHECK(full_runs->find(run) == full_runs->end());
          }
        } else {
          // If it was in a non full run set, remove it from the set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
          non_full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " from non_full_runs_";
          }
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
        }
        if (!run_was_current) {
          MutexLock mu(self, lock_);
          FreePages(self, run);
        }
      } else {
        // It is not completely free. If it wasn't the current run or
        // already in the non-full run set (i.e., it was full) insert
        // it into the non-full run set.
        if (run == current_runs_[idx]) {
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          DCHECK(full_runs->find(run) == full_runs->end());
          // If it was a current run, keep it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only) and insert into the non-full run set.
          DCHECK(full_runs->find(run) != full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          if (kIsDebugBuild) {
            full_runs->erase(run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
          }
          non_full_runs->insert(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " into non_full_runs_[" << std::dec << idx << "]";
          }
        } else {
          // It was not full, so leave it in the non-full run set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
        }
      }
    }
  }
}

std::string RosAlloc::DumpPageMap() {
  std::ostringstream stream;
  stream << "RosAlloc PageMap: " << std::endl;
  lock_.AssertHeld(Thread::Current());
  size_t end = page_map_.size();
  FreePageRun* curr_fpr = NULL;
  size_t curr_fpr_size = 0;
  size_t remaining_curr_fpr_size = 0;
  size_t num_running_empty_pages = 0;
  for (size_t i = 0; i < end; ++i) {
    byte pm = page_map_[i];
    switch (pm) {
      case kPageMapEmpty: {
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
          // Encountered a fresh free page run.
          DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
          DCHECK(fpr->IsFree());
          DCHECK(curr_fpr == NULL);
          DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
          curr_fpr = fpr;
          curr_fpr_size = fpr->ByteSize(this);
          DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
          remaining_curr_fpr_size = curr_fpr_size - kPageSize;
          stream << "[" << i << "]=Empty (FPR start)"
                 << " fpr_size=" << curr_fpr_size
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = NULL;
            curr_fpr_size = 0;
          }
          stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
          DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
        } else {
          // Still part of the current free page run.
          DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
          DCHECK(curr_fpr != NULL && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
          DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
          DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
          remaining_curr_fpr_size -= kPageSize;
          stream << "[" << i << "]=Empty (FPR part)"
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = NULL;
            curr_fpr_size = 0;
          }
        }
        num_running_empty_pages++;
        break;
      }
      case kPageMapLargeObject: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (start)" << std::endl;
        break;
      }
      case kPageMapLargeObjectPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (part)" << std::endl;
        break;
      case kPageMapRun: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        size_t idx = run->size_bracket_idx_;
        stream << "[" << i << "]=Run (start)"
               << " idx=" << idx
               << " numOfPages=" << numOfPages[idx]
               << " thread_local=" << static_cast<int>(run->is_thread_local_)
               << " is_all_free=" << (run->IsAllFree() ? 1 : 0)
               << std::endl;
        break;
      }
      case kPageMapRunPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Run (part)" << std::endl;
        break;
      default:
        stream << "[" << i << "]=Unrecognizable page map type: " << pm;
        break;
    }
  }
  return stream.str();
}

size_t RosAlloc::UsableSize(void* ptr) {
  DCHECK(base_ <= ptr && ptr < base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  MutexLock mu(Thread::Current(), lock_);
  switch (page_map_[pm_idx]) {
    case kPageMapEmpty:
      LOG(FATAL) << "Unreachable - RosAlloc::UsableSize(): pm_idx=" << pm_idx << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapLargeObject: {
      size_t num_pages = 1;
      size_t idx = pm_idx + 1;
      size_t end = page_map_.size();
      while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
        num_pages++;
        idx++;
      }
      return num_pages * kPageSize;
    }
    case kPageMapLargeObjectPart:
      LOG(FATAL) << "Unreachable - RosAlloc::UsableSize(): pm_idx=" << pm_idx << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapRun:
    case kPageMapRunPart: {
      // Find the beginning of the run.
      while (page_map_[pm_idx] != kPageMapRun) {
        pm_idx--;
        DCHECK(pm_idx < capacity_ / kPageSize);
      }
      DCHECK(page_map_[pm_idx] == kPageMapRun);
      Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      DCHECK(run->magic_num_ == kMagicNum);
      size_t idx = run->size_bracket_idx_;
      size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
          - (reinterpret_cast<byte*>(run) + headerSizes[idx]);
      DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
      return IndexToBracketSize(idx);
    }
    default:
      LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
      break;
  }
  return 0;
}

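// Trim() can only shrink from the top: it returns the trailing free
// page run, if any, to the OS via a negative morecore increment.
// Example (illustrative): with footprint_ at 64 pages and a 10-page
// free run ending exactly at base_ + footprint_, Trim() drops the
// footprint to 54 pages and shrinks page_map_ to match.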
bool RosAlloc::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  FreePageRun* last_free_page_run;
  DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
  auto it = free_page_runs_.rbegin();
  if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
    // Remove the last free page run, if any.
    DCHECK(last_free_page_run->IsFree());
    DCHECK(page_map_[ToPageMapIndex(last_free_page_run)] == kPageMapEmpty);
    DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
    DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
    free_page_runs_.erase(last_free_page_run);
    size_t decrement = last_free_page_run->ByteSize(this);
    size_t new_footprint = footprint_ - decrement;
    DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
    size_t new_num_of_pages = new_footprint / kPageSize;
    DCHECK_GE(page_map_.size(), new_num_of_pages);
    page_map_.resize(new_num_of_pages);
    DCHECK_EQ(page_map_.size(), new_num_of_pages);
    free_page_run_size_map_.resize(new_num_of_pages);
    DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
    art_heap_rosalloc_morecore(this, -(static_cast<intptr_t>(decrement)));
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
                << footprint_ << " to " << new_footprint;
    }
    DCHECK_LT(new_footprint, footprint_);
    DCHECK_LT(new_footprint, capacity_);
    footprint_ = new_footprint;
    return true;
  }
  return false;
}

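// Walks the whole page map and reports every region to the handler:
// free page runs with used_bytes == 0, large objects as a single
// fully-used range, and runs slot by slot via InspectAllSlots().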
1411void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
1412 void* arg) {
1413 // Note: no need to use this to release pages as we already do so in FreePages().
1414 if (handler == NULL) {
1415 return;
1416 }
1417 MutexLock mu(Thread::Current(), lock_);
1418 size_t pm_end = page_map_.size();
1419 size_t i = 0;
1420 while (i < pm_end) {
1421 byte pm = page_map_[i];
1422 switch (pm) {
1423 case kPageMapEmpty: {
1424 // The start of a free page run.
1425 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
1426 DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
1427 size_t fpr_size = fpr->ByteSize(this);
1428 DCHECK(IsAligned<kPageSize>(fpr_size));
1429 void* start = fpr;
Hiroshi Yamauchi573f7d22013-12-17 11:54:23 -08001430 if (kIsDebugBuild) {
1431 // In the debug build, the first page of a free page run
1432 // contains a magic number for debugging. Exclude it.
1433 start = reinterpret_cast<byte*>(fpr) + kPageSize;
1434 }
1435 void* end = reinterpret_cast<byte*>(fpr) + fpr_size;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001436 handler(start, end, 0, arg);
1437 size_t num_pages = fpr_size / kPageSize;
1438 if (kIsDebugBuild) {
1439 for (size_t j = i + 1; j < i + num_pages; ++j) {
1440 DCHECK_EQ(page_map_[j], kPageMapEmpty);
1441 }
1442 }
1443 i += fpr_size / kPageSize;
1444 DCHECK_LE(i, pm_end);
1445 break;
1446 }
1447 case kPageMapLargeObject: {
1448 // The start of a large object.
1449 size_t num_pages = 1;
1450 size_t idx = i + 1;
1451 while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
1452 num_pages++;
1453 idx++;
1454 }
1455 void* start = base_ + i * kPageSize;
1456 void* end = base_ + (i + num_pages) * kPageSize;
1457 size_t used_bytes = num_pages * kPageSize;
1458 handler(start, end, used_bytes, arg);
1459 if (kIsDebugBuild) {
1460 for (size_t j = i + 1; j < i + num_pages; ++j) {
1461 DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
1462 }
1463 }
1464 i += num_pages;
1465 DCHECK_LE(i, pm_end);
1466 break;
1467 }
1468 case kPageMapLargeObjectPart:
1469 LOG(FATAL) << "Unreachable - page map type: " << pm;
1470 break;
1471 case kPageMapRun: {
1472 // The start of a run.
1473 Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
1474 DCHECK(run->magic_num_ == kMagicNum);
1475 run->InspectAllSlots(handler, arg);
1476 size_t num_pages = numOfPages[run->size_bracket_idx_];
1477 if (kIsDebugBuild) {
1478 for (size_t j = i + 1; j < i + num_pages; ++j) {
1479 DCHECK_EQ(page_map_[j], kPageMapRunPart);
1480 }
1481 }
1482 i += num_pages;
1483 DCHECK_LE(i, pm_end);
1484 break;
1485 }
1486 case kPageMapRunPart:
1487 LOG(FATAL) << "Unreachable - page map type: " << pm;
1488 break;
1489 default:
1490 LOG(FATAL) << "Unreachable - page map type: " << pm;
1491 break;
1492 }
1493 }
1494}
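
// Illustrative note (not part of the original sources): the handler contract
// is handler(start, end, used_bytes, arg). Free page runs are reported once
// with used_bytes == 0, large objects once with their page-rounded size, and
// run slots individually via Run::InspectAllSlots(). See the callback usage
// sketch after BytesAllocatedCallback()/ObjectsAllocatedCallback() below.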

size_t RosAlloc::Footprint() {
  MutexLock mu(Thread::Current(), lock_);
  return footprint_;
}

size_t RosAlloc::FootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return capacity_;
}

void RosAlloc::SetFootprintLimit(size_t new_capacity) {
  MutexLock mu(Thread::Current(), lock_);
  DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
  // Only growing is supported here; shrinking the footprint is done via Trim().
  if (capacity_ < new_capacity) {
    capacity_ = new_capacity;
    VLOG(heap) << "new capacity=" << capacity_;
  }
}

void RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
  Thread* self = Thread::Current();
  // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
  WriterMutexLock wmu(self, bulk_free_lock_);
  for (size_t idx = 0; idx < kNumOfSizeBrackets; idx++) {
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* thread_local_run = reinterpret_cast<Run*>(thread->rosalloc_runs_[idx]);
    if (thread_local_run != NULL) {
      DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
      DCHECK_NE(thread_local_run->is_thread_local_, 0);
      thread->rosalloc_runs_[idx] = NULL;
      // Note the thread local run may not be full here.
      bool dont_care;
      thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&dont_care);
      thread_local_run->is_thread_local_ = 0;
      thread_local_run->MergeBulkFreeBitMapIntoAllocBitMap();
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      if (thread_local_run->IsFull()) {
        if (kIsDebugBuild) {
          full_runs_[idx].insert(thread_local_run);
          DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::RevokeThreadLocalRuns() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(thread_local_run)
                      << " into full_runs_[" << std::dec << idx << "]";
          }
        }
      } else if (thread_local_run->IsAllFree()) {
        MutexLock mu(self, lock_);
        FreePages(self, thread_local_run);
      } else {
        non_full_runs_[idx].insert(thread_local_run);
        DCHECK(non_full_runs_[idx].find(thread_local_run) != non_full_runs_[idx].end());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::RevokeThreadLocalRuns() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(thread_local_run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
}

void RosAlloc::RevokeAllThreadLocalRuns() {
  // This is called when no mutator thread is allocating, such as at
  // Zygote creation time or during a GC pause.
  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
    Thread* t = *it;
    RevokeThreadLocalRuns(t);
  }
}

void RosAlloc::Initialize() {
  // Check the consistency of the number of size brackets.
  DCHECK_EQ(Thread::kRosAllocNumOfSizeBrackets, kNumOfSizeBrackets);
  // bracketSizes.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 16 * (i + 1);
    } else if (i == kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 1 * KB;
    } else {
      DCHECK(i == kNumOfSizeBrackets - 1);
      bracketSizes[i] = 2 * KB;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
    }
  }
  // numOfPages.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < 4) {
      numOfPages[i] = 1;
    } else if (i < 8) {
      numOfPages[i] = 2;
    } else if (i < 16) {
      numOfPages[i] = 4;
    } else if (i < 32) {
      numOfPages[i] = 8;
    } else if (i == 32) {
      DCHECK(i == kNumOfSizeBrackets - 2);
      numOfPages[i] = 16;
    } else {
      DCHECK(i == kNumOfSizeBrackets - 1);
      numOfPages[i] = 32;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
    }
  }
  // Compute numOfSlots and slotOffsets.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_t bracket_size = bracketSizes[i];
    size_t run_size = kPageSize * numOfPages[i];
    size_t max_num_of_slots = run_size / bracket_size;
    // Compute the actual number of slots by taking the header and
    // alignment into account.
    size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint32_t));
    DCHECK_EQ(fixed_header_size, static_cast<size_t>(8));
    size_t header_size = 0;
    size_t bulk_free_bit_map_offset = 0;
    size_t thread_local_free_bit_map_offset = 0;
    size_t num_of_slots = 0;
    // Search for the maximum number of slots that allows enough space
    // for the header (including the bit maps).
    for (int s = max_num_of_slots; s >= 0; s--) {
      size_t tmp_slots_size = bracket_size * s;
      size_t tmp_bit_map_size = RoundUp(s, sizeof(uint32_t) * kBitsPerByte) / kBitsPerByte;
      size_t tmp_bulk_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_bulk_free_bit_map_off = fixed_header_size + tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_off = tmp_bulk_free_bit_map_off + tmp_bulk_free_bit_map_size;
      size_t tmp_unaligned_header_size = tmp_thread_local_free_bit_map_off + tmp_thread_local_free_bit_map_size;
      // Align up the unaligned header size. bracket_size may not be a power of two.
      size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
          tmp_unaligned_header_size :
          tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
      DCHECK_EQ(tmp_header_size % bracket_size, static_cast<size_t>(0));
      DCHECK_EQ(tmp_header_size % 8, static_cast<size_t>(0));
      if (tmp_slots_size + tmp_header_size <= run_size) {
        // Found the right number of slots, that is, there was enough
        // space for the header (including the bit maps).
        num_of_slots = s;
        header_size = tmp_header_size;
        bulk_free_bit_map_offset = tmp_bulk_free_bit_map_off;
        thread_local_free_bit_map_offset = tmp_thread_local_free_bit_map_off;
        break;
      }
    }
    DCHECK(num_of_slots > 0 && header_size > 0 && bulk_free_bit_map_offset > 0);
    // Add the padding for the alignment remainder.
    header_size += run_size % bracket_size;
    DCHECK(header_size + num_of_slots * bracket_size == run_size);
    numOfSlots[i] = num_of_slots;
    headerSizes[i] = header_size;
    bulkFreeBitMapOffsets[i] = bulk_free_bit_map_offset;
    threadLocalFreeBitMapOffsets[i] = thread_local_free_bit_map_offset;
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
                << ", headerSizes[" << i << "]=" << headerSizes[i]
                << ", bulkFreeBitMapOffsets[" << i << "]=" << bulkFreeBitMapOffsets[i]
                << ", threadLocalFreeBitMapOffsets[" << i << "]=" << threadLocalFreeBitMapOffsets[i];
    }
  }
}
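
// Illustrative worked example (not part of the original sources): the
// i == 32 case above implies kNumOfSizeBrackets == 34, so the bracket sizes
// run 16, 32, ..., 512 bytes, then 1 KB and 2 KB. Assuming kPageSize == 4096,
// bracket 0 (16-byte slots, one page) settles on s = 249 slots: each of the
// three bit maps takes RoundUp(249, 32) / 8 = 32 bytes, so the unaligned
// header is 8 + 3 * 32 = 104 bytes, which aligns up to 112 (a multiple of
// 16), and 249 * 16 + 112 = 4096 fills the run exactly; s = 250 would need
// 4112 bytes and is rejected.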

void RosAlloc::BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
  *bytes_allocated += used_bytes;
}

void RosAlloc::ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
  ++(*objects_allocated);
}
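
// Illustrative usage sketch (not part of the original sources): a caller can
// pair these callbacks with InspectAll() to tally the heap, e.g.
//   size_t bytes_allocated = 0;
//   rosalloc->InspectAll(RosAlloc::BytesAllocatedCallback, &bytes_allocated);
// Free page runs pass used_bytes == 0 and are skipped by the early return, so
// only live large objects and allocated run slots are counted.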

void RosAlloc::Verify() {
  Thread* self = Thread::Current();
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
      << "The mutator lock isn't exclusively held at RosAlloc::Verify()";
  MutexLock mu(self, *Locks::thread_list_lock_);
  WriterMutexLock wmu(self, bulk_free_lock_);
  std::vector<Run*> runs;
  {
    MutexLock mu(self, lock_);
    size_t pm_end = page_map_.size();
    size_t i = 0;
    while (i < pm_end) {
      byte pm = page_map_[i];
      switch (pm) {
        case kPageMapEmpty: {
          // The start of a free page run.
          FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
          DCHECK(fpr->magic_num_ == kMagicNumFree) << "Bad magic number : " << fpr->magic_num_;
          CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
              << "An empty page must belong to the free page run set";
          size_t fpr_size = fpr->ByteSize(this);
          CHECK(IsAligned<kPageSize>(fpr_size))
              << "A free page run size isn't page-aligned : " << fpr_size;
          size_t num_pages = fpr_size / kPageSize;
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "A free page run size must be > 0 : " << fpr_size;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK_EQ(page_map_[j], kPageMapEmpty)
                << "A mismatch between the page map table for kPageMapEmpty "
                << " at page index " << j
                << " and the free page run size : page index range : "
                << i << " to " << (i + num_pages) << std::endl << DumpPageMap();
          }
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObject: {
          // The start of a large object.
          size_t num_pages = 1;
          size_t idx = i + 1;
          while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
            num_pages++;
            idx++;
          }
          void* start = base_ + i * kPageSize;
          mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
          size_t obj_size = obj->SizeOf();
          CHECK(obj_size > kLargeSizeThreshold)
              << "A rosalloc large object size must be > " << kLargeSizeThreshold;
          CHECK_EQ(num_pages, RoundUp(obj_size, kPageSize) / kPageSize)
              << "A rosalloc large object size " << obj_size
              << " does not match the page map table " << (num_pages * kPageSize)
              << std::endl << DumpPageMap();
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObjectPart:
          LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
          break;
        case kPageMapRun: {
          // The start of a run.
          Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
          DCHECK(run->magic_num_ == kMagicNum) << "Bad magic number : " << run->magic_num_;
          size_t idx = run->size_bracket_idx_;
          CHECK(idx < kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
          size_t num_pages = numOfPages[idx];
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "Run size must be > 0 : " << num_pages;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK_EQ(page_map_[j], kPageMapRunPart)
                << "A mismatch between the page map table for kPageMapRunPart "
                << " at page index " << j
                << " and the run size : page index range " << i << " to " << (i + num_pages)
                << std::endl << DumpPageMap();
          }
          runs.push_back(run);
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapRunPart:
          LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
          break;
        default:
          LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
          break;
      }
    }
  }

  // Call Verify() here for the lock order.
  for (auto& run : runs) {
    run->Verify(self, this);
  }
}

void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
  DCHECK(magic_num_ == kMagicNum) << "Bad magic number : " << Dump();
  size_t idx = size_bracket_idx_;
  CHECK(idx < kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  CHECK_EQ(slot_base + num_slots * bracket_size,
           reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize)
      << "Mismatch in the end address of the run " << Dump();
  // Check that the bulk free bitmap is clean. It's only used during BulkFree().
  CHECK(IsBulkFreeBitmapClean()) << "The bulk free bit map isn't clean " << Dump();
  // Check the bump index mode, if it's on.
  if (top_slot_idx_ < num_slots) {
    // If the bump index mode is on (top_slot_idx_ < num_slots), then
    // all of the slots after the top index must be free.
    for (size_t i = top_slot_idx_; i < num_slots; ++i) {
      size_t vec_idx = i / 32;
      size_t vec_off = i % 32;
      uint32_t vec = alloc_bit_map_[vec_idx];
      CHECK_EQ((vec & (1 << vec_off)), static_cast<uint32_t>(0))
          << "A slot >= top_slot_idx_ isn't free " << Dump();
    }
  } else {
    CHECK_EQ(top_slot_idx_, num_slots)
        << "If the bump index mode is off, the top index == the number of slots "
        << Dump();
  }
  // Check the thread local runs, the current runs, and the run sets.
  if (is_thread_local_) {
    // If it's a thread local run, then it must be pointed to by an owner thread.
    bool owner_found = false;
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
      Thread* thread = *it;
      for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
        MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
        Run* thread_local_run = reinterpret_cast<Run*>(thread->rosalloc_runs_[i]);
        if (thread_local_run == this) {
          CHECK(!owner_found)
              << "A thread local run has more than one owner thread " << Dump();
          CHECK_EQ(i, idx)
              << "A mismatching size bracket index in a thread local run " << Dump();
          owner_found = true;
        }
      }
    }
    CHECK(owner_found) << "A thread local run has no owner thread " << Dump();
  } else {
    // If it's not thread local, check that the thread local free bitmap is clean.
    CHECK(IsThreadLocalFreeBitmapClean())
        << "A non-thread-local run's thread local free bitmap isn't clean "
        << Dump();
    // Check if it's a current run for the size bracket.
    bool is_current_run = false;
    for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
      MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
      Run* current_run = rosalloc->current_runs_[i];
      if (idx == i) {
        if (this == current_run) {
          is_current_run = true;
        }
      } else {
        // If the size bracket index does not match, then it must not
        // be a current run.
        CHECK_NE(this, current_run)
            << "A current run points to a run with a wrong size bracket index " << Dump();
      }
    }
    // If it's neither a thread-local run nor a current run, then it must be
    // in a run set.
    if (!is_current_run) {
      MutexLock mu(self, rosalloc->lock_);
      std::set<Run*>& non_full_runs = rosalloc->non_full_runs_[idx];
      // If it's all free, it must be a free page run rather than a run.
      CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump();
      if (!IsFull()) {
        // If it's not full, it must be in the non-full run set.
        CHECK(non_full_runs.find(this) != non_full_runs.end())
            << "A non-full run isn't in the non-full run set " << Dump();
      } else {
        // If it's full, it must be in the full run set (debug build only).
        if (kIsDebugBuild) {
          hash_set<Run*, hash_run, eq_run>& full_runs = rosalloc->full_runs_[idx];
          CHECK(full_runs.find(this) != full_runs.end())
              << " A full run isn't in the full run set " << Dump();
        }
      }
    }
  }
  // Check each slot.
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK(num_slots >= slots) << "Out of bounds";
    uint32_t vec = alloc_bit_map_[v];
    uint32_t thread_local_free_vec = ThreadLocalFreeBitMap()[v];
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      // If a thread local run, slots may be marked freed in the
      // thread local free bitmap.
      bool is_thread_local_freed = is_thread_local_ && ((thread_local_free_vec >> i) & 0x1) != 0;
      if (is_allocated && !is_thread_local_freed) {
        byte* slot_addr = slot_base + (slots + i) * bracket_size;
        mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
        size_t obj_size = obj->SizeOf();
        CHECK_LE(obj_size, kLargeSizeThreshold)
            << "A run slot contains a large object " << Dump();
        CHECK_EQ(SizeToIndex(obj_size), idx)
            << "A run slot contains an object with wrong size " << Dump();
      }
    }
  }
}
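
// Illustrative note (not part of the original sources): the slot check above
// walks one 32-bit vector per 32 slots. Continuing the bracket-0 example of
// 249 slots, num_vec = RoundUp(249, 32) / 32 = 8, and the final vector only
// inspects end = 249 - 224 = 25 slots, so the unused high bits are ignored.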

}  // namespace allocator
}  // namespace gc
}  // namespace art