1/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "base/mutex-inl.h"
18#include "thread-inl.h"
19#include "thread_list.h"
20#include "rosalloc.h"
21
22#include <map>
23#include <list>
24#include <vector>
25
26namespace art {
27namespace gc {
28namespace allocator {
29
30extern "C" void* art_heap_rosalloc_morecore(RosAlloc* rosalloc, intptr_t increment);
31
32size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
33size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
34size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
35size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
36size_t RosAlloc::bulkFreeBitMapOffsets[kNumOfSizeBrackets];
37size_t RosAlloc::threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
38bool RosAlloc::initialized_ = false;
39
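// A RosAlloc instance manages the [base_, base_ + capacity_) region in page-size units,
// tracked by page_map_ and the address-ordered free_page_runs_ set. The constructor below
// starts with the entire capacity as a single free page run.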
40RosAlloc::RosAlloc(void* base, size_t capacity,
41 PageReleaseMode page_release_mode, size_t page_release_size_threshold)
42    : base_(reinterpret_cast<byte*>(base)), footprint_(capacity),
43 capacity_(capacity),
44 lock_("rosalloc global lock", kRosAllocGlobalLock),
45      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
46 page_release_mode_(page_release_mode),
47 page_release_size_threshold_(page_release_size_threshold) {
48  DCHECK(RoundUp(capacity, kPageSize) == capacity);
49  CHECK(IsAligned<kPageSize>(page_release_size_threshold_));
50  if (!initialized_) {
51 Initialize();
52 }
53 VLOG(heap) << "RosAlloc base="
54 << std::hex << (intptr_t)base_ << ", end="
55 << std::hex << (intptr_t)(base_ + capacity_)
56 << ", capacity=" << std::dec << capacity_;
57 memset(current_runs_, 0, sizeof(current_runs_));
58 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
59    size_bracket_locks_[i] = new Mutex("a rosalloc size bracket lock",
60 kRosAllocBracketLock);
61 }
62 size_t num_of_pages = capacity_ / kPageSize;
63 page_map_.resize(num_of_pages);
64 free_page_run_size_map_.resize(num_of_pages);
65
66 FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
67 if (kIsDebugBuild) {
68 free_pages->magic_num_ = kMagicNumFree;
69 }
70 free_pages->SetByteSize(this, capacity_);
71 DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
72  DCHECK(free_pages->IsFree());
73  free_pages->ReleasePages(this);
74  DCHECK(free_pages->IsFree());
75  free_page_runs_.insert(free_pages);
76 if (kTraceRosAlloc) {
77 LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
78 << reinterpret_cast<intptr_t>(free_pages)
79 << " into free_page_runs_";
80 }
81}
82
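// Allocates a contiguous run of num_pages pages. The caller must hold lock_. This does a
// first-fit scan of the address-ordered free_page_runs_ set, splitting off any remainder;
// if nothing fits, it grows the footprint via art_heap_rosalloc_morecore (by at least 2 MB,
// capped at the remaining capacity) and retries the last free page run. On success the
// page map entries are marked as a run or a large object.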
83void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
84 lock_.AssertHeld(self);
85 DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
86 FreePageRun* res = NULL;
87 size_t req_byte_size = num_pages * kPageSize;
88 // Find the lowest address free page run that's large enough.
89 for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
90 FreePageRun* fpr = *it;
91 DCHECK(fpr->IsFree());
92 size_t fpr_byte_size = fpr->ByteSize(this);
93 DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
94 if (req_byte_size <= fpr_byte_size) {
95 // Found one.
96 free_page_runs_.erase(it++);
97 if (kTraceRosAlloc) {
98 LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
99 << std::hex << reinterpret_cast<intptr_t>(fpr)
100 << " from free_page_runs_";
101 }
102 if (req_byte_size < fpr_byte_size) {
103 // Split.
104 FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
105 if (kIsDebugBuild) {
106 remainder->magic_num_ = kMagicNumFree;
107 }
108 remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
109 DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
110 // Don't need to call madvise on remainder here.
111 free_page_runs_.insert(remainder);
112 if (kTraceRosAlloc) {
113 LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
114 << reinterpret_cast<intptr_t>(remainder)
115 << " into free_page_runs_";
116 }
117 fpr->SetByteSize(this, req_byte_size);
118 DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
119 }
120 res = fpr;
121 break;
122 } else {
123 ++it;
124 }
125 }
126
127 // Failed to allocate pages. Grow the footprint, if possible.
128 if (UNLIKELY(res == NULL && capacity_ > footprint_)) {
129 FreePageRun* last_free_page_run = NULL;
130 size_t last_free_page_run_size;
131 auto it = free_page_runs_.rbegin();
132 if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
133 // There is a free page run at the end.
134 DCHECK(last_free_page_run->IsFree());
135 DCHECK(page_map_[ToPageMapIndex(last_free_page_run)] == kPageMapEmpty);
136 last_free_page_run_size = last_free_page_run->ByteSize(this);
137 } else {
138 // There is no free page run at the end.
139 last_free_page_run_size = 0;
140 }
141 DCHECK_LT(last_free_page_run_size, req_byte_size);
142 if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
143 // If we grow the heap, we can allocate it.
144 size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
145 capacity_ - footprint_);
146 DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
147 size_t new_footprint = footprint_ + increment;
148 size_t new_num_of_pages = new_footprint / kPageSize;
149 DCHECK_LT(page_map_.size(), new_num_of_pages);
150 DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
151 page_map_.resize(new_num_of_pages);
152 free_page_run_size_map_.resize(new_num_of_pages);
153 art_heap_rosalloc_morecore(this, increment);
154 if (last_free_page_run_size > 0) {
155 // There was a free page run at the end. Expand its size.
156 DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
157 last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
158 DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
159 DCHECK(last_free_page_run->End(this) == base_ + new_footprint);
160 } else {
161 // Otherwise, insert a new free page run at the end.
162 FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
163 if (kIsDebugBuild) {
164 new_free_page_run->magic_num_ = kMagicNumFree;
165 }
166 new_free_page_run->SetByteSize(this, increment);
167 DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
168 free_page_runs_.insert(new_free_page_run);
169 DCHECK(*free_page_runs_.rbegin() == new_free_page_run);
170 if (kTraceRosAlloc) {
171 LOG(INFO) << "RosAlloc::AlloPages() : Grew the heap by inserting run 0x"
172 << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
173 << " into free_page_runs_";
174 }
175 }
176 DCHECK_LE(footprint_ + increment, capacity_);
177 if (kTraceRosAlloc) {
178 LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
179 << footprint_ << " to " << new_footprint;
180 }
181 footprint_ = new_footprint;
182
183 // And retry the last free page run.
184 it = free_page_runs_.rbegin();
185 DCHECK(it != free_page_runs_.rend());
186 FreePageRun* fpr = *it;
187 if (kIsDebugBuild && last_free_page_run_size > 0) {
188 DCHECK(last_free_page_run != NULL);
189 DCHECK_EQ(last_free_page_run, fpr);
190 }
191 size_t fpr_byte_size = fpr->ByteSize(this);
192 DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
193 DCHECK_LE(req_byte_size, fpr_byte_size);
194 free_page_runs_.erase(fpr);
195 if (kTraceRosAlloc) {
196 LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
197 << " from free_page_runs_";
198 }
199 if (req_byte_size < fpr_byte_size) {
200 // Split if there's a remainder.
201 FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
202 if (kIsDebugBuild) {
203 remainder->magic_num_ = kMagicNumFree;
204 }
205 remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
206 DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
207 free_page_runs_.insert(remainder);
208 if (kTraceRosAlloc) {
209 LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
210 << reinterpret_cast<intptr_t>(remainder)
211 << " into free_page_runs_";
212 }
213 fpr->SetByteSize(this, req_byte_size);
214 DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
215 }
216 res = fpr;
217 }
218 }
219 if (LIKELY(res != NULL)) {
220 // Update the page map.
221 size_t page_map_idx = ToPageMapIndex(res);
222 for (size_t i = 0; i < num_pages; i++) {
223 DCHECK(page_map_[page_map_idx + i] == kPageMapEmpty);
224 }
225 switch (page_map_type) {
226 case kPageMapRun:
227 page_map_[page_map_idx] = kPageMapRun;
228 for (size_t i = 1; i < num_pages; i++) {
229 page_map_[page_map_idx + i] = kPageMapRunPart;
230 }
231 break;
232 case kPageMapLargeObject:
233 page_map_[page_map_idx] = kPageMapLargeObject;
234 for (size_t i = 1; i < num_pages; i++) {
235 page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
236 }
237 break;
238 default:
239 LOG(FATAL) << "Unreachable - page map type: " << page_map_type;
240 break;
241 }
242 if (kIsDebugBuild) {
243 // Clear the first page which isn't madvised away in the debug
244 // build for the magic number.
245 memset(res, 0, kPageSize);
246 }
247 if (kTraceRosAlloc) {
248 LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
249 << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
250 << "(" << std::dec << (num_pages * kPageSize) << ")";
251 }
252 return res;
253 }
254
255 // Fail.
256 if (kTraceRosAlloc) {
257 LOG(INFO) << "RosAlloc::AllocPages() : NULL";
258 }
259 return nullptr;
260}
261
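// Frees the run or large object starting at ptr. The caller must hold lock_. The page map
// entries are cleared to count the pages, the region is turned into a FreePageRun, adjacent
// free runs on both sides are coalesced, pages are released per the page release mode, and
// the result is reinserted into free_page_runs_.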
262void RosAlloc::FreePages(Thread* self, void* ptr) {
263 lock_.AssertHeld(self);
264 size_t pm_idx = ToPageMapIndex(ptr);
265 DCHECK(pm_idx < page_map_.size());
266 byte pm_type = page_map_[pm_idx];
267 DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
268 byte pm_part_type;
269 switch (pm_type) {
270 case kPageMapRun:
271 pm_part_type = kPageMapRunPart;
272 break;
273 case kPageMapLargeObject:
274 pm_part_type = kPageMapLargeObjectPart;
275 break;
276 default:
277 pm_part_type = kPageMapEmpty;
278 LOG(FATAL) << "Unreachable - RosAlloc::FreePages() : " << "pm_idx=" << pm_idx << ", pm_type="
279 << static_cast<int>(pm_type) << ", ptr=" << std::hex
280 << reinterpret_cast<intptr_t>(ptr);
281 return;
282 }
283 // Update the page map and count the number of pages.
284 size_t num_pages = 1;
285 page_map_[pm_idx] = kPageMapEmpty;
286 size_t idx = pm_idx + 1;
287 size_t end = page_map_.size();
288 while (idx < end && page_map_[idx] == pm_part_type) {
289 page_map_[idx] = kPageMapEmpty;
290 num_pages++;
291 idx++;
292 }
293
294 if (kTraceRosAlloc) {
295 LOG(INFO) << "RosAlloc::FreePages() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
296 << "-0x" << (reinterpret_cast<intptr_t>(ptr) + num_pages * kPageSize)
297 << "(" << std::dec << (num_pages * kPageSize) << ")";
298 }
299
300 // Turn it into a free run.
301 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
302 if (kIsDebugBuild) {
303 fpr->magic_num_ = kMagicNumFree;
304 }
305 fpr->SetByteSize(this, num_pages * kPageSize);
306 DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
307
308 DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
309 if (!free_page_runs_.empty()) {
310 // Try to coalesce in the higher address direction.
311 if (kTraceRosAlloc) {
312 LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce a free page run 0x"
313 << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
314 << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
315 << (fpr->End(this) == End() ? page_map_.size() : ToPageMapIndex(fpr->End(this))) << "]";
316 }
317 auto higher_it = free_page_runs_.upper_bound(fpr);
318 if (higher_it != free_page_runs_.end()) {
319 for (auto it = higher_it; it != free_page_runs_.end(); ) {
320 FreePageRun* h = *it;
321 DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
322 if (kTraceRosAlloc) {
323 LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
324 << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
325 << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
326 << (h->End(this) == End() ? page_map_.size() : ToPageMapIndex(h->End(this))) << "]";
327 }
328 if (fpr->End(this) == h->Begin()) {
329 if (kTraceRosAlloc) {
330 LOG(INFO) << "Success";
331 }
332 free_page_runs_.erase(it++);
333 if (kTraceRosAlloc) {
334 LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
335 << reinterpret_cast<intptr_t>(h)
336 << " from free_page_runs_";
337 }
338 fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
339 DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
340 } else {
341 // Not adjacent. Stop.
342 if (kTraceRosAlloc) {
343 LOG(INFO) << "Fail";
344 }
345 break;
346 }
347 }
348 }
349 // Try to coalesce in the lower address direction.
350 auto lower_it = free_page_runs_.upper_bound(fpr);
351 if (lower_it != free_page_runs_.begin()) {
352 --lower_it;
353 for (auto it = lower_it; ; ) {
354 // We want to try to coalesce with the first element but
355 // there's no "<=" operator for the iterator.
356 bool to_exit_loop = it == free_page_runs_.begin();
357
358 FreePageRun* l = *it;
359 DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
360 if (kTraceRosAlloc) {
361 LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
362 << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
363 << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
364 << (l->End(this) == End() ? page_map_.size() : ToPageMapIndex(l->End(this))) << "]";
365 }
366 if (l->End(this) == fpr->Begin()) {
367 if (kTraceRosAlloc) {
368 LOG(INFO) << "Success";
369 }
370 free_page_runs_.erase(it--);
371 if (kTraceRosAlloc) {
372 LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
373 << reinterpret_cast<intptr_t>(l)
374 << " from free_page_runs_";
375 }
376 l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
377 DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
378 fpr = l;
379 } else {
380 // Not adjacent. Stop.
381 if (kTraceRosAlloc) {
382 LOG(INFO) << "Fail";
383 }
384 break;
385 }
386 if (to_exit_loop) {
387 break;
388 }
389 }
390 }
391 }
392
393 // Insert it.
394 DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
395 DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
396  DCHECK(fpr->IsFree());
397  fpr->ReleasePages(this);
398  DCHECK(fpr->IsFree());
399  free_page_runs_.insert(fpr);
400 DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
401 if (kTraceRosAlloc) {
402 LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
403 << " into free_page_runs_";
404 }
405}
406
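// Allocations larger than kLargeSizeThreshold bypass the runs and take whole pages directly
// under the global lock. The memory is zeroed explicitly unless DoesReleaseAllPages()
// guarantees that freed pages come back zero-filled.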
407void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) {
408 DCHECK(size > kLargeSizeThreshold);
409 size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
410 void* r;
411 {
412 MutexLock mu(self, lock_);
413 r = AllocPages(self, num_pages, kPageMapLargeObject);
414  }
415  if (UNLIKELY(r == nullptr)) {
416 if (kTraceRosAlloc) {
417 LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
418 }
419 return nullptr;
420 }
421  if (bytes_allocated != NULL) {
422 *bytes_allocated = num_pages * kPageSize;
423 }
424 if (kTraceRosAlloc) {
425    LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
426 << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
427 << "(" << std::dec << (num_pages * kPageSize) << ")";
428 }
429 if (!DoesReleaseAllPages()) {
430 // If it does not release all pages, pages may not be zeroed out.
431 memset(r, 0, size);
432  }
433  // Check if the returned memory is really all zero.
434  if (kCheckZeroMemory) {
435    byte* bytes = reinterpret_cast<byte*>(r);
436    for (size_t i = 0; i < size; ++i) {
437 DCHECK_EQ(bytes[i], 0);
438 }
439 }
440  return r;
441}
442
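// Classifies ptr via the page map under lock_: large objects are freed page-wise right away,
// while run slots are freed via FreeFromRun, after walking back to the run's header page,
// outside of lock_.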
443void RosAlloc::FreeInternal(Thread* self, void* ptr) {
444 DCHECK(base_ <= ptr && ptr < base_ + footprint_);
445 size_t pm_idx = RoundDownToPageMapIndex(ptr);
446 bool free_from_run = false;
447 Run* run = NULL;
448 {
449 MutexLock mu(self, lock_);
450 DCHECK(pm_idx < page_map_.size());
451 byte page_map_entry = page_map_[pm_idx];
452 if (kTraceRosAlloc) {
453 LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
454 << ", page_map_entry=" << static_cast<int>(page_map_entry);
455 }
456 switch (page_map_[pm_idx]) {
457 case kPageMapEmpty:
458 LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
459 return;
460 case kPageMapLargeObject:
461 FreePages(self, ptr);
462 return;
463 case kPageMapLargeObjectPart:
464 LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
465 return;
466 case kPageMapRun:
467 case kPageMapRunPart: {
468 free_from_run = true;
469 size_t pi = pm_idx;
470 DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
471 // Find the beginning of the run.
472 while (page_map_[pi] != kPageMapRun) {
473 pi--;
474 DCHECK(pi < capacity_ / kPageSize);
475 }
476 DCHECK(page_map_[pi] == kPageMapRun);
477 run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
478 DCHECK(run->magic_num_ == kMagicNum);
479 break;
480 }
481 default:
482 LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
483 return;
484 }
485 }
486 if (LIKELY(free_from_run)) {
487 DCHECK(run != NULL);
488 FreeFromRun(self, ptr, run);
489 }
490}
491
492void RosAlloc::Free(Thread* self, void* ptr) {
493 ReaderMutexLock rmu(self, bulk_free_lock_);
494 FreeInternal(self, ptr);
495}
496
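// Returns a run to allocate from for the given size bracket: the lowest-address non-full run
// if one exists, otherwise a freshly allocated and initialized run of numOfPages[idx] pages.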
497RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
498 Run* new_run;
499 size_t num_pages = numOfPages[idx];
500 // Get the lowest address non-full run from the binary tree.
501 Run* temp = NULL;
502 std::set<Run*>* bt = &non_full_runs_[idx];
503 std::set<Run*>::iterator found = bt->lower_bound(temp);
504 if (found != bt->end()) {
505 // If there's one, use it as the current run.
506 Run* non_full_run = *found;
507 DCHECK(non_full_run != NULL);
508 new_run = non_full_run;
509 DCHECK_EQ(new_run->is_thread_local_, 0);
510 bt->erase(found);
511 DCHECK_EQ(non_full_run->is_thread_local_, 0);
512 } else {
513 // If there's none, allocate a new run and use it as the
514 // current run.
515 {
516 MutexLock mu(self, lock_);
517 new_run = reinterpret_cast<Run*>(AllocPages(self, num_pages, kPageMapRun));
518 }
519 if (new_run == NULL) {
520 return NULL;
521 }
522 if (kIsDebugBuild) {
523 new_run->magic_num_ = kMagicNum;
524 }
525 new_run->size_bracket_idx_ = idx;
526 new_run->top_slot_idx_ = 0;
527 new_run->ClearBitMaps();
528 new_run->to_be_bulk_freed_ = false;
529 }
530 return new_run;
531}
532
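// Slot allocation path for sizes up to kLargeSizeThreshold. Small brackets
// (idx <= kMaxThreadLocalSizeBracketIdx) allocate from a per-thread run, taking the bracket
// lock only to refill it or to merge thread-local free bits back into the alloc bit map;
// larger brackets share current_runs_[idx] under the bracket lock. The slot is zeroed up to
// the requested size before it is returned.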
533void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated) {
534 DCHECK(size <= kLargeSizeThreshold);
535 size_t bracket_size;
536 size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
537 DCHECK_EQ(idx, SizeToIndex(size));
538 DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
539 DCHECK_EQ(bracket_size, bracketSizes[idx]);
540 DCHECK(size <= bracket_size);
541 DCHECK(size > 512 || bracket_size - size < 16);
542
543 void* slot_addr;
544
545 if (LIKELY(idx <= kMaxThreadLocalSizeBracketIdx)) {
546 // Use a thread-local run.
547 Run* thread_local_run = reinterpret_cast<Run*>(self->rosalloc_runs_[idx]);
548 if (UNLIKELY(thread_local_run == NULL)) {
549 MutexLock mu(self, *size_bracket_locks_[idx]);
550 thread_local_run = RefillRun(self, idx);
551 if (UNLIKELY(thread_local_run == NULL)) {
552 return NULL;
553 }
554 DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
555 DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
556 thread_local_run->is_thread_local_ = 1;
557 self->rosalloc_runs_[idx] = thread_local_run;
558 DCHECK(!thread_local_run->IsFull());
559 }
560
561 DCHECK(thread_local_run != NULL);
562 DCHECK_NE(thread_local_run->is_thread_local_, 0);
563 slot_addr = thread_local_run->AllocSlot();
564
565 if (UNLIKELY(slot_addr == NULL)) {
566 // The run got full. Try to free slots.
567 DCHECK(thread_local_run->IsFull());
568 MutexLock mu(self, *size_bracket_locks_[idx]);
569 bool is_all_free_after_merge;
570 if (thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&is_all_free_after_merge)) {
571 // Some slot got freed. Keep it.
572 DCHECK(!thread_local_run->IsFull());
573 DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
574 if (is_all_free_after_merge) {
575 // Reinstate the bump index mode if it's all free.
576 DCHECK_EQ(thread_local_run->top_slot_idx_, numOfSlots[idx]);
577 thread_local_run->top_slot_idx_ = 0;
578 }
579 } else {
580 // No slots got freed. Try to refill the thread-local run.
581 DCHECK(thread_local_run->IsFull());
582 self->rosalloc_runs_[idx] = NULL;
583 thread_local_run->is_thread_local_ = 0;
584 if (kIsDebugBuild) {
585 full_runs_[idx].insert(thread_local_run);
586 if (kTraceRosAlloc) {
587 LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
588 << reinterpret_cast<intptr_t>(thread_local_run)
589 << " into full_runs_[" << std::dec << idx << "]";
590 }
591 }
592 DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
593 DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
594 thread_local_run = RefillRun(self, idx);
595 if (UNLIKELY(thread_local_run == NULL)) {
596 return NULL;
597 }
598 DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
599 DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
600 thread_local_run->is_thread_local_ = 1;
601 self->rosalloc_runs_[idx] = thread_local_run;
602 DCHECK(!thread_local_run->IsFull());
603 }
604
605 DCHECK(thread_local_run != NULL);
606 DCHECK(!thread_local_run->IsFull());
607 DCHECK_NE(thread_local_run->is_thread_local_, 0);
608 slot_addr = thread_local_run->AllocSlot();
609 // Must succeed now with a new run.
610 DCHECK(slot_addr != NULL);
611 }
612 if (kTraceRosAlloc) {
613 LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
614 << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
615 << "(" << std::dec << (bracket_size) << ")";
616 }
617 } else {
618 // Use the (shared) current run.
619 MutexLock mu(self, *size_bracket_locks_[idx]);
620 Run* current_run = current_runs_[idx];
621 if (UNLIKELY(current_run == NULL)) {
622 current_run = RefillRun(self, idx);
623 if (UNLIKELY(current_run == NULL)) {
624 return NULL;
625 }
626 DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
627 DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
628 current_run->is_thread_local_ = 0;
629 current_runs_[idx] = current_run;
630 DCHECK(!current_run->IsFull());
631 }
632 DCHECK(current_run != NULL);
633 slot_addr = current_run->AllocSlot();
634 if (UNLIKELY(slot_addr == NULL)) {
635 // The current run got full. Try to refill it.
636 DCHECK(current_run->IsFull());
637 current_runs_[idx] = NULL;
638 if (kIsDebugBuild) {
639 // Insert it into full_runs and set the current run to NULL.
640 full_runs_[idx].insert(current_run);
641 if (kTraceRosAlloc) {
642 LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(current_run)
643 << " into full_runs_[" << std::dec << idx << "]";
644 }
645 }
646 DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
647 DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
648 current_run = RefillRun(self, idx);
649 if (UNLIKELY(current_run == NULL)) {
650 return NULL;
651 }
652 DCHECK(current_run != NULL);
653 DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
654 DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
655 current_run->is_thread_local_ = 0;
656 current_runs_[idx] = current_run;
657 DCHECK(!current_run->IsFull());
658 slot_addr = current_run->AllocSlot();
659 // Must succeed now with a new run.
660 DCHECK(slot_addr != NULL);
661 }
662 if (kTraceRosAlloc) {
663 LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
664 << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
665 << "(" << std::dec << (bracket_size) << ")";
666 }
667 }
668 if (LIKELY(bytes_allocated != NULL)) {
669 *bytes_allocated = bracket_size;
670 }
671 memset(slot_addr, 0, size);
672 return slot_addr;
673}
674
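// Frees one slot under the bracket lock. For thread-local runs only the thread-local free
// bit map is marked; the owning thread merges it later. Otherwise the slot is freed directly
// and the run is moved between the non-full set, the full set (debug only), and the free
// pages as its occupancy changes.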
675void RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
676 DCHECK(run->magic_num_ == kMagicNum);
677 DCHECK(run < ptr && ptr < run->End());
678 size_t idx = run->size_bracket_idx_;
679 MutexLock mu(self, *size_bracket_locks_[idx]);
680 bool run_was_full = false;
681 if (kIsDebugBuild) {
682 run_was_full = run->IsFull();
683 }
684 if (kTraceRosAlloc) {
685 LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
686 }
687 if (LIKELY(run->is_thread_local_ != 0)) {
688 // It's a thread-local run. Just mark the thread-local free bit map and return.
689 DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
690 DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
691 DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
692 run->MarkThreadLocalFreeBitMap(ptr);
693 if (kTraceRosAlloc) {
694 LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
695 << reinterpret_cast<intptr_t>(run);
696 }
697 // A thread local run will be kept as a thread local even if it's become all free.
698 return;
699 }
700 // Free the slot in the run.
701 run->FreeSlot(ptr);
702 std::set<Run*>* non_full_runs = &non_full_runs_[idx];
703 if (run->IsAllFree()) {
704 // It has just become completely free. Free the pages of this run.
705 std::set<Run*>::iterator pos = non_full_runs->find(run);
706 if (pos != non_full_runs->end()) {
707 non_full_runs->erase(pos);
708 if (kTraceRosAlloc) {
709 LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
710 << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
711 }
712 }
713 if (run == current_runs_[idx]) {
714 current_runs_[idx] = NULL;
715 }
716 DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
717 DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
718 {
719 MutexLock mu(self, lock_);
720 FreePages(self, run);
721 }
722 } else {
723 // It is not completely free. If it wasn't the current run or
724 // already in the non-full run set (i.e., it was full) insert it
725 // into the non-full run set.
726 if (run != current_runs_[idx]) {
727 hash_set<Run*, hash_run, eq_run>* full_runs =
728 kIsDebugBuild ? &full_runs_[idx] : NULL;
729 std::set<Run*>::iterator pos = non_full_runs->find(run);
730 if (pos == non_full_runs->end()) {
731 DCHECK(run_was_full);
732 DCHECK(full_runs->find(run) != full_runs->end());
733 if (kIsDebugBuild) {
734 full_runs->erase(run);
735 if (kTraceRosAlloc) {
736 LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
737 << reinterpret_cast<intptr_t>(run) << " from full_runs_";
738 }
739 }
740 non_full_runs->insert(run);
741 DCHECK(!run->IsFull());
742 if (kTraceRosAlloc) {
743 LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
744 << reinterpret_cast<intptr_t>(run)
745 << " into non_full_runs_[" << std::dec << idx << "]";
746 }
747 }
748 }
749 }
750}
751
752void RosAlloc::Run::Dump() {
753 size_t idx = size_bracket_idx_;
754 size_t num_slots = numOfSlots[idx];
755 size_t num_vec = RoundUp(num_slots, 32) / 32;
756 std::string bit_map_str;
757 for (size_t v = 0; v < num_vec; v++) {
758 uint32_t vec = alloc_bit_map_[v];
759 if (v != num_vec - 1) {
760 bit_map_str.append(StringPrintf("%x-", vec));
761 } else {
762 bit_map_str.append(StringPrintf("%x", vec));
763 }
764 }
765 LOG(INFO) << "Run : " << std::hex << reinterpret_cast<intptr_t>(this)
766 << std::dec << ", idx=" << idx << ", bit_map=" << bit_map_str;
767}
768
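// Allocates one slot from this run: in bump-index mode (top_slot_idx_ < num_slots) the next
// sequential slot is handed out; once the bump index is exhausted, the alloc bit map is
// scanned for a zero bit using __builtin_ffs on the inverted word.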
769void* RosAlloc::Run::AllocSlot() {
770 size_t idx = size_bracket_idx_;
771 size_t num_slots = numOfSlots[idx];
772 DCHECK_LE(top_slot_idx_, num_slots);
773 if (LIKELY(top_slot_idx_ < num_slots)) {
774 // If it's in bump index mode, grab the top slot and increment the top index.
775 size_t slot_idx = top_slot_idx_;
776 byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
777 if (kTraceRosAlloc) {
778 LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
779 << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
780 }
781 top_slot_idx_++;
782 size_t vec_idx = slot_idx / 32;
783 size_t vec_off = slot_idx % 32;
784 uint32_t* vec = &alloc_bit_map_[vec_idx];
785 DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
786 *vec |= 1 << vec_off;
787 DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
788 return slot_addr;
789 }
790 // Not in bump index mode. Search the alloc bit map for an empty slot.
791 size_t num_vec = RoundUp(num_slots, 32) / 32;
792 size_t slot_idx = 0;
793 bool found_slot = false;
794 for (size_t v = 0; v < num_vec; v++) {
795 uint32_t *vecp = &alloc_bit_map_[v];
796 uint32_t ffz1 = __builtin_ffs(~*vecp);
797 uint32_t ffz;
798 // TODO: Use LIKELY or UNLIKELY here?
799 if (LIKELY(ffz1 > 0 && (ffz = ffz1 - 1) + v * 32 < num_slots)) {
800 // Found an empty slot. Set the bit.
801 DCHECK_EQ((*vecp & (1 << ffz)), static_cast<uint32_t>(0));
802 *vecp |= (1 << ffz);
803 DCHECK_NE((*vecp & (1 << ffz)), static_cast<uint32_t>(0));
804 slot_idx = ffz + v * 32;
805 found_slot = true;
806 break;
807 }
808 }
809 if (LIKELY(found_slot)) {
810 byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
811 if (kTraceRosAlloc) {
812 LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
813 << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
814 }
815 return slot_addr;
816 }
817 return NULL;
818}
819
820inline void RosAlloc::Run::FreeSlot(void* ptr) {
821 DCHECK_EQ(is_thread_local_, 0);
822 byte idx = size_bracket_idx_;
823 size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
824 - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
825 DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
826 size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
827 DCHECK(slot_idx < numOfSlots[idx]);
828 size_t vec_idx = slot_idx / 32;
829 if (kIsDebugBuild) {
830 size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
831 DCHECK(vec_idx < num_vec);
832 }
833 size_t vec_off = slot_idx % 32;
834 uint32_t* vec = &alloc_bit_map_[vec_idx];
835 DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
836 *vec &= ~(1 << vec_off);
837 DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
838 if (kTraceRosAlloc) {
839 LOG(INFO) << "RosAlloc::Run::FreeSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
840 << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
841 }
842}
843
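// Applies the thread-local free bit map to the alloc bit map, clearing the freed slots and
// the thread-local bits. Returns whether any bit changed and reports whether the run became
// completely free.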
844inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out) {
845 DCHECK_NE(is_thread_local_, 0);
846 // Free slots in the alloc bit map based on the thread local free bit map.
847 byte idx = size_bracket_idx_;
848 size_t num_slots = numOfSlots[idx];
849 size_t num_vec = RoundUp(num_slots, 32) / 32;
850 bool changed = false;
851 uint32_t* vecp = &alloc_bit_map_[0];
852 uint32_t* tl_free_vecp = &thread_local_free_bit_map()[0];
853 bool is_all_free_after = true;
854 for (size_t v = 0; v < num_vec; v++, vecp++, tl_free_vecp++) {
855 uint32_t tl_free_vec = *tl_free_vecp;
856 uint32_t vec_before = *vecp;
857 uint32_t vec_after;
858 if (tl_free_vec != 0) {
859 vec_after = vec_before & ~tl_free_vec;
860 *vecp = vec_after;
861 changed = true;
862 *tl_free_vecp = 0; // clear the thread local free bit map.
863 } else {
864 vec_after = vec_before;
865 }
866 if (vec_after != 0) {
867 is_all_free_after = false;
868 }
869 DCHECK_EQ(*tl_free_vecp, static_cast<uint32_t>(0));
870 }
871 *is_all_free_after_out = is_all_free_after;
872 // Return true if there was at least a bit set in the thread-local
873 // free bit map and at least a bit in the alloc bit map changed.
874 return changed;
875}
876
877inline void RosAlloc::Run::MergeBulkFreeBitMapIntoAllocBitMap() {
878 DCHECK_EQ(is_thread_local_, 0);
879 // Free slots in the alloc bit map based on the bulk free bit map.
880 byte idx = size_bracket_idx_;
881 size_t num_slots = numOfSlots[idx];
882 size_t num_vec = RoundUp(num_slots, 32) / 32;
883 uint32_t* vecp = &alloc_bit_map_[0];
884 uint32_t* free_vecp = &bulk_free_bit_map()[0];
885 for (size_t v = 0; v < num_vec; v++, vecp++, free_vecp++) {
886 uint32_t free_vec = *free_vecp;
887 if (free_vec != 0) {
888 *vecp &= ~free_vec;
889 *free_vecp = 0; // clear the bulk free bit map.
890 }
891 DCHECK_EQ(*free_vecp, static_cast<uint32_t>(0));
892 }
893}
894
895inline void RosAlloc::Run::UnionBulkFreeBitMapToThreadLocalFreeBitMap() {
896 DCHECK_NE(is_thread_local_, 0);
897 // Union the thread local bit map with the bulk free bit map.
898 byte idx = size_bracket_idx_;
899 size_t num_slots = numOfSlots[idx];
900 size_t num_vec = RoundUp(num_slots, 32) / 32;
901 uint32_t* to_vecp = &thread_local_free_bit_map()[0];
902 uint32_t* from_vecp = &bulk_free_bit_map()[0];
903 for (size_t v = 0; v < num_vec; v++, to_vecp++, from_vecp++) {
904 uint32_t from_vec = *from_vecp;
905 if (from_vec != 0) {
906 *to_vecp |= from_vec;
907 *from_vecp = 0; // clear the from free bit map.
908 }
909 DCHECK_EQ(*from_vecp, static_cast<uint32_t>(0));
910 }
911}
912
913inline void RosAlloc::Run::MarkThreadLocalFreeBitMap(void* ptr) {
914 DCHECK_NE(is_thread_local_, 0);
915 MarkFreeBitMapShared(ptr, thread_local_free_bit_map(), "MarkThreadLocalFreeBitMap");
916}
917
918inline void RosAlloc::Run::MarkBulkFreeBitMap(void* ptr) {
919 MarkFreeBitMapShared(ptr, bulk_free_bit_map(), "MarkFreeBitMap");
920}
921
922inline void RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
923 const char* caller_name) {
924 byte idx = size_bracket_idx_;
925 size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
926 - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
927 DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
928 size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
929 DCHECK(slot_idx < numOfSlots[idx]);
930 size_t vec_idx = slot_idx / 32;
931 if (kIsDebugBuild) {
932 size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
933 DCHECK(vec_idx < num_vec);
934 }
935 size_t vec_off = slot_idx % 32;
936 uint32_t* vec = &free_bit_map_base[vec_idx];
937 DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
938 *vec |= 1 << vec_off;
939 DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
940 if (kTraceRosAlloc) {
941 LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : 0x" << std::hex
942 << reinterpret_cast<intptr_t>(ptr)
943 << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
944 }
945}
946
947inline bool RosAlloc::Run::IsAllFree() {
948 byte idx = size_bracket_idx_;
949 size_t num_slots = numOfSlots[idx];
950 size_t num_vec = RoundUp(num_slots, 32) / 32;
951 for (size_t v = 0; v < num_vec; v++) {
952 uint32_t vec = alloc_bit_map_[v];
953 if (vec != 0) {
954 return false;
955 }
956 }
957 return true;
958}
959
960inline bool RosAlloc::Run::IsFull() {
961 byte idx = size_bracket_idx_;
962 size_t num_slots = numOfSlots[idx];
963 size_t num_vec = RoundUp(num_slots, 32) / 32;
964 size_t slots = 0;
965 for (size_t v = 0; v < num_vec; v++, slots += 32) {
966 DCHECK(num_slots >= slots);
967 uint32_t vec = alloc_bit_map_[v];
968 uint32_t mask = (num_slots - slots >= 32) ? static_cast<uint32_t>(-1)
969 : (1 << (num_slots - slots)) - 1;
970 DCHECK(num_slots - slots >= 32 ? mask == static_cast<uint32_t>(-1) : true);
971 if (vec != mask) {
972 return false;
973 }
974 }
975 return true;
976}
977
978inline void RosAlloc::Run::ClearBitMaps() {
979 byte idx = size_bracket_idx_;
980 size_t num_slots = numOfSlots[idx];
981 size_t num_vec = RoundUp(num_slots, 32) / 32;
982 memset(alloc_bit_map_, 0, sizeof(uint32_t) * num_vec * 3);
983}
984
985void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
986 void* arg) {
987 size_t idx = size_bracket_idx_;
988 byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
989 size_t num_slots = numOfSlots[idx];
990 size_t bracket_size = IndexToBracketSize(idx);
991 DCHECK_EQ(slot_base + num_slots * bracket_size, reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize);
992 size_t num_vec = RoundUp(num_slots, 32) / 32;
993 size_t slots = 0;
994 for (size_t v = 0; v < num_vec; v++, slots += 32) {
995 DCHECK(num_slots >= slots);
996 uint32_t vec = alloc_bit_map_[v];
997 size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
998 for (size_t i = 0; i < end; ++i) {
999 bool is_allocated = ((vec >> i) & 0x1) != 0;
1000 byte* slot_addr = slot_base + (slots + i) * bracket_size;
1001 if (is_allocated) {
1002 handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
1003 } else {
1004 handler(slot_addr, slot_addr + bracket_size, 0, arg);
1005 }
1006 }
1007 }
1008}
1009
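// Bulk free used by the GC. Phase one classifies each pointer under lock_ only: large
// objects are freed immediately and run slots are marked in the run's bulk free bit map,
// collecting the affected runs. Phase two takes each run's bracket lock and either unions
// the bulk free bits into the thread-local free bit map (thread-local runs) or merges them
// into the alloc bit map and reclassifies the run.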
1010void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
1011 if (false) {
1012 // Used only to test Free() as GC uses only BulkFree().
1013 for (size_t i = 0; i < num_ptrs; ++i) {
1014 FreeInternal(self, ptrs[i]);
1015 }
1016 return;
1017 }
1018
1019 WriterMutexLock wmu(self, bulk_free_lock_);
1020
1021 // First mark slots to free in the bulk free bit map without locking the
1022 // size bracket locks. On host, hash_set is faster than vector + flag.
1023#ifdef HAVE_ANDROID_OS
1024 std::vector<Run*> runs;
1025#else
1026 hash_set<Run*, hash_run, eq_run> runs;
1027#endif
1028 {
1029 for (size_t i = 0; i < num_ptrs; i++) {
1030 void* ptr = ptrs[i];
1031 ptrs[i] = NULL;
1032 DCHECK(base_ <= ptr && ptr < base_ + footprint_);
1033 size_t pm_idx = RoundDownToPageMapIndex(ptr);
1034 bool free_from_run = false;
1035 Run* run = NULL;
1036 {
1037 MutexLock mu(self, lock_);
1038 DCHECK(pm_idx < page_map_.size());
1039 byte page_map_entry = page_map_[pm_idx];
1040 if (kTraceRosAlloc) {
1041 LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
1042 << std::dec << pm_idx
1043 << ", page_map_entry=" << static_cast<int>(page_map_entry);
1044 }
1045 if (LIKELY(page_map_entry == kPageMapRun)) {
1046 free_from_run = true;
1047 run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
1048 DCHECK(run->magic_num_ == kMagicNum);
1049 } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
1050 free_from_run = true;
1051 size_t pi = pm_idx;
1052 DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
1053 // Find the beginning of the run.
1054 while (page_map_[pi] != kPageMapRun) {
1055 pi--;
1056 DCHECK(pi < capacity_ / kPageSize);
1057 }
1058 DCHECK(page_map_[pi] == kPageMapRun);
1059 run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
1060 DCHECK(run->magic_num_ == kMagicNum);
1061 } else if (page_map_entry == kPageMapLargeObject) {
1062 FreePages(self, ptr);
1063 } else {
1064 LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
1065 }
1066 }
1067 if (LIKELY(free_from_run)) {
1068 DCHECK(run != NULL);
1069 // Set the bit in the bulk free bit map.
1070 run->MarkBulkFreeBitMap(ptr);
1071#ifdef HAVE_ANDROID_OS
1072 if (!run->to_be_bulk_freed_) {
1073 run->to_be_bulk_freed_ = true;
1074 runs.push_back(run);
1075 }
1076#else
1077 runs.insert(run);
1078#endif
1079 }
1080 }
1081 }
1082
1083 // Now, iterate over the affected runs and update the alloc bit map
1084 // based on the bulk free bit map (for non-thread-local runs) and
1085 // union the bulk free bit map into the thread-local free bit map
1086 // (for thread-local runs.)
1087#ifdef HAVE_ANDROID_OS
1088 typedef std::vector<Run*>::iterator It;
1089#else
1090 typedef hash_set<Run*, hash_run, eq_run>::iterator It;
1091#endif
1092 for (It it = runs.begin(); it != runs.end(); ++it) {
1093 Run* run = *it;
1094#ifdef HAVE_ANDROID_OS
1095 DCHECK(run->to_be_bulk_freed_);
1096 run->to_be_bulk_freed_ = false;
1097#endif
1098 size_t idx = run->size_bracket_idx_;
1099 MutexLock mu(self, *size_bracket_locks_[idx]);
1100 if (run->is_thread_local_ != 0) {
1101 DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
1102 DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
1103 DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
1104 run->UnionBulkFreeBitMapToThreadLocalFreeBitMap();
1105 if (kTraceRosAlloc) {
1106 LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
1107 << std::hex << reinterpret_cast<intptr_t>(run);
1108 }
1109 DCHECK_NE(run->is_thread_local_, 0);
1110 // A thread local run will be kept as a thread local even if
1111 // it's become all free.
1112 } else {
1113 bool run_was_full = run->IsFull();
1114 run->MergeBulkFreeBitMapIntoAllocBitMap();
1115 if (kTraceRosAlloc) {
1116 LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
1117 << reinterpret_cast<intptr_t>(run);
1118 }
1119 // Check if the run should be moved to non_full_runs_ or
1120 // free_page_runs_.
1121 std::set<Run*>* non_full_runs = &non_full_runs_[idx];
1122 hash_set<Run*, hash_run, eq_run>* full_runs =
1123 kIsDebugBuild ? &full_runs_[idx] : NULL;
1124 if (run->IsAllFree()) {
1125 // It has just become completely free. Free the pages of the
1126 // run.
1127 bool run_was_current = run == current_runs_[idx];
1128 if (run_was_current) {
1129 DCHECK(full_runs->find(run) == full_runs->end());
1130 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1131 // If it was a current run, reuse it.
1132 } else if (run_was_full) {
1133 // If it was full, remove it from the full run set (debug
1134 // only.)
1135 if (kIsDebugBuild) {
1136 hash_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
1137 DCHECK(pos != full_runs->end());
1138 full_runs->erase(pos);
1139 if (kTraceRosAlloc) {
1140 LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
1141 << reinterpret_cast<intptr_t>(run)
1142 << " from full_runs_";
1143 }
1144 DCHECK(full_runs->find(run) == full_runs->end());
1145 }
1146 } else {
1147 // If it was in a non full run set, remove it from the set.
1148 DCHECK(full_runs->find(run) == full_runs->end());
1149 DCHECK(non_full_runs->find(run) != non_full_runs->end());
1150 non_full_runs->erase(run);
1151 if (kTraceRosAlloc) {
1152 LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
1153 << reinterpret_cast<intptr_t>(run)
1154 << " from non_full_runs_";
1155 }
1156 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1157 }
1158 if (!run_was_current) {
1159 MutexLock mu(self, lock_);
1160 FreePages(self, run);
1161 }
1162 } else {
1163 // It is not completely free. If it wasn't the current run or
1164 // already in the non-full run set (i.e., it was full) insert
1165 // it into the non-full run set.
1166 if (run == current_runs_[idx]) {
1167 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1168 DCHECK(full_runs->find(run) == full_runs->end());
1169 // If it was a current run, keep it.
1170 } else if (run_was_full) {
1171 // If it was full, remove it from the full run set (debug
1172 // only) and insert into the non-full run set.
1173 DCHECK(full_runs->find(run) != full_runs->end());
1174 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1175 if (kIsDebugBuild) {
1176 full_runs->erase(run);
1177 if (kTraceRosAlloc) {
1178 LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
1179 << reinterpret_cast<intptr_t>(run)
1180 << " from full_runs_";
1181 }
1182 }
1183 non_full_runs->insert(run);
1184 if (kTraceRosAlloc) {
1185 LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
1186 << reinterpret_cast<intptr_t>(run)
1187 << " into non_full_runs_[" << std::dec << idx;
1188 }
1189 } else {
1190        // It was not full, so leave it in the non-full run set.
1191 DCHECK(full_runs->find(run) == full_runs->end());
1192 DCHECK(non_full_runs->find(run) != non_full_runs->end());
1193 }
1194 }
1195 }
1196 }
1197}
1198
1199void RosAlloc::DumpPageMap(Thread* self) {
1200 MutexLock mu(self, lock_);
1201 size_t end = page_map_.size();
1202 FreePageRun* curr_fpr = NULL;
1203 size_t curr_fpr_size = 0;
1204 size_t remaining_curr_fpr_size = 0;
1205 size_t num_running_empty_pages = 0;
1206 for (size_t i = 0; i < end; ++i) {
1207 byte pm = page_map_[i];
1208 switch (pm) {
1209 case kPageMapEmpty: {
1210 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
1211 if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
1212 // Encountered a fresh free page run.
1213 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1214 DCHECK(fpr->IsFree());
1215 DCHECK(curr_fpr == NULL);
1216 DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
1217 curr_fpr = fpr;
1218 curr_fpr_size = fpr->ByteSize(this);
1219 DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
1220 remaining_curr_fpr_size = curr_fpr_size - kPageSize;
1221 LOG(INFO) << "[" << i << "]=Empty (FPR start)"
1222 << " fpr_size=" << curr_fpr_size
1223 << " remaining_fpr_size=" << remaining_curr_fpr_size;
1224 if (remaining_curr_fpr_size == 0) {
1225 // Reset at the end of the current free page run.
1226 curr_fpr = NULL;
1227 curr_fpr_size = 0;
1228 }
1229 LOG(INFO) << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr);
1230 DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
1231 } else {
1232 // Still part of the current free page run.
1233 DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
1234 DCHECK(curr_fpr != NULL && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
1235 DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
1236 DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
1237 remaining_curr_fpr_size -= kPageSize;
1238 LOG(INFO) << "[" << i << "]=Empty (FPR part)"
1239 << " remaining_fpr_size=" << remaining_curr_fpr_size;
1240 if (remaining_curr_fpr_size == 0) {
1241 // Reset at the end of the current free page run.
1242 curr_fpr = NULL;
1243 curr_fpr_size = 0;
1244 }
1245 }
1246 num_running_empty_pages++;
1247 break;
1248 }
1249 case kPageMapLargeObject: {
1250 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1251 num_running_empty_pages = 0;
1252 LOG(INFO) << "[" << i << "]=Large (start)";
1253 break;
1254 }
1255 case kPageMapLargeObjectPart:
1256 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1257 num_running_empty_pages = 0;
1258 LOG(INFO) << "[" << i << "]=Large (part)";
1259 break;
1260 case kPageMapRun: {
1261 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1262 num_running_empty_pages = 0;
1263 Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
1264 size_t idx = run->size_bracket_idx_;
1265 LOG(INFO) << "[" << i << "]=Run (start)"
1266 << " idx=" << idx
1267 << " numOfPages=" << numOfPages[idx]
1268 << " thread_local=" << static_cast<int>(run->is_thread_local_)
1269 << " is_all_free=" << (run->IsAllFree() ? 1 : 0);
1270 break;
1271 }
1272 case kPageMapRunPart:
1273 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1274 num_running_empty_pages = 0;
1275 LOG(INFO) << "[" << i << "]=Run (part)";
1276 break;
1277 default:
1278 LOG(FATAL) << "Unreachable - page map type: " << pm;
1279 break;
1280 }
1281 }
1282}
1283
1284size_t RosAlloc::UsableSize(void* ptr) {
1285 DCHECK(base_ <= ptr && ptr < base_ + footprint_);
1286 size_t pm_idx = RoundDownToPageMapIndex(ptr);
1287 MutexLock mu(Thread::Current(), lock_);
1288 switch (page_map_[pm_idx]) {
1289 case kPageMapEmpty:
1290 LOG(FATAL) << "Unreachable - RosAlloc::UsableSize(): pm_idx=" << pm_idx << ", ptr=" << std::hex
1291 << reinterpret_cast<intptr_t>(ptr);
1292 break;
1293 case kPageMapLargeObject: {
1294 size_t num_pages = 1;
1295 size_t idx = pm_idx + 1;
1296 size_t end = page_map_.size();
1297 while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
1298 num_pages++;
1299 idx++;
1300 }
1301 return num_pages * kPageSize;
1302 }
1303 case kPageMapLargeObjectPart:
1304 LOG(FATAL) << "Unreachable - RosAlloc::UsableSize(): pm_idx=" << pm_idx << ", ptr=" << std::hex
1305 << reinterpret_cast<intptr_t>(ptr);
1306 break;
1307 case kPageMapRun:
1308 case kPageMapRunPart: {
1309 // Find the beginning of the run.
1310 while (page_map_[pm_idx] != kPageMapRun) {
1311 pm_idx--;
1312 DCHECK(pm_idx < capacity_ / kPageSize);
1313 }
1314 DCHECK(page_map_[pm_idx] == kPageMapRun);
1315 Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
1316 DCHECK(run->magic_num_ == kMagicNum);
1317 size_t idx = run->size_bracket_idx_;
1318 size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
1319 - (reinterpret_cast<byte*>(run) + headerSizes[idx]);
1320 DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
1321 return IndexToBracketSize(idx);
1322 }
1323 default:
1324 LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
1325 break;
1326 }
1327 return 0;
1328}
1329
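// Gives back the trailing free page run, if any, by shrinking footprint_ and calling
// art_heap_rosalloc_morecore with a negative increment. Returns true if anything was
// trimmed.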
1330bool RosAlloc::Trim() {
1331 MutexLock mu(Thread::Current(), lock_);
1332 FreePageRun* last_free_page_run;
1333 DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
1334 auto it = free_page_runs_.rbegin();
1335 if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
1336 // Remove the last free page run, if any.
1337 DCHECK(last_free_page_run->IsFree());
1338 DCHECK(page_map_[ToPageMapIndex(last_free_page_run)] == kPageMapEmpty);
1339 DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
1340 DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
1341 free_page_runs_.erase(last_free_page_run);
1342 size_t decrement = last_free_page_run->ByteSize(this);
1343 size_t new_footprint = footprint_ - decrement;
1344 DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
1345 size_t new_num_of_pages = new_footprint / kPageSize;
1346 DCHECK_GE(page_map_.size(), new_num_of_pages);
1347 page_map_.resize(new_num_of_pages);
1348 DCHECK_EQ(page_map_.size(), new_num_of_pages);
1349 free_page_run_size_map_.resize(new_num_of_pages);
1350 DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
1351 art_heap_rosalloc_morecore(this, -(static_cast<intptr_t>(decrement)));
1352 if (kTraceRosAlloc) {
1353 LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
1354 << footprint_ << " to " << new_footprint;
1355 }
1356 DCHECK_LT(new_footprint, footprint_);
1357 DCHECK_LT(new_footprint, capacity_);
1358 footprint_ = new_footprint;
1359 return true;
1360 }
1361 return false;
1362}
1363
1364void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
1365 void* arg) {
1366 // Note: no need to use this to release pages as we already do so in FreePages().
1367 if (handler == NULL) {
1368 return;
1369 }
1370 MutexLock mu(Thread::Current(), lock_);
1371 size_t pm_end = page_map_.size();
1372 size_t i = 0;
1373 while (i < pm_end) {
1374 byte pm = page_map_[i];
1375 switch (pm) {
1376 case kPageMapEmpty: {
1377 // The start of a free page run.
1378 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
1379 DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
1380 size_t fpr_size = fpr->ByteSize(this);
1381 DCHECK(IsAligned<kPageSize>(fpr_size));
1382 void* start = fpr;
Hiroshi Yamauchi573f7d22013-12-17 11:54:23 -08001383 if (kIsDebugBuild) {
1384 // In the debug build, the first page of a free page run
1385 // contains a magic number for debugging. Exclude it.
1386 start = reinterpret_cast<byte*>(fpr) + kPageSize;
1387 }
1388 void* end = reinterpret_cast<byte*>(fpr) + fpr_size;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001389 handler(start, end, 0, arg);
1390 size_t num_pages = fpr_size / kPageSize;
1391 if (kIsDebugBuild) {
1392 for (size_t j = i + 1; j < i + num_pages; ++j) {
1393 DCHECK_EQ(page_map_[j], kPageMapEmpty);
1394 }
1395 }
1396 i += fpr_size / kPageSize;
1397 DCHECK_LE(i, pm_end);
1398 break;
1399 }
1400 case kPageMapLargeObject: {
1401 // The start of a large object.
1402 size_t num_pages = 1;
1403 size_t idx = i + 1;
1404 while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
1405 num_pages++;
1406 idx++;
1407 }
1408 void* start = base_ + i * kPageSize;
1409 void* end = base_ + (i + num_pages) * kPageSize;
1410 size_t used_bytes = num_pages * kPageSize;
1411 handler(start, end, used_bytes, arg);
1412 if (kIsDebugBuild) {
1413 for (size_t j = i + 1; j < i + num_pages; ++j) {
1414 DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
1415 }
1416 }
1417 i += num_pages;
1418 DCHECK_LE(i, pm_end);
1419 break;
1420 }
1421 case kPageMapLargeObjectPart:
1422 LOG(FATAL) << "Unreachable - page map type: " << pm;
1423 break;
1424 case kPageMapRun: {
1425 // The start of a run.
1426 Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
1427 DCHECK(run->magic_num_ == kMagicNum);
1428 run->InspectAllSlots(handler, arg);
1429 size_t num_pages = numOfPages[run->size_bracket_idx_];
1430 if (kIsDebugBuild) {
1431 for (size_t j = i + 1; j < i + num_pages; ++j) {
1432 DCHECK_EQ(page_map_[j], kPageMapRunPart);
1433 }
1434 }
1435 i += num_pages;
1436 DCHECK_LE(i, pm_end);
1437 break;
1438 }
1439 case kPageMapRunPart:
1440 LOG(FATAL) << "Unreachable - page map type: " << pm;
1441 break;
1442 default:
1443 LOG(FATAL) << "Unreachable - page map type: " << pm;
1444 break;
1445 }
1446 }
1447}
1448
1449size_t RosAlloc::Footprint() {
1450 MutexLock mu(Thread::Current(), lock_);
1451 return footprint_;
1452}
1453
1454size_t RosAlloc::FootprintLimit() {
1455 MutexLock mu(Thread::Current(), lock_);
1456 return capacity_;
1457}
1458
1459void RosAlloc::SetFootprintLimit(size_t new_capacity) {
1460 MutexLock mu(Thread::Current(), lock_);
1461 DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
1462 // Only growing is supported here. But Trim() is supported.
1463 if (capacity_ < new_capacity) {
1464 capacity_ = new_capacity;
1465 VLOG(heap) << "new capacity=" << capacity_;
1466 }
1467}
1468
1469void RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
1470 Thread* self = Thread::Current();
1471 for (size_t idx = 0; idx < kNumOfSizeBrackets; idx++) {
1472 MutexLock mu(self, *size_bracket_locks_[idx]);
1473 Run* thread_local_run = reinterpret_cast<Run*>(thread->rosalloc_runs_[idx]);
1474 if (thread_local_run != NULL) {
1475 DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
1476 DCHECK_NE(thread_local_run->is_thread_local_, 0);
1477 thread->rosalloc_runs_[idx] = NULL;
1478 // Note the thread local run may not be full here.
1479 bool dont_care;
1480 thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&dont_care);
1481 thread_local_run->is_thread_local_ = 0;
1482 thread_local_run->MergeBulkFreeBitMapIntoAllocBitMap();
1483 DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
1484 DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
1485 if (thread_local_run->IsFull()) {
1486 if (kIsDebugBuild) {
1487 full_runs_[idx].insert(thread_local_run);
1488 DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
1489 if (kTraceRosAlloc) {
1490 LOG(INFO) << "RosAlloc::RevokeThreadLocalRuns() : Inserted run 0x" << std::hex
1491 << reinterpret_cast<intptr_t>(thread_local_run)
1492 << " into full_runs_[" << std::dec << idx << "]";
1493 }
1494 }
1495 } else if (thread_local_run->IsAllFree()) {
1496 MutexLock mu(self, lock_);
1497 FreePages(self, thread_local_run);
1498 } else {
1499 non_full_runs_[idx].insert(thread_local_run);
1500 DCHECK(non_full_runs_[idx].find(thread_local_run) != non_full_runs_[idx].end());
1501 if (kTraceRosAlloc) {
1502 LOG(INFO) << "RosAlloc::RevokeThreadLocalRuns() : Inserted run 0x" << std::hex
1503 << reinterpret_cast<intptr_t>(thread_local_run)
1504 << " into non_full_runs_[" << std::dec << idx << "]";
1505 }
1506 }
1507 }
1508 }
1509}
1510
1511void RosAlloc::RevokeAllThreadLocalRuns() {
1512  // This is called when mutator threads won't allocate, such as at
1513  // Zygote creation time or during a GC pause.
1514 MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
1515 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1516 for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
1517 Thread* t = *it;
1518 RevokeThreadLocalRuns(t);
1519 }
1520}
1521
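// Computes the static per-bracket tables: bracket sizes (16-byte steps up to 512 bytes,
// then 1 KB and 2 KB), pages per run, and, for each bracket, the largest slot count whose
// slots plus header (fixed header, bulk-free and thread-local-free bit maps, alignment
// padding) fit in the run.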
1522void RosAlloc::Initialize() {
1523 // Check the consistency of the number of size brackets.
1524 DCHECK_EQ(Thread::kRosAllocNumOfSizeBrackets, kNumOfSizeBrackets);
1525 // bracketSizes.
1526 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1527 if (i < kNumOfSizeBrackets - 2) {
1528 bracketSizes[i] = 16 * (i + 1);
1529 } else if (i == kNumOfSizeBrackets - 2) {
1530 bracketSizes[i] = 1 * KB;
1531 } else {
1532 DCHECK(i == kNumOfSizeBrackets - 1);
1533 bracketSizes[i] = 2 * KB;
1534 }
1535 if (kTraceRosAlloc) {
1536 LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
1537 }
1538 }
1539 // numOfPages.
1540 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1541 if (i < 4) {
1542 numOfPages[i] = 1;
1543 } else if (i < 8) {
1544 numOfPages[i] = 2;
1545 } else if (i < 16) {
1546 numOfPages[i] = 4;
1547 } else if (i < 32) {
1548 numOfPages[i] = 8;
1549 } else if (i == 32) {
1550      DCHECK(i == kNumOfSizeBrackets - 2);
1551 numOfPages[i] = 16;
1552 } else {
1553      DCHECK(i == kNumOfSizeBrackets - 1);
1554 numOfPages[i] = 32;
1555 }
1556 if (kTraceRosAlloc) {
1557 LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
1558 }
1559 }
1560 // Compute numOfSlots and slotOffsets.
1561 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1562 size_t bracket_size = bracketSizes[i];
1563 size_t run_size = kPageSize * numOfPages[i];
1564 size_t max_num_of_slots = run_size / bracket_size;
1565 // Compute the actual number of slots by taking the header and
1566 // alignment into account.
1567 size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint32_t));
1568 DCHECK_EQ(fixed_header_size, static_cast<size_t>(8));
1569 size_t header_size = 0;
1570 size_t bulk_free_bit_map_offset = 0;
1571 size_t thread_local_free_bit_map_offset = 0;
1572 size_t num_of_slots = 0;
1573 // Search for the maximum number of slots that allows enough space
1574 // for the header (including the bit maps.)
1575 for (int s = max_num_of_slots; s >= 0; s--) {
1576 size_t tmp_slots_size = bracket_size * s;
1577 size_t tmp_bit_map_size = RoundUp(s, sizeof(uint32_t) * kBitsPerByte) / kBitsPerByte;
1578 size_t tmp_bulk_free_bit_map_size = tmp_bit_map_size;
1579 size_t tmp_bulk_free_bit_map_off = fixed_header_size + tmp_bit_map_size;
1580 size_t tmp_thread_local_free_bit_map_size = tmp_bit_map_size;
1581 size_t tmp_thread_local_free_bit_map_off = tmp_bulk_free_bit_map_off + tmp_bulk_free_bit_map_size;
1582 size_t tmp_unaligned_header_size = tmp_thread_local_free_bit_map_off + tmp_thread_local_free_bit_map_size;
1583 // Align up the unaligned header size. bracket_size may not be a power of two.
1584 size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
1585 tmp_unaligned_header_size :
1586 tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
1587 DCHECK_EQ(tmp_header_size % bracket_size, static_cast<size_t>(0));
1588 DCHECK_EQ(tmp_header_size % 8, static_cast<size_t>(0));
1589 if (tmp_slots_size + tmp_header_size <= run_size) {
1590 // Found the right number of slots, that is, there was enough
1591 // space for the header (including the bit maps.)
1592 num_of_slots = s;
1593 header_size = tmp_header_size;
1594 bulk_free_bit_map_offset = tmp_bulk_free_bit_map_off;
1595 thread_local_free_bit_map_offset = tmp_thread_local_free_bit_map_off;
1596 break;
1597 }
1598 }
1599 DCHECK(num_of_slots > 0 && header_size > 0 && bulk_free_bit_map_offset > 0);
1600 // Add the padding for the alignment remainder.
1601 header_size += run_size % bracket_size;
1602 DCHECK(header_size + num_of_slots * bracket_size == run_size);
1603 numOfSlots[i] = num_of_slots;
1604 headerSizes[i] = header_size;
1605 bulkFreeBitMapOffsets[i] = bulk_free_bit_map_offset;
1606 threadLocalFreeBitMapOffsets[i] = thread_local_free_bit_map_offset;
1607 if (kTraceRosAlloc) {
1608 LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
1609 << ", headerSizes[" << i << "]=" << headerSizes[i]
1610 << ", bulkFreeBitMapOffsets[" << i << "]=" << bulkFreeBitMapOffsets[i]
1611 << ", threadLocalFreeBitMapOffsets[" << i << "]=" << threadLocalFreeBitMapOffsets[i];;
1612 }
1613 }
1614}
1615
1616void RosAlloc::BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
1617 if (used_bytes == 0) {
1618 return;
1619 }
1620 size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
1621 *bytes_allocated += used_bytes;
1622}
1623
1624void RosAlloc::ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
1625 if (used_bytes == 0) {
1626 return;
1627 }
1628 size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
1629 ++(*objects_allocated);
1630}
1631
1632} // namespace allocator
1633} // namespace gc
1634} // namespace art