/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base/mutex-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "rosalloc.h"

#include <map>
#include <list>
#include <vector>

namespace art {
namespace gc {
namespace allocator {

extern "C" void* art_heap_rosalloc_morecore(RosAlloc* rosalloc, intptr_t increment);

size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
size_t RosAlloc::bulkFreeBitMapOffsets[kNumOfSizeBrackets];
size_t RosAlloc::threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;

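// Per-size-bracket metadata, filled in once by Initialize(): the byte size of
// each bracket, the number of pages and slots in a run of that bracket, the
// size of the run header, and the offsets of the bulk free and thread-local
// free bit maps within the header. All of these tables are indexed by the
// size bracket index.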
RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                   PageReleaseMode page_release_mode, size_t page_release_size_threshold)
    : base_(reinterpret_cast<byte*>(base)), footprint_(capacity),
      capacity_(capacity), max_capacity_(max_capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
      page_release_mode_(page_release_mode),
      page_release_size_threshold_(page_release_size_threshold) {
  DCHECK(RoundUp(capacity, kPageSize) == capacity);
  DCHECK(RoundUp(max_capacity, kPageSize) == max_capacity);
  CHECK_LE(capacity, max_capacity);
  CHECK(IsAligned<kPageSize>(page_release_size_threshold_));
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << (intptr_t)base_ << ", end="
             << std::hex << (intptr_t)(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_
             << ", max_capacity=" << std::dec << max_capacity_;
  memset(current_runs_, 0, sizeof(current_runs_));
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_locks_[i] = new Mutex("a rosalloc size bracket lock",
                                       kRosAllocBracketLock);
  }
  DCHECK_EQ(footprint_, capacity_);
  size_t num_of_pages = footprint_ / kPageSize;
  size_t max_num_of_pages = max_capacity_ / kPageSize;
  std::string error_msg;
  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", NULL,
                                               RoundUp(max_num_of_pages, kPageSize),
                                               PROT_READ | PROT_WRITE, false, &error_msg));
  CHECK(page_map_mem_map_.get() != NULL) << "Couldn't allocate the page map : " << error_msg;
  page_map_ = page_map_mem_map_->Begin();
  page_map_size_ = num_of_pages;
  max_page_map_size_ = max_num_of_pages;
  free_page_run_size_map_.resize(num_of_pages);
  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  DCHECK(free_pages->IsFree());
  free_pages->ReleasePages(this);
  DCHECK(free_pages->IsFree());
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}

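// Allocates num_pages contiguous pages from the page-level allocator. The
// caller must hold lock_. free_page_runs_ is an std::set ordered by address,
// so walking it from begin() yields a lowest-address first-fit policy: the
// first run large enough is taken, and any tail beyond the request is split
// off and reinserted as a new free run. For example, a 3-page request that
// lands on an 8-page free run consumes its first 3 pages and puts the
// remaining 5-page run back into the set.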
void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
  lock_.AssertHeld(self);
  DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
  FreePageRun* res = NULL;
  size_t req_byte_size = num_pages * kPageSize;
  // Find the lowest address free page run that's large enough.
  for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
    FreePageRun* fpr = *it;
    DCHECK(fpr->IsFree());
    size_t fpr_byte_size = fpr->ByteSize(this);
    DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
    if (req_byte_size <= fpr_byte_size) {
      // Found one.
      free_page_runs_.erase(it++);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split.
        FreePageRun* remainder =
            reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Don't need to call madvise on remainder here.
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
      break;
    } else {
      ++it;
    }
  }

  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == NULL && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = NULL;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK(page_map_[ToPageMapIndex(last_free_page_run)] == kPageMapEmpty);
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_size_, new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_size_ = new_num_of_pages;
      DCHECK_LE(page_map_size_, max_page_map_size_);
      free_page_run_size_map_.resize(new_num_of_pages);
      art_heap_rosalloc_morecore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK(last_free_page_run->End(this) == base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK(*free_page_runs_.rbegin() == new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != NULL);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder =
            reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != NULL)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK(page_map_[page_map_idx + i] == kPageMapEmpty);
    }
    switch (page_map_type) {
      case kPageMapRun:
        page_map_[page_map_idx] = kPageMapRun;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapRunPart;
        }
        break;
      case kPageMapLargeObject:
        page_map_[page_map_idx] = kPageMapLargeObject;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
        }
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_type;
        break;
    }
    if (kIsDebugBuild) {
      // Clear the first page which isn't madvised away in the debug
      // build for the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : NULL";
  }
  return nullptr;
}

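// Returns the pages backing ptr to the page-level free list. The caller must
// hold lock_. The whole extent is recovered by walking the page map forward
// from the first page entry (kPageMapRun or kPageMapLargeObject) across its
// "part" entries. The resulting free run is then coalesced with any
// address-adjacent free runs on both sides before being reinserted into
// free_page_runs_, which keeps the free list from fragmenting into many
// small runs.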
void RosAlloc::FreePages(Thread* self, void* ptr) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK(pm_idx < page_map_size_);
  byte pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  byte pm_part_type;
  switch (pm_type) {
    case kPageMapRun:
      pm_part_type = kPageMapRunPart;
      break;
    case kPageMapLargeObject:
      pm_part_type = kPageMapLargeObjectPart;
      break;
    default:
      pm_part_type = kPageMapEmpty;
      LOG(FATAL) << "Unreachable - RosAlloc::FreePages() : " << "pm_idx=" << pm_idx << ", pm_type="
                 << static_cast<int>(pm_type) << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      return;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_size_;
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, num_pages * kPageSize);
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_size_ : ToPageMapIndex(fpr->End(this))) << "]";
    }
    auto higher_it = free_page_runs_.upper_bound(fpr);
    if (higher_it != free_page_runs_.end()) {
      for (auto it = higher_it; it != free_page_runs_.end(); ) {
        FreePageRun* h = *it;
        DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                    << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
        }
        if (fpr->End(this) == h->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it++);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(h)
                      << " from free_page_runs_";
          }
          fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
          DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
      }
    }
    // Try to coalesce in the lower address direction.
    auto lower_it = free_page_runs_.upper_bound(fpr);
    if (lower_it != free_page_runs_.begin()) {
      --lower_it;
      for (auto it = lower_it; ; ) {
        // We want to try to coalesce with the first element but
        // there's no "<=" operator for the iterator.
        bool to_exit_loop = it == free_page_runs_.begin();

        FreePageRun* l = *it;
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                    << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
        }
        if (l->End(this) == fpr->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it--);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(l)
                      << " from free_page_runs_";
          }
          l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
          DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
          fpr = l;
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
        if (to_exit_loop) {
          break;
        }
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  DCHECK(fpr->IsFree());
  fpr->ReleasePages(this);
  DCHECK(fpr->IsFree());
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
}

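// Allocates an object larger than kLargeSizeThreshold directly from whole
// pages, bypassing the run/bracket machinery: the size is rounded up to a
// page multiple and handed to AllocPages() with the kPageMapLargeObject
// type. UsableSize() and FreeInternal() later recover the extent purely
// from the page map, so no per-object header is needed.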
void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK(size > kLargeSizeThreshold);
  size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
  void* r;
  {
    MutexLock mu(self, lock_);
    r = AllocPages(self, num_pages, kPageMapLargeObject);
  }
  if (UNLIKELY(r == nullptr)) {
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
    }
    return nullptr;
  }
  if (bytes_allocated != NULL) {
    *bytes_allocated = num_pages * kPageSize;
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
              << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }
  if (!DoesReleaseAllPages()) {
    // If it does not release all pages, pages may not be zeroed out.
    memset(r, 0, size);
  }
  // Check if the returned memory is really all zero.
  if (kCheckZeroMemory) {
    byte* bytes = reinterpret_cast<byte*>(r);
    for (size_t i = 0; i < size; ++i) {
      DCHECK_EQ(bytes[i], 0);
    }
  }
  return r;
}

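// Frees ptr, dispatching on its page map entry: large objects go straight
// back to the page allocator, while run slots are handed to FreeFromRun().
// For a pointer into the middle of a multi-page run (kPageMapRunPart), the
// run header is found by scanning the page map backwards to the kPageMapRun
// entry. Only the page map lookup happens under lock_; the per-run free is
// done afterwards under the bracket lock inside FreeFromRun().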
void RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK(base_ <= ptr && ptr < base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  bool free_from_run = false;
  Run* run = NULL;
  {
    MutexLock mu(self, lock_);
    DCHECK(pm_idx < page_map_size_);
    byte page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_[pm_idx]) {
      case kPageMapEmpty:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return;
      case kPageMapLargeObject:
        FreePages(self, ptr);
        return;
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return;
      case kPageMapRun:
      case kPageMapRunPart: {
        free_from_run = true;
        size_t pi = pm_idx;
        DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
        // Find the beginning of the run.
        while (page_map_[pi] != kPageMapRun) {
          pi--;
          DCHECK(pi < capacity_ / kPageSize);
        }
        DCHECK(page_map_[pi] == kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
        DCHECK(run->magic_num_ == kMagicNum);
        break;
      }
      default:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return;
    }
  }
  if (LIKELY(free_from_run)) {
    DCHECK(run != NULL);
    FreeFromRun(self, ptr, run);
  }
}

void RosAlloc::Free(Thread* self, void* ptr) {
  ReaderMutexLock rmu(self, bulk_free_lock_);
  FreeInternal(self, ptr);
}

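// Returns a run to allocate from for the given size bracket: the
// lowest-address non-full run if one exists, otherwise a brand new run of
// numOfPages[idx] pages carved out by AllocPages(). A new run starts in bump
// index mode (top_slot_idx_ == 0) with all of its bit maps clear.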
RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  Run* new_run;
  size_t num_pages = numOfPages[idx];
  // Get the lowest address non-full run from the binary tree.
  Run* temp = NULL;
  std::set<Run*>* bt = &non_full_runs_[idx];
  std::set<Run*>::iterator found = bt->lower_bound(temp);
  if (found != bt->end()) {
    // If there's one, use it as the current run.
    Run* non_full_run = *found;
    DCHECK(non_full_run != NULL);
    new_run = non_full_run;
    DCHECK_EQ(new_run->is_thread_local_, 0);
    bt->erase(found);
    DCHECK_EQ(non_full_run->is_thread_local_, 0);
  } else {
    // If there's none, allocate a new run and use it as the
    // current run.
    {
      MutexLock mu(self, lock_);
      new_run = reinterpret_cast<Run*>(AllocPages(self, num_pages, kPageMapRun));
    }
    if (new_run == NULL) {
      return NULL;
    }
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    new_run->top_slot_idx_ = 0;
    new_run->ClearBitMaps();
    new_run->to_be_bulk_freed_ = false;
  }
  return new_run;
}

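// Allocates a slot of at most kLargeSizeThreshold bytes from a run. Small
// brackets (idx <= kMaxThreadLocalSizeBracketIdx) are served from a run
// cached in Thread::rosalloc_runs_, which needs no locking on the fast path;
// when that run fills up, slots freed by other threads are first reclaimed
// via MergeThreadLocalFreeBitMapToAllocBitMap() before a replacement run is
// fetched. Larger brackets share one current run per bracket under the
// bracket lock. The returned slot is zeroed.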
void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK(size <= kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK(size <= bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);

  void* slot_addr;

  if (LIKELY(idx <= kMaxThreadLocalSizeBracketIdx)) {
    // Use a thread-local run.
    Run* thread_local_run = reinterpret_cast<Run*>(self->rosalloc_runs_[idx]);
    if (UNLIKELY(thread_local_run == NULL)) {
      MutexLock mu(self, *size_bracket_locks_[idx]);
      thread_local_run = RefillRun(self, idx);
      if (UNLIKELY(thread_local_run == NULL)) {
        return NULL;
      }
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      thread_local_run->is_thread_local_ = 1;
      self->rosalloc_runs_[idx] = thread_local_run;
      DCHECK(!thread_local_run->IsFull());
    }

    DCHECK(thread_local_run != NULL);
    DCHECK_NE(thread_local_run->is_thread_local_, 0);
    slot_addr = thread_local_run->AllocSlot();

    if (UNLIKELY(slot_addr == NULL)) {
      // The run got full. Try to free slots.
      DCHECK(thread_local_run->IsFull());
      MutexLock mu(self, *size_bracket_locks_[idx]);
      bool is_all_free_after_merge;
      if (thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&is_all_free_after_merge)) {
        // Some slot got freed. Keep it.
        DCHECK(!thread_local_run->IsFull());
        DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
        if (is_all_free_after_merge) {
          // Reinstate the bump index mode if it's all free.
          DCHECK_EQ(thread_local_run->top_slot_idx_, numOfSlots[idx]);
          thread_local_run->top_slot_idx_ = 0;
        }
      } else {
        // No slots got freed. Try to refill the thread-local run.
        DCHECK(thread_local_run->IsFull());
        self->rosalloc_runs_[idx] = NULL;
        thread_local_run->is_thread_local_ = 0;
        if (kIsDebugBuild) {
          full_runs_[idx].insert(thread_local_run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(thread_local_run)
                      << " into full_runs_[" << std::dec << idx << "]";
          }
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
        thread_local_run = RefillRun(self, idx);
        if (UNLIKELY(thread_local_run == NULL)) {
          return NULL;
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
        thread_local_run->is_thread_local_ = 1;
        self->rosalloc_runs_[idx] = thread_local_run;
        DCHECK(!thread_local_run->IsFull());
      }

      DCHECK(thread_local_run != NULL);
      DCHECK(!thread_local_run->IsFull());
      DCHECK_NE(thread_local_run->is_thread_local_, 0);
      slot_addr = thread_local_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != NULL);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  } else {
    // Use the (shared) current run.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* current_run = current_runs_[idx];
    if (UNLIKELY(current_run == NULL)) {
      current_run = RefillRun(self, idx);
      if (UNLIKELY(current_run == NULL)) {
        return NULL;
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
      current_run->is_thread_local_ = 0;
      current_runs_[idx] = current_run;
      DCHECK(!current_run->IsFull());
    }
    DCHECK(current_run != NULL);
    slot_addr = current_run->AllocSlot();
    if (UNLIKELY(slot_addr == NULL)) {
      // The current run got full. Try to refill it.
      DCHECK(current_run->IsFull());
      current_runs_[idx] = NULL;
      if (kIsDebugBuild) {
        // Insert it into full_runs and set the current run to NULL.
        full_runs_[idx].insert(current_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(current_run)
                    << " into full_runs_[" << std::dec << idx << "]";
        }
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
      current_run = RefillRun(self, idx);
      if (UNLIKELY(current_run == NULL)) {
        return NULL;
      }
      DCHECK(current_run != NULL);
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
      current_run->is_thread_local_ = 0;
      current_runs_[idx] = current_run;
      DCHECK(!current_run->IsFull());
      slot_addr = current_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != NULL);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  }
  if (LIKELY(bytes_allocated != NULL)) {
    *bytes_allocated = bracket_size;
  }
  memset(slot_addr, 0, size);
  return slot_addr;
}

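// Frees one slot back into its run under the bracket lock. For a
// thread-local run the slot is only marked in the thread-local free bit map;
// the owning thread reclaims it later in AllocFromRun(), which avoids racing
// with that thread's lock-free fast path. For a shared run the slot is freed
// immediately, and the run is then migrated between the full set, the
// non-full set, and the page allocator according to its new occupancy.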
void RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
  DCHECK(run->magic_num_ == kMagicNum);
  DCHECK(run < ptr && ptr < run->End());
  size_t idx = run->size_bracket_idx_;
  MutexLock mu(self, *size_bracket_locks_[idx]);
  bool run_was_full = false;
  if (kIsDebugBuild) {
    run_was_full = run->IsFull();
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
  }
  if (LIKELY(run->is_thread_local_ != 0)) {
    // It's a thread-local run. Just mark the thread-local free bit map and return.
    DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->MarkThreadLocalFreeBitMap(ptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run);
    }
    // A thread-local run will be kept as a thread-local run even if it becomes all free.
    return;
  }
  // Free the slot in the run.
  run->FreeSlot(ptr);
  std::set<Run*>* non_full_runs = &non_full_runs_[idx];
  if (run->IsAllFree()) {
    // It has just become completely free. Free the pages of this run.
    std::set<Run*>::iterator pos = non_full_runs->find(run);
    if (pos != non_full_runs->end()) {
      non_full_runs->erase(pos);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
      }
    }
    if (run == current_runs_[idx]) {
      current_runs_[idx] = NULL;
    }
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    {
      MutexLock mu(self, lock_);
      FreePages(self, run);
    }
  } else {
    // It is not completely free. If it wasn't the current run or
    // already in the non-full run set (i.e., it was full), insert it
    // into the non-full run set.
    if (run != current_runs_[idx]) {
      hash_set<Run*, hash_run, eq_run>* full_runs =
          kIsDebugBuild ? &full_runs_[idx] : NULL;
      std::set<Run*>::iterator pos = non_full_runs->find(run);
      if (pos == non_full_runs->end()) {
        DCHECK(run_was_full);
        DCHECK(full_runs->find(run) != full_runs->end());
        if (kIsDebugBuild) {
          full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run) << " from full_runs_";
          }
        }
        non_full_runs->insert(run);
        DCHECK(!run->IsFull());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
}

std::string RosAlloc::Run::BitMapToStr(uint32_t* bit_map_base, size_t num_vec) {
  std::string bit_map_str;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = bit_map_base[v];
    if (v != num_vec - 1) {
      bit_map_str.append(StringPrintf("%x-", vec));
    } else {
      bit_map_str.append(StringPrintf("%x", vec));
    }
  }
  return bit_map_str;
}

std::string RosAlloc::Run::Dump() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  std::ostringstream stream;
  stream << "RosAlloc Run = " << reinterpret_cast<void*>(this)
         << "{ magic_num=" << static_cast<int>(magic_num_)
         << " size_bracket_idx=" << idx
         << " is_thread_local=" << static_cast<int>(is_thread_local_)
         << " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_)
         << " top_slot_idx=" << top_slot_idx_
         << " alloc_bit_map=" << BitMapToStr(alloc_bit_map_, num_vec)
         << " bulk_free_bit_map=" << BitMapToStr(BulkFreeBitMap(), num_vec)
         << " thread_local_bit_map=" << BitMapToStr(ThreadLocalFreeBitMap(), num_vec)
         << " }" << std::endl;
  return stream.str();
}

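// Grabs a free slot in this run, or returns NULL if the run is full. A fresh
// run is in "bump index" mode: top_slot_idx_ simply advances through the
// slots in address order, which is cheaper than scanning. Once the bump
// index reaches numOfSlots[idx], allocation falls back to scanning the alloc
// bit map 32 slots at a time, using __builtin_ffs(~vec) to find the first
// zero bit in each word.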
void* RosAlloc::Run::AllocSlot() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  DCHECK_LE(top_slot_idx_, num_slots);
  if (LIKELY(top_slot_idx_ < num_slots)) {
    // If it's in bump index mode, grab the top slot and increment the top index.
    size_t slot_idx = top_slot_idx_;
    byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
    }
    top_slot_idx_++;
    size_t vec_idx = slot_idx / 32;
    size_t vec_off = slot_idx % 32;
    uint32_t* vec = &alloc_bit_map_[vec_idx];
    DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
    *vec |= 1 << vec_off;
    DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
    return slot_addr;
  }
  // Not in bump index mode. Search the alloc bit map for an empty slot.
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slot_idx = 0;
  bool found_slot = false;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t* vecp = &alloc_bit_map_[v];
    uint32_t ffz1 = __builtin_ffs(~*vecp);
    uint32_t ffz;
    // TODO: Use LIKELY or UNLIKELY here?
    if (LIKELY(ffz1 > 0 && (ffz = ffz1 - 1) + v * 32 < num_slots)) {
      // Found an empty slot. Set the bit.
      DCHECK_EQ((*vecp & (1 << ffz)), static_cast<uint32_t>(0));
      *vecp |= (1 << ffz);
      DCHECK_NE((*vecp & (1 << ffz)), static_cast<uint32_t>(0));
      slot_idx = ffz + v * 32;
      found_slot = true;
      break;
    }
  }
  if (LIKELY(found_slot)) {
    byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
    }
    return slot_addr;
  }
  return NULL;
}

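// The slot <-> bit map arithmetic used by FreeSlot() and
// MarkFreeBitMapShared() below: the slot index is the byte offset from the
// first slot divided by the bracket size, and each 32-bit vector of the bit
// map covers 32 consecutive slots. For example, with a 16-byte bracket, a
// pointer 592 bytes past the first slot is slot 592 / 16 = 37, which lives
// at bit 37 % 32 = 5 of vector 37 / 32 = 1.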
inline void RosAlloc::Run::FreeSlot(void* ptr) {
  DCHECK_EQ(is_thread_local_, 0);
  byte idx = size_bracket_idx_;
  size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
  DCHECK(slot_idx < numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK(vec_idx < num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &alloc_bit_map_[vec_idx];
  DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  *vec &= ~(1 << vec_off);
  DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::FreeSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out) {
  DCHECK_NE(is_thread_local_, 0);
  // Free slots in the alloc bit map based on the thread local free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  bool changed = false;
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* tl_free_vecp = &ThreadLocalFreeBitMap()[0];
  bool is_all_free_after = true;
  for (size_t v = 0; v < num_vec; v++, vecp++, tl_free_vecp++) {
    uint32_t tl_free_vec = *tl_free_vecp;
    uint32_t vec_before = *vecp;
    uint32_t vec_after;
    if (tl_free_vec != 0) {
      vec_after = vec_before & ~tl_free_vec;
      *vecp = vec_after;
      changed = true;
      *tl_free_vecp = 0;  // clear the thread local free bit map.
    } else {
      vec_after = vec_before;
    }
    if (vec_after != 0) {
      is_all_free_after = false;
    }
    DCHECK_EQ(*tl_free_vecp, static_cast<uint32_t>(0));
  }
  *is_all_free_after_out = is_all_free_after;
  // Return true if at least one bit was set in the thread-local free
  // bit map, i.e., at least one bit in the alloc bit map changed.
  return changed;
}

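// Each run keeps three parallel bit maps over its slots: the alloc bit map
// (1 = slot in use), the bulk free bit map (slots staged for freeing by
// BulkFree()), and the thread-local free bit map (slots freed by other
// threads while the run is thread-local). The two helpers below drain the
// bulk free bit map: directly into the alloc bit map for shared runs, or
// into the thread-local free bit map for thread-local runs, whose owner
// merges it later.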
inline void RosAlloc::Run::MergeBulkFreeBitMapIntoAllocBitMap() {
  DCHECK_EQ(is_thread_local_, 0);
  // Free slots in the alloc bit map based on the bulk free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* free_vecp = &BulkFreeBitMap()[0];
  for (size_t v = 0; v < num_vec; v++, vecp++, free_vecp++) {
    uint32_t free_vec = *free_vecp;
    if (free_vec != 0) {
      *vecp &= ~free_vec;
      *free_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*free_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::UnionBulkFreeBitMapToThreadLocalFreeBitMap() {
  DCHECK_NE(is_thread_local_, 0);
  // Union the thread local bit map with the bulk free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  uint32_t* to_vecp = &ThreadLocalFreeBitMap()[0];
  uint32_t* from_vecp = &BulkFreeBitMap()[0];
  for (size_t v = 0; v < num_vec; v++, to_vecp++, from_vecp++) {
    uint32_t from_vec = *from_vecp;
    if (from_vec != 0) {
      *to_vecp |= from_vec;
      *from_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*from_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::MarkThreadLocalFreeBitMap(void* ptr) {
  DCHECK_NE(is_thread_local_, 0);
  MarkFreeBitMapShared(ptr, ThreadLocalFreeBitMap(), "MarkThreadLocalFreeBitMap");
}

inline void RosAlloc::Run::MarkBulkFreeBitMap(void* ptr) {
  MarkFreeBitMapShared(ptr, BulkFreeBitMap(), "MarkBulkFreeBitMap");
}

inline void RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
                                                const char* caller_name) {
  byte idx = size_bracket_idx_;
  size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
  DCHECK(slot_idx < numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK(vec_idx < num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &free_bit_map_base[vec_idx];
  DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  *vec |= 1 << vec_off;
  DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : 0x" << std::hex
              << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

inline bool RosAlloc::Run::IsAllFree() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = alloc_bit_map_[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsFull() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK(num_slots >= slots);
    uint32_t vec = alloc_bit_map_[v];
    uint32_t mask = (num_slots - slots >= 32) ? static_cast<uint32_t>(-1)
        : (1 << (num_slots - slots)) - 1;
    DCHECK(num_slots - slots >= 32 ? mask == static_cast<uint32_t>(-1) : true);
    if (vec != mask) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsBulkFreeBitmapClean() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = BulkFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsThreadLocalFreeBitmapClean() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = ThreadLocalFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline void RosAlloc::Run::ClearBitMaps() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  memset(alloc_bit_map_, 0, sizeof(uint32_t) * num_vec * 3);
}

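// Visits every slot in the run in address order, invoking the handler with
// used_bytes == bracket_size for allocated slots and used_bytes == 0 for
// free ones. This is the run-level building block for heap inspection.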
void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes,
                                                    void* callback_arg),
                                    void* arg) {
  size_t idx = size_bracket_idx_;
  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  DCHECK_EQ(slot_base + num_slots * bracket_size,
            reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize);
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK(num_slots >= slots);
    uint32_t vec = alloc_bit_map_[v];
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      byte* slot_addr = slot_base + (slots + i) * bracket_size;
      if (is_allocated) {
        handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
      } else {
        handler(slot_addr, slot_addr + bracket_size, 0, arg);
      }
    }
  }
}

// If true, read the page map entries in BulkFree() without using the
// lock for better performance, assuming that the existence of an
// allocated chunk/pointer being freed in BulkFree() guarantees that
// the page map entry won't change. Disabled for now.
static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = false;

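// Frees a batch of pointers (the GC's usual path) in two passes. Pass 1:
// under the bulk_free_lock_ writer lock, resolve each pointer to its run and
// mark it in the run's bulk free bit map, collecting the set of affected
// runs (large objects are freed on the spot). Pass 2: per affected run,
// under its bracket lock, merge the staged bits into the alloc bit map (or
// the thread-local free bit map for thread-local runs) and move the run
// between the full/non-full sets or free its pages; the same bookkeeping as
// FreeFromRun(), but amortized over the whole batch.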
void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
  if (false) {
    // Used only to test Free() as GC uses only BulkFree().
    for (size_t i = 0; i < num_ptrs; ++i) {
      FreeInternal(self, ptrs[i]);
    }
    return;
  }

  WriterMutexLock wmu(self, bulk_free_lock_);

  // First mark slots to free in the bulk free bit map without locking the
  // size bracket locks. On host, hash_set is faster than vector + flag.
#ifdef HAVE_ANDROID_OS
  std::vector<Run*> runs;
#else
  hash_set<Run*, hash_run, eq_run> runs;
#endif
  for (size_t i = 0; i < num_ptrs; i++) {
    void* ptr = ptrs[i];
    ptrs[i] = NULL;
    DCHECK(base_ <= ptr && ptr < base_ + footprint_);
    size_t pm_idx = RoundDownToPageMapIndex(ptr);
    Run* run = NULL;
    if (kReadPageMapEntryWithoutLockInBulkFree) {
      // Read the page map entries without locking the lock.
      byte page_map_entry = page_map_[pm_idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                  << std::dec << pm_idx
                  << ", page_map_entry=" << static_cast<int>(page_map_entry);
      }
      if (LIKELY(page_map_entry == kPageMapRun)) {
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
        DCHECK(run->magic_num_ == kMagicNum);
      } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
        size_t pi = pm_idx;
        DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
        // Find the beginning of the run.
        while (page_map_[pi] != kPageMapRun) {
          pi--;
          DCHECK(pi < capacity_ / kPageSize);
        }
        DCHECK(page_map_[pi] == kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
        DCHECK(run->magic_num_ == kMagicNum);
      } else if (page_map_entry == kPageMapLargeObject) {
        MutexLock mu(self, lock_);
        FreePages(self, ptr);
        continue;
      } else {
        LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
      }
      DCHECK(run != NULL);
      // Set the bit in the bulk free bit map.
      run->MarkBulkFreeBitMap(ptr);
#ifdef HAVE_ANDROID_OS
      if (!run->to_be_bulk_freed_) {
        run->to_be_bulk_freed_ = true;
        runs.push_back(run);
      }
#else
      runs.insert(run);
#endif
    } else {
      // Read the page map entries with a lock.
      bool free_from_run = false;
      {
        MutexLock mu(self, lock_);
        DCHECK(pm_idx < page_map_size_);
        byte page_map_entry = page_map_[pm_idx];
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                    << std::dec << pm_idx
                    << ", page_map_entry=" << static_cast<int>(page_map_entry);
        }
        if (LIKELY(page_map_entry == kPageMapRun)) {
          free_from_run = true;
          run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
          DCHECK(run->magic_num_ == kMagicNum);
        } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
          free_from_run = true;
          size_t pi = pm_idx;
          DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
          // Find the beginning of the run.
          while (page_map_[pi] != kPageMapRun) {
            pi--;
            DCHECK(pi < capacity_ / kPageSize);
          }
          DCHECK(page_map_[pi] == kPageMapRun);
          run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
          DCHECK(run->magic_num_ == kMagicNum);
        } else if (page_map_entry == kPageMapLargeObject) {
          FreePages(self, ptr);
        } else {
          LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
        }
      }
      if (LIKELY(free_from_run)) {
        DCHECK(run != NULL);
        // Set the bit in the bulk free bit map.
        run->MarkBulkFreeBitMap(ptr);
#ifdef HAVE_ANDROID_OS
        if (!run->to_be_bulk_freed_) {
          run->to_be_bulk_freed_ = true;
          runs.push_back(run);
        }
#else
        runs.insert(run);
#endif
      }
    }
  }

  // Now, iterate over the affected runs and update the alloc bit map
  // based on the bulk free bit map (for non-thread-local runs) and
  // union the bulk free bit map into the thread-local free bit map
  // (for thread-local runs.)
#ifdef HAVE_ANDROID_OS
  typedef std::vector<Run*>::iterator It;
#else
  typedef hash_set<Run*, hash_run, eq_run>::iterator It;
#endif
  for (It it = runs.begin(); it != runs.end(); ++it) {
    Run* run = *it;
#ifdef HAVE_ANDROID_OS
    DCHECK(run->to_be_bulk_freed_);
    run->to_be_bulk_freed_ = false;
#endif
    size_t idx = run->size_bracket_idx_;
    MutexLock mu(self, *size_bracket_locks_[idx]);
    if (run->is_thread_local_ != 0) {
      DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
      DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
      run->UnionBulkFreeBitMapToThreadLocalFreeBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(run);
      }
      DCHECK_NE(run->is_thread_local_, 0);
      // A thread-local run will be kept as a thread-local run even if
      // it becomes all free.
    } else {
      bool run_was_full = run->IsFull();
      run->MergeBulkFreeBitMapIntoAllocBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run);
      }
      // Check if the run should be moved to non_full_runs_ or
      // free_page_runs_.
      std::set<Run*>* non_full_runs = &non_full_runs_[idx];
      hash_set<Run*, hash_run, eq_run>* full_runs =
          kIsDebugBuild ? &full_runs_[idx] : NULL;
      if (run->IsAllFree()) {
        // It has just become completely free. Free the pages of the
        // run.
        bool run_was_current = run == current_runs_[idx];
        if (run_was_current) {
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          // If it was a current run, reuse it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only.)
          if (kIsDebugBuild) {
            hash_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
            DCHECK(pos != full_runs->end());
            full_runs->erase(pos);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
            DCHECK(full_runs->find(run) == full_runs->end());
          }
        } else {
          // If it was in a non full run set, remove it from the set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
          non_full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " from non_full_runs_";
          }
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
        }
        if (!run_was_current) {
          MutexLock mu(self, lock_);
          FreePages(self, run);
        }
      } else {
        // It is not completely free. If it wasn't the current run or
        // already in the non-full run set (i.e., it was full), insert
        // it into the non-full run set.
        if (run == current_runs_[idx]) {
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          DCHECK(full_runs->find(run) == full_runs->end());
          // If it was a current run, keep it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only) and insert into the non-full run set.
          DCHECK(full_runs->find(run) != full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          if (kIsDebugBuild) {
            full_runs->erase(run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
          }
          non_full_runs->insert(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " into non_full_runs_[" << std::dec << idx << "]";
          }
        } else {
          // It was not full, so leave it in the non-full run set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
        }
      }
    }
  }
}

Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001300std::string RosAlloc::DumpPageMap() {
1301 std::ostringstream stream;
1302 stream << "RosAlloc PageMap: " << std::endl;
1303 lock_.AssertHeld(Thread::Current());
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001304 size_t end = page_map_size_;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001305 FreePageRun* curr_fpr = NULL;
1306 size_t curr_fpr_size = 0;
1307 size_t remaining_curr_fpr_size = 0;
1308 size_t num_running_empty_pages = 0;
  for (size_t i = 0; i < end; ++i) {
    byte pm = page_map_[i];
    switch (pm) {
      case kPageMapEmpty: {
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
          // Encountered a fresh free page run.
          DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
          DCHECK(fpr->IsFree());
          DCHECK(curr_fpr == NULL);
          DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
          curr_fpr = fpr;
          curr_fpr_size = fpr->ByteSize(this);
          DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
          remaining_curr_fpr_size = curr_fpr_size - kPageSize;
          stream << "[" << i << "]=Empty (FPR start)"
                 << " fpr_size=" << curr_fpr_size
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = NULL;
            curr_fpr_size = 0;
          }
          stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
          DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
        } else {
          // Still part of the current free page run.
          DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
          DCHECK(curr_fpr != NULL && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
          DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
          DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
          remaining_curr_fpr_size -= kPageSize;
          stream << "[" << i << "]=Empty (FPR part)"
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = NULL;
            curr_fpr_size = 0;
          }
        }
        num_running_empty_pages++;
        break;
      }
      case kPageMapLargeObject: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (start)" << std::endl;
        break;
      }
      case kPageMapLargeObjectPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (part)" << std::endl;
        break;
      case kPageMapRun: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        size_t idx = run->size_bracket_idx_;
        stream << "[" << i << "]=Run (start)"
               << " idx=" << idx
               << " numOfPages=" << numOfPages[idx]
               << " thread_local=" << static_cast<int>(run->is_thread_local_)
               << " is_all_free=" << (run->IsAllFree() ? 1 : 0)
               << std::endl;
        break;
      }
      case kPageMapRunPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Run (part)" << std::endl;
        break;
      default:
        stream << "[" << i << "]=Unrecognizable page map type: " << pm;
        break;
    }
  }
  return stream.str();
}

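// Returns the number of usable bytes at ptr: the page-rounded extent for a
// large object (found by counting the kPageMapLargeObjectPart entries that
// follow the start page), or the size bracket's slot size for a run slot.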
size_t RosAlloc::UsableSize(void* ptr) {
  DCHECK(base_ <= ptr && ptr < base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  MutexLock mu(Thread::Current(), lock_);
  switch (page_map_[pm_idx]) {
    case kPageMapEmpty:
      LOG(FATAL) << "Unreachable - RosAlloc::UsableSize(): pm_idx=" << pm_idx << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapLargeObject: {
      size_t num_pages = 1;
      size_t idx = pm_idx + 1;
      size_t end = page_map_size_;
      while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
        num_pages++;
        idx++;
      }
      return num_pages * kPageSize;
    }
    case kPageMapLargeObjectPart:
      LOG(FATAL) << "Unreachable - RosAlloc::UsableSize(): pm_idx=" << pm_idx << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapRun:
    case kPageMapRunPart: {
      // Find the beginning of the run.
      while (page_map_[pm_idx] != kPageMapRun) {
        pm_idx--;
        DCHECK(pm_idx < capacity_ / kPageSize);
      }
      DCHECK(page_map_[pm_idx] == kPageMapRun);
      Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      DCHECK(run->magic_num_ == kMagicNum);
      size_t idx = run->size_bracket_idx_;
      size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
          - (reinterpret_cast<byte*>(run) + headerSizes[idx]);
      DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
      return IndexToBracketSize(idx);
    }
    default:
      LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
      break;
  }
  return 0;
}

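// Attempts to give memory at the end of the space back to the OS. Only a
// free page run that extends to the current footprint can be released: it
// is removed from free_page_runs_, the footprint and the page map tail are
// shrunk, and the morecore hook is invoked with a negative increment.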
bool RosAlloc::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  FreePageRun* last_free_page_run;
  DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
  auto it = free_page_runs_.rbegin();
  if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
    // Remove the last free page run, if any.
    DCHECK(last_free_page_run->IsFree());
    DCHECK(page_map_[ToPageMapIndex(last_free_page_run)] == kPageMapEmpty);
    DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
    DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
    free_page_runs_.erase(last_free_page_run);
    size_t decrement = last_free_page_run->ByteSize(this);
    size_t new_footprint = footprint_ - decrement;
    DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
    size_t new_num_of_pages = new_footprint / kPageSize;
    DCHECK_GE(page_map_size_, new_num_of_pages);
    // Zero out the tail of the page map.
    byte* zero_begin = page_map_ + new_num_of_pages;
    byte* madvise_begin = AlignUp(zero_begin, kPageSize);
    DCHECK_LE(madvise_begin, page_map_mem_map_->End());
    size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
    if (madvise_size > 0) {
      DCHECK_ALIGNED(madvise_begin, kPageSize);
      DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
      CHECK_EQ(madvise(madvise_begin, madvise_size, MADV_DONTNEED), 0);
    }
    if (madvise_begin > zero_begin) {
      memset(zero_begin, 0, madvise_begin - zero_begin);
    }
    page_map_size_ = new_num_of_pages;
    free_page_run_size_map_.resize(new_num_of_pages);
    DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
    art_heap_rosalloc_morecore(this, -(static_cast<intptr_t>(decrement)));
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
                << footprint_ << " to " << new_footprint;
    }
    DCHECK_LT(new_footprint, footprint_);
    DCHECK_LT(new_footprint, capacity_);
    footprint_ = new_footprint;
    return true;
  }
  return false;
}

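// Visits every region of the space exactly once. For each free page run the
// handler receives used_bytes == 0; for a large object it receives the full
// page-rounded byte count; for a run, Run::InspectAllSlots() reports the
// per-slot usage.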
void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                          void* arg) {
  // Note: no need to use this to release pages as we already do so in FreePages().
  if (handler == NULL) {
    return;
  }
  MutexLock mu(Thread::Current(), lock_);
  size_t pm_end = page_map_size_;
  size_t i = 0;
  while (i < pm_end) {
    byte pm = page_map_[i];
    switch (pm) {
      case kPageMapEmpty: {
        // The start of a free page run.
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
        size_t fpr_size = fpr->ByteSize(this);
        DCHECK(IsAligned<kPageSize>(fpr_size));
        void* start = fpr;
        if (kIsDebugBuild) {
          // In the debug build, the first page of a free page run
          // contains a magic number for debugging. Exclude it.
          start = reinterpret_cast<byte*>(fpr) + kPageSize;
        }
        void* end = reinterpret_cast<byte*>(fpr) + fpr_size;
        handler(start, end, 0, arg);
        size_t num_pages = fpr_size / kPageSize;
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapEmpty);
          }
        }
        i += fpr_size / kPageSize;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObject: {
        // The start of a large object.
        size_t num_pages = 1;
        size_t idx = i + 1;
        while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
          num_pages++;
          idx++;
        }
        void* start = base_ + i * kPageSize;
        void* end = base_ + (i + num_pages) * kPageSize;
        size_t used_bytes = num_pages * kPageSize;
        handler(start, end, used_bytes, arg);
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
      case kPageMapRun: {
        // The start of a run.
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        DCHECK(run->magic_num_ == kMagicNum);
        run->InspectAllSlots(handler, arg);
        size_t num_pages = numOfPages[run->size_bracket_idx_];
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapRunPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapRunPart:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
    }
  }
}

size_t RosAlloc::Footprint() {
  MutexLock mu(Thread::Current(), lock_);
  return footprint_;
}

size_t RosAlloc::FootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return capacity_;
}

void RosAlloc::SetFootprintLimit(size_t new_capacity) {
  MutexLock mu(Thread::Current(), lock_);
  DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
  // Only growing the limit is supported here; shrinking happens via Trim().
  if (capacity_ < new_capacity) {
    CHECK_LE(new_capacity, max_capacity_);
    capacity_ = new_capacity;
    VLOG(heap) << "new capacity=" << capacity_;
  }
}

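// Detaches the given thread's thread-local runs and returns them to the
// shared state: the thread-local free bitmap (and any pending bulk frees)
// are merged back into the allocation bitmap, and each run is then filed as
// full, non-full, or freed outright if every slot turned out to be free.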
void RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
  Thread* self = Thread::Current();
  // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
  WriterMutexLock wmu(self, bulk_free_lock_);
  for (size_t idx = 0; idx < kNumOfSizeBrackets; idx++) {
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* thread_local_run = reinterpret_cast<Run*>(thread->rosalloc_runs_[idx]);
    if (thread_local_run != NULL) {
      DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
      DCHECK_NE(thread_local_run->is_thread_local_, 0);
      thread->rosalloc_runs_[idx] = NULL;
      // Note the thread local run may not be full here.
      bool dont_care;
      thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&dont_care);
      thread_local_run->is_thread_local_ = 0;
      thread_local_run->MergeBulkFreeBitMapIntoAllocBitMap();
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      if (thread_local_run->IsFull()) {
        if (kIsDebugBuild) {
          full_runs_[idx].insert(thread_local_run);
          DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::RevokeThreadLocalRuns() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(thread_local_run)
                      << " into full_runs_[" << std::dec << idx << "]";
          }
        }
      } else if (thread_local_run->IsAllFree()) {
        MutexLock mu(self, lock_);
        FreePages(self, thread_local_run);
      } else {
        non_full_runs_[idx].insert(thread_local_run);
        DCHECK(non_full_runs_[idx].find(thread_local_run) != non_full_runs_[idx].end());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::RevokeThreadLocalRuns() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(thread_local_run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
}

void RosAlloc::RevokeAllThreadLocalRuns() {
  // This is called when mutator threads won't allocate, such as at
  // Zygote creation time or during a GC pause.
  MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
  MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
    Thread* t = *it;
    RevokeThreadLocalRuns(t);
  }
}

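// Computes the static size bracket tables. Brackets grow in 16-byte steps up
// to 16 * (kNumOfSizeBrackets - 2) bytes, then jump to 1 KB and 2 KB for the
// last two brackets. The DCHECKs in the numOfPages loop below pin
// kNumOfSizeBrackets at 34, so the 16-byte-step brackets cover 16..512
// bytes, and run sizes grow with the bracket so that larger slots still
// amortize the per-run header.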
void RosAlloc::Initialize() {
  // Check the consistency of the number of size brackets.
  DCHECK_EQ(Thread::kRosAllocNumOfSizeBrackets, kNumOfSizeBrackets);
  // bracketSizes.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 16 * (i + 1);
    } else if (i == kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 1 * KB;
    } else {
      DCHECK(i == kNumOfSizeBrackets - 1);
      bracketSizes[i] = 2 * KB;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
    }
  }
  // numOfPages.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < 4) {
      numOfPages[i] = 1;
    } else if (i < 8) {
      numOfPages[i] = 2;
    } else if (i < 16) {
      numOfPages[i] = 4;
    } else if (i < 32) {
      numOfPages[i] = 8;
    } else if (i == 32) {
      DCHECK(i == kNumOfSizeBrackets - 2);
      numOfPages[i] = 16;
    } else {
      DCHECK(i == kNumOfSizeBrackets - 1);
      numOfPages[i] = 32;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
    }
  }
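  // A run consists of a fixed header, an allocation bitmap, a bulk free
  // bitmap, a thread-local free bitmap, alignment padding, and then the
  // slots themselves. Since the bitmaps grow with the slot count while the
  // slots compete for the same run_size bytes, the loop below searches
  // downward for the largest slot count whose header still fits.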
  // Compute numOfSlots and slotOffsets.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_t bracket_size = bracketSizes[i];
    size_t run_size = kPageSize * numOfPages[i];
    size_t max_num_of_slots = run_size / bracket_size;
    // Compute the actual number of slots by taking the header and
    // alignment into account.
    size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint32_t));
    DCHECK_EQ(fixed_header_size, static_cast<size_t>(8));
    size_t header_size = 0;
    size_t bulk_free_bit_map_offset = 0;
    size_t thread_local_free_bit_map_offset = 0;
    size_t num_of_slots = 0;
    // Search for the maximum number of slots that allows enough space
    // for the header (including the bit maps.)
    for (int s = max_num_of_slots; s >= 0; s--) {
      size_t tmp_slots_size = bracket_size * s;
      size_t tmp_bit_map_size = RoundUp(s, sizeof(uint32_t) * kBitsPerByte) / kBitsPerByte;
      size_t tmp_bulk_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_bulk_free_bit_map_off = fixed_header_size + tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_off = tmp_bulk_free_bit_map_off + tmp_bulk_free_bit_map_size;
      size_t tmp_unaligned_header_size = tmp_thread_local_free_bit_map_off + tmp_thread_local_free_bit_map_size;
      // Align up the unaligned header size. bracket_size may not be a power of two.
      size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
          tmp_unaligned_header_size :
          tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
      DCHECK_EQ(tmp_header_size % bracket_size, static_cast<size_t>(0));
      DCHECK_EQ(tmp_header_size % 8, static_cast<size_t>(0));
      if (tmp_slots_size + tmp_header_size <= run_size) {
        // Found the right number of slots, that is, there was enough
        // space for the header (including the bit maps.)
        num_of_slots = s;
        header_size = tmp_header_size;
        bulk_free_bit_map_offset = tmp_bulk_free_bit_map_off;
        thread_local_free_bit_map_offset = tmp_thread_local_free_bit_map_off;
        break;
      }
    }
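    // For example, for bracket 0 (16-byte slots, a one-page run, assuming
    // 4 KB pages): the 8-byte fixed header plus three 32-byte bitmaps give
    // an unaligned header of 104 bytes, which rounds up to 112, leaving
    // room for 249 slots (112 + 249 * 16 == 4096).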
    DCHECK(num_of_slots > 0 && header_size > 0 && bulk_free_bit_map_offset > 0);
    // Add the padding for the alignment remainder.
    header_size += run_size % bracket_size;
    DCHECK(header_size + num_of_slots * bracket_size == run_size);
    numOfSlots[i] = num_of_slots;
    headerSizes[i] = header_size;
    bulkFreeBitMapOffsets[i] = bulk_free_bit_map_offset;
    threadLocalFreeBitMapOffsets[i] = thread_local_free_bit_map_offset;
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
                << ", headerSizes[" << i << "]=" << headerSizes[i]
                << ", bulkFreeBitMapOffsets[" << i << "]=" << bulkFreeBitMapOffsets[i]
                << ", threadLocalFreeBitMapOffsets[" << i << "]=" << threadLocalFreeBitMapOffsets[i];
    }
  }
}

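// The two callbacks below are meant to be passed to InspectAll(). A minimal
// sketch of the intended use (the variable names are illustrative):
//
//   size_t bytes_allocated = 0;
//   rosalloc->InspectAll(RosAlloc::BytesAllocatedCallback, &bytes_allocated);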
void RosAlloc::BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
  *bytes_allocated += used_bytes;
}

void RosAlloc::ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
  ++(*objects_allocated);
}

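// Exhaustively checks the allocator invariants: every page map entry must be
// consistent with the free page run set, large object sizes, and run
// metadata. Runs are collected first under lock_ and verified afterwards,
// since Run::Verify() takes the size bracket locks and lock_ again.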
void RosAlloc::Verify() {
  Thread* self = Thread::Current();
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
      << "The mutator lock isn't exclusively locked at RosAlloc::Verify()";
  MutexLock mu(self, *Locks::thread_list_lock_);
  WriterMutexLock wmu(self, bulk_free_lock_);
  std::vector<Run*> runs;
  {
    MutexLock mu(self, lock_);
    size_t pm_end = page_map_size_;
    size_t i = 0;
    while (i < pm_end) {
      byte pm = page_map_[i];
      switch (pm) {
        case kPageMapEmpty: {
          // The start of a free page run.
          FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
          DCHECK(fpr->magic_num_ == kMagicNumFree) << "Bad magic number : " << fpr->magic_num_;
          CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
              << "An empty page must belong to the free page run set";
          size_t fpr_size = fpr->ByteSize(this);
          CHECK(IsAligned<kPageSize>(fpr_size))
              << "A free page run size isn't page-aligned : " << fpr_size;
          size_t num_pages = fpr_size / kPageSize;
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "A free page run size must be > 0 : " << fpr_size;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK_EQ(page_map_[j], kPageMapEmpty)
                << "A mismatch between the page map table for kPageMapEmpty "
                << " at page index " << j
                << " and the free page run size : page index range : "
                << i << " to " << (i + num_pages) << std::endl << DumpPageMap();
          }
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObject: {
          // The start of a large object.
          size_t num_pages = 1;
          size_t idx = i + 1;
          while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
            num_pages++;
            idx++;
          }
          void* start = base_ + i * kPageSize;
          mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
          size_t obj_size = obj->SizeOf();
          CHECK(obj_size > kLargeSizeThreshold)
              << "A rosalloc large object size must be > " << kLargeSizeThreshold;
          CHECK_EQ(num_pages, RoundUp(obj_size, kPageSize) / kPageSize)
              << "A rosalloc large object size " << obj_size
              << " does not match the page map table " << (num_pages * kPageSize)
              << std::endl << DumpPageMap();
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObjectPart:
          LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
          break;
        case kPageMapRun: {
          // The start of a run.
          Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
          DCHECK(run->magic_num_ == kMagicNum) << "Bad magic number : " << run->magic_num_;
          size_t idx = run->size_bracket_idx_;
          CHECK(idx < kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
          size_t num_pages = numOfPages[idx];
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "Run size must be > 0 : " << num_pages;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK_EQ(page_map_[j], kPageMapRunPart)
                << "A mismatch between the page map table for kPageMapRunPart "
                << " at page index " << j
                << " and the run size : page index range " << i << " to " << (i + num_pages)
                << std::endl << DumpPageMap();
          }
          runs.push_back(run);
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapRunPart:
          LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
          break;
        default:
          LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
          break;
      }
    }
  }

  // Call Verify() here for the lock order.
  for (auto& run : runs) {
    run->Verify(self, this);
  }
}

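// Verifies a single run: its header geometry, its bitmaps, the bump index
// invariant, its membership in exactly one of the thread-local / current /
// full / non-full states, and finally the size class of every live object.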
void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
  DCHECK(magic_num_ == kMagicNum) << "Bad magic number : " << Dump();
  size_t idx = size_bracket_idx_;
  CHECK(idx < kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  CHECK_EQ(slot_base + num_slots * bracket_size,
           reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize)
      << "Mismatch in the end address of the run " << Dump();
  // Check that the bulk free bitmap is clean. It's only used during BulkFree().
  CHECK(IsBulkFreeBitmapClean()) << "The bulk free bit map isn't clean " << Dump();
  // Check the bump index mode, if it's on.
  if (top_slot_idx_ < num_slots) {
    // If the bump index mode is on (top_slot_idx_ < num_slots), then
    // all of the slots after the top index must be free.
    for (size_t i = top_slot_idx_; i < num_slots; ++i) {
      size_t vec_idx = i / 32;
      size_t vec_off = i % 32;
      uint32_t vec = alloc_bit_map_[vec_idx];
      CHECK_EQ((vec & (1 << vec_off)), static_cast<uint32_t>(0))
          << "A slot >= top_slot_idx_ isn't free " << Dump();
    }
  } else {
    CHECK_EQ(top_slot_idx_, num_slots)
        << "If the bump index mode is off, the top index == the number of slots "
        << Dump();
  }
  // Check the thread local runs, the current runs, and the run sets.
  if (is_thread_local_) {
    // If it's a thread local run, then it must be pointed to by an owner thread.
    bool owner_found = false;
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
      Thread* thread = *it;
      for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
        MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
        Run* thread_local_run = reinterpret_cast<Run*>(thread->rosalloc_runs_[i]);
        if (thread_local_run == this) {
          CHECK(!owner_found)
              << "A thread local run has more than one owner thread " << Dump();
          CHECK_EQ(i, idx)
              << "A mismatching size bracket index in a thread local run " << Dump();
          owner_found = true;
        }
      }
    }
    CHECK(owner_found) << "A thread local run has no owner thread " << Dump();
  } else {
    // If it's not thread local, check that the thread local free bitmap is clean.
    CHECK(IsThreadLocalFreeBitmapClean())
        << "A non-thread-local run's thread local free bitmap isn't clean "
        << Dump();
    // Check if it's a current run for the size bracket.
    bool is_current_run = false;
    for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
      MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
      Run* current_run = rosalloc->current_runs_[i];
      if (idx == i) {
        if (this == current_run) {
          is_current_run = true;
        }
      } else {
        // If the size bracket index does not match, then it must not
        // be a current run.
        CHECK_NE(this, current_run)
            << "A current run points to a run with a wrong size bracket index " << Dump();
      }
    }
    // If it's neither a thread local nor a current run, then it must be
    // in a run set.
    if (!is_current_run) {
      MutexLock mu(self, rosalloc->lock_);
      std::set<Run*>& non_full_runs = rosalloc->non_full_runs_[idx];
      // If it's all free, it must be a free page run rather than a run.
      CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump();
      if (!IsFull()) {
        // If it's not full, it must be in the non-full run set.
        CHECK(non_full_runs.find(this) != non_full_runs.end())
            << "A non-full run isn't in the non-full run set " << Dump();
      } else {
        // If it's full, it must be in the full run set (debug build only.)
        if (kIsDebugBuild) {
          hash_set<Run*, hash_run, eq_run>& full_runs = rosalloc->full_runs_[idx];
          CHECK(full_runs.find(this) != full_runs.end())
              << "A full run isn't in the full run set " << Dump();
        }
      }
    }
  }
  // Check each slot.
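  // Slots are tracked 32 per uint32_t word: word v covers slots
  // [32 * v, 32 * v + 31], and bit i within the word marks slot
  // 32 * v + i as allocated.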
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK(num_slots >= slots) << "Out of bounds";
    uint32_t vec = alloc_bit_map_[v];
    uint32_t thread_local_free_vec = ThreadLocalFreeBitMap()[v];
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      // If a thread local run, slots may be marked freed in the
      // thread local free bitmap.
      bool is_thread_local_freed = is_thread_local_ && ((thread_local_free_vec >> i) & 0x1) != 0;
      if (is_allocated && !is_thread_local_freed) {
        byte* slot_addr = slot_base + (slots + i) * bracket_size;
        mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
        size_t obj_size = obj->SizeOf();
        CHECK_LE(obj_size, kLargeSizeThreshold)
            << "A run slot contains a large object " << Dump();
        CHECK_EQ(SizeToIndex(obj_size), idx)
            << "A run slot contains an object with wrong size " << Dump();
      }
    }
  }
}

}  // namespace allocator
}  // namespace gc
}  // namespace art