/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base/mutex-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "rosalloc.h"

#include <map>
#include <list>
#include <vector>

namespace art {
namespace gc {
namespace allocator {

extern "C" void* art_heap_rosalloc_morecore(RosAlloc* rosalloc, intptr_t increment);

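// Per-size-bracket metadata tables, filled in by Initialize() on the first
// RosAlloc construction. Sketch of the invariant, as inferred from the uses
// in this file: for bracket idx, a run spans numOfPages[idx] pages, its
// slots start headerSizes[idx] bytes into the run, and it holds
// numOfSlots[idx] slots of bracketSizes[idx] bytes each; the bulk-free and
// thread-local-free bit maps live at the listed byte offsets from the run
// header.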
size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
size_t RosAlloc::bulkFreeBitMapOffsets[kNumOfSizeBrackets];
size_t RosAlloc::threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;

RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                   PageReleaseMode page_release_mode, size_t page_release_size_threshold)
    : base_(reinterpret_cast<byte*>(base)), footprint_(capacity),
      capacity_(capacity), max_capacity_(max_capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
      page_release_mode_(page_release_mode),
      page_release_size_threshold_(page_release_size_threshold) {
  DCHECK(RoundUp(capacity, kPageSize) == capacity);
  DCHECK(RoundUp(max_capacity, kPageSize) == max_capacity);
  CHECK_LE(capacity, max_capacity);
  CHECK(IsAligned<kPageSize>(page_release_size_threshold_));
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << (intptr_t)base_ << ", end="
             << std::hex << (intptr_t)(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_
             << ", max_capacity=" << std::dec << max_capacity_;
  memset(current_runs_, 0, sizeof(current_runs_));
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_locks_[i] = new Mutex("a rosalloc size bracket lock",
                                       kRosAllocBracketLock);
  }
  DCHECK_EQ(footprint_, capacity_);
  size_t num_of_pages = footprint_ / kPageSize;
  size_t max_num_of_pages = max_capacity_ / kPageSize;
  std::string error_msg;
  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", NULL,
                                               RoundUp(max_num_of_pages, kPageSize),
                                               PROT_READ | PROT_WRITE, false, &error_msg));
  CHECK(page_map_mem_map_.get() != NULL) << "Couldn't allocate the page map : " << error_msg;
  page_map_ = page_map_mem_map_->Begin();
  page_map_size_ = num_of_pages;
  max_page_map_size_ = max_num_of_pages;
  free_page_run_size_map_.resize(num_of_pages);
  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  DCHECK(free_pages->IsFree());
  free_pages->ReleasePages(this);
  DCHECK(free_pages->IsFree());
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}

RosAlloc::~RosAlloc() {
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    delete size_bracket_locks_[i];
  }
}

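// Allocates num_pages contiguous pages with the global lock held. In effect
// this is an address-ordered first-fit search: free_page_runs_ is sorted by
// address, the scan takes the lowest-addressed run that is large enough and
// splits off any remainder, and if nothing fits the footprint is grown via
// art_heap_rosalloc_morecore(), merging the increment into a trailing free
// run when one exists.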
void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
  lock_.AssertHeld(self);
  DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
  FreePageRun* res = NULL;
  size_t req_byte_size = num_pages * kPageSize;
  // Find the lowest address free page run that's large enough.
  for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
    FreePageRun* fpr = *it;
    DCHECK(fpr->IsFree());
    size_t fpr_byte_size = fpr->ByteSize(this);
    DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
    if (req_byte_size <= fpr_byte_size) {
      // Found one.
      free_page_runs_.erase(it++);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Don't need to call madvise on remainder here.
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
      break;
    } else {
      ++it;
    }
  }

  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == NULL && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = NULL;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK(page_map_[ToPageMapIndex(last_free_page_run)] == kPageMapEmpty);
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_size_, new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_size_ = new_num_of_pages;
      DCHECK_LE(page_map_size_, max_page_map_size_);
      free_page_run_size_map_.resize(new_num_of_pages);
      art_heap_rosalloc_morecore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK(last_free_page_run->End(this) == base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK(*free_page_runs_.rbegin() == new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != NULL);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != NULL)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK(page_map_[page_map_idx + i] == kPageMapEmpty);
    }
    switch (page_map_type) {
      case kPageMapRun:
        page_map_[page_map_idx] = kPageMapRun;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapRunPart;
        }
        break;
      case kPageMapLargeObject:
        page_map_[page_map_idx] = kPageMapLargeObject;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
        }
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_type;
        break;
    }
    if (kIsDebugBuild) {
      // Clear the first page which isn't madvised away in the debug
      // build for the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : NULL";
  }
  return nullptr;
}

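// Returns the pages backing ptr (a run or a large object) to
// free_page_runs_. The new free run is coalesced with adjacent free runs in
// both address directions before (re)insertion, so free_page_runs_ should
// never hold two abutting runs.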
void RosAlloc::FreePages(Thread* self, void* ptr) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK(pm_idx < page_map_size_);
  byte pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  byte pm_part_type;
  switch (pm_type) {
    case kPageMapRun:
      pm_part_type = kPageMapRunPart;
      break;
    case kPageMapLargeObject:
      pm_part_type = kPageMapLargeObjectPart;
      break;
    default:
      pm_part_type = kPageMapEmpty;
      LOG(FATAL) << "Unreachable - RosAlloc::FreePages() : " << "pm_idx=" << pm_idx << ", pm_type="
                 << static_cast<int>(pm_type) << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      return;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_size_;
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, num_pages * kPageSize);
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_size_ : ToPageMapIndex(fpr->End(this))) << "]";
    }
    auto higher_it = free_page_runs_.upper_bound(fpr);
    if (higher_it != free_page_runs_.end()) {
      for (auto it = higher_it; it != free_page_runs_.end(); ) {
        FreePageRun* h = *it;
        DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                    << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
        }
        if (fpr->End(this) == h->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it++);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(h)
                      << " from free_page_runs_";
          }
          fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
          DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
      }
    }
    // Try to coalesce in the lower address direction.
    auto lower_it = free_page_runs_.upper_bound(fpr);
    if (lower_it != free_page_runs_.begin()) {
      --lower_it;
      for (auto it = lower_it; ; ) {
        // We want to try to coalesce with the first element but
        // there's no "<=" operator for the iterator.
        bool to_exit_loop = it == free_page_runs_.begin();

        FreePageRun* l = *it;
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                    << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
        }
        if (l->End(this) == fpr->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it--);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(l)
                      << " from free_page_runs_";
          }
          l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
          DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
          fpr = l;
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
        if (to_exit_loop) {
          break;
        }
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  DCHECK(fpr->IsFree());
  fpr->ReleasePages(this);
  DCHECK(fpr->IsFree());
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
}

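// Large objects (> kLargeSizeThreshold) bypass the run machinery entirely
// and are carved directly out of whole pages under the global lock; their
// usable size is simply the page-rounded request.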
void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK(size > kLargeSizeThreshold);
  size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
  void* r;
  {
    MutexLock mu(self, lock_);
    r = AllocPages(self, num_pages, kPageMapLargeObject);
  }
  if (UNLIKELY(r == nullptr)) {
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
    }
    return nullptr;
  }
  if (bytes_allocated != NULL) {
    *bytes_allocated = num_pages * kPageSize;
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
              << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }
  if (!DoesReleaseAllPages()) {
    // If it does not release all pages, pages may not be zeroed out.
    memset(r, 0, size);
  }
  // Check if the returned memory is really all zero.
  if (kCheckZeroMemory) {
    byte* bytes = reinterpret_cast<byte*>(r);
    for (size_t i = 0; i < size; ++i) {
      DCHECK_EQ(bytes[i], 0);
    }
  }
  return r;
}

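// Dispatches a free on the page map entry for ptr: large objects go
// straight back to FreePages() under the global lock, while run (or run
// part) pages are walked back to the run header, which is then freed via
// FreeFromRun() after the global lock is dropped.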
void RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK(base_ <= ptr && ptr < base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  bool free_from_run = false;
  Run* run = NULL;
  {
    MutexLock mu(self, lock_);
    DCHECK(pm_idx < page_map_size_);
    byte page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_[pm_idx]) {
      case kPageMapEmpty:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return;
      case kPageMapLargeObject:
        FreePages(self, ptr);
        return;
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return;
      case kPageMapRun:
      case kPageMapRunPart: {
        free_from_run = true;
        size_t pi = pm_idx;
        DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
        // Find the beginning of the run.
        while (page_map_[pi] != kPageMapRun) {
          pi--;
          DCHECK(pi < capacity_ / kPageSize);
        }
        DCHECK(page_map_[pi] == kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
        DCHECK(run->magic_num_ == kMagicNum);
        break;
      }
      default:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return;
    }
  }
  if (LIKELY(free_from_run)) {
    DCHECK(run != NULL);
    FreeFromRun(self, ptr, run);
  }
}

void RosAlloc::Free(Thread* self, void* ptr) {
  ReaderMutexLock rmu(self, bulk_free_lock_);
  FreeInternal(self, ptr);
}

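// Hands out a run for size bracket idx: preferably the lowest-addressed run
// from non_full_runs_[idx], otherwise a freshly page-allocated run whose
// header and bit maps are initialized here.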
RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  Run* new_run;
  size_t num_pages = numOfPages[idx];
  // Get the lowest address non-full run from the binary tree.
  Run* temp = NULL;
  std::set<Run*>* bt = &non_full_runs_[idx];
  std::set<Run*>::iterator found = bt->lower_bound(temp);
  if (found != bt->end()) {
    // If there's one, use it as the current run.
    Run* non_full_run = *found;
    DCHECK(non_full_run != NULL);
    new_run = non_full_run;
    DCHECK_EQ(new_run->is_thread_local_, 0);
    bt->erase(found);
    DCHECK_EQ(non_full_run->is_thread_local_, 0);
  } else {
    // If there's none, allocate a new run and use it as the
    // current run.
    {
      MutexLock mu(self, lock_);
      new_run = reinterpret_cast<Run*>(AllocPages(self, num_pages, kPageMapRun));
    }
    if (new_run == NULL) {
      return NULL;
    }
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    new_run->top_slot_idx_ = 0;
    new_run->ClearBitMaps();
    new_run->to_be_bulk_freed_ = false;
  }
  return new_run;
}

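// Small allocations are rounded up to a size bracket and served from a run.
// A rough worked example of the mapping the DCHECKs below assume (the exact
// tables come from Initialize()): if brackets advance in 16-byte steps
// below 512 bytes, a 40-byte request maps to the 48-byte bracket, so
// bracket_size - size = 8 < 16. Brackets up to kMaxThreadLocalSizeBracketIdx
// are served lock-free from a thread-local run; larger brackets share
// current_runs_[idx] under the per-bracket lock.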
void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK(size <= kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK(size <= bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);

  void* slot_addr;

  if (LIKELY(idx <= kMaxThreadLocalSizeBracketIdx)) {
    // Use a thread-local run.
    Run* thread_local_run = reinterpret_cast<Run*>(self->rosalloc_runs_[idx]);
    if (UNLIKELY(thread_local_run == NULL)) {
      MutexLock mu(self, *size_bracket_locks_[idx]);
      thread_local_run = RefillRun(self, idx);
      if (UNLIKELY(thread_local_run == NULL)) {
        return NULL;
      }
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      thread_local_run->is_thread_local_ = 1;
      self->rosalloc_runs_[idx] = thread_local_run;
      DCHECK(!thread_local_run->IsFull());
    }

    DCHECK(thread_local_run != NULL);
    DCHECK_NE(thread_local_run->is_thread_local_, 0);
    slot_addr = thread_local_run->AllocSlot();

    if (UNLIKELY(slot_addr == NULL)) {
      // The run got full. Try to free slots.
      DCHECK(thread_local_run->IsFull());
      MutexLock mu(self, *size_bracket_locks_[idx]);
      bool is_all_free_after_merge;
      if (thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&is_all_free_after_merge)) {
        // Some slot got freed. Keep it.
        DCHECK(!thread_local_run->IsFull());
        DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
        if (is_all_free_after_merge) {
          // Reinstate the bump index mode if it's all free.
          DCHECK_EQ(thread_local_run->top_slot_idx_, numOfSlots[idx]);
          thread_local_run->top_slot_idx_ = 0;
        }
      } else {
        // No slots got freed. Try to refill the thread-local run.
        DCHECK(thread_local_run->IsFull());
        self->rosalloc_runs_[idx] = NULL;
        thread_local_run->is_thread_local_ = 0;
        if (kIsDebugBuild) {
          full_runs_[idx].insert(thread_local_run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(thread_local_run)
                      << " into full_runs_[" << std::dec << idx << "]";
          }
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
        thread_local_run = RefillRun(self, idx);
        if (UNLIKELY(thread_local_run == NULL)) {
          return NULL;
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
        thread_local_run->is_thread_local_ = 1;
        self->rosalloc_runs_[idx] = thread_local_run;
        DCHECK(!thread_local_run->IsFull());
      }

      DCHECK(thread_local_run != NULL);
      DCHECK(!thread_local_run->IsFull());
      DCHECK_NE(thread_local_run->is_thread_local_, 0);
      slot_addr = thread_local_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != NULL);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  } else {
    // Use the (shared) current run.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* current_run = current_runs_[idx];
    if (UNLIKELY(current_run == NULL)) {
      current_run = RefillRun(self, idx);
      if (UNLIKELY(current_run == NULL)) {
        return NULL;
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
      current_run->is_thread_local_ = 0;
      current_runs_[idx] = current_run;
      DCHECK(!current_run->IsFull());
    }
    DCHECK(current_run != NULL);
    slot_addr = current_run->AllocSlot();
    if (UNLIKELY(slot_addr == NULL)) {
      // The current run got full. Try to refill it.
      DCHECK(current_run->IsFull());
      current_runs_[idx] = NULL;
      if (kIsDebugBuild) {
        // Insert it into full_runs and set the current run to NULL.
        full_runs_[idx].insert(current_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(current_run)
                    << " into full_runs_[" << std::dec << idx << "]";
        }
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
      current_run = RefillRun(self, idx);
      if (UNLIKELY(current_run == NULL)) {
        return NULL;
      }
      DCHECK(current_run != NULL);
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
      current_run->is_thread_local_ = 0;
      current_runs_[idx] = current_run;
      DCHECK(!current_run->IsFull());
      slot_addr = current_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != NULL);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  }
  if (LIKELY(bytes_allocated != NULL)) {
    *bytes_allocated = bracket_size;
  }
  memset(slot_addr, 0, size);
  return slot_addr;
}

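// Frees one slot back to its run. Thread-local runs only get their
// thread-local free bit marked here (the owning thread merges it later),
// while shared runs free the slot immediately and may migrate between the
// full/non-full sets or give their pages back when they become empty.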
void RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
  DCHECK(run->magic_num_ == kMagicNum);
  DCHECK(run < ptr && ptr < run->End());
  size_t idx = run->size_bracket_idx_;
  MutexLock mu(self, *size_bracket_locks_[idx]);
  bool run_was_full = false;
  if (kIsDebugBuild) {
    run_was_full = run->IsFull();
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
  }
  if (LIKELY(run->is_thread_local_ != 0)) {
    // It's a thread-local run. Just mark the thread-local free bit map and return.
    DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->MarkThreadLocalFreeBitMap(ptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run);
    }
    // A thread-local run is kept thread-local even if it has become all free.
    return;
  }
  // Free the slot in the run.
  run->FreeSlot(ptr);
  std::set<Run*>* non_full_runs = &non_full_runs_[idx];
  if (run->IsAllFree()) {
    // It has just become completely free. Free the pages of this run.
    std::set<Run*>::iterator pos = non_full_runs->find(run);
    if (pos != non_full_runs->end()) {
      non_full_runs->erase(pos);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
      }
    }
    if (run == current_runs_[idx]) {
      current_runs_[idx] = NULL;
    }
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    {
      MutexLock mu(self, lock_);
      FreePages(self, run);
    }
  } else {
    // It is not completely free. If it was neither the current run nor
    // already in the non-full run set (i.e., it was full), insert it into
    // the non-full run set.
    if (run != current_runs_[idx]) {
      hash_set<Run*, hash_run, eq_run>* full_runs =
          kIsDebugBuild ? &full_runs_[idx] : NULL;
      std::set<Run*>::iterator pos = non_full_runs->find(run);
      if (pos == non_full_runs->end()) {
        DCHECK(run_was_full);
        DCHECK(full_runs->find(run) != full_runs->end());
        if (kIsDebugBuild) {
          full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run) << " from full_runs_";
          }
        }
        non_full_runs->insert(run);
        DCHECK(!run->IsFull());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
}

std::string RosAlloc::Run::BitMapToStr(uint32_t* bit_map_base, size_t num_vec) {
  std::string bit_map_str;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = bit_map_base[v];
    if (v != num_vec - 1) {
      bit_map_str.append(StringPrintf("%x-", vec));
    } else {
      bit_map_str.append(StringPrintf("%x", vec));
    }
  }
  return bit_map_str;
}

std::string RosAlloc::Run::Dump() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  std::ostringstream stream;
  stream << "RosAlloc Run = " << reinterpret_cast<void*>(this)
         << "{ magic_num=" << static_cast<int>(magic_num_)
         << " size_bracket_idx=" << idx
         << " is_thread_local=" << static_cast<int>(is_thread_local_)
         << " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_)
         << " top_slot_idx=" << top_slot_idx_
         << " alloc_bit_map=" << BitMapToStr(alloc_bit_map_, num_vec)
         << " bulk_free_bit_map=" << BitMapToStr(BulkFreeBitMap(), num_vec)
         << " thread_local_bit_map=" << BitMapToStr(ThreadLocalFreeBitMap(), num_vec)
         << " }" << std::endl;
  return stream.str();
}

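// Slot allocation is two-phase: while top_slot_idx_ < num_slots the run is
// in bump index mode and slots are handed out in order; once the bump index
// is exhausted, allocation falls back to a find-first-zero scan of the
// alloc bit map, 32 slots per word. For example, if a word holds
// 0x0000000f, ~vec has bit 4 as its lowest set bit, __builtin_ffs(~vec)
// returns 5, and the free slot index within that word is 4.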
void* RosAlloc::Run::AllocSlot() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  DCHECK_LE(top_slot_idx_, num_slots);
  if (LIKELY(top_slot_idx_ < num_slots)) {
    // If it's in bump index mode, grab the top slot and increment the top index.
    size_t slot_idx = top_slot_idx_;
    byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
    }
    top_slot_idx_++;
    size_t vec_idx = slot_idx / 32;
    size_t vec_off = slot_idx % 32;
    uint32_t* vec = &alloc_bit_map_[vec_idx];
    DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
    *vec |= 1 << vec_off;
    DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
    return slot_addr;
  }
  // Not in bump index mode. Search the alloc bit map for an empty slot.
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slot_idx = 0;
  bool found_slot = false;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t *vecp = &alloc_bit_map_[v];
    uint32_t ffz1 = __builtin_ffs(~*vecp);
    uint32_t ffz;
    // TODO: Use LIKELY or UNLIKELY here?
    if (LIKELY(ffz1 > 0 && (ffz = ffz1 - 1) + v * 32 < num_slots)) {
      // Found an empty slot. Set the bit.
      DCHECK_EQ((*vecp & (1 << ffz)), static_cast<uint32_t>(0));
      *vecp |= (1 << ffz);
      DCHECK_NE((*vecp & (1 << ffz)), static_cast<uint32_t>(0));
      slot_idx = ffz + v * 32;
      found_slot = true;
      break;
    }
  }
  if (LIKELY(found_slot)) {
    byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
    }
    return slot_addr;
  }
  return NULL;
}

inline void RosAlloc::Run::FreeSlot(void* ptr) {
  DCHECK_EQ(is_thread_local_, 0);
  byte idx = size_bracket_idx_;
  size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
  DCHECK(slot_idx < numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK(vec_idx < num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &alloc_bit_map_[vec_idx];
  DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  *vec &= ~(1 << vec_off);
  DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::FreeSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

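// Each run keeps three parallel bit maps: the alloc bit map (slot in use),
// a bulk free bit map staged by BulkFree(), and a thread-local free bit map
// staged by frees into thread-local runs. The merge routines below fold a
// staged map back into the alloc bit map, in effect computing
// alloc &= ~staged one 32-bit word at a time, clearing the staged words as
// they go.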
inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out) {
  DCHECK_NE(is_thread_local_, 0);
  // Free slots in the alloc bit map based on the thread local free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  bool changed = false;
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* tl_free_vecp = &ThreadLocalFreeBitMap()[0];
  bool is_all_free_after = true;
  for (size_t v = 0; v < num_vec; v++, vecp++, tl_free_vecp++) {
    uint32_t tl_free_vec = *tl_free_vecp;
    uint32_t vec_before = *vecp;
    uint32_t vec_after;
    if (tl_free_vec != 0) {
      vec_after = vec_before & ~tl_free_vec;
      *vecp = vec_after;
      changed = true;
      *tl_free_vecp = 0;  // clear the thread local free bit map.
    } else {
      vec_after = vec_before;
    }
    if (vec_after != 0) {
      is_all_free_after = false;
    }
    DCHECK_EQ(*tl_free_vecp, static_cast<uint32_t>(0));
  }
  *is_all_free_after_out = is_all_free_after;
  // Return true if at least one bit was set in the thread-local free bit
  // map (and hence at least one bit in the alloc bit map changed).
  return changed;
}

inline void RosAlloc::Run::MergeBulkFreeBitMapIntoAllocBitMap() {
  DCHECK_EQ(is_thread_local_, 0);
  // Free slots in the alloc bit map based on the bulk free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* free_vecp = &BulkFreeBitMap()[0];
  for (size_t v = 0; v < num_vec; v++, vecp++, free_vecp++) {
    uint32_t free_vec = *free_vecp;
    if (free_vec != 0) {
      *vecp &= ~free_vec;
      *free_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*free_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::UnionBulkFreeBitMapToThreadLocalFreeBitMap() {
  DCHECK_NE(is_thread_local_, 0);
  // Union the thread local bit map with the bulk free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  uint32_t* to_vecp = &ThreadLocalFreeBitMap()[0];
  uint32_t* from_vecp = &BulkFreeBitMap()[0];
  for (size_t v = 0; v < num_vec; v++, to_vecp++, from_vecp++) {
    uint32_t from_vec = *from_vecp;
    if (from_vec != 0) {
      *to_vecp |= from_vec;
      *from_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*from_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::MarkThreadLocalFreeBitMap(void* ptr) {
  DCHECK_NE(is_thread_local_, 0);
  MarkFreeBitMapShared(ptr, ThreadLocalFreeBitMap(), "MarkThreadLocalFreeBitMap");
}

inline void RosAlloc::Run::MarkBulkFreeBitMap(void* ptr) {
  MarkFreeBitMapShared(ptr, BulkFreeBitMap(), "MarkBulkFreeBitMap");
}

inline void RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
                                                const char* caller_name) {
  byte idx = size_bracket_idx_;
  size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
  DCHECK(slot_idx < numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK(vec_idx < num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &free_bit_map_base[vec_idx];
  DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  *vec |= 1 << vec_off;
  DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : 0x" << std::hex
              << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

inline bool RosAlloc::Run::IsAllFree() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = alloc_bit_map_[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsFull() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK(num_slots >= slots);
    uint32_t vec = alloc_bit_map_[v];
    uint32_t mask = (num_slots - slots >= 32) ? static_cast<uint32_t>(-1)
        : (1 << (num_slots - slots)) - 1;
    DCHECK(num_slots - slots >= 32 ? mask == static_cast<uint32_t>(-1) : true);
    if (vec != mask) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsBulkFreeBitmapClean() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = BulkFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsThreadLocalFreeBitmapClean() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = ThreadLocalFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline void RosAlloc::Run::ClearBitMaps() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  // The alloc, bulk free, and thread-local free bit maps are laid out
  // contiguously, so one memset clears all three.
  memset(alloc_bit_map_, 0, sizeof(uint32_t) * num_vec * 3);
}

void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                                    void* arg) {
  size_t idx = size_bracket_idx_;
  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  DCHECK_EQ(slot_base + num_slots * bracket_size, reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize);
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK(num_slots >= slots);
    uint32_t vec = alloc_bit_map_[v];
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      byte* slot_addr = slot_base + (slots + i) * bracket_size;
      if (is_allocated) {
        handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
      } else {
        handler(slot_addr, slot_addr + bracket_size, 0, arg);
      }
    }
  }
}

// If true, read the page map entries in BulkFree() without using the
// lock for better performance, assuming that the existence of an
// allocated chunk/pointer being freed in BulkFree() guarantees that
// the page map entry won't change. Disabled for now.
static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = false;

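// BulkFree() runs in two phases: first each pointer is resolved to its run
// and marked in that run's bulk free bit map (collecting the set of
// affected runs), then each affected run is visited once under its bracket
// lock and the staged bits are merged, which amortizes the locking over
// many frees compared with calling Free() per pointer.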
void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
  if (false) {
    // Used only to test Free() as GC uses only BulkFree().
    for (size_t i = 0; i < num_ptrs; ++i) {
      FreeInternal(self, ptrs[i]);
    }
    return;
  }

  WriterMutexLock wmu(self, bulk_free_lock_);

  // First mark slots to free in the bulk free bit map without locking the
  // size bracket locks. On host, hash_set is faster than vector + flag.
#ifdef HAVE_ANDROID_OS
  std::vector<Run*> runs;
#else
  hash_set<Run*, hash_run, eq_run> runs;
#endif
  for (size_t i = 0; i < num_ptrs; i++) {
    void* ptr = ptrs[i];
    ptrs[i] = NULL;
    DCHECK(base_ <= ptr && ptr < base_ + footprint_);
    size_t pm_idx = RoundDownToPageMapIndex(ptr);
    Run* run = NULL;
    if (kReadPageMapEntryWithoutLockInBulkFree) {
      // Read the page map entries without locking the lock.
      byte page_map_entry = page_map_[pm_idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                  << std::dec << pm_idx
                  << ", page_map_entry=" << static_cast<int>(page_map_entry);
      }
      if (LIKELY(page_map_entry == kPageMapRun)) {
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
        DCHECK(run->magic_num_ == kMagicNum);
      } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
        size_t pi = pm_idx;
        DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
        // Find the beginning of the run.
        while (page_map_[pi] != kPageMapRun) {
          pi--;
          DCHECK(pi < capacity_ / kPageSize);
        }
        DCHECK(page_map_[pi] == kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
        DCHECK(run->magic_num_ == kMagicNum);
      } else if (page_map_entry == kPageMapLargeObject) {
        MutexLock mu(self, lock_);
        FreePages(self, ptr);
        continue;
      } else {
        LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
      }
      DCHECK(run != NULL);
      // Set the bit in the bulk free bit map.
      run->MarkBulkFreeBitMap(ptr);
#ifdef HAVE_ANDROID_OS
      if (!run->to_be_bulk_freed_) {
        run->to_be_bulk_freed_ = true;
        runs.push_back(run);
      }
#else
      runs.insert(run);
#endif
    } else {
      // Read the page map entries with a lock.
      bool free_from_run = false;
      {
        MutexLock mu(self, lock_);
        DCHECK(pm_idx < page_map_size_);
        byte page_map_entry = page_map_[pm_idx];
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                    << std::dec << pm_idx
                    << ", page_map_entry=" << static_cast<int>(page_map_entry);
        }
        if (LIKELY(page_map_entry == kPageMapRun)) {
          free_from_run = true;
          run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
          DCHECK(run->magic_num_ == kMagicNum);
        } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
          free_from_run = true;
          size_t pi = pm_idx;
          DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
          // Find the beginning of the run.
          while (page_map_[pi] != kPageMapRun) {
            pi--;
            DCHECK(pi < capacity_ / kPageSize);
          }
          DCHECK(page_map_[pi] == kPageMapRun);
          run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
          DCHECK(run->magic_num_ == kMagicNum);
        } else if (page_map_entry == kPageMapLargeObject) {
          FreePages(self, ptr);
        } else {
          LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
        }
      }
      if (LIKELY(free_from_run)) {
        DCHECK(run != NULL);
        // Set the bit in the bulk free bit map.
        run->MarkBulkFreeBitMap(ptr);
#ifdef HAVE_ANDROID_OS
        if (!run->to_be_bulk_freed_) {
          run->to_be_bulk_freed_ = true;
          runs.push_back(run);
        }
#else
        runs.insert(run);
#endif
      }
    }
  }

  // Now, iterate over the affected runs and update the alloc bit map
  // based on the bulk free bit map (for non-thread-local runs) and
  // union the bulk free bit map into the thread-local free bit map
  // (for thread-local runs.)
#ifdef HAVE_ANDROID_OS
  typedef std::vector<Run*>::iterator It;
#else
  typedef hash_set<Run*, hash_run, eq_run>::iterator It;
#endif
  for (It it = runs.begin(); it != runs.end(); ++it) {
    Run* run = *it;
#ifdef HAVE_ANDROID_OS
    DCHECK(run->to_be_bulk_freed_);
    run->to_be_bulk_freed_ = false;
#endif
    size_t idx = run->size_bracket_idx_;
    MutexLock mu(self, *size_bracket_locks_[idx]);
    if (run->is_thread_local_ != 0) {
      DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
      DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
      run->UnionBulkFreeBitMapToThreadLocalFreeBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(run);
      }
      DCHECK_NE(run->is_thread_local_, 0);
      // A thread local run will be kept as a thread local even if
      // it's become all free.
    } else {
      bool run_was_full = run->IsFull();
      run->MergeBulkFreeBitMapIntoAllocBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run);
      }
      // Check if the run should be moved to non_full_runs_ or
      // free_page_runs_.
      std::set<Run*>* non_full_runs = &non_full_runs_[idx];
      hash_set<Run*, hash_run, eq_run>* full_runs =
          kIsDebugBuild ? &full_runs_[idx] : NULL;
      if (run->IsAllFree()) {
        // It has just become completely free. Free the pages of the
        // run.
        bool run_was_current = run == current_runs_[idx];
        if (run_was_current) {
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          // If it was a current run, reuse it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only.)
          if (kIsDebugBuild) {
            hash_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
            DCHECK(pos != full_runs->end());
            full_runs->erase(pos);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
            DCHECK(full_runs->find(run) == full_runs->end());
          }
        } else {
          // If it was in a non full run set, remove it from the set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
          non_full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " from non_full_runs_";
          }
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
        }
        if (!run_was_current) {
          MutexLock mu(self, lock_);
          FreePages(self, run);
        }
      } else {
        // It is not completely free. If it was neither the current run
        // nor already in the non-full run set (i.e., it was full),
        // insert it into the non-full run set.
        if (run == current_runs_[idx]) {
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          DCHECK(full_runs->find(run) == full_runs->end());
          // If it was a current run, keep it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only) and insert into the non-full run set.
          DCHECK(full_runs->find(run) != full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          if (kIsDebugBuild) {
            full_runs->erase(run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
          }
          non_full_runs->insert(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " into non_full_runs_[" << std::dec << idx << "]";
          }
        } else {
          // It was not full, so leave it in the non-full run set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
        }
      }
    }
  }
}

std::string RosAlloc::DumpPageMap() {
  std::ostringstream stream;
  stream << "RosAlloc PageMap: " << std::endl;
  lock_.AssertHeld(Thread::Current());
  size_t end = page_map_size_;
  FreePageRun* curr_fpr = NULL;
  size_t curr_fpr_size = 0;
  size_t remaining_curr_fpr_size = 0;
  size_t num_running_empty_pages = 0;
  for (size_t i = 0; i < end; ++i) {
    byte pm = page_map_[i];
    switch (pm) {
      case kPageMapEmpty: {
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
          // Encountered a fresh free page run.
          DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
          DCHECK(fpr->IsFree());
          DCHECK(curr_fpr == NULL);
          DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
          curr_fpr = fpr;
          curr_fpr_size = fpr->ByteSize(this);
          DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
          remaining_curr_fpr_size = curr_fpr_size - kPageSize;
          stream << "[" << i << "]=Empty (FPR start)"
                 << " fpr_size=" << curr_fpr_size
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = NULL;
            curr_fpr_size = 0;
          }
          stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
          DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
        } else {
          // Still part of the current free page run.
          DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
          DCHECK(curr_fpr != NULL && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
          DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
          DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
          remaining_curr_fpr_size -= kPageSize;
          stream << "[" << i << "]=Empty (FPR part)"
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = NULL;
            curr_fpr_size = 0;
          }
        }
        num_running_empty_pages++;
        break;
      }
      case kPageMapLargeObject: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (start)" << std::endl;
        break;
      }
      case kPageMapLargeObjectPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (part)" << std::endl;
        break;
      case kPageMapRun: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        size_t idx = run->size_bracket_idx_;
        stream << "[" << i << "]=Run (start)"
               << " idx=" << idx
               << " numOfPages=" << numOfPages[idx]
               << " thread_local=" << static_cast<int>(run->is_thread_local_)
               << " is_all_free=" << (run->IsAllFree() ? 1 : 0)
               << std::endl;
        break;
      }
      case kPageMapRunPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Run (part)" << std::endl;
        break;
      default:
        stream << "[" << i << "]=Unrecognizable page map type: " << pm;
        break;
    }
  }
  return stream.str();
}

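// Returns the usable size of the allocation at ptr: the page-rounded size
// for a large object, or the bracket size for a run slot. A rough sketch of
// the lookup (assuming ptr came from a rosalloc allocation):
//   size_t pm_idx = (reinterpret_cast<byte*>(ptr) - base_) / kPageSize;
//   // page_map_[pm_idx] then selects the large-object or run case below.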
size_t RosAlloc::UsableSize(void* ptr) {
  DCHECK(base_ <= ptr && ptr < base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  MutexLock mu(Thread::Current(), lock_);
  switch (page_map_[pm_idx]) {
    case kPageMapEmpty:
      LOG(FATAL) << "Unreachable - RosAlloc::UsableSize(): pm_idx=" << pm_idx << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapLargeObject: {
      size_t num_pages = 1;
      size_t idx = pm_idx + 1;
      size_t end = page_map_size_;
      while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
        num_pages++;
        idx++;
      }
      return num_pages * kPageSize;
    }
    case kPageMapLargeObjectPart:
      LOG(FATAL) << "Unreachable - RosAlloc::UsableSize(): pm_idx=" << pm_idx << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapRun:
    case kPageMapRunPart: {
      // Find the beginning of the run.
      while (page_map_[pm_idx] != kPageMapRun) {
        pm_idx--;
        DCHECK(pm_idx < capacity_ / kPageSize);
      }
      DCHECK(page_map_[pm_idx] == kPageMapRun);
      Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      DCHECK(run->magic_num_ == kMagicNum);
      size_t idx = run->size_bracket_idx_;
      size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
          - (reinterpret_cast<byte*>(run) + headerSizes[idx]);
      DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
      return IndexToBracketSize(idx);
    }
    default:
      LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
      break;
  }
  return 0;
}

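// Attempts to shrink the footprint: if the space ends with a free page run,
// removes it, trims the tail of the page map, and returns the memory via a
// negative morecore increment. Returns true if anything was released.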
bool RosAlloc::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  FreePageRun* last_free_page_run;
  DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
  auto it = free_page_runs_.rbegin();
  if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
    // Remove the last free page run, if any.
    DCHECK(last_free_page_run->IsFree());
    DCHECK(page_map_[ToPageMapIndex(last_free_page_run)] == kPageMapEmpty);
    DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
    DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
    free_page_runs_.erase(last_free_page_run);
    size_t decrement = last_free_page_run->ByteSize(this);
    size_t new_footprint = footprint_ - decrement;
    DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
    size_t new_num_of_pages = new_footprint / kPageSize;
    DCHECK_GE(page_map_size_, new_num_of_pages);
    // Zero out the tail of the page map.
    byte* zero_begin = page_map_ + new_num_of_pages;
    byte* madvise_begin = AlignUp(zero_begin, kPageSize);
    DCHECK_LE(madvise_begin, page_map_mem_map_->End());
    size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
    if (madvise_size > 0) {
      DCHECK_ALIGNED(madvise_begin, kPageSize);
      DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
      CHECK_EQ(madvise(madvise_begin, madvise_size, MADV_DONTNEED), 0);
    }
    if (madvise_begin != zero_begin) {
      memset(zero_begin, 0, madvise_begin - zero_begin);
    }
    page_map_size_ = new_num_of_pages;
    free_page_run_size_map_.resize(new_num_of_pages);
    DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
    art_heap_rosalloc_morecore(this, -(static_cast<intptr_t>(decrement)));
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
                << footprint_ << " to " << new_footprint;
    }
    DCHECK_LT(new_footprint, footprint_);
    DCHECK_LT(new_footprint, capacity_);
    footprint_ = new_footprint;
    return true;
  }
  return false;
}

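// Walks the entire space and calls handler for each free page run (with
// used_bytes == 0), each large object, and each run slot. A minimal usage
// sketch, using the callbacks defined further below, to total up the
// allocated bytes:
//   size_t bytes_allocated = 0;
//   rosalloc->InspectAll(RosAlloc::BytesAllocatedCallback, &bytes_allocated);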
void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                          void* arg) {
  // Note: no need to use this to release pages as we already do so in FreePages().
  if (handler == NULL) {
    return;
  }
  MutexLock mu(Thread::Current(), lock_);
  size_t pm_end = page_map_size_;
  size_t i = 0;
  while (i < pm_end) {
    byte pm = page_map_[i];
    switch (pm) {
      case kPageMapEmpty: {
        // The start of a free page run.
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
        size_t fpr_size = fpr->ByteSize(this);
        DCHECK(IsAligned<kPageSize>(fpr_size));
        void* start = fpr;
        if (kIsDebugBuild) {
          // In the debug build, the first page of a free page run
          // contains a magic number for debugging. Exclude it.
          start = reinterpret_cast<byte*>(fpr) + kPageSize;
        }
        void* end = reinterpret_cast<byte*>(fpr) + fpr_size;
        handler(start, end, 0, arg);
        size_t num_pages = fpr_size / kPageSize;
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapEmpty);
          }
        }
        i += fpr_size / kPageSize;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObject: {
        // The start of a large object.
        size_t num_pages = 1;
        size_t idx = i + 1;
        while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
          num_pages++;
          idx++;
        }
        void* start = base_ + i * kPageSize;
        void* end = base_ + (i + num_pages) * kPageSize;
        size_t used_bytes = num_pages * kPageSize;
        handler(start, end, used_bytes, arg);
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
      case kPageMapRun: {
        // The start of a run.
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        DCHECK(run->magic_num_ == kMagicNum);
        run->InspectAllSlots(handler, arg);
        size_t num_pages = numOfPages[run->size_bracket_idx_];
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapRunPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapRunPart:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
    }
  }
}

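// Returns the current footprint of the space in bytes, taken under lock_.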
size_t RosAlloc::Footprint() {
  MutexLock mu(Thread::Current(), lock_);
  return footprint_;
}

size_t RosAlloc::FootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return capacity_;
}

void RosAlloc::SetFootprintLimit(size_t new_capacity) {
  MutexLock mu(Thread::Current(), lock_);
  DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
  // Only growing is supported here; shrinking is done via Trim().
  if (capacity_ < new_capacity) {
    CHECK_LE(new_capacity, max_capacity_);
    capacity_ = new_capacity;
    VLOG(heap) << "new capacity=" << capacity_;
  }
}

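// Detaches the given thread's thread-local runs and merges their pending
// thread-local frees back into the alloc bitmaps. A revoked run is freed if
// it ends up all free, and is otherwise moved into the non-full run set (or,
// in debug builds, the full run set) for its size bracket.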
void RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
  Thread* self = Thread::Current();
  // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
  WriterMutexLock wmu(self, bulk_free_lock_);
  for (size_t idx = 0; idx < kNumOfSizeBrackets; idx++) {
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* thread_local_run = reinterpret_cast<Run*>(thread->rosalloc_runs_[idx]);
    if (thread_local_run != NULL) {
      DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
      DCHECK_NE(thread_local_run->is_thread_local_, 0);
      thread->rosalloc_runs_[idx] = NULL;
      // Note the thread local run may not be full here.
      bool dont_care;
      thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&dont_care);
      thread_local_run->is_thread_local_ = 0;
      thread_local_run->MergeBulkFreeBitMapIntoAllocBitMap();
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      if (thread_local_run->IsFull()) {
        if (kIsDebugBuild) {
          full_runs_[idx].insert(thread_local_run);
          DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::RevokeThreadLocalRuns() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(thread_local_run)
                      << " into full_runs_[" << std::dec << idx << "]";
          }
        }
      } else if (thread_local_run->IsAllFree()) {
        MutexLock mu(self, lock_);
        FreePages(self, thread_local_run);
      } else {
        non_full_runs_[idx].insert(thread_local_run);
        DCHECK(non_full_runs_[idx].find(thread_local_run) != non_full_runs_[idx].end());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::RevokeThreadLocalRuns() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(thread_local_run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
}

void RosAlloc::RevokeAllThreadLocalRuns() {
  // This is called when no mutator thread is allocating, such as at
  // Zygote creation time or during a GC pause.
  MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
  MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
    Thread* t = *it;
    RevokeThreadLocalRuns(t);
  }
}

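// One-time computation of the static size bracket tables. With
// kNumOfSizeBrackets == 34 (as the DCHECKs below imply), the bracket sizes
// come out as:
//   brackets 0..31 : 16, 32, 48, ..., 512 bytes (16-byte steps)
//   bracket 32     : 1 KB
//   bracket 33     : 2 KB
// A worked example of the slot-count search below (assuming
// kPageSize == 4096): for bracket 0, a one-page run of 16-byte slots settles
// on 249 slots, since the 8 fixed header bytes plus three 32-byte bitmaps
// round up to a 112-byte header and 112 + 249 * 16 == 4096.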
void RosAlloc::Initialize() {
  // Check the consistency of the number of size brackets.
  DCHECK_EQ(Thread::kRosAllocNumOfSizeBrackets, kNumOfSizeBrackets);
  // bracketSizes.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 16 * (i + 1);
    } else if (i == kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 1 * KB;
    } else {
      DCHECK(i == kNumOfSizeBrackets - 1);
      bracketSizes[i] = 2 * KB;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
    }
  }
  // numOfPages.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < 4) {
      numOfPages[i] = 1;
    } else if (i < 8) {
      numOfPages[i] = 2;
    } else if (i < 16) {
      numOfPages[i] = 4;
    } else if (i < 32) {
      numOfPages[i] = 8;
    } else if (i == 32) {
      DCHECK(i == kNumOfSizeBrackets - 2);
      numOfPages[i] = 16;
    } else {
      DCHECK(i == kNumOfSizeBrackets - 1);
      numOfPages[i] = 32;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
    }
  }
  // Compute numOfSlots and the header sizes and bitmap offsets.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_t bracket_size = bracketSizes[i];
    size_t run_size = kPageSize * numOfPages[i];
    size_t max_num_of_slots = run_size / bracket_size;
    // Compute the actual number of slots by taking the header and
    // alignment into account.
    size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint32_t));
    DCHECK_EQ(fixed_header_size, static_cast<size_t>(8));
    size_t header_size = 0;
    size_t bulk_free_bit_map_offset = 0;
    size_t thread_local_free_bit_map_offset = 0;
    size_t num_of_slots = 0;
    // Search for the maximum number of slots that allows enough space
    // for the header (including the bit maps).
    for (int s = max_num_of_slots; s >= 0; s--) {
      size_t tmp_slots_size = bracket_size * s;
      size_t tmp_bit_map_size = RoundUp(s, sizeof(uint32_t) * kBitsPerByte) / kBitsPerByte;
      size_t tmp_bulk_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_bulk_free_bit_map_off = fixed_header_size + tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_off = tmp_bulk_free_bit_map_off + tmp_bulk_free_bit_map_size;
      size_t tmp_unaligned_header_size = tmp_thread_local_free_bit_map_off + tmp_thread_local_free_bit_map_size;
      // Align up the unaligned header size. bracket_size may not be a power of two.
      size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
          tmp_unaligned_header_size :
          tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
      DCHECK_EQ(tmp_header_size % bracket_size, static_cast<size_t>(0));
      DCHECK_EQ(tmp_header_size % 8, static_cast<size_t>(0));
      if (tmp_slots_size + tmp_header_size <= run_size) {
        // Found the right number of slots, that is, there was enough
        // space for the header (including the bit maps).
        num_of_slots = s;
        header_size = tmp_header_size;
        bulk_free_bit_map_offset = tmp_bulk_free_bit_map_off;
        thread_local_free_bit_map_offset = tmp_thread_local_free_bit_map_off;
        break;
      }
    }
    DCHECK(num_of_slots > 0 && header_size > 0 && bulk_free_bit_map_offset > 0);
    // Add the padding for the alignment remainder.
    header_size += run_size % bracket_size;
    DCHECK(header_size + num_of_slots * bracket_size == run_size);
    numOfSlots[i] = num_of_slots;
    headerSizes[i] = header_size;
    bulkFreeBitMapOffsets[i] = bulk_free_bit_map_offset;
    threadLocalFreeBitMapOffsets[i] = thread_local_free_bit_map_offset;
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
                << ", headerSizes[" << i << "]=" << headerSizes[i]
                << ", bulkFreeBitMapOffsets[" << i << "]=" << bulkFreeBitMapOffsets[i]
                << ", threadLocalFreeBitMapOffsets[" << i << "]=" << threadLocalFreeBitMapOffsets[i];
    }
  }
}

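// Handlers for InspectAll(): BytesAllocatedCallback accumulates the used
// byte count into the size_t pointed to by arg; ObjectsAllocatedCallback
// counts the allocated objects it is called for.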
void RosAlloc::BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
  *bytes_allocated += used_bytes;
}

void RosAlloc::ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
  ++(*objects_allocated);
}

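// Exhaustively cross-checks the page map against the free page run set and
// the runs, then verifies each run (see Run::Verify below). Requires the
// mutator lock to be exclusively held, since it reads the size of live
// objects.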
void RosAlloc::Verify() {
  Thread* self = Thread::Current();
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
      << "The mutator lock isn't exclusively locked at RosAlloc::Verify()";
  MutexLock mu(self, *Locks::thread_list_lock_);
  WriterMutexLock wmu(self, bulk_free_lock_);
  std::vector<Run*> runs;
  {
    MutexLock mu(self, lock_);
    size_t pm_end = page_map_size_;
    size_t i = 0;
    while (i < pm_end) {
      byte pm = page_map_[i];
      switch (pm) {
        case kPageMapEmpty: {
          // The start of a free page run.
          FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
          DCHECK(fpr->magic_num_ == kMagicNumFree) << "Bad magic number : " << fpr->magic_num_;
          CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
              << "An empty page must belong to the free page run set";
          size_t fpr_size = fpr->ByteSize(this);
          CHECK(IsAligned<kPageSize>(fpr_size))
              << "A free page run size isn't page-aligned : " << fpr_size;
          size_t num_pages = fpr_size / kPageSize;
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "A free page run size must be > 0 : " << fpr_size;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK_EQ(page_map_[j], kPageMapEmpty)
                << "A mismatch between the page map table for kPageMapEmpty"
                << " at page index " << j
                << " and the free page run size : page index range : "
                << i << " to " << (i + num_pages) << std::endl << DumpPageMap();
          }
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObject: {
          // The start of a large object.
          size_t num_pages = 1;
          size_t idx = i + 1;
          while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
            num_pages++;
            idx++;
          }
          void* start = base_ + i * kPageSize;
          mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
          size_t obj_size = obj->SizeOf();
          CHECK(obj_size > kLargeSizeThreshold)
              << "A rosalloc large object size must be > " << kLargeSizeThreshold;
          CHECK_EQ(num_pages, RoundUp(obj_size, kPageSize) / kPageSize)
              << "A rosalloc large object size " << obj_size
              << " does not match the page map table " << (num_pages * kPageSize)
              << std::endl << DumpPageMap();
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObjectPart:
          LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
          break;
        case kPageMapRun: {
          // The start of a run.
          Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
          DCHECK(run->magic_num_ == kMagicNum) << "Bad magic number : " << run->magic_num_;
          size_t idx = run->size_bracket_idx_;
          CHECK(idx < kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
          size_t num_pages = numOfPages[idx];
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "Run size must be > 0 : " << num_pages;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK_EQ(page_map_[j], kPageMapRunPart)
                << "A mismatch between the page map table for kPageMapRunPart"
                << " at page index " << j
                << " and the run size : page index range " << i << " to " << (i + num_pages)
                << std::endl << DumpPageMap();
          }
          runs.push_back(run);
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapRunPart:
          LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
          break;
        default:
          LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
          break;
      }
    }
  }

  // Call Verify() on each run here, after releasing lock_, to respect the lock order.
  for (auto& run : runs) {
    run->Verify(self, this);
  }
}

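// Verifies a single run: the header fields, that the run is in exactly one
// of the expected states (thread-local, current run, or in a run set), and
// that every allocated slot holds an object whose size matches the bracket.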
void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
  DCHECK(magic_num_ == kMagicNum) << "Bad magic number : " << Dump();
  size_t idx = size_bracket_idx_;
  CHECK(idx < kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  CHECK_EQ(slot_base + num_slots * bracket_size,
           reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize)
      << "Mismatch in the end address of the run " << Dump();
  // Check that the bulk free bitmap is clean. It's only used during BulkFree().
  CHECK(IsBulkFreeBitmapClean()) << "The bulk free bit map isn't clean " << Dump();
  // Check the bump index mode, if it's on.
  if (top_slot_idx_ < num_slots) {
    // If the bump index mode is on (top_slot_idx_ < num_slots), then
    // all of the slots after the top index must be free.
    for (size_t i = top_slot_idx_; i < num_slots; ++i) {
      size_t vec_idx = i / 32;
      size_t vec_off = i % 32;
      uint32_t vec = alloc_bit_map_[vec_idx];
      CHECK_EQ((vec & (1 << vec_off)), static_cast<uint32_t>(0))
          << "A slot >= top_slot_idx_ isn't free " << Dump();
    }
  } else {
    CHECK_EQ(top_slot_idx_, num_slots)
        << "If the bump index mode is off, the top index == the number of slots "
        << Dump();
  }
  // Check the thread local runs, the current runs, and the run sets.
  if (is_thread_local_) {
    // If it's a thread local run, then it must be pointed to by an owner thread.
    bool owner_found = false;
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
      Thread* thread = *it;
      for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
        MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
        Run* thread_local_run = reinterpret_cast<Run*>(thread->rosalloc_runs_[i]);
        if (thread_local_run == this) {
          CHECK(!owner_found)
              << "A thread local run has more than one owner thread " << Dump();
          CHECK_EQ(i, idx)
              << "A mismatching size bracket index in a thread local run " << Dump();
          owner_found = true;
        }
      }
    }
    CHECK(owner_found) << "A thread local run has no owner thread " << Dump();
  } else {
    // If it's not thread local, check that the thread local free bitmap is clean.
    CHECK(IsThreadLocalFreeBitmapClean())
        << "A non-thread-local run's thread local free bitmap isn't clean "
        << Dump();
    // Check if it's the current run for the size bracket.
    bool is_current_run = false;
    for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
      MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
      Run* current_run = rosalloc->current_runs_[i];
      if (idx == i) {
        if (this == current_run) {
          is_current_run = true;
        }
      } else {
        // If the size bracket index does not match, then it must not
        // be a current run.
        CHECK_NE(this, current_run)
            << "A current run points to a run with a wrong size bracket index " << Dump();
      }
    }
    // If it's neither a thread-local run nor a current run, then it must be
    // in a run set.
    if (!is_current_run) {
      MutexLock mu(self, rosalloc->lock_);
      std::set<Run*>& non_full_runs = rosalloc->non_full_runs_[idx];
      // If it's all free, it must be a free page run rather than a run.
      CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump();
      if (!IsFull()) {
        // If it's not full, it must be in the non-full run set.
        CHECK(non_full_runs.find(this) != non_full_runs.end())
            << "A non-full run isn't in the non-full run set " << Dump();
      } else {
        // If it's full, it must be in the full run set (debug build only).
        if (kIsDebugBuild) {
          hash_set<Run*, hash_run, eq_run>& full_runs = rosalloc->full_runs_[idx];
          CHECK(full_runs.find(this) != full_runs.end())
              << " A full run isn't in the full run set " << Dump();
        }
      }
    }
  }
  // Check each slot.
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK(num_slots >= slots) << "Out of bounds";
    uint32_t vec = alloc_bit_map_[v];
    uint32_t thread_local_free_vec = ThreadLocalFreeBitMap()[v];
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      // If it's a thread-local run, slots may be marked as freed in the
      // thread local free bitmap.
      bool is_thread_local_freed = is_thread_local_ && ((thread_local_free_vec >> i) & 0x1) != 0;
      if (is_allocated && !is_thread_local_freed) {
        byte* slot_addr = slot_base + (slots + i) * bracket_size;
        mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
        size_t obj_size = obj->SizeOf();
        CHECK_LE(obj_size, kLargeSizeThreshold)
            << "A run slot contains a large object " << Dump();
        CHECK_EQ(SizeToIndex(obj_size), idx)
            << "A run slot contains an object with wrong size " << Dump();
      }
    }
  }
}

}  // namespace allocator
}  // namespace gc
}  // namespace art