/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base/mutex-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "rosalloc.h"

#include <map>
#include <list>
#include <vector>

namespace art {
namespace gc {
namespace allocator {

extern "C" void* art_heap_rosalloc_morecore(RosAlloc* rosalloc, intptr_t increment);

size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
size_t RosAlloc::bulkFreeBitMapOffsets[kNumOfSizeBrackets];
size_t RosAlloc::threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;

RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                   PageReleaseMode page_release_mode, size_t page_release_size_threshold)
    : base_(reinterpret_cast<byte*>(base)), footprint_(capacity),
      capacity_(capacity), max_capacity_(max_capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
      page_release_mode_(page_release_mode),
      page_release_size_threshold_(page_release_size_threshold) {
  DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
  DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
  CHECK_LE(capacity, max_capacity);
  CHECK(IsAligned<kPageSize>(page_release_size_threshold_));
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << (intptr_t)base_ << ", end="
             << std::hex << (intptr_t)(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_
             << ", max_capacity=" << std::dec << max_capacity_;
  memset(current_runs_, 0, sizeof(current_runs_));
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_locks_[i] = new Mutex("a rosalloc size bracket lock",
                                       kRosAllocBracketLock);
  }
  DCHECK_EQ(footprint_, capacity_);
  size_t num_of_pages = footprint_ / kPageSize;
  size_t max_num_of_pages = max_capacity_ / kPageSize;
  std::string error_msg;
  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", NULL, RoundUp(max_num_of_pages, kPageSize),
                                               PROT_READ | PROT_WRITE, false, &error_msg));
  CHECK(page_map_mem_map_.get() != NULL) << "Couldn't allocate the page map : " << error_msg;
  page_map_ = page_map_mem_map_->Begin();
  page_map_size_ = num_of_pages;
  max_page_map_size_ = max_num_of_pages;
  free_page_run_size_map_.resize(num_of_pages);
  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  DCHECK(free_pages->IsFree());
  free_pages->ReleasePages(this);
  DCHECK(free_pages->IsFree());
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}
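
// Editor's note (not part of the original file): an illustrative
// construction/use sketch. The MemMap name, sizes, and release mode chosen
// here are assumptions for the example; in ART the real wiring lives in the
// RosAlloc-backed space, and only entry points defined in this file are
// exercised below.
//
//   std::string error_msg;
//   UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(
//       "rosalloc sketch heap", NULL, 8 * MB, PROT_READ | PROT_WRITE, false, &error_msg));
//   RosAlloc rosalloc(mem_map->Begin(), 4 * MB, 8 * MB,
//                     RosAlloc::kPageReleaseModeSizeAndEnd, kPageSize);
//   size_t bytes_allocated = 0;
//   void* obj = rosalloc.AllocFromRun(Thread::Current(), 64, &bytes_allocated);
//   rosalloc.Free(Thread::Current(), obj);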

RosAlloc::~RosAlloc() {
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    delete size_bracket_locks_[i];
  }
}

void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
  lock_.AssertHeld(self);
  DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
  FreePageRun* res = NULL;
  size_t req_byte_size = num_pages * kPageSize;
  // Find the lowest address free page run that's large enough.
  for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
    FreePageRun* fpr = *it;
    DCHECK(fpr->IsFree());
    size_t fpr_byte_size = fpr->ByteSize(this);
    DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
    if (req_byte_size <= fpr_byte_size) {
      // Found one.
      free_page_runs_.erase(it++);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Don't need to call madvise on remainder here.
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
      break;
    } else {
      ++it;
    }
  }

  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == NULL && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = NULL;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK_EQ(page_map_[ToPageMapIndex(last_free_page_run)], kPageMapEmpty);
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_size_, new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_size_ = new_num_of_pages;
      DCHECK_LE(page_map_size_, max_page_map_size_);
      free_page_run_size_map_.resize(new_num_of_pages);
      art_heap_rosalloc_morecore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK_EQ(last_free_page_run->End(this), base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK_EQ(*free_page_runs_.rbegin(), new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != NULL);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != NULL)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK_EQ(page_map_[page_map_idx + i], kPageMapEmpty);
    }
    switch (page_map_type) {
      case kPageMapRun:
        page_map_[page_map_idx] = kPageMapRun;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapRunPart;
        }
        break;
      case kPageMapLargeObject:
        page_map_[page_map_idx] = kPageMapLargeObject;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
        }
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_type;
        break;
    }
    if (kIsDebugBuild) {
      // Clear the first page which isn't madvised away in the debug
      // build for the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : NULL";
  }
  return nullptr;
}
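
// Editor's worked example of the first-fit-with-split logic above (not in
// the original file; the page numbers are made up and 4 KB pages are
// assumed): if free_page_runs_ holds a 5-page run covering pages [3, 8) and
// two pages are requested, the run is erased, a 3-page remainder covering
// pages [5, 8) is re-inserted, and pages [3, 5) are returned after setting
//   page_map_[3] = kPageMapRun;      // or kPageMapLargeObject
//   page_map_[4] = kPageMapRunPart;  // or kPageMapLargeObjectPart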

void RosAlloc::FreePages(Thread* self, void* ptr) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK_LT(pm_idx, page_map_size_);
  byte pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  byte pm_part_type;
  switch (pm_type) {
    case kPageMapRun:
      pm_part_type = kPageMapRunPart;
      break;
    case kPageMapLargeObject:
      pm_part_type = kPageMapLargeObjectPart;
      break;
    default:
      pm_part_type = kPageMapEmpty;
      LOG(FATAL) << "Unreachable - RosAlloc::FreePages() : " << "pm_idx=" << pm_idx << ", pm_type="
                 << static_cast<int>(pm_type) << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      return;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_size_;
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, num_pages * kPageSize);
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_size_ : ToPageMapIndex(fpr->End(this))) << "]";
    }
    auto higher_it = free_page_runs_.upper_bound(fpr);
    if (higher_it != free_page_runs_.end()) {
      for (auto it = higher_it; it != free_page_runs_.end(); ) {
        FreePageRun* h = *it;
        DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                    << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
        }
        if (fpr->End(this) == h->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it++);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(h)
                      << " from free_page_runs_";
          }
          fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
          DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
      }
    }
    // Try to coalesce in the lower address direction.
    auto lower_it = free_page_runs_.upper_bound(fpr);
    if (lower_it != free_page_runs_.begin()) {
      --lower_it;
      for (auto it = lower_it; ; ) {
        // We want to try to coalesce with the first element but
        // there's no "<=" operator for the iterator.
        bool to_exit_loop = it == free_page_runs_.begin();

        FreePageRun* l = *it;
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                    << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
        }
        if (l->End(this) == fpr->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it--);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(l)
                      << " from free_page_runs_";
          }
          l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
          DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
          fpr = l;
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
        if (to_exit_loop) {
          break;
        }
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  DCHECK(fpr->IsFree());
  fpr->ReleasePages(this);
  DCHECK(fpr->IsFree());
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
}
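
// Editor's worked example of the coalescing above (not in the original file;
// the page numbers are made up): freeing pages [5, 7) while free_page_runs_
// already holds runs [2, 5) and [7, 9) first merges forward into [5, 9),
// then backward into a single run [2, 9). Because free_page_runs_ is an
// address-ordered std::set, upper_bound(fpr) yields the nearest higher run,
// and stepping that iterator back once yields the nearest lower run.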

void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK_GT(size, kLargeSizeThreshold);
  size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
  void* r;
  {
    MutexLock mu(self, lock_);
    r = AllocPages(self, num_pages, kPageMapLargeObject);
  }
  if (UNLIKELY(r == nullptr)) {
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
    }
    return nullptr;
  }
  if (bytes_allocated != NULL) {
    *bytes_allocated = num_pages * kPageSize;
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
              << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }
  if (!DoesReleaseAllPages()) {
    // If it does not release all pages, pages may not be zeroed out.
    memset(r, 0, size);
  }
  // Check if the returned memory is really all zero.
  if (kCheckZeroMemory) {
    byte* bytes = reinterpret_cast<byte*>(r);
    for (size_t i = 0; i < size; ++i) {
      DCHECK_EQ(bytes[i], 0);
    }
  }
  return r;
}

void RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  bool free_from_run = false;
  Run* run = NULL;
  {
    MutexLock mu(self, lock_);
    DCHECK_LT(pm_idx, page_map_size_);
    byte page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_[pm_idx]) {
      case kPageMapEmpty:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return;
      case kPageMapLargeObject:
        FreePages(self, ptr);
        return;
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return;
      case kPageMapRun:
      case kPageMapRunPart: {
        free_from_run = true;
        size_t pi = pm_idx;
        DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
        // Find the beginning of the run.
        while (page_map_[pi] != kPageMapRun) {
          pi--;
          DCHECK_LT(pi, capacity_ / kPageSize);
        }
        DCHECK_EQ(page_map_[pi], kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        break;
      }
      default:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return;
    }
  }
  if (LIKELY(free_from_run)) {
    DCHECK(run != NULL);
    FreeFromRun(self, ptr, run);
  }
}

void RosAlloc::Free(Thread* self, void* ptr) {
  ReaderMutexLock rmu(self, bulk_free_lock_);
  FreeInternal(self, ptr);
}

RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  Run* new_run;
  size_t num_pages = numOfPages[idx];
  // Get the lowest address non-full run from the binary tree.
  Run* temp = NULL;
  std::set<Run*>* bt = &non_full_runs_[idx];
  std::set<Run*>::iterator found = bt->lower_bound(temp);
  if (found != bt->end()) {
    // If there's one, use it as the current run.
    Run* non_full_run = *found;
    DCHECK(non_full_run != NULL);
    new_run = non_full_run;
    DCHECK_EQ(new_run->is_thread_local_, 0);
    bt->erase(found);
    DCHECK_EQ(non_full_run->is_thread_local_, 0);
  } else {
    // If there's none, allocate a new run and use it as the
    // current run.
    {
      MutexLock mu(self, lock_);
      new_run = reinterpret_cast<Run*>(AllocPages(self, num_pages, kPageMapRun));
    }
    if (new_run == NULL) {
      return NULL;
    }
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    new_run->top_slot_idx_ = 0;
    new_run->ClearBitMaps();
    new_run->to_be_bulk_freed_ = false;
  }
  return new_run;
}

void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK_LE(size, bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);

  void* slot_addr;

  if (LIKELY(idx <= kMaxThreadLocalSizeBracketIdx)) {
    // Use a thread-local run.
    Run* thread_local_run = reinterpret_cast<Run*>(self->rosalloc_runs_[idx]);
    if (UNLIKELY(thread_local_run == NULL)) {
      MutexLock mu(self, *size_bracket_locks_[idx]);
      thread_local_run = RefillRun(self, idx);
      if (UNLIKELY(thread_local_run == NULL)) {
        return NULL;
      }
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      thread_local_run->is_thread_local_ = 1;
      self->rosalloc_runs_[idx] = thread_local_run;
      DCHECK(!thread_local_run->IsFull());
    }

    DCHECK(thread_local_run != NULL);
    DCHECK_NE(thread_local_run->is_thread_local_, 0);
    slot_addr = thread_local_run->AllocSlot();

    if (UNLIKELY(slot_addr == NULL)) {
      // The run got full. Try to free slots.
      DCHECK(thread_local_run->IsFull());
      MutexLock mu(self, *size_bracket_locks_[idx]);
      bool is_all_free_after_merge;
      if (thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&is_all_free_after_merge)) {
        // Some slot got freed. Keep it.
        DCHECK(!thread_local_run->IsFull());
        DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
        if (is_all_free_after_merge) {
          // Reinstate the bump index mode if it's all free.
          DCHECK_EQ(thread_local_run->top_slot_idx_, numOfSlots[idx]);
          thread_local_run->top_slot_idx_ = 0;
        }
      } else {
        // No slots got freed. Try to refill the thread-local run.
        DCHECK(thread_local_run->IsFull());
        self->rosalloc_runs_[idx] = NULL;
        thread_local_run->is_thread_local_ = 0;
        if (kIsDebugBuild) {
          full_runs_[idx].insert(thread_local_run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(thread_local_run)
                      << " into full_runs_[" << std::dec << idx << "]";
          }
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
        thread_local_run = RefillRun(self, idx);
        if (UNLIKELY(thread_local_run == NULL)) {
          return NULL;
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
        thread_local_run->is_thread_local_ = 1;
        self->rosalloc_runs_[idx] = thread_local_run;
        DCHECK(!thread_local_run->IsFull());
      }

      DCHECK(thread_local_run != NULL);
      DCHECK(!thread_local_run->IsFull());
      DCHECK_NE(thread_local_run->is_thread_local_, 0);
      slot_addr = thread_local_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != NULL);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  } else {
    // Use the (shared) current run.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* current_run = current_runs_[idx];
    if (UNLIKELY(current_run == NULL)) {
      current_run = RefillRun(self, idx);
      if (UNLIKELY(current_run == NULL)) {
        return NULL;
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
      current_run->is_thread_local_ = 0;
      current_runs_[idx] = current_run;
      DCHECK(!current_run->IsFull());
    }
    DCHECK(current_run != NULL);
    slot_addr = current_run->AllocSlot();
    if (UNLIKELY(slot_addr == NULL)) {
      // The current run got full. Try to refill it.
      DCHECK(current_run->IsFull());
      current_runs_[idx] = NULL;
      if (kIsDebugBuild) {
        // Insert it into full_runs and set the current run to NULL.
        full_runs_[idx].insert(current_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(current_run)
                    << " into full_runs_[" << std::dec << idx << "]";
        }
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
      current_run = RefillRun(self, idx);
      if (UNLIKELY(current_run == NULL)) {
        return NULL;
      }
      DCHECK(current_run != NULL);
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
      current_run->is_thread_local_ = 0;
      current_runs_[idx] = current_run;
      DCHECK(!current_run->IsFull());
      slot_addr = current_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != NULL);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  }
  if (LIKELY(bytes_allocated != NULL)) {
    *bytes_allocated = bracket_size;
  }
  memset(slot_addr, 0, size);
  return slot_addr;
}
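
// Editor's note on the fast path above (not in the original file): for
// brackets up to kMaxThreadLocalSizeBracketIdx, the common case takes no
// lock at all, roughly:
//
//   Run* tl_run = reinterpret_cast<Run*>(self->rosalloc_runs_[idx]);
//   void* slot = tl_run->AllocSlot();  // thread-private bump/bit-map search
//
// Only when the cached run is missing or full does the code take the size
// bracket lock to merge the thread-local free bit map or refill via
// RefillRun().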

void RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
  DCHECK_EQ(run->magic_num_, kMagicNum);
  DCHECK_LT(run, ptr);
  DCHECK_LT(ptr, run->End());
  size_t idx = run->size_bracket_idx_;
  MutexLock mu(self, *size_bracket_locks_[idx]);
  bool run_was_full = false;
  if (kIsDebugBuild) {
    run_was_full = run->IsFull();
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
  }
  if (LIKELY(run->is_thread_local_ != 0)) {
    // It's a thread-local run. Just mark the thread-local free bit map and return.
    DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->MarkThreadLocalFreeBitMap(ptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run);
    }
    // A thread local run will be kept as a thread local even if it's become all free.
    return;
  }
  // Free the slot in the run.
  run->FreeSlot(ptr);
  std::set<Run*>* non_full_runs = &non_full_runs_[idx];
  if (run->IsAllFree()) {
    // It has just become completely free. Free the pages of this run.
    std::set<Run*>::iterator pos = non_full_runs->find(run);
    if (pos != non_full_runs->end()) {
      non_full_runs->erase(pos);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
      }
    }
    if (run == current_runs_[idx]) {
      current_runs_[idx] = NULL;
    }
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    {
      MutexLock mu(self, lock_);
      FreePages(self, run);
    }
  } else {
    // It is not completely free. If it wasn't the current run or
    // already in the non-full run set (i.e., it was full), insert it
    // into the non-full run set.
    if (run != current_runs_[idx]) {
      hash_set<Run*, hash_run, eq_run>* full_runs =
          kIsDebugBuild ? &full_runs_[idx] : NULL;
      std::set<Run*>::iterator pos = non_full_runs->find(run);
      if (pos == non_full_runs->end()) {
        DCHECK(run_was_full);
        DCHECK(full_runs->find(run) != full_runs->end());
        if (kIsDebugBuild) {
          full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run) << " from full_runs_";
          }
        }
        non_full_runs->insert(run);
        DCHECK(!run->IsFull());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
}

std::string RosAlloc::Run::BitMapToStr(uint32_t* bit_map_base, size_t num_vec) {
  std::string bit_map_str;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = bit_map_base[v];
    if (v != num_vec - 1) {
      bit_map_str.append(StringPrintf("%x-", vec));
    } else {
      bit_map_str.append(StringPrintf("%x", vec));
    }
  }
  return bit_map_str;
}

std::string RosAlloc::Run::Dump() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  std::ostringstream stream;
  stream << "RosAlloc Run = " << reinterpret_cast<void*>(this)
         << "{ magic_num=" << static_cast<int>(magic_num_)
         << " size_bracket_idx=" << idx
         << " is_thread_local=" << static_cast<int>(is_thread_local_)
         << " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_)
         << " top_slot_idx=" << top_slot_idx_
         << " alloc_bit_map=" << BitMapToStr(alloc_bit_map_, num_vec)
         << " bulk_free_bit_map=" << BitMapToStr(BulkFreeBitMap(), num_vec)
         << " thread_local_bit_map=" << BitMapToStr(ThreadLocalFreeBitMap(), num_vec)
         << " }" << std::endl;
  return stream.str();
}

void* RosAlloc::Run::AllocSlot() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  DCHECK_LE(top_slot_idx_, num_slots);
  if (LIKELY(top_slot_idx_ < num_slots)) {
    // If it's in bump index mode, grab the top slot and increment the top index.
    size_t slot_idx = top_slot_idx_;
    byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
    }
    top_slot_idx_++;
    size_t vec_idx = slot_idx / 32;
    size_t vec_off = slot_idx % 32;
    uint32_t* vec = &alloc_bit_map_[vec_idx];
    DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
    *vec |= 1 << vec_off;
    DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
    return slot_addr;
  }
  // Not in bump index mode. Search the alloc bit map for an empty slot.
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slot_idx = 0;
  bool found_slot = false;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t *vecp = &alloc_bit_map_[v];
    uint32_t ffz1 = __builtin_ffs(~*vecp);
    uint32_t ffz;
    // TODO: Use LIKELY or UNLIKELY here?
    if (LIKELY(ffz1 > 0 && (ffz = ffz1 - 1) + v * 32 < num_slots)) {
      // Found an empty slot. Set the bit.
      DCHECK_EQ((*vecp & (1 << ffz)), static_cast<uint32_t>(0));
      *vecp |= (1 << ffz);
      DCHECK_NE((*vecp & (1 << ffz)), static_cast<uint32_t>(0));
      slot_idx = ffz + v * 32;
      found_slot = true;
      break;
    }
  }
  if (LIKELY(found_slot)) {
    byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
    }
    return slot_addr;
  }
  return NULL;
}
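
// Editor's worked example of the bit-map scan above (not in the original
// file): if alloc_bit_map_[v] == 0x0000000f, slots 0-3 of that vector are
// taken, so ~vec == 0xfffffff0 and __builtin_ffs(~vec) returns 5 (the
// 1-based index of the lowest set bit). Thus ffz == 4 and the free slot
// index is 4 + v * 32. A return of 0 from __builtin_ffs means the vector is
// completely full and the scan moves on to the next vector.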

inline void RosAlloc::Run::FreeSlot(void* ptr) {
  DCHECK_EQ(is_thread_local_, 0);
  byte idx = size_bracket_idx_;
  size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
  DCHECK_LT(slot_idx, numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK_LT(vec_idx, num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &alloc_bit_map_[vec_idx];
  DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  *vec &= ~(1 << vec_off);
  DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::FreeSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out) {
  DCHECK_NE(is_thread_local_, 0);
  // Free slots in the alloc bit map based on the thread local free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  bool changed = false;
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* tl_free_vecp = &ThreadLocalFreeBitMap()[0];
  bool is_all_free_after = true;
  for (size_t v = 0; v < num_vec; v++, vecp++, tl_free_vecp++) {
    uint32_t tl_free_vec = *tl_free_vecp;
    uint32_t vec_before = *vecp;
    uint32_t vec_after;
    if (tl_free_vec != 0) {
      vec_after = vec_before & ~tl_free_vec;
      *vecp = vec_after;
      changed = true;
      *tl_free_vecp = 0;  // clear the thread local free bit map.
    } else {
      vec_after = vec_before;
    }
    if (vec_after != 0) {
      is_all_free_after = false;
    }
    DCHECK_EQ(*tl_free_vecp, static_cast<uint32_t>(0));
  }
  *is_all_free_after_out = is_all_free_after;
  // Return true if there was at least a bit set in the thread-local
  // free bit map and at least a bit in the alloc bit map changed.
  return changed;
}

inline void RosAlloc::Run::MergeBulkFreeBitMapIntoAllocBitMap() {
  DCHECK_EQ(is_thread_local_, 0);
  // Free slots in the alloc bit map based on the bulk free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* free_vecp = &BulkFreeBitMap()[0];
  for (size_t v = 0; v < num_vec; v++, vecp++, free_vecp++) {
    uint32_t free_vec = *free_vecp;
    if (free_vec != 0) {
      *vecp &= ~free_vec;
      *free_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*free_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::UnionBulkFreeBitMapToThreadLocalFreeBitMap() {
  DCHECK_NE(is_thread_local_, 0);
  // Union the thread local bit map with the bulk free bit map.
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  uint32_t* to_vecp = &ThreadLocalFreeBitMap()[0];
  uint32_t* from_vecp = &BulkFreeBitMap()[0];
  for (size_t v = 0; v < num_vec; v++, to_vecp++, from_vecp++) {
    uint32_t from_vec = *from_vecp;
    if (from_vec != 0) {
      *to_vecp |= from_vec;
      *from_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*from_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::MarkThreadLocalFreeBitMap(void* ptr) {
  DCHECK_NE(is_thread_local_, 0);
  MarkFreeBitMapShared(ptr, ThreadLocalFreeBitMap(), "MarkThreadLocalFreeBitMap");
}
inline void RosAlloc::Run::MarkBulkFreeBitMap(void* ptr) {
  MarkFreeBitMapShared(ptr, BulkFreeBitMap(), "MarkBulkFreeBitMap");
}

inline void RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
                                                const char* caller_name) {
  byte idx = size_bracket_idx_;
  size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
  DCHECK_LT(slot_idx, numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK_LT(vec_idx, num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &free_bit_map_base[vec_idx];
  DCHECK_EQ((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  *vec |= 1 << vec_off;
  DCHECK_NE((*vec & (1 << vec_off)), static_cast<uint32_t>(0));
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : 0x" << std::hex
              << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

inline bool RosAlloc::Run::IsAllFree() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = alloc_bit_map_[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsFull() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK_GE(num_slots, slots);
    uint32_t vec = alloc_bit_map_[v];
    uint32_t mask = (num_slots - slots >= 32) ? static_cast<uint32_t>(-1)
        : (1 << (num_slots - slots)) - 1;
    if ((num_slots - slots) >= 32) {
      DCHECK_EQ(mask, static_cast<uint32_t>(-1));
    }
    if (vec != mask) {
      return false;
    }
  }
  return true;
}
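
// Editor's worked example of the full-run mask above (not in the original
// file): with num_slots == 40, the first vector must equal 0xffffffff (all
// 32 of its slots taken) and the second must equal (1 << 8) - 1 == 0xff for
// the 8 remaining slots; any other vector value means at least one slot is
// free and the run is not full.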

inline bool RosAlloc::Run::IsBulkFreeBitmapClean() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = BulkFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsThreadLocalFreeBitmapClean() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = ThreadLocalFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline void RosAlloc::Run::ClearBitMaps() {
  byte idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  memset(alloc_bit_map_, 0, sizeof(uint32_t) * num_vec * 3);
}

void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                                    void* arg) {
  size_t idx = size_bracket_idx_;
  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  DCHECK_EQ(slot_base + num_slots * bracket_size, reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize);
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK_GE(num_slots, slots);
    uint32_t vec = alloc_bit_map_[v];
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      byte* slot_addr = slot_base + (slots + i) * bracket_size;
      if (is_allocated) {
        handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
      } else {
        handler(slot_addr, slot_addr + bracket_size, 0, arg);
      }
    }
  }
}

// If true, read the page map entries in BulkFree() without using the
// lock for better performance, assuming that the existence of an
// allocated chunk/pointer being freed in BulkFree() guarantees that
// the page map entry won't change. Disabled for now.
static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = false;

void RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
  if (false) {
    // Used only to test Free() as GC uses only BulkFree().
    for (size_t i = 0; i < num_ptrs; ++i) {
      FreeInternal(self, ptrs[i]);
    }
    return;
  }

  WriterMutexLock wmu(self, bulk_free_lock_);

  // First mark slots to free in the bulk free bit map without locking the
  // size bracket locks. On host, hash_set is faster than vector + flag.
#ifdef HAVE_ANDROID_OS
  std::vector<Run*> runs;
#else
  hash_set<Run*, hash_run, eq_run> runs;
#endif
  for (size_t i = 0; i < num_ptrs; i++) {
    void* ptr = ptrs[i];
    ptrs[i] = NULL;
    DCHECK_LE(base_, ptr);
    DCHECK_LT(ptr, base_ + footprint_);
    size_t pm_idx = RoundDownToPageMapIndex(ptr);
    Run* run = NULL;
    if (kReadPageMapEntryWithoutLockInBulkFree) {
      // Read the page map entries without locking the lock.
      byte page_map_entry = page_map_[pm_idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                  << std::dec << pm_idx
                  << ", page_map_entry=" << static_cast<int>(page_map_entry);
      }
      if (LIKELY(page_map_entry == kPageMapRun)) {
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
      } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
        size_t pi = pm_idx;
        DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
        // Find the beginning of the run.
        while (page_map_[pi] != kPageMapRun) {
          pi--;
          DCHECK_LT(pi, capacity_ / kPageSize);
        }
        DCHECK_EQ(page_map_[pi], kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
      } else if (page_map_entry == kPageMapLargeObject) {
        MutexLock mu(self, lock_);
        FreePages(self, ptr);
        continue;
      } else {
        LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
      }
      DCHECK(run != NULL);
      // Set the bit in the bulk free bit map.
      run->MarkBulkFreeBitMap(ptr);
#ifdef HAVE_ANDROID_OS
      if (!run->to_be_bulk_freed_) {
        run->to_be_bulk_freed_ = true;
        runs.push_back(run);
      }
#else
      runs.insert(run);
#endif
    } else {
      // Read the page map entries with a lock.
      bool free_from_run = false;
      {
        MutexLock mu(self, lock_);
        DCHECK_LT(pm_idx, page_map_size_);
        byte page_map_entry = page_map_[pm_idx];
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                    << std::dec << pm_idx
                    << ", page_map_entry=" << static_cast<int>(page_map_entry);
        }
        if (LIKELY(page_map_entry == kPageMapRun)) {
          free_from_run = true;
          run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
          DCHECK_EQ(run->magic_num_, kMagicNum);
        } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
          free_from_run = true;
          size_t pi = pm_idx;
          DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
          // Find the beginning of the run.
          while (page_map_[pi] != kPageMapRun) {
            pi--;
            DCHECK_LT(pi, capacity_ / kPageSize);
          }
          DCHECK_EQ(page_map_[pi], kPageMapRun);
          run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
          DCHECK_EQ(run->magic_num_, kMagicNum);
        } else if (page_map_entry == kPageMapLargeObject) {
          FreePages(self, ptr);
        } else {
          LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
        }
      }
      if (LIKELY(free_from_run)) {
        DCHECK(run != NULL);
        // Set the bit in the bulk free bit map.
        run->MarkBulkFreeBitMap(ptr);
#ifdef HAVE_ANDROID_OS
        if (!run->to_be_bulk_freed_) {
          run->to_be_bulk_freed_ = true;
          runs.push_back(run);
        }
#else
        runs.insert(run);
#endif
      }
    }
  }

  // Now, iterate over the affected runs and update the alloc bit map
  // based on the bulk free bit map (for non-thread-local runs) and
  // union the bulk free bit map into the thread-local free bit map
  // (for thread-local runs.)
#ifdef HAVE_ANDROID_OS
  typedef std::vector<Run*>::iterator It;
#else
  typedef hash_set<Run*, hash_run, eq_run>::iterator It;
#endif
  for (It it = runs.begin(); it != runs.end(); ++it) {
    Run* run = *it;
#ifdef HAVE_ANDROID_OS
    DCHECK(run->to_be_bulk_freed_);
    run->to_be_bulk_freed_ = false;
#endif
    size_t idx = run->size_bracket_idx_;
    MutexLock mu(self, *size_bracket_locks_[idx]);
    if (run->is_thread_local_ != 0) {
      DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
      DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
      run->UnionBulkFreeBitMapToThreadLocalFreeBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(run);
      }
      DCHECK_NE(run->is_thread_local_, 0);
      // A thread local run will be kept as a thread local even if
      // it's become all free.
    } else {
      bool run_was_full = run->IsFull();
      run->MergeBulkFreeBitMapIntoAllocBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run);
      }
      // Check if the run should be moved to non_full_runs_ or
      // free_page_runs_.
      std::set<Run*>* non_full_runs = &non_full_runs_[idx];
      hash_set<Run*, hash_run, eq_run>* full_runs =
          kIsDebugBuild ? &full_runs_[idx] : NULL;
      if (run->IsAllFree()) {
        // It has just become completely free. Free the pages of the
        // run.
        bool run_was_current = run == current_runs_[idx];
        if (run_was_current) {
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          // If it was a current run, reuse it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only.)
          if (kIsDebugBuild) {
            hash_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
            DCHECK(pos != full_runs->end());
            full_runs->erase(pos);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
            DCHECK(full_runs->find(run) == full_runs->end());
          }
        } else {
          // If it was in a non full run set, remove it from the set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
          non_full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " from non_full_runs_";
          }
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
        }
        if (!run_was_current) {
          MutexLock mu(self, lock_);
          FreePages(self, run);
        }
      } else {
        // It is not completely free. If it wasn't the current run or
        // already in the non-full run set (i.e., it was full), insert
        // it into the non-full run set.
        if (run == current_runs_[idx]) {
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          DCHECK(full_runs->find(run) == full_runs->end());
          // If it was a current run, keep it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only) and insert into the non-full run set.
          DCHECK(full_runs->find(run) != full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          if (kIsDebugBuild) {
            full_runs->erase(run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
          }
          non_full_runs->insert(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
1299 << " into non_full_runs_[" << std::dec << idx;
          }
        } else {
          // It was not full, so leave it in the non-full run set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
        }
      }
    }
  }
}
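
// Editor's usage sketch for BulkFree() (not in the original file; the call
// site below is hypothetical, though in ART it is the GC sweep path that
// batches dead objects this way):
//
//   void* dead[2] = { obj_a, obj_b };  // pointers previously returned by this allocator
//   rosalloc->BulkFree(Thread::Current(), dead, 2);
//
// Marking the bulk free bit maps first and merging per affected run
// afterwards takes each size bracket lock once per run rather than once per
// freed pointer.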
1310
std::string RosAlloc::DumpPageMap() {
  std::ostringstream stream;
  stream << "RosAlloc PageMap: " << std::endl;
  lock_.AssertHeld(Thread::Current());
  size_t end = page_map_size_;
  FreePageRun* curr_fpr = NULL;
  size_t curr_fpr_size = 0;
  size_t remaining_curr_fpr_size = 0;
  size_t num_running_empty_pages = 0;
  for (size_t i = 0; i < end; ++i) {
    byte pm = page_map_[i];
    switch (pm) {
      case kPageMapEmpty: {
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
          // Encountered a fresh free page run.
          DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
          DCHECK(fpr->IsFree());
          DCHECK(curr_fpr == NULL);
          DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
          curr_fpr = fpr;
          curr_fpr_size = fpr->ByteSize(this);
          DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
          remaining_curr_fpr_size = curr_fpr_size - kPageSize;
          stream << "[" << i << "]=Empty (FPR start)"
                 << " fpr_size=" << curr_fpr_size
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = NULL;
            curr_fpr_size = 0;
          }
          stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
          DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
        } else {
          // Still part of the current free page run.
          DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
          DCHECK(curr_fpr != NULL && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
          DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
          DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
          remaining_curr_fpr_size -= kPageSize;
          stream << "[" << i << "]=Empty (FPR part)"
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = NULL;
            curr_fpr_size = 0;
          }
        }
        num_running_empty_pages++;
        break;
      }
      case kPageMapLargeObject: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (start)" << std::endl;
        break;
      }
      case kPageMapLargeObjectPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (part)" << std::endl;
        break;
      case kPageMapRun: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        size_t idx = run->size_bracket_idx_;
        stream << "[" << i << "]=Run (start)"
               << " idx=" << idx
               << " numOfPages=" << numOfPages[idx]
               << " thread_local=" << static_cast<int>(run->is_thread_local_)
               << " is_all_free=" << (run->IsAllFree() ? 1 : 0)
               << std::endl;
        break;
      }
      case kPageMapRunPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Run (part)" << std::endl;
        break;
      default:
        stream << "[" << i << "]=Unrecognizable page map type: " << pm << std::endl;
        break;
    }
  }
  return stream.str();
}

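// UsableSize() reports the usable size of an allocation: the full bracket
// size for a run slot (e.g., a 13-byte request served from the 16-byte
// bracket reports 16), or the whole page span for a large object (e.g., a
// 5000-byte large object on 4 KB pages reports 8192).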
size_t RosAlloc::UsableSize(void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  MutexLock mu(Thread::Current(), lock_);
  switch (page_map_[pm_idx]) {
    case kPageMapEmpty:
      LOG(FATAL) << "Unreachable - RosAlloc::UsableSize(): pm_idx=" << pm_idx << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapLargeObject: {
      size_t num_pages = 1;
      size_t idx = pm_idx + 1;
      size_t end = page_map_size_;
      while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
        num_pages++;
        idx++;
      }
      return num_pages * kPageSize;
    }
    case kPageMapLargeObjectPart:
      LOG(FATAL) << "Unreachable - RosAlloc::UsableSize(): pm_idx=" << pm_idx << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapRun:
    case kPageMapRunPart: {
      // Find the beginning of the run.
      while (page_map_[pm_idx] != kPageMapRun) {
        pm_idx--;
        DCHECK_LT(pm_idx, capacity_ / kPageSize);
      }
      DCHECK_EQ(page_map_[pm_idx], kPageMapRun);
      Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      DCHECK_EQ(run->magic_num_, kMagicNum);
      size_t idx = run->size_bracket_idx_;
      size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
          - (reinterpret_cast<byte*>(run) + headerSizes[idx]);
      DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
      return IndexToBracketSize(idx);
    }
    default:
      LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
      break;
  }
  return 0;
}

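// Trim() gives back the tail of the heap: if the last free page run ends
// exactly at the current footprint, those pages are returned via morecore
// and the now-unused tail of the page map is zeroed and madvised away. For
// example (assuming 4 KB pages), a 40-page footprint whose last free page
// run covers 8 pages shrinks to a 32-page footprint and a 32-entry page map.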
bool RosAlloc::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  FreePageRun* last_free_page_run;
  DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
  auto it = free_page_runs_.rbegin();
  if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
    // Remove the last free page run, if any.
    DCHECK(last_free_page_run->IsFree());
    DCHECK_EQ(page_map_[ToPageMapIndex(last_free_page_run)], kPageMapEmpty);
    DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
    DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
    free_page_runs_.erase(last_free_page_run);
    size_t decrement = last_free_page_run->ByteSize(this);
    size_t new_footprint = footprint_ - decrement;
    DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
    size_t new_num_of_pages = new_footprint / kPageSize;
    DCHECK_GE(page_map_size_, new_num_of_pages);
    // Zero out the tail of the page map.
    byte* zero_begin = page_map_ + new_num_of_pages;
    byte* madvise_begin = AlignUp(zero_begin, kPageSize);
    DCHECK_LE(madvise_begin, page_map_mem_map_->End());
    size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
    if (madvise_size > 0) {
      DCHECK_ALIGNED(madvise_begin, kPageSize);
      DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
      CHECK_EQ(madvise(madvise_begin, madvise_size, MADV_DONTNEED), 0);
    }
    if (madvise_begin > zero_begin) {
      memset(zero_begin, 0, madvise_begin - zero_begin);
    }
    page_map_size_ = new_num_of_pages;
    free_page_run_size_map_.resize(new_num_of_pages);
    DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
    art_heap_rosalloc_morecore(this, -(static_cast<intptr_t>(decrement)));
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
                << footprint_ << " to " << new_footprint;
    }
    DCHECK_LT(new_footprint, footprint_);
    DCHECK_LT(new_footprint, capacity_);
    footprint_ = new_footprint;
    return true;
  }
  return false;
}

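// InspectAll() walks the page map and invokes the handler once per region:
// free page runs are reported with used_bytes == 0, large objects with
// their full page span, and run slots individually via InspectAllSlots().
// BytesAllocatedCallback() and ObjectsAllocatedCallback() below are
// ready-made handlers.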
void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                          void* arg) {
  // Note: no need to use this to release pages as we already do so in FreePages().
  if (handler == NULL) {
    return;
  }
  MutexLock mu(Thread::Current(), lock_);
  size_t pm_end = page_map_size_;
  size_t i = 0;
  while (i < pm_end) {
    byte pm = page_map_[i];
    switch (pm) {
      case kPageMapEmpty: {
        // The start of a free page run.
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
        size_t fpr_size = fpr->ByteSize(this);
        DCHECK(IsAligned<kPageSize>(fpr_size));
        void* start = fpr;
        if (kIsDebugBuild) {
          // In the debug build, the first page of a free page run
          // contains a magic number for debugging. Exclude it.
          start = reinterpret_cast<byte*>(fpr) + kPageSize;
        }
        void* end = reinterpret_cast<byte*>(fpr) + fpr_size;
        handler(start, end, 0, arg);
        size_t num_pages = fpr_size / kPageSize;
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapEmpty);
          }
        }
        i += fpr_size / kPageSize;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObject: {
        // The start of a large object.
        size_t num_pages = 1;
        size_t idx = i + 1;
        while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
          num_pages++;
          idx++;
        }
        void* start = base_ + i * kPageSize;
        void* end = base_ + (i + num_pages) * kPageSize;
        size_t used_bytes = num_pages * kPageSize;
        handler(start, end, used_bytes, arg);
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
      case kPageMapRun: {
        // The start of a run.
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        run->InspectAllSlots(handler, arg);
        size_t num_pages = numOfPages[run->size_bracket_idx_];
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapRunPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapRunPart:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
    }
  }
}

size_t RosAlloc::Footprint() {
  MutexLock mu(Thread::Current(), lock_);
  return footprint_;
}

size_t RosAlloc::FootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return capacity_;
}

void RosAlloc::SetFootprintLimit(size_t new_capacity) {
  MutexLock mu(Thread::Current(), lock_);
  DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
  // Only growing is supported here; shrinking is done via Trim().
  if (capacity_ < new_capacity) {
    CHECK_LE(new_capacity, max_capacity_);
    capacity_ = new_capacity;
    VLOG(heap) << "new capacity=" << capacity_;
  }
}

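// Revoking a thread local run detaches it from the thread: slots the thread
// freed locally are merged from the thread local free bit map back into the
// allocation bit map, the thread local flag is cleared, and the run is
// either freed (if empty) or reinserted into the full/non-full run sets
// depending on how occupied it still is.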
void RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
  Thread* self = Thread::Current();
  // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
  WriterMutexLock wmu(self, bulk_free_lock_);
  for (size_t idx = 0; idx < kNumOfSizeBrackets; idx++) {
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* thread_local_run = reinterpret_cast<Run*>(thread->rosalloc_runs_[idx]);
    if (thread_local_run != NULL) {
      DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
      DCHECK_NE(thread_local_run->is_thread_local_, 0);
      thread->rosalloc_runs_[idx] = NULL;
      // Note the thread local run may not be full here.
      bool dont_care;
      thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&dont_care);
      thread_local_run->is_thread_local_ = 0;
      thread_local_run->MergeBulkFreeBitMapIntoAllocBitMap();
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      if (thread_local_run->IsFull()) {
        if (kIsDebugBuild) {
          full_runs_[idx].insert(thread_local_run);
          DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::RevokeThreadLocalRuns() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(thread_local_run)
                      << " into full_runs_[" << std::dec << idx << "]";
          }
        }
      } else if (thread_local_run->IsAllFree()) {
        MutexLock mu(self, lock_);
        FreePages(self, thread_local_run);
      } else {
        non_full_runs_[idx].insert(thread_local_run);
        DCHECK(non_full_runs_[idx].find(thread_local_run) != non_full_runs_[idx].end());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::RevokeThreadLocalRuns() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(thread_local_run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
}

void RosAlloc::RevokeAllThreadLocalRuns() {
  // This is called when a mutator thread won't allocate, such as at
  // Zygote creation time or during a GC pause.
  MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
  MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
    Thread* t = *it;
    RevokeThreadLocalRuns(t);
  }
}

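// The bracket sizes form a fixed ladder: brackets 0 through
// kNumOfSizeBrackets - 3 are 16, 32, 48, ... bytes (16-byte steps), the
// second-to-last bracket is 1 KB, and the last is 2 KB. Run sizes grow with
// the bracket, from 1 page for the smallest brackets up to 32 pages for the
// 2 KB bracket.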
void RosAlloc::Initialize() {
  // Check the consistency of the number of size brackets.
  DCHECK_EQ(Thread::kRosAllocNumOfSizeBrackets, kNumOfSizeBrackets);
  // bracketSizes.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 16 * (i + 1);
    } else if (i == kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 1 * KB;
    } else {
      DCHECK_EQ(i, kNumOfSizeBrackets - 1);
      bracketSizes[i] = 2 * KB;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
    }
  }
  // numOfPages.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < 4) {
      numOfPages[i] = 1;
    } else if (i < 8) {
      numOfPages[i] = 2;
    } else if (i < 16) {
      numOfPages[i] = 4;
    } else if (i < 32) {
      numOfPages[i] = 8;
    } else if (i == 32) {
      DCHECK_EQ(i, kNumOfSizeBrackets - 2);
      numOfPages[i] = 16;
    } else {
      DCHECK_EQ(i, kNumOfSizeBrackets - 1);
      numOfPages[i] = 32;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
    }
  }
  // Compute numOfSlots, headerSizes, and the bit map offsets.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_t bracket_size = bracketSizes[i];
    size_t run_size = kPageSize * numOfPages[i];
    size_t max_num_of_slots = run_size / bracket_size;
    // Compute the actual number of slots by taking the header and
    // alignment into account.
    size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint32_t));
    DCHECK_EQ(fixed_header_size, static_cast<size_t>(8));
    size_t header_size = 0;
    size_t bulk_free_bit_map_offset = 0;
    size_t thread_local_free_bit_map_offset = 0;
    size_t num_of_slots = 0;
    // Search for the maximum number of slots that allows enough space
    // for the header (including the bit maps.)
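    // For example, for bracket 0 (16-byte slots, one 4096-byte page): at
    // s = 249, each of the three bit maps is RoundUp(249, 32) / 8 = 32
    // bytes, so the unaligned header is 8 + 3 * 32 = 104 bytes, which
    // rounds up to 112 (a multiple of 16). Since 249 * 16 + 112 = 4096
    // fits exactly, the search below settles on 249 slots and a 112-byte
    // header for that bracket.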
    for (int s = max_num_of_slots; s >= 0; s--) {
      size_t tmp_slots_size = bracket_size * s;
      size_t tmp_bit_map_size = RoundUp(s, sizeof(uint32_t) * kBitsPerByte) / kBitsPerByte;
      size_t tmp_bulk_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_bulk_free_bit_map_off = fixed_header_size + tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_off = tmp_bulk_free_bit_map_off + tmp_bulk_free_bit_map_size;
      size_t tmp_unaligned_header_size = tmp_thread_local_free_bit_map_off + tmp_thread_local_free_bit_map_size;
      // Align up the unaligned header size. bracket_size may not be a power of two.
      size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
          tmp_unaligned_header_size :
          tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
      DCHECK_EQ(tmp_header_size % bracket_size, static_cast<size_t>(0));
      DCHECK_EQ(tmp_header_size % 8, static_cast<size_t>(0));
      if (tmp_slots_size + tmp_header_size <= run_size) {
        // Found the right number of slots, that is, there was enough
        // space for the header (including the bit maps.)
        num_of_slots = s;
        header_size = tmp_header_size;
        bulk_free_bit_map_offset = tmp_bulk_free_bit_map_off;
        thread_local_free_bit_map_offset = tmp_thread_local_free_bit_map_off;
        break;
      }
    }
    DCHECK(num_of_slots > 0 && header_size > 0 && bulk_free_bit_map_offset > 0);
    // Add the padding for the alignment remainder.
    header_size += run_size % bracket_size;
    DCHECK_EQ(header_size + num_of_slots * bracket_size, run_size);
    numOfSlots[i] = num_of_slots;
    headerSizes[i] = header_size;
    bulkFreeBitMapOffsets[i] = bulk_free_bit_map_offset;
    threadLocalFreeBitMapOffsets[i] = thread_local_free_bit_map_offset;
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
                << ", headerSizes[" << i << "]=" << headerSizes[i]
                << ", bulkFreeBitMapOffsets[" << i << "]=" << bulkFreeBitMapOffsets[i]
                << ", threadLocalFreeBitMapOffsets[" << i << "]=" << threadLocalFreeBitMapOffsets[i];
    }
  }
}

void RosAlloc::BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
  *bytes_allocated += used_bytes;
}

void RosAlloc::ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
  ++(*objects_allocated);
}

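// As a usage sketch (assuming the caller satisfies whatever locking its
// context requires), totaling the allocated bytes looks like:
//
//   size_t bytes_allocated = 0;
//   rosalloc->InspectAll(RosAlloc::BytesAllocatedCallback, &bytes_allocated);
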
void RosAlloc::Verify() {
  Thread* self = Thread::Current();
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
      << "The mutator lock isn't exclusively held at RosAlloc::Verify()";
  MutexLock mu(self, *Locks::thread_list_lock_);
  WriterMutexLock wmu(self, bulk_free_lock_);
  std::vector<Run*> runs;
  {
    MutexLock mu(self, lock_);
    size_t pm_end = page_map_size_;
    size_t i = 0;
    while (i < pm_end) {
      byte pm = page_map_[i];
      switch (pm) {
        case kPageMapEmpty: {
          // The start of a free page run.
          FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
          DCHECK_EQ(fpr->magic_num_, kMagicNumFree);
          CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
              << "An empty page must belong to the free page run set";
          size_t fpr_size = fpr->ByteSize(this);
          CHECK(IsAligned<kPageSize>(fpr_size))
              << "A free page run size isn't page-aligned : " << fpr_size;
          size_t num_pages = fpr_size / kPageSize;
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "A free page run size must be > 0 : " << fpr_size;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK_EQ(page_map_[j], kPageMapEmpty)
                << "A mismatch between the page map table for kPageMapEmpty "
                << " at page index " << j
                << " and the free page run size : page index range : "
                << i << " to " << (i + num_pages) << std::endl << DumpPageMap();
          }
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObject: {
          // The start of a large object.
          size_t num_pages = 1;
          size_t idx = i + 1;
          while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
            num_pages++;
            idx++;
          }
          void* start = base_ + i * kPageSize;
          mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
          size_t obj_size = obj->SizeOf();
          CHECK_GT(obj_size, kLargeSizeThreshold)
              << "A rosalloc large object size must be > " << kLargeSizeThreshold;
          CHECK_EQ(num_pages, RoundUp(obj_size, kPageSize) / kPageSize)
              << "A rosalloc large object size " << obj_size
              << " does not match the page map table " << (num_pages * kPageSize)
              << std::endl << DumpPageMap();
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObjectPart:
          LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
          break;
        case kPageMapRun: {
          // The start of a run.
          Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
          DCHECK_EQ(run->magic_num_, kMagicNum);
          size_t idx = run->size_bracket_idx_;
          CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
          size_t num_pages = numOfPages[idx];
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "Run size must be > 0 : " << num_pages;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK_EQ(page_map_[j], kPageMapRunPart)
                << "A mismatch between the page map table for kPageMapRunPart "
                << " at page index " << j
                << " and the run size : page index range " << i << " to " << (i + num_pages)
                << std::endl << DumpPageMap();
          }
          runs.push_back(run);
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapRunPart:
          LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
          break;
        default:
          LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
          break;
      }
    }
  }

  // Call Verify() on the runs here, after lock_ has been released, to
  // respect the lock order.
  for (auto& run : runs) {
    run->Verify(self, this);
  }
}

void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
  DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
  size_t idx = size_bracket_idx_;
  CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  CHECK_EQ(slot_base + num_slots * bracket_size,
           reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize)
      << "Mismatch in the end address of the run " << Dump();
  // Check that the bulk free bitmap is clean. It's only used during BulkFree().
  CHECK(IsBulkFreeBitmapClean()) << "The bulk free bit map isn't clean " << Dump();
  // Check the bump index mode, if it's on.
  if (top_slot_idx_ < num_slots) {
    // If the bump index mode is on (top_slot_idx_ < num_slots), then
    // all of the slots after the top index must be free.
    for (size_t i = top_slot_idx_; i < num_slots; ++i) {
      size_t vec_idx = i / 32;
      size_t vec_off = i % 32;
      uint32_t vec = alloc_bit_map_[vec_idx];
      CHECK_EQ((vec & (1 << vec_off)), static_cast<uint32_t>(0))
          << "A slot >= top_slot_idx_ isn't free " << Dump();
    }
  } else {
    CHECK_EQ(top_slot_idx_, num_slots)
        << "If the bump index mode is off, the top index == the number of slots "
        << Dump();
  }
  // Check the thread local runs, the current runs, and the run sets.
  if (is_thread_local_) {
    // If it's a thread local run, then it must be pointed to by an owner thread.
    bool owner_found = false;
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
      Thread* thread = *it;
      for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
        MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
        Run* thread_local_run = reinterpret_cast<Run*>(thread->rosalloc_runs_[i]);
        if (thread_local_run == this) {
          CHECK(!owner_found)
              << "A thread local run has more than one owner thread " << Dump();
          CHECK_EQ(i, idx)
              << "A mismatching size bracket index in a thread local run " << Dump();
          owner_found = true;
        }
      }
    }
    CHECK(owner_found) << "A thread local run has no owner thread " << Dump();
  } else {
    // If it's not thread local, check that the thread local free bitmap is clean.
    CHECK(IsThreadLocalFreeBitmapClean())
        << "A non-thread-local run's thread local free bitmap isn't clean "
        << Dump();
    // Check if it's a current run for the size bracket.
    bool is_current_run = false;
    for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
      MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
      Run* current_run = rosalloc->current_runs_[i];
      if (idx == i) {
        if (this == current_run) {
          is_current_run = true;
        }
      } else {
        // If the size bracket index does not match, then it must not
        // be a current run.
        CHECK_NE(this, current_run)
            << "A current run points to a run with a wrong size bracket index " << Dump();
      }
    }
    // If it's neither a thread local run nor a current run, then it must be
    // in a run set.
    if (!is_current_run) {
      MutexLock mu(self, rosalloc->lock_);
      std::set<Run*>& non_full_runs = rosalloc->non_full_runs_[idx];
      // If it's all free, it must be a free page run rather than a run.
      CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump();
      if (!IsFull()) {
        // If it's not full, it must be in the non-full run set.
        CHECK(non_full_runs.find(this) != non_full_runs.end())
            << "A non-full run isn't in the non-full run set " << Dump();
      } else {
        // If it's full, it must be in the full run set (debug build only.)
        if (kIsDebugBuild) {
          hash_set<Run*, hash_run, eq_run>& full_runs = rosalloc->full_runs_[idx];
          CHECK(full_runs.find(this) != full_runs.end())
              << "A full run isn't in the full run set " << Dump();
        }
      }
    }
  }
  // Check each slot.
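  // Each 32-bit word of alloc_bit_map_ covers 32 slots: bit (i % 32) of
  // word (i / 32) is set iff slot i is allocated. The last word may have
  // unused high bits when num_slots isn't a multiple of 32.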
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK_GE(num_slots, slots) << "Out of bounds";
    uint32_t vec = alloc_bit_map_[v];
    uint32_t thread_local_free_vec = ThreadLocalFreeBitMap()[v];
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      // If a thread local run, slots may be marked freed in the
      // thread local free bitmap.
      bool is_thread_local_freed = is_thread_local_ && ((thread_local_free_vec >> i) & 0x1) != 0;
      if (is_allocated && !is_thread_local_freed) {
        byte* slot_addr = slot_base + (slots + i) * bracket_size;
        mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
        size_t obj_size = obj->SizeOf();
        CHECK_LE(obj_size, kLargeSizeThreshold)
            << "A run slot contains a large object " << Dump();
        CHECK_EQ(SizeToIndex(obj_size), idx)
            << "A run slot contains an object with wrong size " << Dump();
      }
    }
  }
}

}  // namespace allocator
}  // namespace gc
}  // namespace art