/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base/mutex-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "rosalloc.h"

#include <map>
#include <list>
#include <vector>

namespace art {
namespace gc {
namespace allocator {

extern "C" void* art_heap_rosalloc_morecore(RosAlloc* rosalloc, intptr_t increment);

static constexpr bool kUsePrefetchDuringAllocRun = true;
static constexpr bool kPrefetchNewRunDataByZeroing = false;
static constexpr size_t kPrefetchStride = 64;

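// Per-size-bracket metadata, filled in by Initialize() (which is not shown in this section).
// Each bracket maps a range of small request sizes to a fixed slot size; requests above
// kLargeSizeThreshold bypass runs entirely and take the AllocLargeObject()/AllocPages() path
// below.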
size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
size_t RosAlloc::bulkFreeBitMapOffsets[kNumOfSizeBrackets];
size_t RosAlloc::threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;
size_t RosAlloc::dedicated_full_run_storage_[kPageSize / sizeof(size_t)] = { 0 };
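// dedicated_full_run_ is a page of statically zeroed storage reinterpreted as a Run.
// Initialize() (not shown here) is expected to set it up so that its alloc bitmap reads as
// completely full; threads point their thread-local run slots at it by default, so the fast
// path in AllocFromRun() needs no null check and simply falls over to the slow path on first
// use.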
RosAlloc::Run* RosAlloc::dedicated_full_run_ =
    reinterpret_cast<RosAlloc::Run*>(dedicated_full_run_storage_);

RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                   PageReleaseMode page_release_mode, size_t page_release_size_threshold)
    : base_(reinterpret_cast<byte*>(base)), footprint_(capacity),
      capacity_(capacity), max_capacity_(max_capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
      page_release_mode_(page_release_mode),
      page_release_size_threshold_(page_release_size_threshold) {
  DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
  DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
  CHECK_LE(capacity, max_capacity);
  CHECK(IsAligned<kPageSize>(page_release_size_threshold_));
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << (intptr_t)base_ << ", end="
             << std::hex << (intptr_t)(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_
             << ", max_capacity=" << std::dec << max_capacity_;
  memset(current_runs_, 0, sizeof(current_runs_));
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_lock_names[i] =
        StringPrintf("a rosalloc size bracket %d lock", static_cast<int>(i));
    size_bracket_locks_[i] = new Mutex(size_bracket_lock_names[i].c_str(), kRosAllocBracketLock);
  }
  DCHECK_EQ(footprint_, capacity_);
  size_t num_of_pages = footprint_ / kPageSize;
  size_t max_num_of_pages = max_capacity_ / kPageSize;
  std::string error_msg;
  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", NULL,
                                               RoundUp(max_num_of_pages, kPageSize),
                                               PROT_READ | PROT_WRITE, false, &error_msg));
  CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
  page_map_ = page_map_mem_map_->Begin();
  page_map_size_ = num_of_pages;
  max_page_map_size_ = max_num_of_pages;
  free_page_run_size_map_.resize(num_of_pages);
  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  DCHECK(free_pages->IsFree());
  free_pages->ReleasePages(this);
  DCHECK(free_pages->IsFree());
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}

RosAlloc::~RosAlloc() {
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    delete size_bracket_locks_[i];
  }
}

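// Page-level allocator. page_map_ keeps one byte of state per page (kPageMapEmpty,
// kPageMapRun/kPageMapRunPart, kPageMapLargeObject/kPageMapLargeObjectPart), so the extent of
// any allocation can be recovered from its first page. free_page_runs_ is an address-ordered
// set of FreePageRun headers; scanning it from begin() below makes this a lowest-address
// first-fit search.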
void* RosAlloc::AllocPages(Thread* self, size_t num_pages, byte page_map_type) {
  lock_.AssertHeld(self);
  DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
  FreePageRun* res = NULL;
  const size_t req_byte_size = num_pages * kPageSize;
  // Find the lowest address free page run that's large enough.
  for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
    FreePageRun* fpr = *it;
    DCHECK(fpr->IsFree());
    size_t fpr_byte_size = fpr->ByteSize(this);
    DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
    if (req_byte_size <= fpr_byte_size) {
      // Found one.
      free_page_runs_.erase(it++);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Don't need to call madvise on remainder here.
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
      break;
    } else {
      ++it;
    }
  }

  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == NULL && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = NULL;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK_EQ(page_map_[ToPageMapIndex(last_free_page_run)], kPageMapEmpty);
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_size_, new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_size_ = new_num_of_pages;
      DCHECK_LE(page_map_size_, max_page_map_size_);
      free_page_run_size_map_.resize(new_num_of_pages);
      art_heap_rosalloc_morecore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK_EQ(last_free_page_run->End(this), base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK_EQ(*free_page_runs_.rbegin(), new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != NULL);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<byte*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != NULL)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK_EQ(page_map_[page_map_idx + i], kPageMapEmpty);
    }
    switch (page_map_type) {
      case kPageMapRun:
        page_map_[page_map_idx] = kPageMapRun;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapRunPart;
        }
        break;
      case kPageMapLargeObject:
        page_map_[page_map_idx] = kPageMapLargeObject;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
        }
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_type;
        break;
    }
    if (kIsDebugBuild) {
      // Clear the first page since it is not madvised due to the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : NULL";
  }
  return nullptr;
}

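// The freed pages are turned back into a FreePageRun and coalesced with any adjacent free
// runs on both sides (found via upper_bound() on the address-ordered set), so free_page_runs_
// never holds two contiguous entries.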
size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK_LT(pm_idx, page_map_size_);
  byte pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  byte pm_part_type;
  switch (pm_type) {
    case kPageMapRun:
      pm_part_type = kPageMapRunPart;
      break;
    case kPageMapLargeObject:
      pm_part_type = kPageMapLargeObjectPart;
      break;
    default:
      pm_part_type = kPageMapEmpty;
      LOG(FATAL) << "Unreachable - RosAlloc::FreePages() : " << "pm_idx=" << pm_idx << ", pm_type="
                 << static_cast<int>(pm_type) << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      return 0;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_size_;
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }
  const size_t byte_size = num_pages * kPageSize;
  if (already_zero) {
    if (kCheckZeroMemory) {
      const uword* word_ptr = reinterpret_cast<uword*>(ptr);
      for (size_t i = 0; i < byte_size / sizeof(uword); ++i) {
        CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
      }
    }
  } else if (!DoesReleaseAllPages()) {
    memset(ptr, 0, byte_size);
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + byte_size)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, byte_size);
  DCHECK(IsAligned<kPageSize>(fpr->ByteSize(this)));

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_size_ : ToPageMapIndex(fpr->End(this))) << "]";
    }
    auto higher_it = free_page_runs_.upper_bound(fpr);
    if (higher_it != free_page_runs_.end()) {
      for (auto it = higher_it; it != free_page_runs_.end(); ) {
        FreePageRun* h = *it;
        DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                    << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
        }
        if (fpr->End(this) == h->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          // Clear magic num since this is no longer the start of a free page run.
          if (kIsDebugBuild) {
            h->magic_num_ = 0;
          }
          free_page_runs_.erase(it++);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(h)
                      << " from free_page_runs_";
          }
          fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
          DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
      }
    }
    // Try to coalesce in the lower address direction.
    auto lower_it = free_page_runs_.upper_bound(fpr);
    if (lower_it != free_page_runs_.begin()) {
      --lower_it;
      for (auto it = lower_it; ; ) {
        // We want to try to coalesce with the first element but
        // there's no "<=" operator for the iterator.
        bool to_exit_loop = it == free_page_runs_.begin();

        FreePageRun* l = *it;
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                    << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
        }
        if (l->End(this) == fpr->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it--);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(l)
                      << " from free_page_runs_";
          }
          l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
          DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
          // Clear magic num since this is no longer the start of a free page run.
          if (kIsDebugBuild) {
            fpr->magic_num_ = 0;
          }
          fpr = l;
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
        if (to_exit_loop) {
          break;
        }
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  DCHECK(fpr->IsFree());
  fpr->ReleasePages(this);
  DCHECK(fpr->IsFree());
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
  return byte_size;
}

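// Requests above kLargeSizeThreshold skip the run machinery: they are rounded up to whole
// pages and served directly by AllocPages() under the global lock.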
void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK_GT(size, kLargeSizeThreshold);
  size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
  void* r;
  {
    MutexLock mu(self, lock_);
    r = AllocPages(self, num_pages, kPageMapLargeObject);
  }
  if (UNLIKELY(r == nullptr)) {
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
    }
    return nullptr;
  }
  const size_t total_bytes = num_pages * kPageSize;
  *bytes_allocated = total_bytes;
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
              << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }
  // Check if the returned memory is really all zero.
  if (kCheckZeroMemory) {
    CHECK_EQ(total_bytes % sizeof(uword), 0U);
    const uword* words = reinterpret_cast<uword*>(r);
    for (size_t i = 0; i < total_bytes / sizeof(uword); ++i) {
      CHECK_EQ(words[i], 0U);
    }
  }
  return r;
}

size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  Run* run = nullptr;
  {
    MutexLock mu(self, lock_);
    DCHECK_LT(pm_idx, page_map_size_);
    byte page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_[pm_idx]) {
      case kPageMapEmpty:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return 0;
      case kPageMapLargeObject:
        return FreePages(self, ptr, false);
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return 0;
      case kPageMapRun:
      case kPageMapRunPart: {
        size_t pi = pm_idx;
        DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
        // Find the beginning of the run.
        while (page_map_[pi] != kPageMapRun) {
          pi--;
          DCHECK_LT(pi, capacity_ / kPageSize);
        }
        DCHECK_EQ(page_map_[pi], kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        break;
      }
      default:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return 0;
    }
  }
  DCHECK(run != nullptr);
  return FreeFromRun(self, ptr, run);
}

size_t RosAlloc::Free(Thread* self, void* ptr) {
  ReaderMutexLock rmu(self, bulk_free_lock_);
  return FreeInternal(self, ptr);
}

RosAlloc::Run* RosAlloc::AllocRun(Thread* self, size_t idx) {
  RosAlloc::Run* new_run = nullptr;
  {
    MutexLock mu(self, lock_);
    new_run = reinterpret_cast<Run*>(AllocPages(self, numOfPages[idx], kPageMapRun));
  }
  if (LIKELY(new_run != nullptr)) {
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    new_run->SetAllocBitMapBitsForInvalidSlots();
    DCHECK(!new_run->IsThreadLocal());
    DCHECK_EQ(new_run->first_search_vec_idx_, 0U);
    DCHECK(!new_run->to_be_bulk_freed_);
    if (kUsePrefetchDuringAllocRun && idx <= kMaxThreadLocalSizeBracketIdx) {
      // Take ownership of the cache lines since this is likely to become a thread-local run.
      if (kPrefetchNewRunDataByZeroing) {
        // Zeroing the data is sometimes faster than prefetching but it increases memory usage
        // since we end up dirtying zero pages which may have been madvised.
        new_run->ZeroData();
      } else {
        const size_t num_of_slots = numOfSlots[idx];
        const size_t bracket_size = bracketSizes[idx];
        const size_t num_of_bytes = num_of_slots * bracket_size;
        byte* begin = reinterpret_cast<byte*>(new_run) + headerSizes[idx];
        for (size_t i = 0; i < num_of_bytes; i += kPrefetchStride) {
          __builtin_prefetch(begin + i);
        }
      }
    }
  }
  return new_run;
}

RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  // Get the lowest address non-full run from the binary tree.
  std::set<Run*>* const bt = &non_full_runs_[idx];
  if (!bt->empty()) {
    // If there's one, use it as the current run.
    auto it = bt->begin();
    Run* non_full_run = *it;
    DCHECK(non_full_run != nullptr);
    DCHECK(!non_full_run->IsThreadLocal());
    bt->erase(it);
    return non_full_run;
  }
  // If there's none, allocate a new run and use it as the current run.
  return AllocRun(self, idx);
}

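// Small allocations are served from runs in two tiers. For brackets up to
// kMaxThreadLocalSizeBracketIdx each thread caches its own run and allocates from it with no
// locking; frees into such a run are recorded in its thread-local free bitmap under the
// bracket lock and merged back by the owning thread once AllocSlot() fails. Larger brackets
// share one current run per bracket, protected by that bracket's lock.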
void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK_LE(size, bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);

  void* slot_addr;

  if (LIKELY(idx <= kMaxThreadLocalSizeBracketIdx)) {
    // Use a thread-local run.
    Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
    // Allow invalid since this will always fail the allocation.
    if (kIsDebugBuild) {
      // Need the lock to prevent race conditions.
      MutexLock mu(self, *size_bracket_locks_[idx]);
      CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
    }
    DCHECK(thread_local_run != nullptr);
    DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
    slot_addr = thread_local_run->AllocSlot();
    // The allocation must fail if the run is invalid.
    DCHECK(thread_local_run != dedicated_full_run_ || slot_addr == nullptr)
        << "allocated from an invalid run";
    if (UNLIKELY(slot_addr == nullptr)) {
      // The run got full. Try to free slots.
      DCHECK(thread_local_run->IsFull());
      MutexLock mu(self, *size_bracket_locks_[idx]);
      bool is_all_free_after_merge;
      // This is safe to do for the dedicated_full_run_ since the bitmaps are empty.
      if (thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&is_all_free_after_merge)) {
        DCHECK_NE(thread_local_run, dedicated_full_run_);
        // Some slot got freed. Keep it.
        DCHECK(!thread_local_run->IsFull());
        DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
        if (is_all_free_after_merge) {
          // Check that the bitmap idx is back at 0 if it's all free.
          DCHECK_EQ(thread_local_run->first_search_vec_idx_, 0U);
        }
      } else {
        // No slots got freed. Try to refill the thread-local run.
        DCHECK(thread_local_run->IsFull());
        if (thread_local_run != dedicated_full_run_) {
          self->SetRosAllocRun(idx, dedicated_full_run_);
          thread_local_run->SetIsThreadLocal(false);
          if (kIsDebugBuild) {
            full_runs_[idx].insert(thread_local_run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(thread_local_run)
                        << " into full_runs_[" << std::dec << idx << "]";
            }
          }
          DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
          DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
        }

        thread_local_run = RefillRun(self, idx);
        if (UNLIKELY(thread_local_run == NULL)) {
          return NULL;
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
        thread_local_run->SetIsThreadLocal(true);
        self->SetRosAllocRun(idx, thread_local_run);
        DCHECK(!thread_local_run->IsFull());
      }

      DCHECK(thread_local_run != NULL);
      DCHECK(!thread_local_run->IsFull());
      DCHECK(thread_local_run->IsThreadLocal());
      slot_addr = thread_local_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != NULL);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  } else {
    // Use the (shared) current run.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* current_run = current_runs_[idx];
    if (UNLIKELY(current_run == NULL)) {
      current_run = RefillRun(self, idx);
      if (UNLIKELY(current_run == NULL)) {
        return NULL;
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
      current_run->SetIsThreadLocal(false);
      current_runs_[idx] = current_run;
      DCHECK(!current_run->IsFull());
    }
    DCHECK(current_run != NULL);
    slot_addr = current_run->AllocSlot();
    if (UNLIKELY(slot_addr == NULL)) {
      // The current run got full. Try to refill it.
      DCHECK(current_run->IsFull());
      current_runs_[idx] = NULL;
      if (kIsDebugBuild) {
        // Insert it into full_runs and set the current run to NULL.
        full_runs_[idx].insert(current_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(current_run)
                    << " into full_runs_[" << std::dec << idx << "]";
        }
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
      current_run = RefillRun(self, idx);
      if (UNLIKELY(current_run == NULL)) {
        return NULL;
      }
      DCHECK(current_run != NULL);
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
      current_run->SetIsThreadLocal(false);
      current_runs_[idx] = current_run;
      DCHECK(!current_run->IsFull());
      slot_addr = current_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != NULL);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  }
  DCHECK(bytes_allocated != nullptr);
  *bytes_allocated = bracket_size;
  // Caller verifies that it is all 0.
  return slot_addr;
}

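// Freeing a slot takes one of two paths. For a thread-local run the free is only recorded in
// the thread-local free bitmap and reconciled later by the owner, so the run stays
// thread-local even if it becomes empty. For a shared run the slot is freed immediately and
// the run migrates between full_runs_ (tracked in debug builds only), non_full_runs_, and,
// once completely empty, back to the page allocator.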
size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
  DCHECK_EQ(run->magic_num_, kMagicNum);
  DCHECK_LT(run, ptr);
  DCHECK_LT(ptr, run->End());
  const size_t idx = run->size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  bool run_was_full = false;
  MutexLock mu(self, *size_bracket_locks_[idx]);
  if (kIsDebugBuild) {
    run_was_full = run->IsFull();
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
  }
  if (LIKELY(run->IsThreadLocal())) {
    // It's a thread-local run. Just mark the thread-local free bit map and return.
    DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->MarkThreadLocalFreeBitMap(ptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run);
    }
    // A thread local run will be kept as a thread local even if it's become all free.
    return bracket_size;
  }
  // Free the slot in the run.
  run->FreeSlot(ptr);
  std::set<Run*>* non_full_runs = &non_full_runs_[idx];
  if (run->IsAllFree()) {
    // It has just become completely free. Free the pages of this run.
    std::set<Run*>::iterator pos = non_full_runs->find(run);
    if (pos != non_full_runs->end()) {
      non_full_runs->erase(pos);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
      }
    }
    if (run == current_runs_[idx]) {
      current_runs_[idx] = NULL;
    }
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->ZeroHeader();
    {
      MutexLock mu(self, lock_);
      FreePages(self, run, true);
    }
  } else {
    // It is not completely free. If it wasn't the current run or
    // already in the non-full run set (i.e., it was full) insert it
    // into the non-full run set.
    if (run != current_runs_[idx]) {
      hash_set<Run*, hash_run, eq_run>* full_runs =
          kIsDebugBuild ? &full_runs_[idx] : NULL;
      std::set<Run*>::iterator pos = non_full_runs->find(run);
      if (pos == non_full_runs->end()) {
        DCHECK(run_was_full);
        DCHECK(full_runs->find(run) != full_runs->end());
        if (kIsDebugBuild) {
          full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run) << " from full_runs_";
          }
        }
        non_full_runs->insert(run);
        DCHECK(!run->IsFull());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
  return bracket_size;
}

std::string RosAlloc::Run::BitMapToStr(uint32_t* bit_map_base, size_t num_vec) {
  std::string bit_map_str;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = bit_map_base[v];
    if (v != num_vec - 1) {
      bit_map_str.append(StringPrintf("%x-", vec));
    } else {
      bit_map_str.append(StringPrintf("%x", vec));
    }
  }
  return bit_map_str;
}

std::string RosAlloc::Run::Dump() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  std::ostringstream stream;
  stream << "RosAlloc Run = " << reinterpret_cast<void*>(this)
         << "{ magic_num=" << static_cast<int>(magic_num_)
         << " size_bracket_idx=" << idx
         << " is_thread_local=" << static_cast<int>(is_thread_local_)
         << " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_)
         << " first_search_vec_idx=" << first_search_vec_idx_
         << " alloc_bit_map=" << BitMapToStr(alloc_bit_map_, num_vec)
         << " bulk_free_bit_map=" << BitMapToStr(BulkFreeBitMap(), num_vec)
         << " thread_local_bit_map=" << BitMapToStr(ThreadLocalFreeBitMap(), num_vec)
         << " }" << std::endl;
  return stream.str();
}

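// The slot allocator scans the alloc bitmap one 32-bit vector at a time starting at
// first_search_vec_idx_, a cursor that advances as vectors fill up and is pulled back by
// frees. Each probe inverts the vector and uses __builtin_ffs() to locate the lowest clear
// bit. Illustrative example: if a vector holds 0xFFFF7FFF, then ~vec == 0x00008000 and
// __builtin_ffs() returns 16, so bit 15 is free and the slot index is
// 15 + 32 * first_search_vec_idx_.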
inline void* RosAlloc::Run::AllocSlot() {
  const size_t idx = size_bracket_idx_;
  while (true) {
    if (kIsDebugBuild) {
      // Make sure that no slots leaked; the bitmap should be full for all previous vectors.
      for (size_t i = 0; i < first_search_vec_idx_; ++i) {
        CHECK_EQ(~alloc_bit_map_[i], 0U);
      }
    }
    uint32_t* const alloc_bitmap_ptr = &alloc_bit_map_[first_search_vec_idx_];
    uint32_t ffz1 = __builtin_ffs(~*alloc_bitmap_ptr);
    if (LIKELY(ffz1 != 0)) {
      const uint32_t ffz = ffz1 - 1;
      const uint32_t slot_idx = ffz + first_search_vec_idx_ * sizeof(*alloc_bitmap_ptr) * kBitsPerByte;
      const uint32_t mask = 1U << ffz;
      DCHECK_LT(slot_idx, numOfSlots[idx]) << "out of range";
      // Found an empty slot. Set the bit.
      DCHECK_EQ(*alloc_bitmap_ptr & mask, 0U);
      *alloc_bitmap_ptr |= mask;
      DCHECK_NE(*alloc_bitmap_ptr & mask, 0U);
      byte* slot_addr = reinterpret_cast<byte*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                  << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
      }
      return slot_addr;
    }
    const size_t num_words = RoundUp(numOfSlots[idx], 32) / 32;
    if (first_search_vec_idx_ + 1 >= num_words) {
      DCHECK(IsFull());
      // Already at the last word, return null.
      return nullptr;
    }
    // Increase the index to the next word and try again.
    ++first_search_vec_idx_;
  }
}

void RosAlloc::Run::FreeSlot(void* ptr) {
  DCHECK(!IsThreadLocal());
  const byte idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  const size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracket_size;
  DCHECK_LT(slot_idx, numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK_LT(vec_idx, num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &alloc_bit_map_[vec_idx];
  first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(vec_idx));
  const uint32_t mask = 1U << vec_off;
  DCHECK_NE(*vec & mask, 0U);
  *vec &= ~mask;
  DCHECK_EQ(*vec & mask, 0U);
  // Zero out the memory.
  // TODO: Investigate alternate memset since ptr is guaranteed to be aligned to 16.
  memset(ptr, 0, bracket_size);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::FreeSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out) {
  DCHECK(IsThreadLocal());
  // Free slots in the alloc bit map based on the thread local free bit map.
  const size_t idx = size_bracket_idx_;
  const size_t num_of_slots = numOfSlots[idx];
  const size_t num_vec = RoundUp(num_of_slots, 32) / 32;
  bool changed = false;
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* tl_free_vecp = &ThreadLocalFreeBitMap()[0];
  bool is_all_free_after = true;
  for (size_t v = 0; v < num_vec; v++, vecp++, tl_free_vecp++) {
    uint32_t tl_free_vec = *tl_free_vecp;
    uint32_t vec_before = *vecp;
    uint32_t vec_after;
    if (tl_free_vec != 0) {
      first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(v));
      vec_after = vec_before & ~tl_free_vec;
      *vecp = vec_after;
      changed = true;
      *tl_free_vecp = 0;  // clear the thread local free bit map.
    } else {
      vec_after = vec_before;
    }
    if (vec_after != 0) {
      if (v == num_vec - 1) {
        // The run is only not all free if a bit other than the mask bits is set.
        is_all_free_after =
            is_all_free_after && GetBitmapLastVectorMask(num_of_slots, num_vec) == vec_after;
      } else {
        is_all_free_after = false;
      }
    }
    DCHECK_EQ(*tl_free_vecp, static_cast<uint32_t>(0));
  }
  *is_all_free_after_out = is_all_free_after;
  // Return true if at least one bit was set in the thread-local
  // free bit map and at least one bit in the alloc bit map changed.
  return changed;
}

inline void RosAlloc::Run::MergeBulkFreeBitMapIntoAllocBitMap() {
  DCHECK(!IsThreadLocal());
  // Free slots in the alloc bit map based on the bulk free bit map.
  const size_t num_vec = NumberOfBitmapVectors();
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* free_vecp = &BulkFreeBitMap()[0];
  for (size_t v = 0; v < num_vec; v++, vecp++, free_vecp++) {
    uint32_t free_vec = *free_vecp;
    if (free_vec != 0) {
      first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(v));
      *vecp &= ~free_vec;
      *free_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*free_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::UnionBulkFreeBitMapToThreadLocalFreeBitMap() {
  DCHECK(IsThreadLocal());
  // Union the thread local bit map with the bulk free bit map.
  size_t num_vec = NumberOfBitmapVectors();
  uint32_t* to_vecp = &ThreadLocalFreeBitMap()[0];
  uint32_t* from_vecp = &BulkFreeBitMap()[0];
  for (size_t v = 0; v < num_vec; v++, to_vecp++, from_vecp++) {
    uint32_t from_vec = *from_vecp;
    if (from_vec != 0) {
      *to_vecp |= from_vec;
      *from_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*from_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::MarkThreadLocalFreeBitMap(void* ptr) {
  DCHECK(IsThreadLocal());
  MarkFreeBitMapShared(ptr, ThreadLocalFreeBitMap(), "MarkThreadLocalFreeBitMap");
}

inline size_t RosAlloc::Run::MarkBulkFreeBitMap(void* ptr) {
  return MarkFreeBitMapShared(ptr, BulkFreeBitMap(), "MarkBulkFreeBitMap");
}

inline size_t RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
                                                  const char* caller_name) {
  const byte idx = size_bracket_idx_;
  const size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
      - (reinterpret_cast<byte*>(this) + headerSizes[idx]);
  const size_t bracket_size = bracketSizes[idx];
  memset(ptr, 0, bracket_size);
  DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracket_size;
  DCHECK_LT(slot_idx, numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = NumberOfBitmapVectors();
    DCHECK_LT(vec_idx, num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &free_bit_map_base[vec_idx];
  const uint32_t mask = 1U << vec_off;
  DCHECK_EQ(*vec & mask, 0U);
  *vec |= mask;
  DCHECK_NE(*vec & mask, 0U);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : 0x" << std::hex
              << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
  return bracket_size;
}

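// The last bitmap vector usually covers more bit positions than there are slots; the mask
// below has a 1 in every such invalid position. Illustrative example: with num_slots == 42
// and num_vec == 2, remain == 2 * 32 - 42 == 22, so the mask sets the top 22 bits (bits
// 10..31) of the last vector, leaving bits 0..9 for slots 32..41.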
inline uint32_t RosAlloc::Run::GetBitmapLastVectorMask(size_t num_slots, size_t num_vec) {
  const size_t kBitsPerVec = 32;
  DCHECK_GE(num_vec * kBitsPerVec, num_slots);
  size_t remain = num_vec * kBitsPerVec - num_slots;
  DCHECK_NE(remain, kBitsPerVec);
  return ((1U << remain) - 1) << (kBitsPerVec - remain);
}

inline bool RosAlloc::Run::IsAllFree() {
  const byte idx = size_bracket_idx_;
  const size_t num_slots = numOfSlots[idx];
  const size_t num_vec = NumberOfBitmapVectors();
  DCHECK_NE(num_vec, 0U);
  // Check the last vector after the loop since it uses a special case for the masked bits.
  for (size_t v = 0; v < num_vec - 1; v++) {
    uint32_t vec = alloc_bit_map_[v];
    if (vec != 0) {
      return false;
    }
  }
  // Make sure the last word is equal to the mask; all other bits must be 0.
  return alloc_bit_map_[num_vec - 1] == GetBitmapLastVectorMask(num_slots, num_vec);
}

inline bool RosAlloc::Run::IsFull() {
  const size_t num_vec = NumberOfBitmapVectors();
  for (size_t v = 0; v < num_vec; ++v) {
    if (~alloc_bit_map_[v] != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsBulkFreeBitmapClean() {
  const size_t num_vec = NumberOfBitmapVectors();
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = BulkFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsThreadLocalFreeBitmapClean() {
  const size_t num_vec = NumberOfBitmapVectors();
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = ThreadLocalFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline void RosAlloc::Run::SetAllocBitMapBitsForInvalidSlots() {
  const size_t idx = size_bracket_idx_;
  const size_t num_slots = numOfSlots[idx];
  const size_t num_vec = RoundUp(num_slots, 32) / 32;
  DCHECK_NE(num_vec, 0U);
  // Make sure to set the bits at the end of the bitmap so that we don't allocate there since they
  // don't represent valid slots.
  alloc_bit_map_[num_vec - 1] |= GetBitmapLastVectorMask(num_slots, num_vec);
}

inline void RosAlloc::Run::ZeroHeader() {
  const byte idx = size_bracket_idx_;
  memset(this, 0, headerSizes[idx]);
}

inline void RosAlloc::Run::ZeroData() {
  const byte idx = size_bracket_idx_;
  byte* slot_begin = reinterpret_cast<byte*>(this) + headerSizes[idx];
  memset(slot_begin, 0, numOfSlots[idx] * bracketSizes[idx]);
}

inline void RosAlloc::Run::FillAllocBitMap() {
  size_t num_vec = NumberOfBitmapVectors();
  memset(alloc_bit_map_, 0xFF, sizeof(uint32_t) * num_vec);
  first_search_vec_idx_ = num_vec - 1;  // No free bits in any of the bitmap words.
}

void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                                    void* arg) {
  size_t idx = size_bracket_idx_;
  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  DCHECK_EQ(slot_base + num_slots * bracket_size, reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize);
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK_GE(num_slots, slots);
    uint32_t vec = alloc_bit_map_[v];
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      byte* slot_addr = slot_base + (slots + i) * bracket_size;
      if (is_allocated) {
        handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
      } else {
        handler(slot_addr, slot_addr + bracket_size, 0, arg);
      }
    }
  }
}

// If true, read the page map entries in BulkFree() without using the
// lock for better performance, assuming that the existence of an
// allocated chunk/pointer being freed in BulkFree() guarantees that
// the page map entry won't change. Enabled for now.
static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = true;

size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
  size_t freed_bytes = 0;
  if (false) {
    // Used only to test Free() as GC uses only BulkFree().
    for (size_t i = 0; i < num_ptrs; ++i) {
      freed_bytes += FreeInternal(self, ptrs[i]);
    }
    return freed_bytes;
  }

  WriterMutexLock wmu(self, bulk_free_lock_);

  // First mark slots to free in the bulk free bit map without locking the
  // size bracket locks. On host, hash_set is faster than vector + flag.
#ifdef HAVE_ANDROID_OS
  std::vector<Run*> runs;
#else
  hash_set<Run*, hash_run, eq_run> runs;
#endif
  for (size_t i = 0; i < num_ptrs; i++) {
    void* ptr = ptrs[i];
    DCHECK_LE(base_, ptr);
    DCHECK_LT(ptr, base_ + footprint_);
    size_t pm_idx = RoundDownToPageMapIndex(ptr);
    Run* run = nullptr;
    if (kReadPageMapEntryWithoutLockInBulkFree) {
      // Read the page map entries without locking the lock.
      byte page_map_entry = page_map_[pm_idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                  << std::dec << pm_idx
                  << ", page_map_entry=" << static_cast<int>(page_map_entry);
      }
      if (LIKELY(page_map_entry == kPageMapRun)) {
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
        size_t pi = pm_idx;
        // Find the beginning of the run.
        do {
          --pi;
          DCHECK_LT(pi, capacity_ / kPageSize);
        } while (page_map_[pi] != kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
      } else if (page_map_entry == kPageMapLargeObject) {
        MutexLock mu(self, lock_);
        freed_bytes += FreePages(self, ptr, false);
        continue;
      } else {
        LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
      }
    } else {
      // Read the page map entries with a lock.
      MutexLock mu(self, lock_);
      DCHECK_LT(pm_idx, page_map_size_);
      byte page_map_entry = page_map_[pm_idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                  << std::dec << pm_idx
                  << ", page_map_entry=" << static_cast<int>(page_map_entry);
      }
      if (LIKELY(page_map_entry == kPageMapRun)) {
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
        size_t pi = pm_idx;
        // Find the beginning of the run.
        do {
          --pi;
          DCHECK_LT(pi, capacity_ / kPageSize);
        } while (page_map_[pi] != kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
      } else if (page_map_entry == kPageMapLargeObject) {
        freed_bytes += FreePages(self, ptr, false);
        continue;
      } else {
        LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
      }
    }
    DCHECK(run != nullptr);
    DCHECK_EQ(run->magic_num_, kMagicNum);
    // Set the bit in the bulk free bit map.
    freed_bytes += run->MarkBulkFreeBitMap(ptr);
#ifdef HAVE_ANDROID_OS
    if (!run->to_be_bulk_freed_) {
      run->to_be_bulk_freed_ = true;
      runs.push_back(run);
    }
#else
    runs.insert(run);
#endif
  }

  // Now, iterate over the affected runs and update the alloc bit map
  // based on the bulk free bit map (for non-thread-local runs) and
  // union the bulk free bit map into the thread-local free bit map
  // (for thread-local runs.)
  for (Run* run : runs) {
#ifdef HAVE_ANDROID_OS
    DCHECK(run->to_be_bulk_freed_);
    run->to_be_bulk_freed_ = false;
#endif
    size_t idx = run->size_bracket_idx_;
    MutexLock mu(self, *size_bracket_locks_[idx]);
    if (run->IsThreadLocal()) {
      DCHECK_LE(run->size_bracket_idx_, kMaxThreadLocalSizeBracketIdx);
      DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
      run->UnionBulkFreeBitMapToThreadLocalFreeBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(run);
      }
      DCHECK(run->IsThreadLocal());
      // A thread local run will be kept as a thread local even if
      // it's become all free.
    } else {
      bool run_was_full = run->IsFull();
      run->MergeBulkFreeBitMapIntoAllocBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run);
      }
      // Check if the run should be moved to non_full_runs_ or
      // free_page_runs_.
      std::set<Run*>* non_full_runs = &non_full_runs_[idx];
      hash_set<Run*, hash_run, eq_run>* full_runs =
          kIsDebugBuild ? &full_runs_[idx] : NULL;
      if (run->IsAllFree()) {
        // It has just become completely free. Free the pages of the
        // run.
        bool run_was_current = run == current_runs_[idx];
        if (run_was_current) {
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          // If it was a current run, reuse it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only.)
          if (kIsDebugBuild) {
            hash_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
            DCHECK(pos != full_runs->end());
            full_runs->erase(pos);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
            DCHECK(full_runs->find(run) == full_runs->end());
          }
        } else {
          // If it was in a non full run set, remove it from the set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
          non_full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " from non_full_runs_";
          }
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
        }
        if (!run_was_current) {
          run->ZeroHeader();
          MutexLock mu(self, lock_);
          FreePages(self, run, true);
        }
      } else {
        // It is not completely free. If it wasn't the current run or
        // already in the non-full run set (i.e., it was full) insert
        // it into the non-full run set.
        if (run == current_runs_[idx]) {
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          DCHECK(full_runs->find(run) == full_runs->end());
          // If it was a current run, keep it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only) and insert into the non-full run set.
          DCHECK(full_runs->find(run) != full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          if (kIsDebugBuild) {
            full_runs->erase(run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
          }
          non_full_runs->insert(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " into non_full_runs_[" << std::dec << idx << "]";
          }
        } else {
          // It was not full, so leave it in the non-full run set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
        }
      }
    }
  }
  return freed_bytes;
}

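// Renders the page map into a string for debugging, one line per page, along the lines of
// (illustrative output):
//   [0]=Run (start) idx=0 numOfPages=1 is_thread_local=0 is_all_free=0
//   [1]=Empty (FPR start) fpr_size=8192 remaining_fpr_size=4096
// The caller must hold lock_, as the AssertHeld() below enforces.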
std::string RosAlloc::DumpPageMap() {
  std::ostringstream stream;
  stream << "RosAlloc PageMap: " << std::endl;
  lock_.AssertHeld(Thread::Current());
  size_t end = page_map_size_;
  FreePageRun* curr_fpr = NULL;
  size_t curr_fpr_size = 0;
  size_t remaining_curr_fpr_size = 0;
  size_t num_running_empty_pages = 0;
  for (size_t i = 0; i < end; ++i) {
    byte pm = page_map_[i];
    switch (pm) {
      case kPageMapEmpty: {
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
          // Encountered a fresh free page run.
          DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
          DCHECK(fpr->IsFree());
          DCHECK(curr_fpr == NULL);
          DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
          curr_fpr = fpr;
          curr_fpr_size = fpr->ByteSize(this);
          DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
          remaining_curr_fpr_size = curr_fpr_size - kPageSize;
          stream << "[" << i << "]=Empty (FPR start)"
                 << " fpr_size=" << curr_fpr_size
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = NULL;
            curr_fpr_size = 0;
          }
          stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
          DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
        } else {
          // Still part of the current free page run.
          DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
          DCHECK(curr_fpr != NULL && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
          DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
          DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
          remaining_curr_fpr_size -= kPageSize;
          stream << "[" << i << "]=Empty (FPR part)"
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = NULL;
            curr_fpr_size = 0;
          }
        }
        num_running_empty_pages++;
        break;
      }
      case kPageMapLargeObject: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (start)" << std::endl;
        break;
      }
      case kPageMapLargeObjectPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (part)" << std::endl;
        break;
      case kPageMapRun: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        size_t idx = run->size_bracket_idx_;
        stream << "[" << i << "]=Run (start)"
               << " idx=" << idx
               << " numOfPages=" << numOfPages[idx]
               << " is_thread_local=" << run->is_thread_local_
               << " is_all_free=" << (run->IsAllFree() ? 1 : 0)
               << std::endl;
        break;
      }
      case kPageMapRunPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Run (part)" << std::endl;
        break;
      default:
        stream << "[" << i << "]=Unrecognizable page map type: " << static_cast<int>(pm)
               << std::endl;
        break;
    }
  }
  return stream.str();
}

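// Returns the usable size of the allocation at |ptr|: the whole page span for a large
// object, or the full bracket size for a run slot (allocations are rounded up to their
// size bracket, so this may exceed the requested size).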
size_t RosAlloc::UsableSize(void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  MutexLock mu(Thread::Current(), lock_);
  switch (page_map_[pm_idx]) {
    case kPageMapEmpty:
      LOG(FATAL) << "Unreachable - RosAlloc::UsableSize(): pm_idx=" << pm_idx << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapLargeObject: {
      size_t num_pages = 1;
      size_t idx = pm_idx + 1;
      size_t end = page_map_size_;
      while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
        num_pages++;
        idx++;
      }
      return num_pages * kPageSize;
    }
    case kPageMapLargeObjectPart:
      LOG(FATAL) << "Unreachable - RosAlloc::UsableSize(): pm_idx=" << pm_idx << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapRun:
    case kPageMapRunPart: {
      // Find the beginning of the run.
      while (page_map_[pm_idx] != kPageMapRun) {
        pm_idx--;
        DCHECK_LT(pm_idx, capacity_ / kPageSize);
      }
      DCHECK_EQ(page_map_[pm_idx], kPageMapRun);
      Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      DCHECK_EQ(run->magic_num_, kMagicNum);
      size_t idx = run->size_bracket_idx_;
      size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
          - (reinterpret_cast<byte*>(run) + headerSizes[idx]);
      DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
      return IndexToBracketSize(idx);
    }
    default:
      LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
      break;
  }
  return 0;
}

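// Trims the space from the top: if the free page run at the very end of the space reaches
// base_ + footprint_, its pages are returned via a negative morecore increment, and the
// tail of the page map is zeroed and madvised away. Returns true if anything was trimmed.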
bool RosAlloc::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  FreePageRun* last_free_page_run;
  DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
  auto it = free_page_runs_.rbegin();
  if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
    // Remove the last free page run, if any.
    DCHECK(last_free_page_run->IsFree());
    DCHECK_EQ(page_map_[ToPageMapIndex(last_free_page_run)], kPageMapEmpty);
    DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
    DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
    free_page_runs_.erase(last_free_page_run);
    size_t decrement = last_free_page_run->ByteSize(this);
    size_t new_footprint = footprint_ - decrement;
    DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
    size_t new_num_of_pages = new_footprint / kPageSize;
    DCHECK_GE(page_map_size_, new_num_of_pages);
    // Zero out the tail of the page map.
    byte* zero_begin = page_map_ + new_num_of_pages;
    byte* madvise_begin = AlignUp(zero_begin, kPageSize);
    DCHECK_LE(madvise_begin, page_map_mem_map_->End());
    size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
    if (madvise_size > 0) {
      DCHECK_ALIGNED(madvise_begin, kPageSize);
      DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
      CHECK_EQ(madvise(madvise_begin, madvise_size, MADV_DONTNEED), 0);
    }
    if (madvise_begin > zero_begin) {
      memset(zero_begin, 0, madvise_begin - zero_begin);
    }
    page_map_size_ = new_num_of_pages;
    free_page_run_size_map_.resize(new_num_of_pages);
    DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
    art_heap_rosalloc_morecore(this, -(static_cast<intptr_t>(decrement)));
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
                << footprint_ << " to " << new_footprint;
    }
    DCHECK_LT(new_footprint, footprint_);
    DCHECK_LT(new_footprint, capacity_);
    footprint_ = new_footprint;
    return true;
  }
  return false;
}

void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                          void* arg) {
  // Note: no need to use this to release pages as we already do so in FreePages().
  if (handler == NULL) {
    return;
  }
  MutexLock mu(Thread::Current(), lock_);
  size_t pm_end = page_map_size_;
  size_t i = 0;
  while (i < pm_end) {
    byte pm = page_map_[i];
    switch (pm) {
      case kPageMapEmpty: {
        // The start of a free page run.
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
        size_t fpr_size = fpr->ByteSize(this);
        DCHECK(IsAligned<kPageSize>(fpr_size));
        void* start = fpr;
        if (kIsDebugBuild) {
          // In the debug build, the first page of a free page run
          // contains a magic number for debugging. Exclude it.
          start = reinterpret_cast<byte*>(fpr) + kPageSize;
        }
        void* end = reinterpret_cast<byte*>(fpr) + fpr_size;
        handler(start, end, 0, arg);
        size_t num_pages = fpr_size / kPageSize;
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapEmpty);
          }
        }
        i += fpr_size / kPageSize;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObject: {
        // The start of a large object.
        size_t num_pages = 1;
        size_t idx = i + 1;
        while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
          num_pages++;
          idx++;
        }
        void* start = base_ + i * kPageSize;
        void* end = base_ + (i + num_pages) * kPageSize;
        size_t used_bytes = num_pages * kPageSize;
        handler(start, end, used_bytes, arg);
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
      case kPageMapRun: {
        // The start of a run.
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        // The dedicated full run is backed by static storage outside the page map, so it
        // never shows up in this walk; every run found here contains real slots to visit.
        run->InspectAllSlots(handler, arg);
        size_t num_pages = numOfPages[run->size_bracket_idx_];
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapRunPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapRunPart:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
    }
  }
}

size_t RosAlloc::Footprint() {
  MutexLock mu(Thread::Current(), lock_);
  return footprint_;
}

size_t RosAlloc::FootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return capacity_;
}

void RosAlloc::SetFootprintLimit(size_t new_capacity) {
  MutexLock mu(Thread::Current(), lock_);
  DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
  // Only growing is supported here. But Trim() is supported.
  if (capacity_ < new_capacity) {
    CHECK_LE(new_capacity, max_capacity_);
    capacity_ = new_capacity;
    VLOG(heap) << "new capacity=" << capacity_;
  }
}

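// Revokes |thread|'s thread-local runs: each run's thread-local free bit map is merged
// back into its alloc bit map, the run is routed to the appropriate shared set (full runs
// in debug builds, non-full runs, or freed outright if it became empty), and the thread is
// pointed at dedicated_full_run_ so that its next allocation takes the slow path.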
void RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
  Thread* self = Thread::Current();
  // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
  WriterMutexLock wmu(self, bulk_free_lock_);
  for (size_t idx = 0; idx < kNumOfSizeBrackets; idx++) {
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
    CHECK(thread_local_run != nullptr);
    // Invalid means already revoked.
    DCHECK(thread_local_run->IsThreadLocal());
    if (thread_local_run != dedicated_full_run_) {
      thread->SetRosAllocRun(idx, dedicated_full_run_);
      DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
      // Note the thread local run may not be full here.
      bool dont_care;
      thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&dont_care);
      thread_local_run->SetIsThreadLocal(false);
      thread_local_run->MergeBulkFreeBitMapIntoAllocBitMap();
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      if (thread_local_run->IsFull()) {
        if (kIsDebugBuild) {
          full_runs_[idx].insert(thread_local_run);
          DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::RevokeThreadLocalRuns() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(thread_local_run)
                      << " into full_runs_[" << std::dec << idx << "]";
          }
        }
      } else if (thread_local_run->IsAllFree()) {
        MutexLock mu(self, lock_);
        thread_local_run->ZeroHeader();
        FreePages(self, thread_local_run, true);
      } else {
        non_full_runs_[idx].insert(thread_local_run);
        DCHECK(non_full_runs_[idx].find(thread_local_run) != non_full_runs_[idx].end());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::RevokeThreadLocalRuns() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(thread_local_run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
}

void RosAlloc::RevokeAllThreadLocalRuns() {
  // This is called when a mutator thread won't allocate, such as at
  // Zygote creation time or during the GC pause.
  MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
  MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalRuns(thread);
  }
}

void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
    WriterMutexLock wmu(self, bulk_free_lock_);
    for (size_t idx = 0; idx < kNumOfSizeBrackets; idx++) {
      MutexLock mu(self, *size_bracket_locks_[idx]);
      Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
      DCHECK(thread_local_run == nullptr || thread_local_run == dedicated_full_run_);
    }
  }
}

void RosAlloc::AssertAllThreadLocalRunsAreRevoked() {
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
    MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* t : thread_list) {
      AssertThreadLocalRunsAreRevoked(t);
    }
  }
}

void RosAlloc::Initialize() {
  // Check the consistency of the number of size brackets.
  DCHECK_EQ(Thread::kRosAllocNumOfSizeBrackets, kNumOfSizeBrackets);
  // bracketSizes.
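  // The DCHECKs in the loops below imply kNumOfSizeBrackets == 34: brackets 0..31 get
  // 16, 32, ..., 512 bytes in 16-byte steps, and the last two get 1 KB and 2 KB.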
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 16 * (i + 1);
    } else if (i == kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 1 * KB;
    } else {
      DCHECK_EQ(i, kNumOfSizeBrackets - 1);
      bracketSizes[i] = 2 * KB;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
    }
  }
  // numOfPages.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < 4) {
      numOfPages[i] = 1;
    } else if (i < 8) {
      numOfPages[i] = 2;
    } else if (i < 16) {
      numOfPages[i] = 4;
    } else if (i < 32) {
      numOfPages[i] = 8;
    } else if (i == 32) {
      DCHECK_EQ(i, kNumOfSizeBrackets - 2);
      numOfPages[i] = 16;
    } else {
      DCHECK_EQ(i, kNumOfSizeBrackets - 1);
      numOfPages[i] = 32;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
    }
  }
  // Compute numOfSlots and slotOffsets.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_t bracket_size = bracketSizes[i];
    size_t run_size = kPageSize * numOfPages[i];
    size_t max_num_of_slots = run_size / bracket_size;
    // Compute the actual number of slots by taking the header and
    // alignment into account.
    size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint32_t));
    DCHECK_EQ(fixed_header_size, static_cast<size_t>(8));
    size_t header_size = 0;
    size_t bulk_free_bit_map_offset = 0;
    size_t thread_local_free_bit_map_offset = 0;
    size_t num_of_slots = 0;
    // Search for the maximum number of slots that allows enough space
    // for the header (including the bit maps.)
    for (int s = max_num_of_slots; s >= 0; s--) {
      size_t tmp_slots_size = bracket_size * s;
      size_t tmp_bit_map_size = RoundUp(s, sizeof(uint32_t) * kBitsPerByte) / kBitsPerByte;
      size_t tmp_bulk_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_bulk_free_bit_map_off = fixed_header_size + tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_off = tmp_bulk_free_bit_map_off + tmp_bulk_free_bit_map_size;
      size_t tmp_unaligned_header_size = tmp_thread_local_free_bit_map_off + tmp_thread_local_free_bit_map_size;
      // Align up the unaligned header size. bracket_size may not be a power of two.
      size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
          tmp_unaligned_header_size :
          tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
      DCHECK_EQ(tmp_header_size % bracket_size, static_cast<size_t>(0));
      DCHECK_EQ(tmp_header_size % 8, static_cast<size_t>(0));
      if (tmp_slots_size + tmp_header_size <= run_size) {
        // Found the right number of slots, that is, there was enough
        // space for the header (including the bit maps.)
        num_of_slots = s;
        header_size = tmp_header_size;
        bulk_free_bit_map_offset = tmp_bulk_free_bit_map_off;
        thread_local_free_bit_map_offset = tmp_thread_local_free_bit_map_off;
        break;
      }
    }
    DCHECK(num_of_slots > 0 && header_size > 0 && bulk_free_bit_map_offset > 0);
    // Add the padding for the alignment remainder.
    header_size += run_size % bracket_size;
    DCHECK_EQ(header_size + num_of_slots * bracket_size, run_size);
    numOfSlots[i] = num_of_slots;
    headerSizes[i] = header_size;
    bulkFreeBitMapOffsets[i] = bulk_free_bit_map_offset;
    threadLocalFreeBitMapOffsets[i] = thread_local_free_bit_map_offset;
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
                << ", headerSizes[" << i << "]=" << headerSizes[i]
                << ", bulkFreeBitMapOffsets[" << i << "]=" << bulkFreeBitMapOffsets[i]
                << ", threadLocalFreeBitMapOffsets[" << i << "]=" << threadLocalFreeBitMapOffsets[i];
    }
  }
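  // Worked example for the 16-byte bracket (derived from the formulas above, not stored
  // anywhere): run_size == 4096, the fixed header is 8 bytes, and each of the three bit
  // maps (alloc, bulk free, thread-local free) takes 32 bytes for up to 256 slots, so the
  // unaligned header is 8 + 3 * 32 == 104 bytes, rounded up to 112 (a multiple of 16).
  // The search then settles on 249 slots since 249 * 16 + 112 == 4096.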
  // Set up the dedicated full run: fill its alloc bitmap so that nobody can successfully
  // allocate from it.
  if (kIsDebugBuild) {
    dedicated_full_run_->magic_num_ = kMagicNum;
  }
  // It doesn't matter which size bracket we use since the main goal is to have the allocation
  // fail 100% of the time you attempt to allocate into the dedicated full run.
  dedicated_full_run_->size_bracket_idx_ = 0;
  dedicated_full_run_->FillAllocBitMap();
  dedicated_full_run_->SetIsThreadLocal(true);
}

void RosAlloc::BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
  *bytes_allocated += used_bytes;
}

void RosAlloc::ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
  ++(*objects_allocated);
}
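
// Illustrative usage (a sketch, assuming the two callbacks above are declared static so
// that they convert to plain function pointers): both are meant to be handed to
// InspectAll(), e.g.
//   size_t bytes_allocated = 0;
//   rosalloc->InspectAll(RosAlloc::BytesAllocatedCallback, &bytes_allocated);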

void RosAlloc::Verify() {
  Thread* self = Thread::Current();
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
      << "The mutator lock isn't exclusively held at RosAlloc::Verify()";
  MutexLock mu(self, *Locks::thread_list_lock_);
  WriterMutexLock wmu(self, bulk_free_lock_);
  std::vector<Run*> runs;
  {
    MutexLock mu(self, lock_);
    size_t pm_end = page_map_size_;
    size_t i = 0;
    while (i < pm_end) {
      byte pm = page_map_[i];
      switch (pm) {
        case kPageMapEmpty: {
          // The start of a free page run.
          FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
          DCHECK_EQ(fpr->magic_num_, kMagicNumFree);
          CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
              << "An empty page must belong to the free page run set";
          size_t fpr_size = fpr->ByteSize(this);
          CHECK(IsAligned<kPageSize>(fpr_size))
              << "A free page run size isn't page-aligned : " << fpr_size;
          size_t num_pages = fpr_size / kPageSize;
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "A free page run size must be > 0 : " << fpr_size;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK_EQ(page_map_[j], kPageMapEmpty)
                << "A mismatch between the page map table for kPageMapEmpty "
                << " at page index " << j
                << " and the free page run size : page index range : "
                << i << " to " << (i + num_pages) << std::endl << DumpPageMap();
          }
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObject: {
          // The start of a large object.
          size_t num_pages = 1;
          size_t idx = i + 1;
          while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
            num_pages++;
            idx++;
          }
          void* start = base_ + i * kPageSize;
          mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
          size_t obj_size = obj->SizeOf();
          CHECK_GT(obj_size, kLargeSizeThreshold)
              << "A rosalloc large object size must be > " << kLargeSizeThreshold;
          CHECK_EQ(num_pages, RoundUp(obj_size, kPageSize) / kPageSize)
              << "A rosalloc large object size " << obj_size
              << " does not match the page map table " << (num_pages * kPageSize)
              << std::endl << DumpPageMap();
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObjectPart:
          LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
          break;
        case kPageMapRun: {
          // The start of a run.
          Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
          DCHECK_EQ(run->magic_num_, kMagicNum);
          size_t idx = run->size_bracket_idx_;
          CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
          size_t num_pages = numOfPages[idx];
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "Run size must be > 0 : " << num_pages;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK_EQ(page_map_[j], kPageMapRunPart)
                << "A mismatch between the page map table for kPageMapRunPart "
                << " at page index " << j
                << " and the run size : page index range " << i << " to " << (i + num_pages)
                << std::endl << DumpPageMap();
          }
          // The dedicated_full_run_ is never in the page map, so it doesn't need to be
          // (and is never) verified here.
          runs.push_back(run);
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapRunPart:
          LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
          break;
        default:
          LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
          break;
      }
    }
  }

  // Call Verify() here for the lock order.
  for (auto& run : runs) {
    run->Verify(self, this);
  }
}

void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
  DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
  const size_t idx = size_bracket_idx_;
  CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
  byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
  const size_t num_slots = numOfSlots[idx];
  const size_t num_vec = RoundUp(num_slots, 32) / 32;
  CHECK_GT(num_vec, 0U);
  size_t bracket_size = IndexToBracketSize(idx);
  CHECK_EQ(slot_base + num_slots * bracket_size,
           reinterpret_cast<byte*>(this) + numOfPages[idx] * kPageSize)
      << "Mismatch in the end address of the run " << Dump();
  // Check that the bulk free bitmap is clean. It's only used during BulkFree().
  CHECK(IsBulkFreeBitmapClean()) << "The bulk free bit map isn't clean " << Dump();
  uint32_t last_word_mask = GetBitmapLastVectorMask(num_slots, num_vec);
  // Make sure all the bits at the end of the run are set so that we don't allocate there.
  CHECK_EQ(alloc_bit_map_[num_vec - 1] & last_word_mask, last_word_mask);
  // Ensure that the first bitmap index is valid.
  CHECK_LT(first_search_vec_idx_, num_vec);
  // Check the thread local runs, the current runs, and the run sets.
  if (IsThreadLocal()) {
    // If it's a thread local run, then it must be pointed to by an owner thread.
    bool owner_found = false;
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
      Thread* thread = *it;
      for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
        MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
        Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
        if (thread_local_run == this) {
          CHECK(!owner_found)
              << "A thread local run has more than one owner thread " << Dump();
          CHECK_EQ(i, idx)
              << "A mismatching size bracket index in a thread local run " << Dump();
          owner_found = true;
        }
      }
    }
    CHECK(owner_found) << "A thread local run has no owner thread " << Dump();
  } else {
    // If it's not thread local, check that the thread local free bitmap is clean.
    CHECK(IsThreadLocalFreeBitmapClean())
        << "A non-thread-local run's thread local free bitmap isn't clean "
        << Dump();
    // Check if it's a current run for the size bracket.
    bool is_current_run = false;
    for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
      MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
      Run* current_run = rosalloc->current_runs_[i];
      if (idx == i) {
        if (this == current_run) {
          is_current_run = true;
        }
      } else {
        // If the size bracket index does not match, then it must not
        // be a current run.
        CHECK_NE(this, current_run)
            << "A current run points to a run with a wrong size bracket index " << Dump();
      }
    }
    // If it's neither a thread local run nor a current run, then it must be
    // in a run set.
    if (!is_current_run) {
      MutexLock mu(self, rosalloc->lock_);
      std::set<Run*>& non_full_runs = rosalloc->non_full_runs_[idx];
      // If it's all free, it must be a free page run rather than a run.
      CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump();
      if (!IsFull()) {
        // If it's not full, it must be in the non-full run set.
        CHECK(non_full_runs.find(this) != non_full_runs.end())
            << "A non-full run isn't in the non-full run set " << Dump();
      } else {
        // If it's full, it must be in the full run set (debug build only.)
        if (kIsDebugBuild) {
          hash_set<Run*, hash_run, eq_run>& full_runs = rosalloc->full_runs_[idx];
          CHECK(full_runs.find(this) != full_runs.end())
              << " A full run isn't in the full run set " << Dump();
        }
      }
    }
  }
  // Check each slot.
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK_GE(num_slots, slots) << "Out of bounds";
    uint32_t vec = alloc_bit_map_[v];
    uint32_t thread_local_free_vec = ThreadLocalFreeBitMap()[v];
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      // If a thread local run, slots may be marked freed in the
      // thread local free bitmap.
      bool is_thread_local_freed = IsThreadLocal() && ((thread_local_free_vec >> i) & 0x1) != 0;
      if (is_allocated && !is_thread_local_freed) {
        byte* slot_addr = slot_base + (slots + i) * bracket_size;
        mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
        size_t obj_size = obj->SizeOf();
        CHECK_LE(obj_size, kLargeSizeThreshold)
            << "A run slot contains a large object " << Dump();
        CHECK_EQ(SizeToIndex(obj_size), idx)
            << PrettyTypeOf(obj) << " "
            << "obj_size=" << obj_size << ", idx=" << idx << " "
            << "A run slot contains an object with wrong size " << Dump();
      }
    }
  }
}

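// Releases the pages of every free page run back to the kernel with madvise(). lock_ is
// reacquired on each loop iteration rather than held across the whole scan, so mutators
// are not blocked for long; this is why page_map_size_ is re-read every time around, as
// the page map may grow or shrink in between.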
size_t RosAlloc::ReleasePages() {
  VLOG(heap) << "RosAlloc::ReleasePages()";
  DCHECK(!DoesReleaseAllPages());
  Thread* self = Thread::Current();
  size_t reclaimed_bytes = 0;
  size_t i = 0;
  while (true) {
    MutexLock mu(self, lock_);
    // Check the page map size which might have changed due to grow/shrink.
    size_t pm_end = page_map_size_;
    if (i >= pm_end) {
      // Reached the end.
      break;
    }
    byte pm = page_map_[i];
    switch (pm) {
      case kPageMapEmpty: {
        // The start of a free page run. Release pages.
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
        size_t fpr_size = fpr->ByteSize(this);
        DCHECK(IsAligned<kPageSize>(fpr_size));
        byte* start = reinterpret_cast<byte*>(fpr);
        if (kIsDebugBuild) {
          // In the debug build, the first page of a free page run
          // contains a magic number for debugging. Exclude it.
          start = reinterpret_cast<byte*>(fpr) + kPageSize;
        }
        byte* end = reinterpret_cast<byte*>(fpr) + fpr_size;
        CHECK_EQ(madvise(start, end - start, MADV_DONTNEED), 0);
        reclaimed_bytes += fpr_size;
        size_t num_pages = fpr_size / kPageSize;
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapEmpty);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObject:      // Fall through.
      case kPageMapLargeObjectPart:  // Fall through.
      case kPageMapRun:              // Fall through.
      case kPageMapRunPart:          // Fall through.
        ++i;
        break;  // Skip.
      default:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
    }
  }
  return reclaimed_bytes;
}

}  // namespace allocator
}  // namespace gc
}  // namespace art