/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base/mutex-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "rosalloc.h"

#include <map>
#include <list>
#include <vector>

namespace art {
namespace gc {
namespace allocator {

extern "C" void* art_heap_rosalloc_morecore(RosAlloc* rosalloc, intptr_t increment);

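// Tunables for warming up a freshly allocated run's memory before it is handed
// out (see AllocRun() below): the slot area can either be prefetched cache
// line by cache line or zeroed outright. Zeroing dirties pages that may have
// been madvised away, so prefetching is the default.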
static constexpr bool kUsePrefetchDuringAllocRun = true;
static constexpr bool kPrefetchNewRunDataByZeroing = false;
static constexpr size_t kPrefetchStride = 64;

size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
size_t RosAlloc::bulkFreeBitMapOffsets[kNumOfSizeBrackets];
size_t RosAlloc::threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;
size_t RosAlloc::dedicated_full_run_storage_[kPageSize / sizeof(size_t)] = { 0 };
RosAlloc::Run* RosAlloc::dedicated_full_run_ =
    reinterpret_cast<RosAlloc::Run*>(dedicated_full_run_storage_);

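// The dedicated full run is a page of static storage viewed as a Run. It is
// set up (in Initialize(), not shown in this section) so that AllocSlot()
// always fails on it. Pointing every bracket's current run and every thread's
// thread-local runs at it lets the allocation fast path treat a null slot as
// "refill needed" instead of null-checking the run pointer itself.
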
RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                   PageReleaseMode page_release_mode, size_t page_release_size_threshold)
    : base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity),
      capacity_(capacity), max_capacity_(max_capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
      page_release_mode_(page_release_mode),
      page_release_size_threshold_(page_release_size_threshold) {
  DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
  DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
  CHECK_LE(capacity, max_capacity);
  CHECK(IsAligned<kPageSize>(page_release_size_threshold_));
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << (intptr_t)base_ << ", end="
             << std::hex << (intptr_t)(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_
             << ", max_capacity=" << std::dec << max_capacity_;
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_lock_names_[i] =
        StringPrintf("a rosalloc size bracket %d lock", static_cast<int>(i));
    size_bracket_locks_[i] = new Mutex(size_bracket_lock_names_[i].c_str(), kRosAllocBracketLock);
    current_runs_[i] = dedicated_full_run_;
  }
  DCHECK_EQ(footprint_, capacity_);
  size_t num_of_pages = footprint_ / kPageSize;
  size_t max_num_of_pages = max_capacity_ / kPageSize;
  std::string error_msg;
  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", nullptr,
                                               RoundUp(max_num_of_pages, kPageSize),
                                               PROT_READ | PROT_WRITE, false, &error_msg));
  CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
  page_map_ = page_map_mem_map_->Begin();
  page_map_size_ = num_of_pages;
  max_page_map_size_ = max_num_of_pages;
  free_page_run_size_map_.resize(num_of_pages);
  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  DCHECK(free_pages->IsFree());
  free_pages->ReleasePages(this);
  DCHECK(free_pages->IsFree());
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}

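// The page map tracks the state of every page in the space with one byte:
// kPageMapEmpty/kPageMapReleased for free pages (the latter having been
// returned to the kernel), kPageMapRun/kPageMapRunPart for the first and
// subsequent pages of a run, and kPageMapLargeObject/kPageMapLargeObjectPart
// for large allocations. For example, a 3-page run followed by a 2-page large
// object shows up as { Run, RunPart, RunPart, LargeObject, LargeObjectPart }.
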
RosAlloc::~RosAlloc() {
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    delete size_bracket_locks_[i];
  }
}

void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
  lock_.AssertHeld(self);
  DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
  FreePageRun* res = nullptr;
  const size_t req_byte_size = num_pages * kPageSize;
  // Find the lowest address free page run that's large enough.
  for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
    FreePageRun* fpr = *it;
    DCHECK(fpr->IsFree());
    size_t fpr_byte_size = fpr->ByteSize(this);
    DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
    if (req_byte_size <= fpr_byte_size) {
      // Found one.
      free_page_runs_.erase(it++);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split.
        FreePageRun* remainder =
            reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Don't need to call madvise on remainder here.
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
      break;
    } else {
      ++it;
    }
  }

  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == nullptr && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = nullptr;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_size_, new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_size_ = new_num_of_pages;
      DCHECK_LE(page_map_size_, max_page_map_size_);
      free_page_run_size_map_.resize(new_num_of_pages);
      art_heap_rosalloc_morecore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK_EQ(last_free_page_run->End(this), base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK_EQ(*free_page_runs_.rbegin(), new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != nullptr);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder =
            reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != nullptr)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK(IsFreePage(page_map_idx + i));
    }
    switch (page_map_type) {
      case kPageMapRun:
        page_map_[page_map_idx] = kPageMapRun;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapRunPart;
        }
        break;
      case kPageMapLargeObject:
        page_map_[page_map_idx] = kPageMapLargeObject;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
        }
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_type;
        break;
    }
    if (kIsDebugBuild) {
      // Clear the first page since it is not madvised due to the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : NULL";
  }
  return nullptr;
}

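// A minimal sketch of the first-fit path above: if free_page_runs_ holds an
// 8-page run and AllocPages() needs 3 pages, the run is erased from the set,
// split into a 3-page piece (returned) and a 5-page remainder (reinserted),
// and the page map entries for the returned piece become
// { Run, RunPart, RunPart } (or the LargeObject equivalents).
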
size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK_LT(pm_idx, page_map_size_);
  uint8_t pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  uint8_t pm_part_type;
  switch (pm_type) {
    case kPageMapRun:
      pm_part_type = kPageMapRunPart;
      break;
    case kPageMapLargeObject:
      pm_part_type = kPageMapLargeObjectPart;
      break;
    default:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << " : " << "pm_idx=" << pm_idx
                 << ", pm_type=" << static_cast<int>(pm_type) << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      return 0;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_size_;
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }
  const size_t byte_size = num_pages * kPageSize;
  if (already_zero) {
    if (kCheckZeroMemory) {
      const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(ptr);
      for (size_t i = 0; i < byte_size / sizeof(uintptr_t); ++i) {
        CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
      }
    }
  } else if (!DoesReleaseAllPages()) {
    memset(ptr, 0, byte_size);
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << __PRETTY_FUNCTION__ << " : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + byte_size)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, byte_size);
  DCHECK(IsAligned<kPageSize>(fpr->ByteSize(this)));

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << __PRETTY_FUNCTION__ << " : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_size_ : ToPageMapIndex(fpr->End(this))) << "]";
    }
    auto higher_it = free_page_runs_.upper_bound(fpr);
    if (higher_it != free_page_runs_.end()) {
      for (auto it = higher_it; it != free_page_runs_.end(); ) {
        FreePageRun* h = *it;
        DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                    << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
        }
        if (fpr->End(this) == h->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          // Clear magic num since this is no longer the start of a free page run.
          if (kIsDebugBuild) {
            h->magic_num_ = 0;
          }
          free_page_runs_.erase(it++);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(h)
                      << " from free_page_runs_";
          }
          fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
          DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
      }
    }
    // Try to coalesce in the lower address direction.
    auto lower_it = free_page_runs_.upper_bound(fpr);
    if (lower_it != free_page_runs_.begin()) {
      --lower_it;
      for (auto it = lower_it; ; ) {
        // We want to try to coalesce with the first element but
        // there's no "<=" operator for the iterator.
        bool to_exit_loop = it == free_page_runs_.begin();

        FreePageRun* l = *it;
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                    << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
        }
        if (l->End(this) == fpr->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it--);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(l)
                      << " from free_page_runs_";
          }
          l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
          DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
          // Clear magic num since this is no longer the start of a free page run.
          if (kIsDebugBuild) {
            fpr->magic_num_ = 0;
          }
          fpr = l;
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
        if (to_exit_loop) {
          break;
        }
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  DCHECK(fpr->IsFree());
  fpr->ReleasePages(this);
  DCHECK(fpr->IsFree());
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
  return byte_size;
}

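// free_page_runs_ is an address-ordered set, so coalescing in FreePages() is
// two neighbor probes: upper_bound(fpr) yields runs starting above fpr, which
// are merged while fpr->End(this) == h->Begin(); stepping back from there
// merges a predecessor whose End(this) == fpr->Begin(). Neighbors are erased
// from the set before their sizes are folded in, so the set ordering stays
// consistent throughout.
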
void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK_GT(size, kLargeSizeThreshold);
  size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
  void* r;
  {
    MutexLock mu(self, lock_);
    r = AllocPages(self, num_pages, kPageMapLargeObject);
  }
  if (UNLIKELY(r == nullptr)) {
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
    }
    return nullptr;
  }
  const size_t total_bytes = num_pages * kPageSize;
  *bytes_allocated = total_bytes;
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
              << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }
  // Check if the returned memory is really all zero.
  if (kCheckZeroMemory) {
    CHECK_EQ(total_bytes % sizeof(uintptr_t), 0U);
    const uintptr_t* words = reinterpret_cast<uintptr_t*>(r);
    for (size_t i = 0; i < total_bytes / sizeof(uintptr_t); ++i) {
      CHECK_EQ(words[i], 0U);
    }
  }
  return r;
}

size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  Run* run = nullptr;
  {
    MutexLock mu(self, lock_);
    DCHECK_LT(pm_idx, page_map_size_);
    uint8_t page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_[pm_idx]) {
      case kPageMapLargeObject:
        return FreePages(self, ptr, false);
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return 0;
      case kPageMapRunPart: {
        // Find the beginning of the run.
        do {
          --pm_idx;
          DCHECK_LT(pm_idx, capacity_ / kPageSize);
        } while (page_map_[pm_idx] != kPageMapRun);
        FALLTHROUGH_INTENDED;
      case kPageMapRun:
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        break;
      case kPageMapReleased:
      case kPageMapEmpty:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return 0;
      }
      default:
        LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
        return 0;
    }
  }
  DCHECK(run != nullptr);
  return FreeFromRun(self, ptr, run);
}

size_t RosAlloc::Free(Thread* self, void* ptr) {
  ReaderMutexLock rmu(self, bulk_free_lock_);
  return FreeInternal(self, ptr);
}

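// Free() takes bulk_free_lock_ as a reader, so individual frees do not
// serialize against each other on this lock; the writer side is left for
// operations elsewhere in the allocator that must exclude all concurrent
// frees at once.
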
RosAlloc::Run* RosAlloc::AllocRun(Thread* self, size_t idx) {
  RosAlloc::Run* new_run = nullptr;
  {
    MutexLock mu(self, lock_);
    new_run = reinterpret_cast<Run*>(AllocPages(self, numOfPages[idx], kPageMapRun));
  }
  if (LIKELY(new_run != nullptr)) {
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    new_run->SetAllocBitMapBitsForInvalidSlots();
    DCHECK(!new_run->IsThreadLocal());
    DCHECK_EQ(new_run->first_search_vec_idx_, 0U);
    DCHECK(!new_run->to_be_bulk_freed_);
    if (kUsePrefetchDuringAllocRun && idx < kNumThreadLocalSizeBrackets) {
      // Take ownership of the cache lines if this is likely to become a thread-local run.
      if (kPrefetchNewRunDataByZeroing) {
        // Zeroing the data is sometimes faster than prefetching but it increases memory usage
        // since we end up dirtying zero pages which may have been madvised.
        new_run->ZeroData();
      } else {
        const size_t num_of_slots = numOfSlots[idx];
        const size_t bracket_size = bracketSizes[idx];
        const size_t num_of_bytes = num_of_slots * bracket_size;
        uint8_t* begin = reinterpret_cast<uint8_t*>(new_run) + headerSizes[idx];
        for (size_t i = 0; i < num_of_bytes; i += kPrefetchStride) {
          __builtin_prefetch(begin + i);
        }
      }
    }
  }
  return new_run;
}

RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  // Get the lowest address non-full run from the binary tree.
  auto* const bt = &non_full_runs_[idx];
  if (!bt->empty()) {
    // If there's one, use it as the current run.
    auto it = bt->begin();
    Run* non_full_run = *it;
    DCHECK(non_full_run != nullptr);
    DCHECK(!non_full_run->IsThreadLocal());
    bt->erase(it);
    return non_full_run;
  }
  // If there's none, allocate a new run and use it as the current run.
  return AllocRun(self, idx);
}

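// Run lifecycle: a run serves as a bracket's current run (or a thread-local
// run), is tracked in full_runs_ (debug builds only) once it fills up, moves
// to non_full_runs_ when a slot is later freed, and has its pages handed back
// to the page allocator once it becomes completely free. RefillRun() prefers
// the lowest-address non-full run, which tends to pack live slots toward the
// beginning of the space.
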
inline void* RosAlloc::AllocFromCurrentRunUnlocked(Thread* self, size_t idx) {
  Run* current_run = current_runs_[idx];
  DCHECK(current_run != nullptr);
  void* slot_addr = current_run->AllocSlot();
  if (UNLIKELY(slot_addr == nullptr)) {
    // The current run got full. Try to refill it.
    DCHECK(current_run->IsFull());
    if (kIsDebugBuild && current_run != dedicated_full_run_) {
      full_runs_[idx].insert(current_run);
      if (kTraceRosAlloc) {
        LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(current_run)
                  << " into full_runs_[" << std::dec << idx << "]";
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
    }
    current_run = RefillRun(self, idx);
    if (UNLIKELY(current_run == nullptr)) {
      // Failed to allocate a new run, make sure that it is the dedicated full run.
      current_runs_[idx] = dedicated_full_run_;
      return nullptr;
    }
    DCHECK(current_run != nullptr);
    DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
    current_run->SetIsThreadLocal(false);
    current_runs_[idx] = current_run;
    DCHECK(!current_run->IsFull());
    slot_addr = current_run->AllocSlot();
    // Must succeed now with a new run.
    DCHECK(slot_addr != nullptr);
  }
  return slot_addr;
}

void* RosAlloc::AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK_LE(size, bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  void* slot_addr = AllocFromCurrentRunUnlocked(self, idx);
  if (LIKELY(slot_addr != nullptr)) {
    DCHECK(bytes_allocated != nullptr);
    *bytes_allocated = bracket_size;
    // Caller verifies that it is all 0.
  }
  return slot_addr;
}

void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK_LE(size, bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);

  void* slot_addr;

  if (LIKELY(idx < kNumThreadLocalSizeBrackets)) {
    // Use a thread-local run.
    Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
    // Allow invalid since this will always fail the allocation.
    if (kIsDebugBuild) {
      // Need the lock to prevent race conditions.
      MutexLock mu(self, *size_bracket_locks_[idx]);
      CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
    }
    DCHECK(thread_local_run != nullptr);
    DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
    slot_addr = thread_local_run->AllocSlot();
    // The allocation must fail if the run is invalid.
    DCHECK(thread_local_run != dedicated_full_run_ || slot_addr == nullptr)
        << "allocated from an invalid run";
    if (UNLIKELY(slot_addr == nullptr)) {
      // The run got full. Try to free slots.
      DCHECK(thread_local_run->IsFull());
      MutexLock mu(self, *size_bracket_locks_[idx]);
      bool is_all_free_after_merge;
      // This is safe to do for the dedicated_full_run_ since the bitmaps are empty.
      if (thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&is_all_free_after_merge)) {
        DCHECK_NE(thread_local_run, dedicated_full_run_);
        // Some slot got freed. Keep it.
        DCHECK(!thread_local_run->IsFull());
        DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
        if (is_all_free_after_merge) {
          // Check that the bitmap idx is back at 0 if it's all free.
          DCHECK_EQ(thread_local_run->first_search_vec_idx_, 0U);
        }
      } else {
        // No slots got freed. Try to refill the thread-local run.
        DCHECK(thread_local_run->IsFull());
        if (thread_local_run != dedicated_full_run_) {
          thread_local_run->SetIsThreadLocal(false);
          if (kIsDebugBuild) {
            full_runs_[idx].insert(thread_local_run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(thread_local_run)
                        << " into full_runs_[" << std::dec << idx << "]";
            }
          }
          DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
          DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
        }

        thread_local_run = RefillRun(self, idx);
        if (UNLIKELY(thread_local_run == nullptr)) {
          self->SetRosAllocRun(idx, dedicated_full_run_);
          return nullptr;
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
        thread_local_run->SetIsThreadLocal(true);
        self->SetRosAllocRun(idx, thread_local_run);
        DCHECK(!thread_local_run->IsFull());
      }

      DCHECK(thread_local_run != nullptr);
      DCHECK(!thread_local_run->IsFull());
      DCHECK(thread_local_run->IsThreadLocal());
      slot_addr = thread_local_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != nullptr);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  } else {
    // Use the (shared) current run.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    slot_addr = AllocFromCurrentRunUnlocked(self, idx);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  }
  DCHECK(bytes_allocated != nullptr);
  *bytes_allocated = bracket_size;
  // Caller verifies that it is all 0.
  return slot_addr;
}

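// Fast-path summary for AllocFromRun(): brackets below
// kNumThreadLocalSizeBrackets are served from a per-thread run, so AllocSlot()
// needs no lock (the run is owned by the calling thread). Only when that run
// looks full does the thread take the bracket lock, merge the thread-local
// free bit map (where frees of its slots accumulate) back into the alloc bit
// map, and, failing that, swap in a fresh run from RefillRun().
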
size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
  DCHECK_EQ(run->magic_num_, kMagicNum);
  DCHECK_LT(run, ptr);
  DCHECK_LT(ptr, run->End());
  const size_t idx = run->size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  bool run_was_full = false;
  MutexLock mu(self, *size_bracket_locks_[idx]);
  if (kIsDebugBuild) {
    run_was_full = run->IsFull();
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
  }
  if (LIKELY(run->IsThreadLocal())) {
    // It's a thread-local run. Just mark the thread-local free bit map and return.
    DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->MarkThreadLocalFreeBitMap(ptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run);
    }
    // A thread local run will be kept as a thread local even if it's become all free.
    return bracket_size;
  }
  // Free the slot in the run.
  run->FreeSlot(ptr);
  auto* non_full_runs = &non_full_runs_[idx];
  if (run->IsAllFree()) {
    // It has just become completely free. Free the pages of this run.
    std::set<Run*>::iterator pos = non_full_runs->find(run);
    if (pos != non_full_runs->end()) {
      non_full_runs->erase(pos);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
      }
    }
    if (run == current_runs_[idx]) {
      current_runs_[idx] = dedicated_full_run_;
    }
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->ZeroHeader();
    {
      MutexLock mu(self, lock_);
      FreePages(self, run, true);
    }
  } else {
    // It is not completely free. If it wasn't the current run or
    // already in the non-full run set (i.e., it was full) insert it
    // into the non-full run set.
    if (run != current_runs_[idx]) {
      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
      auto pos = non_full_runs->find(run);
      if (pos == non_full_runs->end()) {
        DCHECK(run_was_full);
        DCHECK(full_runs->find(run) != full_runs->end());
        if (kIsDebugBuild) {
          full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run) << " from full_runs_";
          }
        }
        non_full_runs->insert(run);
        DCHECK(!run->IsFull());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
  return bracket_size;
}

std::string RosAlloc::Run::BitMapToStr(uint32_t* bit_map_base, size_t num_vec) {
  std::string bit_map_str;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = bit_map_base[v];
    if (v != num_vec - 1) {
      bit_map_str.append(StringPrintf("%x-", vec));
    } else {
      bit_map_str.append(StringPrintf("%x", vec));
    }
  }
  return bit_map_str;
}

std::string RosAlloc::Run::Dump() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  std::ostringstream stream;
  stream << "RosAlloc Run = " << reinterpret_cast<void*>(this)
         << "{ magic_num=" << static_cast<int>(magic_num_)
         << " size_bracket_idx=" << idx
         << " is_thread_local=" << static_cast<int>(is_thread_local_)
         << " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_)
         << " first_search_vec_idx=" << first_search_vec_idx_
         << " alloc_bit_map=" << BitMapToStr(alloc_bit_map_, num_vec)
         << " bulk_free_bit_map=" << BitMapToStr(BulkFreeBitMap(), num_vec)
         << " thread_local_bit_map=" << BitMapToStr(ThreadLocalFreeBitMap(), num_vec)
         << " }" << std::endl;
  return stream.str();
}

inline void* RosAlloc::Run::AllocSlot() {
  const size_t idx = size_bracket_idx_;
  while (true) {
    if (kIsDebugBuild) {
      // Make sure that no slots leaked; the bitmap should be full for all previous vectors.
      for (size_t i = 0; i < first_search_vec_idx_; ++i) {
        CHECK_EQ(~alloc_bit_map_[i], 0U);
      }
    }
    uint32_t* const alloc_bitmap_ptr = &alloc_bit_map_[first_search_vec_idx_];
    uint32_t ffz1 = __builtin_ffs(~*alloc_bitmap_ptr);
    if (LIKELY(ffz1 != 0)) {
      const uint32_t ffz = ffz1 - 1;
      const uint32_t slot_idx = ffz + first_search_vec_idx_ * sizeof(*alloc_bitmap_ptr) * kBitsPerByte;
      const uint32_t mask = 1U << ffz;
      DCHECK_LT(slot_idx, numOfSlots[idx]) << "out of range";
      // Found an empty slot. Set the bit.
      DCHECK_EQ(*alloc_bitmap_ptr & mask, 0U);
      *alloc_bitmap_ptr |= mask;
      DCHECK_NE(*alloc_bitmap_ptr & mask, 0U);
      uint8_t* slot_addr = reinterpret_cast<uint8_t*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                  << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
      }
      return slot_addr;
    }
    const size_t num_words = RoundUp(numOfSlots[idx], 32) / 32;
    if (first_search_vec_idx_ + 1 >= num_words) {
      DCHECK(IsFull());
      // Already at the last word, return null.
      return nullptr;
    }
    // Increase the index to the next word and try again.
    ++first_search_vec_idx_;
  }
}

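// Bitmap scan example for AllocSlot(): with first_search_vec_idx_ = 1 and
// alloc_bit_map_[1] = 0xFFFF7FFF, ~vec = 0x00008000 and __builtin_ffs()
// returns 16, so ffz = 15 and slot 1 * 32 + 15 = 47 is claimed by setting
// bit 15. first_search_vec_idx_ only advances here and is lowered again by
// the free paths, so fully-allocated words are never rescanned.
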
void RosAlloc::Run::FreeSlot(void* ptr) {
  DCHECK(!IsThreadLocal());
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
      - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracket_size;
  DCHECK_LT(slot_idx, numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK_LT(vec_idx, num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &alloc_bit_map_[vec_idx];
  first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(vec_idx));
  const uint32_t mask = 1U << vec_off;
  DCHECK_NE(*vec & mask, 0U);
  *vec &= ~mask;
  DCHECK_EQ(*vec & mask, 0U);
  // Zero out the memory.
  // TODO: Investigate alternate memset since ptr is guaranteed to be aligned to 16.
  memset(ptr, 0, bracket_size);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::FreeSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out) {
  DCHECK(IsThreadLocal());
  // Free slots in the alloc bit map based on the thread local free bit map.
  const size_t idx = size_bracket_idx_;
  const size_t num_of_slots = numOfSlots[idx];
  const size_t num_vec = RoundUp(num_of_slots, 32) / 32;
  bool changed = false;
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* tl_free_vecp = &ThreadLocalFreeBitMap()[0];
  bool is_all_free_after = true;
  for (size_t v = 0; v < num_vec; v++, vecp++, tl_free_vecp++) {
    uint32_t tl_free_vec = *tl_free_vecp;
    uint32_t vec_before = *vecp;
    uint32_t vec_after;
    if (tl_free_vec != 0) {
      first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(v));
      vec_after = vec_before & ~tl_free_vec;
      *vecp = vec_after;
      changed = true;
      *tl_free_vecp = 0;  // clear the thread local free bit map.
    } else {
      vec_after = vec_before;
    }
    if (vec_after != 0) {
      if (v == num_vec - 1) {
        // The last vector is still all free only if exactly the mask bits (invalid slots) are set.
        is_all_free_after =
            is_all_free_after && GetBitmapLastVectorMask(num_of_slots, num_vec) == vec_after;
      } else {
        is_all_free_after = false;
      }
    }
    DCHECK_EQ(*tl_free_vecp, static_cast<uint32_t>(0));
  }
  *is_all_free_after_out = is_all_free_after;
  // Return true if there was at least a bit set in the thread-local
  // free bit map and at least a bit in the alloc bit map changed.
  return changed;
}

inline void RosAlloc::Run::MergeBulkFreeBitMapIntoAllocBitMap() {
  DCHECK(!IsThreadLocal());
  // Free slots in the alloc bit map based on the bulk free bit map.
  const size_t num_vec = NumberOfBitmapVectors();
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* free_vecp = &BulkFreeBitMap()[0];
  for (size_t v = 0; v < num_vec; v++, vecp++, free_vecp++) {
    uint32_t free_vec = *free_vecp;
    if (free_vec != 0) {
      first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(v));
      *vecp &= ~free_vec;
      *free_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*free_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::UnionBulkFreeBitMapToThreadLocalFreeBitMap() {
  DCHECK(IsThreadLocal());
  // Union the thread local bit map with the bulk free bit map.
  size_t num_vec = NumberOfBitmapVectors();
  uint32_t* to_vecp = &ThreadLocalFreeBitMap()[0];
  uint32_t* from_vecp = &BulkFreeBitMap()[0];
  for (size_t v = 0; v < num_vec; v++, to_vecp++, from_vecp++) {
    uint32_t from_vec = *from_vecp;
    if (from_vec != 0) {
      *to_vecp |= from_vec;
      *from_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*from_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::MarkThreadLocalFreeBitMap(void* ptr) {
  DCHECK(IsThreadLocal());
  MarkFreeBitMapShared(ptr, ThreadLocalFreeBitMap(), "MarkThreadLocalFreeBitMap");
}

inline size_t RosAlloc::Run::MarkBulkFreeBitMap(void* ptr) {
  return MarkFreeBitMapShared(ptr, BulkFreeBitMap(), "MarkBulkFreeBitMap");
}

inline size_t RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
                                                  const char* caller_name) {
  const uint8_t idx = size_bracket_idx_;
  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
      - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
  const size_t bracket_size = bracketSizes[idx];
  memset(ptr, 0, bracket_size);
  DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracket_size;
  DCHECK_LT(slot_idx, numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = NumberOfBitmapVectors();
    DCHECK_LT(vec_idx, num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &free_bit_map_base[vec_idx];
  const uint32_t mask = 1U << vec_off;
  DCHECK_EQ(*vec & mask, 0U);
  *vec |= mask;
  DCHECK_NE(*vec & mask, 0U);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : 0x" << std::hex
              << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
  return bracket_size;
}

inline uint32_t RosAlloc::Run::GetBitmapLastVectorMask(size_t num_slots, size_t num_vec) {
  const size_t kBitsPerVec = 32;
  DCHECK_GE(num_vec * kBitsPerVec, num_slots);
  size_t remain = num_vec * kBitsPerVec - num_slots;
  DCHECK_NE(remain, kBitsPerVec);
  return ((1U << remain) - 1) << (kBitsPerVec - remain);
}

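// Worked example: num_slots = 42 and num_vec = 2 give remain = 64 - 42 = 22,
// so the returned mask has the top 22 bits of the last vector set. Those bits
// correspond to slot indices 42..63, which don't exist in the run; they are
// pre-set in the alloc bit map (see SetAllocBitMapBitsForInvalidSlots()) so
// the allocation scan never hands them out.
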
inline bool RosAlloc::Run::IsAllFree() {
  const uint8_t idx = size_bracket_idx_;
  const size_t num_slots = numOfSlots[idx];
  const size_t num_vec = NumberOfBitmapVectors();
  DCHECK_NE(num_vec, 0U);
  // Check the last vector after the loop since it uses a special case for the masked bits.
  for (size_t v = 0; v < num_vec - 1; v++) {
    uint32_t vec = alloc_bit_map_[v];
    if (vec != 0) {
      return false;
    }
  }
  // Make sure the last word equals the mask; all bits other than the mask bits must be 0.
  return alloc_bit_map_[num_vec - 1] == GetBitmapLastVectorMask(num_slots, num_vec);
}

inline bool RosAlloc::Run::IsFull() {
  const size_t num_vec = NumberOfBitmapVectors();
  for (size_t v = 0; v < num_vec; ++v) {
    if (~alloc_bit_map_[v] != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsBulkFreeBitmapClean() {
  const size_t num_vec = NumberOfBitmapVectors();
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = BulkFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsThreadLocalFreeBitmapClean() {
  const size_t num_vec = NumberOfBitmapVectors();
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = ThreadLocalFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline void RosAlloc::Run::SetAllocBitMapBitsForInvalidSlots() {
  const size_t idx = size_bracket_idx_;
  const size_t num_slots = numOfSlots[idx];
  const size_t num_vec = RoundUp(num_slots, 32) / 32;
  DCHECK_NE(num_vec, 0U);
  // Make sure to set the bits at the end of the bitmap so that we don't allocate there since they
  // don't represent valid slots.
  alloc_bit_map_[num_vec - 1] |= GetBitmapLastVectorMask(num_slots, num_vec);
}

1096inline void RosAlloc::Run::ZeroHeader() {
Ian Rogers13735952014-10-08 12:43:28 -07001097 const uint8_t idx = size_bracket_idx_;
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001098 memset(this, 0, headerSizes[idx]);
1099}
1100
1101inline void RosAlloc::Run::ZeroData() {
Ian Rogers13735952014-10-08 12:43:28 -07001102 const uint8_t idx = size_bracket_idx_;
1103 uint8_t* slot_begin = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001104 memset(slot_begin, 0, numOfSlots[idx] * bracketSizes[idx]);
1105}
1106
1107inline void RosAlloc::Run::FillAllocBitMap() {
1108 size_t num_vec = NumberOfBitmapVectors();
1109 memset(alloc_bit_map_, 0xFF, sizeof(uint32_t) * num_vec);
1110 first_search_vec_idx_ = num_vec - 1; // No free bits in any of the bitmap words.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001111}
1112
void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                                    void* arg) {
  size_t idx = size_bracket_idx_;
  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  DCHECK_EQ(slot_base + num_slots * bracket_size, reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize);
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK_GE(num_slots, slots);
    uint32_t vec = alloc_bit_map_[v];
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
      if (is_allocated) {
        handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
      } else {
        handler(slot_addr, slot_addr + bracket_size, 0, arg);
      }
    }
  }
}

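// A minimal caller sketch for InspectAllSlots() (the handler below is
// hypothetical, not a call site in this file): count the live slots of a run,
// using the convention above that free slots report used_bytes == 0.
//
//   static void CountLiveSlots(void* start, void* end, size_t used_bytes, void* arg) {
//     if (used_bytes > 0) {
//       ++(*reinterpret_cast<size_t*>(arg));
//     }
//   }
//   size_t live_slots = 0;
//   run->InspectAllSlots(CountLiveSlots, &live_slots);
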
// If true, read the page map entries in BulkFree() without using the
// lock for better performance, assuming that the existence of an
// allocated chunk/pointer being freed in BulkFree() guarantees that
// the page map entry won't change. Enabled for now.
static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = true;

size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
  size_t freed_bytes = 0;
  if (false) {
    // Used only to test Free() as GC uses only BulkFree().
    for (size_t i = 0; i < num_ptrs; ++i) {
      freed_bytes += FreeInternal(self, ptrs[i]);
    }
    return freed_bytes;
  }

  WriterMutexLock wmu(self, bulk_free_lock_);

  // First mark slots to free in the bulk free bit map without locking the
  // size bracket locks. On host, unordered_set is faster than vector + flag.
#ifdef HAVE_ANDROID_OS
  std::vector<Run*> runs;
#else
  std::unordered_set<Run*, hash_run, eq_run> runs;
#endif
  for (size_t i = 0; i < num_ptrs; i++) {
    void* ptr = ptrs[i];
    DCHECK_LE(base_, ptr);
    DCHECK_LT(ptr, base_ + footprint_);
    size_t pm_idx = RoundDownToPageMapIndex(ptr);
    Run* run = nullptr;
    if (kReadPageMapEntryWithoutLockInBulkFree) {
      // Read the page map entries without locking the lock.
      uint8_t page_map_entry = page_map_[pm_idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                  << std::dec << pm_idx
                  << ", page_map_entry=" << static_cast<int>(page_map_entry);
      }
      if (LIKELY(page_map_entry == kPageMapRun)) {
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
        size_t pi = pm_idx;
        // Find the beginning of the run.
        do {
          --pi;
          DCHECK_LT(pi, capacity_ / kPageSize);
        } while (page_map_[pi] != kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
      } else if (page_map_entry == kPageMapLargeObject) {
        MutexLock mu(self, lock_);
        freed_bytes += FreePages(self, ptr, false);
        continue;
      } else {
        LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
      }
    } else {
      // Read the page map entries with a lock.
      MutexLock mu(self, lock_);
      DCHECK_LT(pm_idx, page_map_size_);
      uint8_t page_map_entry = page_map_[pm_idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                  << std::dec << pm_idx
                  << ", page_map_entry=" << static_cast<int>(page_map_entry);
      }
      if (LIKELY(page_map_entry == kPageMapRun)) {
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
        size_t pi = pm_idx;
        // Find the beginning of the run.
        do {
          --pi;
          DCHECK_LT(pi, capacity_ / kPageSize);
        } while (page_map_[pi] != kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
      } else if (page_map_entry == kPageMapLargeObject) {
        freed_bytes += FreePages(self, ptr, false);
        continue;
      } else {
        LOG(FATAL) << "Unreachable - page map type: " << page_map_entry;
      }
    }
    DCHECK(run != nullptr);
    DCHECK_EQ(run->magic_num_, kMagicNum);
    // Set the bit in the bulk free bit map.
    freed_bytes += run->MarkBulkFreeBitMap(ptr);
#ifdef HAVE_ANDROID_OS
    if (!run->to_be_bulk_freed_) {
      run->to_be_bulk_freed_ = true;
      runs.push_back(run);
    }
#else
    runs.insert(run);
#endif
  }

  // Now, iterate over the affected runs and update the alloc bit map
  // based on the bulk free bit map (for non-thread-local runs) and
  // union the bulk free bit map into the thread-local free bit map
  // (for thread-local runs.)
  for (Run* run : runs) {
#ifdef HAVE_ANDROID_OS
    DCHECK(run->to_be_bulk_freed_);
    run->to_be_bulk_freed_ = false;
#endif
    size_t idx = run->size_bracket_idx_;
    MutexLock mu(self, *size_bracket_locks_[idx]);
    if (run->IsThreadLocal()) {
      DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
      DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
      run->UnionBulkFreeBitMapToThreadLocalFreeBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(run);
      }
      DCHECK(run->IsThreadLocal());
      // A thread local run will be kept as a thread local even if
      // it's become all free.
    } else {
      bool run_was_full = run->IsFull();
      run->MergeBulkFreeBitMapIntoAllocBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run);
      }
      // Check if the run should be moved to non_full_runs_ or
      // free_page_runs_.
      auto* non_full_runs = &non_full_runs_[idx];
      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : NULL;
      if (run->IsAllFree()) {
        // It has just become completely free. Free the pages of the
        // run.
        bool run_was_current = run == current_runs_[idx];
        if (run_was_current) {
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          // If it was a current run, reuse it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only.)
          if (kIsDebugBuild) {
            std::unordered_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
            DCHECK(pos != full_runs->end());
            full_runs->erase(pos);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
            DCHECK(full_runs->find(run) == full_runs->end());
          }
        } else {
          // If it was in a non full run set, remove it from the set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
          non_full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " from non_full_runs_";
          }
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
        }
        if (!run_was_current) {
          run->ZeroHeader();
          MutexLock mu(self, lock_);
          FreePages(self, run, true);
        }
      } else {
        // It is not completely free. If it wasn't the current run or
        // already in the non-full run set (i.e., it was full), insert
        // it into the non-full run set.
        if (run == current_runs_[idx]) {
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          DCHECK(full_runs->find(run) == full_runs->end());
          // If it was a current run, keep it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only) and insert into the non-full run set.
          DCHECK(full_runs->find(run) != full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          if (kIsDebugBuild) {
            full_runs->erase(run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
          }
          non_full_runs->insert(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " into non_full_runs_[" << std::dec << idx << "]";
          }
        } else {
          // It was not full, so leave it in the non-full run set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
        }
      }
    }
  }
  return freed_bytes;
}

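// A minimal GC-side usage sketch for BulkFree() (the names below are
// illustrative, not call sites in this file): the collector batches dead
// object pointers and frees them in one call, so per-run bookkeeping is
// deferred until every pointer has been marked in the bulk free bit maps.
//
//   std::vector<void*> dead;  // filled by the sweep phase
//   size_t freed = rosalloc->BulkFree(Thread::Current(), dead.data(), dead.size());
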
std::string RosAlloc::DumpPageMap() {
  std::ostringstream stream;
  stream << "RosAlloc PageMap: " << std::endl;
  lock_.AssertHeld(Thread::Current());
  size_t end = page_map_size_;
  FreePageRun* curr_fpr = NULL;
  size_t curr_fpr_size = 0;
  size_t remaining_curr_fpr_size = 0;
  size_t num_running_empty_pages = 0;
  for (size_t i = 0; i < end; ++i) {
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
        // Fall-through.
      case kPageMapEmpty: {
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
          // Encountered a fresh free page run.
          DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
          DCHECK(fpr->IsFree());
          DCHECK(curr_fpr == NULL);
          DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
          curr_fpr = fpr;
          curr_fpr_size = fpr->ByteSize(this);
          DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
          remaining_curr_fpr_size = curr_fpr_size - kPageSize;
          stream << "[" << i << "]=" << (pm == kPageMapReleased ? "Released" : "Empty")
                 << " (FPR start) fpr_size=" << curr_fpr_size
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = NULL;
            curr_fpr_size = 0;
          }
          stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
          DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
        } else {
          // Still part of the current free page run.
          DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
          DCHECK(curr_fpr != NULL && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
          DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
          DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
          remaining_curr_fpr_size -= kPageSize;
          stream << "[" << i << "]=Empty (FPR part)"
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = NULL;
            curr_fpr_size = 0;
          }
        }
        num_running_empty_pages++;
        break;
      }
      case kPageMapLargeObject: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (start)" << std::endl;
        break;
      }
      case kPageMapLargeObjectPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (part)" << std::endl;
        break;
      case kPageMapRun: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        size_t idx = run->size_bracket_idx_;
        stream << "[" << i << "]=Run (start)"
               << " idx=" << idx
               << " numOfPages=" << numOfPages[idx]
               << " is_thread_local=" << run->is_thread_local_
               << " is_all_free=" << (run->IsAllFree() ? 1 : 0)
               << std::endl;
        break;
      }
      case kPageMapRunPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Run (part)" << std::endl;
        break;
      default:
        stream << "[" << i << "]=Unrecognizable page map type: " << pm;
        break;
    }
  }
  return stream.str();
}

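// The dump above emits one line per page; a plausible excerpt (indices,
// sizes, and addresses are illustrative only) looks like:
//
//   RosAlloc PageMap:
//   [0]=Run (start) idx=4 numOfPages=2 is_thread_local=1 is_all_free=0
//   [1]=Run (part)
//   [2]=Large (start)
//   [3]=Empty (FPR start) fpr_size=8192 remaining_fpr_size=4096
//   curr_fpr=0x...
//   [4]=Empty (FPR part) remaining_fpr_size=0
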
size_t RosAlloc::UsableSize(void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  MutexLock mu(Thread::Current(), lock_);
  switch (page_map_[pm_idx]) {
    case kPageMapReleased:
      // Fall-through.
    case kPageMapEmpty:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
                 << std::hex << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapLargeObject: {
      size_t num_pages = 1;
      size_t idx = pm_idx + 1;
      size_t end = page_map_size_;
      while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
        num_pages++;
        idx++;
      }
      return num_pages * kPageSize;
    }
    case kPageMapLargeObjectPart:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
                 << std::hex << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapRun:
    case kPageMapRunPart: {
      // Find the beginning of the run.
      while (page_map_[pm_idx] != kPageMapRun) {
        pm_idx--;
        DCHECK_LT(pm_idx, capacity_ / kPageSize);
      }
      DCHECK_EQ(page_map_[pm_idx], kPageMapRun);
      Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      DCHECK_EQ(run->magic_num_, kMagicNum);
      size_t idx = run->size_bracket_idx_;
      size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
          - (reinterpret_cast<uint8_t*>(run) + headerSizes[idx]);
      DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
      return IndexToBracketSize(idx);
    }
    default: {
      LOG(FATAL) << "Unreachable - page map type: " << page_map_[pm_idx];
      break;
    }
  }
  return 0;
}

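// Two illustrative cases for UsableSize(), following the bracket table set up
// in Initialize() below: a 100-byte allocation is served from the 112-byte
// bracket, so its usable size is 112; a 3 KB allocation exceeds the largest
// (2 KB) bracket, occupies one page, and reports 4096.
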
bool RosAlloc::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  FreePageRun* last_free_page_run;
  DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
  auto it = free_page_runs_.rbegin();
  if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
    // Remove the last free page run, if any.
    DCHECK(last_free_page_run->IsFree());
    DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
    DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
    DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
    free_page_runs_.erase(last_free_page_run);
    size_t decrement = last_free_page_run->ByteSize(this);
    size_t new_footprint = footprint_ - decrement;
    DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
    size_t new_num_of_pages = new_footprint / kPageSize;
    DCHECK_GE(page_map_size_, new_num_of_pages);
    // Zero out the tail of the page map.
    uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
    uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
    DCHECK_LE(madvise_begin, page_map_mem_map_->End());
    size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
    if (madvise_size > 0) {
      DCHECK_ALIGNED(madvise_begin, kPageSize);
      DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
      if (!kMadviseZeroes) {
        memset(madvise_begin, 0, madvise_size);
      }
      CHECK_EQ(madvise(madvise_begin, madvise_size, MADV_DONTNEED), 0);
    }
    if (madvise_begin - zero_begin) {
      memset(zero_begin, 0, madvise_begin - zero_begin);
    }
    page_map_size_ = new_num_of_pages;
    free_page_run_size_map_.resize(new_num_of_pages);
    DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
    art_heap_rosalloc_morecore(this, -(static_cast<intptr_t>(decrement)));
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
                << footprint_ << " to " << new_footprint;
    }
    DCHECK_LT(new_footprint, footprint_);
    DCHECK_LT(new_footprint, capacity_);
    footprint_ = new_footprint;
    return true;
  }
  return false;
}

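// A worked example of the Trim() arithmetic, with illustrative numbers: if
// footprint_ is 1 MB and the address-ordered last free page run covers the
// final 256 KB, the run is erased, morecore is called with -262144, and the
// footprint drops to 768 KB; page map entries past the new 192-page boundary
// are zeroed (and madvised away where page-aligned).
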
void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                          void* arg) {
  // Note: no need to use this to release pages as we already do so in FreePages().
  if (handler == NULL) {
    return;
  }
  MutexLock mu(Thread::Current(), lock_);
  size_t pm_end = page_map_size_;
  size_t i = 0;
  while (i < pm_end) {
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
        // Fall-through.
      case kPageMapEmpty: {
        // The start of a free page run.
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
        size_t fpr_size = fpr->ByteSize(this);
        DCHECK(IsAligned<kPageSize>(fpr_size));
        void* start = fpr;
        if (kIsDebugBuild) {
          // In the debug build, the first page of a free page run
          // contains a magic number for debugging. Exclude it.
          start = reinterpret_cast<uint8_t*>(fpr) + kPageSize;
        }
        void* end = reinterpret_cast<uint8_t*>(fpr) + fpr_size;
        handler(start, end, 0, arg);
        size_t num_pages = fpr_size / kPageSize;
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK(IsFreePage(j));
          }
        }
        i += fpr_size / kPageSize;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObject: {
        // The start of a large object.
        size_t num_pages = 1;
        size_t idx = i + 1;
        while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
          num_pages++;
          idx++;
        }
        void* start = base_ + i * kPageSize;
        void* end = base_ + (i + num_pages) * kPageSize;
        size_t used_bytes = num_pages * kPageSize;
        handler(start, end, used_bytes, arg);
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
      case kPageMapRun: {
        // The start of a run.
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        // The dedicated full run doesn't contain any real allocations and lives outside the page
        // map, so its slots are never visited here.
        run->InspectAllSlots(handler, arg);
        size_t num_pages = numOfPages[run->size_bracket_idx_];
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapRunPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapRunPart:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
    }
  }
}

size_t RosAlloc::Footprint() {
  MutexLock mu(Thread::Current(), lock_);
  return footprint_;
}

size_t RosAlloc::FootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return capacity_;
}

void RosAlloc::SetFootprintLimit(size_t new_capacity) {
  MutexLock mu(Thread::Current(), lock_);
  DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
  // Only growing is supported here; shrinking the footprint happens via Trim().
  if (capacity_ < new_capacity) {
    CHECK_LE(new_capacity, max_capacity_);
    capacity_ = new_capacity;
    VLOG(heap) << "new capacity=" << capacity_;
  }
}

void RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
  Thread* self = Thread::Current();
  // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
  ReaderMutexLock rmu(self, bulk_free_lock_);
  for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
    CHECK(thread_local_run != nullptr);
    // Invalid means already revoked.
    DCHECK(thread_local_run->IsThreadLocal());
    if (thread_local_run != dedicated_full_run_) {
      thread->SetRosAllocRun(idx, dedicated_full_run_);
      DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
      // Note the thread local run may not be full here.
      bool dont_care;
      thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&dont_care);
      thread_local_run->SetIsThreadLocal(false);
      thread_local_run->MergeBulkFreeBitMapIntoAllocBitMap();
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      RevokeRun(self, idx, thread_local_run);
    }
  }
}

void RosAlloc::RevokeRun(Thread* self, size_t idx, Run* run) {
  size_bracket_locks_[idx]->AssertHeld(self);
  DCHECK(run != dedicated_full_run_);
  if (run->IsFull()) {
    if (kIsDebugBuild) {
      full_runs_[idx].insert(run);
      DCHECK(full_runs_[idx].find(run) != full_runs_[idx].end());
      if (kTraceRosAlloc) {
        LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run)
                  << " into full_runs_[" << std::dec << idx << "]";
      }
    }
  } else if (run->IsAllFree()) {
    run->ZeroHeader();
    MutexLock mu(self, lock_);
    FreePages(self, run, true);
  } else {
    non_full_runs_[idx].insert(run);
    DCHECK(non_full_runs_[idx].find(run) != non_full_runs_[idx].end());
    if (kTraceRosAlloc) {
      LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run)
                << " into non_full_runs_[" << std::dec << idx << "]";
    }
  }
}

void RosAlloc::RevokeThreadUnsafeCurrentRuns() {
  // Revoke the current runs which share the same idx as thread local runs.
  Thread* self = Thread::Current();
  for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
    MutexLock mu(self, *size_bracket_locks_[idx]);
    if (current_runs_[idx] != dedicated_full_run_) {
      RevokeRun(self, idx, current_runs_[idx]);
      current_runs_[idx] = dedicated_full_run_;
    }
  }
}

void RosAlloc::RevokeAllThreadLocalRuns() {
  // This is called when a mutator thread won't allocate, such as at
  // Zygote creation time or during a GC pause.
  MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
  MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalRuns(thread);
  }
  RevokeThreadUnsafeCurrentRuns();
}

void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
    ReaderMutexLock rmu(self, bulk_free_lock_);
    for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
      MutexLock mu(self, *size_bracket_locks_[idx]);
      Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
      DCHECK(thread_local_run == nullptr || thread_local_run == dedicated_full_run_);
    }
  }
}

void RosAlloc::AssertAllThreadLocalRunsAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* t : thread_list) {
      AssertThreadLocalRunsAreRevoked(t);
    }
    for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
      MutexLock mu(self, *size_bracket_locks_[idx]);
      CHECK_EQ(current_runs_[idx], dedicated_full_run_);
    }
  }
}

void RosAlloc::Initialize() {
  // bracketSizes.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 16 * (i + 1);
    } else if (i == kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 1 * KB;
    } else {
      DCHECK_EQ(i, kNumOfSizeBrackets - 1);
      bracketSizes[i] = 2 * KB;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
    }
  }
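  // As a concrete illustration, the resulting table is: brackets 0-31 hold
  // 16, 32, 48, ..., 512 byte slots, bracket 32 holds 1 KB slots, and
  // bracket 33 holds 2 KB slots.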
  // numOfPages.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < 4) {
      numOfPages[i] = 1;
    } else if (i < 8) {
      numOfPages[i] = 2;
    } else if (i < 16) {
      numOfPages[i] = 4;
    } else if (i < 32) {
      numOfPages[i] = 8;
    } else if (i == 32) {
      DCHECK_EQ(i, kNumOfSizeBrackets - 2);
      numOfPages[i] = 16;
    } else {
      DCHECK_EQ(i, kNumOfSizeBrackets - 1);
      numOfPages[i] = 32;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
    }
  }
  // Compute numOfSlots and the header sizes and bit map offsets.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_t bracket_size = bracketSizes[i];
    size_t run_size = kPageSize * numOfPages[i];
    size_t max_num_of_slots = run_size / bracket_size;
    // Compute the actual number of slots by taking the header and
    // alignment into account.
    size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint32_t));
    DCHECK_EQ(fixed_header_size, static_cast<size_t>(8));
    size_t header_size = 0;
    size_t bulk_free_bit_map_offset = 0;
    size_t thread_local_free_bit_map_offset = 0;
    size_t num_of_slots = 0;
    // Search for the maximum number of slots that allows enough space
    // for the header (including the bit maps.)
    for (int s = max_num_of_slots; s >= 0; s--) {
      size_t tmp_slots_size = bracket_size * s;
      size_t tmp_bit_map_size = RoundUp(s, sizeof(uint32_t) * kBitsPerByte) / kBitsPerByte;
      size_t tmp_bulk_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_bulk_free_bit_map_off = fixed_header_size + tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_off = tmp_bulk_free_bit_map_off + tmp_bulk_free_bit_map_size;
      size_t tmp_unaligned_header_size = tmp_thread_local_free_bit_map_off + tmp_thread_local_free_bit_map_size;
      // Align up the unaligned header size. bracket_size may not be a power of two.
      size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
          tmp_unaligned_header_size :
          tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
      DCHECK_EQ(tmp_header_size % bracket_size, static_cast<size_t>(0));
      DCHECK_EQ(tmp_header_size % 8, static_cast<size_t>(0));
      if (tmp_slots_size + tmp_header_size <= run_size) {
        // Found the right number of slots, that is, there was enough
        // space for the header (including the bit maps.)
        num_of_slots = s;
        header_size = tmp_header_size;
        bulk_free_bit_map_offset = tmp_bulk_free_bit_map_off;
        thread_local_free_bit_map_offset = tmp_thread_local_free_bit_map_off;
        break;
      }
    }
    DCHECK(num_of_slots > 0 && header_size > 0 && bulk_free_bit_map_offset > 0);
    // Add the padding for the alignment remainder.
    header_size += run_size % bracket_size;
    DCHECK_EQ(header_size + num_of_slots * bracket_size, run_size);
    numOfSlots[i] = num_of_slots;
    headerSizes[i] = header_size;
    bulkFreeBitMapOffsets[i] = bulk_free_bit_map_offset;
    threadLocalFreeBitMapOffsets[i] = thread_local_free_bit_map_offset;
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
                << ", headerSizes[" << i << "]=" << headerSizes[i]
                << ", bulkFreeBitMapOffsets[" << i << "]=" << bulkFreeBitMapOffsets[i]
                << ", threadLocalFreeBitMapOffsets[" << i << "]=" << threadLocalFreeBitMapOffsets[i];
    }
  }
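  // A worked instance of the search above, for bracket 0 (16-byte slots, one
  // 4 KB page): at s = 249 each of the three bit maps takes
  // RoundUp(249, 32) / 8 = 32 bytes, the unaligned header is 8 + 3 * 32 = 104
  // bytes and aligns up to 112 (a multiple of 16), and 112 + 249 * 16 == 4096
  // fits exactly, while s = 250 would overshoot the run by 16 bytes.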
  if (kIsDebugBuild) {
    dedicated_full_run_->magic_num_ = kMagicNum;
  }
  // It doesn't matter which size bracket we use since the main goal is to have the allocation
  // fail 100% of the time you attempt to allocate into the dedicated full run.
  dedicated_full_run_->size_bracket_idx_ = 0;
  // Fill the alloc bitmap so nobody can successfully allocate from it.
  dedicated_full_run_->FillAllocBitMap();
  dedicated_full_run_->SetIsThreadLocal(true);
}

void RosAlloc::BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
  *bytes_allocated += used_bytes;
}

void RosAlloc::ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
  ++(*objects_allocated);
}

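// These two callbacks pair with InspectAll(); a typical accounting pass (a
// sketch, not a call site in this file) looks like:
//
//   size_t bytes = 0;
//   rosalloc->InspectAll(RosAlloc::BytesAllocatedCallback, &bytes);
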
void RosAlloc::Verify() {
  Thread* self = Thread::Current();
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
      << "The mutator lock isn't exclusively locked at " << __PRETTY_FUNCTION__;
  MutexLock mu(self, *Locks::thread_list_lock_);
  ReaderMutexLock rmu(self, bulk_free_lock_);
  std::vector<Run*> runs;
  {
    MutexLock mu(self, lock_);
    size_t pm_end = page_map_size_;
    size_t i = 0;
    while (i < pm_end) {
      uint8_t pm = page_map_[i];
      switch (pm) {
        case kPageMapReleased:
          // Fall-through.
        case kPageMapEmpty: {
          // The start of a free page run.
          FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
          DCHECK_EQ(fpr->magic_num_, kMagicNumFree);
          CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
              << "An empty page must belong to the free page run set";
          size_t fpr_size = fpr->ByteSize(this);
          CHECK(IsAligned<kPageSize>(fpr_size))
              << "A free page run size isn't page-aligned : " << fpr_size;
          size_t num_pages = fpr_size / kPageSize;
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "A free page run size must be > 0 : " << fpr_size;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK(IsFreePage(j))
                << "A mismatch between the page map table for kPageMapEmpty "
                << " at page index " << j
                << " and the free page run size : page index range : "
                << i << " to " << (i + num_pages) << std::endl << DumpPageMap();
          }
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObject: {
          // The start of a large object.
          size_t num_pages = 1;
          size_t idx = i + 1;
          while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
            num_pages++;
            idx++;
          }
          void* start = base_ + i * kPageSize;
          mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
          size_t obj_size = obj->SizeOf();
          CHECK_GT(obj_size, kLargeSizeThreshold)
              << "A rosalloc large object size must be > " << kLargeSizeThreshold;
          CHECK_EQ(num_pages, RoundUp(obj_size, kPageSize) / kPageSize)
              << "A rosalloc large object size " << obj_size
              << " does not match the page map table " << (num_pages * kPageSize)
              << std::endl << DumpPageMap();
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObjectPart:
          LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
          break;
        case kPageMapRun: {
          // The start of a run.
          Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
          DCHECK_EQ(run->magic_num_, kMagicNum);
          size_t idx = run->size_bracket_idx_;
          CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
          size_t num_pages = numOfPages[idx];
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "Run size must be > 0 : " << num_pages;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK_EQ(page_map_[j], kPageMapRunPart)
                << "A mismatch between the page map table for kPageMapRunPart "
                << " at page index " << j
                << " and the run size : page index range " << i << " to " << (i + num_pages)
                << std::endl << DumpPageMap();
          }
          // Don't verify the dedicated_full_run_ since it doesn't have any real allocations.
          runs.push_back(run);
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapRunPart:
          // Fall-through.
        default:
          LOG(FATAL) << "Unreachable - page map type: " << pm << std::endl << DumpPageMap();
          break;
      }
    }
  }
  std::list<Thread*> threads = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : threads) {
    for (size_t i = 0; i < kNumThreadLocalSizeBrackets; ++i) {
      MutexLock mu(self, *size_bracket_locks_[i]);
      Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
      CHECK(thread_local_run != nullptr);
      CHECK(thread_local_run->IsThreadLocal());
      CHECK(thread_local_run == dedicated_full_run_ ||
            thread_local_run->size_bracket_idx_ == i);
    }
  }
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    MutexLock mu(self, *size_bracket_locks_[i]);
    Run* current_run = current_runs_[i];
    CHECK(current_run != nullptr);
    if (current_run != dedicated_full_run_) {
      // The dedicated full run is currently marked as thread local.
      CHECK(!current_run->IsThreadLocal());
      CHECK_EQ(current_run->size_bracket_idx_, i);
    }
  }
  // Call Verify() here for the lock order.
  for (auto& run : runs) {
    run->Verify(self, this);
  }
}

void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
  DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
  const size_t idx = size_bracket_idx_;
  CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
  const size_t num_slots = numOfSlots[idx];
  const size_t num_vec = RoundUp(num_slots, 32) / 32;
  CHECK_GT(num_vec, 0U);
  size_t bracket_size = IndexToBracketSize(idx);
  CHECK_EQ(slot_base + num_slots * bracket_size,
           reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize)
      << "Mismatch in the end address of the run " << Dump();
  // Check that the bulk free bitmap is clean. It's only used during BulkFree().
  CHECK(IsBulkFreeBitmapClean()) << "The bulk free bit map isn't clean " << Dump();
  uint32_t last_word_mask = GetBitmapLastVectorMask(num_slots, num_vec);
  // Make sure all the bits at the end of the run are set so that we don't allocate there.
  CHECK_EQ(alloc_bit_map_[num_vec - 1] & last_word_mask, last_word_mask);
  // Ensure that the first bitmap index is valid.
  CHECK_LT(first_search_vec_idx_, num_vec);
  // Check the thread local runs, the current runs, and the run sets.
  if (IsThreadLocal()) {
    // If it's a thread local run, then it must be pointed to by an owner thread.
    bool owner_found = false;
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
      Thread* thread = *it;
      for (size_t i = 0; i < kNumThreadLocalSizeBrackets; i++) {
        MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
        Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
        if (thread_local_run == this) {
          CHECK(!owner_found)
              << "A thread local run has more than one owner thread " << Dump();
          CHECK_EQ(i, idx)
              << "A mismatching size bracket index in a thread local run " << Dump();
          owner_found = true;
        }
      }
    }
    CHECK(owner_found) << "A thread local run has no owner thread " << Dump();
  } else {
    // If it's not thread local, check that the thread local free bitmap is clean.
    CHECK(IsThreadLocalFreeBitmapClean())
        << "A non-thread-local run's thread local free bitmap isn't clean "
        << Dump();
    // Check if it's a current run for the size bracket.
    bool is_current_run = false;
    for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
      MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
      Run* current_run = rosalloc->current_runs_[i];
      if (idx == i) {
        if (this == current_run) {
          is_current_run = true;
        }
      } else {
        // If the size bucket index does not match, then it must not
        // be a current run.
        CHECK_NE(this, current_run)
            << "A current run points to a run with a wrong size bracket index " << Dump();
      }
    }
    // If it's neither a thread-local nor a current run, then it must be
    // in a run set.
    if (!is_current_run) {
      MutexLock mu(self, rosalloc->lock_);
      auto& non_full_runs = rosalloc->non_full_runs_[idx];
      // If it's all free, it must be a free page run rather than a run.
      CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump();
      if (!IsFull()) {
        // If it's not full, it must be in the non-full run set.
        CHECK(non_full_runs.find(this) != non_full_runs.end())
            << "A non-full run isn't in the non-full run set " << Dump();
      } else {
        // If it's full, it must be in the full run set (debug build only.)
        if (kIsDebugBuild) {
          auto& full_runs = rosalloc->full_runs_[idx];
          CHECK(full_runs.find(this) != full_runs.end())
              << "A full run isn't in the full run set " << Dump();
        }
      }
    }
  }
  // Check each slot.
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK_GE(num_slots, slots) << "Out of bounds";
    uint32_t vec = alloc_bit_map_[v];
    uint32_t thread_local_free_vec = ThreadLocalFreeBitMap()[v];
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      // If a thread local run, slots may be marked freed in the
      // thread local free bitmap.
      bool is_thread_local_freed = IsThreadLocal() && ((thread_local_free_vec >> i) & 0x1) != 0;
      if (is_allocated && !is_thread_local_freed) {
        uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
        mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
        size_t obj_size = obj->SizeOf();
        CHECK_LE(obj_size, kLargeSizeThreshold)
            << "A run slot contains a large object " << Dump();
        CHECK_EQ(SizeToIndex(obj_size), idx)
            << PrettyTypeOf(obj) << " "
            << "obj_size=" << obj_size << ", idx=" << idx << " "
            << "A run slot contains an object with wrong size " << Dump();
      }
    }
  }
}

size_t RosAlloc::ReleasePages() {
  VLOG(heap) << "RosAlloc::ReleasePages()";
  DCHECK(!DoesReleaseAllPages());
  Thread* self = Thread::Current();
  size_t reclaimed_bytes = 0;
  size_t i = 0;
  // Check the page map size which might have changed due to grow/shrink.
  while (i < page_map_size_) {
    // Reading the page map without a lock is racy but the race is benign since it should only
    // result in occasionally not releasing pages which we could release.
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
        // Fall through.
      case kPageMapEmpty: {
        // This is currently the start of a free page run.
        // Acquire the lock to prevent other threads racing in and modifying the page map.
        MutexLock mu(self, lock_);
        // Check that it's still empty after we acquired the lock since another thread could have
        // raced in and placed an allocation here.
        if (IsFreePage(i)) {
          // Free page runs can start with a released page if we coalesced a released page free
          // page run with an empty page run.
          FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
          // There is a race condition where FreePage can coalesce fpr with the previous
          // free page run before we acquire lock_. In that case free_page_runs_.find will not find
          // a run starting at fpr. To handle this race, we skip reclaiming the page range and go
          // to the next page.
          if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
            size_t fpr_size = fpr->ByteSize(this);
            DCHECK(IsAligned<kPageSize>(fpr_size));
            uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
            reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
            size_t pages = fpr_size / kPageSize;
            CHECK_GT(pages, 0U) << "Infinite loop probable";
            i += pages;
            DCHECK_LE(i, page_map_size_);
            break;
          }
        }
        FALLTHROUGH_INTENDED;
      }
      case kPageMapLargeObject:      // Fall through.
      case kPageMapLargeObjectPart:  // Fall through.
      case kPageMapRun:              // Fall through.
      case kPageMapRunPart:          // Fall through.
        ++i;
        break;  // Skip.
      default:
        LOG(FATAL) << "Unreachable - page map type: " << pm;
        break;
    }
  }
  return reclaimed_bytes;
}

size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) {
  DCHECK_ALIGNED(start, kPageSize);
  DCHECK_ALIGNED(end, kPageSize);
  DCHECK_LT(start, end);
  if (kIsDebugBuild) {
    // In the debug build, the first page of a free page run
    // contains a magic number for debugging. Exclude it.
    start += kPageSize;
  }
  if (!kMadviseZeroes) {
    // TODO: Do this when we resurrect the page instead.
    memset(start, 0, end - start);
  }
  CHECK_EQ(madvise(start, end - start, MADV_DONTNEED), 0);
  size_t pm_idx = ToPageMapIndex(start);
  size_t reclaimed_bytes = 0;
  // Calculate reclaimed bytes and update the page map.
  const size_t max_idx = pm_idx + (end - start) / kPageSize;
  for (; pm_idx < max_idx; ++pm_idx) {
    DCHECK(IsFreePage(pm_idx));
    if (page_map_[pm_idx] == kPageMapEmpty) {
      // Mark the page as released and update how many bytes we released.
      reclaimed_bytes += kPageSize;
      page_map_[pm_idx] = kPageMapReleased;
    }
  }
  return reclaimed_bytes;
}

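// A worked example of ReleasePageRange(), with illustrative numbers: for a
// 4-page free page run in a debug build, start is bumped past the
// magic-number page, so madvise covers 3 pages; if one of those was already
// kPageMapReleased from an earlier pass, only the 2 kPageMapEmpty pages are
// counted, and the function returns 2 * kPageSize.
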
void RosAlloc::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
  Thread* self = Thread::Current();
  size_t largest_continuous_free_pages = 0;
  WriterMutexLock wmu(self, bulk_free_lock_);
  MutexLock mu(self, lock_);
  for (FreePageRun* fpr : free_page_runs_) {
    largest_continuous_free_pages = std::max(largest_continuous_free_pages,
                                             fpr->ByteSize(this));
  }
  if (failed_alloc_bytes > kLargeSizeThreshold) {
    // Large allocation.
    size_t required_bytes = RoundUp(failed_alloc_bytes, kPageSize);
    if (required_bytes > largest_continuous_free_pages) {
      os << "; failed due to fragmentation (required contiguous free "
         << required_bytes << " bytes where largest contiguous free "
         << largest_continuous_free_pages << " bytes)";
    }
  } else {
    // Non-large allocation.
    size_t required_bytes = numOfPages[SizeToIndex(failed_alloc_bytes)] * kPageSize;
    if (required_bytes > largest_continuous_free_pages) {
      os << "; failed due to fragmentation (required contiguous free "
         << required_bytes << " bytes for a new buffer where largest contiguous free "
         << largest_continuous_free_pages << " bytes)";
    }
  }
}

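// For instance, with illustrative numbers: a failed 1 MB large allocation
// against at most 256 KB of contiguous free pages appends
// "; failed due to fragmentation (required contiguous free 1048576 bytes
// where largest contiguous free 262144 bytes)" to the stream.
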
}  // namespace allocator
}  // namespace gc
}  // namespace art