/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base/mutex-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "rosalloc.h"

#include <map>
#include <list>
#include <sstream>
#include <vector>

namespace art {
namespace gc {
namespace allocator {

static constexpr bool kUsePrefetchDuringAllocRun = true;
static constexpr bool kPrefetchNewRunDataByZeroing = false;
static constexpr size_t kPrefetchStride = 64;

size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
size_t RosAlloc::bulkFreeBitMapOffsets[kNumOfSizeBrackets];
size_t RosAlloc::threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;
size_t RosAlloc::dedicated_full_run_storage_[kPageSize / sizeof(size_t)] = { 0 };
RosAlloc::Run* RosAlloc::dedicated_full_run_ =
    reinterpret_cast<RosAlloc::Run*>(dedicated_full_run_storage_);

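// The allocator does not map its own heap: the caller hands in a page-aligned
// [base, base + capacity) region, and the constructor below starts out by
// treating the entire region as a single free page run. Only the page map
// (one byte of state per page) is mapped here, sized for max_capacity so the
// heap can grow without remapping it. An illustrative call site (names are
// hypothetical, not from this file) might look like:
//
//   RosAlloc* rosalloc = new RosAlloc(heap_mem_map->Begin(), initial_size,
//                                     maximum_size, page_release_mode,
//                                     page_release_threshold);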
RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                   PageReleaseMode page_release_mode, size_t page_release_size_threshold)
    : base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity),
      capacity_(capacity), max_capacity_(max_capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
      page_release_mode_(page_release_mode),
      page_release_size_threshold_(page_release_size_threshold) {
  DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
  DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
  CHECK_LE(capacity, max_capacity);
  CHECK(IsAligned<kPageSize>(page_release_size_threshold_));
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << (intptr_t)base_ << ", end="
             << std::hex << (intptr_t)(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_
             << ", max_capacity=" << std::dec << max_capacity_;
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_lock_names_[i] =
        StringPrintf("an rosalloc size bracket %d lock", static_cast<int>(i));
    size_bracket_locks_[i] = new Mutex(size_bracket_lock_names_[i].c_str(), kRosAllocBracketLock);
    current_runs_[i] = dedicated_full_run_;
  }
  DCHECK_EQ(footprint_, capacity_);
  size_t num_of_pages = footprint_ / kPageSize;
  size_t max_num_of_pages = max_capacity_ / kPageSize;
  std::string error_msg;
  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", NULL, RoundUp(max_num_of_pages, kPageSize),
                                               PROT_READ | PROT_WRITE, false, &error_msg));
  CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
  page_map_ = page_map_mem_map_->Begin();
  page_map_size_ = num_of_pages;
  max_page_map_size_ = max_num_of_pages;
  free_page_run_size_map_.resize(num_of_pages);
  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  DCHECK(free_pages->IsFree());
  free_pages->ReleasePages(this);
  DCHECK(free_pages->IsFree());
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}

RosAlloc::~RosAlloc() {
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    delete size_bracket_locks_[i];
  }
}

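// Page-level allocation, shared by runs and large objects; the caller must
// hold lock_. The search is address-ordered first fit over free_page_runs_
// (a set sorted by address), splitting off and reinserting any tail pages.
// If nothing fits, the footprint is grown by at least 2MB (capped at
// capacity_) via ArtRosAllocMoreCore, merging the new space into a trailing
// free run when one exists, and the search is retried on that last run.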
void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
  lock_.AssertHeld(self);
  DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
  FreePageRun* res = NULL;
  const size_t req_byte_size = num_pages * kPageSize;
  // Find the lowest address free page run that's large enough.
  for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
    FreePageRun* fpr = *it;
    DCHECK(fpr->IsFree());
    size_t fpr_byte_size = fpr->ByteSize(this);
    DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
    if (req_byte_size <= fpr_byte_size) {
      // Found one.
      free_page_runs_.erase(it++);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split.
        FreePageRun* remainder =
            reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Don't need to call madvise on remainder here.
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
      break;
    } else {
      ++it;
    }
  }

  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == NULL && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = NULL;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_size_, new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_size_ = new_num_of_pages;
      DCHECK_LE(page_map_size_, max_page_map_size_);
      free_page_run_size_map_.resize(new_num_of_pages);
      ArtRosAllocMoreCore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK_EQ(last_free_page_run->End(this), base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK_EQ(*free_page_runs_.rbegin(), new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != NULL);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder =
            reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != NULL)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK(IsFreePage(page_map_idx + i));
    }
    switch (page_map_type) {
      case kPageMapRun:
        page_map_[page_map_idx] = kPageMapRun;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapRunPart;
        }
        break;
      case kPageMapLargeObject:
        page_map_[page_map_idx] = kPageMapLargeObject;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
        }
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_type);
        break;
    }
    if (kIsDebugBuild) {
      // Clear the first page since it is not madvised due to the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : NULL";
  }
  return nullptr;
}

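// Returns the pages starting at ptr to the free page run set. The length of
// the allocation is recovered by walking the page map until the "part"
// entries stop. Unless the caller guarantees the memory is already zero
// (verified word by word when kCheckZeroMemory is set), the pages are zeroed
// here, except when the page release mode will madvise them all anyway.
// Adjacent free runs on both sides are then coalesced before reinsertion.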
size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK_LT(pm_idx, page_map_size_);
  uint8_t pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  uint8_t pm_part_type;
  switch (pm_type) {
    case kPageMapRun:
      pm_part_type = kPageMapRunPart;
      break;
    case kPageMapLargeObject:
      pm_part_type = kPageMapLargeObjectPart;
      break;
    default:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << " : " << "pm_idx=" << pm_idx
                 << ", pm_type=" << static_cast<int>(pm_type) << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      return 0;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_size_;
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }
  const size_t byte_size = num_pages * kPageSize;
  if (already_zero) {
    if (kCheckZeroMemory) {
      const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(ptr);
      for (size_t i = 0; i < byte_size / sizeof(uintptr_t); ++i) {
        CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
      }
    }
  } else if (!DoesReleaseAllPages()) {
    memset(ptr, 0, byte_size);
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << __PRETTY_FUNCTION__ << " : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + byte_size)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, byte_size);
  DCHECK(IsAligned<kPageSize>(fpr->ByteSize(this)));

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << __PRETTY_FUNCTION__ << " : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_size_ : ToPageMapIndex(fpr->End(this))) << "]";
    }
    auto higher_it = free_page_runs_.upper_bound(fpr);
    if (higher_it != free_page_runs_.end()) {
      for (auto it = higher_it; it != free_page_runs_.end(); ) {
        FreePageRun* h = *it;
        DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                    << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
        }
        if (fpr->End(this) == h->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          // Clear magic num since this is no longer the start of a free page run.
          if (kIsDebugBuild) {
            h->magic_num_ = 0;
          }
          free_page_runs_.erase(it++);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(h)
                      << " from free_page_runs_";
          }
          fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
          DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
      }
    }
    // Try to coalesce in the lower address direction.
    auto lower_it = free_page_runs_.upper_bound(fpr);
    if (lower_it != free_page_runs_.begin()) {
      --lower_it;
      for (auto it = lower_it; ; ) {
        // We want to try to coalesce with the first element but
        // there's no "<=" operator for the iterator.
        bool to_exit_loop = it == free_page_runs_.begin();

        FreePageRun* l = *it;
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                    << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
        }
        if (l->End(this) == fpr->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it--);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(l)
                      << " from free_page_runs_";
          }
          l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
          DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
          // Clear magic num since this is no longer the start of a free page run.
          if (kIsDebugBuild) {
            fpr->magic_num_ = 0;
          }
          fpr = l;
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
        if (to_exit_loop) {
          break;
        }
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  DCHECK(fpr->IsFree());
  fpr->ReleasePages(this);
  DCHECK(fpr->IsFree());
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
  return byte_size;
}

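// Allocations above kLargeSizeThreshold bypass runs entirely: they take whole
// pages under the global lock and are tagged kPageMapLargeObject in the page
// map, so the reported size is always rounded up to a page multiple.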
void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK_GT(size, kLargeSizeThreshold);
  size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
  void* r;
  {
    MutexLock mu(self, lock_);
    r = AllocPages(self, num_pages, kPageMapLargeObject);
  }
  if (UNLIKELY(r == nullptr)) {
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : NULL";
    }
    return nullptr;
  }
  const size_t total_bytes = num_pages * kPageSize;
  *bytes_allocated = total_bytes;
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
              << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }
  // Check if the returned memory is really all zero.
  if (kCheckZeroMemory) {
    CHECK_EQ(total_bytes % sizeof(uintptr_t), 0U);
    const uintptr_t* words = reinterpret_cast<uintptr_t*>(r);
    for (size_t i = 0; i < total_bytes / sizeof(uintptr_t); ++i) {
      CHECK_EQ(words[i], 0U);
    }
  }
  return r;
}

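// Common free path. The page map entry for ptr picks the route: large objects
// give their pages straight back to FreePages(), while a run-part entry is
// walked backwards to its kPageMapRun header page before deferring to
// FreeFromRun(). Empty, released, and "part of large object" entries are
// impossible here and abort.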
size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  Run* run = nullptr;
  {
    MutexLock mu(self, lock_);
    DCHECK_LT(pm_idx, page_map_size_);
    uint8_t page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_[pm_idx]) {
      case kPageMapLargeObject:
        return FreePages(self, ptr, false);
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
      case kPageMapRunPart: {
        // Find the beginning of the run.
        do {
          --pm_idx;
          DCHECK_LT(pm_idx, capacity_ / kPageSize);
        } while (page_map_[pm_idx] != kPageMapRun);
        FALLTHROUGH_INTENDED;
      case kPageMapRun:
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        break;
      case kPageMapReleased:
      case kPageMapEmpty:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
      }
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
    }
  }
  DCHECK(run != nullptr);
  return FreeFromRun(self, ptr, run);
}

size_t RosAlloc::Free(Thread* self, void* ptr) {
  ReaderMutexLock rmu(self, bulk_free_lock_);
  return FreeInternal(self, ptr);
}

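// Allocates the pages for a brand-new run of bracket idx and initializes its
// header. For brackets that may become thread-local, the slot area is warmed
// up front, either by zeroing it (which dirties possibly-madvised pages) or
// by prefetching one cache line every kPrefetchStride bytes, since the run is
// about to be allocated from heavily.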
RosAlloc::Run* RosAlloc::AllocRun(Thread* self, size_t idx) {
  RosAlloc::Run* new_run = nullptr;
  {
    MutexLock mu(self, lock_);
    new_run = reinterpret_cast<Run*>(AllocPages(self, numOfPages[idx], kPageMapRun));
  }
  if (LIKELY(new_run != nullptr)) {
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    new_run->SetAllocBitMapBitsForInvalidSlots();
    DCHECK(!new_run->IsThreadLocal());
    DCHECK_EQ(new_run->first_search_vec_idx_, 0U);
    DCHECK(!new_run->to_be_bulk_freed_);
    if (kUsePrefetchDuringAllocRun && idx < kNumThreadLocalSizeBrackets) {
      // Take ownership of the cache lines if this is likely to become a thread-local run.
      if (kPrefetchNewRunDataByZeroing) {
        // Zeroing the data is sometimes faster than prefetching but it increases memory usage
        // since we end up dirtying zero pages which may have been madvised.
        new_run->ZeroData();
      } else {
        const size_t num_of_slots = numOfSlots[idx];
        const size_t bracket_size = bracketSizes[idx];
        const size_t num_of_bytes = num_of_slots * bracket_size;
        uint8_t* begin = reinterpret_cast<uint8_t*>(new_run) + headerSizes[idx];
        for (size_t i = 0; i < num_of_bytes; i += kPrefetchStride) {
          __builtin_prefetch(begin + i);
        }
      }
    }
  }
  return new_run;
}

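// Picks a replacement current run: the lowest-address non-full run for this
// bracket if one exists, otherwise a freshly allocated run. Returns null only
// if the page allocator itself fails.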
RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  // Get the lowest address non-full run from the binary tree.
  auto* const bt = &non_full_runs_[idx];
  if (!bt->empty()) {
    // If there's one, use it as the current run.
    auto it = bt->begin();
    Run* non_full_run = *it;
    DCHECK(non_full_run != nullptr);
    DCHECK(!non_full_run->IsThreadLocal());
    bt->erase(it);
    return non_full_run;
  }
  // If there's none, allocate a new run and use it as the current run.
  return AllocRun(self, idx);
}

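// Slow-path allocation from the shared current run of bracket idx; callers
// must hold size_bracket_locks_[idx]. When the current run fills up it is
// recorded in full_runs_ (debug builds only) and replaced via RefillRun(); on
// refill failure current_runs_[idx] is parked on dedicated_full_run_ so later
// attempts fail cleanly instead of dereferencing null.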
inline void* RosAlloc::AllocFromCurrentRunUnlocked(Thread* self, size_t idx) {
  Run* current_run = current_runs_[idx];
  DCHECK(current_run != nullptr);
  void* slot_addr = current_run->AllocSlot();
  if (UNLIKELY(slot_addr == nullptr)) {
    // The current run got full. Try to refill it.
    DCHECK(current_run->IsFull());
    if (kIsDebugBuild && current_run != dedicated_full_run_) {
      full_runs_[idx].insert(current_run);
      if (kTraceRosAlloc) {
        LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(current_run)
                  << " into full_runs_[" << std::dec << idx << "]";
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
    }
    current_run = RefillRun(self, idx);
    if (UNLIKELY(current_run == nullptr)) {
      // Failed to allocate a new run, make sure that it is the dedicated full run.
      current_runs_[idx] = dedicated_full_run_;
      return nullptr;
    }
    DCHECK(current_run != nullptr);
    DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
    current_run->SetIsThreadLocal(false);
    current_runs_[idx] = current_run;
    DCHECK(!current_run->IsFull());
    slot_addr = current_run->AllocSlot();
    // Must succeed now with a new run.
    DCHECK(slot_addr != nullptr);
  }
  return slot_addr;
}

void* RosAlloc::AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK_LE(size, bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  void* slot_addr = AllocFromCurrentRunUnlocked(self, idx);
  if (LIKELY(slot_addr != nullptr)) {
    DCHECK(bytes_allocated != nullptr);
    *bytes_allocated = bracket_size;
    // Caller verifies that it is all 0.
  }
  return slot_addr;
}

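// Fast-path allocation. Brackets below kNumThreadLocalSizeBrackets are served
// from a per-thread run with no locking; dedicated_full_run_ doubles as the
// "no run yet" sentinel, so a thread's first allocation simply fails the
// bitmap search and drops into the locked path below. Before giving up on a
// full thread-local run, slots freed by other threads (buffered in the
// thread-local free bit map) are merged back into the alloc bit map.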
void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated) {
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK_LE(size, bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);

  void* slot_addr;

  if (LIKELY(idx < kNumThreadLocalSizeBrackets)) {
    // Use a thread-local run.
    Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
    // Allow invalid since this will always fail the allocation.
    if (kIsDebugBuild) {
      // Need the lock to prevent race conditions.
      MutexLock mu(self, *size_bracket_locks_[idx]);
      CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
    }
    DCHECK(thread_local_run != nullptr);
    DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
    slot_addr = thread_local_run->AllocSlot();
    // The allocation must fail if the run is invalid.
    DCHECK(thread_local_run != dedicated_full_run_ || slot_addr == nullptr)
        << "allocated from an invalid run";
    if (UNLIKELY(slot_addr == nullptr)) {
      // The run got full. Try to free slots.
      DCHECK(thread_local_run->IsFull());
      MutexLock mu(self, *size_bracket_locks_[idx]);
      bool is_all_free_after_merge;
      // This is safe to do for the dedicated_full_run_ since the bitmaps are empty.
      if (thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&is_all_free_after_merge)) {
        DCHECK_NE(thread_local_run, dedicated_full_run_);
        // Some slot got freed. Keep it.
        DCHECK(!thread_local_run->IsFull());
        DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
        if (is_all_free_after_merge) {
          // Check that the bitmap idx is back at 0 if it's all free.
          DCHECK_EQ(thread_local_run->first_search_vec_idx_, 0U);
        }
      } else {
        // No slots got freed. Try to refill the thread-local run.
        DCHECK(thread_local_run->IsFull());
        if (thread_local_run != dedicated_full_run_) {
          thread_local_run->SetIsThreadLocal(false);
          if (kIsDebugBuild) {
            full_runs_[idx].insert(thread_local_run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(thread_local_run)
                        << " into full_runs_[" << std::dec << idx << "]";
            }
          }
          DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
          DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
        }

        thread_local_run = RefillRun(self, idx);
        if (UNLIKELY(thread_local_run == nullptr)) {
          self->SetRosAllocRun(idx, dedicated_full_run_);
          return nullptr;
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
        thread_local_run->SetIsThreadLocal(true);
        self->SetRosAllocRun(idx, thread_local_run);
        DCHECK(!thread_local_run->IsFull());
      }

      DCHECK(thread_local_run != nullptr);
      DCHECK(!thread_local_run->IsFull());
      DCHECK(thread_local_run->IsThreadLocal());
      slot_addr = thread_local_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != nullptr);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  } else {
    // Use the (shared) current run.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    slot_addr = AllocFromCurrentRunUnlocked(self, idx);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
  }
  DCHECK(bytes_allocated != nullptr);
  *bytes_allocated = bracket_size;
  // Caller verifies that it is all 0.
  return slot_addr;
}

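// Frees one slot back into its run. For a thread-local run only the
// thread-local free bit map is marked (the owning thread merges it later), so
// the run stays usable without being stolen. For shared runs the slot is
// cleared immediately and the run migrates between full_runs_,
// non_full_runs_, and the current run slot as its occupancy changes; a run
// that becomes completely empty is handed back to FreePages() already zeroed.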
size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
  DCHECK_EQ(run->magic_num_, kMagicNum);
  DCHECK_LT(run, ptr);
  DCHECK_LT(ptr, run->End());
  const size_t idx = run->size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  bool run_was_full = false;
  MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
  if (kIsDebugBuild) {
    run_was_full = run->IsFull();
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
  }
  if (LIKELY(run->IsThreadLocal())) {
    // It's a thread-local run. Just mark the thread-local free bit map and return.
    DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->MarkThreadLocalFreeBitMap(ptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run);
    }
    // A thread local run will be kept as a thread local even if it's become all free.
    return bracket_size;
  }
  // Free the slot in the run.
  run->FreeSlot(ptr);
  auto* non_full_runs = &non_full_runs_[idx];
  if (run->IsAllFree()) {
    // It has just become completely free. Free the pages of this run.
    std::set<Run*>::iterator pos = non_full_runs->find(run);
    if (pos != non_full_runs->end()) {
      non_full_runs->erase(pos);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
      }
    }
    if (run == current_runs_[idx]) {
      current_runs_[idx] = dedicated_full_run_;
    }
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->ZeroHeader();
    {
      MutexLock lock_mu(self, lock_);
      FreePages(self, run, true);
    }
  } else {
    // It is not completely free. If it wasn't the current run or
    // already in the non-full run set (i.e., it was full) insert it
    // into the non-full run set.
    if (run != current_runs_[idx]) {
      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : NULL;
      auto pos = non_full_runs->find(run);
      if (pos == non_full_runs->end()) {
        DCHECK(run_was_full);
        DCHECK(full_runs->find(run) != full_runs->end());
        if (kIsDebugBuild) {
          full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run) << " from full_runs_";
          }
        }
        non_full_runs->insert(run);
        DCHECK(!run->IsFull());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
  return bracket_size;
}

std::string RosAlloc::Run::BitMapToStr(uint32_t* bit_map_base, size_t num_vec) {
  std::string bit_map_str;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = bit_map_base[v];
    if (v != num_vec - 1) {
      bit_map_str.append(StringPrintf("%x-", vec));
    } else {
      bit_map_str.append(StringPrintf("%x", vec));
    }
  }
  return bit_map_str;
}

std::string RosAlloc::Run::Dump() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  std::ostringstream stream;
  stream << "RosAlloc Run = " << reinterpret_cast<void*>(this)
         << "{ magic_num=" << static_cast<int>(magic_num_)
         << " size_bracket_idx=" << idx
         << " is_thread_local=" << static_cast<int>(is_thread_local_)
         << " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_)
         << " first_search_vec_idx=" << first_search_vec_idx_
         << " alloc_bit_map=" << BitMapToStr(alloc_bit_map_, num_vec)
         << " bulk_free_bit_map=" << BitMapToStr(BulkFreeBitMap(), num_vec)
         << " thread_local_bit_map=" << BitMapToStr(ThreadLocalFreeBitMap(), num_vec)
         << " }" << std::endl;
  return stream.str();
}

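// Bitmap-based slot allocation. first_search_vec_idx_ caches the first 32-bit
// vector that may still contain a free bit, so the common case is one ffs on
// the inverted vector. Worked example (illustrative values): if the vector is
// 0xFFFF7FFF then ~vec == 0x00008000, __builtin_ffs returns 16, ffz == 15,
// and the slot handed out is 32 * first_search_vec_idx_ + 15.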
inline void* RosAlloc::Run::AllocSlot() {
  const size_t idx = size_bracket_idx_;
  while (true) {
    if (kIsDebugBuild) {
      // Make sure that no slots leaked, the bitmap should be full for all previous vectors.
      for (size_t i = 0; i < first_search_vec_idx_; ++i) {
        CHECK_EQ(~alloc_bit_map_[i], 0U);
      }
    }
    uint32_t* const alloc_bitmap_ptr = &alloc_bit_map_[first_search_vec_idx_];
    uint32_t ffz1 = __builtin_ffs(~*alloc_bitmap_ptr);
    if (LIKELY(ffz1 != 0)) {
      const uint32_t ffz = ffz1 - 1;
      const uint32_t slot_idx = ffz + first_search_vec_idx_ * sizeof(*alloc_bitmap_ptr) * kBitsPerByte;
      const uint32_t mask = 1U << ffz;
      DCHECK_LT(slot_idx, numOfSlots[idx]) << "out of range";
      // Found an empty slot. Set the bit.
      DCHECK_EQ(*alloc_bitmap_ptr & mask, 0U);
      *alloc_bitmap_ptr |= mask;
      DCHECK_NE(*alloc_bitmap_ptr & mask, 0U);
      uint8_t* slot_addr = reinterpret_cast<uint8_t*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
                  << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
      }
      return slot_addr;
    }
    const size_t num_words = RoundUp(numOfSlots[idx], 32) / 32;
    if (first_search_vec_idx_ + 1 >= num_words) {
      DCHECK(IsFull());
      // Already at the last word, return null.
      return nullptr;
    }
    // Increase the index to the next word and try again.
    ++first_search_vec_idx_;
  }
}

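// Direct (non-thread-local) slot free. The slot index is recomputed from the
// pointer's offset past the run header, the matching alloc bit is cleared,
// first_search_vec_idx_ is pulled back so the freed word gets rescanned, and
// the slot memory is zeroed for reuse.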
void RosAlloc::Run::FreeSlot(void* ptr) {
  DCHECK(!IsThreadLocal());
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
      - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracket_size;
  DCHECK_LT(slot_idx, numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK_LT(vec_idx, num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &alloc_bit_map_[vec_idx];
  first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(vec_idx));
  const uint32_t mask = 1U << vec_off;
  DCHECK_NE(*vec & mask, 0U);
  *vec &= ~mask;
  DCHECK_EQ(*vec & mask, 0U);
  // Zero out the memory.
  // TODO: Investigate alternate memset since ptr is guaranteed to be aligned to 16.
  memset(ptr, 0, bracket_size);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::FreeSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

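// The three bit maps cooperate as follows: alloc_bit_map_ tracks live slots,
// the thread-local free bit map buffers frees aimed at a thread-local run,
// and the bulk free bit map buffers BulkFree() batches. The merge below
// clears alloc bits named in the thread-local free bit map and reports both
// whether anything changed and whether the run ended up all free, where "all
// free" means only the permanently-set padding bits of the last vector
// remain.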
inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out) {
  DCHECK(IsThreadLocal());
  // Free slots in the alloc bit map based on the thread local free bit map.
  const size_t idx = size_bracket_idx_;
  const size_t num_of_slots = numOfSlots[idx];
  const size_t num_vec = RoundUp(num_of_slots, 32) / 32;
  bool changed = false;
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* tl_free_vecp = &ThreadLocalFreeBitMap()[0];
  bool is_all_free_after = true;
  for (size_t v = 0; v < num_vec; v++, vecp++, tl_free_vecp++) {
    uint32_t tl_free_vec = *tl_free_vecp;
    uint32_t vec_before = *vecp;
    uint32_t vec_after;
    if (tl_free_vec != 0) {
      first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(v));
      vec_after = vec_before & ~tl_free_vec;
      *vecp = vec_after;
      changed = true;
      *tl_free_vecp = 0;  // clear the thread local free bit map.
    } else {
      vec_after = vec_before;
    }
    if (vec_after != 0) {
      if (v == num_vec - 1) {
        // Only not all free if a bit other than the mask bits is set.
        is_all_free_after =
            is_all_free_after && GetBitmapLastVectorMask(num_of_slots, num_vec) == vec_after;
      } else {
        is_all_free_after = false;
      }
    }
    DCHECK_EQ(*tl_free_vecp, static_cast<uint32_t>(0));
  }
  *is_all_free_after_out = is_all_free_after;
  // Return true if there was at least a bit set in the thread-local
  // free bit map and at least a bit in the alloc bit map changed.
  return changed;
}

inline void RosAlloc::Run::MergeBulkFreeBitMapIntoAllocBitMap() {
  DCHECK(!IsThreadLocal());
  // Free slots in the alloc bit map based on the bulk free bit map.
  const size_t num_vec = NumberOfBitmapVectors();
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* free_vecp = &BulkFreeBitMap()[0];
  for (size_t v = 0; v < num_vec; v++, vecp++, free_vecp++) {
    uint32_t free_vec = *free_vecp;
    if (free_vec != 0) {
      first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(v));
      *vecp &= ~free_vec;
      *free_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*free_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::UnionBulkFreeBitMapToThreadLocalFreeBitMap() {
  DCHECK(IsThreadLocal());
  // Union the thread local bit map with the bulk free bit map.
  size_t num_vec = NumberOfBitmapVectors();
  uint32_t* to_vecp = &ThreadLocalFreeBitMap()[0];
  uint32_t* from_vecp = &BulkFreeBitMap()[0];
  for (size_t v = 0; v < num_vec; v++, to_vecp++, from_vecp++) {
    uint32_t from_vec = *from_vecp;
    if (from_vec != 0) {
      *to_vecp |= from_vec;
      *from_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*from_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::MarkThreadLocalFreeBitMap(void* ptr) {
  DCHECK(IsThreadLocal());
  MarkFreeBitMapShared(ptr, ThreadLocalFreeBitMap(), "MarkThreadLocalFreeBitMap");
}

inline size_t RosAlloc::Run::MarkBulkFreeBitMap(void* ptr) {
  return MarkFreeBitMapShared(ptr, BulkFreeBitMap(), "MarkBulkFreeBitMap");
}

inline size_t RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
                                                  const char* caller_name) {
  const uint8_t idx = size_bracket_idx_;
  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
      - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
  const size_t bracket_size = bracketSizes[idx];
  memset(ptr, 0, bracket_size);
  DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracket_size;
  DCHECK_LT(slot_idx, numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = NumberOfBitmapVectors();
    DCHECK_LT(vec_idx, num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &free_bit_map_base[vec_idx];
  const uint32_t mask = 1U << vec_off;
  DCHECK_EQ(*vec & mask, 0U);
  *vec |= mask;
  DCHECK_NE(*vec & mask, 0U);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : 0x" << std::hex
              << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
  return bracket_size;
}

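// The last 32-bit vector usually covers more bits than there are slots; the
// surplus high bits are kept permanently set so they never look allocatable.
// Worked example: 42 slots need num_vec == 2, so remain == 2 * 32 - 42 == 22
// and the mask is ((1U << 22) - 1) << 10, i.e. bits 10..31 of the last
// vector.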
inline uint32_t RosAlloc::Run::GetBitmapLastVectorMask(size_t num_slots, size_t num_vec) {
  const size_t kBitsPerVec = 32;
  DCHECK_GE(num_vec * kBitsPerVec, num_slots);
  size_t remain = num_vec * kBitsPerVec - num_slots;
  DCHECK_NE(remain, kBitsPerVec);
  return ((1U << remain) - 1) << (kBitsPerVec - remain);
}

1037inline bool RosAlloc::Run::IsAllFree() {
Ian Rogers13735952014-10-08 12:43:28 -07001038 const uint8_t idx = size_bracket_idx_;
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001039 const size_t num_slots = numOfSlots[idx];
1040 const size_t num_vec = NumberOfBitmapVectors();
1041 DCHECK_NE(num_vec, 0U);
1042 // Check the last vector after the loop since it uses a special case for the masked bits.
1043 for (size_t v = 0; v < num_vec - 1; v++) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001044 uint32_t vec = alloc_bit_map_[v];
1045 if (vec != 0) {
1046 return false;
1047 }
1048 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001049 // Make sure the last word is equal to the mask, all other bits must be 0.
1050 return alloc_bit_map_[num_vec - 1] == GetBitmapLastVectorMask(num_slots, num_vec);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001051}

inline bool RosAlloc::Run::IsFull() {
  const size_t num_vec = NumberOfBitmapVectors();
  for (size_t v = 0; v < num_vec; ++v) {
    if (~alloc_bit_map_[v] != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsBulkFreeBitmapClean() {
  const size_t num_vec = NumberOfBitmapVectors();
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = BulkFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsThreadLocalFreeBitmapClean() {
  const size_t num_vec = NumberOfBitmapVectors();
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = ThreadLocalFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline void RosAlloc::Run::SetAllocBitMapBitsForInvalidSlots() {
  const size_t idx = size_bracket_idx_;
  const size_t num_slots = numOfSlots[idx];
  const size_t num_vec = RoundUp(num_slots, 32) / 32;
  DCHECK_NE(num_vec, 0U);
  // Make sure to set the bits at the end of the bitmap so that we don't allocate there since they
  // don't represent valid slots.
  alloc_bit_map_[num_vec - 1] |= GetBitmapLastVectorMask(num_slots, num_vec);
}

inline void RosAlloc::Run::ZeroHeader() {
  const uint8_t idx = size_bracket_idx_;
  memset(this, 0, headerSizes[idx]);
}

inline void RosAlloc::Run::ZeroData() {
  const uint8_t idx = size_bracket_idx_;
  uint8_t* slot_begin = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
  memset(slot_begin, 0, numOfSlots[idx] * bracketSizes[idx]);
}

inline void RosAlloc::Run::FillAllocBitMap() {
  size_t num_vec = NumberOfBitmapVectors();
  memset(alloc_bit_map_, 0xFF, sizeof(uint32_t) * num_vec);
  first_search_vec_idx_ = num_vec - 1;  // No free bits in any of the bitmap words.
}

void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                                    void* arg) {
  size_t idx = size_bracket_idx_;
  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  DCHECK_EQ(slot_base + num_slots * bracket_size, reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize);
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK_GE(num_slots, slots);
    uint32_t vec = alloc_bit_map_[v];
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
      if (is_allocated) {
        handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
      } else {
        handler(slot_addr, slot_addr + bracket_size, 0, arg);
      }
    }
  }
}
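// Editor's sketch (not in the original source): InspectAllSlots drives simple
// accumulating callbacks. For example, the BytesAllocatedCallback defined
// later in this file can total up the live bytes of a run:
//
//   size_t bytes = 0;
//   run->InspectAllSlots(RosAlloc::BytesAllocatedCallback, &bytes);
//
// Each allocated slot contributes bracket_size to used_bytes; free slots
// report 0 and are skipped by that callback.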

// If true, read the page map entries in BulkFree() without using the
// lock for better performance, assuming that the existence of an
// allocated chunk/pointer being freed in BulkFree() guarantees that
// the page map entry won't change.
static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = true;

size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
  size_t freed_bytes = 0;
  if ((false)) {
    // Used only to test Free() as GC uses only BulkFree().
    for (size_t i = 0; i < num_ptrs; ++i) {
      freed_bytes += FreeInternal(self, ptrs[i]);
    }
    return freed_bytes;
  }

  WriterMutexLock wmu(self, bulk_free_lock_);

  // First mark slots to free in the bulk free bit map without locking the
  // size bracket locks. On host, unordered_set is faster than vector + flag.
#ifdef HAVE_ANDROID_OS
  std::vector<Run*> runs;
#else
  std::unordered_set<Run*, hash_run, eq_run> runs;
#endif
  for (size_t i = 0; i < num_ptrs; i++) {
    void* ptr = ptrs[i];
    DCHECK_LE(base_, ptr);
    DCHECK_LT(ptr, base_ + footprint_);
    size_t pm_idx = RoundDownToPageMapIndex(ptr);
    Run* run = nullptr;
    if (kReadPageMapEntryWithoutLockInBulkFree) {
      // Read the page map entries without locking the lock.
      uint8_t page_map_entry = page_map_[pm_idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                  << std::dec << pm_idx
                  << ", page_map_entry=" << static_cast<int>(page_map_entry);
      }
      if (LIKELY(page_map_entry == kPageMapRun)) {
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
        size_t pi = pm_idx;
        // Find the beginning of the run.
        do {
          --pi;
          DCHECK_LT(pi, capacity_ / kPageSize);
        } while (page_map_[pi] != kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
      } else if (page_map_entry == kPageMapLargeObject) {
        MutexLock mu(self, lock_);
        freed_bytes += FreePages(self, ptr, false);
        continue;
      } else {
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
      }
    } else {
      // Read the page map entries with a lock.
      MutexLock mu(self, lock_);
      DCHECK_LT(pm_idx, page_map_size_);
      uint8_t page_map_entry = page_map_[pm_idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                  << std::dec << pm_idx
                  << ", page_map_entry=" << static_cast<int>(page_map_entry);
      }
      if (LIKELY(page_map_entry == kPageMapRun)) {
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
        size_t pi = pm_idx;
        // Find the beginning of the run.
        do {
          --pi;
          DCHECK_LT(pi, capacity_ / kPageSize);
        } while (page_map_[pi] != kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
      } else if (page_map_entry == kPageMapLargeObject) {
        freed_bytes += FreePages(self, ptr, false);
        continue;
      } else {
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
      }
    }
    DCHECK(run != nullptr);
    DCHECK_EQ(run->magic_num_, kMagicNum);
    // Set the bit in the bulk free bit map.
    freed_bytes += run->MarkBulkFreeBitMap(ptr);
#ifdef HAVE_ANDROID_OS
    if (!run->to_be_bulk_freed_) {
      run->to_be_bulk_freed_ = true;
      runs.push_back(run);
    }
#else
    runs.insert(run);
#endif
  }

  // Now, iterate over the affected runs and update the alloc bit map
  // based on the bulk free bit map (for non-thread-local runs) and
  // union the bulk free bit map into the thread-local free bit map
  // (for thread-local runs.)
  for (Run* run : runs) {
#ifdef HAVE_ANDROID_OS
    DCHECK(run->to_be_bulk_freed_);
    run->to_be_bulk_freed_ = false;
#endif
    size_t idx = run->size_bracket_idx_;
    MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
    if (run->IsThreadLocal()) {
      DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
      DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
      run->UnionBulkFreeBitMapToThreadLocalFreeBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(run);
      }
      DCHECK(run->IsThreadLocal());
      // A thread local run will be kept as a thread local even if
      // it's become all free.
    } else {
      bool run_was_full = run->IsFull();
      run->MergeBulkFreeBitMapIntoAllocBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run);
      }
      // Check if the run should be moved to non_full_runs_ or
      // free_page_runs_.
      auto* non_full_runs = &non_full_runs_[idx];
      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
      if (run->IsAllFree()) {
        // It has just become completely free. Free the pages of the
        // run.
        bool run_was_current = run == current_runs_[idx];
        if (run_was_current) {
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          // If it was a current run, reuse it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only.)
          if (kIsDebugBuild) {
            std::unordered_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
            DCHECK(pos != full_runs->end());
            full_runs->erase(pos);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
            DCHECK(full_runs->find(run) == full_runs->end());
          }
        } else {
          // If it was in a non full run set, remove it from the set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
          non_full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " from non_full_runs_";
          }
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
        }
        if (!run_was_current) {
          run->ZeroHeader();
          MutexLock lock_mu(self, lock_);
          FreePages(self, run, true);
        }
      } else {
        // It is not completely free. If it was not the current run and
        // not already in the non-full run set (i.e., it was full),
        // insert it into the non-full run set.
        if (run == current_runs_[idx]) {
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          DCHECK(full_runs->find(run) == full_runs->end());
          // If it was a current run, keep it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only) and insert into the non-full run set.
          DCHECK(full_runs->find(run) != full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          if (kIsDebugBuild) {
            full_runs->erase(run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
          }
          non_full_runs->insert(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " into non_full_runs_[" << std::dec << idx << "]";
          }
        } else {
          // It was not full, so it is already in the non-full run set;
          // leave it there.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
        }
      }
    }
  }
  return freed_bytes;
}
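// Editor's sketch (not in the original source): the shape of BulkFree() above
// in pseudocode, for orientation. Phase 1 only marks bulk-free bitmap bits
// per run (cheap, no bracket locks held); phase 2 takes each run's bracket
// lock once and reconciles the marks with the alloc bitmap and the run sets:
//
//   for ptr in ptrs:                      // phase 1
//     run = run_containing(ptr)           // located via page_map_
//     run->MarkBulkFreeBitMap(ptr)
//     runs.add(run)
//   for run in runs:                      // phase 2
//     lock(size_bracket_locks_[run->idx])
//     if run is thread-local: union marks into thread-local free bitmap
//     else: merge marks into alloc bitmap; move run between
//           full_runs_ / non_full_runs_ / free pages as needed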

std::string RosAlloc::DumpPageMap() {
  std::ostringstream stream;
  stream << "RosAlloc PageMap: " << std::endl;
  lock_.AssertHeld(Thread::Current());
  size_t end = page_map_size_;
  FreePageRun* curr_fpr = nullptr;
  size_t curr_fpr_size = 0;
  size_t remaining_curr_fpr_size = 0;
  size_t num_running_empty_pages = 0;
  for (size_t i = 0; i < end; ++i) {
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
        // Fall-through.
      case kPageMapEmpty: {
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
          // Encountered a fresh free page run.
          DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
          DCHECK(fpr->IsFree());
          DCHECK(curr_fpr == nullptr);
          DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
          curr_fpr = fpr;
          curr_fpr_size = fpr->ByteSize(this);
          DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
          remaining_curr_fpr_size = curr_fpr_size - kPageSize;
          stream << "[" << i << "]=" << (pm == kPageMapReleased ? "Released" : "Empty")
                 << " (FPR start) fpr_size=" << curr_fpr_size
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = nullptr;
            curr_fpr_size = 0;
          }
          stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
          DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
        } else {
          // Still part of the current free page run.
          DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
          DCHECK(curr_fpr != nullptr && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
          DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
          DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
          remaining_curr_fpr_size -= kPageSize;
          stream << "[" << i << "]=" << (pm == kPageMapReleased ? "Released" : "Empty")
                 << " (FPR part)"
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = nullptr;
            curr_fpr_size = 0;
          }
        }
        num_running_empty_pages++;
        break;
      }
      case kPageMapLargeObject: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (start)" << std::endl;
        break;
      }
      case kPageMapLargeObjectPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (part)" << std::endl;
        break;
      case kPageMapRun: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        size_t idx = run->size_bracket_idx_;
        stream << "[" << i << "]=Run (start)"
               << " idx=" << idx
               << " numOfPages=" << numOfPages[idx]
               << " is_thread_local=" << run->is_thread_local_
               << " is_all_free=" << (run->IsAllFree() ? 1 : 0)
               << std::endl;
        break;
      }
      case kPageMapRunPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Run (part)" << std::endl;
        break;
      default:
        stream << "[" << i << "]=Unrecognizable page map type: " << static_cast<int>(pm);
        break;
    }
  }
  return stream.str();
}
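// Editor's sketch (not in the original source): example of what DumpPageMap()
// emits for a heap whose first pages hold a two-page free page run followed
// by a one-page run of bracket 0 (values illustrative only):
//
//   RosAlloc PageMap:
//   [0]=Empty (FPR start) fpr_size=8192 remaining_fpr_size=4096
//   curr_fpr=0x...
//   [1]=Empty (FPR part) remaining_fpr_size=0
//   [2]=Run (start) idx=0 numOfPages=1 is_thread_local=0 is_all_free=0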

size_t RosAlloc::UsableSize(void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  MutexLock mu(Thread::Current(), lock_);
  switch (page_map_[pm_idx]) {
    case kPageMapReleased:
      // Fall-through.
    case kPageMapEmpty:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
                 << std::hex << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapLargeObject: {
      size_t num_pages = 1;
      size_t idx = pm_idx + 1;
      size_t end = page_map_size_;
      while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
        num_pages++;
        idx++;
      }
      return num_pages * kPageSize;
    }
    case kPageMapLargeObjectPart:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
                 << std::hex << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapRun:
    case kPageMapRunPart: {
      // Find the beginning of the run.
      while (page_map_[pm_idx] != kPageMapRun) {
        pm_idx--;
        DCHECK_LT(pm_idx, capacity_ / kPageSize);
      }
      DCHECK_EQ(page_map_[pm_idx], kPageMapRun);
      Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      DCHECK_EQ(run->magic_num_, kMagicNum);
      size_t idx = run->size_bracket_idx_;
      size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
          - (reinterpret_cast<uint8_t*>(run) + headerSizes[idx]);
      DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
      return IndexToBracketSize(idx);
    }
    default: {
      LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
      break;
    }
  }
  return 0;
}
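// Editor's sketch (not in the original source): UsableSize() returns the
// bracket size, not the requested size. With the 16-byte-spaced brackets set
// up in Initialize() below, a 37-byte allocation lands in the 48-byte
// bracket, so UsableSize() reports 48; a 5-page large object reports
// 5 * kPageSize regardless of the exact requested byte count.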

bool RosAlloc::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  FreePageRun* last_free_page_run;
  DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
  auto it = free_page_runs_.rbegin();
  if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
    // Remove the last free page run, if any.
    DCHECK(last_free_page_run->IsFree());
    DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
    DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
    DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
    free_page_runs_.erase(last_free_page_run);
    size_t decrement = last_free_page_run->ByteSize(this);
    size_t new_footprint = footprint_ - decrement;
    DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
    size_t new_num_of_pages = new_footprint / kPageSize;
    DCHECK_GE(page_map_size_, new_num_of_pages);
    // Zero out the tail of the page map.
    uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
    uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
    DCHECK_LE(madvise_begin, page_map_mem_map_->End());
    size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
    if (madvise_size > 0) {
      DCHECK_ALIGNED(madvise_begin, kPageSize);
      DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
      if (!kMadviseZeroes) {
        memset(madvise_begin, 0, madvise_size);
      }
      CHECK_EQ(madvise(madvise_begin, madvise_size, MADV_DONTNEED), 0);
    }
    if (madvise_begin > zero_begin) {
      memset(zero_begin, 0, madvise_begin - zero_begin);
    }
    page_map_size_ = new_num_of_pages;
    free_page_run_size_map_.resize(new_num_of_pages);
    DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
    ArtRosAllocMoreCore(this, -(static_cast<intptr_t>(decrement)));
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
                << footprint_ << " to " << new_footprint;
    }
    DCHECK_LT(new_footprint, footprint_);
    DCHECK_LT(new_footprint, capacity_);
    footprint_ = new_footprint;
    return true;
  }
  return false;
}
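// Editor's sketch (not in the original source): if footprint_ is 64 pages and
// the free page run set ends with a 16-page run flush against
// base_ + footprint_, Trim() erases that run, shrinks the footprint to
// 48 pages, zeroes/madvises the now-unused tail of page_map_, and returns
// the 16 pages through ArtRosAllocMoreCore() with a negative increment. If
// the last free run does not touch the end of the footprint, nothing is
// trimmed and Trim() returns false.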

void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                          void* arg) {
  // Note: no need to use this to release pages as we already do so in FreePages().
  if (handler == nullptr) {
    return;
  }
  MutexLock mu(Thread::Current(), lock_);
  size_t pm_end = page_map_size_;
  size_t i = 0;
  while (i < pm_end) {
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
        // Fall-through.
      case kPageMapEmpty: {
        // The start of a free page run.
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
        size_t fpr_size = fpr->ByteSize(this);
        DCHECK(IsAligned<kPageSize>(fpr_size));
        void* start = fpr;
        if (kIsDebugBuild) {
          // In the debug build, the first page of a free page run
          // contains a magic number for debugging. Exclude it.
          start = reinterpret_cast<uint8_t*>(fpr) + kPageSize;
        }
        void* end = reinterpret_cast<uint8_t*>(fpr) + fpr_size;
        handler(start, end, 0, arg);
        size_t num_pages = fpr_size / kPageSize;
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK(IsFreePage(j));
          }
        }
        i += fpr_size / kPageSize;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObject: {
        // The start of a large object.
        size_t num_pages = 1;
        size_t idx = i + 1;
        while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
          num_pages++;
          idx++;
        }
        void* start = base_ + i * kPageSize;
        void* end = base_ + (i + num_pages) * kPageSize;
        size_t used_bytes = num_pages * kPageSize;
        handler(start, end, used_bytes, arg);
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
        break;
      case kPageMapRun: {
        // The start of a run.
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        // The dedicated full run doesn't contain any real allocations; it
        // lives in static storage, never in the page map, so its slots are
        // never visited here.
        run->InspectAllSlots(handler, arg);
        size_t num_pages = numOfPages[run->size_bracket_idx_];
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapRunPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapRunPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
        break;
    }
  }
}

size_t RosAlloc::Footprint() {
  MutexLock mu(Thread::Current(), lock_);
  return footprint_;
}

size_t RosAlloc::FootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return capacity_;
}

void RosAlloc::SetFootprintLimit(size_t new_capacity) {
  MutexLock mu(Thread::Current(), lock_);
  DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
  // Only growing the limit is supported here; shrinking the footprint is
  // done via Trim().
  if (capacity_ < new_capacity) {
    CHECK_LE(new_capacity, max_capacity_);
    capacity_ = new_capacity;
    VLOG(heap) << "new capacity=" << capacity_;
  }
}

void RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
  Thread* self = Thread::Current();
  // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
  ReaderMutexLock rmu(self, bulk_free_lock_);
  for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
    CHECK(thread_local_run != nullptr);
    // Invalid means already revoked.
    DCHECK(thread_local_run->IsThreadLocal());
    if (thread_local_run != dedicated_full_run_) {
      thread->SetRosAllocRun(idx, dedicated_full_run_);
      DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
      // Note the thread local run may not be full here.
      bool dont_care;
      thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&dont_care);
      thread_local_run->SetIsThreadLocal(false);
      thread_local_run->MergeBulkFreeBitMapIntoAllocBitMap();
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      RevokeRun(self, idx, thread_local_run);
    }
  }
}

void RosAlloc::RevokeRun(Thread* self, size_t idx, Run* run) {
  size_bracket_locks_[idx]->AssertHeld(self);
  DCHECK(run != dedicated_full_run_);
  if (run->IsFull()) {
    if (kIsDebugBuild) {
      full_runs_[idx].insert(run);
      DCHECK(full_runs_[idx].find(run) != full_runs_[idx].end());
      if (kTraceRosAlloc) {
        LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run)
                  << " into full_runs_[" << std::dec << idx << "]";
      }
    }
  } else if (run->IsAllFree()) {
    run->ZeroHeader();
    MutexLock mu(self, lock_);
    FreePages(self, run, true);
  } else {
    non_full_runs_[idx].insert(run);
    DCHECK(non_full_runs_[idx].find(run) != non_full_runs_[idx].end());
    if (kTraceRosAlloc) {
      LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run)
                << " into non_full_runs_[" << std::dec << idx << "]";
    }
  }
}

void RosAlloc::RevokeThreadUnsafeCurrentRuns() {
  // Revoke the current runs which share the same idx as thread local runs.
  Thread* self = Thread::Current();
  for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
    MutexLock mu(self, *size_bracket_locks_[idx]);
    if (current_runs_[idx] != dedicated_full_run_) {
      RevokeRun(self, idx, current_runs_[idx]);
      current_runs_[idx] = dedicated_full_run_;
    }
  }
}

void RosAlloc::RevokeAllThreadLocalRuns() {
  // This is called at a time when no mutator thread can be allocating, such
  // as at Zygote creation time or during a GC pause.
  MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
  MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalRuns(thread);
  }
  RevokeThreadUnsafeCurrentRuns();
}

void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
    ReaderMutexLock rmu(self, bulk_free_lock_);
    for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
      MutexLock mu(self, *size_bracket_locks_[idx]);
      Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
      DCHECK(thread_local_run == nullptr || thread_local_run == dedicated_full_run_);
    }
  }
}

void RosAlloc::AssertAllThreadLocalRunsAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock shutdown_mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* t : thread_list) {
      AssertThreadLocalRunsAreRevoked(t);
    }
    for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
      MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
      CHECK_EQ(current_runs_[idx], dedicated_full_run_);
    }
  }
}

void RosAlloc::Initialize() {
  // bracketSizes.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 16 * (i + 1);
    } else if (i == kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 1 * KB;
    } else {
      DCHECK_EQ(i, kNumOfSizeBrackets - 1);
      bracketSizes[i] = 2 * KB;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
    }
  }
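  // Editor's sketch (not in the original source): given the DCHECKs below,
  // kNumOfSizeBrackets is 34, so the loop above yields bracket sizes
  //   16, 32, 48, ..., 512 (brackets 0-31, spaced 16 bytes apart),
  //   1024 (bracket 32) and 2048 (bracket 33).
  // A request is rounded up to the nearest bracket, so e.g. a 100-byte
  // request is served from the 112-byte bracket.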
  // numOfPages.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < 4) {
      numOfPages[i] = 1;
    } else if (i < 8) {
      numOfPages[i] = 2;
    } else if (i < 16) {
      numOfPages[i] = 4;
    } else if (i < 32) {
      numOfPages[i] = 8;
    } else if (i == 32) {
      DCHECK_EQ(i, kNumOfSizeBrackets - 2);
      numOfPages[i] = 16;
    } else {
      DCHECK_EQ(i, kNumOfSizeBrackets - 1);
      numOfPages[i] = 32;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
    }
  }
  // Compute numOfSlots and slotOffsets.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_t bracket_size = bracketSizes[i];
    size_t run_size = kPageSize * numOfPages[i];
    size_t max_num_of_slots = run_size / bracket_size;
    // Compute the actual number of slots by taking the header and
    // alignment into account.
    size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint32_t));
    DCHECK_EQ(fixed_header_size, static_cast<size_t>(8));
    size_t header_size = 0;
    size_t bulk_free_bit_map_offset = 0;
    size_t thread_local_free_bit_map_offset = 0;
    size_t num_of_slots = 0;
    // Search for the maximum number of slots that allows enough space
    // for the header (including the bit maps.)
    for (int s = max_num_of_slots; s >= 0; s--) {
      size_t tmp_slots_size = bracket_size * s;
      size_t tmp_bit_map_size = RoundUp(s, sizeof(uint32_t) * kBitsPerByte) / kBitsPerByte;
      size_t tmp_bulk_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_bulk_free_bit_map_off = fixed_header_size + tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_off = tmp_bulk_free_bit_map_off + tmp_bulk_free_bit_map_size;
      size_t tmp_unaligned_header_size = tmp_thread_local_free_bit_map_off + tmp_thread_local_free_bit_map_size;
      // Align up the unaligned header size. bracket_size may not be a power of two.
      size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
          tmp_unaligned_header_size :
          tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
      DCHECK_EQ(tmp_header_size % bracket_size, static_cast<size_t>(0));
      DCHECK_EQ(tmp_header_size % 8, static_cast<size_t>(0));
      if (tmp_slots_size + tmp_header_size <= run_size) {
        // Found the right number of slots, that is, there was enough
        // space for the header (including the bit maps.)
        num_of_slots = s;
        header_size = tmp_header_size;
        bulk_free_bit_map_offset = tmp_bulk_free_bit_map_off;
        thread_local_free_bit_map_offset = tmp_thread_local_free_bit_map_off;
        break;
      }
    }
    DCHECK(num_of_slots > 0 && header_size > 0 && bulk_free_bit_map_offset > 0);
    // Add the padding for the alignment remainder.
    header_size += run_size % bracket_size;
    DCHECK_EQ(header_size + num_of_slots * bracket_size, run_size);
    numOfSlots[i] = num_of_slots;
    headerSizes[i] = header_size;
    bulkFreeBitMapOffsets[i] = bulk_free_bit_map_offset;
    threadLocalFreeBitMapOffsets[i] = thread_local_free_bit_map_offset;
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
                << ", headerSizes[" << i << "]=" << headerSizes[i]
                << ", bulkFreeBitMapOffsets[" << i << "]=" << bulkFreeBitMapOffsets[i]
                << ", threadLocalFreeBitMapOffsets[" << i << "]=" << threadLocalFreeBitMapOffsets[i];
    }
  }
  // Set up the dedicated full run: fill its alloc bitmap so nobody can
  // successfully allocate from it.
  if (kIsDebugBuild) {
    dedicated_full_run_->magic_num_ = kMagicNum;
  }
  // It doesn't matter which size bracket we use since the main goal is to have the allocation
  // fail 100% of the time you attempt to allocate into the dedicated full run.
  dedicated_full_run_->size_bracket_idx_ = 0;
  dedicated_full_run_->FillAllocBitMap();
  dedicated_full_run_->SetIsThreadLocal(true);
}
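// Editor's sketch (not in the original source): the slot-count search above,
// worked for bracket 0 (16-byte slots, one 4096-byte page). For s slots the
// three bitmaps take 3 * RoundUp(s, 32) / 8 bytes after the 8-byte fixed
// header. s = 249 gives an unaligned header of 8 + 3 * 32 = 104 bytes,
// rounded up to 112 (a multiple of 16), and 112 + 249 * 16 = 4096 fits the
// run exactly, so numOfSlots[0] = 249 and headerSizes[0] = 112.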

void RosAlloc::BytesAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
                                      size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
  *bytes_allocated += used_bytes;
}

void RosAlloc::ObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
                                        size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
  ++(*objects_allocated);
}

void RosAlloc::Verify() {
  Thread* self = Thread::Current();
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
      << "The mutator lock isn't exclusively held at " << __PRETTY_FUNCTION__;
  MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
  ReaderMutexLock rmu(self, bulk_free_lock_);
  std::vector<Run*> runs;
  {
    MutexLock lock_mu(self, lock_);
    size_t pm_end = page_map_size_;
    size_t i = 0;
    while (i < pm_end) {
      uint8_t pm = page_map_[i];
      switch (pm) {
        case kPageMapReleased:
          // Fall-through.
        case kPageMapEmpty: {
          // The start of a free page run.
          FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
          DCHECK_EQ(fpr->magic_num_, kMagicNumFree);
          CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
              << "An empty page must belong to the free page run set";
          size_t fpr_size = fpr->ByteSize(this);
          CHECK(IsAligned<kPageSize>(fpr_size))
              << "A free page run size isn't page-aligned : " << fpr_size;
          size_t num_pages = fpr_size / kPageSize;
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "A free page run size must be > 0 : " << fpr_size;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK(IsFreePage(j))
                << "A mismatch between the page map table for kPageMapEmpty at page index " << j
                << " and the free page run size : page index range : "
                << i << " to " << (i + num_pages) << std::endl << DumpPageMap();
          }
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObject: {
          // The start of a large object.
          size_t num_pages = 1;
          size_t idx = i + 1;
          while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
            num_pages++;
            idx++;
          }
          void* start = base_ + i * kPageSize;
          mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
          size_t obj_size = obj->SizeOf();
          CHECK_GT(obj_size, kLargeSizeThreshold)
              << "A rosalloc large object size must be > " << kLargeSizeThreshold;
          CHECK_EQ(num_pages, RoundUp(obj_size, kPageSize) / kPageSize)
              << "A rosalloc large object size " << obj_size
              << " does not match the page map table " << (num_pages * kPageSize)
              << std::endl << DumpPageMap();
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObjectPart:
          LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
          break;
        case kPageMapRun: {
          // The start of a run.
          Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
          DCHECK_EQ(run->magic_num_, kMagicNum);
          size_t idx = run->size_bracket_idx_;
          CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
          size_t num_pages = numOfPages[idx];
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "Run size must be > 0 : " << num_pages;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK_EQ(page_map_[j], kPageMapRunPart)
                << "A mismatch between the page map table for kPageMapRunPart at page index " << j
                << " and the run size : page index range " << i << " to " << (i + num_pages)
                << std::endl << DumpPageMap();
          }
          // The dedicated full run never appears in the page map since it
          // doesn't have any real allocations, so it is never pushed or
          // verified here.
          runs.push_back(run);
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapRunPart:
          // Fall-through.
        default:
          LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
          break;
      }
    }
  }
  std::list<Thread*> threads = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : threads) {
    for (size_t i = 0; i < kNumThreadLocalSizeBrackets; ++i) {
      MutexLock brackets_mu(self, *size_bracket_locks_[i]);
      Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
      CHECK(thread_local_run != nullptr);
      CHECK(thread_local_run->IsThreadLocal());
      CHECK(thread_local_run == dedicated_full_run_ ||
            thread_local_run->size_bracket_idx_ == i);
    }
  }
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    MutexLock brackets_mu(self, *size_bracket_locks_[i]);
    Run* current_run = current_runs_[i];
    CHECK(current_run != nullptr);
    if (current_run != dedicated_full_run_) {
      // The dedicated full run is currently marked as thread local.
      CHECK(!current_run->IsThreadLocal());
      CHECK_EQ(current_run->size_bracket_idx_, i);
    }
  }
  // Call Verify() here for the lock order.
  for (auto& run : runs) {
    run->Verify(self, this);
  }
}

void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
  DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
  const size_t idx = size_bracket_idx_;
  CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
  const size_t num_slots = numOfSlots[idx];
  const size_t num_vec = RoundUp(num_slots, 32) / 32;
  CHECK_GT(num_vec, 0U);
  size_t bracket_size = IndexToBracketSize(idx);
  CHECK_EQ(slot_base + num_slots * bracket_size,
           reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize)
      << "Mismatch in the end address of the run " << Dump();
  // Check that the bulk free bitmap is clean. It's only used during BulkFree().
  CHECK(IsBulkFreeBitmapClean()) << "The bulk free bit map isn't clean " << Dump();
  uint32_t last_word_mask = GetBitmapLastVectorMask(num_slots, num_vec);
  // Make sure all the bits at the end of the run are set so that we don't allocate there.
  CHECK_EQ(alloc_bit_map_[num_vec - 1] & last_word_mask, last_word_mask);
  // Ensure that the first bitmap index is valid.
  CHECK_LT(first_search_vec_idx_, num_vec);
  // Check the thread local runs, the current runs, and the run sets.
  if (IsThreadLocal()) {
    // If it's a thread local run, then it must be pointed to by an owner thread.
    bool owner_found = false;
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
      Thread* thread = *it;
      for (size_t i = 0; i < kNumThreadLocalSizeBrackets; i++) {
        MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
        Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
        if (thread_local_run == this) {
          CHECK(!owner_found)
              << "A thread local run has more than one owner thread " << Dump();
          CHECK_EQ(i, idx)
              << "A mismatching size bracket index in a thread local run " << Dump();
          owner_found = true;
        }
      }
    }
    CHECK(owner_found) << "A thread local run has no owner thread " << Dump();
  } else {
    // If it's not thread local, check that the thread local free bitmap is clean.
    CHECK(IsThreadLocalFreeBitmapClean())
        << "A non-thread-local run's thread local free bitmap isn't clean "
        << Dump();
    // Check if it's a current run for the size bracket.
    bool is_current_run = false;
    for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
      MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
      Run* current_run = rosalloc->current_runs_[i];
      if (idx == i) {
        if (this == current_run) {
          is_current_run = true;
        }
      } else {
        // If the size bracket index does not match, then it must not
        // be a current run.
        CHECK_NE(this, current_run)
            << "A current run points to a run with a wrong size bracket index " << Dump();
      }
    }
    // If it's neither a thread-local nor a current run, then it must be
    // in a run set.
    if (!is_current_run) {
      MutexLock mu(self, rosalloc->lock_);
      auto& non_full_runs = rosalloc->non_full_runs_[idx];
      // If it's all free, it must be a free page run rather than a run.
      CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump();
      if (!IsFull()) {
        // If it's not full, it must be in the non-full run set.
        CHECK(non_full_runs.find(this) != non_full_runs.end())
            << "A non-full run isn't in the non-full run set " << Dump();
      } else {
        // If it's full, it must be in the full run set (debug build only.)
        if (kIsDebugBuild) {
          auto& full_runs = rosalloc->full_runs_[idx];
          CHECK(full_runs.find(this) != full_runs.end())
              << "A full run isn't in the full run set " << Dump();
        }
      }
    }
  }
  // Check each slot.
  size_t slots = 0;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK_GE(num_slots, slots) << "Out of bounds";
    uint32_t vec = alloc_bit_map_[v];
    uint32_t thread_local_free_vec = ThreadLocalFreeBitMap()[v];
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      // If a thread local run, slots may be marked freed in the
      // thread local free bitmap.
      bool is_thread_local_freed = IsThreadLocal() && ((thread_local_free_vec >> i) & 0x1) != 0;
      if (is_allocated && !is_thread_local_freed) {
        uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
        mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
        size_t obj_size = obj->SizeOf();
        CHECK_LE(obj_size, kLargeSizeThreshold)
            << "A run slot contains a large object " << Dump();
        CHECK_EQ(SizeToIndex(obj_size), idx)
            << PrettyTypeOf(obj) << " "
            << "obj_size=" << obj_size << ", idx=" << idx << " "
            << "A run slot contains an object with wrong size " << Dump();
      }
    }
  }
}

size_t RosAlloc::ReleasePages() {
  VLOG(heap) << "RosAlloc::ReleasePages()";
  DCHECK(!DoesReleaseAllPages());
  Thread* self = Thread::Current();
  size_t reclaimed_bytes = 0;
  size_t i = 0;
  // Check the page map size which might have changed due to grow/shrink.
  while (i < page_map_size_) {
    // Reading the page map without a lock is racy but the race is benign since it should only
    // result in occasionally not releasing pages which we could release.
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
        // Fall through.
      case kPageMapEmpty: {
        // This is currently the start of a free page run.
        // Acquire the lock to prevent other threads racing in and modifying the page map.
        MutexLock mu(self, lock_);
        // Check that it's still empty after we acquired the lock since another thread could have
        // raced in and placed an allocation here.
        if (IsFreePage(i)) {
          // Free page runs can start with a released page if we coalesced a released page free
          // page run with an empty page run.
          FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
          // There is a race condition where FreePage can coalesce fpr with the previous
          // free page run before we acquire lock_. In that case free_page_runs_.find will not find
          // a run starting at fpr. To handle this race, we skip reclaiming the page range and go
          // to the next page.
          if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
            size_t fpr_size = fpr->ByteSize(this);
            DCHECK(IsAligned<kPageSize>(fpr_size));
            uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
            reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
            size_t pages = fpr_size / kPageSize;
            CHECK_GT(pages, 0U) << "Infinite loop probable";
            i += pages;
            DCHECK_LE(i, page_map_size_);
            break;
          }
        }
        FALLTHROUGH_INTENDED;
      }
      case kPageMapLargeObject:      // Fall through.
      case kPageMapLargeObjectPart:  // Fall through.
      case kPageMapRun:              // Fall through.
      case kPageMapRunPart:          // Fall through.
        ++i;
        break;  // Skip.
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
        break;
    }
  }
  return reclaimed_bytes;
}

size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) {
  DCHECK_ALIGNED(start, kPageSize);
  DCHECK_ALIGNED(end, kPageSize);
  DCHECK_LT(start, end);
  if (kIsDebugBuild) {
    // In the debug build, the first page of a free page run
    // contains a magic number for debugging. Exclude it.
    start += kPageSize;
  }
  if (!kMadviseZeroes) {
    // TODO: Do this when we resurrect the page instead.
    memset(start, 0, end - start);
  }
  CHECK_EQ(madvise(start, end - start, MADV_DONTNEED), 0);
  size_t pm_idx = ToPageMapIndex(start);
  size_t reclaimed_bytes = 0;
  // Calculate reclaimed bytes and update the page map.
  const size_t max_idx = pm_idx + (end - start) / kPageSize;
  for (; pm_idx < max_idx; ++pm_idx) {
    DCHECK(IsFreePage(pm_idx));
    if (page_map_[pm_idx] == kPageMapEmpty) {
      // Mark the page as released and update how many bytes we released.
      reclaimed_bytes += kPageSize;
      page_map_[pm_idx] = kPageMapReleased;
    }
  }
  return reclaimed_bytes;
}
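// Editor's sketch (not in the original source): worked example for
// ReleasePageRange(). For a 5-page free page run starting at page index 10
// in a debug build, the first (magic-number) page is excluded, pages 11-14
// are madvised, and only those of the 4 pages still marked kPageMapEmpty
// count toward reclaimed_bytes (pages already marked kPageMapReleased were
// reclaimed earlier), so the return value is at most 4 * kPageSize.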

void RosAlloc::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
  Thread* self = Thread::Current();
  size_t largest_continuous_free_bytes = 0;
  WriterMutexLock wmu(self, bulk_free_lock_);
  MutexLock mu(self, lock_);
  for (FreePageRun* fpr : free_page_runs_) {
    largest_continuous_free_bytes = std::max(largest_continuous_free_bytes,
                                             fpr->ByteSize(this));
  }
  if (failed_alloc_bytes > kLargeSizeThreshold) {
    // Large allocation.
    size_t required_bytes = RoundUp(failed_alloc_bytes, kPageSize);
    if (required_bytes > largest_continuous_free_bytes) {
      os << "; failed due to fragmentation (required contiguous free "
         << required_bytes << " bytes where largest contiguous free "
         << largest_continuous_free_bytes << " bytes)";
    }
  } else {
    // Non-large allocation.
    size_t required_bytes = numOfPages[SizeToIndex(failed_alloc_bytes)] * kPageSize;
    if (required_bytes > largest_continuous_free_bytes) {
      os << "; failed due to fragmentation (required contiguous free "
         << required_bytes << " bytes for a new buffer where largest contiguous free "
         << largest_continuous_free_bytes << " bytes)";
    }
  }
}
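// Editor's sketch (not in the original source): example of the message above,
// assuming 4 KB pages. A failed 40000-byte large allocation needs
// RoundUp(40000, 4096) = 40960 contiguous free bytes; if the largest free
// page run is 32768 bytes, the appended text reads:
//   "; failed due to fragmentation (required contiguous free 40960 bytes
//    where largest contiguous free 32768 bytes)"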

}  // namespace allocator
}  // namespace gc
}  // namespace art