/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rosalloc.h"

#include <map>
#include <list>
#include <sstream>
#include <vector>

#include "android-base/stringprintf.h"

#include "base/memory_tool.h"
#include "base/mutex-inl.h"
#include "gc/space/memory_tool_settings.h"
#include "mem_map.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace allocator {

using android::base::StringPrintf;

static constexpr bool kUsePrefetchDuringAllocRun = false;
static constexpr bool kPrefetchNewRunDataByZeroing = false;
static constexpr size_t kPrefetchStride = 64;

size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;
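// The dedicated full run is a statically allocated run that is permanently
// "full". current_runs_ entries and threads' local run slots point at it
// whenever no real run is attached, so the fast allocation path can simply
// let AllocSlot() fail instead of null-checking the run pointer first.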
size_t RosAlloc::dedicated_full_run_storage_[kPageSize / sizeof(size_t)] = { 0 };
RosAlloc::Run* RosAlloc::dedicated_full_run_ =
    reinterpret_cast<RosAlloc::Run*>(dedicated_full_run_storage_);

RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                   PageReleaseMode page_release_mode, bool running_on_memory_tool,
                   size_t page_release_size_threshold)
    : base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity),
      capacity_(capacity), max_capacity_(max_capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
      page_release_mode_(page_release_mode),
      page_release_size_threshold_(page_release_size_threshold),
      is_running_on_memory_tool_(running_on_memory_tool) {
  DCHECK_ALIGNED(base, kPageSize);
  DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
  DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
  CHECK_LE(capacity, max_capacity);
  CHECK_ALIGNED(page_release_size_threshold_, kPageSize);
  // Zero the memory explicitly (don't rely on the mem map being zero-initialized).
  if (!kMadviseZeroes) {
    memset(base_, 0, max_capacity);
  }
  CHECK_EQ(madvise(base_, max_capacity, MADV_DONTNEED), 0);
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << (intptr_t)base_ << ", end="
             << std::hex << (intptr_t)(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_
             << ", max_capacity=" << std::dec << max_capacity_;
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_lock_names_[i] =
        StringPrintf("a rosalloc size bracket %d lock", static_cast<int>(i));
    size_bracket_locks_[i] = new Mutex(size_bracket_lock_names_[i].c_str(), kRosAllocBracketLock);
    current_runs_[i] = dedicated_full_run_;
  }
  DCHECK_EQ(footprint_, capacity_);
  size_t num_of_pages = footprint_ / kPageSize;
  size_t max_num_of_pages = max_capacity_ / kPageSize;
  std::string error_msg;
  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", nullptr,
                                               RoundUp(max_num_of_pages, kPageSize),
                                               PROT_READ | PROT_WRITE, false, false, &error_msg));
  CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
  page_map_ = page_map_mem_map_->Begin();
  page_map_size_ = num_of_pages;
  max_page_map_size_ = max_num_of_pages;
  free_page_run_size_map_.resize(num_of_pages);
  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  DCHECK(free_pages->IsFree());
  free_pages->ReleasePages(this);
  DCHECK(free_pages->IsFree());
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}

RosAlloc::~RosAlloc() {
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    delete size_bracket_locks_[i];
  }
  if (is_running_on_memory_tool_) {
    MEMORY_TOOL_MAKE_DEFINED(base_, capacity_);
  }
}

void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
  lock_.AssertHeld(self);
  DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
  FreePageRun* res = nullptr;
  const size_t req_byte_size = num_pages * kPageSize;
  // Find the lowest address free page run that's large enough.
  for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
    FreePageRun* fpr = *it;
    DCHECK(fpr->IsFree());
    size_t fpr_byte_size = fpr->ByteSize(this);
    DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
    if (req_byte_size <= fpr_byte_size) {
      // Found one.
      it = free_page_runs_.erase(it);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split.
        FreePageRun* remainder =
            reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Don't need to call madvise on remainder here.
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
      break;
    } else {
      ++it;
    }
  }

  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == nullptr && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = nullptr;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_size_, new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_size_ = new_num_of_pages;
      DCHECK_LE(page_map_size_, max_page_map_size_);
      free_page_run_size_map_.resize(new_num_of_pages);
      ArtRosAllocMoreCore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK_EQ(last_free_page_run->End(this), base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK_EQ(*free_page_runs_.rbegin(), new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != nullptr);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != nullptr)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK(IsFreePage(page_map_idx + i));
    }
    switch (page_map_type) {
      case kPageMapRun:
        page_map_[page_map_idx] = kPageMapRun;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapRunPart;
        }
        break;
      case kPageMapLargeObject:
        page_map_[page_map_idx] = kPageMapLargeObject;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
        }
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_type);
        break;
    }
    if (kIsDebugBuild) {
      // Clear the first page since it is not madvised due to the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : nullptr";
  }
  return nullptr;
}

size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK_LT(pm_idx, page_map_size_);
  uint8_t pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  uint8_t pm_part_type;
  switch (pm_type) {
    case kPageMapRun:
      pm_part_type = kPageMapRunPart;
      break;
    case kPageMapLargeObject:
      pm_part_type = kPageMapLargeObjectPart;
      break;
    default:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << " : " << "pm_idx=" << pm_idx << ", pm_type="
                 << static_cast<int>(pm_type) << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      return 0;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_size_;
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }
  const size_t byte_size = num_pages * kPageSize;
  if (already_zero) {
    if (ShouldCheckZeroMemory()) {
      const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(ptr);
      for (size_t i = 0; i < byte_size / sizeof(uintptr_t); ++i) {
        CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
      }
    }
  } else if (!DoesReleaseAllPages()) {
    memset(ptr, 0, byte_size);
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << __PRETTY_FUNCTION__ << " : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + byte_size)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, byte_size);
  DCHECK_ALIGNED(fpr->ByteSize(this), kPageSize);

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << __PRETTY_FUNCTION__ << " : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_size_ : ToPageMapIndex(fpr->End(this))) << "]";
    }
    for (auto it = free_page_runs_.upper_bound(fpr); it != free_page_runs_.end(); ) {
      FreePageRun* h = *it;
      DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                  << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
                  << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                  << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
      }
      if (fpr->End(this) == h->Begin()) {
        if (kTraceRosAlloc) {
          LOG(INFO) << "Success";
        }
        // Clear magic num since this is no longer the start of a free page run.
        if (kIsDebugBuild) {
          h->magic_num_ = 0;
        }
        it = free_page_runs_.erase(it);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(h)
                    << " from free_page_runs_";
        }
        fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      } else {
        // Not adjacent. Stop.
        if (kTraceRosAlloc) {
          LOG(INFO) << "Fail";
        }
        break;
      }
    }
    // Try to coalesce in the lower address direction.
    for (auto it = free_page_runs_.upper_bound(fpr); it != free_page_runs_.begin(); ) {
      --it;

      FreePageRun* l = *it;
      DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                  << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
                  << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                  << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
      }
      if (l->End(this) == fpr->Begin()) {
        if (kTraceRosAlloc) {
          LOG(INFO) << "Success";
        }
        it = free_page_runs_.erase(it);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(l)
                    << " from free_page_runs_";
        }
        l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Clear magic num since this is no longer the start of a free page run.
        if (kIsDebugBuild) {
          fpr->magic_num_ = 0;
        }
        fpr = l;
      } else {
        // Not adjacent. Stop.
        if (kTraceRosAlloc) {
          LOG(INFO) << "Fail";
        }
        break;
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  DCHECK(fpr->IsFree());
  fpr->ReleasePages(this);
  DCHECK(fpr->IsFree());
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
  return byte_size;
}

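// Requests above kLargeSizeThreshold bypass runs entirely and take whole
// pages straight from the page allocator, so the usable size is the request
// rounded up to a page multiple. Illustrative (hypothetical) call, mirroring
// how the inlined RosAlloc::Alloc() entry point dispatches large requests:
//   size_t bytes_allocated, usable_size, bytes_tl_bulk_allocated;
//   void* obj = rosalloc->AllocLargeObject(self, size, &bytes_allocated,
//                                          &usable_size, &bytes_tl_bulk_allocated);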
void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated,
                                 size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK_GT(size, kLargeSizeThreshold);
  size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
  void* r;
  {
    MutexLock mu(self, lock_);
    r = AllocPages(self, num_pages, kPageMapLargeObject);
  }
  if (UNLIKELY(r == nullptr)) {
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : nullptr";
    }
    return nullptr;
  }
  const size_t total_bytes = num_pages * kPageSize;
  *bytes_allocated = total_bytes;
  *usable_size = total_bytes;
  *bytes_tl_bulk_allocated = total_bytes;
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
              << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }
  // Check if the returned memory is really all zero.
  if (ShouldCheckZeroMemory()) {
    CHECK_EQ(total_bytes % sizeof(uintptr_t), 0U);
    const uintptr_t* words = reinterpret_cast<uintptr_t*>(r);
    for (size_t i = 0; i < total_bytes / sizeof(uintptr_t); ++i) {
      CHECK_EQ(words[i], 0U);
    }
  }
  return r;
}

size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  Run* run = nullptr;
  {
    MutexLock mu(self, lock_);
    DCHECK_LT(pm_idx, page_map_size_);
    uint8_t page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_[pm_idx]) {
      case kPageMapLargeObject:
        return FreePages(self, ptr, false);
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
      case kPageMapRunPart: {
        // Find the beginning of the run.
        do {
          --pm_idx;
          DCHECK_LT(pm_idx, capacity_ / kPageSize);
        } while (page_map_[pm_idx] != kPageMapRun);
        FALLTHROUGH_INTENDED;
      case kPageMapRun:
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        break;
      case kPageMapReleased:
      case kPageMapEmpty:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
      }
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
    }
  }
  DCHECK(run != nullptr);
  return FreeFromRun(self, ptr, run);
}

size_t RosAlloc::Free(Thread* self, void* ptr) {
  ReaderMutexLock rmu(self, bulk_free_lock_);
  return FreeInternal(self, ptr);
}

RosAlloc::Run* RosAlloc::AllocRun(Thread* self, size_t idx) {
  RosAlloc::Run* new_run = nullptr;
  {
    MutexLock mu(self, lock_);
    new_run = reinterpret_cast<Run*>(AllocPages(self, numOfPages[idx], kPageMapRun));
  }
  if (LIKELY(new_run != nullptr)) {
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    DCHECK(!new_run->IsThreadLocal());
    DCHECK(!new_run->to_be_bulk_freed_);
    if (kUsePrefetchDuringAllocRun && idx < kNumThreadLocalSizeBrackets) {
      // Take ownership of the cache lines if this is likely to become a thread-local run.
      if (kPrefetchNewRunDataByZeroing) {
        // Zeroing the data is sometimes faster than prefetching but it increases memory usage
        // since we end up dirtying zero pages which may have been madvised.
        new_run->ZeroData();
      } else {
        const size_t num_of_slots = numOfSlots[idx];
        const size_t bracket_size = bracketSizes[idx];
        const size_t num_of_bytes = num_of_slots * bracket_size;
        uint8_t* begin = reinterpret_cast<uint8_t*>(new_run) + headerSizes[idx];
        for (size_t i = 0; i < num_of_bytes; i += kPrefetchStride) {
          __builtin_prefetch(begin + i);
        }
      }
    }
    new_run->InitFreeList();
  }
  return new_run;
}

RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  // Get the lowest address non-full run from the binary tree.
  auto* const bt = &non_full_runs_[idx];
  if (!bt->empty()) {
    // If there's one, use it as the current run.
    auto it = bt->begin();
    Run* non_full_run = *it;
    DCHECK(non_full_run != nullptr);
    DCHECK(!non_full_run->IsThreadLocal());
    bt->erase(it);
    return non_full_run;
  }
  // If there's none, allocate a new run and use it as the current run.
  return AllocRun(self, idx);
}

inline void* RosAlloc::AllocFromCurrentRunUnlocked(Thread* self, size_t idx) {
  Run* current_run = current_runs_[idx];
  DCHECK(current_run != nullptr);
  void* slot_addr = current_run->AllocSlot();
  if (UNLIKELY(slot_addr == nullptr)) {
    // The current run got full. Try to refill it.
    DCHECK(current_run->IsFull());
    if (kIsDebugBuild && current_run != dedicated_full_run_) {
      full_runs_[idx].insert(current_run);
      if (kTraceRosAlloc) {
        LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(current_run)
                  << " into full_runs_[" << std::dec << idx << "]";
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
    }
    current_run = RefillRun(self, idx);
    if (UNLIKELY(current_run == nullptr)) {
      // Failed to allocate a new run, make sure that it is the dedicated full run.
      current_runs_[idx] = dedicated_full_run_;
      return nullptr;
    }
    DCHECK(current_run != nullptr);
    DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
    current_run->SetIsThreadLocal(false);
    current_runs_[idx] = current_run;
    DCHECK(!current_run->IsFull());
    slot_addr = current_run->AllocSlot();
    // Must succeed now with a new run.
    DCHECK(slot_addr != nullptr);
  }
  return slot_addr;
}

void* RosAlloc::AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated,
                                         size_t* usable_size,
                                         size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  void* slot_addr = AllocFromCurrentRunUnlocked(self, idx);
  if (LIKELY(slot_addr != nullptr)) {
    *bytes_allocated = bracket_size;
    *usable_size = bracket_size;
    *bytes_tl_bulk_allocated = bracket_size;
  }
  // Caller verifies that it is all 0.
  return slot_addr;
}

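// Two-tier small-object allocation. Brackets below kNumThreadLocalSizeBrackets
// are served from a per-thread run with no locking on the fast path; the
// bracket lock is taken only when that run fills up and its thread-local free
// list has to be merged back or the run replaced. Larger brackets share a
// current run under the bracket lock. *bytes_tl_bulk_allocated is the number
// of bytes newly reserved for the thread in bulk: the full free capacity of a
// freshly attached thread-local run, 0 for a slot from an already-counted
// thread-local run, or just the bracket size on the shared path.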
void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated,
                             size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  void* slot_addr;
  if (LIKELY(idx < kNumThreadLocalSizeBrackets)) {
    // Use a thread-local run.
    Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
    // Allow invalid since this will always fail the allocation.
    if (kIsDebugBuild) {
      // Need the lock to prevent race conditions.
      MutexLock mu(self, *size_bracket_locks_[idx]);
      CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
    }
    DCHECK(thread_local_run != nullptr);
    DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
    slot_addr = thread_local_run->AllocSlot();
    // The allocation must fail if the run is invalid.
    DCHECK(thread_local_run != dedicated_full_run_ || slot_addr == nullptr)
        << "allocated from an invalid run";
    if (UNLIKELY(slot_addr == nullptr)) {
      // The run got full. Try to free slots.
      DCHECK(thread_local_run->IsFull());
      MutexLock mu(self, *size_bracket_locks_[idx]);
      bool is_all_free_after_merge;
      // This is safe to do for the dedicated_full_run_ since its free lists are empty.
      if (thread_local_run->MergeThreadLocalFreeListToFreeList(&is_all_free_after_merge)) {
        DCHECK_NE(thread_local_run, dedicated_full_run_);
        // Some slot got freed. Keep it.
        DCHECK(!thread_local_run->IsFull());
        DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
      } else {
        // No slots got freed. Try to refill the thread-local run.
        DCHECK(thread_local_run->IsFull());
        if (thread_local_run != dedicated_full_run_) {
          thread_local_run->SetIsThreadLocal(false);
          if (kIsDebugBuild) {
            full_runs_[idx].insert(thread_local_run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(thread_local_run)
                        << " into full_runs_[" << std::dec << idx << "]";
            }
          }
          DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
          DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
        }

        thread_local_run = RefillRun(self, idx);
        if (UNLIKELY(thread_local_run == nullptr)) {
          self->SetRosAllocRun(idx, dedicated_full_run_);
          return nullptr;
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
        thread_local_run->SetIsThreadLocal(true);
        self->SetRosAllocRun(idx, thread_local_run);
        DCHECK(!thread_local_run->IsFull());
      }
      DCHECK(thread_local_run != nullptr);
      DCHECK(!thread_local_run->IsFull());
      DCHECK(thread_local_run->IsThreadLocal());
      // Account for all the free slots in the new or refreshed thread local run.
      *bytes_tl_bulk_allocated = thread_local_run->NumberOfFreeSlots() * bracket_size;
      slot_addr = thread_local_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != nullptr);
    } else {
      // The slot is already counted. Leave it as is.
      *bytes_tl_bulk_allocated = 0;
    }
    DCHECK(slot_addr != nullptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
    *bytes_allocated = bracket_size;
    *usable_size = bracket_size;
  } else {
    // Use the (shared) current run.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    slot_addr = AllocFromCurrentRunUnlocked(self, idx);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
    if (LIKELY(slot_addr != nullptr)) {
      *bytes_allocated = bracket_size;
      *usable_size = bracket_size;
      *bytes_tl_bulk_allocated = bracket_size;
    }
  }
  // Caller verifies that it is all 0.
  return slot_addr;
}

size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
  DCHECK_EQ(run->magic_num_, kMagicNum);
  DCHECK_LT(run, ptr);
  DCHECK_LT(ptr, run->End());
  const size_t idx = run->size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  bool run_was_full = false;
  MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
  if (kIsDebugBuild) {
    run_was_full = run->IsFull();
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
  }
  if (LIKELY(run->IsThreadLocal())) {
    // It's a thread-local run. Just add the slot to the thread-local free list and return.
    DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->AddToThreadLocalFreeList(ptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run);
    }
    // A thread local run will be kept as a thread local even if it's become all free.
    return bracket_size;
  }
  // Free the slot in the run.
  run->FreeSlot(ptr);
  auto* non_full_runs = &non_full_runs_[idx];
  if (run->IsAllFree()) {
    // It has just become completely free. Free the pages of this run.
    std::set<Run*>::iterator pos = non_full_runs->find(run);
    if (pos != non_full_runs->end()) {
      non_full_runs->erase(pos);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
      }
    }
    if (run == current_runs_[idx]) {
      current_runs_[idx] = dedicated_full_run_;
    }
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->ZeroHeaderAndSlotHeaders();
    {
      MutexLock lock_mu(self, lock_);
      FreePages(self, run, true);
    }
  } else {
    // It is not completely free. If it wasn't the current run or
    // already in the non-full run set (i.e., it was full) insert it
    // into the non-full run set.
    if (run != current_runs_[idx]) {
      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
      auto pos = non_full_runs->find(run);
      if (pos == non_full_runs->end()) {
        DCHECK(run_was_full);
        DCHECK(full_runs->find(run) != full_runs->end());
        if (kIsDebugBuild) {
          full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run) << " from full_runs_";
          }
        }
        non_full_runs->insert(run);
        DCHECK(!run->IsFull());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
  return bracket_size;
}

template<bool kUseTail>
std::string RosAlloc::Run::FreeListToStr(SlotFreeList<kUseTail>* free_list) {
  std::string free_list_str;
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  for (Slot* slot = free_list->Head(); slot != nullptr; slot = slot->Next()) {
    bool is_last = slot->Next() == nullptr;
    uintptr_t slot_offset = reinterpret_cast<uintptr_t>(slot) -
        reinterpret_cast<uintptr_t>(FirstSlot());
    DCHECK_EQ(slot_offset % bracket_size, 0U);
    uintptr_t slot_idx = slot_offset / bracket_size;
    if (!is_last) {
      free_list_str.append(StringPrintf("%u-", static_cast<uint32_t>(slot_idx)));
    } else {
      free_list_str.append(StringPrintf("%u", static_cast<uint32_t>(slot_idx)));
    }
  }
  return free_list_str;
}

std::string RosAlloc::Run::Dump() {
  size_t idx = size_bracket_idx_;
  std::ostringstream stream;
  stream << "RosAlloc Run = " << reinterpret_cast<void*>(this)
         << "{ magic_num=" << static_cast<int>(magic_num_)
         << " size_bracket_idx=" << idx
         << " is_thread_local=" << static_cast<int>(is_thread_local_)
         << " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_)
         << " free_list=" << FreeListToStr(&free_list_)
         << " bulk_free_list=" << FreeListToStr(&bulk_free_list_)
         << " thread_local_list=" << FreeListToStr(&thread_local_free_list_)
         << " }" << std::endl;
  return stream.str();
}

void RosAlloc::Run::FreeSlot(void* ptr) {
  DCHECK(!IsThreadLocal());
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  Slot* slot = ToSlot(ptr);
  // Zero out the memory.
  // TODO: Investigate alternate memset since ptr is guaranteed to be aligned to 16.
  memset(slot, 0, bracket_size);
  free_list_.Add(slot);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::FreeSlot() : " << slot
              << ", bracket_size=" << std::dec << bracket_size << ", slot_idx=" << SlotIndex(slot);
  }
}

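// Each run threads up to three singly linked free lists through its unused
// slots: free_list_ (slots available for allocation), bulk_free_list_ (slots
// freed by BulkFree(), pending reconciliation), and thread_local_free_list_
// (slots freed against a thread-local run). The helpers below splice entries
// from one list into another.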
inline bool RosAlloc::Run::MergeThreadLocalFreeListToFreeList(bool* is_all_free_after_out) {
  DCHECK(IsThreadLocal());
  // Merge the thread local free list into the free list and clear the thread local free list.
  const uint8_t idx = size_bracket_idx_;
  size_t thread_local_free_list_size = thread_local_free_list_.Size();
  const size_t size_before = free_list_.Size();
  free_list_.Merge(&thread_local_free_list_);
  const size_t size_after = free_list_.Size();
  DCHECK_EQ(size_before < size_after, thread_local_free_list_size > 0);
  DCHECK_LE(size_before, size_after);
  *is_all_free_after_out = free_list_.Size() == numOfSlots[idx];
  // Return true if at least one slot was added to the free list.
  return size_before < size_after;
}

inline void RosAlloc::Run::MergeBulkFreeListToFreeList() {
  DCHECK(!IsThreadLocal());
  // Merge the bulk free list into the free list and clear the bulk free list.
  free_list_.Merge(&bulk_free_list_);
}

inline void RosAlloc::Run::MergeBulkFreeListToThreadLocalFreeList() {
  DCHECK(IsThreadLocal());
  // Merge the bulk free list into the thread local free list and clear the bulk free list.
  thread_local_free_list_.Merge(&bulk_free_list_);
}

inline void RosAlloc::Run::AddToThreadLocalFreeList(void* ptr) {
  DCHECK(IsThreadLocal());
  AddToFreeListShared(ptr, &thread_local_free_list_, __FUNCTION__);
}

inline size_t RosAlloc::Run::AddToBulkFreeList(void* ptr) {
  return AddToFreeListShared(ptr, &bulk_free_list_, __FUNCTION__);
}

inline size_t RosAlloc::Run::AddToFreeListShared(void* ptr,
                                                 SlotFreeList<true>* free_list,
                                                 const char* caller_name) {
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  Slot* slot = ToSlot(ptr);
  memset(slot, 0, bracket_size);
  free_list->Add(slot);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : " << ptr
              << ", bracket_size=" << std::dec << bracket_size << ", slot_idx=" << SlotIndex(slot);
  }
  return bracket_size;
}

inline void RosAlloc::Run::ZeroHeaderAndSlotHeaders() {
  DCHECK(IsAllFree());
  const uint8_t idx = size_bracket_idx_;
  // Zero the slot headers (the next pointers of the free list).
  for (Slot* slot = free_list_.Head(); slot != nullptr; ) {
    Slot* next_slot = slot->Next();
    slot->Clear();
    slot = next_slot;
  }
  // Zero the header.
  memset(this, 0, headerSizes[idx]);
  // Check that the entire run is all zero.
  if (kIsDebugBuild) {
    const size_t size = numOfPages[idx] * kPageSize;
    const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(this);
    for (size_t i = 0; i < size / sizeof(uintptr_t); ++i) {
      CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
    }
  }
}

inline void RosAlloc::Run::ZeroData() {
  const uint8_t idx = size_bracket_idx_;
  uint8_t* slot_begin = reinterpret_cast<uint8_t*>(FirstSlot());
  memset(slot_begin, 0, numOfSlots[idx] * bracketSizes[idx]);
}

void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                                    void* arg) {
  size_t idx = size_bracket_idx_;
  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  DCHECK_EQ(slot_base + num_slots * bracket_size,
            reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize);
  // Free slots are on the free list and the allocated/used slots are not. We traverse the free list
  // to find out and record which slots are free in the is_free array.
  std::unique_ptr<bool[]> is_free(new bool[num_slots]());  // zero initialized
  for (Slot* slot = free_list_.Head(); slot != nullptr; slot = slot->Next()) {
    size_t slot_idx = SlotIndex(slot);
    DCHECK_LT(slot_idx, num_slots);
    is_free[slot_idx] = true;
  }
  if (IsThreadLocal()) {
    for (Slot* slot = thread_local_free_list_.Head(); slot != nullptr; slot = slot->Next()) {
      size_t slot_idx = SlotIndex(slot);
      DCHECK_LT(slot_idx, num_slots);
      is_free[slot_idx] = true;
    }
  }
  for (size_t slot_idx = 0; slot_idx < num_slots; ++slot_idx) {
    uint8_t* slot_addr = slot_base + slot_idx * bracket_size;
    if (!is_free[slot_idx]) {
      handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
    } else {
      handler(slot_addr, slot_addr + bracket_size, 0, arg);
    }
  }
}

// If true, read the page map entries in BulkFree() without using the
// lock for better performance, assuming that the existence of an
// allocated chunk/pointer being freed in BulkFree() guarantees that
// the page map entry won't change. Currently enabled.
static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = true;

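// Bulk free used by the GC sweep. The portion visible here is the first
// phase: each pointer is classified via the page map (without taking the
// global lock when kReadPageMapEntryWithoutLockInBulkFree is set), large
// objects are freed immediately, and the runs owning small-object slots are
// collected in the runs container so their bulk free lists can be reconciled
// afterwards under the bracket locks.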
size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
  size_t freed_bytes = 0;
  if ((false)) {
    // Used only to test Free() as GC uses only BulkFree().
    for (size_t i = 0; i < num_ptrs; ++i) {
      freed_bytes += FreeInternal(self, ptrs[i]);
    }
    return freed_bytes;
  }

  WriterMutexLock wmu(self, bulk_free_lock_);

  // First mark the slots to free in the per-run bulk free lists without locking the
Ian Rogers5fcfa7d2014-05-15 11:43:06 -07001016 // size bracket locks. On host, unordered_set is faster than vector + flag.
Bilyan Borisovbb661c02016-04-04 16:27:32 +01001017#ifdef ART_TARGET_ANDROID
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001018 std::vector<Run*> runs;
1019#else
Ian Rogers700a4022014-05-19 16:49:03 -07001020 std::unordered_set<Run*, hash_run, eq_run> runs;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001021#endif
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001022 for (size_t i = 0; i < num_ptrs; i++) {
1023 void* ptr = ptrs[i];
Ian Rogers5d057052014-03-12 14:32:27 -07001024 DCHECK_LE(base_, ptr);
1025 DCHECK_LT(ptr, base_ + footprint_);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001026 size_t pm_idx = RoundDownToPageMapIndex(ptr);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001027 Run* run = nullptr;
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001028 if (kReadPageMapEntryWithoutLockInBulkFree) {
 1029 // Read the page map entries without taking the lock.
Ian Rogers13735952014-10-08 12:43:28 -07001030 uint8_t page_map_entry = page_map_[pm_idx];
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001031 if (kTraceRosAlloc) {
1032 LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
1033 << std::dec << pm_idx
1034 << ", page_map_entry=" << static_cast<int>(page_map_entry);
1035 }
1036 if (LIKELY(page_map_entry == kPageMapRun)) {
1037 run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001038 } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
1039 size_t pi = pm_idx;
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001040 // Find the beginning of the run.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001041 do {
1042 --pi;
Ian Rogers5d057052014-03-12 14:32:27 -07001043 DCHECK_LT(pi, capacity_ / kPageSize);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001044 } while (page_map_[pi] != kPageMapRun);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001045 run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001046 } else if (page_map_entry == kPageMapLargeObject) {
1047 MutexLock mu(self, lock_);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001048 freed_bytes += FreePages(self, ptr, false);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001049 continue;
1050 } else {
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001051 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001052 }
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001053 } else {
1054 // Read the page map entries with a lock.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001055 MutexLock mu(self, lock_);
1056 DCHECK_LT(pm_idx, page_map_size_);
Ian Rogers13735952014-10-08 12:43:28 -07001057 uint8_t page_map_entry = page_map_[pm_idx];
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001058 if (kTraceRosAlloc) {
1059 LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
1060 << std::dec << pm_idx
1061 << ", page_map_entry=" << static_cast<int>(page_map_entry);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001062 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001063 if (LIKELY(page_map_entry == kPageMapRun)) {
1064 run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
1065 } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
1066 size_t pi = pm_idx;
1067 // Find the beginning of the run.
1068 do {
1069 --pi;
1070 DCHECK_LT(pi, capacity_ / kPageSize);
1071 } while (page_map_[pi] != kPageMapRun);
1072 run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
1073 } else if (page_map_entry == kPageMapLargeObject) {
1074 freed_bytes += FreePages(self, ptr, false);
1075 continue;
1076 } else {
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001077 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001078 }
1079 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001080 DCHECK(run != nullptr);
1081 DCHECK_EQ(run->magic_num_, kMagicNum);
 1082 // Add the slot to the run's bulk free list.
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001083 freed_bytes += run->AddToBulkFreeList(ptr);
Bilyan Borisovbb661c02016-04-04 16:27:32 +01001084#ifdef ART_TARGET_ANDROID
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001085 if (!run->to_be_bulk_freed_) {
1086 run->to_be_bulk_freed_ = true;
1087 runs.push_back(run);
1088 }
1089#else
1090 runs.insert(run);
1091#endif
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001092 }
1093
 1094 // Now, iterate over the affected runs and merge each run's bulk free
 1095 // list into its free list (for non-thread-local runs) or union it
 1096 // into the thread-local free list (for thread-local runs), matching
 1097 // the merge calls below.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001098 for (Run* run : runs) {
Bilyan Borisovbb661c02016-04-04 16:27:32 +01001099#ifdef ART_TARGET_ANDROID
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001100 DCHECK(run->to_be_bulk_freed_);
1101 run->to_be_bulk_freed_ = false;
1102#endif
1103 size_t idx = run->size_bracket_idx_;
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001104 MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001105 if (run->IsThreadLocal()) {
Mathieu Chartier0651d412014-04-29 14:37:57 -07001106 DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001107 DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
1108 DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001109 run->MergeBulkFreeListToThreadLocalFreeList();
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001110 if (kTraceRosAlloc) {
1111 LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
1112 << std::hex << reinterpret_cast<intptr_t>(run);
1113 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001114 DCHECK(run->IsThreadLocal());
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001115 // A thread local run will be kept as a thread local even if
1116 // it's become all free.
1117 } else {
1118 bool run_was_full = run->IsFull();
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001119 run->MergeBulkFreeListToFreeList();
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001120 if (kTraceRosAlloc) {
1121 LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
1122 << reinterpret_cast<intptr_t>(run);
1123 }
1124 // Check if the run should be moved to non_full_runs_ or
1125 // free_page_runs_.
Mathieu Chartier58553c72014-09-16 16:25:55 -07001126 auto* non_full_runs = &non_full_runs_[idx];
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001127 auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001128 if (run->IsAllFree()) {
1129 // It has just become completely free. Free the pages of the
1130 // run.
1131 bool run_was_current = run == current_runs_[idx];
1132 if (run_was_current) {
1133 DCHECK(full_runs->find(run) == full_runs->end());
1134 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1135 // If it was a current run, reuse it.
1136 } else if (run_was_full) {
1137 // If it was full, remove it from the full run set (debug
1138 // only.)
1139 if (kIsDebugBuild) {
Ian Rogers700a4022014-05-19 16:49:03 -07001140 std::unordered_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001141 DCHECK(pos != full_runs->end());
1142 full_runs->erase(pos);
1143 if (kTraceRosAlloc) {
1144 LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
1145 << reinterpret_cast<intptr_t>(run)
1146 << " from full_runs_";
1147 }
1148 DCHECK(full_runs->find(run) == full_runs->end());
1149 }
1150 } else {
 1151 // If it was in the non-full run set, remove it from the set.
1152 DCHECK(full_runs->find(run) == full_runs->end());
1153 DCHECK(non_full_runs->find(run) != non_full_runs->end());
1154 non_full_runs->erase(run);
1155 if (kTraceRosAlloc) {
1156 LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
1157 << reinterpret_cast<intptr_t>(run)
1158 << " from non_full_runs_";
1159 }
1160 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1161 }
1162 if (!run_was_current) {
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001163 run->ZeroHeaderAndSlotHeaders();
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001164 MutexLock lock_mu(self, lock_);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001165 FreePages(self, run, true);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001166 }
1167 } else {
 1168 // It is not completely free. If it wasn't the current run and
 1169 // wasn't already in the non-full run set (i.e., it was full),
 1170 // insert it into the non-full run set.
1171 if (run == current_runs_[idx]) {
1172 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1173 DCHECK(full_runs->find(run) == full_runs->end());
1174 // If it was a current run, keep it.
1175 } else if (run_was_full) {
1176 // If it was full, remove it from the full run set (debug
1177 // only) and insert into the non-full run set.
1178 DCHECK(full_runs->find(run) != full_runs->end());
1179 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1180 if (kIsDebugBuild) {
1181 full_runs->erase(run);
1182 if (kTraceRosAlloc) {
1183 LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
1184 << reinterpret_cast<intptr_t>(run)
1185 << " from full_runs_";
1186 }
1187 }
1188 non_full_runs->insert(run);
1189 if (kTraceRosAlloc) {
1190 LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
1191 << reinterpret_cast<intptr_t>(run)
 1192 << " into non_full_runs_[" << std::dec << idx << "]";
1193 }
1194 } else {
 1195 // It was not full, so leave it in the non-full run set.
1196 DCHECK(full_runs->find(run) == full_runs->end());
1197 DCHECK(non_full_runs->find(run) != non_full_runs->end());
1198 }
1199 }
1200 }
1201 }
Mathieu Chartier8585bad2014-04-11 17:53:48 -07001202 return freed_bytes;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001203}
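// The two-phase shape of BulkFree(), restated as a standalone sketch with toy
// types (hypothetical names; none of this is the real Run or page-map code).
// Phase one only records each freed pointer against its owning run; phase two
// does one merge per affected run, which is why this beats per-pointer Free()
// when the GC frees many objects at once.
namespace {

struct ToyBulkRun {
  std::vector<void*> bulk_free_list;  // stand-in for the run's bulk free list
  void MergeBulkFreeList() { bulk_free_list.clear(); }  // stand-in for the merge
};

inline void ToyBulkFree(ToyBulkRun* (*run_of)(void*), void** ptrs, size_t n) {
  std::unordered_set<ToyBulkRun*> affected;
  // Phase 1: bucket pointers by run; each run is remembered exactly once.
  for (size_t i = 0; i < n; ++i) {
    ToyBulkRun* run = run_of(ptrs[i]);
    run->bulk_free_list.push_back(ptrs[i]);
    affected.insert(run);
  }
  // Phase 2: take the per-run work (in the real code, the bracket lock) once
  // per run rather than once per freed pointer.
  for (ToyBulkRun* run : affected) {
    run->MergeBulkFreeList();
  }
}

}  // namespace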
1204
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001205std::string RosAlloc::DumpPageMap() {
1206 std::ostringstream stream;
1207 stream << "RosAlloc PageMap: " << std::endl;
1208 lock_.AssertHeld(Thread::Current());
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001209 size_t end = page_map_size_;
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001210 FreePageRun* curr_fpr = nullptr;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001211 size_t curr_fpr_size = 0;
1212 size_t remaining_curr_fpr_size = 0;
1213 size_t num_running_empty_pages = 0;
1214 for (size_t i = 0; i < end; ++i) {
Ian Rogers13735952014-10-08 12:43:28 -07001215 uint8_t pm = page_map_[i];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001216 switch (pm) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001217 case kPageMapReleased:
1218 // Fall-through.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001219 case kPageMapEmpty: {
1220 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
1221 if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
1222 // Encountered a fresh free page run.
1223 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1224 DCHECK(fpr->IsFree());
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001225 DCHECK(curr_fpr == nullptr);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001226 DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
1227 curr_fpr = fpr;
1228 curr_fpr_size = fpr->ByteSize(this);
1229 DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
1230 remaining_curr_fpr_size = curr_fpr_size - kPageSize;
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001231 stream << "[" << i << "]=" << (pm == kPageMapReleased ? "Released" : "Empty")
1232 << " (FPR start) fpr_size=" << curr_fpr_size
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001233 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001234 if (remaining_curr_fpr_size == 0) {
1235 // Reset at the end of the current free page run.
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001236 curr_fpr = nullptr;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001237 curr_fpr_size = 0;
1238 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001239 stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001240 DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
1241 } else {
1242 // Still part of the current free page run.
1243 DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001244 DCHECK(curr_fpr != nullptr && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001245 DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
1246 DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
1247 remaining_curr_fpr_size -= kPageSize;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001248 stream << "[" << i << "]=Empty (FPR part)"
1249 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001250 if (remaining_curr_fpr_size == 0) {
1251 // Reset at the end of the current free page run.
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001252 curr_fpr = nullptr;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001253 curr_fpr_size = 0;
1254 }
1255 }
1256 num_running_empty_pages++;
1257 break;
1258 }
1259 case kPageMapLargeObject: {
1260 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1261 num_running_empty_pages = 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001262 stream << "[" << i << "]=Large (start)" << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001263 break;
1264 }
1265 case kPageMapLargeObjectPart:
1266 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1267 num_running_empty_pages = 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001268 stream << "[" << i << "]=Large (part)" << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001269 break;
1270 case kPageMapRun: {
1271 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1272 num_running_empty_pages = 0;
1273 Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
1274 size_t idx = run->size_bracket_idx_;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001275 stream << "[" << i << "]=Run (start)"
1276 << " idx=" << idx
1277 << " numOfPages=" << numOfPages[idx]
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001278 << " is_thread_local=" << run->is_thread_local_
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001279 << " is_all_free=" << (run->IsAllFree() ? 1 : 0)
1280 << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001281 break;
1282 }
1283 case kPageMapRunPart:
1284 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1285 num_running_empty_pages = 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001286 stream << "[" << i << "]=Run (part)" << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001287 break;
1288 default:
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001289 stream << "[" << i << "]=Unrecognizable page map type: " << static_cast<int>(pm);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001290 break;
1291 }
1292 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001293 return stream.str();
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001294}
1295
Andreas Gamped7576322014-10-24 22:13:45 -07001296size_t RosAlloc::UsableSize(const void* ptr) {
Ian Rogers5d057052014-03-12 14:32:27 -07001297 DCHECK_LE(base_, ptr);
1298 DCHECK_LT(ptr, base_ + footprint_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001299 size_t pm_idx = RoundDownToPageMapIndex(ptr);
1300 MutexLock mu(Thread::Current(), lock_);
1301 switch (page_map_[pm_idx]) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001302 case kPageMapReleased:
1303 // Fall-through.
1304 case kPageMapEmpty:
1305 LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
1306 << std::hex << reinterpret_cast<intptr_t>(ptr);
1307 break;
1308 case kPageMapLargeObject: {
1309 size_t num_pages = 1;
1310 size_t idx = pm_idx + 1;
1311 size_t end = page_map_size_;
1312 while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
1313 num_pages++;
1314 idx++;
1315 }
1316 return num_pages * kPageSize;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001317 }
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001318 case kPageMapLargeObjectPart:
1319 LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
1320 << std::hex << reinterpret_cast<intptr_t>(ptr);
1321 break;
1322 case kPageMapRun:
1323 case kPageMapRunPart: {
1324 // Find the beginning of the run.
1325 while (page_map_[pm_idx] != kPageMapRun) {
1326 pm_idx--;
1327 DCHECK_LT(pm_idx, capacity_ / kPageSize);
1328 }
1329 DCHECK_EQ(page_map_[pm_idx], kPageMapRun);
1330 Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
1331 DCHECK_EQ(run->magic_num_, kMagicNum);
1332 size_t idx = run->size_bracket_idx_;
Andreas Gamped7576322014-10-24 22:13:45 -07001333 size_t offset_from_slot_base = reinterpret_cast<const uint8_t*>(ptr)
Ian Rogers13735952014-10-08 12:43:28 -07001334 - (reinterpret_cast<uint8_t*>(run) + headerSizes[idx]);
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001335 DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
1336 return IndexToBracketSize(idx);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001337 }
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001338 default: {
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001339 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001340 break;
1341 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001342 }
1343 return 0;
1344}
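// The backwards scan used above (and in BulkFree()) to find a run header,
// isolated on a plain byte array. 'R' stands in for kPageMapRun and 'r' for
// kPageMapRunPart; the encoding is hypothetical, only the walk is the point:
// any page of a run reaches its header page in at most numOfPages[idx] steps,
// because every page behind a run part belongs to the same run.
namespace {

inline size_t ToyFindRunStart(const char* page_map, size_t pm_idx) {
  // Precondition: page_map[pm_idx] is 'R' or 'r', i.e. inside some run.
  while (page_map[pm_idx] != 'R') {
    --pm_idx;
  }
  return pm_idx;
}

}  // namespace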
1345
1346bool RosAlloc::Trim() {
1347 MutexLock mu(Thread::Current(), lock_);
1348 FreePageRun* last_free_page_run;
1349 DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
1350 auto it = free_page_runs_.rbegin();
1351 if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
1352 // Remove the last free page run, if any.
1353 DCHECK(last_free_page_run->IsFree());
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001354 DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001355 DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
1356 DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
1357 free_page_runs_.erase(last_free_page_run);
1358 size_t decrement = last_free_page_run->ByteSize(this);
1359 size_t new_footprint = footprint_ - decrement;
1360 DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
1361 size_t new_num_of_pages = new_footprint / kPageSize;
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001362 DCHECK_GE(page_map_size_, new_num_of_pages);
1363 // Zero out the tail of the page map.
Ian Rogers13735952014-10-08 12:43:28 -07001364 uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
1365 uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001366 DCHECK_LE(madvise_begin, page_map_mem_map_->End());
1367 size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
1368 if (madvise_size > 0) {
1369 DCHECK_ALIGNED(madvise_begin, kPageSize);
1370 DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
Ian Rogersc5f17732014-06-05 20:48:42 -07001371 if (!kMadviseZeroes) {
1372 memset(madvise_begin, 0, madvise_size);
1373 }
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001374 CHECK_EQ(madvise(madvise_begin, madvise_size, MADV_DONTNEED), 0);
1375 }
 1376 if (madvise_begin != zero_begin) {
1377 memset(zero_begin, 0, madvise_begin - zero_begin);
1378 }
1379 page_map_size_ = new_num_of_pages;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001380 free_page_run_size_map_.resize(new_num_of_pages);
1381 DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001382 ArtRosAllocMoreCore(this, -(static_cast<intptr_t>(decrement)));
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001383 if (kTraceRosAlloc) {
1384 LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
1385 << footprint_ << " to " << new_footprint;
1386 }
1387 DCHECK_LT(new_footprint, footprint_);
1388 DCHECK_LT(new_footprint, capacity_);
1389 footprint_ = new_footprint;
1390 return true;
1391 }
1392 return false;
1393}
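// A sketch of the tail-release pattern inside Trim(), assuming a page-aligned
// mapping end (as holds for the page map MemMap) and reusing AlignUp/kPageSize
// as elsewhere in this file. Whole pages past the new end go back to the OS
// via madvise(MADV_DONTNEED); the partial page in front of them is zeroed by
// hand, matching the zero_begin / madvise_begin split above.
namespace {

inline void ToyReleaseTail(uint8_t* new_end, uint8_t* mapping_end) {
  uint8_t* madvise_begin = AlignUp(new_end, kPageSize);
  if (madvise_begin < mapping_end) {
    // The kernel may reclaim these pages; private anonymous pages read back
    // as zero on the next touch.
    madvise(madvise_begin, mapping_end - madvise_begin, MADV_DONTNEED);
  }
  if (madvise_begin != new_end) {
    memset(new_end, 0, madvise_begin - new_end);  // clear the partial page
  }
}

}  // namespace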
1394
1395void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
1396 void* arg) {
1397 // Note: no need to use this to release pages as we already do so in FreePages().
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001398 if (handler == nullptr) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001399 return;
1400 }
1401 MutexLock mu(Thread::Current(), lock_);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001402 size_t pm_end = page_map_size_;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001403 size_t i = 0;
1404 while (i < pm_end) {
Ian Rogers13735952014-10-08 12:43:28 -07001405 uint8_t pm = page_map_[i];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001406 switch (pm) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001407 case kPageMapReleased:
1408 // Fall-through.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001409 case kPageMapEmpty: {
1410 // The start of a free page run.
1411 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
1412 DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
1413 size_t fpr_size = fpr->ByteSize(this);
Roland Levillain14d90572015-07-16 10:52:26 +01001414 DCHECK_ALIGNED(fpr_size, kPageSize);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001415 void* start = fpr;
Hiroshi Yamauchi573f7d22013-12-17 11:54:23 -08001416 if (kIsDebugBuild) {
1417 // In the debug build, the first page of a free page run
1418 // contains a magic number for debugging. Exclude it.
Ian Rogers13735952014-10-08 12:43:28 -07001419 start = reinterpret_cast<uint8_t*>(fpr) + kPageSize;
Hiroshi Yamauchi573f7d22013-12-17 11:54:23 -08001420 }
Ian Rogers13735952014-10-08 12:43:28 -07001421 void* end = reinterpret_cast<uint8_t*>(fpr) + fpr_size;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001422 handler(start, end, 0, arg);
1423 size_t num_pages = fpr_size / kPageSize;
1424 if (kIsDebugBuild) {
1425 for (size_t j = i + 1; j < i + num_pages; ++j) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001426 DCHECK(IsFreePage(j));
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001427 }
1428 }
1429 i += fpr_size / kPageSize;
1430 DCHECK_LE(i, pm_end);
1431 break;
1432 }
1433 case kPageMapLargeObject: {
1434 // The start of a large object.
1435 size_t num_pages = 1;
1436 size_t idx = i + 1;
1437 while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
1438 num_pages++;
1439 idx++;
1440 }
1441 void* start = base_ + i * kPageSize;
1442 void* end = base_ + (i + num_pages) * kPageSize;
1443 size_t used_bytes = num_pages * kPageSize;
1444 handler(start, end, used_bytes, arg);
1445 if (kIsDebugBuild) {
1446 for (size_t j = i + 1; j < i + num_pages; ++j) {
1447 DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
1448 }
1449 }
1450 i += num_pages;
1451 DCHECK_LE(i, pm_end);
1452 break;
1453 }
1454 case kPageMapLargeObjectPart:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001455 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001456 break;
1457 case kPageMapRun: {
1458 // The start of a run.
1459 Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001460 DCHECK_EQ(run->magic_num_, kMagicNum);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001461 // The dedicated full run doesn't contain any real allocations and never
 1462 // appears in the page map, so its slots are never visited here.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001463 run->InspectAllSlots(handler, arg);
1464 size_t num_pages = numOfPages[run->size_bracket_idx_];
1465 if (kIsDebugBuild) {
1466 for (size_t j = i + 1; j < i + num_pages; ++j) {
1467 DCHECK_EQ(page_map_[j], kPageMapRunPart);
1468 }
1469 }
1470 i += num_pages;
1471 DCHECK_LE(i, pm_end);
1472 break;
1473 }
1474 case kPageMapRunPart:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001475 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001476 break;
1477 default:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001478 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001479 break;
1480 }
1481 }
1482}
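// The forward scan for a large object's extent, shared by UsableSize(),
// InspectAll() and Verify(), isolated on a plain byte array ('L' standing in
// for kPageMapLargeObject, 'l' for kPageMapLargeObjectPart; the encoding is
// illustrative only): the object covers its start page plus every
// immediately following part page.
namespace {

inline size_t ToyLargeObjectPages(const char* page_map, size_t pm_end, size_t start) {
  size_t num_pages = 1;
  for (size_t idx = start + 1; idx < pm_end && page_map[idx] == 'l'; ++idx) {
    ++num_pages;
  }
  return num_pages;
}

}  // namespace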
1483
1484size_t RosAlloc::Footprint() {
1485 MutexLock mu(Thread::Current(), lock_);
1486 return footprint_;
1487}
1488
1489size_t RosAlloc::FootprintLimit() {
1490 MutexLock mu(Thread::Current(), lock_);
1491 return capacity_;
1492}
1493
1494void RosAlloc::SetFootprintLimit(size_t new_capacity) {
1495 MutexLock mu(Thread::Current(), lock_);
1496 DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
1497 // Only growing is supported here. But Trim() is supported.
1498 if (capacity_ < new_capacity) {
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001499 CHECK_LE(new_capacity, max_capacity_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001500 capacity_ = new_capacity;
1501 VLOG(heap) << "new capacity=" << capacity_;
1502 }
1503}
1504
Lei Li57846212015-06-11 17:50:20 +08001505// The function below may be called by the mutator thread itself just before it terminates.
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001506size_t RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001507 Thread* self = Thread::Current();
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001508 size_t free_bytes = 0U;
Mathieu Chartier0651d412014-04-29 14:37:57 -07001509 for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001510 MutexLock mu(self, *size_bracket_locks_[idx]);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001511 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001512 CHECK(thread_local_run != nullptr);
1513 // Invalid means already revoked.
1514 DCHECK(thread_local_run->IsThreadLocal());
1515 if (thread_local_run != dedicated_full_run_) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001516 // Note the thread local run may not be full here.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001517 thread->SetRosAllocRun(idx, dedicated_full_run_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001518 DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001519 // Count the number of free slots left.
1520 size_t num_free_slots = thread_local_run->NumberOfFreeSlots();
1521 free_bytes += num_free_slots * bracketSizes[idx];
Lei Li57846212015-06-11 17:50:20 +08001522 // The above bracket index lock guards the thread local free list against a race
 1523 // with the GC thread unioning the bulk free list into the thread local free list
 1524 // in BulkFree. While the run is marked thread local, the GC thread updates the
 1525 // thread local free list in BulkFree, and that list is merged into the free list
 1526 // either when the run becomes full or when it is revoked here, so the free list
 1527 // ends up current. Once the run is no longer thread local, the GC thread merges
 1528 // the bulk free list directly into the free list in the next BulkFree. Thus there
 1529 // is no need to merge the bulk free list again here. (A toy sketch of this
 // hand-off follows this function.)
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001530 bool dont_care;
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001531 thread_local_run->MergeThreadLocalFreeListToFreeList(&dont_care);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001532 thread_local_run->SetIsThreadLocal(false);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001533 DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
1534 DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
Mathieu Chartier0651d412014-04-29 14:37:57 -07001535 RevokeRun(self, idx, thread_local_run);
1536 }
1537 }
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001538 return free_bytes;
Mathieu Chartier0651d412014-04-29 14:37:57 -07001539}
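// The list hand-off referenced in the comment above, as a toy model: while a
// run is thread local, frees park in the thread local free list, and
// revocation (or the run filling up) splices that list into the main free
// list in O(1). The types and names here are hypothetical.
namespace {

struct ToyRunLists {
  std::list<void*> free_list;               // slots available for allocation
  std::list<void*> thread_local_free_list;  // frees parked while thread local

  void MergeThreadLocalFreeListToFreeList() {
    // splice() moves the nodes without copying, like the real list merge.
    free_list.splice(free_list.end(), thread_local_free_list);
  }
};

}  // namespace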
1540
1541void RosAlloc::RevokeRun(Thread* self, size_t idx, Run* run) {
1542 size_bracket_locks_[idx]->AssertHeld(self);
1543 DCHECK(run != dedicated_full_run_);
1544 if (run->IsFull()) {
1545 if (kIsDebugBuild) {
1546 full_runs_[idx].insert(run);
1547 DCHECK(full_runs_[idx].find(run) != full_runs_[idx].end());
1548 if (kTraceRosAlloc) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001549 LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
Mathieu Chartier0651d412014-04-29 14:37:57 -07001550 << reinterpret_cast<intptr_t>(run)
1551 << " into full_runs_[" << std::dec << idx << "]";
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001552 }
1553 }
Mathieu Chartier0651d412014-04-29 14:37:57 -07001554 } else if (run->IsAllFree()) {
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001555 run->ZeroHeaderAndSlotHeaders();
Mathieu Chartier0651d412014-04-29 14:37:57 -07001556 MutexLock mu(self, lock_);
1557 FreePages(self, run, true);
1558 } else {
1559 non_full_runs_[idx].insert(run);
1560 DCHECK(non_full_runs_[idx].find(run) != non_full_runs_[idx].end());
1561 if (kTraceRosAlloc) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001562 LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
Mathieu Chartier0651d412014-04-29 14:37:57 -07001563 << reinterpret_cast<intptr_t>(run)
1564 << " into non_full_runs_[" << std::dec << idx << "]";
1565 }
1566 }
1567}
1568
1569void RosAlloc::RevokeThreadUnsafeCurrentRuns() {
1570 // Revoke the current runs which share the same idx as thread local runs.
1571 Thread* self = Thread::Current();
1572 for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
1573 MutexLock mu(self, *size_bracket_locks_[idx]);
1574 if (current_runs_[idx] != dedicated_full_run_) {
1575 RevokeRun(self, idx, current_runs_[idx]);
1576 current_runs_[idx] = dedicated_full_run_;
1577 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001578 }
1579}
1580
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001581size_t RosAlloc::RevokeAllThreadLocalRuns() {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001582 // This is called when mutator threads won't allocate, such as at
 1583 // Zygote creation time or during a GC pause.
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001584 MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
1585 MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001586 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001587 size_t free_bytes = 0U;
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001588 for (Thread* thread : thread_list) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001589 free_bytes += RevokeThreadLocalRuns(thread);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001590 }
Mathieu Chartier0651d412014-04-29 14:37:57 -07001591 RevokeThreadUnsafeCurrentRuns();
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001592 return free_bytes;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001593}
1594
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001595void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) {
1596 if (kIsDebugBuild) {
1597 Thread* self = Thread::Current();
 1598 // Avoid race conditions on the bulk free lists with BulkFree() (GC).
Mathieu Chartiera1c1c712014-06-23 17:53:09 -07001599 ReaderMutexLock wmu(self, bulk_free_lock_);
Mathieu Chartier0651d412014-04-29 14:37:57 -07001600 for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001601 MutexLock mu(self, *size_bracket_locks_[idx]);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001602 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001603 DCHECK(thread_local_run == nullptr || thread_local_run == dedicated_full_run_);
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001604 }
1605 }
1606}
1607
1608void RosAlloc::AssertAllThreadLocalRunsAreRevoked() {
1609 if (kIsDebugBuild) {
Mathieu Chartier0651d412014-04-29 14:37:57 -07001610 Thread* self = Thread::Current();
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001611 MutexLock shutdown_mu(self, *Locks::runtime_shutdown_lock_);
1612 MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001613 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1614 for (Thread* t : thread_list) {
1615 AssertThreadLocalRunsAreRevoked(t);
1616 }
Mathieu Chartier0651d412014-04-29 14:37:57 -07001617 for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001618 MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
Mathieu Chartier0651d412014-04-29 14:37:57 -07001619 CHECK_EQ(current_runs_[idx], dedicated_full_run_);
1620 }
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001621 }
1622}
1623
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001624void RosAlloc::Initialize() {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001625 // bracketSizes.
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001626 static_assert(kNumRegularSizeBrackets == kNumOfSizeBrackets - 2,
1627 "There should be two non-regular brackets");
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001628 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001629 if (i < kNumThreadLocalSizeBrackets) {
1630 bracketSizes[i] = kThreadLocalBracketQuantumSize * (i + 1);
1631 } else if (i < kNumRegularSizeBrackets) {
1632 bracketSizes[i] = kBracketQuantumSize * (i - kNumThreadLocalSizeBrackets + 1) +
1633 (kThreadLocalBracketQuantumSize * kNumThreadLocalSizeBrackets);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001634 } else if (i == kNumOfSizeBrackets - 2) {
1635 bracketSizes[i] = 1 * KB;
1636 } else {
Ian Rogers5d057052014-03-12 14:32:27 -07001637 DCHECK_EQ(i, kNumOfSizeBrackets - 1);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001638 bracketSizes[i] = 2 * KB;
1639 }
1640 if (kTraceRosAlloc) {
1641 LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
1642 }
1643 }
1644 // numOfPages.
1645 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001646 if (i < kNumThreadLocalSizeBrackets) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001647 numOfPages[i] = 1;
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001648 } else if (i < (kNumThreadLocalSizeBrackets + kNumRegularSizeBrackets) / 2) {
Hiroshi Yamauchifc067bf2016-03-23 14:22:34 -07001649 numOfPages[i] = 1;
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001650 } else if (i < kNumRegularSizeBrackets) {
Hiroshi Yamauchifc067bf2016-03-23 14:22:34 -07001651 numOfPages[i] = 1;
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001652 } else if (i == kNumOfSizeBrackets - 2) {
Hiroshi Yamauchifc067bf2016-03-23 14:22:34 -07001653 numOfPages[i] = 2;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001654 } else {
Ian Rogers5d057052014-03-12 14:32:27 -07001655 DCHECK_EQ(i, kNumOfSizeBrackets - 1);
Hiroshi Yamauchifc067bf2016-03-23 14:22:34 -07001656 numOfPages[i] = 4;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001657 }
1658 if (kTraceRosAlloc) {
1659 LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
1660 }
1661 }
 1662 // Compute numOfSlots and headerSizes.
1663 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1664 size_t bracket_size = bracketSizes[i];
1665 size_t run_size = kPageSize * numOfPages[i];
1666 size_t max_num_of_slots = run_size / bracket_size;
1667 // Compute the actual number of slots by taking the header and
1668 // alignment into account.
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001669 size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint64_t));
1670 DCHECK_EQ(fixed_header_size, 80U);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001671 size_t header_size = 0;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001672 size_t num_of_slots = 0;
1673 // Search for the maximum number of slots that allows enough space
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001674 // for the header. (A worked example of this search follows Initialize().)
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001675 for (int s = max_num_of_slots; s >= 0; s--) {
1676 size_t tmp_slots_size = bracket_size * s;
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001677 size_t tmp_unaligned_header_size = fixed_header_size;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001678 // Align up the unaligned header size. bracket_size may not be a power of two.
1679 size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
1680 tmp_unaligned_header_size :
1681 tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001682 DCHECK_EQ(tmp_header_size % bracket_size, 0U);
1683 DCHECK_EQ(tmp_header_size % sizeof(uint64_t), 0U);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001684 if (tmp_slots_size + tmp_header_size <= run_size) {
1685 // Found the right number of slots, that is, there was enough
 1686 // space for the header.
1687 num_of_slots = s;
1688 header_size = tmp_header_size;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001689 break;
1690 }
1691 }
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001692 DCHECK_GT(num_of_slots, 0U) << i;
1693 DCHECK_GT(header_size, 0U) << i;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001694 // Add the padding for the alignment remainder.
1695 header_size += run_size % bracket_size;
Ian Rogers5d057052014-03-12 14:32:27 -07001696 DCHECK_EQ(header_size + num_of_slots * bracket_size, run_size);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001697 numOfSlots[i] = num_of_slots;
1698 headerSizes[i] = header_size;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001699 if (kTraceRosAlloc) {
1700 LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001701 << ", headerSizes[" << i << "]=" << headerSizes[i];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001702 }
1703 }
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001704 // Set up the dedicated full run so that nobody can successfully allocate from it.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001705 if (kIsDebugBuild) {
1706 dedicated_full_run_->magic_num_ = kMagicNum;
1707 }
1708 // It doesn't matter which size bracket we use since the main goal is to have the allocation
1709 // fail 100% of the time you attempt to allocate into the dedicated full run.
1710 dedicated_full_run_->size_bracket_idx_ = 0;
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001711 DCHECK_EQ(dedicated_full_run_->FreeList()->Size(), 0U); // It looks full.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001712 dedicated_full_run_->SetIsThreadLocal(true);
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001713
1714 // The smallest bracket size must be at least as large as the sizeof(Slot).
1715 DCHECK_LE(sizeof(Slot), bracketSizes[0]) << "sizeof(Slot) <= the smallest bracket size";
Hiroshi Yamauchi7ed9c562016-02-02 15:22:09 -08001716 // Check the invariants between the max bracket sizes and the number of brackets.
1717 DCHECK_EQ(kMaxThreadLocalBracketSize, bracketSizes[kNumThreadLocalSizeBrackets - 1]);
1718 DCHECK_EQ(kMaxRegularBracketSize, bracketSizes[kNumRegularSizeBrackets - 1]);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001719}
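// A worked example of the slot-count search in Initialize(), on concrete
// numbers (derived from the code above, not from a separate spec): take a
// 16-byte bracket with numOfPages == 1, so run_size == 4096 and the fixed
// header rounds up to 80 bytes. Since 80 % 16 == 0 the aligned header is also
// 80, and the largest s with 16 * s + 80 <= 4096 is s == 251, leaving zero
// padding bytes. When the header size does not depend on s (as above), the
// search reduces to the closed form below.
namespace {

inline size_t ToyNumOfSlots(size_t bracket_size, size_t run_size, size_t fixed_header) {
  size_t header = fixed_header;
  if (header % bracket_size != 0) {
    // Align the header up to a multiple of bracket_size; bracket_size need
    // not be a power of two, so avoid bit tricks.
    header += bracket_size - header % bracket_size;
  }
  return (run_size - header) / bracket_size;  // e.g. (4096 - 80) / 16 == 251
}

}  // namespace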
1720
Ian Rogers6a3c1fc2014-10-31 00:33:20 -07001721void RosAlloc::BytesAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
1722 size_t used_bytes, void* arg) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001723 if (used_bytes == 0) {
1724 return;
1725 }
1726 size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
1727 *bytes_allocated += used_bytes;
1728}
1729
Ian Rogers6a3c1fc2014-10-31 00:33:20 -07001730void RosAlloc::ObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
1731 size_t used_bytes, void* arg) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001732 if (used_bytes == 0) {
1733 return;
1734 }
1735 size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
1736 ++(*objects_allocated);
1737}
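// How these callbacks are intended to be driven (a usage sketch; the real
// call sites live in the rosalloc space code): InspectAll() visits every free
// page run, large object and run slot, and the callback accumulates into the
// size_t passed through |arg|.
namespace {

inline size_t ToyCountAllocatedBytes(RosAlloc* rosalloc) {
  size_t bytes_allocated = 0;
  rosalloc->InspectAll(RosAlloc::BytesAllocatedCallback, &bytes_allocated);
  return bytes_allocated;
}

}  // namespace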
1738
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001739void RosAlloc::Verify() {
1740 Thread* self = Thread::Current();
1741 CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001742 << "The mutator lock isn't exclusively held at " << __PRETTY_FUNCTION__;
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001743 MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
Mathieu Chartiera1c1c712014-06-23 17:53:09 -07001744 ReaderMutexLock wmu(self, bulk_free_lock_);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001745 std::vector<Run*> runs;
1746 {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001747 MutexLock lock_mu(self, lock_);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001748 size_t pm_end = page_map_size_;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001749 size_t i = 0;
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001750 size_t memory_tool_modifier = is_running_on_memory_tool_ ?
1751 2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes : // Redzones before and after.
Andreas Gampefef16ad2015-02-19 16:44:32 -08001752 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001753 while (i < pm_end) {
Ian Rogers13735952014-10-08 12:43:28 -07001754 uint8_t pm = page_map_[i];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001755 switch (pm) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001756 case kPageMapReleased:
1757 // Fall-through.
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001758 case kPageMapEmpty: {
1759 // The start of a free page run.
1760 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001761 DCHECK_EQ(fpr->magic_num_, kMagicNumFree);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001762 CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
1763 << "An empty page must belong to the free page run set";
1764 size_t fpr_size = fpr->ByteSize(this);
Roland Levillain14d90572015-07-16 10:52:26 +01001765 CHECK_ALIGNED(fpr_size, kPageSize)
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001766 << "A free page run size isn't page-aligned : " << fpr_size;
1767 size_t num_pages = fpr_size / kPageSize;
1768 CHECK_GT(num_pages, static_cast<uintptr_t>(0))
1769 << "A free page run size must be > 0 : " << fpr_size;
1770 for (size_t j = i + 1; j < i + num_pages; ++j) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001771 CHECK(IsFreePage(j))
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001772 << "A mismatch between the page map table for kPageMapEmpty "
1773 << " at page index " << j
1774 << " and the free page run size : page index range : "
1775 << i << " to " << (i + num_pages) << std::endl << DumpPageMap();
1776 }
1777 i += num_pages;
1778 CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
1779 << std::endl << DumpPageMap();
1780 break;
1781 }
1782 case kPageMapLargeObject: {
1783 // The start of a large object.
1784 size_t num_pages = 1;
1785 size_t idx = i + 1;
1786 while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
1787 num_pages++;
1788 idx++;
1789 }
Andreas Gamped7576322014-10-24 22:13:45 -07001790 uint8_t* start = base_ + i * kPageSize;
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001791 if (is_running_on_memory_tool_) {
1792 start += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
Andreas Gamped7576322014-10-24 22:13:45 -07001793 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001794 mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
1795 size_t obj_size = obj->SizeOf();
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001796 CHECK_GT(obj_size + memory_tool_modifier, kLargeSizeThreshold)
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001797 << "A rosalloc large object size must be > " << kLargeSizeThreshold;
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001798 CHECK_EQ(num_pages, RoundUp(obj_size + memory_tool_modifier, kPageSize) / kPageSize)
1799 << "A rosalloc large object size " << obj_size + memory_tool_modifier
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001800 << " does not match the page map table " << (num_pages * kPageSize)
1801 << std::endl << DumpPageMap();
1802 i += num_pages;
1803 CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
1804 << std::endl << DumpPageMap();
1805 break;
1806 }
1807 case kPageMapLargeObjectPart:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001808 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001809 break;
1810 case kPageMapRun: {
1811 // The start of a run.
1812 Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001813 DCHECK_EQ(run->magic_num_, kMagicNum);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001814 size_t idx = run->size_bracket_idx_;
Ian Rogers5d057052014-03-12 14:32:27 -07001815 CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001816 size_t num_pages = numOfPages[idx];
1817 CHECK_GT(num_pages, static_cast<uintptr_t>(0))
1818 << "Run size must be > 0 : " << num_pages;
1819 for (size_t j = i + 1; j < i + num_pages; ++j) {
1820 CHECK_EQ(page_map_[j], kPageMapRunPart)
1821 << "A mismatch between the page map table for kPageMapRunPart "
1822 << " at page index " << j
1823 << " and the run size : page index range " << i << " to " << (i + num_pages)
1824 << std::endl << DumpPageMap();
1825 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001826 // Don't verify the dedicated_full_run_ since it doesn't have any real allocations.
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001827 runs.push_back(run);
1828 i += num_pages;
1829 CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
1830 << std::endl << DumpPageMap();
1831 break;
1832 }
1833 case kPageMapRunPart:
Mathieu Chartier0651d412014-04-29 14:37:57 -07001834 // Fall-through.
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001835 default:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001836 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001837 break;
1838 }
1839 }
1840 }
Mathieu Chartier0651d412014-04-29 14:37:57 -07001841 std::list<Thread*> threads = Runtime::Current()->GetThreadList()->GetList();
1842 for (Thread* thread : threads) {
1843 for (size_t i = 0; i < kNumThreadLocalSizeBrackets; ++i) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001844 MutexLock brackets_mu(self, *size_bracket_locks_[i]);
Mathieu Chartier0651d412014-04-29 14:37:57 -07001845 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
1846 CHECK(thread_local_run != nullptr);
1847 CHECK(thread_local_run->IsThreadLocal());
1848 CHECK(thread_local_run == dedicated_full_run_ ||
1849 thread_local_run->size_bracket_idx_ == i);
1850 }
1851 }
1852 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001853 MutexLock brackets_mu(self, *size_bracket_locks_[i]);
Mathieu Chartier0651d412014-04-29 14:37:57 -07001854 Run* current_run = current_runs_[i];
1855 CHECK(current_run != nullptr);
1856 if (current_run != dedicated_full_run_) {
1857 // The dedicated full run is currently marked as thread local.
1858 CHECK(!current_run->IsThreadLocal());
1859 CHECK_EQ(current_run->size_bracket_idx_, i);
1860 }
1861 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001862 // Call Verify() here for the lock order.
1863 for (auto& run : runs) {
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001864 run->Verify(self, this, is_running_on_memory_tool_);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001865 }
1866}
1867
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001868void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool) {
Ian Rogers5d057052014-03-12 14:32:27 -07001869 DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001870 const size_t idx = size_bracket_idx_;
Ian Rogers5d057052014-03-12 14:32:27 -07001871 CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
Ian Rogers13735952014-10-08 12:43:28 -07001872 uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001873 const size_t num_slots = numOfSlots[idx];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001874 size_t bracket_size = IndexToBracketSize(idx);
1875 CHECK_EQ(slot_base + num_slots * bracket_size,
Ian Rogers13735952014-10-08 12:43:28 -07001876 reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize)
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001877 << "Mismatch in the end address of the run " << Dump();
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001878 // Check that the bulk free list is empty. It's only used during BulkFree().
 1879 CHECK(IsBulkFreeListEmpty()) << "The bulk free list isn't empty " << Dump();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001880 // Check the thread local runs, the current runs, and the run sets.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001881 if (IsThreadLocal()) {
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001882 // If it's a thread local run, then it must be pointed to by an owner thread.
1883 bool owner_found = false;
1884 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1885 for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
1886 Thread* thread = *it;
Mathieu Chartier0651d412014-04-29 14:37:57 -07001887 for (size_t i = 0; i < kNumThreadLocalSizeBrackets; i++) {
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001888 MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001889 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001890 if (thread_local_run == this) {
1891 CHECK(!owner_found)
1892 << "A thread local run has more than one owner thread " << Dump();
1893 CHECK_EQ(i, idx)
1894 << "A mismatching size bracket index in a thread local run " << Dump();
1895 owner_found = true;
1896 }
1897 }
1898 }
1899 CHECK(owner_found) << "A thread local run has no owner thread " << Dump();
1900 } else {
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001901 // If it's not thread local, check that the thread local free list is empty.
1902 CHECK(IsThreadLocalFreeListEmpty())
1903 << "A non-thread-local run's thread local free list isn't empty "
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001904 << Dump();
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001905 // Check if it's a current run for the size bracket.
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001906 bool is_current_run = false;
1907 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1908 MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
1909 Run* current_run = rosalloc->current_runs_[i];
1910 if (idx == i) {
1911 if (this == current_run) {
1912 is_current_run = true;
1913 }
1914 } else {
 1915 // If the size bracket index does not match, then it must not
1916 // be a current run.
1917 CHECK_NE(this, current_run)
1918 << "A current run points to a run with a wrong size bracket index " << Dump();
1919 }
1920 }
 1921 // If it's neither a thread local run nor a current run, then it must be
1922 // in a run set.
1923 if (!is_current_run) {
1924 MutexLock mu(self, rosalloc->lock_);
Mathieu Chartier58553c72014-09-16 16:25:55 -07001925 auto& non_full_runs = rosalloc->non_full_runs_[idx];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001926 // If it's all free, it must be a free page run rather than a run.
1927 CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump();
1928 if (!IsFull()) {
 1929 // If it's not full, it must be in the non-full run set.
1930 CHECK(non_full_runs.find(this) != non_full_runs.end())
1931 << "A non-full run isn't in the non-full run set " << Dump();
1932 } else {
 1933 // If it's full, it must be in the full run set (debug build only).
1934 if (kIsDebugBuild) {
Mathieu Chartier58553c72014-09-16 16:25:55 -07001935 auto& full_runs = rosalloc->full_runs_[idx];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001936 CHECK(full_runs.find(this) != full_runs.end())
1937 << " A full run isn't in the full run set " << Dump();
1938 }
1939 }
1940 }
1941 }
1942 // Check each slot.
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001943 size_t memory_tool_modifier = running_on_memory_tool ?
1944 2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes :
Andreas Gamped7576322014-10-24 22:13:45 -07001945 0U;
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001946 // TODO: reuse InspectAllSlots().
1947 std::unique_ptr<bool[]> is_free(new bool[num_slots]()); // zero initialized
1948 // Mark the free slots and the remaining ones are allocated.
1949 for (Slot* slot = free_list_.Head(); slot != nullptr; slot = slot->Next()) {
1950 size_t slot_idx = SlotIndex(slot);
1951 DCHECK_LT(slot_idx, num_slots);
1952 is_free[slot_idx] = true;
1953 }
1954 if (IsThreadLocal()) {
1955 for (Slot* slot = thread_local_free_list_.Head(); slot != nullptr; slot = slot->Next()) {
1956 size_t slot_idx = SlotIndex(slot);
1957 DCHECK_LT(slot_idx, num_slots);
1958 is_free[slot_idx] = true;
1959 }
1960 }
1961 for (size_t slot_idx = 0; slot_idx < num_slots; ++slot_idx) {
1962 uint8_t* slot_addr = slot_base + slot_idx * bracket_size;
1963 if (running_on_memory_tool) {
1964 slot_addr += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
1965 }
1966 if (!is_free[slot_idx]) {
 1967 // The slot is allocated.
1968 mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
1969 size_t obj_size = obj->SizeOf();
1970 CHECK_LE(obj_size + memory_tool_modifier, kLargeSizeThreshold)
1971 << "A run slot contains a large object " << Dump();
1972 CHECK_EQ(SizeToIndex(obj_size + memory_tool_modifier), idx)
David Sehr709b0702016-10-13 09:12:37 -07001973 << obj->PrettyTypeOf() << " "
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001974 << "obj_size=" << obj_size << "(" << obj_size + memory_tool_modifier << "), idx=" << idx
1975 << " A run slot contains an object with wrong size " << Dump();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001976 }
1977 }
1978}
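// The redzone arithmetic behind the size checks above, on toy numbers: with a
// hypothetical 8-byte redzone on each side of an object, a 48-byte object
// accounts for 48 + 2 * 8 == 64 bytes, so the 64-byte bracket is the one that
// must match, and the payload starts one redzone past the slot base.
namespace {

inline size_t ToyMemoryToolFootprint(size_t obj_size, size_t redzone_bytes) {
  return obj_size + 2 * redzone_bytes;  // redzones before and after the object
}

}  // namespace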
1979
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07001980size_t RosAlloc::ReleasePages() {
1981 VLOG(heap) << "RosAlloc::ReleasePages()";
1982 DCHECK(!DoesReleaseAllPages());
1983 Thread* self = Thread::Current();
1984 size_t reclaimed_bytes = 0;
1985 size_t i = 0;
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001986 // Check the page map size which might have changed due to grow/shrink.
1987 while (i < page_map_size_) {
1988 // Reading the page map without a lock is racy but the race is benign since it should only
1989 // result in occasionally not releasing pages which we could release.
Ian Rogers13735952014-10-08 12:43:28 -07001990 uint8_t pm = page_map_[i];
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07001991 switch (pm) {
Mathieu Chartiere28ed992014-07-10 10:16:44 -07001992 case kPageMapReleased:
1993 // Fall through.
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07001994 case kPageMapEmpty: {
Mathieu Chartiere28ed992014-07-10 10:16:44 -07001995 // This is currently the start of a free page run.
1996 // Acquire the lock to prevent other threads racing in and modifying the page map.
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001997 MutexLock mu(self, lock_);
1998 // Check that it's still empty after we acquired the lock since another thread could have
1999 // raced in and placed an allocation here.
Mathieu Chartiere28ed992014-07-10 10:16:44 -07002000 if (IsFreePage(i)) {
2001 // Free page runs can start with a released page if we coalesced a released page free
2002 // page run with an empty page run.
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07002003 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
Mathieu Chartiere28ed992014-07-10 10:16:44 -07002004 // There is a race condition where FreePages() can coalesce fpr with the previous
2005 // free page run before we acquire lock_. In that case free_page_runs_.find will not find
2006 // a run starting at fpr. To handle this race, we skip reclaiming the page range and go
2007 // to the next page.
2008 if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
2009 size_t fpr_size = fpr->ByteSize(this);
Roland Levillain14d90572015-07-16 10:52:26 +01002010 DCHECK_ALIGNED(fpr_size, kPageSize);
Ian Rogers13735952014-10-08 12:43:28 -07002011 uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
Mathieu Chartiere28ed992014-07-10 10:16:44 -07002012 reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
2013 size_t pages = fpr_size / kPageSize;
2014 CHECK_GT(pages, 0U) << "Infinite loop probable";
2015 i += pages;
2016 DCHECK_LE(i, page_map_size_);
2017 break;
2018 }
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07002019 }
Ian Rogersfc787ec2014-10-09 21:56:44 -07002020 FALLTHROUGH_INTENDED;
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07002021 }
2022 case kPageMapLargeObject: // Fall through.
2023 case kPageMapLargeObjectPart: // Fall through.
2024 case kPageMapRun: // Fall through.
2025 case kPageMapRunPart: // Fall through.
2026 ++i;
2027 break; // Skip.
2028 default:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07002029 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07002030 break;
2031 }
2032 }
2033 return reclaimed_bytes;
2034}
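
// Caller-side sketch (assumed, not defined in this file): a space trim could
// drive the scan above roughly like this, with no RosAlloc locks held on
// entry, since ReleasePages() takes lock_ internally per free page run:
//
//   size_t TrimSketch(RosAlloc* rosalloc) {
//     size_t reclaimed = rosalloc->ReleasePages();  // Racy page map scan; the race is benign.
//     VLOG(heap) << "Reclaimed " << reclaimed << " bytes";
//     return reclaimed;
//   }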

size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) {
  DCHECK_ALIGNED(start, kPageSize);
  DCHECK_ALIGNED(end, kPageSize);
  DCHECK_LT(start, end);
  if (kIsDebugBuild) {
    // In the debug build, the first page of a free page run
    // contains a magic number for debugging. Exclude it.
    start += kPageSize;

    // Single pages won't be released.
    if (start == end) {
      return 0;
    }
  }
  if (!kMadviseZeroes) {
    // TODO: Do this when we resurrect the page instead.
    memset(start, 0, end - start);
  }
  CHECK_EQ(madvise(start, end - start, MADV_DONTNEED), 0);
  size_t pm_idx = ToPageMapIndex(start);
  size_t reclaimed_bytes = 0;
  // Calculate reclaimed bytes and update page map.
  const size_t max_idx = pm_idx + (end - start) / kPageSize;
  for (; pm_idx < max_idx; ++pm_idx) {
    DCHECK(IsFreePage(pm_idx));
    if (page_map_[pm_idx] == kPageMapEmpty) {
      // Mark the page as released and update how many bytes we released.
      reclaimed_bytes += kPageSize;
      page_map_[pm_idx] = kPageMapReleased;
    }
  }
  return reclaimed_bytes;
}
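
// Minimal standalone sketch of the madvise idiom used above (assumes a Linux
// target where MADV_DONTNEED lazily remaps the range to zero pages; the names
// below are illustrative, not part of RosAlloc):
//
//   #include <sys/mman.h>
//
//   size_t ReleaseRangeSketch(uint8_t* start, uint8_t* end) {
//     // Both bounds must be page-aligned, as DCHECKed above.
//     if (madvise(start, end - start, MADV_DONTNEED) != 0) {
//       return 0;  // Advice rejected; nothing reclaimed.
//     }
//     return end - start;  // Physical pages become reclaimable by the kernel.
//   }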

void RosAlloc::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
  Thread* self = Thread::Current();
  size_t largest_continuous_free_pages = 0;
  WriterMutexLock wmu(self, bulk_free_lock_);
  MutexLock mu(self, lock_);
  uint64_t total_free = 0;
  for (FreePageRun* fpr : free_page_runs_) {
    largest_continuous_free_pages = std::max(largest_continuous_free_pages,
                                             fpr->ByteSize(this));
    total_free += fpr->ByteSize(this);
  }
  size_t required_bytes = 0;
  const char* new_buffer_msg = "";
  if (failed_alloc_bytes > kLargeSizeThreshold) {
    // Large allocation.
    required_bytes = RoundUp(failed_alloc_bytes, kPageSize);
  } else {
    // Non-large allocation.
    required_bytes = numOfPages[SizeToIndex(failed_alloc_bytes)] * kPageSize;
    new_buffer_msg = " for a new buffer";
  }
  if (required_bytes > largest_continuous_free_pages) {
    os << "; failed due to fragmentation ("
       << "required contiguous free " << required_bytes << " bytes" << new_buffer_msg
       << ", largest contiguous free " << largest_continuous_free_pages << " bytes"
       << ", total free pages " << total_free << " bytes"
       << ", space footprint " << footprint_ << " bytes"
       << ", space max capacity " << max_capacity_ << " bytes"
       << ")" << std::endl;
  }
}
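
// Worked example of the required_bytes computation above (the bracket geometry
// is illustrative, not taken from this file, and 4 KB pages are assumed): a
// failed 200-byte allocation is non-large, so if its bracket's runs span one
// page, required_bytes == kPageSize == 4096; a failed 20000-byte large
// allocation needs RoundUp(20000, 4096) == 20480 contiguous free bytes.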

void RosAlloc::DumpStats(std::ostream& os) {
  Thread* self = Thread::Current();
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
      << "The mutator lock isn't exclusively held at " << __PRETTY_FUNCTION__;
  size_t num_large_objects = 0;
  size_t num_pages_large_objects = 0;
  // These arrays are zero initialized.
  std::unique_ptr<size_t[]> num_runs(new size_t[kNumOfSizeBrackets]());
  std::unique_ptr<size_t[]> num_pages_runs(new size_t[kNumOfSizeBrackets]());
  std::unique_ptr<size_t[]> num_slots(new size_t[kNumOfSizeBrackets]());
  std::unique_ptr<size_t[]> num_used_slots(new size_t[kNumOfSizeBrackets]());
  std::unique_ptr<size_t[]> num_metadata_bytes(new size_t[kNumOfSizeBrackets]());
  ReaderMutexLock rmu(self, bulk_free_lock_);
  MutexLock lock_mu(self, lock_);
  for (size_t i = 0; i < page_map_size_; ) {
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
      case kPageMapEmpty:
        ++i;
        break;
      case kPageMapLargeObject: {
        size_t num_pages = 1;
        size_t idx = i + 1;
        while (idx < page_map_size_ && page_map_[idx] == kPageMapLargeObjectPart) {
          num_pages++;
          idx++;
        }
        num_large_objects++;
        num_pages_large_objects += num_pages;
        i += num_pages;
        break;
      }
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl
                   << DumpPageMap();
        break;
      case kPageMapRun: {
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        size_t idx = run->size_bracket_idx_;
        size_t num_pages = numOfPages[idx];
        num_runs[idx]++;
        num_pages_runs[idx] += num_pages;
        num_slots[idx] += numOfSlots[idx];
        size_t num_free_slots = run->NumberOfFreeSlots();
        num_used_slots[idx] += numOfSlots[idx] - num_free_slots;
        num_metadata_bytes[idx] += headerSizes[idx];
        i += num_pages;
        break;
      }
      case kPageMapRunPart:
        // Fall through.
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl
                   << DumpPageMap();
        break;
    }
  }
  os << "RosAlloc stats:\n";
  for (size_t i = 0; i < kNumOfSizeBrackets; ++i) {
    os << "Bracket " << i << " (" << bracketSizes[i] << "):"
       << " #runs=" << num_runs[i]
       << " #pages=" << num_pages_runs[i]
       << " (" << PrettySize(num_pages_runs[i] * kPageSize) << ")"
       << " #metadata_bytes=" << PrettySize(num_metadata_bytes[i])
       << " #slots=" << num_slots[i] << " (" << PrettySize(num_slots[i] * bracketSizes[i]) << ")"
       << " #used_slots=" << num_used_slots[i]
       << " (" << PrettySize(num_used_slots[i] * bracketSizes[i]) << ")\n";
  }
  os << "Large #allocations=" << num_large_objects
     << " #pages=" << num_pages_large_objects
     << " (" << PrettySize(num_pages_large_objects * kPageSize) << ")\n";
  size_t total_num_pages = 0;
  size_t total_metadata_bytes = 0;
  size_t total_allocated_bytes = 0;
  for (size_t i = 0; i < kNumOfSizeBrackets; ++i) {
    total_num_pages += num_pages_runs[i];
    total_metadata_bytes += num_metadata_bytes[i];
    total_allocated_bytes += num_used_slots[i] * bracketSizes[i];
  }
  total_num_pages += num_pages_large_objects;
  total_allocated_bytes += num_pages_large_objects * kPageSize;
  os << "Total #total_bytes=" << PrettySize(total_num_pages * kPageSize)
     << " #metadata_bytes=" << PrettySize(total_metadata_bytes)
     << " #used_bytes=" << PrettySize(total_allocated_bytes) << "\n";
  os << "\n";
}
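
// Hypothetical call-site sketch (names assumed, not from this file): per the
// CHECK above, DumpStats() requires the mutator lock to be exclusively held,
// e.g. with all threads suspended:
//
//   {
//     ScopedSuspendAll ssa(__FUNCTION__);  // Assumed ART suspension helper.
//     std::ostringstream oss;
//     rosalloc->DumpStats(oss);
//     LOG(INFO) << oss.str();
//   }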

}  // namespace allocator
}  // namespace gc
}  // namespace art