/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rosalloc.h"

#include "base/memory_tool.h"
#include "base/mutex-inl.h"
#include "gc/space/memory_tool_settings.h"
#include "mem_map.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "thread_list.h"

#include <map>
#include <list>
#include <sstream>
#include <vector>

namespace art {
namespace gc {
namespace allocator {

static constexpr bool kUsePrefetchDuringAllocRun = false;
static constexpr bool kPrefetchNewRunDataByZeroing = false;
static constexpr size_t kPrefetchStride = 64;

size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;
size_t RosAlloc::dedicated_full_run_storage_[kPageSize / sizeof(size_t)] = { 0 };
RosAlloc::Run* RosAlloc::dedicated_full_run_ =
    reinterpret_cast<RosAlloc::Run*>(dedicated_full_run_storage_);

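// The constructor carves the whole [base, base + capacity) range into a single free page run,
// releases its pages, and points every size bracket's current run at the shared
// dedicated_full_run_ so that the first allocation in each bracket takes the refill path.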
RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                   PageReleaseMode page_release_mode, bool running_on_memory_tool,
                   size_t page_release_size_threshold)
    : base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity),
      capacity_(capacity), max_capacity_(max_capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
      page_release_mode_(page_release_mode),
      page_release_size_threshold_(page_release_size_threshold),
      is_running_on_memory_tool_(running_on_memory_tool) {
  DCHECK_ALIGNED(base, kPageSize);
  DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
  DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
  CHECK_LE(capacity, max_capacity);
  CHECK_ALIGNED(page_release_size_threshold_, kPageSize);
  // Zero the memory explicitly (don't rely on the mem map being zero-initialized).
  if (!kMadviseZeroes) {
    memset(base_, 0, max_capacity);
  }
  CHECK_EQ(madvise(base_, max_capacity, MADV_DONTNEED), 0);
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << (intptr_t)base_ << ", end="
             << std::hex << (intptr_t)(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_
             << ", max_capacity=" << std::dec << max_capacity_;
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_lock_names_[i] =
        StringPrintf("a rosalloc size bracket %d lock", static_cast<int>(i));
    size_bracket_locks_[i] = new Mutex(size_bracket_lock_names_[i].c_str(), kRosAllocBracketLock);
    current_runs_[i] = dedicated_full_run_;
  }
  DCHECK_EQ(footprint_, capacity_);
  size_t num_of_pages = footprint_ / kPageSize;
  size_t max_num_of_pages = max_capacity_ / kPageSize;
  std::string error_msg;
  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", nullptr,
                                               RoundUp(max_num_of_pages, kPageSize),
                                               PROT_READ | PROT_WRITE, false, false, &error_msg));
  CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
  page_map_ = page_map_mem_map_->Begin();
  page_map_size_ = num_of_pages;
  max_page_map_size_ = max_num_of_pages;
  free_page_run_size_map_.resize(num_of_pages);
  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  DCHECK(free_pages->IsFree());
  free_pages->ReleasePages(this);
  DCHECK(free_pages->IsFree());
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}

RosAlloc::~RosAlloc() {
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    delete size_bracket_locks_[i];
  }
  if (is_running_on_memory_tool_) {
    MEMORY_TOOL_MAKE_DEFINED(base_, capacity_);
  }
}

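// Allocates num_pages contiguous pages to back a run or a large object. Scans free_page_runs_
// for the lowest-address fit, splitting off any remainder, and grows the footprint toward
// capacity_ via ArtRosAllocMoreCore() when no existing free run is large enough.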
void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
  lock_.AssertHeld(self);
  DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
  FreePageRun* res = nullptr;
  const size_t req_byte_size = num_pages * kPageSize;
  // Find the lowest address free page run that's large enough.
  for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
    FreePageRun* fpr = *it;
    DCHECK(fpr->IsFree());
    size_t fpr_byte_size = fpr->ByteSize(this);
    DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
    if (req_byte_size <= fpr_byte_size) {
      // Found one.
      free_page_runs_.erase(it++);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split.
        FreePageRun* remainder =
            reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Don't need to call madvise on remainder here.
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
      break;
    } else {
      ++it;
    }
  }

  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == nullptr && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = nullptr;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_size_, new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_size_ = new_num_of_pages;
      DCHECK_LE(page_map_size_, max_page_map_size_);
      free_page_run_size_map_.resize(new_num_of_pages);
      ArtRosAllocMoreCore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK_EQ(last_free_page_run->End(this), base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK_EQ(*free_page_runs_.rbegin(), new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != nullptr);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder =
            reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != nullptr)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK(IsFreePage(page_map_idx + i));
    }
    switch (page_map_type) {
      case kPageMapRun:
        page_map_[page_map_idx] = kPageMapRun;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapRunPart;
        }
        break;
      case kPageMapLargeObject:
        page_map_[page_map_idx] = kPageMapLargeObject;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
        }
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_type);
        break;
    }
    if (kIsDebugBuild) {
      // Clear the first page since it is not madvised due to the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : nullptr";
  }
  return nullptr;
}

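// Returns the pages backing ptr to the free page run set, coalescing with adjacent free runs
// on both sides before releasing and re-inserting the result. Returns the number of bytes freed.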
size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK_LT(pm_idx, page_map_size_);
  uint8_t pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  uint8_t pm_part_type;
  switch (pm_type) {
    case kPageMapRun:
      pm_part_type = kPageMapRunPart;
      break;
    case kPageMapLargeObject:
      pm_part_type = kPageMapLargeObjectPart;
      break;
    default:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << " : " << "pm_idx=" << pm_idx
                 << ", pm_type=" << static_cast<int>(pm_type) << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      return 0;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_size_;
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }
  const size_t byte_size = num_pages * kPageSize;
  if (already_zero) {
    if (ShouldCheckZeroMemory()) {
      const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(ptr);
      for (size_t i = 0; i < byte_size / sizeof(uintptr_t); ++i) {
        CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
      }
    }
  } else if (!DoesReleaseAllPages()) {
    memset(ptr, 0, byte_size);
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << __PRETTY_FUNCTION__ << " : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + byte_size)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, byte_size);
  DCHECK_ALIGNED(fpr->ByteSize(this), kPageSize);

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_size_ : ToPageMapIndex(fpr->End(this))) << "]";
    }
    auto higher_it = free_page_runs_.upper_bound(fpr);
    if (higher_it != free_page_runs_.end()) {
      for (auto it = higher_it; it != free_page_runs_.end(); ) {
        FreePageRun* h = *it;
        DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec
                    << ToPageMapIndex(h) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                    << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
        }
        if (fpr->End(this) == h->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          // Clear magic num since this is no longer the start of a free page run.
          if (kIsDebugBuild) {
            h->magic_num_ = 0;
          }
          free_page_runs_.erase(it++);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(h)
                      << " from free_page_runs_";
          }
          fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
          DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
      }
    }
    // Try to coalesce in the lower address direction.
    auto lower_it = free_page_runs_.upper_bound(fpr);
    if (lower_it != free_page_runs_.begin()) {
      --lower_it;
      for (auto it = lower_it; ; ) {
        // We want to try to coalesce with the first element but
        // there's no "<=" operator for the iterator.
        bool to_exit_loop = it == free_page_runs_.begin();

        FreePageRun* l = *it;
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec
                    << ToPageMapIndex(l) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                    << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
        }
        if (l->End(this) == fpr->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it--);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(l)
                      << " from free_page_runs_";
          }
          l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
          DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
          // Clear magic num since this is no longer the start of a free page run.
          if (kIsDebugBuild) {
            fpr->magic_num_ = 0;
          }
          fpr = l;
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
        if (to_exit_loop) {
          break;
        }
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  DCHECK(fpr->IsFree());
  fpr->ReleasePages(this);
  DCHECK(fpr->IsFree());
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
  return byte_size;
}

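// Large objects (> kLargeSizeThreshold) bypass runs entirely and are backed directly by whole
// pages tagged kPageMapLargeObject/kPageMapLargeObjectPart in the page map.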
void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated,
                                 size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK_GT(size, kLargeSizeThreshold);
  size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
  void* r;
  {
    MutexLock mu(self, lock_);
    r = AllocPages(self, num_pages, kPageMapLargeObject);
  }
  if (UNLIKELY(r == nullptr)) {
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : nullptr";
    }
    return nullptr;
  }
  const size_t total_bytes = num_pages * kPageSize;
  *bytes_allocated = total_bytes;
  *usable_size = total_bytes;
  *bytes_tl_bulk_allocated = total_bytes;
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
              << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }
  // Check if the returned memory is really all zero.
  if (ShouldCheckZeroMemory()) {
    CHECK_EQ(total_bytes % sizeof(uintptr_t), 0U);
    const uintptr_t* words = reinterpret_cast<uintptr_t*>(r);
    for (size_t i = 0; i < total_bytes / sizeof(uintptr_t); ++i) {
      CHECK_EQ(words[i], 0U);
    }
  }
  return r;
}

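// Frees a pointer of unknown kind: the page map entry tells us whether it is a large object
// (freed page-wise) or a run slot (walk kPageMapRunPart entries back to the run header, then
// free the slot from the run).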
size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  Run* run = nullptr;
  {
    MutexLock mu(self, lock_);
    DCHECK_LT(pm_idx, page_map_size_);
    uint8_t page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec
                << pm_idx << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_[pm_idx]) {
      case kPageMapLargeObject:
        return FreePages(self, ptr, false);
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
      case kPageMapRunPart: {
        // Find the beginning of the run.
        do {
          --pm_idx;
          DCHECK_LT(pm_idx, capacity_ / kPageSize);
        } while (page_map_[pm_idx] != kPageMapRun);
        FALLTHROUGH_INTENDED;
      case kPageMapRun:
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        break;
      case kPageMapReleased:
      case kPageMapEmpty:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
      }
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
    }
  }
  DCHECK(run != nullptr);
  return FreeFromRun(self, ptr, run);
}

size_t RosAlloc::Free(Thread* self, void* ptr) {
  ReaderMutexLock rmu(self, bulk_free_lock_);
  return FreeInternal(self, ptr);
}

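// Allocates and initializes a brand-new run for size bracket idx, optionally warming the cache
// for brackets that are likely to become thread-local.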
RosAlloc::Run* RosAlloc::AllocRun(Thread* self, size_t idx) {
  RosAlloc::Run* new_run = nullptr;
  {
    MutexLock mu(self, lock_);
    new_run = reinterpret_cast<Run*>(AllocPages(self, numOfPages[idx], kPageMapRun));
  }
  if (LIKELY(new_run != nullptr)) {
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    DCHECK(!new_run->IsThreadLocal());
    DCHECK(!new_run->to_be_bulk_freed_);
    if (kUsePrefetchDuringAllocRun && idx < kNumThreadLocalSizeBrackets) {
      // Take ownership of the cache lines if this is likely to become a thread-local run.
      if (kPrefetchNewRunDataByZeroing) {
        // Zeroing the data is sometimes faster than prefetching but it increases memory usage
        // since we end up dirtying zero pages which may have been madvised.
        new_run->ZeroData();
      } else {
        const size_t num_of_slots = numOfSlots[idx];
        const size_t bracket_size = bracketSizes[idx];
        const size_t num_of_bytes = num_of_slots * bracket_size;
        uint8_t* begin = reinterpret_cast<uint8_t*>(new_run) + headerSizes[idx];
        for (size_t i = 0; i < num_of_bytes; i += kPrefetchStride) {
          __builtin_prefetch(begin + i);
        }
      }
    }
    new_run->InitFreeList();
  }
  return new_run;
}

RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  // Get the lowest address non-full run from the binary tree.
  auto* const bt = &non_full_runs_[idx];
  if (!bt->empty()) {
    // If there's one, use it as the current run.
    auto it = bt->begin();
    Run* non_full_run = *it;
    DCHECK(non_full_run != nullptr);
    DCHECK(!non_full_run->IsThreadLocal());
    bt->erase(it);
    return non_full_run;
  }
  // If there's none, allocate a new run and use it as the current run.
  return AllocRun(self, idx);
}

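// Slot allocation from the shared current run of bracket idx, replacing the run via RefillRun()
// once it fills up. Caller must hold size_bracket_locks_[idx].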
inline void* RosAlloc::AllocFromCurrentRunUnlocked(Thread* self, size_t idx) {
  Run* current_run = current_runs_[idx];
  DCHECK(current_run != nullptr);
  void* slot_addr = current_run->AllocSlot();
  if (UNLIKELY(slot_addr == nullptr)) {
    // The current run got full. Try to refill it.
    DCHECK(current_run->IsFull());
    if (kIsDebugBuild && current_run != dedicated_full_run_) {
      full_runs_[idx].insert(current_run);
      if (kTraceRosAlloc) {
        LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(current_run)
                  << " into full_runs_[" << std::dec << idx << "]";
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
    }
    current_run = RefillRun(self, idx);
    if (UNLIKELY(current_run == nullptr)) {
      // Failed to allocate a new run, make sure that it is the dedicated full run.
      current_runs_[idx] = dedicated_full_run_;
      return nullptr;
    }
    DCHECK(current_run != nullptr);
    DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
    current_run->SetIsThreadLocal(false);
    current_runs_[idx] = current_run;
    DCHECK(!current_run->IsFull());
    slot_addr = current_run->AllocSlot();
    // Must succeed now with a new run.
    DCHECK(slot_addr != nullptr);
  }
  return slot_addr;
}

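// Variant of AllocFromRun() that skips the bracket locks; only safe when the mutator lock is
// held exclusively, as asserted below.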
void* RosAlloc::AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated,
                                         size_t* usable_size,
                                         size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  void* slot_addr = AllocFromCurrentRunUnlocked(self, idx);
  if (LIKELY(slot_addr != nullptr)) {
    *bytes_allocated = bracket_size;
    *usable_size = bracket_size;
    *bytes_tl_bulk_allocated = bracket_size;
  }
  // Caller verifies that it is all 0.
  return slot_addr;
}

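// Fast-path slot allocation. Small brackets (idx < kNumThreadLocalSizeBrackets) allocate
// lock-free from the thread-local run, falling back to merging the thread-local free list or
// refilling the run; larger brackets take the bracket lock and use the shared current run.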
void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated,
                             size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  void* slot_addr;
  if (LIKELY(idx < kNumThreadLocalSizeBrackets)) {
    // Use a thread-local run.
    Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
    // Allow invalid since this will always fail the allocation.
    if (kIsDebugBuild) {
      // Need the lock to prevent race conditions.
      MutexLock mu(self, *size_bracket_locks_[idx]);
      CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
    }
    DCHECK(thread_local_run != nullptr);
    DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
    slot_addr = thread_local_run->AllocSlot();
    // The allocation must fail if the run is invalid.
    DCHECK(thread_local_run != dedicated_full_run_ || slot_addr == nullptr)
        << "allocated from an invalid run";
    if (UNLIKELY(slot_addr == nullptr)) {
      // The run got full. Try to free slots.
      DCHECK(thread_local_run->IsFull());
      MutexLock mu(self, *size_bracket_locks_[idx]);
      bool is_all_free_after_merge;
      // This is safe to do for the dedicated_full_run_ since its free lists are empty.
      if (thread_local_run->MergeThreadLocalFreeListToFreeList(&is_all_free_after_merge)) {
        DCHECK_NE(thread_local_run, dedicated_full_run_);
        // Some slot got freed. Keep it.
        DCHECK(!thread_local_run->IsFull());
        DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
      } else {
        // No slots got freed. Try to refill the thread-local run.
        DCHECK(thread_local_run->IsFull());
        if (thread_local_run != dedicated_full_run_) {
          thread_local_run->SetIsThreadLocal(false);
          if (kIsDebugBuild) {
            full_runs_[idx].insert(thread_local_run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(thread_local_run)
                        << " into full_runs_[" << std::dec << idx << "]";
            }
          }
          DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
          DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
        }

        thread_local_run = RefillRun(self, idx);
        if (UNLIKELY(thread_local_run == nullptr)) {
          self->SetRosAllocRun(idx, dedicated_full_run_);
          return nullptr;
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
        thread_local_run->SetIsThreadLocal(true);
        self->SetRosAllocRun(idx, thread_local_run);
        DCHECK(!thread_local_run->IsFull());
      }
      DCHECK(thread_local_run != nullptr);
      DCHECK(!thread_local_run->IsFull());
      DCHECK(thread_local_run->IsThreadLocal());
      // Account for all the free slots in the new or refreshed thread local run.
      *bytes_tl_bulk_allocated = thread_local_run->NumberOfFreeSlots() * bracket_size;
      slot_addr = thread_local_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != nullptr);
    } else {
      // The slot is already counted. Leave it as is.
      *bytes_tl_bulk_allocated = 0;
    }
    DCHECK(slot_addr != nullptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
    *bytes_allocated = bracket_size;
    *usable_size = bracket_size;
  } else {
    // Use the (shared) current run.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    slot_addr = AllocFromCurrentRunUnlocked(self, idx);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
    if (LIKELY(slot_addr != nullptr)) {
      *bytes_allocated = bracket_size;
      *usable_size = bracket_size;
      *bytes_tl_bulk_allocated = bracket_size;
    }
  }
  // Caller verifies that it is all 0.
  return slot_addr;
}

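// Frees one slot back into its run. Thread-local runs only buffer the slot on their
// thread-local free list; shared runs free the slot immediately and move the run between the
// full and non-full sets, releasing its pages once it becomes completely free.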
size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
  DCHECK_EQ(run->magic_num_, kMagicNum);
  DCHECK_LT(run, ptr);
  DCHECK_LT(ptr, run->End());
  const size_t idx = run->size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  bool run_was_full = false;
  MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
  if (kIsDebugBuild) {
    run_was_full = run->IsFull();
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
  }
  if (LIKELY(run->IsThreadLocal())) {
    // It's a thread-local run. Just add the slot to the thread-local free list and return.
    DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->AddToThreadLocalFreeList(ptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run);
    }
    // A thread-local run is kept thread-local even if it becomes all free.
    return bracket_size;
  }
  // Free the slot in the run.
  run->FreeSlot(ptr);
  auto* non_full_runs = &non_full_runs_[idx];
  if (run->IsAllFree()) {
    // It has just become completely free. Free the pages of this run.
    std::set<Run*>::iterator pos = non_full_runs->find(run);
    if (pos != non_full_runs->end()) {
      non_full_runs->erase(pos);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
      }
    }
    if (run == current_runs_[idx]) {
      current_runs_[idx] = dedicated_full_run_;
    }
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->ZeroHeaderAndSlotHeaders();
    {
      MutexLock lock_mu(self, lock_);
      FreePages(self, run, true);
    }
  } else {
    // It is not completely free. If it wasn't the current run or
    // already in the non-full run set (i.e., it was full) insert it
    // into the non-full run set.
    if (run != current_runs_[idx]) {
      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
      auto pos = non_full_runs->find(run);
      if (pos == non_full_runs->end()) {
        DCHECK(run_was_full);
        DCHECK(full_runs->find(run) != full_runs->end());
        if (kIsDebugBuild) {
          full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run) << " from full_runs_";
          }
        }
        non_full_runs->insert(run);
        DCHECK(!run->IsFull());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
  return bracket_size;
}

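// Debugging helper: renders a run's free list as a dash-separated string of slot indices,
// used by Dump().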
template<bool kUseTail>
std::string RosAlloc::Run::FreeListToStr(SlotFreeList<kUseTail>* free_list) {
  std::string free_list_str;
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  for (Slot* slot = free_list->Head(); slot != nullptr; slot = slot->Next()) {
    bool is_last = slot->Next() == nullptr;
    uintptr_t slot_offset = reinterpret_cast<uintptr_t>(slot) -
        reinterpret_cast<uintptr_t>(FirstSlot());
    DCHECK_EQ(slot_offset % bracket_size, 0U);
    uintptr_t slot_idx = slot_offset / bracket_size;
    if (!is_last) {
      free_list_str.append(StringPrintf("%u-", static_cast<uint32_t>(slot_idx)));
    } else {
      free_list_str.append(StringPrintf("%u", static_cast<uint32_t>(slot_idx)));
    }
  }
  return free_list_str;
}

std::string RosAlloc::Run::Dump() {
  size_t idx = size_bracket_idx_;
  std::ostringstream stream;
  stream << "RosAlloc Run = " << reinterpret_cast<void*>(this)
         << "{ magic_num=" << static_cast<int>(magic_num_)
         << " size_bracket_idx=" << idx
         << " is_thread_local=" << static_cast<int>(is_thread_local_)
         << " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_)
         << " free_list=" << FreeListToStr(&free_list_)
         << " bulk_free_list=" << FreeListToStr(&bulk_free_list_)
         << " thread_local_list=" << FreeListToStr(&thread_local_free_list_)
         << " }" << std::endl;
  return stream.str();
}

void RosAlloc::Run::FreeSlot(void* ptr) {
  DCHECK(!IsThreadLocal());
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  Slot* slot = ToSlot(ptr);
  // Zero out the memory.
  // TODO: Investigate alternate memset since ptr is guaranteed to be aligned to 16.
  memset(slot, 0, bracket_size);
  free_list_.Add(slot);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::FreeSlot() : " << slot
              << ", bracket_size=" << std::dec << bracket_size << ", slot_idx=" << SlotIndex(slot);
  }
}

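// The Merge*() helpers below splice an entire free list into another (clearing the source
// list); which direction is legal depends on whether the run is thread-local.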
inline bool RosAlloc::Run::MergeThreadLocalFreeListToFreeList(bool* is_all_free_after_out) {
  DCHECK(IsThreadLocal());
  // Merge the thread local free list into the free list and clear the thread local free list.
  const uint8_t idx = size_bracket_idx_;
  const size_t thread_local_free_list_size = thread_local_free_list_.Size();
  const size_t size_before = free_list_.Size();
  free_list_.Merge(&thread_local_free_list_);
  const size_t size_after = free_list_.Size();
  DCHECK_EQ(size_before < size_after, thread_local_free_list_size > 0);
  DCHECK_LE(size_before, size_after);
  *is_all_free_after_out = free_list_.Size() == numOfSlots[idx];
  // Return true if at least one slot was added to the free list.
  return size_before < size_after;
}

inline void RosAlloc::Run::MergeBulkFreeListToFreeList() {
  DCHECK(!IsThreadLocal());
  // Merge the bulk free list into the free list and clear the bulk free list.
  free_list_.Merge(&bulk_free_list_);
}

inline void RosAlloc::Run::MergeBulkFreeListToThreadLocalFreeList() {
  DCHECK(IsThreadLocal());
  // Merge the bulk free list into the thread local free list and clear the bulk free list.
  thread_local_free_list_.Merge(&bulk_free_list_);
}

inline void RosAlloc::Run::AddToThreadLocalFreeList(void* ptr) {
  DCHECK(IsThreadLocal());
  AddToFreeListShared(ptr, &thread_local_free_list_, __FUNCTION__);
}

inline size_t RosAlloc::Run::AddToBulkFreeList(void* ptr) {
  return AddToFreeListShared(ptr, &bulk_free_list_, __FUNCTION__);
}

inline size_t RosAlloc::Run::AddToFreeListShared(void* ptr,
                                                 SlotFreeList<true>* free_list,
                                                 const char* caller_name) {
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  Slot* slot = ToSlot(ptr);
  memset(slot, 0, bracket_size);
  free_list->Add(slot);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : " << ptr
              << ", bracket_size=" << std::dec << bracket_size << ", slot_idx=" << SlotIndex(slot);
  }
  return bracket_size;
}

inline void RosAlloc::Run::ZeroHeaderAndSlotHeaders() {
  DCHECK(IsAllFree());
  const uint8_t idx = size_bracket_idx_;
  // Zero the slot header (next pointers).
  for (Slot* slot = free_list_.Head(); slot != nullptr; ) {
    Slot* next_slot = slot->Next();
    slot->Clear();
    slot = next_slot;
  }
  // Zero the header.
  memset(this, 0, headerSizes[idx]);
  // Check that the entire run is all zero.
  if (kIsDebugBuild) {
    const size_t size = numOfPages[idx] * kPageSize;
    const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(this);
    for (size_t i = 0; i < size / sizeof(uintptr_t); ++i) {
      CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
    }
  }
}

inline void RosAlloc::Run::ZeroData() {
  const uint8_t idx = size_bracket_idx_;
  uint8_t* slot_begin = reinterpret_cast<uint8_t*>(FirstSlot());
  memset(slot_begin, 0, numOfSlots[idx] * bracketSizes[idx]);
}

void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes,
                                                    void* callback_arg),
                                    void* arg) {
  size_t idx = size_bracket_idx_;
  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  DCHECK_EQ(slot_base + num_slots * bracket_size,
            reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize);
  // Free slots are on the free list and the allocated/used slots are not. We traverse the free
  // list to find out and record which slots are free in the is_free array.
  std::unique_ptr<bool[]> is_free(new bool[num_slots]());  // zero initialized
  for (Slot* slot = free_list_.Head(); slot != nullptr; slot = slot->Next()) {
    size_t slot_idx = SlotIndex(slot);
    DCHECK_LT(slot_idx, num_slots);
    is_free[slot_idx] = true;
  }
  if (IsThreadLocal()) {
    for (Slot* slot = thread_local_free_list_.Head(); slot != nullptr; slot = slot->Next()) {
      size_t slot_idx = SlotIndex(slot);
      DCHECK_LT(slot_idx, num_slots);
      is_free[slot_idx] = true;
    }
  }
  for (size_t slot_idx = 0; slot_idx < num_slots; ++slot_idx) {
    uint8_t* slot_addr = slot_base + slot_idx * bracket_size;
    if (!is_free[slot_idx]) {
      handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
    } else {
      handler(slot_addr, slot_addr + bracket_size, 0, arg);
    }
  }
}

// If true, read the page map entries in BulkFree() without using the
// lock for better performance, assuming that the existence of an
// allocated chunk/pointer being freed in BulkFree() guarantees that
// the page map entry won't change.
static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = true;

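// Frees a batch of pointers (the GC's common path). Under the bulk_free_lock_ write lock,
// slots are first recorded on their runs' bulk free lists without taking the per-bracket
// locks; the affected runs are collected in `runs` for per-run processing afterwards.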
size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
  size_t freed_bytes = 0;
  if ((false)) {
    // Used only to test Free() as GC uses only BulkFree().
    for (size_t i = 0; i < num_ptrs; ++i) {
      freed_bytes += FreeInternal(self, ptrs[i]);
    }
    return freed_bytes;
  }

  WriterMutexLock wmu(self, bulk_free_lock_);

  // First add the slots to free to the runs' bulk free lists without locking the
  // size bracket locks. On host, unordered_set is faster than vector + flag.
#ifdef __ANDROID__
  std::vector<Run*> runs;
#else
  std::unordered_set<Run*, hash_run, eq_run> runs;
#endif
  for (size_t i = 0; i < num_ptrs; i++) {
    void* ptr = ptrs[i];
    DCHECK_LE(base_, ptr);
    DCHECK_LT(ptr, base_ + footprint_);
    size_t pm_idx = RoundDownToPageMapIndex(ptr);
    Run* run = nullptr;
    if (kReadPageMapEntryWithoutLockInBulkFree) {
      // Read the page map entries without locking the lock.
      uint8_t page_map_entry = page_map_[pm_idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                  << std::dec << pm_idx
                  << ", page_map_entry=" << static_cast<int>(page_map_entry);
      }
      if (LIKELY(page_map_entry == kPageMapRun)) {
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
        size_t pi = pm_idx;
        // Find the beginning of the run.
        do {
          --pi;
          DCHECK_LT(pi, capacity_ / kPageSize);
        } while (page_map_[pi] != kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
      } else if (page_map_entry == kPageMapLargeObject) {
        MutexLock mu(self, lock_);
        freed_bytes += FreePages(self, ptr, false);
        continue;
      } else {
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
      }
    } else {
      // Read the page map entries with a lock.
      MutexLock mu(self, lock_);
      DCHECK_LT(pm_idx, page_map_size_);
      uint8_t page_map_entry = page_map_[pm_idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                  << std::dec << pm_idx
                  << ", page_map_entry=" << static_cast<int>(page_map_entry);
      }
      if (LIKELY(page_map_entry == kPageMapRun)) {
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
        size_t pi = pm_idx;
        // Find the beginning of the run.
        do {
          --pi;
          DCHECK_LT(pi, capacity_ / kPageSize);
        } while (page_map_[pi] != kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
      } else if (page_map_entry == kPageMapLargeObject) {
        freed_bytes += FreePages(self, ptr, false);
        continue;
      } else {
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
      }
    }
    DCHECK(run != nullptr);
    DCHECK_EQ(run->magic_num_, kMagicNum);
    // Add the slot to the run's bulk free list.
    freed_bytes += run->AddToBulkFreeList(ptr);
#ifdef __ANDROID__
    if (!run->to_be_bulk_freed_) {
      run->to_be_bulk_freed_ = true;
      runs.push_back(run);
    }
#else
    runs.insert(run);
#endif
  }

  // Now, iterate over the affected runs and merge each run's bulk free
  // list into its free list (for non-thread-local runs) or into its
  // thread-local free list (for thread-local runs).
  for (Run* run : runs) {
#ifdef __ANDROID__
    DCHECK(run->to_be_bulk_freed_);
    run->to_be_bulk_freed_ = false;
#endif
    size_t idx = run->size_bracket_idx_;
    MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
    if (run->IsThreadLocal()) {
      DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
      DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
      run->MergeBulkFreeListToThreadLocalFreeList();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(run);
      }
      DCHECK(run->IsThreadLocal());
      // A thread local run will be kept as a thread local even if
      // it's become all free.
    } else {
      bool run_was_full = run->IsFull();
      run->MergeBulkFreeListToFreeList();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run);
      }
      // Check if the run should be moved to non_full_runs_ or
      // free_page_runs_.
      auto* non_full_runs = &non_full_runs_[idx];
      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
      if (run->IsAllFree()) {
        // It has just become completely free. Free the pages of the
        // run.
        bool run_was_current = run == current_runs_[idx];
        if (run_was_current) {
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          // If it was a current run, reuse it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only.)
          if (kIsDebugBuild) {
            std::unordered_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
            DCHECK(pos != full_runs->end());
            full_runs->erase(pos);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
            DCHECK(full_runs->find(run) == full_runs->end());
          }
        } else {
          // If it was in a non full run set, remove it from the set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
          non_full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " from non_full_runs_";
          }
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
        }
        if (!run_was_current) {
          run->ZeroHeaderAndSlotHeaders();
          MutexLock lock_mu(self, lock_);
          FreePages(self, run, true);
        }
      } else {
        // It is not completely free. If it wasn't the current run or
        // already in the non-full run set (i.e., it was full) insert
        // it into the non-full run set.
        if (run == current_runs_[idx]) {
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          DCHECK(full_runs->find(run) == full_runs->end());
          // If it was a current run, keep it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only) and insert into the non-full run set.
          DCHECK(full_runs->find(run) != full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          if (kIsDebugBuild) {
            full_runs->erase(run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
          }
          non_full_runs->insert(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " into non_full_runs_[" << std::dec << idx << "]";
          }
        } else {
          // It was not full, so leave it in the non-full run set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
        }
      }
    }
  }
  return freed_bytes;
}
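
// Illustrative sketch of how a GC sweep might batch dead pointers into a single
// BulkFree() call, amortizing the bulk free lock and the per-run merge work over
// the whole batch. The function name and the dead_ptrs parameter are assumptions
// for the example, not code that exists elsewhere in ART.
static size_t SweepBatchSketch(RosAlloc* rosalloc, Thread* self, std::vector<void*>* dead_ptrs) {
  if (dead_ptrs->empty()) {
    return 0;
  }
  // One call covers runs and large objects alike; the page map tells them apart.
  return rosalloc->BulkFree(self, dead_ptrs->data(), dead_ptrs->size());
}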

std::string RosAlloc::DumpPageMap() {
  std::ostringstream stream;
  stream << "RosAlloc PageMap: " << std::endl;
  lock_.AssertHeld(Thread::Current());
  size_t end = page_map_size_;
  FreePageRun* curr_fpr = nullptr;
  size_t curr_fpr_size = 0;
  size_t remaining_curr_fpr_size = 0;
  size_t num_running_empty_pages = 0;
  for (size_t i = 0; i < end; ++i) {
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
        // Fall-through.
      case kPageMapEmpty: {
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
          // Encountered a fresh free page run.
          DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
          DCHECK(fpr->IsFree());
          DCHECK(curr_fpr == nullptr);
          DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
          curr_fpr = fpr;
          curr_fpr_size = fpr->ByteSize(this);
          DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
          remaining_curr_fpr_size = curr_fpr_size - kPageSize;
          stream << "[" << i << "]=" << (pm == kPageMapReleased ? "Released" : "Empty")
                 << " (FPR start) fpr_size=" << curr_fpr_size
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = nullptr;
            curr_fpr_size = 0;
          }
          stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
          DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
        } else {
          // Still part of the current free page run.
          DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
          DCHECK(curr_fpr != nullptr && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
          DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
          DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
          remaining_curr_fpr_size -= kPageSize;
          stream << "[" << i << "]=Empty (FPR part)"
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = nullptr;
            curr_fpr_size = 0;
          }
        }
        num_running_empty_pages++;
        break;
      }
      case kPageMapLargeObject: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (start)" << std::endl;
        break;
      }
      case kPageMapLargeObjectPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (part)" << std::endl;
        break;
      case kPageMapRun: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        size_t idx = run->size_bracket_idx_;
        stream << "[" << i << "]=Run (start)"
               << " idx=" << idx
               << " numOfPages=" << numOfPages[idx]
               << " is_thread_local=" << run->is_thread_local_
               << " is_all_free=" << (run->IsAllFree() ? 1 : 0)
               << std::endl;
        break;
      }
      case kPageMapRunPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Run (part)" << std::endl;
        break;
      default:
        stream << "[" << i << "]=Unrecognizable page map type: " << pm;
        break;
    }
  }
  return stream.str();
}

size_t RosAlloc::UsableSize(const void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  MutexLock mu(Thread::Current(), lock_);
  switch (page_map_[pm_idx]) {
    case kPageMapReleased:
      // Fall-through.
    case kPageMapEmpty:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
                 << std::hex << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapLargeObject: {
      size_t num_pages = 1;
      size_t idx = pm_idx + 1;
      size_t end = page_map_size_;
      while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
        num_pages++;
        idx++;
      }
      return num_pages * kPageSize;
    }
    case kPageMapLargeObjectPart:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
                 << std::hex << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapRun:
    case kPageMapRunPart: {
      // Find the beginning of the run.
      while (page_map_[pm_idx] != kPageMapRun) {
        pm_idx--;
        DCHECK_LT(pm_idx, capacity_ / kPageSize);
      }
      DCHECK_EQ(page_map_[pm_idx], kPageMapRun);
      Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      DCHECK_EQ(run->magic_num_, kMagicNum);
      size_t idx = run->size_bracket_idx_;
      size_t offset_from_slot_base = reinterpret_cast<const uint8_t*>(ptr)
          - (reinterpret_cast<uint8_t*>(run) + headerSizes[idx]);
      DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
      return IndexToBracketSize(idx);
    }
    default: {
      LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
      break;
    }
  }
  return 0;
}
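
// A small sketch of the UsableSize() contract: the reported size is the bracket
// size (or the page-rounded size for large objects), so it is always at least the
// size originally requested. The wrapper name and parameters are assumptions.
static void CheckUsableSizeSketch(RosAlloc* rosalloc, const void* ptr, size_t requested_size) {
  size_t usable = rosalloc->UsableSize(ptr);
  // E.g. a 12-byte request served from a 16-byte bracket reports 16 usable bytes.
  CHECK_GE(usable, requested_size);
}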

bool RosAlloc::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  FreePageRun* last_free_page_run;
  DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
  auto it = free_page_runs_.rbegin();
  if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
    // Remove the last free page run, if any.
    DCHECK(last_free_page_run->IsFree());
    DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
    DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
    DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
    free_page_runs_.erase(last_free_page_run);
    size_t decrement = last_free_page_run->ByteSize(this);
    size_t new_footprint = footprint_ - decrement;
    DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
    size_t new_num_of_pages = new_footprint / kPageSize;
    DCHECK_GE(page_map_size_, new_num_of_pages);
    // Zero out the tail of the page map.
    uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
    uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
    DCHECK_LE(madvise_begin, page_map_mem_map_->End());
    size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
    if (madvise_size > 0) {
      DCHECK_ALIGNED(madvise_begin, kPageSize);
      DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
      if (!kMadviseZeroes) {
        memset(madvise_begin, 0, madvise_size);
      }
      CHECK_EQ(madvise(madvise_begin, madvise_size, MADV_DONTNEED), 0);
    }
    if (madvise_begin - zero_begin) {
      memset(zero_begin, 0, madvise_begin - zero_begin);
    }
    page_map_size_ = new_num_of_pages;
    free_page_run_size_map_.resize(new_num_of_pages);
    DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
    ArtRosAllocMoreCore(this, -(static_cast<intptr_t>(decrement)));
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
                << footprint_ << " to " << new_footprint;
    }
    DCHECK_LT(new_footprint, footprint_);
    DCHECK_LT(new_footprint, capacity_);
    footprint_ = new_footprint;
    return true;
  }
  return false;
}
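
// Illustrative sketch: Trim() only gives back a free page run that ends exactly
// at the current footprint, so callers typically invoke it opportunistically
// after freeing (e.g. post-GC) and treat the boolean as "anything was returned".
// The wrapper name is an assumption for the example.
static void TrimAfterFreeingSketch(RosAlloc* rosalloc) {
  if (rosalloc->Trim()) {
    VLOG(heap) << "RosAlloc returned the trailing free page run to the OS.";
  }
}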

void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                          void* arg) {
  // Note: no need to use this to release pages as we already do so in FreePages().
  if (handler == nullptr) {
    return;
  }
  MutexLock mu(Thread::Current(), lock_);
  size_t pm_end = page_map_size_;
  size_t i = 0;
  while (i < pm_end) {
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
        // Fall-through.
      case kPageMapEmpty: {
        // The start of a free page run.
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
        size_t fpr_size = fpr->ByteSize(this);
        DCHECK_ALIGNED(fpr_size, kPageSize);
        void* start = fpr;
        if (kIsDebugBuild) {
          // In the debug build, the first page of a free page run
          // contains a magic number for debugging. Exclude it.
          start = reinterpret_cast<uint8_t*>(fpr) + kPageSize;
        }
        void* end = reinterpret_cast<uint8_t*>(fpr) + fpr_size;
        handler(start, end, 0, arg);
        size_t num_pages = fpr_size / kPageSize;
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK(IsFreePage(j));
          }
        }
        i += fpr_size / kPageSize;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObject: {
        // The start of a large object.
        size_t num_pages = 1;
        size_t idx = i + 1;
        while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
          num_pages++;
          idx++;
        }
        void* start = base_ + i * kPageSize;
        void* end = base_ + (i + num_pages) * kPageSize;
        size_t used_bytes = num_pages * kPageSize;
        handler(start, end, used_bytes, arg);
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
        break;
      case kPageMapRun: {
        // The start of a run.
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        // The dedicated full run doesn't contain any real allocations, don't visit the slots in
        // there.
        run->InspectAllSlots(handler, arg);
        size_t num_pages = numOfPages[run->size_bracket_idx_];
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapRunPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapRunPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
        break;
    }
  }
}

size_t RosAlloc::Footprint() {
  MutexLock mu(Thread::Current(), lock_);
  return footprint_;
}

size_t RosAlloc::FootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return capacity_;
}

void RosAlloc::SetFootprintLimit(size_t new_capacity) {
  MutexLock mu(Thread::Current(), lock_);
  DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
  // Only growing is supported here. But Trim() is supported.
  if (capacity_ < new_capacity) {
    CHECK_LE(new_capacity, max_capacity_);
    capacity_ = new_capacity;
    VLOG(heap) << "new capacity=" << capacity_;
  }
}

// Below may be called by the mutator itself just before thread termination.
size_t RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
  Thread* self = Thread::Current();
  size_t free_bytes = 0U;
  for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
    CHECK(thread_local_run != nullptr);
    // Invalid means already revoked.
    DCHECK(thread_local_run->IsThreadLocal());
    if (thread_local_run != dedicated_full_run_) {
      // Note the thread local run may not be full here.
      thread->SetRosAllocRun(idx, dedicated_full_run_);
      DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
      // Count the number of free slots left.
      size_t num_free_slots = thread_local_run->NumberOfFreeSlots();
      free_bytes += num_free_slots * bracketSizes[idx];
      // The above bracket index lock guards the thread-local free list against a race with the
      // GC thread merging the bulk free list into the thread-local free list in BulkFree().
      // While the run is flagged thread-local, the GC thread updates the thread-local free list
      // in BulkFree(), and that list is merged into the free list either when the run becomes
      // full or when the run is revoked here, so the free list ends up current. Once the run is
      // no longer thread-local, the GC thread merges the bulk free list directly into the free
      // list in the next BulkFree(). Thus there is no need to merge the bulk free list into the
      // free list again here.
      bool dont_care;
      thread_local_run->MergeThreadLocalFreeListToFreeList(&dont_care);
      thread_local_run->SetIsThreadLocal(false);
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      RevokeRun(self, idx, thread_local_run);
    }
  }
  return free_bytes;
}

void RosAlloc::RevokeRun(Thread* self, size_t idx, Run* run) {
  size_bracket_locks_[idx]->AssertHeld(self);
  DCHECK(run != dedicated_full_run_);
  if (run->IsFull()) {
    if (kIsDebugBuild) {
      full_runs_[idx].insert(run);
      DCHECK(full_runs_[idx].find(run) != full_runs_[idx].end());
      if (kTraceRosAlloc) {
        LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run)
                  << " into full_runs_[" << std::dec << idx << "]";
      }
    }
  } else if (run->IsAllFree()) {
    run->ZeroHeaderAndSlotHeaders();
    MutexLock mu(self, lock_);
    FreePages(self, run, true);
  } else {
    non_full_runs_[idx].insert(run);
    DCHECK(non_full_runs_[idx].find(run) != non_full_runs_[idx].end());
    if (kTraceRosAlloc) {
      LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run)
                << " into non_full_runs_[" << std::dec << idx << "]";
    }
  }
}

void RosAlloc::RevokeThreadUnsafeCurrentRuns() {
  // Revoke the current runs which share the same idx as thread local runs.
  Thread* self = Thread::Current();
  for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
    MutexLock mu(self, *size_bracket_locks_[idx]);
    if (current_runs_[idx] != dedicated_full_run_) {
      RevokeRun(self, idx, current_runs_[idx]);
      current_runs_[idx] = dedicated_full_run_;
    }
  }
}

size_t RosAlloc::RevokeAllThreadLocalRuns() {
  // This is called when a mutator thread won't allocate such as at
  // the Zygote creation time or during the GC pause.
  MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
  MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  size_t free_bytes = 0U;
  for (Thread* thread : thread_list) {
    free_bytes += RevokeThreadLocalRuns(thread);
  }
  RevokeThreadUnsafeCurrentRuns();
  return free_bytes;
}

void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    // Avoid race conditions on the bulk free lists with BulkFree() (GC).
    ReaderMutexLock wmu(self, bulk_free_lock_);
    for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
      MutexLock mu(self, *size_bracket_locks_[idx]);
      Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
      DCHECK(thread_local_run == nullptr || thread_local_run == dedicated_full_run_);
    }
  }
}

void RosAlloc::AssertAllThreadLocalRunsAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock shutdown_mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* t : thread_list) {
      AssertThreadLocalRunsAreRevoked(t);
    }
    for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
      MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
      CHECK_EQ(current_runs_[idx], dedicated_full_run_);
    }
  }
}

void RosAlloc::Initialize() {
  // bracketSizes.
  static_assert(kNumRegularSizeBrackets == kNumOfSizeBrackets - 2,
                "There should be two non-regular brackets");
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < kNumThreadLocalSizeBrackets) {
      bracketSizes[i] = kThreadLocalBracketQuantumSize * (i + 1);
    } else if (i < kNumRegularSizeBrackets) {
      bracketSizes[i] = kBracketQuantumSize * (i - kNumThreadLocalSizeBrackets + 1) +
          (kThreadLocalBracketQuantumSize * kNumThreadLocalSizeBrackets);
    } else if (i == kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 1 * KB;
    } else {
      DCHECK_EQ(i, kNumOfSizeBrackets - 1);
      bracketSizes[i] = 2 * KB;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
    }
  }
  // numOfPages.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < kNumThreadLocalSizeBrackets) {
      numOfPages[i] = 1;
    } else if (i < (kNumThreadLocalSizeBrackets + kNumRegularSizeBrackets) / 2) {
      numOfPages[i] = 4;
    } else if (i < kNumRegularSizeBrackets) {
      numOfPages[i] = 8;
    } else if (i == kNumOfSizeBrackets - 2) {
      numOfPages[i] = 16;
    } else {
      DCHECK_EQ(i, kNumOfSizeBrackets - 1);
      numOfPages[i] = 32;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
    }
  }
  // Compute numOfSlots and slotOffsets.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_t bracket_size = bracketSizes[i];
    size_t run_size = kPageSize * numOfPages[i];
    size_t max_num_of_slots = run_size / bracket_size;
    // Compute the actual number of slots by taking the header and
    // alignment into account.
    size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint64_t));
    DCHECK_EQ(fixed_header_size, 80U);
    size_t header_size = 0;
    size_t num_of_slots = 0;
    // Search for the maximum number of slots that allows enough space
    // for the header.
    for (int s = max_num_of_slots; s >= 0; s--) {
      size_t tmp_slots_size = bracket_size * s;
      size_t tmp_unaligned_header_size = fixed_header_size;
      // Align up the unaligned header size. bracket_size may not be a power of two.
      size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
          tmp_unaligned_header_size :
          tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
      DCHECK_EQ(tmp_header_size % bracket_size, 0U);
      DCHECK_EQ(tmp_header_size % sizeof(uint64_t), 0U);
      if (tmp_slots_size + tmp_header_size <= run_size) {
        // Found the right number of slots, that is, there was enough
        // space for the header.
        num_of_slots = s;
        header_size = tmp_header_size;
        break;
      }
    }
    DCHECK_GT(num_of_slots, 0U) << i;
    DCHECK_GT(header_size, 0U) << i;
    // Add the padding for the alignment remainder.
    header_size += run_size % bracket_size;
    DCHECK_EQ(header_size + num_of_slots * bracket_size, run_size);
    numOfSlots[i] = num_of_slots;
    headerSizes[i] = header_size;
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
                << ", headerSizes[" << i << "]=" << headerSizes[i];
    }
  }
  // Set up the dedicated full run so that nobody can successfully allocate from it.
  if (kIsDebugBuild) {
    dedicated_full_run_->magic_num_ = kMagicNum;
  }
  // It doesn't matter which size bracket we use since the main goal is to have the allocation
  // fail 100% of the time you attempt to allocate into the dedicated full run.
  dedicated_full_run_->size_bracket_idx_ = 0;
  DCHECK_EQ(dedicated_full_run_->FreeList()->Size(), 0U);  // It looks full.
  dedicated_full_run_->SetIsThreadLocal(true);

  // The smallest bracket size must be at least as large as the sizeof(Slot).
  DCHECK_LE(sizeof(Slot), bracketSizes[0]) << "sizeof(Slot) <= the smallest bracket size";
  // Check the invariants between the max bracket sizes and the number of brackets.
  DCHECK_EQ(kMaxThreadLocalBracketSize, bracketSizes[kNumThreadLocalSizeBrackets - 1]);
  DCHECK_EQ(kMaxRegularBracketSize, bracketSizes[kNumRegularSizeBrackets - 1]);
}
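
// Worked example of the tables computed above, assuming the rosalloc.h constants
// at the time of writing (kThreadLocalBracketQuantumSize == 8, kBracketQuantumSize
// == 16; treat the concrete numbers as assumptions and the code above as
// authoritative):
//   - thread-local brackets: 8, 16, 24, ... up to kMaxThreadLocalBracketSize,
//     in 8-byte steps, each backed by a 1-page run;
//   - regular brackets: continue in 16-byte steps up to kMaxRegularBracketSize,
//     backed by 4- or 8-page runs;
//   - the last two brackets are 1 KB (16-page runs) and 2 KB (32-page runs).
// For a given bracket i, numOfSlots[i] is the largest s satisfying
//   s * bracketSizes[i] + headerSizes[i] <= numOfPages[i] * kPageSize,
// where headerSizes[i] is the fixed run header rounded up to a multiple of the
// bracket size, plus the padding for the alignment remainder.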

void RosAlloc::BytesAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
                                      size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
  *bytes_allocated += used_bytes;
}

void RosAlloc::ObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
                                        size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
  ++(*objects_allocated);
}
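
// Illustrative sketch wiring the two callbacks above into InspectAll() to total
// live bytes and live objects. It assumes the callbacks are accessible to the
// caller; the wrapper name is an assumption for the example.
static void CountAllocationsSketch(RosAlloc* rosalloc) {
  size_t bytes_allocated = 0;
  size_t objects_allocated = 0;
  rosalloc->InspectAll(RosAlloc::BytesAllocatedCallback, &bytes_allocated);
  rosalloc->InspectAll(RosAlloc::ObjectsAllocatedCallback, &objects_allocated);
  VLOG(heap) << "live bytes=" << bytes_allocated << " live objects=" << objects_allocated;
}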

void RosAlloc::Verify() {
  Thread* self = Thread::Current();
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
      << "The mutator lock isn't exclusively held at " << __PRETTY_FUNCTION__;
  MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
  ReaderMutexLock wmu(self, bulk_free_lock_);
  std::vector<Run*> runs;
  {
    MutexLock lock_mu(self, lock_);
    size_t pm_end = page_map_size_;
    size_t i = 0;
    size_t memory_tool_modifier = is_running_on_memory_tool_ ?
        2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes :  // Redzones before and after.
        0;
    while (i < pm_end) {
      uint8_t pm = page_map_[i];
      switch (pm) {
        case kPageMapReleased:
          // Fall-through.
        case kPageMapEmpty: {
          // The start of a free page run.
          FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
          DCHECK_EQ(fpr->magic_num_, kMagicNumFree);
          CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
              << "An empty page must belong to the free page run set";
          size_t fpr_size = fpr->ByteSize(this);
          CHECK_ALIGNED(fpr_size, kPageSize)
              << "A free page run size isn't page-aligned : " << fpr_size;
          size_t num_pages = fpr_size / kPageSize;
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "A free page run size must be > 0 : " << fpr_size;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK(IsFreePage(j))
                << "A mismatch between the page map table for kPageMapEmpty "
                << " at page index " << j
                << " and the free page run size : page index range : "
                << i << " to " << (i + num_pages) << std::endl << DumpPageMap();
          }
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObject: {
          // The start of a large object.
          size_t num_pages = 1;
          size_t idx = i + 1;
          while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
            num_pages++;
            idx++;
          }
          uint8_t* start = base_ + i * kPageSize;
          if (is_running_on_memory_tool_) {
            start += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
          }
          mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
          size_t obj_size = obj->SizeOf();
          CHECK_GT(obj_size + memory_tool_modifier, kLargeSizeThreshold)
              << "A rosalloc large object size must be > " << kLargeSizeThreshold;
          CHECK_EQ(num_pages, RoundUp(obj_size + memory_tool_modifier, kPageSize) / kPageSize)
              << "A rosalloc large object size " << obj_size + memory_tool_modifier
              << " does not match the page map table " << (num_pages * kPageSize)
              << std::endl << DumpPageMap();
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObjectPart:
          LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl
                     << DumpPageMap();
          break;
        case kPageMapRun: {
          // The start of a run.
          Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
          DCHECK_EQ(run->magic_num_, kMagicNum);
          size_t idx = run->size_bracket_idx_;
          CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
          size_t num_pages = numOfPages[idx];
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "Run size must be > 0 : " << num_pages;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK_EQ(page_map_[j], kPageMapRunPart)
                << "A mismatch between the page map table for kPageMapRunPart "
                << " at page index " << j
                << " and the run size : page index range " << i << " to " << (i + num_pages)
                << std::endl << DumpPageMap();
          }
          // Don't verify the dedicated_full_run_ since it doesn't have any real allocations.
          runs.push_back(run);
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapRunPart:
          // Fall-through.
        default:
          LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl
                     << DumpPageMap();
          break;
      }
    }
  }
  std::list<Thread*> threads = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : threads) {
    for (size_t i = 0; i < kNumThreadLocalSizeBrackets; ++i) {
      MutexLock brackets_mu(self, *size_bracket_locks_[i]);
      Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
      CHECK(thread_local_run != nullptr);
      CHECK(thread_local_run->IsThreadLocal());
      CHECK(thread_local_run == dedicated_full_run_ ||
            thread_local_run->size_bracket_idx_ == i);
    }
  }
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    MutexLock brackets_mu(self, *size_bracket_locks_[i]);
    Run* current_run = current_runs_[i];
    CHECK(current_run != nullptr);
    if (current_run != dedicated_full_run_) {
      // The dedicated full run is currently marked as thread local.
      CHECK(!current_run->IsThreadLocal());
      CHECK_EQ(current_run->size_bracket_idx_, i);
    }
  }
  // Call Verify() here for the lock order.
  for (auto& run : runs) {
    run->Verify(self, this, is_running_on_memory_tool_);
  }
}

void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool) {
  DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
  const size_t idx = size_bracket_idx_;
  CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
  const size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  CHECK_EQ(slot_base + num_slots * bracket_size,
           reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize)
      << "Mismatch in the end address of the run " << Dump();
  // Check that the bulk free list is empty. It's only used during BulkFree().
  CHECK(IsBulkFreeListEmpty()) << "The bulk free list isn't empty " << Dump();
  // Check the thread local runs, the current runs, and the run sets.
  if (IsThreadLocal()) {
    // If it's a thread local run, then it must be pointed to by an owner thread.
    bool owner_found = false;
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
      Thread* thread = *it;
      for (size_t i = 0; i < kNumThreadLocalSizeBrackets; i++) {
        MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
        Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
        if (thread_local_run == this) {
          CHECK(!owner_found)
              << "A thread local run has more than one owner thread " << Dump();
          CHECK_EQ(i, idx)
              << "A mismatching size bracket index in a thread local run " << Dump();
          owner_found = true;
        }
      }
    }
    CHECK(owner_found) << "A thread local run has no owner thread " << Dump();
  } else {
    // If it's not thread local, check that the thread local free list is empty.
    CHECK(IsThreadLocalFreeListEmpty())
        << "A non-thread-local run's thread local free list isn't empty "
        << Dump();
    // Check if it's a current run for the size bracket.
    bool is_current_run = false;
    for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
      MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
      Run* current_run = rosalloc->current_runs_[i];
      if (idx == i) {
        if (this == current_run) {
          is_current_run = true;
        }
      } else {
        // If the size bucket index does not match, then it must not
        // be a current run.
        CHECK_NE(this, current_run)
            << "A current run points to a run with a wrong size bracket index " << Dump();
      }
    }
    // If it's neither a thread-local nor a current run, then it must be
    // in a run set.
    if (!is_current_run) {
      MutexLock mu(self, rosalloc->lock_);
      auto& non_full_runs = rosalloc->non_full_runs_[idx];
      // If it's all free, it must be a free page run rather than a run.
      CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump();
      if (!IsFull()) {
        // If it's not full, it must be in the non-full run set.
        CHECK(non_full_runs.find(this) != non_full_runs.end())
            << "A non-full run isn't in the non-full run set " << Dump();
      } else {
        // If it's full, it must be in the full run set (debug build only.)
        if (kIsDebugBuild) {
          auto& full_runs = rosalloc->full_runs_[idx];
          CHECK(full_runs.find(this) != full_runs.end())
              << " A full run isn't in the full run set " << Dump();
        }
      }
    }
  }
  // Check each slot.
  size_t memory_tool_modifier = running_on_memory_tool ?
      2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes :
      0U;
  // TODO: reuse InspectAllSlots().
  std::unique_ptr<bool[]> is_free(new bool[num_slots]());  // Zero initialized.
  // Mark the free slots; the remaining ones are allocated.
  for (Slot* slot = free_list_.Head(); slot != nullptr; slot = slot->Next()) {
    size_t slot_idx = SlotIndex(slot);
    DCHECK_LT(slot_idx, num_slots);
    is_free[slot_idx] = true;
  }
  if (IsThreadLocal()) {
    for (Slot* slot = thread_local_free_list_.Head(); slot != nullptr; slot = slot->Next()) {
      size_t slot_idx = SlotIndex(slot);
      DCHECK_LT(slot_idx, num_slots);
      is_free[slot_idx] = true;
    }
  }
  for (size_t slot_idx = 0; slot_idx < num_slots; ++slot_idx) {
    uint8_t* slot_addr = slot_base + slot_idx * bracket_size;
    if (running_on_memory_tool) {
      slot_addr += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
    }
    if (!is_free[slot_idx]) {
      // The slot is allocated.
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
      size_t obj_size = obj->SizeOf();
      CHECK_LE(obj_size + memory_tool_modifier, kLargeSizeThreshold)
          << "A run slot contains a large object " << Dump();
      CHECK_EQ(SizeToIndex(obj_size + memory_tool_modifier), idx)
          << PrettyTypeOf(obj) << " "
          << "obj_size=" << obj_size << "(" << obj_size + memory_tool_modifier << "), idx=" << idx
          << " A run slot contains an object with wrong size " << Dump();
    }
  }
}

size_t RosAlloc::ReleasePages() {
  VLOG(heap) << "RosAlloc::ReleasePages()";
  DCHECK(!DoesReleaseAllPages());
  Thread* self = Thread::Current();
  size_t reclaimed_bytes = 0;
  size_t i = 0;
  // Check the page map size which might have changed due to grow/shrink.
  while (i < page_map_size_) {
    // Reading the page map without a lock is racy but the race is benign since it should only
    // result in occasionally not releasing pages which we could release.
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
        // Fall through.
      case kPageMapEmpty: {
        // This is currently the start of a free page run.
        // Acquire the lock to prevent other threads racing in and modifying the page map.
        MutexLock mu(self, lock_);
        // Check that it's still empty after we acquired the lock since another thread could have
        // raced in and placed an allocation here.
        if (IsFreePage(i)) {
          // Free page runs can start with a released page if we coalesced a released page free
          // page run with an empty page run.
          FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
          // There is a race condition where FreePage can coalesce fpr with the previous
          // free page run before we acquire lock_. In that case free_page_runs_.find will not find
          // a run starting at fpr. To handle this race, we skip reclaiming the page range and go
          // to the next page.
          if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
            size_t fpr_size = fpr->ByteSize(this);
            DCHECK_ALIGNED(fpr_size, kPageSize);
            uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
            reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
            size_t pages = fpr_size / kPageSize;
            CHECK_GT(pages, 0U) << "Infinite loop probable";
            i += pages;
            DCHECK_LE(i, page_map_size_);
            break;
          }
        }
        FALLTHROUGH_INTENDED;
      }
      case kPageMapLargeObject:      // Fall through.
      case kPageMapLargeObjectPart:  // Fall through.
      case kPageMapRun:              // Fall through.
      case kPageMapRunPart:          // Fall through.
        ++i;
        break;  // Skip.
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
        break;
    }
  }
  return reclaimed_bytes;
}

size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) {
  DCHECK_ALIGNED(start, kPageSize);
  DCHECK_ALIGNED(end, kPageSize);
  DCHECK_LT(start, end);
  if (kIsDebugBuild) {
    // In the debug build, the first page of a free page run
    // contains a magic number for debugging. Exclude it.
    start += kPageSize;

    // Single pages won't be released.
    if (start == end) {
      return 0;
    }
  }
  if (!kMadviseZeroes) {
    // TODO: Do this when we resurrect the page instead.
    memset(start, 0, end - start);
  }
  CHECK_EQ(madvise(start, end - start, MADV_DONTNEED), 0);
  size_t pm_idx = ToPageMapIndex(start);
  size_t reclaimed_bytes = 0;
  // Calculate reclaimed bytes and update the page map.
  const size_t max_idx = pm_idx + (end - start) / kPageSize;
  for (; pm_idx < max_idx; ++pm_idx) {
    DCHECK(IsFreePage(pm_idx));
    if (page_map_[pm_idx] == kPageMapEmpty) {
      // Mark the page as released and update how many bytes we released.
      reclaimed_bytes += kPageSize;
      page_map_[pm_idx] = kPageMapReleased;
    }
  }
  return reclaimed_bytes;
}
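
// Minimal standalone sketch of the madvise(MADV_DONTNEED) pattern used above:
// the kernel may drop the physical pages and the range reads back as zeros when
// next touched, which is why kMadviseZeroes lets the memset be skipped. This
// mirrors ReleasePageRange() under simplified assumptions (no debug magic page,
// no page map bookkeeping); the function name is an assumption.
static size_t ReleaseRangeSketch(uint8_t* start, uint8_t* end) {
  DCHECK_ALIGNED(start, kPageSize);
  DCHECK_ALIGNED(end, kPageSize);
  if (madvise(start, end - start, MADV_DONTNEED) != 0) {
    PLOG(WARNING) << "madvise failed";
    return 0;
  }
  return end - start;  // Every page in the range was handed back to the kernel.
}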

void RosAlloc::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
  Thread* self = Thread::Current();
  size_t largest_continuous_free_pages = 0;
  WriterMutexLock wmu(self, bulk_free_lock_);
  MutexLock mu(self, lock_);
  for (FreePageRun* fpr : free_page_runs_) {
    largest_continuous_free_pages = std::max(largest_continuous_free_pages,
                                             fpr->ByteSize(this));
  }
  if (failed_alloc_bytes > kLargeSizeThreshold) {
    // Large allocation.
    size_t required_bytes = RoundUp(failed_alloc_bytes, kPageSize);
    if (required_bytes > largest_continuous_free_pages) {
      os << "; failed due to fragmentation (required contiguous free "
         << required_bytes << " bytes where largest contiguous free "
         << largest_continuous_free_pages << " bytes)";
    }
  } else {
    // Non-large allocation.
    size_t required_bytes = numOfPages[SizeToIndex(failed_alloc_bytes)] * kPageSize;
    if (required_bytes > largest_continuous_free_pages) {
      os << "; failed due to fragmentation (required contiguous free "
         << required_bytes << " bytes for a new buffer where largest contiguous free "
         << largest_continuous_free_pages << " bytes)";
    }
  }
}

}  // namespace allocator
}  // namespace gc
}  // namespace art