/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rosalloc.h"

#include "base/memory_tool.h"
#include "base/mutex-inl.h"
#include "gc/space/memory_tool_settings.h"
#include "mem_map.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "thread_list.h"

#include <map>
#include <list>
#include <sstream>
#include <vector>

namespace art {
namespace gc {
namespace allocator {

static constexpr bool kUsePrefetchDuringAllocRun = false;
static constexpr bool kPrefetchNewRunDataByZeroing = false;
static constexpr size_t kPrefetchStride = 64;

size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;
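// The dedicated full run: a page of static storage interpreted as a Run that is permanently
// full. current_runs_ and the per-thread run pointers use it as a sentinel, so the allocation
// paths can unconditionally try AllocSlot() and take the failure path instead of null-checking
// the current run.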
size_t RosAlloc::dedicated_full_run_storage_[kPageSize / sizeof(size_t)] = { 0 };
RosAlloc::Run* RosAlloc::dedicated_full_run_ =
    reinterpret_cast<RosAlloc::Run*>(dedicated_full_run_storage_);

RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                   PageReleaseMode page_release_mode, bool running_on_memory_tool,
                   size_t page_release_size_threshold)
    : base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity),
      capacity_(capacity), max_capacity_(max_capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
      page_release_mode_(page_release_mode),
      page_release_size_threshold_(page_release_size_threshold),
      is_running_on_memory_tool_(running_on_memory_tool) {
  DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
  DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
  CHECK_LE(capacity, max_capacity);
  CHECK_ALIGNED(page_release_size_threshold_, kPageSize);
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << (intptr_t)base_ << ", end="
             << std::hex << (intptr_t)(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_
             << ", max_capacity=" << std::dec << max_capacity_;
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_lock_names_[i] =
        StringPrintf("a rosalloc size bracket %d lock", static_cast<int>(i));
    size_bracket_locks_[i] = new Mutex(size_bracket_lock_names_[i].c_str(), kRosAllocBracketLock);
    current_runs_[i] = dedicated_full_run_;
  }
  DCHECK_EQ(footprint_, capacity_);
  size_t num_of_pages = footprint_ / kPageSize;
  size_t max_num_of_pages = max_capacity_ / kPageSize;
  std::string error_msg;
  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", nullptr,
                                               RoundUp(max_num_of_pages, kPageSize),
                                               PROT_READ | PROT_WRITE, false, false, &error_msg));
  CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
  page_map_ = page_map_mem_map_->Begin();
  page_map_size_ = num_of_pages;
  max_page_map_size_ = max_num_of_pages;
  free_page_run_size_map_.resize(num_of_pages);
  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  DCHECK(free_pages->IsFree());
  free_pages->ReleasePages(this);
  DCHECK(free_pages->IsFree());
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}

RosAlloc::~RosAlloc() {
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    delete size_bracket_locks_[i];
  }
  if (is_running_on_memory_tool_) {
    MEMORY_TOOL_MAKE_DEFINED(base_, capacity_);
  }
}

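// Allocates num_pages pages of type page_map_type (a run or a large object). Scans
// free_page_runs_ for the lowest-address free page run that is large enough (first fit),
// splitting off any remainder, and grows the footprint toward capacity_ when no existing
// run fits. Returns null only if the request cannot be satisfied even after growing.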
void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
  lock_.AssertHeld(self);
  DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
  FreePageRun* res = nullptr;
  const size_t req_byte_size = num_pages * kPageSize;
  // Find the lowest address free page run that's large enough.
  for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
    FreePageRun* fpr = *it;
    DCHECK(fpr->IsFree());
    size_t fpr_byte_size = fpr->ByteSize(this);
    DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
    if (req_byte_size <= fpr_byte_size) {
      // Found one.
      free_page_runs_.erase(it++);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split.
        FreePageRun* remainder =
            reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Don't need to call madvise on remainder here.
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
      break;
    } else {
      ++it;
    }
  }

  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == nullptr && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = nullptr;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_size_, new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_size_ = new_num_of_pages;
      DCHECK_LE(page_map_size_, max_page_map_size_);
      free_page_run_size_map_.resize(new_num_of_pages);
      ArtRosAllocMoreCore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK_EQ(last_free_page_run->End(this), base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK_EQ(*free_page_runs_.rbegin(), new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != nullptr);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder =
            reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != nullptr)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK(IsFreePage(page_map_idx + i));
    }
    switch (page_map_type) {
      case kPageMapRun:
        page_map_[page_map_idx] = kPageMapRun;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapRunPart;
        }
        break;
      case kPageMapLargeObject:
        page_map_[page_map_idx] = kPageMapLargeObject;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
        }
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_type);
        break;
    }
    if (kIsDebugBuild) {
      // Clear the first page since it is not madvised due to the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : nullptr";
  }
  return nullptr;
}

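// Returns the pages at ptr (the start of a run or a large object) to free_page_runs_ as a
// single FreePageRun, zeroing or zero-checking them as required, coalescing with adjacent
// free page runs in both address directions, and releasing the pages via
// FreePageRun::ReleasePages(). Returns the number of bytes freed.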
size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK_LT(pm_idx, page_map_size_);
  uint8_t pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  uint8_t pm_part_type;
  switch (pm_type) {
    case kPageMapRun:
      pm_part_type = kPageMapRunPart;
      break;
    case kPageMapLargeObject:
      pm_part_type = kPageMapLargeObjectPart;
      break;
    default:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << " : " << "pm_idx=" << pm_idx
                 << ", pm_type=" << static_cast<int>(pm_type) << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      return 0;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_size_;
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }
  const size_t byte_size = num_pages * kPageSize;
  if (already_zero) {
    if (ShouldCheckZeroMemory()) {
      const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(ptr);
      for (size_t i = 0; i < byte_size / sizeof(uintptr_t); ++i) {
        CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
      }
    }
  } else if (!DoesReleaseAllPages()) {
    memset(ptr, 0, byte_size);
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << __PRETTY_FUNCTION__ << " : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + byte_size)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, byte_size);
  DCHECK_ALIGNED(fpr->ByteSize(this), kPageSize);

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << __PRETTY_FUNCTION__ << " : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_size_ : ToPageMapIndex(fpr->End(this))) << "]";
    }
    auto higher_it = free_page_runs_.upper_bound(fpr);
    if (higher_it != free_page_runs_.end()) {
      for (auto it = higher_it; it != free_page_runs_.end(); ) {
        FreePageRun* h = *it;
        DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                    << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
        }
        if (fpr->End(this) == h->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          // Clear magic num since this is no longer the start of a free page run.
          if (kIsDebugBuild) {
            h->magic_num_ = 0;
          }
          free_page_runs_.erase(it++);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(h)
                      << " from free_page_runs_";
          }
          fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
          DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
      }
    }
    // Try to coalesce in the lower address direction.
    auto lower_it = free_page_runs_.upper_bound(fpr);
    if (lower_it != free_page_runs_.begin()) {
      --lower_it;
      for (auto it = lower_it; ; ) {
        // We want to try to coalesce with the first element but
        // there's no "<=" operator for the iterator.
        bool to_exit_loop = it == free_page_runs_.begin();

        FreePageRun* l = *it;
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                    << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
        }
        if (l->End(this) == fpr->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it--);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(l)
                      << " from free_page_runs_";
          }
          l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
          DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
          // Clear magic num since this is no longer the start of a free page run.
          if (kIsDebugBuild) {
            fpr->magic_num_ = 0;
          }
          fpr = l;
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
        if (to_exit_loop) {
          break;
        }
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  DCHECK(fpr->IsFree());
  fpr->ReleasePages(this);
  DCHECK(fpr->IsFree());
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
  return byte_size;
}

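// Allocates an object larger than kLargeSizeThreshold directly as a whole number of pages,
// bypassing the runs. All three out-parameters are set to the rounded-up page span, since
// the entire span is usable.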
void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated,
                                 size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK_GT(size, kLargeSizeThreshold);
  size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
  void* r;
  {
    MutexLock mu(self, lock_);
    r = AllocPages(self, num_pages, kPageMapLargeObject);
  }
  if (UNLIKELY(r == nullptr)) {
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : nullptr";
    }
    return nullptr;
  }
  const size_t total_bytes = num_pages * kPageSize;
  *bytes_allocated = total_bytes;
  *usable_size = total_bytes;
  *bytes_tl_bulk_allocated = total_bytes;
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
              << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }
  // Check if the returned memory is really all zero.
  if (ShouldCheckZeroMemory()) {
    CHECK_EQ(total_bytes % sizeof(uintptr_t), 0U);
    const uintptr_t* words = reinterpret_cast<uintptr_t*>(r);
    for (size_t i = 0; i < total_bytes / sizeof(uintptr_t); ++i) {
      CHECK_EQ(words[i], 0U);
    }
  }
  return r;
}

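// Frees ptr by dispatching on its page map entry: large objects go straight to FreePages();
// for a run (or a middle page of one, found by walking back to the kPageMapRun entry) the
// slot is freed via FreeFromRun(). Returns the number of bytes freed.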
size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  Run* run = nullptr;
  {
    MutexLock mu(self, lock_);
    DCHECK_LT(pm_idx, page_map_size_);
    uint8_t page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_[pm_idx]) {
      case kPageMapLargeObject:
        return FreePages(self, ptr, false);
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
      case kPageMapRunPart: {
        // Find the beginning of the run.
        do {
          --pm_idx;
          DCHECK_LT(pm_idx, capacity_ / kPageSize);
        } while (page_map_[pm_idx] != kPageMapRun);
        FALLTHROUGH_INTENDED;
      case kPageMapRun:
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        break;
      case kPageMapReleased:
      case kPageMapEmpty:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
      }
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
    }
  }
  DCHECK(run != nullptr);
  return FreeFromRun(self, ptr, run);
}

size_t RosAlloc::Free(Thread* self, void* ptr) {
  ReaderMutexLock rmu(self, bulk_free_lock_);
  return FreeInternal(self, ptr);
}

RosAlloc::Run* RosAlloc::AllocRun(Thread* self, size_t idx) {
  RosAlloc::Run* new_run = nullptr;
  {
    MutexLock mu(self, lock_);
    new_run = reinterpret_cast<Run*>(AllocPages(self, numOfPages[idx], kPageMapRun));
  }
  if (LIKELY(new_run != nullptr)) {
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    DCHECK(!new_run->IsThreadLocal());
    DCHECK(!new_run->to_be_bulk_freed_);
    if (kUsePrefetchDuringAllocRun && idx < kNumThreadLocalSizeBrackets) {
      // Take ownership of the cache lines if this is likely to be a thread-local run.
      if (kPrefetchNewRunDataByZeroing) {
        // Zeroing the data is sometimes faster than prefetching but it increases memory usage
        // since we end up dirtying zero pages which may have been madvised.
        new_run->ZeroData();
      } else {
        const size_t num_of_slots = numOfSlots[idx];
        const size_t bracket_size = bracketSizes[idx];
        const size_t num_of_bytes = num_of_slots * bracket_size;
        uint8_t* begin = reinterpret_cast<uint8_t*>(new_run) + headerSizes[idx];
        for (size_t i = 0; i < num_of_bytes; i += kPrefetchStride) {
          __builtin_prefetch(begin + i);
        }
      }
    }
    new_run->InitFreeList();
  }
  return new_run;
}

RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  // Get the lowest address non-full run from the binary tree.
  auto* const bt = &non_full_runs_[idx];
  if (!bt->empty()) {
    // If there's one, use it as the current run.
    auto it = bt->begin();
    Run* non_full_run = *it;
    DCHECK(non_full_run != nullptr);
    DCHECK(!non_full_run->IsThreadLocal());
    bt->erase(it);
    return non_full_run;
  }
  // If there's none, allocate a new run and use it as the current run.
  return AllocRun(self, idx);
}

inline void* RosAlloc::AllocFromCurrentRunUnlocked(Thread* self, size_t idx) {
  Run* current_run = current_runs_[idx];
  DCHECK(current_run != nullptr);
  void* slot_addr = current_run->AllocSlot();
  if (UNLIKELY(slot_addr == nullptr)) {
    // The current run got full. Try to refill it.
    DCHECK(current_run->IsFull());
    if (kIsDebugBuild && current_run != dedicated_full_run_) {
      full_runs_[idx].insert(current_run);
      if (kTraceRosAlloc) {
        LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(current_run)
                  << " into full_runs_[" << std::dec << idx << "]";
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
    }
    current_run = RefillRun(self, idx);
    if (UNLIKELY(current_run == nullptr)) {
      // Failed to allocate a new run, make sure that it is the dedicated full run.
      current_runs_[idx] = dedicated_full_run_;
      return nullptr;
    }
    DCHECK(current_run != nullptr);
    DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
    current_run->SetIsThreadLocal(false);
    current_runs_[idx] = current_run;
    DCHECK(!current_run->IsFull());
    slot_addr = current_run->AllocSlot();
    // Must succeed now with a new run.
    DCHECK(slot_addr != nullptr);
  }
  return slot_addr;
}

void* RosAlloc::AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated,
                                         size_t* usable_size,
                                         size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK_LE(size, bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  void* slot_addr = AllocFromCurrentRunUnlocked(self, idx);
  if (LIKELY(slot_addr != nullptr)) {
    *bytes_allocated = bracket_size;
    *usable_size = bracket_size;
    *bytes_tl_bulk_allocated = bracket_size;
  }
  // Caller verifies that it is all 0.
  return slot_addr;
}

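// Allocates size bytes from the bracket idx. For the small brackets
// (idx < kNumThreadLocalSizeBrackets) this uses the calling thread's thread-local run with
// no locking; when that run fills up, its thread-local free list is merged back first, and
// only if nothing was freed is the run replaced via RefillRun() under the bracket lock.
// Larger brackets share current_runs_[idx] under the bracket lock.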
void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated,
                             size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK_LE(size, bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);

  void* slot_addr;

  if (LIKELY(idx < kNumThreadLocalSizeBrackets)) {
    // Use a thread-local run.
    Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
    // Allow invalid since this will always fail the allocation.
    if (kIsDebugBuild) {
      // Need the lock to prevent race conditions.
      MutexLock mu(self, *size_bracket_locks_[idx]);
      CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
    }
    DCHECK(thread_local_run != nullptr);
    DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
    slot_addr = thread_local_run->AllocSlot();
    // The allocation must fail if the run is invalid.
    DCHECK(thread_local_run != dedicated_full_run_ || slot_addr == nullptr)
        << "allocated from an invalid run";
    if (UNLIKELY(slot_addr == nullptr)) {
      // The run got full. Try to free slots.
      DCHECK(thread_local_run->IsFull());
      MutexLock mu(self, *size_bracket_locks_[idx]);
      bool is_all_free_after_merge;
      // This is safe to do for the dedicated_full_run_ since its free lists are empty.
      if (thread_local_run->MergeThreadLocalFreeListToFreeList(&is_all_free_after_merge)) {
        DCHECK_NE(thread_local_run, dedicated_full_run_);
        // Some slot got freed. Keep it.
        DCHECK(!thread_local_run->IsFull());
        DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
      } else {
        // No slots got freed. Try to refill the thread-local run.
        DCHECK(thread_local_run->IsFull());
        if (thread_local_run != dedicated_full_run_) {
          thread_local_run->SetIsThreadLocal(false);
          if (kIsDebugBuild) {
            full_runs_[idx].insert(thread_local_run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(thread_local_run)
                        << " into full_runs_[" << std::dec << idx << "]";
            }
          }
          DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
          DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
        }

        thread_local_run = RefillRun(self, idx);
        if (UNLIKELY(thread_local_run == nullptr)) {
          self->SetRosAllocRun(idx, dedicated_full_run_);
          return nullptr;
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
        thread_local_run->SetIsThreadLocal(true);
        self->SetRosAllocRun(idx, thread_local_run);
        DCHECK(!thread_local_run->IsFull());
      }
      DCHECK(thread_local_run != nullptr);
      DCHECK(!thread_local_run->IsFull());
      DCHECK(thread_local_run->IsThreadLocal());
      // Account for all the free slots in the new or refreshed thread local run.
      *bytes_tl_bulk_allocated = thread_local_run->NumberOfFreeSlots() * bracket_size;
      slot_addr = thread_local_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != nullptr);
    } else {
      // The slot is already counted. Leave it as is.
      *bytes_tl_bulk_allocated = 0;
    }
    DCHECK(slot_addr != nullptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
    *bytes_allocated = bracket_size;
    *usable_size = bracket_size;
  } else {
    // Use the (shared) current run.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    slot_addr = AllocFromCurrentRunUnlocked(self, idx);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
    if (LIKELY(slot_addr != nullptr)) {
      *bytes_allocated = bracket_size;
      *usable_size = bracket_size;
      *bytes_tl_bulk_allocated = bracket_size;
    }
  }
  // Caller verifies that it is all 0.
  return slot_addr;
}

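// Frees the slot at ptr back into run. Thread-local runs only get the slot appended to their
// thread-local free list (the owning thread reclaims it later); shared runs free the slot
// directly under the bracket lock, returning the run's pages if it becomes completely free,
// or moving it back into non_full_runs_ if it was full.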
size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
  DCHECK_EQ(run->magic_num_, kMagicNum);
  DCHECK_LT(run, ptr);
  DCHECK_LT(ptr, run->End());
  const size_t idx = run->size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  bool run_was_full = false;
  MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
  if (kIsDebugBuild) {
    run_was_full = run->IsFull();
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
  }
  if (LIKELY(run->IsThreadLocal())) {
    // It's a thread-local run. Just add the slot to the thread-local free list and return.
    DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->AddToThreadLocalFreeList(ptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run);
    }
    // A thread local run will be kept as a thread local even if it's become all free.
    return bracket_size;
  }
  // Free the slot in the run.
  run->FreeSlot(ptr);
  auto* non_full_runs = &non_full_runs_[idx];
  if (run->IsAllFree()) {
    // It has just become completely free. Free the pages of this run.
    std::set<Run*>::iterator pos = non_full_runs->find(run);
    if (pos != non_full_runs->end()) {
      non_full_runs->erase(pos);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
      }
    }
    if (run == current_runs_[idx]) {
      current_runs_[idx] = dedicated_full_run_;
    }
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->ZeroHeaderAndSlotHeaders();
    {
      MutexLock lock_mu(self, lock_);
      FreePages(self, run, true);
    }
  } else {
    // It is not completely free. If it wasn't the current run or
    // already in the non-full run set (i.e., it was full) insert it
    // into the non-full run set.
    if (run != current_runs_[idx]) {
      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
      auto pos = non_full_runs->find(run);
      if (pos == non_full_runs->end()) {
        DCHECK(run_was_full);
        DCHECK(full_runs->find(run) != full_runs->end());
        if (kIsDebugBuild) {
          full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run) << " from full_runs_";
          }
        }
        non_full_runs->insert(run);
        DCHECK(!run->IsFull());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
  return bracket_size;
}

template<bool kUseTail>
std::string RosAlloc::Run::FreeListToStr(SlotFreeList<kUseTail>* free_list) {
  std::string free_list_str;
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  for (Slot* slot = free_list->Head(); slot != nullptr; slot = slot->Next()) {
    bool is_last = slot->Next() == nullptr;
    uintptr_t slot_offset = reinterpret_cast<uintptr_t>(slot) -
        reinterpret_cast<uintptr_t>(FirstSlot());
    DCHECK_EQ(slot_offset % bracket_size, 0U);
    uintptr_t slot_idx = slot_offset / bracket_size;
    if (!is_last) {
      free_list_str.append(StringPrintf("%u-", static_cast<uint32_t>(slot_idx)));
    } else {
      free_list_str.append(StringPrintf("%u", static_cast<uint32_t>(slot_idx)));
    }
  }
  return free_list_str;
}

std::string RosAlloc::Run::Dump() {
  size_t idx = size_bracket_idx_;
  std::ostringstream stream;
  stream << "RosAlloc Run = " << reinterpret_cast<void*>(this)
         << "{ magic_num=" << static_cast<int>(magic_num_)
         << " size_bracket_idx=" << idx
         << " is_thread_local=" << static_cast<int>(is_thread_local_)
         << " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_)
         << " free_list=" << FreeListToStr(&free_list_)
         << " bulk_free_list=" << FreeListToStr(&bulk_free_list_)
         << " thread_local_list=" << FreeListToStr(&thread_local_free_list_)
         << " }" << std::endl;
  return stream.str();
}

inline size_t RosAlloc::Run::SlotIndex(Slot* slot) {
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(slot)
      - reinterpret_cast<uint8_t*>(FirstSlot());
  DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracket_size;
  DCHECK_LT(slot_idx, numOfSlots[idx]);
  return slot_idx;
}

void RosAlloc::Run::FreeSlot(void* ptr) {
  DCHECK(!IsThreadLocal());
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  Slot* slot = ToSlot(ptr);
  // Zero out the memory.
  // TODO: Investigate alternate memset since ptr is guaranteed to be aligned to 16.
  memset(slot, 0, bracket_size);
  free_list_.Add(slot);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::FreeSlot() : " << slot
              << ", bracket_size=" << std::dec << bracket_size << ", slot_idx=" << SlotIndex(slot);
  }
}

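// Each run keeps three singly-linked lists threaded through its free slots: free_list_
// (slots available for allocation), thread_local_free_list_ (slots freed into a
// thread-local run, reclaimed by the owning thread), and bulk_free_list_ (slots staged by
// BulkFree()). The Merge* functions below splice one list into another and clear the source.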
inline bool RosAlloc::Run::MergeThreadLocalFreeListToFreeList(bool* is_all_free_after_out) {
  DCHECK(IsThreadLocal());
  // Merge the thread local free list into the free list and clear the thread local free list.
  const uint8_t idx = size_bracket_idx_;
  size_t thread_local_free_list_size = thread_local_free_list_.Size();
  const size_t size_before = free_list_.Size();
  free_list_.Merge(&thread_local_free_list_);
  const size_t size_after = free_list_.Size();
  DCHECK_EQ(size_before < size_after, thread_local_free_list_size > 0);
  DCHECK_LE(size_before, size_after);
  *is_all_free_after_out = free_list_.Size() == numOfSlots[idx];
  // Return true if at least one slot was added to the free list.
  return size_before < size_after;
}

inline void RosAlloc::Run::MergeBulkFreeListToFreeList() {
  DCHECK(!IsThreadLocal());
  // Merge the bulk free list into the free list and clear the bulk free list.
  free_list_.Merge(&bulk_free_list_);
}

inline void RosAlloc::Run::MergeBulkFreeListToThreadLocalFreeList() {
  DCHECK(IsThreadLocal());
  // Merge the bulk free list into the thread local free list and clear the bulk free list.
  thread_local_free_list_.Merge(&bulk_free_list_);
}

inline void RosAlloc::Run::AddToThreadLocalFreeList(void* ptr) {
  DCHECK(IsThreadLocal());
  AddToFreeListShared(ptr, &thread_local_free_list_, __FUNCTION__);
}

inline size_t RosAlloc::Run::AddToBulkFreeList(void* ptr) {
  return AddToFreeListShared(ptr, &bulk_free_list_, __FUNCTION__);
}

inline size_t RosAlloc::Run::AddToFreeListShared(void* ptr,
                                                 SlotFreeList<true>* free_list,
                                                 const char* caller_name) {
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  Slot* slot = ToSlot(ptr);
  memset(slot, 0, bracket_size);
  free_list->Add(slot);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : " << ptr
              << ", bracket_size=" << std::dec << bracket_size << ", slot_idx=" << SlotIndex(slot);
  }
  return bracket_size;
}

inline void RosAlloc::Run::ZeroHeaderAndSlotHeaders() {
  DCHECK(IsAllFree());
  const uint8_t idx = size_bracket_idx_;
  // Zero the slot header (next pointers).
  for (Slot* slot = free_list_.Head(); slot != nullptr; ) {
    Slot* next_slot = slot->Next();
    slot->Clear();
    slot = next_slot;
  }
  // Zero the header.
  memset(this, 0, headerSizes[idx]);
  // Check that the entire run is all zero.
  if (kIsDebugBuild) {
    const size_t size = numOfPages[idx] * kPageSize;
    const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(this);
    for (size_t i = 0; i < size / sizeof(uintptr_t); ++i) {
      CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
    }
  }
}

inline void RosAlloc::Run::ZeroData() {
  const uint8_t idx = size_bracket_idx_;
  uint8_t* slot_begin = reinterpret_cast<uint8_t*>(FirstSlot());
  memset(slot_begin, 0, numOfSlots[idx] * bracketSizes[idx]);
}

void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                                    void* arg) {
  size_t idx = size_bracket_idx_;
  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  DCHECK_EQ(slot_base + num_slots * bracket_size,
            reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize);
  // Free slots are on the free list and the allocated/used slots are not. We traverse the free list
  // to find out and record which slots are free in the is_free array.
  std::unique_ptr<bool[]> is_free(new bool[num_slots]());  // zero initialized
  for (Slot* slot = free_list_.Head(); slot != nullptr; slot = slot->Next()) {
    size_t slot_idx = SlotIndex(slot);
    DCHECK_LT(slot_idx, num_slots);
    is_free[slot_idx] = true;
  }
  if (IsThreadLocal()) {
    for (Slot* slot = thread_local_free_list_.Head(); slot != nullptr; slot = slot->Next()) {
      size_t slot_idx = SlotIndex(slot);
      DCHECK_LT(slot_idx, num_slots);
      is_free[slot_idx] = true;
    }
  }
  for (size_t slot_idx = 0; slot_idx < num_slots; ++slot_idx) {
    uint8_t* slot_addr = slot_base + slot_idx * bracket_size;
    if (!is_free[slot_idx]) {
      handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
    } else {
      handler(slot_addr, slot_addr + bracket_size, 0, arg);
    }
  }
}

// If true, read the page map entries in BulkFree() without using the
// lock for better performance, assuming that the existence of an
// allocated chunk/pointer being freed in BulkFree() guarantees that
// the page map entry won't change.
static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = true;
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001026
Mathieu Chartier8585bad2014-04-11 17:53:48 -07001027size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
1028 size_t freed_bytes = 0;
Ian Rogerscf7f1912014-10-22 22:06:39 -07001029 if ((false)) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001030 // Used only to test Free() as GC uses only BulkFree().
1031 for (size_t i = 0; i < num_ptrs; ++i) {
Mathieu Chartier8585bad2014-04-11 17:53:48 -07001032 freed_bytes += FreeInternal(self, ptrs[i]);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001033 }
Mathieu Chartier8585bad2014-04-11 17:53:48 -07001034 return freed_bytes;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001035 }
1036
1037 WriterMutexLock wmu(self, bulk_free_lock_);
1038
1039  // First mark the slots to free on each run's bulk free list without taking the
Ian Rogers5fcfa7d2014-05-15 11:43:06 -07001040  // size bracket locks. On host, unordered_set is faster than vector + flag.
Andreas Gampec60e1b72015-07-30 08:57:50 -07001041#ifdef __ANDROID__
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001042 std::vector<Run*> runs;
1043#else
Ian Rogers700a4022014-05-19 16:49:03 -07001044 std::unordered_set<Run*, hash_run, eq_run> runs;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001045#endif
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001046 for (size_t i = 0; i < num_ptrs; i++) {
1047 void* ptr = ptrs[i];
Ian Rogers5d057052014-03-12 14:32:27 -07001048 DCHECK_LE(base_, ptr);
1049 DCHECK_LT(ptr, base_ + footprint_);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001050 size_t pm_idx = RoundDownToPageMapIndex(ptr);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001051 Run* run = nullptr;
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001052 if (kReadPageMapEntryWithoutLockInBulkFree) {
1053 // Read the page map entries without locking the lock.
Ian Rogers13735952014-10-08 12:43:28 -07001054 uint8_t page_map_entry = page_map_[pm_idx];
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001055 if (kTraceRosAlloc) {
1056 LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
1057 << std::dec << pm_idx
1058 << ", page_map_entry=" << static_cast<int>(page_map_entry);
1059 }
1060 if (LIKELY(page_map_entry == kPageMapRun)) {
1061 run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001062 } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
1063 size_t pi = pm_idx;
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001064 // Find the beginning of the run.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001065 do {
1066 --pi;
Ian Rogers5d057052014-03-12 14:32:27 -07001067 DCHECK_LT(pi, capacity_ / kPageSize);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001068 } while (page_map_[pi] != kPageMapRun);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001069 run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001070 } else if (page_map_entry == kPageMapLargeObject) {
1071 MutexLock mu(self, lock_);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001072 freed_bytes += FreePages(self, ptr, false);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001073 continue;
1074 } else {
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001075 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001076 }
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001077 } else {
1078 // Read the page map entries with a lock.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001079 MutexLock mu(self, lock_);
1080 DCHECK_LT(pm_idx, page_map_size_);
Ian Rogers13735952014-10-08 12:43:28 -07001081 uint8_t page_map_entry = page_map_[pm_idx];
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001082 if (kTraceRosAlloc) {
1083 LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
1084 << std::dec << pm_idx
1085 << ", page_map_entry=" << static_cast<int>(page_map_entry);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001086 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001087 if (LIKELY(page_map_entry == kPageMapRun)) {
1088 run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
1089 } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
1090 size_t pi = pm_idx;
1091 // Find the beginning of the run.
1092 do {
1093 --pi;
1094 DCHECK_LT(pi, capacity_ / kPageSize);
1095 } while (page_map_[pi] != kPageMapRun);
1096 run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
1097 } else if (page_map_entry == kPageMapLargeObject) {
1098 freed_bytes += FreePages(self, ptr, false);
1099 continue;
1100 } else {
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001101 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001102 }
1103 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001104 DCHECK(run != nullptr);
1105 DCHECK_EQ(run->magic_num_, kMagicNum);
1106    // Add the slot to the run's bulk free list.
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001107 freed_bytes += run->AddToBulkFreeList(ptr);
Andreas Gampec60e1b72015-07-30 08:57:50 -07001108#ifdef __ANDROID__
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001109 if (!run->to_be_bulk_freed_) {
1110 run->to_be_bulk_freed_ = true;
1111 runs.push_back(run);
1112 }
1113#else
1114 runs.insert(run);
1115#endif
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001116 }
1117
1118  // Now, iterate over the affected runs and merge each run's bulk free
1119  // list into its free list (for non-thread-local runs) or into its
1120  // thread-local free list (for thread-local runs), then reclassify
1121  // the run if its occupancy changed.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001122 for (Run* run : runs) {
Andreas Gampec60e1b72015-07-30 08:57:50 -07001123#ifdef __ANDROID__
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001124 DCHECK(run->to_be_bulk_freed_);
1125 run->to_be_bulk_freed_ = false;
1126#endif
1127 size_t idx = run->size_bracket_idx_;
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001128 MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001129 if (run->IsThreadLocal()) {
Mathieu Chartier0651d412014-04-29 14:37:57 -07001130 DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001131 DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
1132 DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001133 run->MergeBulkFreeListToThreadLocalFreeList();
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001134 if (kTraceRosAlloc) {
1135 LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
1136 << std::hex << reinterpret_cast<intptr_t>(run);
1137 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001138 DCHECK(run->IsThreadLocal());
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001139 // A thread local run will be kept as a thread local even if
1140 // it's become all free.
1141 } else {
1142 bool run_was_full = run->IsFull();
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001143 run->MergeBulkFreeListToFreeList();
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001144 if (kTraceRosAlloc) {
1145 LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
1146 << reinterpret_cast<intptr_t>(run);
1147 }
1148 // Check if the run should be moved to non_full_runs_ or
1149 // free_page_runs_.
Mathieu Chartier58553c72014-09-16 16:25:55 -07001150 auto* non_full_runs = &non_full_runs_[idx];
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001151 auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001152 if (run->IsAllFree()) {
1153 // It has just become completely free. Free the pages of the
1154 // run.
1155 bool run_was_current = run == current_runs_[idx];
1156 if (run_was_current) {
1157 DCHECK(full_runs->find(run) == full_runs->end());
1158 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1159 // If it was a current run, reuse it.
1160 } else if (run_was_full) {
1161 // If it was full, remove it from the full run set (debug
1162 // only.)
1163 if (kIsDebugBuild) {
Ian Rogers700a4022014-05-19 16:49:03 -07001164 std::unordered_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001165 DCHECK(pos != full_runs->end());
1166 full_runs->erase(pos);
1167 if (kTraceRosAlloc) {
1168 LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
1169 << reinterpret_cast<intptr_t>(run)
1170 << " from full_runs_";
1171 }
1172 DCHECK(full_runs->find(run) == full_runs->end());
1173 }
1174 } else {
1175 // If it was in a non full run set, remove it from the set.
1176 DCHECK(full_runs->find(run) == full_runs->end());
1177 DCHECK(non_full_runs->find(run) != non_full_runs->end());
1178 non_full_runs->erase(run);
1179 if (kTraceRosAlloc) {
1180 LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
1181 << reinterpret_cast<intptr_t>(run)
1182 << " from non_full_runs_";
1183 }
1184 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1185 }
1186 if (!run_was_current) {
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001187 run->ZeroHeaderAndSlotHeaders();
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001188 MutexLock lock_mu(self, lock_);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001189 FreePages(self, run, true);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001190 }
1191 } else {
1192 // It is not completely free. If it wasn't the current run or
1193 // already in the non-full run set (i.e., it was full) insert
1194 // it into the non-full run set.
1195 if (run == current_runs_[idx]) {
1196 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1197 DCHECK(full_runs->find(run) == full_runs->end());
1198 // If it was a current run, keep it.
1199 } else if (run_was_full) {
1200 // If it was full, remove it from the full run set (debug
1201 // only) and insert into the non-full run set.
1202 DCHECK(full_runs->find(run) != full_runs->end());
1203 DCHECK(non_full_runs->find(run) == non_full_runs->end());
1204 if (kIsDebugBuild) {
1205 full_runs->erase(run);
1206 if (kTraceRosAlloc) {
1207 LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
1208 << reinterpret_cast<intptr_t>(run)
1209 << " from full_runs_";
1210 }
1211 }
1212 non_full_runs->insert(run);
1213 if (kTraceRosAlloc) {
1214 LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
1215 << reinterpret_cast<intptr_t>(run)
1216                    << " into non_full_runs_[" << std::dec << idx << "]";
1217 }
1218 } else {
1219        // It was not full, so leave it in the non-full run set.
1220 DCHECK(full_runs->find(run) == full_runs->end());
1221 DCHECK(non_full_runs->find(run) != non_full_runs->end());
1222 }
1223 }
1224 }
1225 }
Mathieu Chartier8585bad2014-04-11 17:53:48 -07001226 return freed_bytes;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001227}
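// Condensed shape of BulkFree() above, with the per-pointer bookkeeping and
// run reclassification elided (a sketch; RunOf() is a hypothetical helper
// standing in for the page-map lookup, not ART's API):
//
//   std::unordered_set<Run*, hash_run, eq_run> affected;
//   for (size_t i = 0; i < num_ptrs; ++i) {     // Phase 1: no bracket lock held.
//     Run* run = RunOf(ptrs[i]);
//     freed_bytes += run->AddToBulkFreeList(ptrs[i]);
//     affected.insert(run);
//   }
//   for (Run* run : affected) {                 // Phase 2: one lock taken per run.
//     MutexLock mu(self, *size_bracket_locks_[run->size_bracket_idx_]);
//     if (run->IsThreadLocal()) {
//       run->MergeBulkFreeListToThreadLocalFreeList();
//     } else {
//       run->MergeBulkFreeListToFreeList();
//     }
//   }
//
// The two-phase split amortizes locking: each size bracket lock is taken once
// per affected run rather than once per freed pointer.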
1228
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001229std::string RosAlloc::DumpPageMap() {
1230 std::ostringstream stream;
1231 stream << "RosAlloc PageMap: " << std::endl;
1232 lock_.AssertHeld(Thread::Current());
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001233 size_t end = page_map_size_;
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001234 FreePageRun* curr_fpr = nullptr;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001235 size_t curr_fpr_size = 0;
1236 size_t remaining_curr_fpr_size = 0;
1237 size_t num_running_empty_pages = 0;
1238 for (size_t i = 0; i < end; ++i) {
Ian Rogers13735952014-10-08 12:43:28 -07001239 uint8_t pm = page_map_[i];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001240 switch (pm) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001241 case kPageMapReleased:
1242 // Fall-through.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001243 case kPageMapEmpty: {
1244 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
1245 if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
1246 // Encountered a fresh free page run.
1247 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1248 DCHECK(fpr->IsFree());
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001249 DCHECK(curr_fpr == nullptr);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001250 DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
1251 curr_fpr = fpr;
1252 curr_fpr_size = fpr->ByteSize(this);
1253 DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
1254 remaining_curr_fpr_size = curr_fpr_size - kPageSize;
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001255 stream << "[" << i << "]=" << (pm == kPageMapReleased ? "Released" : "Empty")
1256 << " (FPR start) fpr_size=" << curr_fpr_size
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001257 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001258 if (remaining_curr_fpr_size == 0) {
1259 // Reset at the end of the current free page run.
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001260 curr_fpr = nullptr;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001261 curr_fpr_size = 0;
1262 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001263 stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001264 DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
1265 } else {
1266 // Still part of the current free page run.
1267 DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001268 DCHECK(curr_fpr != nullptr && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001269 DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
1270 DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
1271 remaining_curr_fpr_size -= kPageSize;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001272 stream << "[" << i << "]=Empty (FPR part)"
1273 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001274 if (remaining_curr_fpr_size == 0) {
1275 // Reset at the end of the current free page run.
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001276 curr_fpr = nullptr;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001277 curr_fpr_size = 0;
1278 }
1279 }
1280 num_running_empty_pages++;
1281 break;
1282 }
1283 case kPageMapLargeObject: {
1284 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1285 num_running_empty_pages = 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001286 stream << "[" << i << "]=Large (start)" << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001287 break;
1288 }
1289 case kPageMapLargeObjectPart:
1290 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1291 num_running_empty_pages = 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001292 stream << "[" << i << "]=Large (part)" << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001293 break;
1294 case kPageMapRun: {
1295 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1296 num_running_empty_pages = 0;
1297 Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
1298 size_t idx = run->size_bracket_idx_;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001299 stream << "[" << i << "]=Run (start)"
1300 << " idx=" << idx
1301 << " numOfPages=" << numOfPages[idx]
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001302 << " is_thread_local=" << run->is_thread_local_
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001303 << " is_all_free=" << (run->IsAllFree() ? 1 : 0)
1304 << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001305 break;
1306 }
1307 case kPageMapRunPart:
1308 DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
1309 num_running_empty_pages = 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001310 stream << "[" << i << "]=Run (part)" << std::endl;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001311 break;
1312 default:
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001313        stream << "[" << i << "]=Unrecognizable page map type: " << static_cast<int>(pm);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001314 break;
1315 }
1316 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001317 return stream.str();
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001318}
1319
Andreas Gamped7576322014-10-24 22:13:45 -07001320size_t RosAlloc::UsableSize(const void* ptr) {
Ian Rogers5d057052014-03-12 14:32:27 -07001321 DCHECK_LE(base_, ptr);
1322 DCHECK_LT(ptr, base_ + footprint_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001323 size_t pm_idx = RoundDownToPageMapIndex(ptr);
1324 MutexLock mu(Thread::Current(), lock_);
1325 switch (page_map_[pm_idx]) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001326 case kPageMapReleased:
1327 // Fall-through.
1328 case kPageMapEmpty:
1329 LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
1330 << std::hex << reinterpret_cast<intptr_t>(ptr);
1331 break;
1332 case kPageMapLargeObject: {
1333 size_t num_pages = 1;
1334 size_t idx = pm_idx + 1;
1335 size_t end = page_map_size_;
1336 while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
1337 num_pages++;
1338 idx++;
1339 }
1340 return num_pages * kPageSize;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001341 }
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001342 case kPageMapLargeObjectPart:
1343 LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
1344 << std::hex << reinterpret_cast<intptr_t>(ptr);
1345 break;
1346 case kPageMapRun:
1347 case kPageMapRunPart: {
1348 // Find the beginning of the run.
1349 while (page_map_[pm_idx] != kPageMapRun) {
1350 pm_idx--;
1351 DCHECK_LT(pm_idx, capacity_ / kPageSize);
1352 }
1353 DCHECK_EQ(page_map_[pm_idx], kPageMapRun);
1354 Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
1355 DCHECK_EQ(run->magic_num_, kMagicNum);
1356 size_t idx = run->size_bracket_idx_;
Andreas Gamped7576322014-10-24 22:13:45 -07001357 size_t offset_from_slot_base = reinterpret_cast<const uint8_t*>(ptr)
Ian Rogers13735952014-10-08 12:43:28 -07001358 - (reinterpret_cast<uint8_t*>(run) + headerSizes[idx]);
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001359 DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
1360 return IndexToBracketSize(idx);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001361 }
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001362 default: {
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001363 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001364 break;
1365 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001366 }
1367 return 0;
1368}
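// Worked example of the cases above, assuming 4 KiB pages: a 9000-byte
// allocation is large, occupies three pages ([Large, LargePart, LargePart]
// in the page map), and reports 3 * kPageSize = 12288 usable bytes. A small
// allocation reports its bracket size instead, e.g. a 47-byte request served
// from the 48-byte bracket reports 48, regardless of the requested size.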
1369
1370bool RosAlloc::Trim() {
1371 MutexLock mu(Thread::Current(), lock_);
1372 FreePageRun* last_free_page_run;
1373 DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
1374 auto it = free_page_runs_.rbegin();
1375 if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
1376 // Remove the last free page run, if any.
1377 DCHECK(last_free_page_run->IsFree());
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001378 DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001379 DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
1380 DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
1381 free_page_runs_.erase(last_free_page_run);
1382 size_t decrement = last_free_page_run->ByteSize(this);
1383 size_t new_footprint = footprint_ - decrement;
1384 DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
1385 size_t new_num_of_pages = new_footprint / kPageSize;
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001386 DCHECK_GE(page_map_size_, new_num_of_pages);
1387 // Zero out the tail of the page map.
Ian Rogers13735952014-10-08 12:43:28 -07001388 uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
1389 uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001390 DCHECK_LE(madvise_begin, page_map_mem_map_->End());
1391 size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
1392 if (madvise_size > 0) {
1393 DCHECK_ALIGNED(madvise_begin, kPageSize);
1394 DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
Ian Rogersc5f17732014-06-05 20:48:42 -07001395 if (!kMadviseZeroes) {
1396 memset(madvise_begin, 0, madvise_size);
1397 }
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001398 CHECK_EQ(madvise(madvise_begin, madvise_size, MADV_DONTNEED), 0);
1399 }
1400    if (madvise_begin > zero_begin) {
1401 memset(zero_begin, 0, madvise_begin - zero_begin);
1402 }
1403 page_map_size_ = new_num_of_pages;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001404 free_page_run_size_map_.resize(new_num_of_pages);
1405 DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001406 ArtRosAllocMoreCore(this, -(static_cast<intptr_t>(decrement)));
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001407 if (kTraceRosAlloc) {
1408 LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
1409 << footprint_ << " to " << new_footprint;
1410 }
1411 DCHECK_LT(new_footprint, footprint_);
1412 DCHECK_LT(new_footprint, capacity_);
1413 footprint_ = new_footprint;
1414 return true;
1415 }
1416 return false;
1417}
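// Worked example of the trim arithmetic above, assuming 4 KiB pages: with
// footprint_ == 40960 bytes (10 pages) and a trailing free page run of 12288
// bytes, decrement == 12288 and new_footprint == 28672 (7 pages), so
// page_map_size_ drops from 10 to 7. The stale tail of the byte-per-page map
// is then cleared: bytes up to the next page boundary are memset to zero and
// any whole pages beyond it are madvised away, which is why madvise_begin is
// AlignUp(zero_begin, kPageSize).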
1418
1419void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
1420 void* arg) {
1421 // Note: no need to use this to release pages as we already do so in FreePages().
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001422 if (handler == nullptr) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001423 return;
1424 }
1425 MutexLock mu(Thread::Current(), lock_);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001426 size_t pm_end = page_map_size_;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001427 size_t i = 0;
1428 while (i < pm_end) {
Ian Rogers13735952014-10-08 12:43:28 -07001429 uint8_t pm = page_map_[i];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001430 switch (pm) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001431 case kPageMapReleased:
1432 // Fall-through.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001433 case kPageMapEmpty: {
1434 // The start of a free page run.
1435 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
1436 DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
1437 size_t fpr_size = fpr->ByteSize(this);
Roland Levillain14d90572015-07-16 10:52:26 +01001438 DCHECK_ALIGNED(fpr_size, kPageSize);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001439 void* start = fpr;
Hiroshi Yamauchi573f7d22013-12-17 11:54:23 -08001440 if (kIsDebugBuild) {
1441 // In the debug build, the first page of a free page run
1442 // contains a magic number for debugging. Exclude it.
Ian Rogers13735952014-10-08 12:43:28 -07001443 start = reinterpret_cast<uint8_t*>(fpr) + kPageSize;
Hiroshi Yamauchi573f7d22013-12-17 11:54:23 -08001444 }
Ian Rogers13735952014-10-08 12:43:28 -07001445 void* end = reinterpret_cast<uint8_t*>(fpr) + fpr_size;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001446 handler(start, end, 0, arg);
1447 size_t num_pages = fpr_size / kPageSize;
1448 if (kIsDebugBuild) {
1449 for (size_t j = i + 1; j < i + num_pages; ++j) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001450 DCHECK(IsFreePage(j));
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001451 }
1452 }
1453 i += fpr_size / kPageSize;
1454 DCHECK_LE(i, pm_end);
1455 break;
1456 }
1457 case kPageMapLargeObject: {
1458 // The start of a large object.
1459 size_t num_pages = 1;
1460 size_t idx = i + 1;
1461 while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
1462 num_pages++;
1463 idx++;
1464 }
1465 void* start = base_ + i * kPageSize;
1466 void* end = base_ + (i + num_pages) * kPageSize;
1467 size_t used_bytes = num_pages * kPageSize;
1468 handler(start, end, used_bytes, arg);
1469 if (kIsDebugBuild) {
1470 for (size_t j = i + 1; j < i + num_pages; ++j) {
1471 DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
1472 }
1473 }
1474 i += num_pages;
1475 DCHECK_LE(i, pm_end);
1476 break;
1477 }
1478 case kPageMapLargeObjectPart:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001479 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001480 break;
1481 case kPageMapRun: {
1482 // The start of a run.
1483 Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001484 DCHECK_EQ(run->magic_num_, kMagicNum);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001485        // The dedicated full run doesn't contain any real allocations, so there is no need to
1486        // visit its slots.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001487 run->InspectAllSlots(handler, arg);
1488 size_t num_pages = numOfPages[run->size_bracket_idx_];
1489 if (kIsDebugBuild) {
1490 for (size_t j = i + 1; j < i + num_pages; ++j) {
1491 DCHECK_EQ(page_map_[j], kPageMapRunPart);
1492 }
1493 }
1494 i += num_pages;
1495 DCHECK_LE(i, pm_end);
1496 break;
1497 }
1498 case kPageMapRunPart:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001499 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001500 break;
1501 default:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001502 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001503 break;
1504 }
1505 }
1506}
1507
1508size_t RosAlloc::Footprint() {
1509 MutexLock mu(Thread::Current(), lock_);
1510 return footprint_;
1511}
1512
1513size_t RosAlloc::FootprintLimit() {
1514 MutexLock mu(Thread::Current(), lock_);
1515 return capacity_;
1516}
1517
1518void RosAlloc::SetFootprintLimit(size_t new_capacity) {
1519 MutexLock mu(Thread::Current(), lock_);
1520 DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
1521 // Only growing is supported here. But Trim() is supported.
1522 if (capacity_ < new_capacity) {
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001523 CHECK_LE(new_capacity, max_capacity_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001524 capacity_ = new_capacity;
1525 VLOG(heap) << "new capacity=" << capacity_;
1526 }
1527}
1528
Lei Li57846212015-06-11 17:50:20 +08001529// The function below may be called by a mutator thread itself just before thread termination.
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001530size_t RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001531 Thread* self = Thread::Current();
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001532 size_t free_bytes = 0U;
Mathieu Chartier0651d412014-04-29 14:37:57 -07001533 for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001534 MutexLock mu(self, *size_bracket_locks_[idx]);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001535 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001536 CHECK(thread_local_run != nullptr);
1537 // Invalid means already revoked.
1538 DCHECK(thread_local_run->IsThreadLocal());
1539 if (thread_local_run != dedicated_full_run_) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001540 // Note the thread local run may not be full here.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001541 thread->SetRosAllocRun(idx, dedicated_full_run_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001542 DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001543 // Count the number of free slots left.
1544 size_t num_free_slots = thread_local_run->NumberOfFreeSlots();
1545 free_bytes += num_free_slots * bracketSizes[idx];
Lei Li57846212015-06-11 17:50:20 +08001546      // The bracket index lock above guards the thread local free list against racing
1547      // with the GC thread unioning the bulk free list into the thread local free list in
1548      // BulkFree(). While the run is still thread local, the GC thread updates the thread
1549      // local free list in BulkFree(), and that list is merged into the free list either
1550      // when this run becomes full or when it is revoked here, so the free list ends up
1551      // up to date. Once the run is no longer thread local, the GC thread merges the bulk
1552      // free list in the next BulkFree().
1553      // Thus there is no need to merge the bulk free list into the free list again here.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001554 bool dont_care;
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001555 thread_local_run->MergeThreadLocalFreeListToFreeList(&dont_care);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001556 thread_local_run->SetIsThreadLocal(false);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001557 DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
1558 DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
Mathieu Chartier0651d412014-04-29 14:37:57 -07001559 RevokeRun(self, idx, thread_local_run);
1560 }
1561 }
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001562 return free_bytes;
Mathieu Chartier0651d412014-04-29 14:37:57 -07001563}
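// A worked example of the accounting above: revoking a thread-local run in
// the 16-byte bracket with 23 slots still on its free lists reports
// 23 * bracketSizes[idx] == 368 free bytes, merges the thread-local free
// list into the run's main free list, clears the thread-local flag, and
// hands the run to RevokeRun() for reclassification.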
1564
1565void RosAlloc::RevokeRun(Thread* self, size_t idx, Run* run) {
1566 size_bracket_locks_[idx]->AssertHeld(self);
1567 DCHECK(run != dedicated_full_run_);
1568 if (run->IsFull()) {
1569 if (kIsDebugBuild) {
1570 full_runs_[idx].insert(run);
1571 DCHECK(full_runs_[idx].find(run) != full_runs_[idx].end());
1572 if (kTraceRosAlloc) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001573 LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
Mathieu Chartier0651d412014-04-29 14:37:57 -07001574 << reinterpret_cast<intptr_t>(run)
1575 << " into full_runs_[" << std::dec << idx << "]";
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001576 }
1577 }
Mathieu Chartier0651d412014-04-29 14:37:57 -07001578 } else if (run->IsAllFree()) {
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001579 run->ZeroHeaderAndSlotHeaders();
Mathieu Chartier0651d412014-04-29 14:37:57 -07001580 MutexLock mu(self, lock_);
1581 FreePages(self, run, true);
1582 } else {
1583 non_full_runs_[idx].insert(run);
1584 DCHECK(non_full_runs_[idx].find(run) != non_full_runs_[idx].end());
1585 if (kTraceRosAlloc) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001586 LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
Mathieu Chartier0651d412014-04-29 14:37:57 -07001587 << reinterpret_cast<intptr_t>(run)
1588 << " into non_full_runs_[" << std::dec << idx << "]";
1589 }
1590 }
1591}
1592
1593void RosAlloc::RevokeThreadUnsafeCurrentRuns() {
1594 // Revoke the current runs which share the same idx as thread local runs.
1595 Thread* self = Thread::Current();
1596 for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
1597 MutexLock mu(self, *size_bracket_locks_[idx]);
1598 if (current_runs_[idx] != dedicated_full_run_) {
1599 RevokeRun(self, idx, current_runs_[idx]);
1600 current_runs_[idx] = dedicated_full_run_;
1601 }
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001602 }
1603}
1604
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001605size_t RosAlloc::RevokeAllThreadLocalRuns() {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001606 // This is called when a mutator thread won't allocate such as at
1607 // the Zygote creation time or during the GC pause.
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001608 MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
1609 MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001610 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001611 size_t free_bytes = 0U;
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001612 for (Thread* thread : thread_list) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001613 free_bytes += RevokeThreadLocalRuns(thread);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001614 }
Mathieu Chartier0651d412014-04-29 14:37:57 -07001615 RevokeThreadUnsafeCurrentRuns();
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001616 return free_bytes;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001617}
1618
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001619void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) {
1620 if (kIsDebugBuild) {
1621 Thread* self = Thread::Current();
1622    // Avoid race conditions on the bulk free lists with BulkFree() (GC).
Mathieu Chartiera1c1c712014-06-23 17:53:09 -07001623 ReaderMutexLock wmu(self, bulk_free_lock_);
Mathieu Chartier0651d412014-04-29 14:37:57 -07001624 for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001625 MutexLock mu(self, *size_bracket_locks_[idx]);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001626 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001627 DCHECK(thread_local_run == nullptr || thread_local_run == dedicated_full_run_);
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001628 }
1629 }
1630}
1631
1632void RosAlloc::AssertAllThreadLocalRunsAreRevoked() {
1633 if (kIsDebugBuild) {
Mathieu Chartier0651d412014-04-29 14:37:57 -07001634 Thread* self = Thread::Current();
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001635 MutexLock shutdown_mu(self, *Locks::runtime_shutdown_lock_);
1636 MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001637 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1638 for (Thread* t : thread_list) {
1639 AssertThreadLocalRunsAreRevoked(t);
1640 }
Mathieu Chartier0651d412014-04-29 14:37:57 -07001641 for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001642 MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
Mathieu Chartier0651d412014-04-29 14:37:57 -07001643 CHECK_EQ(current_runs_[idx], dedicated_full_run_);
1644 }
Hiroshi Yamauchic93c5302014-03-20 16:15:37 -07001645 }
1646}
1647
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001648void RosAlloc::Initialize() {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001649 // bracketSizes.
1650 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1651 if (i < kNumOfSizeBrackets - 2) {
1652 bracketSizes[i] = 16 * (i + 1);
1653 } else if (i == kNumOfSizeBrackets - 2) {
1654 bracketSizes[i] = 1 * KB;
1655 } else {
Ian Rogers5d057052014-03-12 14:32:27 -07001656 DCHECK_EQ(i, kNumOfSizeBrackets - 1);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001657 bracketSizes[i] = 2 * KB;
1658 }
1659 if (kTraceRosAlloc) {
1660 LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
1661 }
1662 }
1663 // numOfPages.
1664 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1665 if (i < 4) {
1666 numOfPages[i] = 1;
1667 } else if (i < 8) {
Hiroshi Yamauchi3f3c6c02014-11-20 14:16:06 -08001668 numOfPages[i] = 1;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001669 } else if (i < 16) {
1670 numOfPages[i] = 4;
1671 } else if (i < 32) {
1672 numOfPages[i] = 8;
1673 } else if (i == 32) {
Ian Rogers5d057052014-03-12 14:32:27 -07001674 DCHECK_EQ(i, kNumOfSizeBrackets - 2);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001675 numOfPages[i] = 16;
1676 } else {
Ian Rogers5d057052014-03-12 14:32:27 -07001677 DCHECK_EQ(i, kNumOfSizeBrackets - 1);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001678 numOfPages[i] = 32;
1679 }
1680 if (kTraceRosAlloc) {
1681 LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
1682 }
1683 }
1684 // Compute numOfSlots and slotOffsets.
1685 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1686 size_t bracket_size = bracketSizes[i];
1687 size_t run_size = kPageSize * numOfPages[i];
1688 size_t max_num_of_slots = run_size / bracket_size;
1689 // Compute the actual number of slots by taking the header and
1690 // alignment into account.
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001691 size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint64_t));
1692 DCHECK_EQ(fixed_header_size, 80U);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001693 size_t header_size = 0;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001694 size_t num_of_slots = 0;
1695 // Search for the maximum number of slots that allows enough space
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001696 // for the header.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001697 for (int s = max_num_of_slots; s >= 0; s--) {
1698 size_t tmp_slots_size = bracket_size * s;
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001699 size_t tmp_unaligned_header_size = fixed_header_size;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001700 // Align up the unaligned header size. bracket_size may not be a power of two.
1701 size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
1702 tmp_unaligned_header_size :
1703 tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
1704 DCHECK_EQ(tmp_header_size % bracket_size, static_cast<size_t>(0));
1705 DCHECK_EQ(tmp_header_size % 8, static_cast<size_t>(0));
1706 if (tmp_slots_size + tmp_header_size <= run_size) {
1707 // Found the right number of slots, that is, there was enough
1708        // space for the header.
1709 num_of_slots = s;
1710 header_size = tmp_header_size;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001711 break;
1712 }
1713 }
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001714 DCHECK_GT(num_of_slots, 0U);
1715 DCHECK_GT(header_size, 0U);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001716 // Add the padding for the alignment remainder.
1717 header_size += run_size % bracket_size;
Ian Rogers5d057052014-03-12 14:32:27 -07001718 DCHECK_EQ(header_size + num_of_slots * bracket_size, run_size);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001719 numOfSlots[i] = num_of_slots;
1720 headerSizes[i] = header_size;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001721 if (kTraceRosAlloc) {
1722 LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001723 << ", headerSizes[" << i << "]=" << headerSizes[i];
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001724 }
1725 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001726  // Set up the dedicated full run so that nobody can successfully allocate from it.
1727 if (kIsDebugBuild) {
1728 dedicated_full_run_->magic_num_ = kMagicNum;
1729 }
1730 // It doesn't matter which size bracket we use since the main goal is to have the allocation
1731 // fail 100% of the time you attempt to allocate into the dedicated full run.
1732 dedicated_full_run_->size_bracket_idx_ = 0;
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001733 DCHECK_EQ(dedicated_full_run_->FreeList()->Size(), 0U); // It looks full.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001734 dedicated_full_run_->SetIsThreadLocal(true);
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001735
1736 // The smallest bracket size must be at least as large as the sizeof(Slot).
1737 DCHECK_LE(sizeof(Slot), bracketSizes[0]) << "sizeof(Slot) <= the smallest bracket size";
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001738}
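// Worked example of the slot-count search above for the smallest bracket,
// assuming 4 KiB pages (bracket_size == 16, numOfPages == 1, fixed header 80
// bytes): 80 is already a multiple of 16, so the search finds the largest s
// with 16 * s + 80 <= 4096, giving numOfSlots[0] == 251 and headerSizes[0] ==
// 80 + 4096 % 16 == 80. Check: 80 + 251 * 16 == 4096, exactly one page.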
1739
Ian Rogers6a3c1fc2014-10-31 00:33:20 -07001740void RosAlloc::BytesAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
1741 size_t used_bytes, void* arg) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001742 if (used_bytes == 0) {
1743 return;
1744 }
1745 size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
1746 *bytes_allocated += used_bytes;
1747}
1748
Ian Rogers6a3c1fc2014-10-31 00:33:20 -07001749void RosAlloc::ObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
1750 size_t used_bytes, void* arg) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001751 if (used_bytes == 0) {
1752 return;
1753 }
1754 size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
1755 ++(*objects_allocated);
1756}
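// A hedged usage sketch for the two callbacks above: they are meant to be
// handed to InspectAll(), which walks every region and reports per-region
// used bytes (the rosalloc pointer below is assumed to be in scope):
//
//   size_t bytes_allocated = 0;
//   size_t objects_allocated = 0;
//   rosalloc->InspectAll(RosAlloc::BytesAllocatedCallback, &bytes_allocated);
//   rosalloc->InspectAll(RosAlloc::ObjectsAllocatedCallback, &objects_allocated);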
1757
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001758void RosAlloc::Verify() {
1759 Thread* self = Thread::Current();
1760 CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001761      << "The mutator lock isn't exclusively locked at " << __PRETTY_FUNCTION__;
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001762 MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
Mathieu Chartiera1c1c712014-06-23 17:53:09 -07001763 ReaderMutexLock wmu(self, bulk_free_lock_);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001764 std::vector<Run*> runs;
1765 {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001766 MutexLock lock_mu(self, lock_);
Hiroshi Yamauchi26d69ff2014-02-27 11:27:10 -08001767 size_t pm_end = page_map_size_;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001768 size_t i = 0;
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001769 size_t memory_tool_modifier = is_running_on_memory_tool_ ?
1770 2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes : // Redzones before and after.
Andreas Gampefef16ad2015-02-19 16:44:32 -08001771 0;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001772 while (i < pm_end) {
Ian Rogers13735952014-10-08 12:43:28 -07001773 uint8_t pm = page_map_[i];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001774 switch (pm) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001775 case kPageMapReleased:
1776 // Fall-through.
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001777 case kPageMapEmpty: {
1778 // The start of a free page run.
1779 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001780 DCHECK_EQ(fpr->magic_num_, kMagicNumFree);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001781 CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
1782 << "An empty page must belong to the free page run set";
1783 size_t fpr_size = fpr->ByteSize(this);
Roland Levillain14d90572015-07-16 10:52:26 +01001784 CHECK_ALIGNED(fpr_size, kPageSize)
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001785 << "A free page run size isn't page-aligned : " << fpr_size;
1786 size_t num_pages = fpr_size / kPageSize;
1787 CHECK_GT(num_pages, static_cast<uintptr_t>(0))
1788 << "A free page run size must be > 0 : " << fpr_size;
1789 for (size_t j = i + 1; j < i + num_pages; ++j) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07001790 CHECK(IsFreePage(j))
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001791 << "A mismatch between the page map table for kPageMapEmpty "
1792 << " at page index " << j
1793 << " and the free page run size : page index range : "
1794 << i << " to " << (i + num_pages) << std::endl << DumpPageMap();
1795 }
1796 i += num_pages;
1797 CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
1798 << std::endl << DumpPageMap();
1799 break;
1800 }
1801 case kPageMapLargeObject: {
1802 // The start of a large object.
1803 size_t num_pages = 1;
1804 size_t idx = i + 1;
1805 while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
1806 num_pages++;
1807 idx++;
1808 }
Andreas Gamped7576322014-10-24 22:13:45 -07001809 uint8_t* start = base_ + i * kPageSize;
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001810 if (is_running_on_memory_tool_) {
1811 start += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
Andreas Gamped7576322014-10-24 22:13:45 -07001812 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001813 mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
1814 size_t obj_size = obj->SizeOf();
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001815 CHECK_GT(obj_size + memory_tool_modifier, kLargeSizeThreshold)
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001816 << "A rosalloc large object size must be > " << kLargeSizeThreshold;
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001817 CHECK_EQ(num_pages, RoundUp(obj_size + memory_tool_modifier, kPageSize) / kPageSize)
1818 << "A rosalloc large object size " << obj_size + memory_tool_modifier
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001819 << " does not match the page map table " << (num_pages * kPageSize)
1820 << std::endl << DumpPageMap();
1821 i += num_pages;
1822 CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
1823 << std::endl << DumpPageMap();
1824 break;
1825 }
1826 case kPageMapLargeObjectPart:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001827 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001828 break;
1829 case kPageMapRun: {
1830 // The start of a run.
1831 Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
Ian Rogers5d057052014-03-12 14:32:27 -07001832 DCHECK_EQ(run->magic_num_, kMagicNum);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001833 size_t idx = run->size_bracket_idx_;
Ian Rogers5d057052014-03-12 14:32:27 -07001834 CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001835 size_t num_pages = numOfPages[idx];
1836 CHECK_GT(num_pages, static_cast<uintptr_t>(0))
1837 << "Run size must be > 0 : " << num_pages;
1838 for (size_t j = i + 1; j < i + num_pages; ++j) {
1839 CHECK_EQ(page_map_[j], kPageMapRunPart)
1840 << "A mismatch between the page map table for kPageMapRunPart "
1841 << " at page index " << j
1842 << " and the run size : page index range " << i << " to " << (i + num_pages)
1843 << std::endl << DumpPageMap();
1844 }
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001845 // Don't verify the dedicated_full_run_ since it doesn't have any real allocations.
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001846 runs.push_back(run);
1847 i += num_pages;
1848 CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
1849 << std::endl << DumpPageMap();
1850 break;
1851 }
1852 case kPageMapRunPart:
Mathieu Chartier0651d412014-04-29 14:37:57 -07001853 // Fall-through.
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001854 default:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07001855 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001856 break;
1857 }
1858 }
1859 }
Mathieu Chartier0651d412014-04-29 14:37:57 -07001860 std::list<Thread*> threads = Runtime::Current()->GetThreadList()->GetList();
1861 for (Thread* thread : threads) {
1862 for (size_t i = 0; i < kNumThreadLocalSizeBrackets; ++i) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001863 MutexLock brackets_mu(self, *size_bracket_locks_[i]);
Mathieu Chartier0651d412014-04-29 14:37:57 -07001864 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
1865 CHECK(thread_local_run != nullptr);
1866 CHECK(thread_local_run->IsThreadLocal());
1867 CHECK(thread_local_run == dedicated_full_run_ ||
1868 thread_local_run->size_bracket_idx_ == i);
1869 }
1870 }
1871 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001872 MutexLock brackets_mu(self, *size_bracket_locks_[i]);
Mathieu Chartier0651d412014-04-29 14:37:57 -07001873 Run* current_run = current_runs_[i];
1874 CHECK(current_run != nullptr);
1875 if (current_run != dedicated_full_run_) {
1876 // The dedicated full run is currently marked as thread local.
1877 CHECK(!current_run->IsThreadLocal());
1878 CHECK_EQ(current_run->size_bracket_idx_, i);
1879 }
1880 }
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001881 // Call Verify() here for the lock order.
1882 for (auto& run : runs) {
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001883 run->Verify(self, this, is_running_on_memory_tool_);
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001884 }
1885}
1886
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001887void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool) {
Ian Rogers5d057052014-03-12 14:32:27 -07001888 DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001889 const size_t idx = size_bracket_idx_;
Ian Rogers5d057052014-03-12 14:32:27 -07001890 CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
Ian Rogers13735952014-10-08 12:43:28 -07001891 uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001892 const size_t num_slots = numOfSlots[idx];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001893 size_t bracket_size = IndexToBracketSize(idx);
1894 CHECK_EQ(slot_base + num_slots * bracket_size,
Ian Rogers13735952014-10-08 12:43:28 -07001895 reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize)
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001896 << "Mismatch in the end address of the run " << Dump();
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001897 // Check that the bulk free list is empty. It's only used during BulkFree().
1898  CHECK(IsBulkFreeListEmpty()) << "The bulk free list isn't empty " << Dump();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001899 // Check the thread local runs, the current runs, and the run sets.
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001900 if (IsThreadLocal()) {
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001901 // If it's a thread local run, then it must be pointed to by an owner thread.
1902 bool owner_found = false;
1903 std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
1904 for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
1905 Thread* thread = *it;
Mathieu Chartier0651d412014-04-29 14:37:57 -07001906 for (size_t i = 0; i < kNumThreadLocalSizeBrackets; i++) {
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001907 MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001908 Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001909 if (thread_local_run == this) {
1910 CHECK(!owner_found)
1911 << "A thread local run has more than one owner thread " << Dump();
1912 CHECK_EQ(i, idx)
1913 << "A mismatching size bracket index in a thread local run " << Dump();
1914 owner_found = true;
1915 }
1916 }
1917 }
1918 CHECK(owner_found) << "A thread local run has no owner thread " << Dump();
1919 } else {
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001920 // If it's not thread local, check that the thread local free list is empty.
1921 CHECK(IsThreadLocalFreeListEmpty())
1922 << "A non-thread-local run's thread local free list isn't empty "
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001923 << Dump();
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001924 // Check if it's a current run for the size bracket.
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001925 bool is_current_run = false;
1926 for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
1927 MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
1928 Run* current_run = rosalloc->current_runs_[i];
1929 if (idx == i) {
1930 if (this == current_run) {
1931 is_current_run = true;
1932 }
1933 } else {
1934 // If the size bucket index does not match, then it must not
1935 // be a current run.
1936 CHECK_NE(this, current_run)
1937 << "A current run points to a run with a wrong size bracket index " << Dump();
1938 }
1939 }
1940 // If it's neither a thread local or current run, then it must be
1941 // in a run set.
1942 if (!is_current_run) {
1943 MutexLock mu(self, rosalloc->lock_);
Mathieu Chartier58553c72014-09-16 16:25:55 -07001944 auto& non_full_runs = rosalloc->non_full_runs_[idx];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001945 // If it's all free, it must be a free page run rather than a run.
1946 CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump();
1947 if (!IsFull()) {
1948 // If it's not full, it must in the non-full run set.
1949 CHECK(non_full_runs.find(this) != non_full_runs.end())
1950 << "A non-full run isn't in the non-full run set " << Dump();
1951 } else {
1952 // If it's full, it must in the full run set (debug build only.)
1953 if (kIsDebugBuild) {
Mathieu Chartier58553c72014-09-16 16:25:55 -07001954 auto& full_runs = rosalloc->full_runs_[idx];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001955 CHECK(full_runs.find(this) != full_runs.end())
1956 << " A full run isn't in the full run set " << Dump();
1957 }
1958 }
1959 }
1960 }
1961 // Check each slot.
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001962 size_t memory_tool_modifier = running_on_memory_tool ?
1963 2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes :
Andreas Gamped7576322014-10-24 22:13:45 -07001964 0U;
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -07001965 // TODO: reuse InspectAllSlots().
1966 std::unique_ptr<bool[]> is_free(new bool[num_slots]()); // zero initialized
1967 // Mark the free slots and the remaining ones are allocated.
1968 for (Slot* slot = free_list_.Head(); slot != nullptr; slot = slot->Next()) {
1969 size_t slot_idx = SlotIndex(slot);
1970 DCHECK_LT(slot_idx, num_slots);
1971 is_free[slot_idx] = true;
1972 }
1973 if (IsThreadLocal()) {
1974 for (Slot* slot = thread_local_free_list_.Head(); slot != nullptr; slot = slot->Next()) {
1975 size_t slot_idx = SlotIndex(slot);
1976 DCHECK_LT(slot_idx, num_slots);
1977 is_free[slot_idx] = true;
1978 }
1979 }
1980 for (size_t slot_idx = 0; slot_idx < num_slots; ++slot_idx) {
1981 uint8_t* slot_addr = slot_base + slot_idx * bracket_size;
1982 if (running_on_memory_tool) {
1983 slot_addr += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
1984 }
1985 if (!is_free[slot_idx]) {
1986      // The slot is allocated.
1987 mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
1988 size_t obj_size = obj->SizeOf();
1989 CHECK_LE(obj_size + memory_tool_modifier, kLargeSizeThreshold)
1990 << "A run slot contains a large object " << Dump();
1991 CHECK_EQ(SizeToIndex(obj_size + memory_tool_modifier), idx)
1992 << PrettyTypeOf(obj) << " "
1993 << "obj_size=" << obj_size << "(" << obj_size + memory_tool_modifier << "), idx=" << idx
1994 << " A run slot contains an object with wrong size " << Dump();
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001995 }
1996 }
1997}
1998
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07001999size_t RosAlloc::ReleasePages() {
2000 VLOG(heap) << "RosAlloc::ReleasePages()";
2001 DCHECK(!DoesReleaseAllPages());
2002 Thread* self = Thread::Current();
2003 size_t reclaimed_bytes = 0;
2004 size_t i = 0;
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07002005 // Check the page map size which might have changed due to grow/shrink.
2006 while (i < page_map_size_) {
2007 // Reading the page map without a lock is racy but the race is benign since it should only
2008 // result in occasionally not releasing pages which we could release.
Ian Rogers13735952014-10-08 12:43:28 -07002009 uint8_t pm = page_map_[i];
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07002010 switch (pm) {
Mathieu Chartiere28ed992014-07-10 10:16:44 -07002011 case kPageMapReleased:
2012 // Fall through.
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07002013 case kPageMapEmpty: {
Mathieu Chartiere28ed992014-07-10 10:16:44 -07002014 // This is currently the start of a free page run.
2015 // Acquire the lock to prevent other threads racing in and modifying the page map.
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07002016 MutexLock mu(self, lock_);
2017 // Check that it's still empty after we acquired the lock since another thread could have
2018 // raced in and placed an allocation here.
Mathieu Chartiere28ed992014-07-10 10:16:44 -07002019 if (IsFreePage(i)) {
2020 // Free page runs can start with a released page if we coalesced a released page free
2021 // page run with an empty page run.
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07002022 FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
Mathieu Chartiere28ed992014-07-10 10:16:44 -07002023 // There is a race condition where FreePage can coalesce fpr with the previous
2024 // free page run before we acquire lock_. In that case free_page_runs_.find will not find
2025 // a run starting at fpr. To handle this race, we skip reclaiming the page range and go
2026 // to the next page.
2027 if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
2028 size_t fpr_size = fpr->ByteSize(this);
Roland Levillain14d90572015-07-16 10:52:26 +01002029 DCHECK_ALIGNED(fpr_size, kPageSize);
Ian Rogers13735952014-10-08 12:43:28 -07002030 uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
Mathieu Chartiere28ed992014-07-10 10:16:44 -07002031 reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
2032 size_t pages = fpr_size / kPageSize;
2033 CHECK_GT(pages, 0U) << "Infinite loop probable";
2034 i += pages;
2035 DCHECK_LE(i, page_map_size_);
2036 break;
2037 }
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07002038 }
Ian Rogersfc787ec2014-10-09 21:56:44 -07002039 FALLTHROUGH_INTENDED;
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07002040 }
2041 case kPageMapLargeObject: // Fall through.
2042 case kPageMapLargeObjectPart: // Fall through.
2043 case kPageMapRun: // Fall through.
2044 case kPageMapRunPart: // Fall through.
2045 ++i;
2046 break; // Skip.
2047 default:
Maxim Kazantsev2fdeecb2014-10-16 10:55:47 +07002048 LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
Hiroshi Yamauchid9a88de2014-04-07 13:52:31 -07002049 break;
2050 }
2051 }
2052 return reclaimed_bytes;
2053}
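// The loop above is a double-checked scan (sketched here in outline; this
// mirrors the code rather than adding an API): candidates are found with a
// racy unlocked read of page_map_, then re-validated under lock_ before any
// release, since another thread may have allocated into the range or
// coalesced the free page run in between.
//
//   if (page_map_[i] == kPageMapEmpty) {          // Unlocked, possibly stale.
//     MutexLock mu(self, lock_);
//     FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
//     if (IsFreePage(i) && free_page_runs_.find(fpr) != free_page_runs_.end()) {
//       uint8_t* start = reinterpret_cast<uint8_t*>(fpr);  // Re-validated: safe to release.
//       reclaimed_bytes += ReleasePageRange(start, start + fpr->ByteSize(this));
//     }
//   }
//
// Missing a release opportunity is benign; releasing a live page would not be.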
2054
Ian Rogers13735952014-10-08 12:43:28 -07002055size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) {
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07002056 DCHECK_ALIGNED(start, kPageSize);
2057 DCHECK_ALIGNED(end, kPageSize);
2058 DCHECK_LT(start, end);
2059 if (kIsDebugBuild) {
2060 // In the debug build, the first page of a free page run
2061 // contains a magic number for debugging. Exclude it.
2062 start += kPageSize;
Andreas Gamped7576322014-10-24 22:13:45 -07002063
2064 // Single pages won't be released.
2065 if (start == end) {
2066 return 0;
2067 }
Mathieu Chartiera5b5c552014-06-24 14:48:59 -07002068 }
2069 if (!kMadviseZeroes) {
2070 // TODO: Do this when we resurrect the page instead.
2071 memset(start, 0, end - start);
2072 }
2073 CHECK_EQ(madvise(start, end - start, MADV_DONTNEED), 0);
2074 size_t pm_idx = ToPageMapIndex(start);
2075 size_t reclaimed_bytes = 0;
2076  // Calculate reclaimed bytes and update the page map.
2077 const size_t max_idx = pm_idx + (end - start) / kPageSize;
2078 for (; pm_idx < max_idx; ++pm_idx) {
2079 DCHECK(IsFreePage(pm_idx));
2080 if (page_map_[pm_idx] == kPageMapEmpty) {
2081 // Mark the page as released and update how many bytes we released.
2082 reclaimed_bytes += kPageSize;
2083 page_map_[pm_idx] = kPageMapReleased;
2084 }
2085 }
2086 return reclaimed_bytes;
2087}
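// A minimal standalone illustration of the release primitive used above
// (a sketch; the helper name is an assumption, not ART's API): on Linux,
// MADV_DONTNEED drops the backing pages of a private anonymous mapping, and
// the next touch demand-faults them back zero-filled, which is why the
// explicit memset above is only needed when kMadviseZeroes is false.
namespace {

void ExampleReleaseRange(uint8_t* begin, uint8_t* end) {
  // Both bounds must be page-aligned, as DCHECKed above.
  CHECK_EQ(madvise(begin, end - begin, MADV_DONTNEED), 0);
}

}  // namespace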
2088
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -07002089void RosAlloc::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
2090 Thread* self = Thread::Current();
2091 size_t largest_continuous_free_pages = 0;
2092 WriterMutexLock wmu(self, bulk_free_lock_);
2093 MutexLock mu(self, lock_);
2094 for (FreePageRun* fpr : free_page_runs_) {
2095 largest_continuous_free_pages = std::max(largest_continuous_free_pages,
2096 fpr->ByteSize(this));
2097 }
2098 if (failed_alloc_bytes > kLargeSizeThreshold) {
2099 // Large allocation.
2100 size_t required_bytes = RoundUp(failed_alloc_bytes, kPageSize);
2101 if (required_bytes > largest_continuous_free_pages) {
2102      os << "; failed due to fragmentation (required contiguous free "
2103 << required_bytes << " bytes where largest contiguous free "
2104 << largest_continuous_free_pages << " bytes)";
2105 }
2106 } else {
2107 // Non-large allocation.
2108 size_t required_bytes = numOfPages[SizeToIndex(failed_alloc_bytes)] * kPageSize;
2109 if (required_bytes > largest_continuous_free_pages) {
2110      os << "; failed due to fragmentation (required contiguous free "
2111 << required_bytes << " bytes for a new buffer where largest contiguous free "
2112 << largest_continuous_free_pages << " bytes)";
2113 }
2114 }
2115}
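// Worked example of the diagnostic above, assuming 4 KiB pages: a failed
// 9000-byte large allocation needs required_bytes == 12288 (three contiguous
// pages); if largest_continuous_free_pages == 8192, the message fires, since
// no single free page run can hold the request even when the total free
// space across all runs would be sufficient.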
2116
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07002117} // namespace allocator
2118} // namespace gc
2119} // namespace art