/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rosalloc.h"

#include "base/memory_tool.h"
#include "base/mutex-inl.h"
#include "gc/space/memory_tool_settings.h"
#include "mem_map.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "thread_list.h"

#include <map>
#include <list>
#include <sstream>
#include <vector>

namespace art {
namespace gc {
namespace allocator {

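// Tunables for warming up a newly allocated run in AllocRun(): either prefetch the slot area
// one cache line (kPrefetchStride bytes) at a time, or optionally zero it outright to take
// ownership of the cache lines before the run is handed out as a thread-local run.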
static constexpr bool kUsePrefetchDuringAllocRun = true;
static constexpr bool kPrefetchNewRunDataByZeroing = false;
static constexpr size_t kPrefetchStride = 64;

size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
size_t RosAlloc::bulkFreeBitMapOffsets[kNumOfSizeBrackets];
size_t RosAlloc::threadLocalFreeBitMapOffsets[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;
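// Statically allocated, zero-filled page backing the dedicated full run: a sentinel run that
// is permanently "full". When a refill fails, current_runs_[idx] and the thread-local run
// slots are parked on it, so the next allocation attempt fails fast instead of dereferencing
// a null run pointer.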
size_t RosAlloc::dedicated_full_run_storage_[kPageSize / sizeof(size_t)] = { 0 };
RosAlloc::Run* RosAlloc::dedicated_full_run_ =
    reinterpret_cast<RosAlloc::Run*>(dedicated_full_run_storage_);

RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                   PageReleaseMode page_release_mode, bool running_on_memory_tool,
                   size_t page_release_size_threshold)
    : base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity),
      capacity_(capacity), max_capacity_(max_capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
      page_release_mode_(page_release_mode),
      page_release_size_threshold_(page_release_size_threshold),
      is_running_on_memory_tool_(running_on_memory_tool) {
  DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
  DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
  CHECK_LE(capacity, max_capacity);
  CHECK(IsAligned<kPageSize>(page_release_size_threshold_));
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << (intptr_t)base_ << ", end="
             << std::hex << (intptr_t)(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_
             << ", max_capacity=" << std::dec << max_capacity_;
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_lock_names_[i] =
        StringPrintf("a rosalloc size bracket %d lock", static_cast<int>(i));
    size_bracket_locks_[i] = new Mutex(size_bracket_lock_names_[i].c_str(), kRosAllocBracketLock);
    current_runs_[i] = dedicated_full_run_;
  }
  DCHECK_EQ(footprint_, capacity_);
  size_t num_of_pages = footprint_ / kPageSize;
  size_t max_num_of_pages = max_capacity_ / kPageSize;
  std::string error_msg;
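  // The page map tracks one byte of state per page. Reserve room for the maximum capacity up
  // front so the map never has to move when the footprint grows.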
  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", nullptr,
                                               RoundUp(max_num_of_pages, kPageSize),
                                               PROT_READ | PROT_WRITE, false, false, &error_msg));
  CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map : " << error_msg;
  page_map_ = page_map_mem_map_->Begin();
  page_map_size_ = num_of_pages;
  max_page_map_size_ = max_num_of_pages;
  free_page_run_size_map_.resize(num_of_pages);
  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  DCHECK(free_pages->IsFree());
  free_pages->ReleasePages(this);
  DCHECK(free_pages->IsFree());
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}

RosAlloc::~RosAlloc() {
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    delete size_bracket_locks_[i];
  }
  if (is_running_on_memory_tool_) {
    MEMORY_TOOL_MAKE_DEFINED(base_, capacity_);
  }
}
118
Ian Rogers13735952014-10-08 12:43:28 -0700119void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700120 lock_.AssertHeld(self);
121 DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700122 FreePageRun* res = nullptr;
Mathieu Chartier73d1e172014-04-11 17:53:48 -0700123 const size_t req_byte_size = num_pages * kPageSize;
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700124 // Find the lowest address free page run that's large enough.
125 for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
126 FreePageRun* fpr = *it;
127 DCHECK(fpr->IsFree());
128 size_t fpr_byte_size = fpr->ByteSize(this);
129 DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
130 if (req_byte_size <= fpr_byte_size) {
131 // Found one.
132 free_page_runs_.erase(it++);
133 if (kTraceRosAlloc) {
134 LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
135 << std::hex << reinterpret_cast<intptr_t>(fpr)
136 << " from free_page_runs_";
137 }
138 if (req_byte_size < fpr_byte_size) {
139 // Split.
Ian Rogers13735952014-10-08 12:43:28 -0700140 FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700141 if (kIsDebugBuild) {
142 remainder->magic_num_ = kMagicNumFree;
143 }
144 remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
145 DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
146 // Don't need to call madvise on remainder here.
147 free_page_runs_.insert(remainder);
148 if (kTraceRosAlloc) {
149 LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
150 << reinterpret_cast<intptr_t>(remainder)
151 << " into free_page_runs_";
152 }
153 fpr->SetByteSize(this, req_byte_size);
154 DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
155 }
156 res = fpr;
157 break;
158 } else {
159 ++it;
160 }
161 }
162
  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == nullptr && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = nullptr;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_size_, new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_size_ = new_num_of_pages;
      DCHECK_LE(page_map_size_, max_page_map_size_);
      free_page_run_size_map_.resize(new_num_of_pages);
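      // Map in the additional pages. ArtRosAllocMoreCore is a morecore-style hook provided by
      // the space that owns this allocator (defined outside this file).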
      ArtRosAllocMoreCore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK_EQ(last_free_page_run->End(this), base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK_EQ(*free_page_runs_.rbegin(), new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != nullptr);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != nullptr)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK(IsFreePage(page_map_idx + i));
    }
    switch (page_map_type) {
      case kPageMapRun:
        page_map_[page_map_idx] = kPageMapRun;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapRunPart;
        }
        break;
      case kPageMapLargeObject:
        page_map_[page_map_idx] = kPageMapLargeObject;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
        }
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_type);
        break;
    }
    if (kIsDebugBuild) {
      // Clear the first page since it is not madvised due to the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : nullptr";
  }
  return nullptr;
}

size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK_LT(pm_idx, page_map_size_);
  uint8_t pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  uint8_t pm_part_type;
  switch (pm_type) {
    case kPageMapRun:
      pm_part_type = kPageMapRunPart;
      break;
    case kPageMapLargeObject:
      pm_part_type = kPageMapLargeObjectPart;
      break;
    default:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << " : " << "pm_idx=" << pm_idx
                 << ", pm_type=" << static_cast<int>(pm_type) << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      return 0;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_size_;
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }
  const size_t byte_size = num_pages * kPageSize;
  if (already_zero) {
    if (ShouldCheckZeroMemory()) {
      const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(ptr);
      for (size_t i = 0; i < byte_size / sizeof(uintptr_t); ++i) {
        CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
      }
    }
  } else if (!DoesReleaseAllPages()) {
    memset(ptr, 0, byte_size);
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << __PRETTY_FUNCTION__ << " : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + byte_size)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, byte_size);
  DCHECK(IsAligned<kPageSize>(fpr->ByteSize(this)));

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << __PRETTY_FUNCTION__ << " : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_size_ : ToPageMapIndex(fpr->End(this))) << "]";
    }
    auto higher_it = free_page_runs_.upper_bound(fpr);
    if (higher_it != free_page_runs_.end()) {
      for (auto it = higher_it; it != free_page_runs_.end(); ) {
        FreePageRun* h = *it;
        DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                    << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
        }
        if (fpr->End(this) == h->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          // Clear magic num since this is no longer the start of a free page run.
          if (kIsDebugBuild) {
            h->magic_num_ = 0;
          }
          free_page_runs_.erase(it++);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(h)
                      << " from free_page_runs_";
          }
          fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
          DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
      }
    }
    // Try to coalesce in the lower address direction.
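    // upper_bound(fpr) returns the first run above fpr, so stepping the iterator back once
    // yields the closest run below fpr, the only candidate for lower-direction coalescing.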
    auto lower_it = free_page_runs_.upper_bound(fpr);
    if (lower_it != free_page_runs_.begin()) {
      --lower_it;
      for (auto it = lower_it; ; ) {
        // We want to try to coalesce with the first element but
        // there's no "<=" operator for the iterator.
        bool to_exit_loop = it == free_page_runs_.begin();

        FreePageRun* l = *it;
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
                    << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                    << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
        }
        if (l->End(this) == fpr->Begin()) {
          if (kTraceRosAlloc) {
            LOG(INFO) << "Success";
          }
          free_page_runs_.erase(it--);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(l)
                      << " from free_page_runs_";
          }
          l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
          DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
          // Clear magic num since this is no longer the start of a free page run.
          if (kIsDebugBuild) {
            fpr->magic_num_ = 0;
          }
          fpr = l;
        } else {
          // Not adjacent. Stop.
          if (kTraceRosAlloc) {
            LOG(INFO) << "Fail";
          }
          break;
        }
        if (to_exit_loop) {
          break;
        }
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  DCHECK(fpr->IsFree());
  fpr->ReleasePages(this);
  DCHECK(fpr->IsFree());
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
  return byte_size;
}

void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated,
                                 size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK_GT(size, kLargeSizeThreshold);
  size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
  void* r;
  {
    MutexLock mu(self, lock_);
    r = AllocPages(self, num_pages, kPageMapLargeObject);
  }
  if (UNLIKELY(r == nullptr)) {
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : nullptr";
    }
    return nullptr;
  }
  const size_t total_bytes = num_pages * kPageSize;
  *bytes_allocated = total_bytes;
  *usable_size = total_bytes;
  *bytes_tl_bulk_allocated = total_bytes;
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
              << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }
  // Check if the returned memory is really all zero.
  if (ShouldCheckZeroMemory()) {
    CHECK_EQ(total_bytes % sizeof(uintptr_t), 0U);
    const uintptr_t* words = reinterpret_cast<uintptr_t*>(r);
    for (size_t i = 0; i < total_bytes / sizeof(uintptr_t); ++i) {
      CHECK_EQ(words[i], 0U);
    }
  }
  return r;
}

size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  Run* run = nullptr;
  {
    MutexLock mu(self, lock_);
    DCHECK_LT(pm_idx, page_map_size_);
    uint8_t page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_[pm_idx]) {
      case kPageMapLargeObject:
        return FreePages(self, ptr, false);
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
      case kPageMapRunPart: {
        // Find the beginning of the run.
        do {
          --pm_idx;
          DCHECK_LT(pm_idx, capacity_ / kPageSize);
        } while (page_map_[pm_idx] != kPageMapRun);
        FALLTHROUGH_INTENDED;
      case kPageMapRun:
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        break;
      case kPageMapReleased:
      case kPageMapEmpty:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
      }
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
    }
  }
  DCHECK(run != nullptr);
  return FreeFromRun(self, ptr, run);
}

size_t RosAlloc::Free(Thread* self, void* ptr) {
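  // Take the reader side so that individual frees from different threads may run concurrently;
  // the bulk free path (not shown in this excerpt) acquires this lock exclusively.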
  ReaderMutexLock rmu(self, bulk_free_lock_);
  return FreeInternal(self, ptr);
}

RosAlloc::Run* RosAlloc::AllocRun(Thread* self, size_t idx) {
  RosAlloc::Run* new_run = nullptr;
  {
    MutexLock mu(self, lock_);
    new_run = reinterpret_cast<Run*>(AllocPages(self, numOfPages[idx], kPageMapRun));
  }
  if (LIKELY(new_run != nullptr)) {
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    new_run->SetAllocBitMapBitsForInvalidSlots();
    DCHECK(!new_run->IsThreadLocal());
    DCHECK_EQ(new_run->first_search_vec_idx_, 0U);
    DCHECK(!new_run->to_be_bulk_freed_);
    if (kUsePrefetchDuringAllocRun && idx < kNumThreadLocalSizeBrackets) {
      // Take ownership of the cache lines if this is likely to become a thread-local run.
      if (kPrefetchNewRunDataByZeroing) {
        // Zeroing the data is sometimes faster than prefetching but it increases memory usage
        // since we end up dirtying zero pages which may have been madvised.
        new_run->ZeroData();
      } else {
        const size_t num_of_slots = numOfSlots[idx];
        const size_t bracket_size = bracketSizes[idx];
        const size_t num_of_bytes = num_of_slots * bracket_size;
        uint8_t* begin = reinterpret_cast<uint8_t*>(new_run) + headerSizes[idx];
        for (size_t i = 0; i < num_of_bytes; i += kPrefetchStride) {
          __builtin_prefetch(begin + i);
        }
      }
    }
  }
  return new_run;
}

RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  // Get the lowest address non-full run from the binary tree.
  auto* const bt = &non_full_runs_[idx];
  if (!bt->empty()) {
    // If there's one, use it as the current run.
    auto it = bt->begin();
    Run* non_full_run = *it;
    DCHECK(non_full_run != nullptr);
    DCHECK(!non_full_run->IsThreadLocal());
    bt->erase(it);
    return non_full_run;
  }
  // If there's none, allocate a new run and use it as the current run.
  return AllocRun(self, idx);
}

inline void* RosAlloc::AllocFromCurrentRunUnlocked(Thread* self, size_t idx) {
  Run* current_run = current_runs_[idx];
  DCHECK(current_run != nullptr);
  void* slot_addr = current_run->AllocSlot();
  if (UNLIKELY(slot_addr == nullptr)) {
    // The current run got full. Try to refill it.
    DCHECK(current_run->IsFull());
    if (kIsDebugBuild && current_run != dedicated_full_run_) {
      full_runs_[idx].insert(current_run);
      if (kTraceRosAlloc) {
        LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(current_run)
                  << " into full_runs_[" << std::dec << idx << "]";
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
    }
    current_run = RefillRun(self, idx);
    if (UNLIKELY(current_run == nullptr)) {
      // Failed to allocate a new run, make sure that it is the dedicated full run.
      current_runs_[idx] = dedicated_full_run_;
      return nullptr;
    }
    DCHECK(current_run != nullptr);
    DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
    current_run->SetIsThreadLocal(false);
    current_runs_[idx] = current_run;
    DCHECK(!current_run->IsFull());
    slot_addr = current_run->AllocSlot();
    // Must succeed now with a new run.
    DCHECK(slot_addr != nullptr);
  }
  return slot_addr;
}

void* RosAlloc::AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated,
                                         size_t* usable_size,
                                         size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK_LE(size, bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);
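  // Only called while the world is stopped, as asserted below, so no bracket lock is taken.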
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  void* slot_addr = AllocFromCurrentRunUnlocked(self, idx);
  if (LIKELY(slot_addr != nullptr)) {
    *bytes_allocated = bracket_size;
    *usable_size = bracket_size;
    *bytes_tl_bulk_allocated = bracket_size;
  }
  // Caller verifies that it is all 0.
  return slot_addr;
}

void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated,
                             size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  DCHECK_EQ(idx, SizeToIndex(size));
  DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
  DCHECK_EQ(bracket_size, bracketSizes[idx]);
  DCHECK_LE(size, bracket_size);
  DCHECK(size > 512 || bracket_size - size < 16);

  void* slot_addr;

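  // Small size brackets are served from a run cached on the Thread object, lock-free on the
  // fast path; larger brackets share current_runs_[idx] under the per-bracket lock.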
  if (LIKELY(idx < kNumThreadLocalSizeBrackets)) {
    // Use a thread-local run.
    Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
    // Allow invalid since this will always fail the allocation.
    if (kIsDebugBuild) {
      // Need the lock to prevent race conditions.
      MutexLock mu(self, *size_bracket_locks_[idx]);
      CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
    }
    DCHECK(thread_local_run != nullptr);
    DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
    slot_addr = thread_local_run->AllocSlot();
    // The allocation must fail if the run is invalid.
    DCHECK(thread_local_run != dedicated_full_run_ || slot_addr == nullptr)
        << "allocated from an invalid run";
    if (UNLIKELY(slot_addr == nullptr)) {
      // The run got full. Try to free slots.
      DCHECK(thread_local_run->IsFull());
      MutexLock mu(self, *size_bracket_locks_[idx]);
      bool is_all_free_after_merge;
      // This is safe to do for the dedicated_full_run_ since the bitmaps are empty.
      if (thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&is_all_free_after_merge)) {
        DCHECK_NE(thread_local_run, dedicated_full_run_);
        // Some slot got freed. Keep it.
        DCHECK(!thread_local_run->IsFull());
        DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree());
        if (is_all_free_after_merge) {
          // Check that the bitmap idx is back at 0 if it's all free.
          DCHECK_EQ(thread_local_run->first_search_vec_idx_, 0U);
        }
      } else {
        // No slots got freed. Try to refill the thread-local run.
        DCHECK(thread_local_run->IsFull());
        if (thread_local_run != dedicated_full_run_) {
          thread_local_run->SetIsThreadLocal(false);
          if (kIsDebugBuild) {
            full_runs_[idx].insert(thread_local_run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(thread_local_run)
                        << " into full_runs_[" << std::dec << idx << "]";
            }
          }
          DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
          DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end());
        }

        thread_local_run = RefillRun(self, idx);
        if (UNLIKELY(thread_local_run == nullptr)) {
          self->SetRosAllocRun(idx, dedicated_full_run_);
          return nullptr;
        }
        DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
        DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
        thread_local_run->SetIsThreadLocal(true);
        self->SetRosAllocRun(idx, thread_local_run);
        DCHECK(!thread_local_run->IsFull());
      }
      DCHECK(thread_local_run != nullptr);
      DCHECK(!thread_local_run->IsFull());
      DCHECK(thread_local_run->IsThreadLocal());
      // Account for all the free slots in the new or refreshed thread local run.
      *bytes_tl_bulk_allocated = thread_local_run->NumberOfFreeSlots() * bracket_size;
      slot_addr = thread_local_run->AllocSlot();
      // Must succeed now with a new run.
      DCHECK(slot_addr != nullptr);
    } else {
      // The slot is already counted. Leave it as is.
      *bytes_tl_bulk_allocated = 0;
    }
    DCHECK(slot_addr != nullptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
    *bytes_allocated = bracket_size;
    *usable_size = bracket_size;
  } else {
    // Use the (shared) current run.
    MutexLock mu(self, *size_bracket_locks_[idx]);
    slot_addr = AllocFromCurrentRunUnlocked(self, idx);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex
                << reinterpret_cast<intptr_t>(slot_addr)
                << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
                << "(" << std::dec << (bracket_size) << ")";
    }
    if (LIKELY(slot_addr != nullptr)) {
      *bytes_allocated = bracket_size;
      *usable_size = bracket_size;
      *bytes_tl_bulk_allocated = bracket_size;
    }
  }
  // Caller verifies that it is all 0.
  return slot_addr;
}

size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
  DCHECK_EQ(run->magic_num_, kMagicNum);
  DCHECK_LT(run, ptr);
  DCHECK_LT(ptr, run->End());
  const size_t idx = run->size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  bool run_was_full = false;
  MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
  if (kIsDebugBuild) {
    run_was_full = run->IsFull();
  }
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr);
  }
  if (LIKELY(run->IsThreadLocal())) {
    // It's a thread-local run. Just mark the thread-local free bit map and return.
    DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->MarkThreadLocalFreeBitMap(ptr);
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run);
    }
    // A thread local run will be kept as a thread local even if it's become all free.
    return bracket_size;
  }
  // Free the slot in the run.
  run->FreeSlot(ptr);
  auto* non_full_runs = &non_full_runs_[idx];
  if (run->IsAllFree()) {
    // It has just become completely free. Free the pages of this run.
    std::set<Run*>::iterator pos = non_full_runs->find(run);
    if (pos != non_full_runs->end()) {
      non_full_runs->erase(pos);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run) << " from non_full_runs_";
      }
    }
    if (run == current_runs_[idx]) {
      current_runs_[idx] = dedicated_full_run_;
    }
    DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
    run->ZeroHeader();
    {
      MutexLock lock_mu(self, lock_);
      FreePages(self, run, true);
    }
  } else {
    // It is not completely free. If it is neither the current run nor
    // already in the non-full run set (i.e., it was full), insert it
    // into the non-full run set.
    if (run != current_runs_[idx]) {
      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
      auto pos = non_full_runs->find(run);
      if (pos == non_full_runs->end()) {
        DCHECK(run_was_full);
        DCHECK(full_runs->find(run) != full_runs->end());
        if (kIsDebugBuild) {
          full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run) << " from full_runs_";
          }
        }
        non_full_runs->insert(run);
        DCHECK(!run->IsFull());
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(run)
                    << " into non_full_runs_[" << std::dec << idx << "]";
        }
      }
    }
  }
  return bracket_size;
}

std::string RosAlloc::Run::BitMapToStr(uint32_t* bit_map_base, size_t num_vec) {
  std::string bit_map_str;
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = bit_map_base[v];
    if (v != num_vec - 1) {
      bit_map_str.append(StringPrintf("%x-", vec));
    } else {
      bit_map_str.append(StringPrintf("%x", vec));
    }
  }
  return bit_map_str;
}

std::string RosAlloc::Run::Dump() {
  size_t idx = size_bracket_idx_;
  size_t num_slots = numOfSlots[idx];
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  std::ostringstream stream;
  stream << "RosAlloc Run = " << reinterpret_cast<void*>(this)
         << "{ magic_num=" << static_cast<int>(magic_num_)
         << " size_bracket_idx=" << idx
         << " is_thread_local=" << static_cast<int>(is_thread_local_)
         << " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_)
         << " first_search_vec_idx=" << first_search_vec_idx_
         << " alloc_bit_map=" << BitMapToStr(alloc_bit_map_, num_vec)
         << " bulk_free_bit_map=" << BitMapToStr(BulkFreeBitMap(), num_vec)
         << " thread_local_bit_map=" << BitMapToStr(ThreadLocalFreeBitMap(), num_vec)
         << " }" << std::endl;
  return stream.str();
}

void RosAlloc::Run::FreeSlot(void* ptr) {
  DCHECK(!IsThreadLocal());
  const uint8_t idx = size_bracket_idx_;
  const size_t bracket_size = bracketSizes[idx];
  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
      - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
  DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracket_size;
  DCHECK_LT(slot_idx, numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
    DCHECK_LT(vec_idx, num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &alloc_bit_map_[vec_idx];
  first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(vec_idx));
  const uint32_t mask = 1U << vec_off;
  DCHECK_NE(*vec & mask, 0U);
  *vec &= ~mask;
  DCHECK_EQ(*vec & mask, 0U);
  // Zero out the memory.
  // TODO: Investigate alternate memset since ptr is guaranteed to be aligned to 16.
  memset(ptr, 0, bracket_size);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::FreeSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
}

size_t RosAlloc::Run::NumberOfFreeSlots() {
  size_t num_alloc_slots = 0;
  const size_t idx = size_bracket_idx_;
  const size_t num_slots = numOfSlots[idx];
  const size_t num_vec = RoundUp(num_slots, 32) / 32;
  DCHECK_NE(num_vec, 0U);
  for (size_t v = 0; v < num_vec - 1; v++) {
    num_alloc_slots += POPCOUNT(alloc_bit_map_[v]);
  }
  // Don't count the invalid bits in the last vector.
  uint32_t last_vec_masked = alloc_bit_map_[num_vec - 1] &
      ~GetBitmapLastVectorMask(num_slots, num_vec);
  num_alloc_slots += POPCOUNT(last_vec_masked);
  size_t num_free_slots = num_slots - num_alloc_slots;
  DCHECK_LE(num_alloc_slots, num_slots);
  DCHECK_LE(num_free_slots, num_slots);
  return num_free_slots;
}

inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out) {
  DCHECK(IsThreadLocal());
  // Free slots in the alloc bit map based on the thread local free bit map.
  const size_t idx = size_bracket_idx_;
  const size_t num_of_slots = numOfSlots[idx];
  const size_t num_vec = RoundUp(num_of_slots, 32) / 32;
  bool changed = false;
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* tl_free_vecp = &ThreadLocalFreeBitMap()[0];
  bool is_all_free_after = true;
  for (size_t v = 0; v < num_vec; v++, vecp++, tl_free_vecp++) {
    uint32_t tl_free_vec = *tl_free_vecp;
    uint32_t vec_before = *vecp;
    uint32_t vec_after;
    if (tl_free_vec != 0) {
      first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(v));
      vec_after = vec_before & ~tl_free_vec;
      *vecp = vec_after;
      changed = true;
      *tl_free_vecp = 0;  // clear the thread local free bit map.
    } else {
      vec_after = vec_before;
    }
    if (vec_after != 0) {
      if (v == num_vec - 1) {
        // The run is still all free only if no bits other than the mask bits are set.
        is_all_free_after =
            is_all_free_after && GetBitmapLastVectorMask(num_of_slots, num_vec) == vec_after;
      } else {
        is_all_free_after = false;
      }
    }
    DCHECK_EQ(*tl_free_vecp, static_cast<uint32_t>(0));
  }
  *is_all_free_after_out = is_all_free_after;
  // Return true if there was at least a bit set in the thread-local
  // free bit map and at least a bit in the alloc bit map changed.
  return changed;
}

inline void RosAlloc::Run::MergeBulkFreeBitMapIntoAllocBitMap() {
  DCHECK(!IsThreadLocal());
  // Free slots in the alloc bit map based on the bulk free bit map.
  const size_t num_vec = NumberOfBitmapVectors();
  uint32_t* vecp = &alloc_bit_map_[0];
  uint32_t* free_vecp = &BulkFreeBitMap()[0];
  for (size_t v = 0; v < num_vec; v++, vecp++, free_vecp++) {
    uint32_t free_vec = *free_vecp;
    if (free_vec != 0) {
      first_search_vec_idx_ = std::min(first_search_vec_idx_, static_cast<uint32_t>(v));
      *vecp &= ~free_vec;
      *free_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*free_vecp, static_cast<uint32_t>(0));
  }
}

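// For a thread-local run, a bulk free must not clear the alloc bit map directly; the freed
// bits are accumulated in the thread-local free bit map instead and merged back by the owning
// thread in MergeThreadLocalFreeBitMapToAllocBitMap() above.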
inline void RosAlloc::Run::UnionBulkFreeBitMapToThreadLocalFreeBitMap() {
  DCHECK(IsThreadLocal());
  // Union the thread local bit map with the bulk free bit map.
  size_t num_vec = NumberOfBitmapVectors();
  uint32_t* to_vecp = &ThreadLocalFreeBitMap()[0];
  uint32_t* from_vecp = &BulkFreeBitMap()[0];
  for (size_t v = 0; v < num_vec; v++, to_vecp++, from_vecp++) {
    uint32_t from_vec = *from_vecp;
    if (from_vec != 0) {
      *to_vecp |= from_vec;
      *from_vecp = 0;  // clear the bulk free bit map.
    }
    DCHECK_EQ(*from_vecp, static_cast<uint32_t>(0));
  }
}

inline void RosAlloc::Run::MarkThreadLocalFreeBitMap(void* ptr) {
  DCHECK(IsThreadLocal());
  MarkFreeBitMapShared(ptr, ThreadLocalFreeBitMap(), "MarkThreadLocalFreeBitMap");
}

inline size_t RosAlloc::Run::MarkBulkFreeBitMap(void* ptr) {
  return MarkFreeBitMapShared(ptr, BulkFreeBitMap(), "MarkBulkFreeBitMap");
}

inline size_t RosAlloc::Run::MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base,
                                                  const char* caller_name) {
  const uint8_t idx = size_bracket_idx_;
  const size_t offset_from_slot_base = reinterpret_cast<uint8_t*>(ptr)
      - (reinterpret_cast<uint8_t*>(this) + headerSizes[idx]);
  const size_t bracket_size = bracketSizes[idx];
  memset(ptr, 0, bracket_size);
  DCHECK_EQ(offset_from_slot_base % bracket_size, static_cast<size_t>(0));
  size_t slot_idx = offset_from_slot_base / bracket_size;
  DCHECK_LT(slot_idx, numOfSlots[idx]);
  size_t vec_idx = slot_idx / 32;
  if (kIsDebugBuild) {
    size_t num_vec = NumberOfBitmapVectors();
    DCHECK_LT(vec_idx, num_vec);
  }
  size_t vec_off = slot_idx % 32;
  uint32_t* vec = &free_bit_map_base[vec_idx];
  const uint32_t mask = 1U << vec_off;
  DCHECK_EQ(*vec & mask, 0U);
  *vec |= mask;
  DCHECK_NE(*vec & mask, 0U);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : 0x" << std::hex
              << reinterpret_cast<intptr_t>(ptr)
              << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
  }
  return bracket_size;
}

1047inline uint32_t RosAlloc::Run::GetBitmapLastVectorMask(size_t num_slots, size_t num_vec) {
1048 const size_t kBitsPerVec = 32;
Dan Alberte48b29b2015-04-16 11:50:30 -07001049 DCHECK_GE(num_vec * kBitsPerVec, num_slots);
1050 DCHECK_NE(num_vec, 0U);
Mathieu Chartier73d1e172014-04-11 17:53:48 -07001051 size_t remain = num_vec * kBitsPerVec - num_slots;
Dan Alberte48b29b2015-04-16 11:50:30 -07001052 DCHECK_LT(remain, kBitsPerVec);
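  // The mask has one bit set for each of the |remain| trailing (invalid)
  // slots. E.g. num_slots = 100 with num_vec = 4 gives remain = 28 and a mask
  // of 0xFFFFFFF0: bits 4..31 of the last vector, i.e. slots 100..127.
  // The "& 0x1F" clamps the shift amount so that remain == 0 shifts by 0
  // rather than by 32, which would be undefined behavior.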
  return ((1U << remain) - 1) << ((kBitsPerVec - remain) & 0x1F);
}

inline bool RosAlloc::Run::IsAllFree() {
  const uint8_t idx = size_bracket_idx_;
  const size_t num_slots = numOfSlots[idx];
  const size_t num_vec = NumberOfBitmapVectors();
  DCHECK_NE(num_vec, 0U);
  // Check the last vector after the loop since it uses a special case for the masked bits.
  for (size_t v = 0; v < num_vec - 1; v++) {
    uint32_t vec = alloc_bit_map_[v];
    if (vec != 0) {
      return false;
    }
  }
  // Make sure the last word is equal to the mask, all other bits must be 0.
  return alloc_bit_map_[num_vec - 1] == GetBitmapLastVectorMask(num_slots, num_vec);
}

inline bool RosAlloc::Run::IsBulkFreeBitmapClean() {
  const size_t num_vec = NumberOfBitmapVectors();
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = BulkFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline bool RosAlloc::Run::IsThreadLocalFreeBitmapClean() {
  const size_t num_vec = NumberOfBitmapVectors();
  for (size_t v = 0; v < num_vec; v++) {
    uint32_t vec = ThreadLocalFreeBitMap()[v];
    if (vec != 0) {
      return false;
    }
  }
  return true;
}

inline void RosAlloc::Run::SetAllocBitMapBitsForInvalidSlots() {
  const size_t idx = size_bracket_idx_;
  const size_t num_slots = numOfSlots[idx];
  const size_t num_vec = RoundUp(num_slots, 32) / 32;
  DCHECK_NE(num_vec, 0U);
  // Make sure to set the bits at the end of the bitmap so that we don't allocate there since they
  // don't represent valid slots.
  alloc_bit_map_[num_vec - 1] |= GetBitmapLastVectorMask(num_slots, num_vec);
}

inline void RosAlloc::Run::ZeroHeader() {
  const uint8_t idx = size_bracket_idx_;
  memset(this, 0, headerSizes[idx]);
}

inline void RosAlloc::Run::ZeroData() {
  const uint8_t idx = size_bracket_idx_;
  uint8_t* slot_begin = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
  memset(slot_begin, 0, numOfSlots[idx] * bracketSizes[idx]);
}

inline void RosAlloc::Run::FillAllocBitMap() {
  size_t num_vec = NumberOfBitmapVectors();
  memset(alloc_bit_map_, 0xFF, sizeof(uint32_t) * num_vec);
  first_search_vec_idx_ = num_vec - 1;  // No free bits in any of the bitmap words.
}

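// Calls |handler| once per slot in this run: used_bytes is the bracket size
// for allocated slots and 0 for free ones. Slots freed only in the thread
// local free bit map are reported as free.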
void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                                    void* arg) {
  size_t idx = size_bracket_idx_;
  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
  size_t num_slots = numOfSlots[idx];
  size_t bracket_size = IndexToBracketSize(idx);
  DCHECK_EQ(slot_base + num_slots * bracket_size,
            reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize);
  size_t num_vec = RoundUp(num_slots, 32) / 32;
  size_t slots = 0;
  const uint32_t* const tl_free_vecp = IsThreadLocal() ? ThreadLocalFreeBitMap() : nullptr;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK_GE(num_slots, slots);
    uint32_t vec = alloc_bit_map_[v];
    if (tl_free_vecp != nullptr) {
      // Clear out the set bits in the thread local free bitmap since these aren't actually
      // allocated.
      vec &= ~tl_free_vecp[v];
    }
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
      if (is_allocated) {
        handler(slot_addr, slot_addr + bracket_size, bracket_size, arg);
      } else {
        handler(slot_addr, slot_addr + bracket_size, 0, arg);
      }
    }
  }
}

// If true, read the page map entries in BulkFree() without using the
// lock for better performance, assuming that the existence of an
// allocated chunk/pointer being freed in BulkFree() guarantees that
// the page map entry won't change.
static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = true;

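// Frees the |num_ptrs| pointers in |ptrs| and returns the total number of
// bytes freed. Slots are first marked in the per-run bulk free bit maps;
// each affected run is then processed exactly once, so a size bracket lock
// is taken per run rather than per pointer.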
size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
  size_t freed_bytes = 0;
  if ((false)) {
    // Used only to test Free() as GC uses only BulkFree().
    for (size_t i = 0; i < num_ptrs; ++i) {
      freed_bytes += FreeInternal(self, ptrs[i]);
    }
    return freed_bytes;
  }

  WriterMutexLock wmu(self, bulk_free_lock_);

  // First mark slots to free in the bulk free bit map without locking the
  // size bracket locks. On host, unordered_set is faster than vector + flag.
#ifdef HAVE_ANDROID_OS
  std::vector<Run*> runs;
#else
  std::unordered_set<Run*, hash_run, eq_run> runs;
#endif
  for (size_t i = 0; i < num_ptrs; i++) {
    void* ptr = ptrs[i];
    DCHECK_LE(base_, ptr);
    DCHECK_LT(ptr, base_ + footprint_);
    size_t pm_idx = RoundDownToPageMapIndex(ptr);
    Run* run = nullptr;
    if (kReadPageMapEntryWithoutLockInBulkFree) {
      // Read the page map entries without locking the lock.
      uint8_t page_map_entry = page_map_[pm_idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                  << std::dec << pm_idx
                  << ", page_map_entry=" << static_cast<int>(page_map_entry);
      }
      if (LIKELY(page_map_entry == kPageMapRun)) {
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
        size_t pi = pm_idx;
        // Find the beginning of the run.
        do {
          --pi;
          DCHECK_LT(pi, capacity_ / kPageSize);
        } while (page_map_[pi] != kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
      } else if (page_map_entry == kPageMapLargeObject) {
        MutexLock mu(self, lock_);
        freed_bytes += FreePages(self, ptr, false);
        continue;
      } else {
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
      }
    } else {
      // Read the page map entries with a lock.
      MutexLock mu(self, lock_);
      DCHECK_LT(pm_idx, page_map_size_);
      uint8_t page_map_entry = page_map_[pm_idx];
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
                  << std::dec << pm_idx
                  << ", page_map_entry=" << static_cast<int>(page_map_entry);
      }
      if (LIKELY(page_map_entry == kPageMapRun)) {
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      } else if (LIKELY(page_map_entry == kPageMapRunPart)) {
        size_t pi = pm_idx;
        // Find the beginning of the run.
        do {
          --pi;
          DCHECK_LT(pi, capacity_ / kPageSize);
        } while (page_map_[pi] != kPageMapRun);
        run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
      } else if (page_map_entry == kPageMapLargeObject) {
        freed_bytes += FreePages(self, ptr, false);
        continue;
      } else {
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry);
      }
    }
    DCHECK(run != nullptr);
    DCHECK_EQ(run->magic_num_, kMagicNum);
    // Set the bit in the bulk free bit map.
    freed_bytes += run->MarkBulkFreeBitMap(ptr);
#ifdef HAVE_ANDROID_OS
    if (!run->to_be_bulk_freed_) {
      run->to_be_bulk_freed_ = true;
      runs.push_back(run);
    }
#else
    runs.insert(run);
#endif
  }

  // Now, iterate over the affected runs and update the alloc bit map
  // based on the bulk free bit map (for non-thread-local runs) and
  // union the bulk free bit map into the thread-local free bit map
  // (for thread-local runs.)
  for (Run* run : runs) {
#ifdef HAVE_ANDROID_OS
    DCHECK(run->to_be_bulk_freed_);
    run->to_be_bulk_freed_ = false;
#endif
    size_t idx = run->size_bracket_idx_;
    MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
    if (run->IsThreadLocal()) {
      DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets);
      DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end());
      run->UnionBulkFreeBitMapToThreadLocalFreeBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(run);
      }
      DCHECK(run->IsThreadLocal());
      // A thread local run will be kept as a thread local even if
      // it's become all free.
    } else {
      bool run_was_full = run->IsFull();
      run->MergeBulkFreeBitMapIntoAllocBitMap();
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run);
      }
      // Check if the run should be moved to non_full_runs_ or
      // free_page_runs_.
      auto* non_full_runs = &non_full_runs_[idx];
      auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr;
      if (run->IsAllFree()) {
        // It has just become completely free. Free the pages of the
        // run.
        bool run_was_current = run == current_runs_[idx];
        if (run_was_current) {
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          // If it was a current run, reuse it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only.)
          if (kIsDebugBuild) {
            std::unordered_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
            DCHECK(pos != full_runs->end());
            full_runs->erase(pos);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
            DCHECK(full_runs->find(run) == full_runs->end());
          }
        } else {
          // If it was in a non full run set, remove it from the set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
          non_full_runs->erase(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
                      << " from non_full_runs_";
          }
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
        }
        if (!run_was_current) {
          run->ZeroHeader();
          MutexLock lock_mu(self, lock_);
          FreePages(self, run, true);
        }
      } else {
        // It is not completely free. If it wasn't the current run or
        // already in the non-full run set (i.e., it was full) insert
        // it into the non-full run set.
        if (run == current_runs_[idx]) {
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          DCHECK(full_runs->find(run) == full_runs->end());
          // If it was a current run, keep it.
        } else if (run_was_full) {
          // If it was full, remove it from the full run set (debug
          // only) and insert into the non-full run set.
          DCHECK(full_runs->find(run) != full_runs->end());
          DCHECK(non_full_runs->find(run) == non_full_runs->end());
          if (kIsDebugBuild) {
            full_runs->erase(run);
            if (kTraceRosAlloc) {
              LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex
                        << reinterpret_cast<intptr_t>(run)
                        << " from full_runs_";
            }
          }
          non_full_runs->insert(run);
          if (kTraceRosAlloc) {
            LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex
                      << reinterpret_cast<intptr_t>(run)
1348 << " into non_full_runs_[" << std::dec << idx;
          }
        } else {
          // It was not full, so leave it in the non-full run set.
          DCHECK(full_runs->find(run) == full_runs->end());
          DCHECK(non_full_runs->find(run) != non_full_runs->end());
        }
      }
    }
  }
  return freed_bytes;
}

std::string RosAlloc::DumpPageMap() {
  std::ostringstream stream;
  stream << "RosAlloc PageMap: " << std::endl;
  lock_.AssertHeld(Thread::Current());
  size_t end = page_map_size_;
  FreePageRun* curr_fpr = nullptr;
  size_t curr_fpr_size = 0;
  size_t remaining_curr_fpr_size = 0;
  size_t num_running_empty_pages = 0;
  for (size_t i = 0; i < end; ++i) {
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
        // Fall-through.
      case kPageMapEmpty: {
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
          // Encountered a fresh free page run.
          DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
          DCHECK(fpr->IsFree());
          DCHECK(curr_fpr == nullptr);
          DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0));
          curr_fpr = fpr;
          curr_fpr_size = fpr->ByteSize(this);
          DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0));
          remaining_curr_fpr_size = curr_fpr_size - kPageSize;
          stream << "[" << i << "]=" << (pm == kPageMapReleased ? "Released" : "Empty")
                 << " (FPR start) fpr_size=" << curr_fpr_size
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = nullptr;
            curr_fpr_size = 0;
          }
          stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl;
          DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0));
        } else {
          // Still part of the current free page run.
          DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0));
          DCHECK(curr_fpr != nullptr && curr_fpr_size > 0 && remaining_curr_fpr_size > 0);
          DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0));
          DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize));
          remaining_curr_fpr_size -= kPageSize;
          stream << "[" << i << "]=Empty (FPR part)"
                 << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl;
          if (remaining_curr_fpr_size == 0) {
            // Reset at the end of the current free page run.
            curr_fpr = nullptr;
            curr_fpr_size = 0;
          }
        }
        num_running_empty_pages++;
        break;
      }
      case kPageMapLargeObject: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (start)" << std::endl;
        break;
      }
      case kPageMapLargeObjectPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Large (part)" << std::endl;
        break;
      case kPageMapRun: {
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        size_t idx = run->size_bracket_idx_;
        stream << "[" << i << "]=Run (start)"
               << " idx=" << idx
               << " numOfPages=" << numOfPages[idx]
               << " is_thread_local=" << run->is_thread_local_
               << " is_all_free=" << (run->IsAllFree() ? 1 : 0)
               << std::endl;
        break;
      }
      case kPageMapRunPart:
        DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0));
        num_running_empty_pages = 0;
        stream << "[" << i << "]=Run (part)" << std::endl;
        break;
      default:
        stream << "[" << i << "]=Unrecognizable page map type: " << static_cast<int>(pm);
        break;
    }
  }
  return stream.str();
}

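// Returns the number of usable bytes for the allocation at |ptr|: the full
// page span for a large object, or the bracket size for a run slot.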
size_t RosAlloc::UsableSize(const void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  MutexLock mu(Thread::Current(), lock_);
  switch (page_map_[pm_idx]) {
    case kPageMapReleased:
      // Fall-through.
    case kPageMapEmpty:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
                 << std::hex << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapLargeObject: {
      size_t num_pages = 1;
      size_t idx = pm_idx + 1;
      size_t end = page_map_size_;
      while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) {
        num_pages++;
        idx++;
      }
      return num_pages * kPageSize;
    }
    case kPageMapLargeObjectPart:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr="
                 << std::hex << reinterpret_cast<intptr_t>(ptr);
      break;
    case kPageMapRun:
    case kPageMapRunPart: {
      // Find the beginning of the run.
      while (page_map_[pm_idx] != kPageMapRun) {
        pm_idx--;
        DCHECK_LT(pm_idx, capacity_ / kPageSize);
      }
      DCHECK_EQ(page_map_[pm_idx], kPageMapRun);
      Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
      DCHECK_EQ(run->magic_num_, kMagicNum);
      size_t idx = run->size_bracket_idx_;
      size_t offset_from_slot_base = reinterpret_cast<const uint8_t*>(ptr)
          - (reinterpret_cast<uint8_t*>(run) + headerSizes[idx]);
      DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
      return IndexToBracketSize(idx);
    }
    default: {
      LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
      break;
    }
  }
  return 0;
}

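// Trims the heap by releasing the last free page run back to the OS, if one
// ends exactly at the current footprint. Returns true if the footprint was
// reduced.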
bool RosAlloc::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  FreePageRun* last_free_page_run;
  DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0));
  auto it = free_page_runs_.rbegin();
  if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
    // Remove the last free page run, if any.
    DCHECK(last_free_page_run->IsFree());
    DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
    DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
    DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
    free_page_runs_.erase(last_free_page_run);
    size_t decrement = last_free_page_run->ByteSize(this);
    size_t new_footprint = footprint_ - decrement;
    DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0));
    size_t new_num_of_pages = new_footprint / kPageSize;
    DCHECK_GE(page_map_size_, new_num_of_pages);
    // Zero out the tail of the page map.
    uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages;
    uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize);
    DCHECK_LE(madvise_begin, page_map_mem_map_->End());
    size_t madvise_size = page_map_mem_map_->End() - madvise_begin;
    if (madvise_size > 0) {
      DCHECK_ALIGNED(madvise_begin, kPageSize);
      DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size);
      if (!kMadviseZeroes) {
        memset(madvise_begin, 0, madvise_size);
      }
      CHECK_EQ(madvise(madvise_begin, madvise_size, MADV_DONTNEED), 0);
    }
    if (madvise_begin > zero_begin) {
      memset(zero_begin, 0, madvise_begin - zero_begin);
    }
    page_map_size_ = new_num_of_pages;
    free_page_run_size_map_.resize(new_num_of_pages);
    DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages);
    ArtRosAllocMoreCore(this, -(static_cast<intptr_t>(decrement)));
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from "
                << footprint_ << " to " << new_footprint;
    }
    DCHECK_LT(new_footprint, footprint_);
    DCHECK_LT(new_footprint, capacity_);
    footprint_ = new_footprint;
    return true;
  }
  return false;
}

void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
                          void* arg) {
  // Note: no need to use this to release pages as we already do so in FreePages().
  if (handler == nullptr) {
    return;
  }
  MutexLock mu(Thread::Current(), lock_);
  size_t pm_end = page_map_size_;
  size_t i = 0;
  while (i < pm_end) {
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
        // Fall-through.
      case kPageMapEmpty: {
        // The start of a free page run.
        FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
        DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
        size_t fpr_size = fpr->ByteSize(this);
        DCHECK(IsAligned<kPageSize>(fpr_size));
        void* start = fpr;
        if (kIsDebugBuild) {
          // In the debug build, the first page of a free page run
          // contains a magic number for debugging. Exclude it.
          start = reinterpret_cast<uint8_t*>(fpr) + kPageSize;
        }
        void* end = reinterpret_cast<uint8_t*>(fpr) + fpr_size;
        handler(start, end, 0, arg);
        size_t num_pages = fpr_size / kPageSize;
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK(IsFreePage(j));
          }
        }
        i += fpr_size / kPageSize;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObject: {
        // The start of a large object.
        size_t num_pages = 1;
        size_t idx = i + 1;
        while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
          num_pages++;
          idx++;
        }
        void* start = base_ + i * kPageSize;
        void* end = base_ + (i + num_pages) * kPageSize;
        size_t used_bytes = num_pages * kPageSize;
        handler(start, end, used_bytes, arg);
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
        break;
      case kPageMapRun: {
        // The start of a run.
        Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        // The dedicated full run doesn't contain any real allocations, don't visit the slots in
        // there.
        run->InspectAllSlots(handler, arg);
        size_t num_pages = numOfPages[run->size_bracket_idx_];
        if (kIsDebugBuild) {
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            DCHECK_EQ(page_map_[j], kPageMapRunPart);
          }
        }
        i += num_pages;
        DCHECK_LE(i, pm_end);
        break;
      }
      case kPageMapRunPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
        break;
    }
  }
}

size_t RosAlloc::Footprint() {
  MutexLock mu(Thread::Current(), lock_);
  return footprint_;
}

size_t RosAlloc::FootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return capacity_;
}

void RosAlloc::SetFootprintLimit(size_t new_capacity) {
  MutexLock mu(Thread::Current(), lock_);
  DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity);
  // Only growing is supported here. But Trim() is supported.
  if (capacity_ < new_capacity) {
    CHECK_LE(new_capacity, max_capacity_);
    capacity_ = new_capacity;
    VLOG(heap) << "new capacity=" << capacity_;
  }
}

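// Revokes |thread|'s thread-local runs so other threads can use them, and
// returns the number of free bytes in the revoked runs.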
size_t RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
  Thread* self = Thread::Current();
  // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
  ReaderMutexLock wmu(self, bulk_free_lock_);
  size_t free_bytes = 0U;
  for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
    MutexLock mu(self, *size_bracket_locks_[idx]);
    Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
    CHECK(thread_local_run != nullptr);
    // Invalid means already revoked.
    DCHECK(thread_local_run->IsThreadLocal());
    if (thread_local_run != dedicated_full_run_) {
      // Note the thread local run may not be full here.
      thread->SetRosAllocRun(idx, dedicated_full_run_);
      DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
      // Count the number of free slots left.
      size_t num_free_slots = thread_local_run->NumberOfFreeSlots();
      free_bytes += num_free_slots * bracketSizes[idx];
      bool dont_care;
      thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&dont_care);
      thread_local_run->SetIsThreadLocal(false);
      thread_local_run->MergeBulkFreeBitMapIntoAllocBitMap();
      DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
      RevokeRun(self, idx, thread_local_run);
    }
  }
  return free_bytes;
}

void RosAlloc::RevokeRun(Thread* self, size_t idx, Run* run) {
  size_bracket_locks_[idx]->AssertHeld(self);
  DCHECK(run != dedicated_full_run_);
  if (run->IsFull()) {
    if (kIsDebugBuild) {
      full_runs_[idx].insert(run);
      DCHECK(full_runs_[idx].find(run) != full_runs_[idx].end());
      if (kTraceRosAlloc) {
        LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(run)
                  << " into full_runs_[" << std::dec << idx << "]";
      }
    }
  } else if (run->IsAllFree()) {
    run->ZeroHeader();
    MutexLock mu(self, lock_);
    FreePages(self, run, true);
  } else {
    non_full_runs_[idx].insert(run);
    DCHECK(non_full_runs_[idx].find(run) != non_full_runs_[idx].end());
    if (kTraceRosAlloc) {
      LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                << reinterpret_cast<intptr_t>(run)
                << " into non_full_runs_[" << std::dec << idx << "]";
    }
  }
}

void RosAlloc::RevokeThreadUnsafeCurrentRuns() {
  // Revoke the current runs which share the same idx as thread local runs.
  Thread* self = Thread::Current();
  for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
    MutexLock mu(self, *size_bracket_locks_[idx]);
    if (current_runs_[idx] != dedicated_full_run_) {
      RevokeRun(self, idx, current_runs_[idx]);
      current_runs_[idx] = dedicated_full_run_;
    }
  }
}

size_t RosAlloc::RevokeAllThreadLocalRuns() {
  // This is called when a mutator thread won't allocate such as at
  // the Zygote creation time or during the GC pause.
  MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
  MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  size_t free_bytes = 0U;
  for (Thread* thread : thread_list) {
    free_bytes += RevokeThreadLocalRuns(thread);
  }
  RevokeThreadUnsafeCurrentRuns();
  return free_bytes;
}

void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    // Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
    ReaderMutexLock wmu(self, bulk_free_lock_);
    for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
      MutexLock mu(self, *size_bracket_locks_[idx]);
      Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
      DCHECK(thread_local_run == nullptr || thread_local_run == dedicated_full_run_);
    }
  }
}

void RosAlloc::AssertAllThreadLocalRunsAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock shutdown_mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* t : thread_list) {
      AssertThreadLocalRunsAreRevoked(t);
    }
    for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) {
      MutexLock brackets_mu(self, *size_bracket_locks_[idx]);
      CHECK_EQ(current_runs_[idx], dedicated_full_run_);
    }
  }
}

void RosAlloc::Initialize() {
  // bracketSizes.
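  // With kNumOfSizeBrackets == 34 (implied by the DCHECKs in this function),
  // the brackets are 16, 32, ..., 512 bytes in 16-byte steps, then 1 KB and
  // 2 KB for the last two.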
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 16 * (i + 1);
    } else if (i == kNumOfSizeBrackets - 2) {
      bracketSizes[i] = 1 * KB;
    } else {
      DCHECK_EQ(i, kNumOfSizeBrackets - 1);
      bracketSizes[i] = 2 * KB;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i];
    }
  }
  // numOfPages.
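  // Note that the first two branches below both assign one page; they are
  // presumably kept separate so the run sizes of the smallest brackets can
  // be tuned independently.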
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    if (i < 4) {
      numOfPages[i] = 1;
    } else if (i < 8) {
      numOfPages[i] = 1;
    } else if (i < 16) {
      numOfPages[i] = 4;
    } else if (i < 32) {
      numOfPages[i] = 8;
    } else if (i == 32) {
      DCHECK_EQ(i, kNumOfSizeBrackets - 2);
      numOfPages[i] = 16;
    } else {
      DCHECK_EQ(i, kNumOfSizeBrackets - 1);
      numOfPages[i] = 32;
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i];
    }
  }
  // Compute numOfSlots and slotOffsets.
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_t bracket_size = bracketSizes[i];
    size_t run_size = kPageSize * numOfPages[i];
    size_t max_num_of_slots = run_size / bracket_size;
    // Compute the actual number of slots by taking the header and
    // alignment into account.
    size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint32_t));
    DCHECK_EQ(fixed_header_size, static_cast<size_t>(8));
    size_t header_size = 0;
    size_t bulk_free_bit_map_offset = 0;
    size_t thread_local_free_bit_map_offset = 0;
    size_t num_of_slots = 0;
    // Search for the maximum number of slots that allows enough space
    // for the header (including the bit maps.)
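    // The run header layout computed below is: fixed header, alloc bit map,
    // bulk free bit map, thread-local free bit map, then padding up to a
    // multiple of bracket_size so that the slots start on a slot boundary.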
    for (int s = max_num_of_slots; s >= 0; s--) {
      size_t tmp_slots_size = bracket_size * s;
      size_t tmp_bit_map_size = RoundUp(s, sizeof(uint32_t) * kBitsPerByte) / kBitsPerByte;
      size_t tmp_bulk_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_bulk_free_bit_map_off = fixed_header_size + tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_size = tmp_bit_map_size;
      size_t tmp_thread_local_free_bit_map_off = tmp_bulk_free_bit_map_off + tmp_bulk_free_bit_map_size;
      size_t tmp_unaligned_header_size = tmp_thread_local_free_bit_map_off + tmp_thread_local_free_bit_map_size;
      // Align up the unaligned header size. bracket_size may not be a power of two.
      size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ?
          tmp_unaligned_header_size :
          tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size);
      DCHECK_EQ(tmp_header_size % bracket_size, static_cast<size_t>(0));
      DCHECK_EQ(tmp_header_size % 8, static_cast<size_t>(0));
      if (tmp_slots_size + tmp_header_size <= run_size) {
        // Found the right number of slots, that is, there was enough
        // space for the header (including the bit maps.)
        num_of_slots = s;
        header_size = tmp_header_size;
        bulk_free_bit_map_offset = tmp_bulk_free_bit_map_off;
        thread_local_free_bit_map_offset = tmp_thread_local_free_bit_map_off;
        break;
      }
    }
    DCHECK(num_of_slots > 0 && header_size > 0 && bulk_free_bit_map_offset > 0);
    // Add the padding for the alignment remainder.
    header_size += run_size % bracket_size;
    DCHECK_EQ(header_size + num_of_slots * bracket_size, run_size);
    numOfSlots[i] = num_of_slots;
    headerSizes[i] = header_size;
    bulkFreeBitMapOffsets[i] = bulk_free_bit_map_offset;
    threadLocalFreeBitMapOffsets[i] = thread_local_free_bit_map_offset;
    if (kTraceRosAlloc) {
      LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i]
                << ", headerSizes[" << i << "]=" << headerSizes[i]
                << ", bulkFreeBitMapOffsets[" << i << "]=" << bulkFreeBitMapOffsets[i]
1861 << ", threadLocalFreeBitMapOffsets[" << i << "]=" << threadLocalFreeBitMapOffsets[i];;
    }
  }
  // Fill the alloc bitmap so nobody can successfully allocate from it.
  if (kIsDebugBuild) {
    dedicated_full_run_->magic_num_ = kMagicNum;
  }
  // It doesn't matter which size bracket we use since the main goal is to have the allocation
  // fail 100% of the time you attempt to allocate into the dedicated full run.
  dedicated_full_run_->size_bracket_idx_ = 0;
  dedicated_full_run_->FillAllocBitMap();
  dedicated_full_run_->SetIsThreadLocal(true);
}

void RosAlloc::BytesAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
                                      size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* bytes_allocated = reinterpret_cast<size_t*>(arg);
  *bytes_allocated += used_bytes;
}

void RosAlloc::ObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
                                        size_t used_bytes, void* arg) {
  if (used_bytes == 0) {
    return;
  }
  size_t* objects_allocated = reinterpret_cast<size_t*>(arg);
  ++(*objects_allocated);
}

void RosAlloc::Verify() {
  Thread* self = Thread::Current();
  CHECK(Locks::mutator_lock_->IsExclusiveHeld(self))
      << "The mutator lock isn't exclusively held at " << __PRETTY_FUNCTION__;
  MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
  ReaderMutexLock wmu(self, bulk_free_lock_);
  std::vector<Run*> runs;
  {
    MutexLock lock_mu(self, lock_);
    size_t pm_end = page_map_size_;
    size_t i = 0;
    size_t memory_tool_modifier = is_running_on_memory_tool_ ?
        2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes :  // Redzones before and after.
        0;
    while (i < pm_end) {
      uint8_t pm = page_map_[i];
      switch (pm) {
        case kPageMapReleased:
          // Fall-through.
        case kPageMapEmpty: {
          // The start of a free page run.
          FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
          DCHECK_EQ(fpr->magic_num_, kMagicNumFree);
          CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
              << "An empty page must belong to the free page run set";
          size_t fpr_size = fpr->ByteSize(this);
          CHECK(IsAligned<kPageSize>(fpr_size))
              << "A free page run size isn't page-aligned : " << fpr_size;
          size_t num_pages = fpr_size / kPageSize;
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "A free page run size must be > 0 : " << fpr_size;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK(IsFreePage(j))
                << "A mismatch between the page map table for kPageMapEmpty "
                << " at page index " << j
                << " and the free page run size : page index range : "
                << i << " to " << (i + num_pages) << std::endl << DumpPageMap();
          }
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObject: {
          // The start of a large object.
          size_t num_pages = 1;
          size_t idx = i + 1;
          while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) {
            num_pages++;
            idx++;
          }
          uint8_t* start = base_ + i * kPageSize;
          if (is_running_on_memory_tool_) {
            start += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
          }
          mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
          size_t obj_size = obj->SizeOf();
          CHECK_GT(obj_size + memory_tool_modifier, kLargeSizeThreshold)
              << "A rosalloc large object size must be > " << kLargeSizeThreshold;
          CHECK_EQ(num_pages, RoundUp(obj_size + memory_tool_modifier, kPageSize) / kPageSize)
              << "A rosalloc large object size " << obj_size + memory_tool_modifier
              << " does not match the page map table " << (num_pages * kPageSize)
              << std::endl << DumpPageMap();
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapLargeObjectPart:
          LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
          break;
        case kPageMapRun: {
          // The start of a run.
          Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
          DCHECK_EQ(run->magic_num_, kMagicNum);
          size_t idx = run->size_bracket_idx_;
          CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
          size_t num_pages = numOfPages[idx];
          CHECK_GT(num_pages, static_cast<uintptr_t>(0))
              << "Run size must be > 0 : " << num_pages;
          for (size_t j = i + 1; j < i + num_pages; ++j) {
            CHECK_EQ(page_map_[j], kPageMapRunPart)
                << "A mismatch between the page map table for kPageMapRunPart "
                << " at page index " << j
                << " and the run size : page index range " << i << " to " << (i + num_pages)
                << std::endl << DumpPageMap();
          }
          // Don't verify the dedicated_full_run_ since it doesn't have any real allocations.
          runs.push_back(run);
          i += num_pages;
          CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end
                              << std::endl << DumpPageMap();
          break;
        }
        case kPageMapRunPart:
          // Fall-through.
        default:
          LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl << DumpPageMap();
          break;
      }
    }
  }
  std::list<Thread*> threads = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : threads) {
    for (size_t i = 0; i < kNumThreadLocalSizeBrackets; ++i) {
      MutexLock brackets_mu(self, *size_bracket_locks_[i]);
      Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
      CHECK(thread_local_run != nullptr);
      CHECK(thread_local_run->IsThreadLocal());
      CHECK(thread_local_run == dedicated_full_run_ ||
            thread_local_run->size_bracket_idx_ == i);
    }
  }
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    MutexLock brackets_mu(self, *size_bracket_locks_[i]);
    Run* current_run = current_runs_[i];
    CHECK(current_run != nullptr);
    if (current_run != dedicated_full_run_) {
      // The dedicated full run is currently marked as thread local.
      CHECK(!current_run->IsThreadLocal());
      CHECK_EQ(current_run->size_bracket_idx_, i);
    }
  }
  // Call Verify() here for the lock order.
  for (auto& run : runs) {
    run->Verify(self, this, is_running_on_memory_tool_);
  }
}

void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool) {
  DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
  const size_t idx = size_bracket_idx_;
  CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
  uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx];
  const size_t num_slots = numOfSlots[idx];
  const size_t num_vec = RoundUp(num_slots, 32) / 32;
  CHECK_GT(num_vec, 0U);
  size_t bracket_size = IndexToBracketSize(idx);
  CHECK_EQ(slot_base + num_slots * bracket_size,
           reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize)
      << "Mismatch in the end address of the run " << Dump();
  // Check that the bulk free bitmap is clean. It's only used during BulkFree().
  CHECK(IsBulkFreeBitmapClean()) << "The bulk free bit map isn't clean " << Dump();
  uint32_t last_word_mask = GetBitmapLastVectorMask(num_slots, num_vec);
  // Make sure all the bits at the end of the run are set so that we don't allocate there.
  CHECK_EQ(alloc_bit_map_[num_vec - 1] & last_word_mask, last_word_mask);
  // Ensure that the first bitmap index is valid.
  CHECK_LT(first_search_vec_idx_, num_vec);
  // Check the thread local runs, the current runs, and the run sets.
  if (IsThreadLocal()) {
    // If it's a thread local run, then it must be pointed to by an owner thread.
    bool owner_found = false;
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
      Thread* thread = *it;
      for (size_t i = 0; i < kNumThreadLocalSizeBrackets; i++) {
        MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
        Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i));
        if (thread_local_run == this) {
          CHECK(!owner_found)
              << "A thread local run has more than one owner thread " << Dump();
          CHECK_EQ(i, idx)
              << "A mismatching size bracket index in a thread local run " << Dump();
          owner_found = true;
        }
      }
    }
    CHECK(owner_found) << "A thread local run has no owner thread " << Dump();
  } else {
    // If it's not thread local, check that the thread local free bitmap is clean.
    CHECK(IsThreadLocalFreeBitmapClean())
        << "A non-thread-local run's thread local free bitmap isn't clean "
        << Dump();
    // Check if it's a current run for the size bucket.
    bool is_current_run = false;
    for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
      MutexLock mu(self, *rosalloc->size_bracket_locks_[i]);
      Run* current_run = rosalloc->current_runs_[i];
      if (idx == i) {
        if (this == current_run) {
          is_current_run = true;
        }
      } else {
        // If the size bucket index does not match, then it must not
        // be a current run.
        CHECK_NE(this, current_run)
            << "A current run points to a run with a wrong size bracket index " << Dump();
      }
    }
    // If it's neither a thread local nor a current run, then it must be
    // in a run set.
    if (!is_current_run) {
      MutexLock mu(self, rosalloc->lock_);
      auto& non_full_runs = rosalloc->non_full_runs_[idx];
      // If it's all free, it must be a free page run rather than a run.
      CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump();
      if (!IsFull()) {
2090 // If it's not full, it must in the non-full run set.
2091 CHECK(non_full_runs.find(this) != non_full_runs.end())
2092 << "A non-full run isn't in the non-full run set " << Dump();
2093 } else {
2094 // If it's full, it must in the full run set (debug build only.)
2095 if (kIsDebugBuild) {
Mathieu Chartier58553c72014-09-16 16:25:55 -07002096 auto& full_runs = rosalloc->full_runs_[idx];
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08002097 CHECK(full_runs.find(this) != full_runs.end())
2098 << " A full run isn't in the full run set " << Dump();
2099 }
2100 }
2101 }
2102 }
  // Check each slot.
  size_t slots = 0;
  size_t memory_tool_modifier = running_on_memory_tool ?
      2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes :
      0U;
  for (size_t v = 0; v < num_vec; v++, slots += 32) {
    DCHECK_GE(num_slots, slots) << "Out of bounds";
    uint32_t vec = alloc_bit_map_[v];
    uint32_t thread_local_free_vec = ThreadLocalFreeBitMap()[v];
    size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
    for (size_t i = 0; i < end; ++i) {
      bool is_allocated = ((vec >> i) & 0x1) != 0;
      // In a thread-local run, slots may be marked as freed in the
      // thread-local free bitmap.
      bool is_thread_local_freed = IsThreadLocal() && ((thread_local_free_vec >> i) & 0x1) != 0;
      if (is_allocated && !is_thread_local_freed) {
        uint8_t* slot_addr = slot_base + (slots + i) * bracket_size;
        if (running_on_memory_tool) {
          slot_addr += ::art::gc::space::kDefaultMemoryToolRedZoneBytes;
        }
        mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr);
        size_t obj_size = obj->SizeOf();
        CHECK_LE(obj_size + memory_tool_modifier, kLargeSizeThreshold)
            << "A run slot contains a large object " << Dump();
        CHECK_EQ(SizeToIndex(obj_size + memory_tool_modifier), idx)
            << PrettyTypeOf(obj) << " "
            << "obj_size=" << obj_size << "(" << obj_size + memory_tool_modifier << "), idx=" << idx
            << " A run slot contains an object with wrong size " << Dump();
      }
    }
  }
}

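// A minimal sketch (not part of RosAlloc; the helper name and return
// convention are hypothetical) of the bitmap scan that the checks in Verify
// above protect: allocation looks for the first clear bit across the 32-bit
// alloc vectors, so the unused tail bits of the last vector must stay set or
// a scan could hand out a slot past the end of the run.
static size_t FindFirstFreeSlotSketch(const uint32_t* vecs, size_t num_vec) {
  for (size_t v = 0; v < num_vec; ++v) {
    const uint32_t free_bits = ~vecs[v];  // A clear bit in the alloc bitmap marks a free slot.
    if (free_bits != 0) {
      return v * 32 + __builtin_ctz(free_bits);  // Index of the lowest free slot.
    }
  }
  return num_vec * 32;  // No clear bit anywhere: the run is full.
}
// RosAlloc additionally caches first_search_vec_idx_ so that repeated scans
// can skip vectors that are already known to be fully allocated.
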
size_t RosAlloc::ReleasePages() {
  VLOG(heap) << "RosAlloc::ReleasePages()";
  DCHECK(!DoesReleaseAllPages());
  Thread* self = Thread::Current();
  size_t reclaimed_bytes = 0;
  size_t i = 0;
  // Check the page map size which might have changed due to grow/shrink.
  while (i < page_map_size_) {
    // Reading the page map without a lock is racy, but the race is benign since it should only
    // result in occasionally not releasing pages which we could release.
    uint8_t pm = page_map_[i];
    switch (pm) {
      case kPageMapReleased:
        // Fall through.
      case kPageMapEmpty: {
        // This is currently the start of a free page run.
        // Acquire the lock to prevent other threads racing in and modifying the page map.
        MutexLock mu(self, lock_);
        // Check that it's still empty after we acquired the lock since another thread could have
        // raced in and placed an allocation here.
        if (IsFreePage(i)) {
          // Free page runs can start with a released page if we coalesced a released free page
          // run with an empty page run.
          FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
          // There is a race condition where FreePage can coalesce fpr with the previous
          // free page run before we acquire lock_. In that case free_page_runs_.find will not find
          // a run starting at fpr. To handle this race, we skip reclaiming the page range and go
          // to the next page.
          if (free_page_runs_.find(fpr) != free_page_runs_.end()) {
            size_t fpr_size = fpr->ByteSize(this);
            DCHECK(IsAligned<kPageSize>(fpr_size));
            uint8_t* start = reinterpret_cast<uint8_t*>(fpr);
            reclaimed_bytes += ReleasePageRange(start, start + fpr_size);
            size_t pages = fpr_size / kPageSize;
            CHECK_GT(pages, 0U) << "Infinite loop probable";
            i += pages;
            DCHECK_LE(i, page_map_size_);
            break;
          }
        }
        FALLTHROUGH_INTENDED;
      }
      case kPageMapLargeObject:      // Fall through.
      case kPageMapLargeObjectPart:  // Fall through.
      case kPageMapRun:              // Fall through.
      case kPageMapRunPart:          // Fall through.
        ++i;
        break;  // Skip.
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm);
        break;
    }
  }
  return reclaimed_bytes;
}

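// Worked example for the loop in ReleasePages above (illustrative): given a
// page map [kPageMapEmpty, kPageMapEmpty, kPageMapRun, kPageMapRunPart,
// kPageMapEmpty], the first iteration finds a two-page free page run,
// releases it, and advances i by fpr_size / kPageSize = 2; indices 2 and 3
// advance one page at a time; index 4 is a single-page free run, which
// ReleasePageRange declines to release in debug builds because its only page
// holds the magic number.
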
size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) {
  DCHECK_ALIGNED(start, kPageSize);
  DCHECK_ALIGNED(end, kPageSize);
  DCHECK_LT(start, end);
  if (kIsDebugBuild) {
    // In the debug build, the first page of a free page run
    // contains a magic number for debugging. Exclude it.
    start += kPageSize;

    // Single pages won't be released.
    if (start == end) {
      return 0;
    }
  }
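  // Note (platform assumption): on Linux, madvise(MADV_DONTNEED) on a private
  // anonymous mapping guarantees that subsequent reads return zero pages,
  // which is what kMadviseZeroes encodes; the explicit memset below is only
  // needed where that guarantee is absent.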
  if (!kMadviseZeroes) {
    // TODO: Do this when we resurrect the page instead.
    memset(start, 0, end - start);
  }
  CHECK_EQ(madvise(start, end - start, MADV_DONTNEED), 0);
  size_t pm_idx = ToPageMapIndex(start);
  size_t reclaimed_bytes = 0;
  // Calculate reclaimed bytes and update the page map.
  const size_t max_idx = pm_idx + (end - start) / kPageSize;
  for (; pm_idx < max_idx; ++pm_idx) {
    DCHECK(IsFreePage(pm_idx));
    if (page_map_[pm_idx] == kPageMapEmpty) {
      // Mark the page as released and update how many bytes we released.
      reclaimed_bytes += kPageSize;
      page_map_[pm_idx] = kPageMapReleased;
    }
  }
  return reclaimed_bytes;
}

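// A minimal, self-contained sketch of the MADV_DONTNEED technique used above,
// assuming a Linux private anonymous mapping (mmap/madvise/munmap come from
// <sys/mman.h>, which this file already relies on for the madvise call). The
// function name and page count are hypothetical.
static bool MadviseReleaseSketch() {
  const size_t kDemoBytes = 4 * kPageSize;
  void* mem = mmap(nullptr, kDemoBytes, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mem == MAP_FAILED) {
    return false;
  }
  memset(mem, 0xAB, kDemoBytes);  // Dirty the pages so they occupy physical memory.
  // Drop the physical pages; the virtual range stays mapped and reusable.
  if (madvise(mem, kDemoBytes, MADV_DONTNEED) != 0) {
    munmap(mem, kDemoBytes);
    return false;
  }
  // On Linux, the next read of a dropped anonymous page faults in a zero page.
  const bool zeroed = reinterpret_cast<uint8_t*>(mem)[0] == 0;
  munmap(mem, kDemoBytes);
  return zeroed;
}
// This is why ReleasePageRange can skip the memset when kMadviseZeroes holds:
// the kernel already guarantees zeroed contents on the next access.
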
void RosAlloc::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
  Thread* self = Thread::Current();
  size_t largest_continuous_free_pages = 0;
  WriterMutexLock wmu(self, bulk_free_lock_);
  MutexLock mu(self, lock_);
  for (FreePageRun* fpr : free_page_runs_) {
    largest_continuous_free_pages = std::max(largest_continuous_free_pages,
                                             fpr->ByteSize(this));
  }
  if (failed_alloc_bytes > kLargeSizeThreshold) {
    // Large allocation.
    size_t required_bytes = RoundUp(failed_alloc_bytes, kPageSize);
    if (required_bytes > largest_continuous_free_pages) {
      os << "; failed due to fragmentation (required contiguous free "
         << required_bytes << " bytes where largest contiguous free "
         << largest_continuous_free_pages << " bytes)";
    }
  } else {
    // Non-large allocation.
    size_t required_bytes = numOfPages[SizeToIndex(failed_alloc_bytes)] * kPageSize;
    if (required_bytes > largest_continuous_free_pages) {
      os << "; failed due to fragmentation (required contiguous free "
         << required_bytes << " bytes for a new buffer where largest contiguous free "
         << largest_continuous_free_pages << " bytes)";
    }
  }
}
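
// Worked example for the large-allocation branch above (illustrative, with
// kPageSize = 4096): a failed 12000-byte allocation needs
// RoundUp(12000, 4096) = 12288 contiguous free bytes, so if the largest free
// page run is only 8192 bytes the failure is attributed to fragmentation even
// when the total free space exceeds the request.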

}  // namespace allocator
}  // namespace gc
}  // namespace art