/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "large_object_space.h"

#include <valgrind.h>
#include <memcheck/memcheck.h>

#include <memory>

#include "base/logging.h"
#include "base/memory_tool.h"  // MEMORY_TOOL_MAKE_* macros used below (assumed header location).
#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "image.h"
#include "os.h"
#include "scoped_thread_state_change-inl.h"
#include "space-inl.h"
#include "thread-inl.h"

namespace art {
namespace gc {
namespace space {

class MemoryToolLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
 public:
  explicit MemoryToolLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
  }

  ~MemoryToolLargeObjectMapSpace() OVERRIDE {
    // Keep the memory tool happy if there are any large objects, such as dex cache arrays,
    // which are never freed because the class linker holds them live.
    MutexLock mu(Thread::Current(), lock_);
    for (auto& m : large_objects_) {
      delete m.second.mem_map;
    }
  }

  mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                        size_t* usable_size, size_t* bytes_tl_bulk_allocated)
      OVERRIDE {
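    // Layout sketch: the caller receives object_without_rdz, with one poisoned redzone of
    // kMemoryToolRedZoneBytes on each side of the num_bytes payload:
    //
    //   |<- redzone ->|<------- payload (num_bytes) ------->|<- redzone ->|
    //   ^obj          ^object_without_rdz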
    mirror::Object* obj =
        LargeObjectMapSpace::Alloc(self, num_bytes + kMemoryToolRedZoneBytes * 2, bytes_allocated,
                                   usable_size, bytes_tl_bulk_allocated);
    if (UNLIKELY(obj == nullptr)) {
      return nullptr;  // Propagate allocation failure before poisoning any redzones.
    }
    mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) + kMemoryToolRedZoneBytes);
    MEMORY_TOOL_MAKE_NOACCESS(reinterpret_cast<void*>(obj), kMemoryToolRedZoneBytes);
    MEMORY_TOOL_MAKE_NOACCESS(
        reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes,
        kMemoryToolRedZoneBytes);
    if (usable_size != nullptr) {
      *usable_size = num_bytes;  // Since we have redzones, shrink the usable size.
    }
    return object_without_rdz;
  }

  size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
    return LargeObjectMapSpace::AllocationSize(ObjectWithRedzone(obj), usable_size);
  }

  bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const OVERRIDE {
    return LargeObjectMapSpace::IsZygoteLargeObject(self, ObjectWithRedzone(obj));
  }

  size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
    mirror::Object* object_with_rdz = ObjectWithRedzone(obj);
    MEMORY_TOOL_MAKE_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
    return LargeObjectMapSpace::Free(self, object_with_rdz);
  }

  bool Contains(const mirror::Object* obj) const OVERRIDE {
    return LargeObjectMapSpace::Contains(ObjectWithRedzone(obj));
  }

 private:
  static const mirror::Object* ObjectWithRedzone(const mirror::Object* obj) {
    return reinterpret_cast<const mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
  }

  static mirror::Object* ObjectWithRedzone(mirror::Object* obj) {
    return reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kMemoryToolRedZoneBytes);
  }

  static constexpr size_t kMemoryToolRedZoneBytes = kPageSize;
};

void LargeObjectSpace::SwapBitmaps() {
  live_bitmap_.swap(mark_bitmap_);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_bitmap_->GetName();
  live_bitmap_->SetName(mark_bitmap_->GetName());
  mark_bitmap_->SetName(temp_name);
}

LargeObjectSpace::LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end)
    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
      num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
      total_objects_allocated_(0), begin_(begin), end_(end) {
}

void LargeObjectSpace::CopyLiveToMarked() {
  mark_bitmap_->CopyFrom(live_bitmap_.get());
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name, nullptr, nullptr),
      lock_("large object map space lock", kAllocSpaceLock) {}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  if (Runtime::Current()->IsRunningOnMemoryTool()) {
    return new MemoryToolLargeObjectMapSpace(name);
  } else {
    return new LargeObjectMapSpace(name);
  }
}

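// Each large object is backed by its own anonymous mapping, so objects are naturally
// page-aligned and Free() can return memory to the OS simply by deleting the map.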
mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
                                           size_t* bytes_allocated, size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
  if (UNLIKELY(mem_map == nullptr)) {
    LOG(WARNING) << "Large object allocation failed: " << error_msg;
    return nullptr;
  }
  mirror::Object* const obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
  if (kIsDebugBuild) {
    ReaderMutexLock mu2(Thread::Current(), *Locks::heap_bitmap_lock_);
    auto* heap = Runtime::Current()->GetHeap();
    auto* live_bitmap = heap->GetLiveBitmap();
    auto* space_bitmap = live_bitmap->GetContinuousSpaceBitmap(obj);
    CHECK(space_bitmap == nullptr) << obj << " overlaps with bitmap " << *space_bitmap;
    auto* obj_end = reinterpret_cast<mirror::Object*>(mem_map->End());
    space_bitmap = live_bitmap->GetContinuousSpaceBitmap(obj_end - 1);
    CHECK(space_bitmap == nullptr) << obj_end << " overlaps with bitmap " << *space_bitmap;
  }
  MutexLock mu(self, lock_);
  large_objects_.Put(obj, LargeObject {mem_map, false /* not zygote */});
  const size_t allocation_size = mem_map->BaseSize();
  DCHECK(bytes_allocated != nullptr);

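  // The anonymous maps can land anywhere in the address space, so stretch the space's
  // advertised [begin_, end_) bounds to cover the new mapping.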
  if (begin_ == nullptr || begin_ > reinterpret_cast<uint8_t*>(obj)) {
    begin_ = reinterpret_cast<uint8_t*>(obj);
  }
  end_ = std::max(end_, reinterpret_cast<uint8_t*>(obj) + allocation_size);

  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  *bytes_tl_bulk_allocated = allocation_size;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}

bool LargeObjectMapSpace::IsZygoteLargeObject(Thread* self, mirror::Object* obj) const {
  MutexLock mu(self, lock_);
  auto it = large_objects_.find(obj);
  CHECK(it != large_objects_.end());
  return it->second.is_zygote;
}

void LargeObjectMapSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
  MutexLock mu(self, lock_);
  for (auto& pair : large_objects_) {
    pair.second.is_zygote = true;
  }
}

size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  auto it = large_objects_.find(ptr);
  if (UNLIKELY(it == large_objects_.end())) {
    ScopedObjectAccess soa(self);
    Runtime::Current()->GetHeap()->DumpSpaces(LOG_STREAM(FATAL_WITHOUT_ABORT));
    LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
  }
  MemMap* mem_map = it->second.mem_map;
  const size_t map_size = mem_map->BaseSize();
  DCHECK_GE(num_bytes_allocated_, map_size);
  size_t allocation_size = map_size;
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  delete mem_map;
  large_objects_.erase(it);
  return allocation_size;
}

size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  MutexLock mu(Thread::Current(), lock_);
  auto it = large_objects_.find(obj);
  CHECK(it != large_objects_.end()) << "Attempted to get size of a large object which is not live";
  size_t alloc_size = it->second.mem_map->BaseSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size;
  }
  return alloc_size;
}

size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  size_t total = 0;
  for (size_t i = 0; i < num_ptrs; ++i) {
    if (kDebugSpaces) {
      CHECK(Contains(ptrs[i]));
    }
    total += Free(self, ptrs[i]);
  }
  return total;
}

void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto& pair : large_objects_) {
    MemMap* mem_map = pair.second.mem_map;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    callback(nullptr, nullptr, 0, arg);
  }
}

bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
  Thread* self = Thread::Current();
  if (lock_.IsExclusiveHeld(self)) {
    // We already hold lock_, so just do the lookup.
    return large_objects_.find(const_cast<mirror::Object*>(obj)) != large_objects_.end();
  } else {
    MutexLock mu(self, lock_);
    return large_objects_.find(const_cast<mirror::Object*>(obj)) != large_objects_.end();
  }
}

// Keeps track of allocation sizes and whether or not the previous allocation is free.
// Used to coalesce free blocks and to find the best-fit block for an allocation. Each
// allocation has an AllocationInfo which records the size of the free block preceding it.
// Implemented in such a way that we can also find the iterator for any allocation info
// pointer.
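// For example, a live allocation covering three kAlignment units, preceded by a free run
// of two units, stores alloc_size_ = 3 (flag bits clear) and prev_free_ = 2; the info
// that follows a free run is the one inserted into free_blocks_.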
class AllocationInfo {
 public:
  AllocationInfo() : prev_free_(0), alloc_size_(0) {
  }
  // Returns the number of kAlignment units that the allocation covers.
  size_t AlignSize() const {
    return alloc_size_ & kFlagsMask;
  }
  // Returns the allocation size in bytes.
  size_t ByteSize() const {
    return AlignSize() * FreeListSpace::kAlignment;
  }
  // Updates the allocation size and whether or not it is free.
  void SetByteSize(size_t size, bool free) {
    DCHECK_EQ(size & ~kFlagsMask, 0u);
    DCHECK_ALIGNED(size, FreeListSpace::kAlignment);
    alloc_size_ = (size / FreeListSpace::kAlignment) | (free ? kFlagFree : 0u);
  }
  // Returns true if the block is free.
  bool IsFree() const {
    return (alloc_size_ & kFlagFree) != 0;
  }
  // Returns true if the large object is a zygote object.
  bool IsZygoteObject() const {
    return (alloc_size_ & kFlagZygote) != 0;
  }
  // Marks the object as a zygote object.
  void SetZygoteObject() {
    alloc_size_ |= kFlagZygote;
  }
  // Returns the allocation info directly after this allocation (or free run).
  AllocationInfo* GetNextInfo() {
    return this + AlignSize();
  }
  const AllocationInfo* GetNextInfo() const {
    return this + AlignSize();
  }
  // Returns the allocation info of the previous free block, using the prev_free_ member to
  // figure out where it is. This is only used for coalescing, so it only needs to work when
  // the previous allocation info is free.
  AllocationInfo* GetPrevFreeInfo() {
    DCHECK_NE(prev_free_, 0U);
    return this - prev_free_;
  }
  // Returns the address of the object associated with this allocation info.
  mirror::Object* GetObjectAddress() {
    return reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(this) + sizeof(*this));
  }
  // Returns how many kAlignment units of free space precede this block.
  size_t GetPrevFree() const {
    return prev_free_;
  }
  // Returns how many free bytes there are before the block.
  size_t GetPrevFreeBytes() const {
    return GetPrevFree() * FreeListSpace::kAlignment;
  }
  // Updates the size of the free block prior to the allocation.
  void SetPrevFreeBytes(size_t bytes) {
    DCHECK_ALIGNED(bytes, FreeListSpace::kAlignment);
    prev_free_ = bytes / FreeListSpace::kAlignment;
  }

 private:
  static constexpr uint32_t kFlagFree = 0x80000000;  // If block is free.
  static constexpr uint32_t kFlagZygote = 0x40000000;  // If the large object is a zygote object.
  static constexpr uint32_t kFlagsMask = ~(kFlagFree | kFlagZygote);  // Selects the size bits.
  // Contains the size of the previous free block with kAlignment as the unit. If 0 then the
  // allocation before us is not free.
  // These variables are undefined in the middle of allocations / free blocks.
  uint32_t prev_free_;
  // Allocation size of this object in kAlignment as the unit.
  uint32_t alloc_size_;
};

size_t FreeListSpace::GetSlotIndexForAllocationInfo(const AllocationInfo* info) const {
  DCHECK_GE(info, allocation_info_);
  DCHECK_LT(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_->End()));
  return info - allocation_info_;
}

AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) {
  return &allocation_info_[GetSlotIndexForAddress(address)];
}

const AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) const {
  return &allocation_info_[GetSlotIndexForAddress(address)];
}

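// Orders free-set entries by the size of the free run recorded in prev_free_, then by the
// entry's own unit count, then by address. This lets Alloc() use lower_bound() with a dummy
// key to find the smallest free block that fits (best fit), with ties broken by address.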
inline bool FreeListSpace::SortByPrevFree::operator()(const AllocationInfo* a,
                                                      const AllocationInfo* b) const {
  if (a->GetPrevFree() < b->GetPrevFree()) return true;
  if (a->GetPrevFree() > b->GetPrevFree()) return false;
  if (a->AlignSize() < b->AlignSize()) return true;
  if (a->AlignSize() > b->AlignSize()) return false;
  return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
}

FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin,
                                     size_t size) {
  CHECK_EQ(size % kAlignment, 0U);
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
  CHECK(mem_map != nullptr) << "Failed to allocate large object space mem map: " << error_msg;
  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}

FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end)
    : LargeObjectSpace(name, begin, end),
      mem_map_(mem_map),
      lock_("free list space lock", kAllocSpaceLock) {
  const size_t space_capacity = end - begin;
  free_end_ = space_capacity;
  CHECK_ALIGNED(space_capacity, kAlignment);
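  // Reserve one AllocationInfo per kAlignment unit in a side table instead of using headers
  // inside the space itself; keeping the metadata off the object pages means Free() can
  // madvise payload pages away without touching any bookkeeping.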
  const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / kAlignment);
  std::string error_msg;
  allocation_info_map_.reset(
      MemMap::MapAnonymous("large object free list space allocation info map",
                           nullptr, alloc_info_size, PROT_READ | PROT_WRITE,
                           false, false, &error_msg));
  CHECK(allocation_info_map_.get() != nullptr) << "Failed to allocate allocation info map: "
                                               << error_msg;
  allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_->Begin());
}

FreeListSpace::~FreeListSpace() {}

void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  const uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationInfo* cur_info = &allocation_info_[0];
  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
  while (cur_info < end_info) {
    if (!cur_info->IsFree()) {
      size_t alloc_size = cur_info->ByteSize();
      uint8_t* byte_start = reinterpret_cast<uint8_t*>(GetAddressForAllocationInfo(cur_info));
      uint8_t* byte_end = byte_start + alloc_size;
      callback(byte_start, byte_end, alloc_size, arg);
      callback(nullptr, nullptr, 0, arg);
    }
    cur_info = cur_info->GetNextInfo();
  }
  CHECK_EQ(cur_info, end_info);
}

void FreeListSpace::RemoveFreePrev(AllocationInfo* info) {
  CHECK_GT(info->GetPrevFree(), 0U);
  auto it = free_blocks_.lower_bound(info);
  CHECK(it != free_blocks_.end());
  CHECK_EQ(*it, info);
  free_blocks_.erase(it);
}

size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
  MutexLock mu(self, lock_);
  DCHECK(Contains(obj)) << reinterpret_cast<void*>(Begin()) << " " << obj << " "
                        << reinterpret_cast<void*>(End());
  DCHECK_ALIGNED(obj, kAlignment);
  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(!info->IsFree());
  const size_t allocation_size = info->ByteSize();
  DCHECK_GT(allocation_size, 0U);
  DCHECK_ALIGNED(allocation_size, kAlignment);
  info->SetByteSize(allocation_size, true);  // Mark as free.
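  // Coalescing proceeds in two directions: first merge with a free predecessor recorded in
  // prev_free_, then either (a) fold everything into the trailing free region when the next
  // chunk starts at free_end_start, (b) additionally swallow a free successor, or (c) record
  // the merged run in the live successor's prev_free_ and re-insert it into free_blocks_.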
  // Look at the next chunk.
  AllocationInfo* next_info = info->GetNextInfo();
  // Calculate the start of the end free block.
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  size_t prev_free_bytes = info->GetPrevFreeBytes();
  size_t new_free_size = allocation_size;
  if (prev_free_bytes != 0) {
    // Coalesce with previous free chunk.
    new_free_size += prev_free_bytes;
    RemoveFreePrev(info);
    info = info->GetPrevFreeInfo();
    // The previous allocation info must not be free since we are supposed to always coalesce.
    DCHECK_EQ(info->GetPrevFreeBytes(), 0U) << "Previous allocation was free";
  }
  uintptr_t next_addr = GetAddressForAllocationInfo(next_info);
  if (next_addr >= free_end_start) {
    // Easy case, the next chunk is the end free region.
    CHECK_EQ(next_addr, free_end_start);
    free_end_ += new_free_size;
  } else {
    AllocationInfo* new_free_info;
    if (next_info->IsFree()) {
      AllocationInfo* next_next_info = next_info->GetNextInfo();
      // Next next info can't be free since we always coalesce.
      DCHECK(!next_next_info->IsFree());
      DCHECK_ALIGNED(next_next_info->ByteSize(), kAlignment);
      new_free_info = next_next_info;
      new_free_size += next_next_info->GetPrevFreeBytes();
      RemoveFreePrev(next_next_info);
    } else {
      new_free_info = next_info;
    }
    new_free_info->SetPrevFreeBytes(new_free_size);
    free_blocks_.insert(new_free_info);
    info->SetByteSize(new_free_size, true);
    DCHECK_EQ(info->GetNextInfo(), new_free_info);
  }
  --num_objects_allocated_;
  DCHECK_LE(allocation_size, num_bytes_allocated_);
  num_bytes_allocated_ -= allocation_size;
  madvise(obj, allocation_size, MADV_DONTNEED);
  if (kIsDebugBuild) {
    // Can't disallow reads since we use them to find next chunks during coalescing.
    mprotect(obj, allocation_size, PROT_READ);
  }
  return allocation_size;
}

size_t FreeListSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  DCHECK(Contains(obj));
  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(!info->IsFree());
  size_t alloc_size = info->ByteSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size;
  }
  return alloc_size;
}

mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  MutexLock mu(self, lock_);
  const size_t allocation_size = RoundUp(num_bytes, kAlignment);
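  // temp_info is a stack-allocated search key, never linked into the space: storing the
  // request size in prev_free_ makes lower_bound() treat it as a free run of exactly
  // allocation_size bytes, so the lookup below performs a best-fit search.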
  AllocationInfo temp_info;
  temp_info.SetPrevFreeBytes(allocation_size);
  temp_info.SetByteSize(0, false);
  AllocationInfo* new_info;
  // Find the smallest chunk at least num_bytes in size.
  auto it = free_blocks_.lower_bound(&temp_info);
  if (it != free_blocks_.end()) {
    AllocationInfo* info = *it;
    free_blocks_.erase(it);
    // Fit our object at the start of the free run recorded in info's prev_free_.
    new_info = info->GetPrevFreeInfo();
    // Remove the newly allocated block from the info and update the prev_free_.
    info->SetPrevFreeBytes(info->GetPrevFreeBytes() - allocation_size);
    if (info->GetPrevFreeBytes() > 0) {
      AllocationInfo* new_free = info - info->GetPrevFree();
      new_free->SetPrevFreeBytes(0);
      new_free->SetByteSize(info->GetPrevFreeBytes(), true);
      // If there is remaining space, insert back into the free set.
      free_blocks_.insert(info);
    }
  } else {
    // Try to steal some memory from the free space at the end of the space.
    if (LIKELY(free_end_ >= allocation_size)) {
      // Fit our object at the start of the end free block.
      new_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(End()) - free_end_);
      free_end_ -= allocation_size;
    } else {
      return nullptr;
    }
  }
  DCHECK(bytes_allocated != nullptr);
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  *bytes_tl_bulk_allocated = allocation_size;
  // Need to do these inside of the lock.
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(GetAddressForAllocationInfo(new_info));
  // We always put our object at the start of the free block; there cannot be another free block
  // before it.
  if (kIsDebugBuild) {
    mprotect(obj, allocation_size, PROT_READ | PROT_WRITE);
  }
  new_info->SetPrevFreeBytes(0);
  new_info->SetByteSize(allocation_size, false);
  return obj;
}

void FreeListSpace::Dump(std::ostream& os) const {
  MutexLock mu(Thread::Current(), lock_);
  os << GetName() << " -"
     << " begin: " << reinterpret_cast<void*>(Begin())
     << " end: " << reinterpret_cast<void*>(End()) << "\n";
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  const AllocationInfo* cur_info =
      GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin()));
  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
  while (cur_info < end_info) {
    size_t size = cur_info->ByteSize();
    uintptr_t address = GetAddressForAllocationInfo(cur_info);
    if (cur_info->IsFree()) {
      os << "Free block at address: " << reinterpret_cast<const void*>(address)
         << " of length " << size << " bytes\n";
    } else {
      os << "Large object at address: " << reinterpret_cast<const void*>(address)
         << " of length " << size << " bytes\n";
    }
    cur_info = cur_info->GetNextInfo();
  }
  if (free_end_) {
    os << "Free block at address: " << reinterpret_cast<const void*>(free_end_start)
       << " of length " << free_end_ << " bytes\n";
  }
}

bool FreeListSpace::IsZygoteLargeObject(Thread* self ATTRIBUTE_UNUSED, mirror::Object* obj) const {
  const AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(info != nullptr);
  return info->IsZygoteObject();
}

void FreeListSpace::SetAllLargeObjectsAsZygoteObjects(Thread* self) {
  MutexLock mu(self, lock_);
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  for (AllocationInfo* cur_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin())),
      *end_info = GetAllocationInfoForAddress(free_end_start); cur_info < end_info;
      cur_info = cur_info->GetNextInfo()) {
    if (!cur_info->IsFree()) {
      cur_info->SetZygoteObject();
    }
  }
}

void LargeObjectSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  space::LargeObjectSpace* space = context->space->AsLargeObjectSpace();
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // If the bitmaps aren't swapped, we need to clear the bits since the GC isn't going to re-swap
  // the bitmaps as an optimization.
  if (!context->swap_bitmaps) {
    accounting::LargeObjectBitmap* bitmap = space->GetLiveBitmap();
    for (size_t i = 0; i < num_ptrs; ++i) {
      bitmap->Clear(ptrs[i]);
    }
  }
  context->freed.objects += num_ptrs;
  context->freed.bytes += space->FreeList(self, num_ptrs, ptrs);
}

collector::ObjectBytePair LargeObjectSpace::Sweep(bool swap_bitmaps) {
  if (Begin() >= End()) {
    return collector::ObjectBytePair(0, 0);
  }
  accounting::LargeObjectBitmap* live_bitmap = GetLiveBitmap();
  accounting::LargeObjectBitmap* mark_bitmap = GetMarkBitmap();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
  }
  AllocSpace::SweepCallbackContext scc(swap_bitmaps, this);
  std::pair<uint8_t*, uint8_t*> range = GetBeginEndAtomic();
  accounting::LargeObjectBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
                                           reinterpret_cast<uintptr_t>(range.first),
                                           reinterpret_cast<uintptr_t>(range.second),
                                           SweepCallback,
                                           &scc);
  return scc.freed;
}

void LargeObjectSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
                                                    size_t /*failed_alloc_bytes*/) {
  UNIMPLEMENTED(FATAL);
}

std::pair<uint8_t*, uint8_t*> LargeObjectMapSpace::GetBeginEndAtomic() const {
  MutexLock mu(Thread::Current(), lock_);
  return std::make_pair(Begin(), End());
}

std::pair<uint8_t*, uint8_t*> FreeListSpace::GetBeginEndAtomic() const {
  MutexLock mu(Thread::Current(), lock_);
  return std::make_pair(Begin(), End());
}

}  // namespace space
}  // namespace gc
}  // namespace art