/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "large_object_space.h"

#include <sys/mman.h>  // For madvise and mprotect, used below.

#include <memory>

#include "base/logging.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "image.h"
#include "os.h"
#include "space-inl.h"
#include "thread-inl.h"
#include "utils.h"

namespace art {
namespace gc {
namespace space {

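// A LargeObjectMapSpace that surrounds every allocation with an inaccessible, page-sized red
// zone so that Valgrind reports reads or writes that stray past an object. Layout of each
// underlying map:
//
//   [red zone][ object (num_bytes) ][red zone]
//
// The pointer handed to callers points just past the leading red zone; the overrides below
// translate back to the start of the underlying map by subtracting kValgrindRedZoneBytes.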
class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
 public:
  explicit ValgrindLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
  }

  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size) OVERRIDE {
    mirror::Object* obj =
        LargeObjectMapSpace::Alloc(self, num_bytes + kValgrindRedZoneBytes * 2, bytes_allocated,
                                   usable_size);
    if (UNLIKELY(obj == nullptr)) {
      // The underlying allocation failed; don't offset a null pointer by the red zone size.
      return nullptr;
    }
    mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) + kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<void*>(obj), kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<uint8_t*>(object_without_rdz) + num_bytes,
                               kValgrindRedZoneBytes);
    if (usable_size != nullptr) {
      *usable_size = num_bytes;  // Since we have redzones, shrink the usable size.
    }
    return object_without_rdz;
  }

  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
    return LargeObjectMapSpace::AllocationSize(object_with_rdz, usable_size);
  }

  virtual size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
    return LargeObjectMapSpace::Free(self, object_with_rdz);
  }

  bool Contains(const mirror::Object* obj) const OVERRIDE {
    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
    return LargeObjectMapSpace::Contains(object_with_rdz);
  }

 private:
  static constexpr size_t kValgrindRedZoneBytes = kPageSize;
};

void LargeObjectSpace::SwapBitmaps() {
  live_bitmap_.swap(mark_bitmap_);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_bitmap_->GetName();
  live_bitmap_->SetName(mark_bitmap_->GetName());
  mark_bitmap_->SetName(temp_name);
}

LargeObjectSpace::LargeObjectSpace(const std::string& name, uint8_t* begin, uint8_t* end)
    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
      num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
      total_objects_allocated_(0), begin_(begin), end_(end) {
}

void LargeObjectSpace::CopyLiveToMarked() {
  mark_bitmap_->CopyFrom(live_bitmap_.get());
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name, nullptr, nullptr),
      lock_("large object map space lock", kAllocSpaceLock) {}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  if (Runtime::Current()->RunningOnValgrind()) {
    return new ValgrindLargeObjectMapSpace(name);
  } else {
    return new LargeObjectMapSpace(name);
  }
}

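// Each large object is backed by its own anonymous MemMap. This keeps large objects
// page-aligned and lets Free() return their pages straight to the kernel when the map is
// deleted, at the cost of one mmap call per allocation.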
mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
                                           size_t* bytes_allocated, size_t* usable_size) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
  if (UNLIKELY(mem_map == nullptr)) {
    LOG(WARNING) << "Large object allocation failed: " << error_msg;
    return nullptr;
  }
  MutexLock mu(self, lock_);
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
  large_objects_.push_back(obj);
  mem_maps_.Put(obj, mem_map);
  const size_t allocation_size = mem_map->BaseSize();
  DCHECK(bytes_allocated != nullptr);
  // Extend the recorded [begin_, end_) range to cover the new map; Sweep() walks this range.
  begin_ = std::min(begin_, reinterpret_cast<uint8_t*>(obj));
  uint8_t* obj_end = reinterpret_cast<uint8_t*>(obj) + allocation_size;
  if (end_ == nullptr || obj_end > end_) {
    end_ = obj_end;
  }
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}

size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  auto found = mem_maps_.find(ptr);
  if (UNLIKELY(found == mem_maps_.end())) {
    Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
    LOG(FATAL) << "Attempted to free large object " << ptr << " which was not live";
  }
  const size_t allocation_size = found->second->BaseSize();
  DCHECK_GE(num_bytes_allocated_, allocation_size);
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  delete found->second;
  mem_maps_.erase(found);
  return allocation_size;
}

size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  MutexLock mu(Thread::Current(), lock_);
  auto found = mem_maps_.find(obj);
  CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
  size_t alloc_size = found->second->BaseSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size;
  }
  return alloc_size;
}

size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  size_t total = 0;
  for (size_t i = 0; i < num_ptrs; ++i) {
    if (kDebugSpaces) {
      CHECK(Contains(ptrs[i]));
    }
    total += Free(self, ptrs[i]);
  }
  return total;
}

void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
    MemMap* mem_map = it->second;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    callback(nullptr, nullptr, 0, arg);
  }
}

bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
  Thread* self = Thread::Current();
  if (lock_.IsExclusiveHeld(self)) {
    // We hold lock_ so do the check.
    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
  } else {
    MutexLock mu(self, lock_);
    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
  }
}

// Keeps track of allocation sizes and whether or not the previous allocation is free.
// Used to coalesce free blocks and find the best fit block for an allocation.
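//
// The infos live in a side table (FreeListSpace::allocation_info_) with one entry per kAlignment
// unit of the space, so slot i describes the block starting at Begin() + i * kAlignment, and
// only the entry at the start of a block is meaningful. For example (sizes in kAlignment units),
// a 3-unit allocation at slot 5, a 2-unit free block at slot 8 and another allocation at
// slot 10 look like:
//
//   slot:         5    8              10
//   alloc_size_:  3    kFlagFree | 2  ...
//   prev_free_:   0    0              2
//
// free_blocks_ tracks each free block through the entry that follows it (slot 10 here), keyed
// on that entry's prev_free_ value.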
class AllocationInfo {
 public:
  AllocationInfo() : prev_free_(0), alloc_size_(0) {
  }
  // Returns the size of the block in kAlignment units, with the free flag masked off.
  size_t AlignSize() const {
    return alloc_size_ & ~kFlagFree;
  }
  // Returns the allocation size in bytes.
  size_t ByteSize() const {
    return AlignSize() * FreeListSpace::kAlignment;
  }
  // Updates the allocation size and whether or not it is free.
  void SetByteSize(size_t size, bool free) {
    DCHECK_ALIGNED(size, FreeListSpace::kAlignment);
    alloc_size_ = (size / FreeListSpace::kAlignment) | (free ? kFlagFree : 0U);
  }
  bool IsFree() const {
    return (alloc_size_ & kFlagFree) != 0;
  }
  // Returns the allocation info of the block immediately after this one, free or not.
  AllocationInfo* GetNextInfo() {
    return this + AlignSize();
  }
  const AllocationInfo* GetNextInfo() const {
    return this + AlignSize();
  }
  // Returns the allocation info of the previous free block, located via the prev_free_ member.
  // This is only used for coalescing, so it only needs to work when the previous allocation
  // info is in fact free.
  AllocationInfo* GetPrevFreeInfo() {
    DCHECK_NE(prev_free_, 0U);
    return this - prev_free_;
  }
  // Returns the address of the object associated with this allocation info.
  mirror::Object* GetObjectAddress() {
    return reinterpret_cast<mirror::Object*>(reinterpret_cast<uintptr_t>(this) + sizeof(*this));
  }
  // Returns how many kAlignment units of free space precede this block.
  size_t GetPrevFree() const {
    return prev_free_;
  }
  // Returns how many free bytes there are before the block.
  size_t GetPrevFreeBytes() const {
    return GetPrevFree() * FreeListSpace::kAlignment;
  }
  // Updates the size of the free block preceding this allocation.
  void SetPrevFreeBytes(size_t bytes) {
    DCHECK_ALIGNED(bytes, FreeListSpace::kAlignment);
    prev_free_ = bytes / FreeListSpace::kAlignment;
  }

 private:
  // Used to implement best fit object allocation. Each allocation has an AllocationInfo which
  // contains the size of the previous free block preceding it. Implemented in such a way that we
  // can also find the iterator for any allocation info pointer.
  static constexpr uint32_t kFlagFree = 0x8000000;
  // Contains the size of the previous free block with kAlignment as the unit. If 0 then the
  // allocation before us is not free.
  // These variables are undefined in the middle of allocations / free blocks.
  uint32_t prev_free_;
  // Allocation size of this object in kAlignment as the unit.
  uint32_t alloc_size_;
};

size_t FreeListSpace::GetSlotIndexForAllocationInfo(const AllocationInfo* info) const {
  DCHECK_GE(info, allocation_info_);
  DCHECK_LT(info, reinterpret_cast<AllocationInfo*>(allocation_info_map_->End()));
  return info - allocation_info_;
}

AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) {
  return &allocation_info_[GetSlotIndexForAddress(address)];
}

const AllocationInfo* FreeListSpace::GetAllocationInfoForAddress(uintptr_t address) const {
  return &allocation_info_[GetSlotIndexForAddress(address)];
}

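// Orders free_blocks_ entries primarily by the size of the free block they describe
// (GetPrevFree), breaking ties by allocation size and then by address. This lets Alloc() run a
// lower_bound() lookup with a dummy info carrying the request size to find the smallest free
// block that fits, i.e. best fit.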
inline bool FreeListSpace::SortByPrevFree::operator()(const AllocationInfo* a,
                                                      const AllocationInfo* b) const {
  if (a->GetPrevFree() < b->GetPrevFree()) return true;
  if (a->GetPrevFree() > b->GetPrevFree()) return false;
  if (a->AlignSize() < b->AlignSize()) return true;
  if (a->AlignSize() > b->AlignSize()) return false;
  return reinterpret_cast<uintptr_t>(a) < reinterpret_cast<uintptr_t>(b);
}

FreeListSpace* FreeListSpace::Create(const std::string& name, uint8_t* requested_begin,
                                     size_t size) {
  CHECK_EQ(size % kAlignment, 0U);
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
                                         PROT_READ | PROT_WRITE, true, false, &error_msg);
  CHECK(mem_map != nullptr) << "Failed to allocate large object space mem map: " << error_msg;
  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}

FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, uint8_t* begin,
                             uint8_t* end)
    : LargeObjectSpace(name, begin, end),
      mem_map_(mem_map),
      lock_("free list space lock", kAllocSpaceLock) {
  const size_t space_capacity = end - begin;
  free_end_ = space_capacity;
  CHECK_ALIGNED(space_capacity, kAlignment);
  // One AllocationInfo per kAlignment unit of the space, kept in a separate map so the metadata
  // is not interleaved with the objects themselves.
  const size_t alloc_info_size = sizeof(AllocationInfo) * (space_capacity / kAlignment);
  std::string error_msg;
  allocation_info_map_.reset(
      MemMap::MapAnonymous("large object free list space allocation info map",
                           nullptr, alloc_info_size, PROT_READ | PROT_WRITE,
                           false, false, &error_msg));
  CHECK(allocation_info_map_.get() != nullptr) << "Failed to allocate allocation info map: "
                                               << error_msg;
  allocation_info_ = reinterpret_cast<AllocationInfo*>(allocation_info_map_->Begin());
}

FreeListSpace::~FreeListSpace() {}

void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  // [free_end_start, End()) is the untouched tail of the space; the allocation info table only
  // describes blocks before it.
  const uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationInfo* cur_info = &allocation_info_[0];
  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
  while (cur_info < end_info) {
    if (!cur_info->IsFree()) {
      size_t alloc_size = cur_info->ByteSize();
      uint8_t* byte_start = reinterpret_cast<uint8_t*>(GetAddressForAllocationInfo(cur_info));
      uint8_t* byte_end = byte_start + alloc_size;
      callback(byte_start, byte_end, alloc_size, arg);
      callback(nullptr, nullptr, 0, arg);
    }
    cur_info = cur_info->GetNextInfo();
  }
  CHECK_EQ(cur_info, end_info);
}

// Removes the entry tracking the free block that precedes |info| from free_blocks_.
void FreeListSpace::RemoveFreePrev(AllocationInfo* info) {
  CHECK_GT(info->GetPrevFree(), 0U);
  auto it = free_blocks_.lower_bound(info);
  CHECK(it != free_blocks_.end());
  CHECK_EQ(*it, info);
  free_blocks_.erase(it);
}

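// Frees a block and coalesces it with any adjacent free space: the preceding free block, the
// following free block, or the untouched region at the end of the space. As a result, two
// adjacent free blocks never coexist in free_blocks_.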
size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
  MutexLock mu(self, lock_);
  DCHECK(Contains(obj)) << reinterpret_cast<void*>(Begin()) << " " << obj << " "
                        << reinterpret_cast<void*>(End());
  DCHECK_ALIGNED(obj, kAlignment);
  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(!info->IsFree());
  const size_t allocation_size = info->ByteSize();
  DCHECK_GT(allocation_size, 0U);
  DCHECK_ALIGNED(allocation_size, kAlignment);
  info->SetByteSize(allocation_size, true);  // Mark as free.
  // Look at the next chunk.
  AllocationInfo* next_info = info->GetNextInfo();
  // Calculate the start of the end free block.
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  size_t prev_free_bytes = info->GetPrevFreeBytes();
  size_t new_free_size = allocation_size;
  if (prev_free_bytes != 0) {
    // Coalesce with previous free chunk.
    new_free_size += prev_free_bytes;
    RemoveFreePrev(info);
    info = info->GetPrevFreeInfo();
    // The previous allocation info must not be free since we are supposed to always coalesce.
    DCHECK_EQ(info->GetPrevFreeBytes(), 0U) << "Previous allocation was free";
  }
  uintptr_t next_addr = GetAddressForAllocationInfo(next_info);
  if (next_addr >= free_end_start) {
    // Easy case, the next chunk is the end free region.
    CHECK_EQ(next_addr, free_end_start);
    free_end_ += new_free_size;
  } else {
    AllocationInfo* new_free_info;
    if (next_info->IsFree()) {
      AllocationInfo* next_next_info = next_info->GetNextInfo();
      // Next next info can't be free since we always coalesce.
      DCHECK(!next_next_info->IsFree());
      DCHECK(IsAligned<kAlignment>(next_next_info->ByteSize()));
      new_free_info = next_next_info;
      new_free_size += next_next_info->GetPrevFreeBytes();
      RemoveFreePrev(next_next_info);
    } else {
      new_free_info = next_info;
    }
    new_free_info->SetPrevFreeBytes(new_free_size);
    free_blocks_.insert(new_free_info);
    info->SetByteSize(new_free_size, true);
    DCHECK_EQ(info->GetNextInfo(), new_free_info);
  }
  --num_objects_allocated_;
  DCHECK_LE(allocation_size, num_bytes_allocated_);
  num_bytes_allocated_ -= allocation_size;
  madvise(obj, allocation_size, MADV_DONTNEED);
  if (kIsDebugBuild) {
    // Can't disallow reads since we use them to find next chunks during coalescing.
    mprotect(obj, allocation_size, PROT_READ);
  }
  return allocation_size;
}

size_t FreeListSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  DCHECK(Contains(obj));
  AllocationInfo* info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(obj));
  DCHECK(!info->IsFree());
  size_t alloc_size = info->ByteSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size;
  }
  return alloc_size;
}

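// Best-fit allocation: a dummy AllocationInfo whose prev_free_ equals the rounded-up request is
// passed to lower_bound() on free_blocks_ to find the smallest tracked free block that fits. If
// none is large enough, the object is carved out of the untouched space at the end instead.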
mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size) {
  MutexLock mu(self, lock_);
  const size_t allocation_size = RoundUp(num_bytes, kAlignment);
  AllocationInfo temp_info;
  temp_info.SetPrevFreeBytes(allocation_size);
  temp_info.SetByteSize(0, false);
  AllocationInfo* new_info;
  // Find the smallest chunk at least num_bytes in size.
  auto it = free_blocks_.lower_bound(&temp_info);
  if (it != free_blocks_.end()) {
    AllocationInfo* info = *it;
    free_blocks_.erase(it);
    // Fit our object in the previous allocation info free space.
    new_info = info->GetPrevFreeInfo();
    // Remove the newly allocated block from the info and update the prev_free_.
    info->SetPrevFreeBytes(info->GetPrevFreeBytes() - allocation_size);
    if (info->GetPrevFreeBytes() > 0) {
      AllocationInfo* new_free = info - info->GetPrevFree();
      new_free->SetPrevFreeBytes(0);
      new_free->SetByteSize(info->GetPrevFreeBytes(), true);
      // If there is remaining space, insert back into the free set.
      free_blocks_.insert(info);
    }
  } else {
    // Try to steal some memory from the free space at the end of the space.
    if (LIKELY(free_end_ >= allocation_size)) {
      // Fit our object at the start of the end free block.
      new_info = GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(End()) - free_end_);
      free_end_ -= allocation_size;
    } else {
      return nullptr;
    }
  }
  DCHECK(bytes_allocated != nullptr);
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  // Need to do these inside of the lock.
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(GetAddressForAllocationInfo(new_info));
  // We always put our object at the start of the free block; there cannot be another free block
  // before it.
  if (kIsDebugBuild) {
    mprotect(obj, allocation_size, PROT_READ | PROT_WRITE);
  }
  new_info->SetPrevFreeBytes(0);
  new_info->SetByteSize(allocation_size, false);
  return obj;
}

void FreeListSpace::Dump(std::ostream& os) const {
  MutexLock mu(Thread::Current(), const_cast<Mutex&>(lock_));
  os << GetName() << " -"
     << " begin: " << reinterpret_cast<void*>(Begin())
     << " end: " << reinterpret_cast<void*>(End()) << "\n";
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  const AllocationInfo* cur_info =
      GetAllocationInfoForAddress(reinterpret_cast<uintptr_t>(Begin()));
  const AllocationInfo* end_info = GetAllocationInfoForAddress(free_end_start);
  while (cur_info < end_info) {
    size_t size = cur_info->ByteSize();
    uintptr_t address = GetAddressForAllocationInfo(cur_info);
    if (cur_info->IsFree()) {
      os << "Free block at address: " << reinterpret_cast<const void*>(address)
         << " of length " << size << " bytes\n";
    } else {
      os << "Large object at address: " << reinterpret_cast<const void*>(address)
         << " of length " << size << " bytes\n";
    }
    cur_info = cur_info->GetNextInfo();
  }
  if (free_end_) {
    os << "Free block at address: " << reinterpret_cast<const void*>(free_end_start)
       << " of length " << free_end_ << " bytes\n";
  }
}

void LargeObjectSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  space::LargeObjectSpace* space = context->space->AsLargeObjectSpace();
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // If the bitmaps aren't swapped we need to clear the bits since the GC isn't going to re-swap
  // the bitmaps as an optimization.
  if (!context->swap_bitmaps) {
    accounting::LargeObjectBitmap* bitmap = space->GetLiveBitmap();
    for (size_t i = 0; i < num_ptrs; ++i) {
      bitmap->Clear(ptrs[i]);
    }
  }
  context->freed.objects += num_ptrs;
  context->freed.bytes += space->FreeList(self, num_ptrs, ptrs);
}

collector::ObjectBytePair LargeObjectSpace::Sweep(bool swap_bitmaps) {
  if (Begin() >= End()) {
    return collector::ObjectBytePair(0, 0);
  }
  accounting::LargeObjectBitmap* live_bitmap = GetLiveBitmap();
  accounting::LargeObjectBitmap* mark_bitmap = GetMarkBitmap();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
  }
  AllocSpace::SweepCallbackContext scc(swap_bitmaps, this);
  accounting::LargeObjectBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
                                           reinterpret_cast<uintptr_t>(Begin()),
                                           reinterpret_cast<uintptr_t>(End()), SweepCallback,
                                           &scc);
  return scc.freed;
}

void LargeObjectSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
                                                    size_t /*failed_alloc_bytes*/) {
  UNIMPLEMENTED(FATAL);
}

}  // namespace space
}  // namespace gc
}  // namespace art