/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "large_object_space.h"

#include <memory>
#include <sys/mman.h>  // For madvise/mprotect/PROT_* used below (assumed; may also arrive transitively).

// Assumed home of the VALGRIND_MAKE_MEM_* client requests used by ValgrindLargeObjectMapSpace;
// the exact header path may differ in this build.
#include <valgrind/memcheck.h>

#include "gc/accounting/space_bitmap-inl.h"
#include "base/logging.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "image.h"
#include "os.h"
#include "runtime.h"  // For Runtime::Current()->RunningOnValgrind().
#include "space-inl.h"
#include "thread-inl.h"
#include "utils.h"

namespace art {
namespace gc {
namespace space {

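// A LargeObjectMapSpace which pads each allocation with a red zone on either side, so that
// Valgrind can flag reads or writes that stray past an object. The allocation layout is:
//
//   [red zone (kValgrindRedZoneBytes)][object payload (num_bytes)][red zone]
//
// Callers only ever see the payload address; Free(), AllocationSize() and Contains() translate
// back to the real allocation by subtracting the red zone size.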
class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
 public:
  explicit ValgrindLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
  }

  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size) OVERRIDE {
    mirror::Object* obj =
        LargeObjectMapSpace::Alloc(self, num_bytes + kValgrindRedZoneBytes * 2, bytes_allocated,
                                   usable_size);
    if (UNLIKELY(obj == nullptr)) {
      // The underlying allocation failed; don't offset or poison a null pointer.
      return nullptr;
    }
    mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) + kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<void*>(obj), kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(object_without_rdz) + num_bytes,
                               kValgrindRedZoneBytes);
    if (usable_size != nullptr) {
      *usable_size = num_bytes;  // Since we have redzones, shrink the usable size.
    }
    return object_without_rdz;
  }

  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
    return LargeObjectMapSpace::AllocationSize(object_with_rdz, usable_size);
  }

  virtual size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
    return LargeObjectMapSpace::Free(self, object_with_rdz);
  }

  bool Contains(const mirror::Object* obj) const OVERRIDE {
    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
    return LargeObjectMapSpace::Contains(object_with_rdz);
  }

 private:
  static constexpr size_t kValgrindRedZoneBytes = kPageSize;
};

void LargeObjectSpace::SwapBitmaps() {
  live_bitmap_.swap(mark_bitmap_);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_bitmap_->GetName();
  live_bitmap_->SetName(mark_bitmap_->GetName());
  mark_bitmap_->SetName(temp_name);
}

LargeObjectSpace::LargeObjectSpace(const std::string& name, byte* begin, byte* end)
    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
      num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
      total_objects_allocated_(0), begin_(begin), end_(end) {
}

void LargeObjectSpace::CopyLiveToMarked() {
  mark_bitmap_->CopyFrom(live_bitmap_.get());
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name, nullptr, nullptr),
      lock_("large object map space lock", kAllocSpaceLock) {}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  if (Runtime::Current()->RunningOnValgrind()) {
    return new ValgrindLargeObjectMapSpace(name);
  } else {
    return new LargeObjectMapSpace(name);
  }
}

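// Each large object is backed by its own anonymous MemMap, so it can be returned to the kernel
// individually when freed. The [begin_, end_) range is grown to cover every mapping, since the
// space's bitmaps span that range.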
mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
                                           size_t* bytes_allocated, size_t* usable_size) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", NULL, num_bytes,
                                         PROT_READ | PROT_WRITE, true, &error_msg);
  if (UNLIKELY(mem_map == NULL)) {
    LOG(WARNING) << "Large object allocation failed: " << error_msg;
    return NULL;
  }
  MutexLock mu(self, lock_);
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
  large_objects_.push_back(obj);
  mem_maps_.Put(obj, mem_map);
  size_t allocation_size = mem_map->Size();
  DCHECK(bytes_allocated != nullptr);
  byte* obj_begin = reinterpret_cast<byte*>(obj);
  if (begin_ == nullptr || obj_begin < begin_) {
    // begin_ starts out null; a plain std::min would always keep null, so grow explicitly.
    begin_ = obj_begin;
  }
  byte* obj_end = obj_begin + allocation_size;
  if (end_ == nullptr || obj_end > end_) {
    end_ = obj_end;
  }
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}

size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  MemMaps::iterator found = mem_maps_.find(ptr);
  CHECK(found != mem_maps_.end()) << "Attempted to free large object " << ptr
                                  << " which was not live";
  DCHECK_GE(num_bytes_allocated_, found->second->Size());
  size_t allocation_size = found->second->Size();
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  delete found->second;
  mem_maps_.erase(found);
  return allocation_size;
}

size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  MutexLock mu(Thread::Current(), lock_);
  auto found = mem_maps_.find(obj);
  CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
  size_t allocation_size = found->second->Size();
  if (usable_size != nullptr) {
    // Report the usable size too, matching FreeListSpace::AllocationSize.
    *usable_size = allocation_size;
  }
  return allocation_size;
}

size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  size_t total = 0;
  for (size_t i = 0; i < num_ptrs; ++i) {
    if (kDebugSpaces) {
      CHECK(Contains(ptrs[i]));
    }
    total += Free(self, ptrs[i]);
  }
  return total;
}

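// Visits the backing map of every large object. The callback(NULL, NULL, 0, arg) call after
// each object delimits consecutive objects, mirroring FreeListSpace::Walk below.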
void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
    MemMap* mem_map = it->second;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    callback(NULL, NULL, 0, arg);
  }
}

bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
  Thread* self = Thread::Current();
  if (lock_.IsExclusiveHeld(self)) {
    // We hold lock_ so do the check.
    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
  } else {
    MutexLock mu(self, lock_);
    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
  }
}

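// Unlike LargeObjectMapSpace, a FreeListSpace carves all of its objects out of a single
// anonymous mapping reserved up front, tracking them with AllocationHeaders and a free list.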
FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
  CHECK_EQ(size % kAlignment, 0U);
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
                                         PROT_READ | PROT_WRITE, true, &error_msg);
  CHECK(mem_map != NULL) << "Failed to allocate large object space mem map: " << error_msg;
  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}

FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
    : LargeObjectSpace(name, begin, end),
      mem_map_(mem_map),
      lock_("free list space lock", kAllocSpaceLock) {
  free_end_ = end - begin;
}

FreeListSpace::~FreeListSpace() {}

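// Walks every allocated chunk from Begin() up to the start of the trailing free region,
// skipping runs of free chunks via GetNextNonFree().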
void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationHeader* cur_header = reinterpret_cast<AllocationHeader*>(Begin());
  while (reinterpret_cast<uintptr_t>(cur_header) < free_end_start) {
    cur_header = cur_header->GetNextNonFree();
    size_t alloc_size = cur_header->AllocationSize();
    byte* byte_start = reinterpret_cast<byte*>(cur_header->GetObjectAddress());
    byte* byte_end = byte_start + alloc_size - sizeof(AllocationHeader);
    callback(byte_start, byte_end, alloc_size, arg);
    callback(NULL, NULL, 0, arg);
    cur_header = reinterpret_cast<AllocationHeader*>(byte_end);
  }
}

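// Removes the free block immediately preceding header from the free block set. free_blocks_
// stores the header that *follows* each free block (the block's size lives in that header's
// prev_free_), so the entry to erase is header itself.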
void FreeListSpace::RemoveFreePrev(AllocationHeader* header) {
  CHECK(!header->IsFree());
  CHECK_GT(header->GetPrevFree(), size_t(0));
  FreeBlocks::iterator found = free_blocks_.lower_bound(header);
  CHECK(found != free_blocks_.end());
  CHECK_EQ(*found, header);
  free_blocks_.erase(found);
}

FreeListSpace::AllocationHeader* FreeListSpace::GetAllocationHeader(const mirror::Object* obj) {
  DCHECK(Contains(obj));
  return reinterpret_cast<AllocationHeader*>(reinterpret_cast<uintptr_t>(obj) -
                                             sizeof(AllocationHeader));
}

FreeListSpace::AllocationHeader* FreeListSpace::AllocationHeader::GetNextNonFree() {
  // We know that there has to be at least one object after us or else we would have
  // coalesced with the free end region. May be worth investigating a better way to do this
  // as it may be expensive for large allocations.
  for (uintptr_t pos = reinterpret_cast<uintptr_t>(this);; pos += kAlignment) {
    AllocationHeader* cur = reinterpret_cast<AllocationHeader*>(pos);
    if (!cur->IsFree()) return cur;
  }
}

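// Frees a chunk and coalesces it with any adjacent free space: the free block recorded in
// prev_free_ (if any), a free successor chunk, or the free region at the end of the space.
// The merged block is recorded in the prev_free_ of the next live header, and the pages are
// returned to the kernel with madvise(MADV_DONTNEED).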
size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
  MutexLock mu(self, lock_);
  DCHECK(Contains(obj));
  AllocationHeader* header = GetAllocationHeader(obj);
  CHECK(IsAligned<kAlignment>(header));
  size_t allocation_size = header->AllocationSize();
  DCHECK_GT(allocation_size, size_t(0));
  DCHECK(IsAligned<kAlignment>(allocation_size));
  // Look at the next chunk.
  AllocationHeader* next_header = header->GetNextAllocationHeader();
  // Calculate the start of the end free block.
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  size_t header_prev_free = header->GetPrevFree();
  size_t new_free_size = allocation_size;
  if (header_prev_free) {
    new_free_size += header_prev_free;
    RemoveFreePrev(header);
  }
  if (reinterpret_cast<uintptr_t>(next_header) >= free_end_start) {
    // Easy case, the next chunk is the end free region.
    CHECK_EQ(reinterpret_cast<uintptr_t>(next_header), free_end_start);
    free_end_ += new_free_size;
  } else {
    AllocationHeader* new_free_header;
    DCHECK(IsAligned<kAlignment>(next_header));
    if (next_header->IsFree()) {
      // Scan forward until we hit the next non-free chunk.
      AllocationHeader* next_next_header = next_header->GetNextNonFree();
      DCHECK(IsAligned<kAlignment>(next_next_header));
      DCHECK(IsAligned<kAlignment>(next_next_header->AllocationSize()));
      RemoveFreePrev(next_next_header);
      new_free_header = next_next_header;
      new_free_size += next_next_header->GetPrevFree();
    } else {
      new_free_header = next_header;
    }
    new_free_header->prev_free_ = new_free_size;
    free_blocks_.insert(new_free_header);
  }
  --num_objects_allocated_;
  DCHECK_LE(allocation_size, num_bytes_allocated_);
  num_bytes_allocated_ -= allocation_size;
  madvise(header, allocation_size, MADV_DONTNEED);
  if (kIsDebugBuild) {
    // Can't disallow reads since we use them to find next chunks during coalescing.
    mprotect(header, allocation_size, PROT_READ);
  }
  return allocation_size;
}

bool FreeListSpace::Contains(const mirror::Object* obj) const {
  return mem_map_->HasAddress(obj);
}

size_t FreeListSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  AllocationHeader* header = GetAllocationHeader(obj);
  DCHECK(Contains(obj));
  DCHECK(!header->IsFree());
  size_t alloc_size = header->AllocationSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size - sizeof(AllocationHeader);
  }
  return alloc_size;
}

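// Best-fit allocation: a dummy header whose prev_free_ is the rounded-up request size serves
// as the search key, so lower_bound yields the smallest free block that still fits. If no
// tracked block fits, the allocation is carved from the free region at the end of the space.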
mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size) {
  MutexLock mu(self, lock_);
  size_t allocation_size = RoundUp(num_bytes + sizeof(AllocationHeader), kAlignment);
  AllocationHeader temp;
  temp.SetPrevFree(allocation_size);
  temp.SetAllocationSize(0);
  AllocationHeader* new_header;
  // Find the smallest chunk at least num_bytes in size.
  FreeBlocks::iterator found = free_blocks_.lower_bound(&temp);
  if (found != free_blocks_.end()) {
    AllocationHeader* header = *found;
    free_blocks_.erase(found);

    // Fit our object in the previous free header space.
    new_header = header->GetPrevFreeAllocationHeader();

    // Remove the newly allocated block from the header and update the prev_free_.
    header->prev_free_ -= allocation_size;
    if (header->prev_free_ > 0) {
      // If there is remaining space, insert back into the free set.
      free_blocks_.insert(header);
    }
  } else {
    // Try to steal some memory from the free space at the end of the space.
    if (LIKELY(free_end_ >= allocation_size)) {
      // Fit our object at the start of the end free block.
      new_header = reinterpret_cast<AllocationHeader*>(end_ - free_end_);
      free_end_ -= allocation_size;
    } else {
      return nullptr;
    }
  }

  DCHECK(bytes_allocated != nullptr);
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size - sizeof(AllocationHeader);
  }
  // Need to do these inside of the lock.
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;

  // We always put our object at the start of the free block; there cannot be another free
  // block before it.
  if (kIsDebugBuild) {
    mprotect(new_header, allocation_size, PROT_READ | PROT_WRITE);
  }
  new_header->SetPrevFree(0);
  new_header->SetAllocationSize(allocation_size);
  return new_header->GetObjectAddress();
}

void FreeListSpace::Dump(std::ostream& os) const {
  MutexLock mu(Thread::Current(), const_cast<Mutex&>(lock_));
  os << GetName() << " -"
     << " begin: " << reinterpret_cast<void*>(Begin())
     << " end: " << reinterpret_cast<void*>(End()) << "\n";
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationHeader* cur_header = reinterpret_cast<AllocationHeader*>(Begin());
  while (reinterpret_cast<uintptr_t>(cur_header) < free_end_start) {
    byte* free_start = reinterpret_cast<byte*>(cur_header);
    cur_header = cur_header->GetNextNonFree();
    byte* free_end = reinterpret_cast<byte*>(cur_header);
    if (free_start != free_end) {
      os << "Free block at address: " << reinterpret_cast<const void*>(free_start)
         << " of length " << free_end - free_start << " bytes\n";
    }
    size_t alloc_size = cur_header->AllocationSize();
    byte* byte_start = reinterpret_cast<byte*>(cur_header->GetObjectAddress());
    byte* byte_end = byte_start + alloc_size - sizeof(AllocationHeader);
    // Print the allocation's own address, not that of the preceding free block.
    os << "Large object at address: " << reinterpret_cast<const void*>(cur_header)
       << " of length " << byte_end - byte_start << " bytes\n";
    cur_header = reinterpret_cast<AllocationHeader*>(byte_end);
  }
  if (free_end_) {
    os << "Free block at address: " << reinterpret_cast<const void*>(free_end_start)
       << " of length " << free_end_ << " bytes\n";
  }
}

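// Invoked by SweepWalk for each batch of objects that are live but unmarked, i.e. garbage.
// Their live bits are cleared here unless the GC will swap the bitmaps itself (in which case
// the stale bits vanish with the swap), and the objects are then freed in bulk.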
void LargeObjectSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  space::LargeObjectSpace* space = context->space->AsLargeObjectSpace();
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  // If the bitmaps aren't swapped we need to clear the bits since the GC isn't going to re-swap
  // the bitmaps as an optimization.
  if (!context->swap_bitmaps) {
    accounting::LargeObjectBitmap* bitmap = space->GetLiveBitmap();
    for (size_t i = 0; i < num_ptrs; ++i) {
      bitmap->Clear(ptrs[i]);
    }
  }
  context->freed_objects += num_ptrs;
  context->freed_bytes += space->FreeList(self, num_ptrs, ptrs);
}

void LargeObjectSpace::Sweep(bool swap_bitmaps, size_t* out_freed_objects,
                             size_t* out_freed_bytes) {
  if (Begin() >= End()) {
    return;
  }
  accounting::LargeObjectBitmap* live_bitmap = GetLiveBitmap();
  accounting::LargeObjectBitmap* mark_bitmap = GetMarkBitmap();
  if (swap_bitmaps) {
    std::swap(live_bitmap, mark_bitmap);
  }
  DCHECK(out_freed_objects != nullptr);
  DCHECK(out_freed_bytes != nullptr);
  SweepCallbackContext scc(swap_bitmaps, this);
  accounting::LargeObjectBitmap::SweepWalk(*live_bitmap, *mark_bitmap,
                                           reinterpret_cast<uintptr_t>(Begin()),
                                           reinterpret_cast<uintptr_t>(End()), SweepCallback, &scc);
  *out_freed_objects += scc.freed_objects;
  *out_freed_bytes += scc.freed_bytes;
}

}  // namespace space
}  // namespace gc
}  // namespace art