/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "large_object_space.h"

#include "base/logging.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "UniquePtr.h"
#include "image.h"
#include "os.h"
#include "thread-inl.h"
#include "utils.h"

namespace art {
namespace gc {
namespace space {

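// A wrapper around LargeObjectMapSpace that pads each allocation with a page-sized "red
// zone" on both sides and tells Valgrind those zones are inaccessible, so any buffer
// underflow or overflow on a large object is reported as an invalid access. Only selected
// by Create() when the runtime itself is running under Valgrind.
//
// Layout of a wrapped allocation (kValgrindRedZoneBytes == kPageSize):
//   [red zone][object payload (num_bytes)][red zone]
//   ^ NOACCESS                            ^ NOACCESS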
class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
 public:
  explicit ValgrindLargeObjectMapSpace(const std::string& name) : LargeObjectMapSpace(name) {
  }

  virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                size_t* usable_size) OVERRIDE {
    mirror::Object* obj =
        LargeObjectMapSpace::Alloc(self, num_bytes + kValgrindRedZoneBytes * 2, bytes_allocated,
                                   usable_size);
    if (UNLIKELY(obj == nullptr)) {
      // Propagate the failure: offsetting a null pointer by the red zone size would
      // otherwise produce a bogus non-null result.
      return nullptr;
    }
    mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) + kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<void*>(obj), kValgrindRedZoneBytes);
    VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<byte*>(object_without_rdz) + num_bytes,
                               kValgrindRedZoneBytes);
    if (usable_size != nullptr) {
      *usable_size = num_bytes;  // Since we have redzones, shrink the usable size.
    }
    return object_without_rdz;
  }

  virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
    return LargeObjectMapSpace::AllocationSize(object_with_rdz, usable_size);
  }

  virtual size_t Free(Thread* self, mirror::Object* obj) OVERRIDE {
    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
    // Make the whole allocation, red zones included, accessible again before handing it
    // back to the underlying space.
    VALGRIND_MAKE_MEM_UNDEFINED(object_with_rdz, AllocationSize(obj, nullptr));
    return LargeObjectMapSpace::Free(self, object_with_rdz);
  }

  bool Contains(const mirror::Object* obj) const OVERRIDE {
    mirror::Object* object_with_rdz = reinterpret_cast<mirror::Object*>(
        reinterpret_cast<uintptr_t>(obj) - kValgrindRedZoneBytes);
    return LargeObjectMapSpace::Contains(object_with_rdz);
  }

 private:
  static constexpr size_t kValgrindRedZoneBytes = kPageSize;
};

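// Swaps the live and mark object sets, along with their names so diagnostics stay
// descriptive. The collector uses this after marking so that the freshly built mark set
// can serve as the live set.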
void LargeObjectSpace::SwapBitmaps() {
  live_objects_.swap(mark_objects_);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_objects_->GetName();
  live_objects_->SetName(mark_objects_->GetName());
  mark_objects_->SetName(temp_name);
}

LargeObjectSpace::LargeObjectSpace(const std::string& name)
    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
      num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
      total_objects_allocated_(0) {
}

void LargeObjectSpace::CopyLiveToMarked() {
  mark_objects_->CopyFrom(*live_objects_.get());
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name),
      lock_("large object map space lock", kAllocSpaceLock) {}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  if (RUNNING_ON_VALGRIND > 0) {
    return new ValgrindLargeObjectMapSpace(name);
  } else {
    return new LargeObjectMapSpace(name);
  }
}

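// Each large object gets its own anonymous MemMap, so the reported allocation size is the
// full map size (num_bytes rounded up to a page boundary by MemMap).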
mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
                                           size_t* bytes_allocated, size_t* usable_size) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
                                         PROT_READ | PROT_WRITE, true, &error_msg);
  if (UNLIKELY(mem_map == nullptr)) {
    LOG(WARNING) << "Large object allocation failed: " << error_msg;
    return nullptr;
  }
  MutexLock mu(self, lock_);
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
  large_objects_.push_back(obj);
  mem_maps_.Put(obj, mem_map);
  size_t allocation_size = mem_map->Size();
  DCHECK(bytes_allocated != nullptr);
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size;
  }
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}

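// Freeing a large object unmaps its MemMap and updates the accounting under lock_. Freeing
// an object that is not live is a hard error (CHECK) rather than a debug-only check, since
// it indicates a double free or an invalid pointer.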
size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  MemMaps::iterator found = mem_maps_.find(ptr);
  CHECK(found != mem_maps_.end()) << "Attempted to free large object which was not live";
  DCHECK_GE(num_bytes_allocated_, found->second->Size());
  size_t allocation_size = found->second->Size();
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  delete found->second;
  mem_maps_.erase(found);
  return allocation_size;
}

size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  MutexLock mu(Thread::Current(), lock_);
  auto found = mem_maps_.find(obj);
  CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
  size_t allocation_size = found->second->Size();
  if (usable_size != nullptr) {
    // The whole map is usable, matching what Alloc() reports.
    *usable_size = allocation_size;
  }
  return allocation_size;
}

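// Batch free used by the sweeping phase: frees every pointer in the array and returns the
// total number of bytes reclaimed.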
size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  size_t total = 0;
  for (size_t i = 0; i < num_ptrs; ++i) {
    if (kDebugSpaces) {
      CHECK(Contains(ptrs[i]));
    }
    total += Free(self, ptrs[i]);
  }
  return total;
}

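// Visits each large object under lock_: the callback receives the object's bounds and
// size, followed by a null/zero call that callers can use as an end-of-object marker.
// FreeListSpace::Walk below follows the same two-call convention.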
void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (auto it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
    MemMap* mem_map = it->second;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    callback(nullptr, nullptr, 0, arg);
  }
}

bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
  Thread* self = Thread::Current();
  if (lock_.IsExclusiveHeld(self)) {
    // We already hold lock_ and it is not reentrant, so do the lookup without re-acquiring.
    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
  } else {
    MutexLock mu(self, lock_);
    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
  }
}

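// FreeListSpace manages large objects inside a single contiguous mapping. Each allocation
// is preceded by an AllocationHeader; free space in front of an allocated header is
// recorded in that header's prev_free_ field and indexed in free_blocks_, while the free
// tail of the mapping is tracked separately as free_end_.
//
// Layout of one allocation:
//   [free gap (prev_free_ bytes)][AllocationHeader][object payload]
//                                                  ^ GetObjectAddress()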
FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
  CHECK_EQ(size % kAlignment, 0U);
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
                                         PROT_READ | PROT_WRITE, true, &error_msg);
  CHECK(mem_map != nullptr) << "Failed to allocate large object space mem map: " << error_msg;
  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}

FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
    : LargeObjectSpace(name),
      begin_(begin),
      end_(end),
      mem_map_(mem_map),
      lock_("free list space lock", kAllocSpaceLock) {
  // Initially the entire space is one free tail block.
  free_end_ = end - begin;
}

FreeListSpace::~FreeListSpace() {}

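// Walks all allocated objects by scanning headers from Begin() up to the start of the free
// tail, skipping free gaps via GetNextNonFree().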
void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationHeader* cur_header = reinterpret_cast<AllocationHeader*>(Begin());
  while (reinterpret_cast<uintptr_t>(cur_header) < free_end_start) {
    cur_header = cur_header->GetNextNonFree();
    size_t alloc_size = cur_header->AllocationSize();
    byte* byte_start = reinterpret_cast<byte*>(cur_header->GetObjectAddress());
    byte* byte_end = byte_start + alloc_size - sizeof(AllocationHeader);
    callback(byte_start, byte_end, alloc_size, arg);
    callback(nullptr, nullptr, 0, arg);
    cur_header = reinterpret_cast<AllocationHeader*>(byte_end);
  }
}

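// Removes the free block recorded in header's prev_free_ from the free-block index. The
// header itself must be allocated; only the gap in front of it is being dropped.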
void FreeListSpace::RemoveFreePrev(AllocationHeader* header) {
  CHECK(!header->IsFree());
  CHECK_GT(header->GetPrevFree(), size_t(0));
  FreeBlocks::iterator found = free_blocks_.lower_bound(header);
  CHECK(found != free_blocks_.end());
  CHECK_EQ(*found, header);
  free_blocks_.erase(found);
}

FreeListSpace::AllocationHeader* FreeListSpace::GetAllocationHeader(const mirror::Object* obj) {
  DCHECK(Contains(obj));
  return reinterpret_cast<AllocationHeader*>(reinterpret_cast<uintptr_t>(obj) -
                                             sizeof(AllocationHeader));
}

FreeListSpace::AllocationHeader* FreeListSpace::AllocationHeader::GetNextNonFree() {
  // We know that there has to be at least one object after us or else we would have
  // coalesced with the free end region. May be worth investigating a better way to do this
  // as it may be expensive for large allocations.
  for (uintptr_t pos = reinterpret_cast<uintptr_t>(this);; pos += kAlignment) {
    AllocationHeader* cur = reinterpret_cast<AllocationHeader*>(pos);
    if (!cur->IsFree()) {
      return cur;
    }
  }
}

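// Frees an object and coalesces the resulting hole with any neighboring free space: the
// gap in front of the freed block (prev_free_), a free block immediately after it, or the
// free tail of the space. The pages are then madvised away so the kernel can reclaim them.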
size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
  MutexLock mu(self, lock_);
  DCHECK(Contains(obj));
  AllocationHeader* header = GetAllocationHeader(obj);
  CHECK(IsAligned<kAlignment>(header));
  size_t allocation_size = header->AllocationSize();
  DCHECK_GT(allocation_size, size_t(0));
  DCHECK(IsAligned<kAlignment>(allocation_size));
  // Look at the next chunk.
  AllocationHeader* next_header = header->GetNextAllocationHeader();
  // Calculate the start of the end free block.
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  size_t header_prev_free = header->GetPrevFree();
  size_t new_free_size = allocation_size;
  if (header_prev_free) {
    // Coalesce with the free space immediately before this allocation.
    new_free_size += header_prev_free;
    RemoveFreePrev(header);
  }
  if (reinterpret_cast<uintptr_t>(next_header) >= free_end_start) {
    // Easy case, the next chunk is the end free region.
    CHECK_EQ(reinterpret_cast<uintptr_t>(next_header), free_end_start);
    free_end_ += new_free_size;
  } else {
    AllocationHeader* new_free_header;
    DCHECK(IsAligned<kAlignment>(next_header));
    if (next_header->IsFree()) {
      // The next chunk is also free: skip ahead to the next allocated header and merge the
      // free space recorded in its prev_free_ into ours.
      AllocationHeader* next_next_header = next_header->GetNextNonFree();
      DCHECK(IsAligned<kAlignment>(next_next_header));
      DCHECK(IsAligned<kAlignment>(next_next_header->AllocationSize()));
      RemoveFreePrev(next_next_header);
      new_free_header = next_next_header;
      new_free_size += next_next_header->GetPrevFree();
    } else {
      new_free_header = next_header;
    }
    new_free_header->prev_free_ = new_free_size;
    free_blocks_.insert(new_free_header);
  }
  --num_objects_allocated_;
  DCHECK_LE(allocation_size, num_bytes_allocated_);
  num_bytes_allocated_ -= allocation_size;
  // Let the kernel reclaim the backing pages.
  madvise(header, allocation_size, MADV_DONTNEED);
  if (kIsDebugBuild) {
    // Can't disallow reads since we use them to find next chunks during coalescing.
    mprotect(header, allocation_size, PROT_READ);
  }
  return allocation_size;
}

bool FreeListSpace::Contains(const mirror::Object* obj) const {
  return mem_map_->HasAddress(obj);
}

size_t FreeListSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
  AllocationHeader* header = GetAllocationHeader(obj);
  DCHECK(Contains(obj));
  DCHECK(!header->IsFree());
  size_t alloc_size = header->AllocationSize();
  if (usable_size != nullptr) {
    *usable_size = alloc_size - sizeof(AllocationHeader);
  }
  return alloc_size;
}

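// Allocation is best fit: free_blocks_ appears to be ordered by the size of the recorded
// free gap, so lower_bound() on a dummy header whose prev_free_ equals the rounded request
// finds the smallest free block that fits. If none fits, the request is carved from the
// free tail of the space instead.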
mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
                                     size_t* usable_size) {
  MutexLock mu(self, lock_);
  size_t allocation_size = RoundUp(num_bytes + sizeof(AllocationHeader), kAlignment);
  AllocationHeader temp;
  temp.SetPrevFree(allocation_size);
  temp.SetAllocationSize(0);
  AllocationHeader* new_header;
  // Find the smallest chunk at least num_bytes in size.
  FreeBlocks::iterator found = free_blocks_.lower_bound(&temp);
  if (found != free_blocks_.end()) {
    AllocationHeader* header = *found;
    free_blocks_.erase(found);

    // Fit our object in the previous free header space.
    new_header = header->GetPrevFreeAllocationHeader();

    // Remove the newly allocated block from the header and update the prev_free_.
    header->prev_free_ -= allocation_size;
    if (header->prev_free_ > 0) {
      // If there is remaining space, insert back into the free set.
      free_blocks_.insert(header);
    }
  } else {
    // Try to steal some memory from the free space at the end of the space.
    if (LIKELY(free_end_ >= allocation_size)) {
      // Fit our object at the start of the end free block.
      new_header = reinterpret_cast<AllocationHeader*>(end_ - free_end_);
      free_end_ -= allocation_size;
    } else {
      return nullptr;
    }
  }

  DCHECK(bytes_allocated != nullptr);
  *bytes_allocated = allocation_size;
  if (usable_size != nullptr) {
    *usable_size = allocation_size - sizeof(AllocationHeader);
  }
  // Need to do these inside of the lock.
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;

  // We always put our object at the start of the free block, so there cannot be another
  // free block before it.
  if (kIsDebugBuild) {
    mprotect(new_header, allocation_size, PROT_READ | PROT_WRITE);
  }
  new_header->SetPrevFree(0);
  new_header->SetAllocationSize(allocation_size);
  return new_header->GetObjectAddress();
}

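// Dumps the space layout for debugging: alternating free blocks and large objects, scanned
// from the start of the space, followed by the free tail if one exists.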
void FreeListSpace::Dump(std::ostream& os) const {
  MutexLock mu(Thread::Current(), const_cast<Mutex&>(lock_));
  os << GetName() << " -"
     << " begin: " << reinterpret_cast<void*>(Begin())
     << " end: " << reinterpret_cast<void*>(End()) << "\n";
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationHeader* cur_header = reinterpret_cast<AllocationHeader*>(Begin());
  while (reinterpret_cast<uintptr_t>(cur_header) < free_end_start) {
    byte* free_start = reinterpret_cast<byte*>(cur_header);
    cur_header = cur_header->GetNextNonFree();
    byte* free_end = reinterpret_cast<byte*>(cur_header);
    if (free_start != free_end) {
      os << "Free block at address: " << reinterpret_cast<const void*>(free_start)
         << " of length " << free_end - free_start << " bytes\n";
    }
    size_t alloc_size = cur_header->AllocationSize();
    byte* byte_start = reinterpret_cast<byte*>(cur_header->GetObjectAddress());
    byte* byte_end = byte_start + alloc_size - sizeof(AllocationHeader);
    // Print the object's own address, not free_start, which points at the preceding free
    // block when one exists.
    os << "Large object at address: " << reinterpret_cast<const void*>(byte_start)
       << " of length " << byte_end - byte_start << " bytes\n";
    cur_header = reinterpret_cast<AllocationHeader*>(byte_end);
  }
  if (free_end_) {
    os << "Free block at address: " << reinterpret_cast<const void*>(free_end_start)
       << " of length " << free_end_ << " bytes\n";
  }
}

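// Frees every object that is in the live set but not in the mark set. When the collector
// has already swapped the heap bitmaps, swap_bitmaps exchanges the two sets here as well
// so the test below still compares live against marked.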
void LargeObjectSpace::Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes) {
  // Sweep large objects.
  accounting::ObjectSet* large_live_objects = GetLiveObjects();
  accounting::ObjectSet* large_mark_objects = GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  DCHECK(freed_objects != nullptr);
  DCHECK(freed_bytes != nullptr);
  // O(n*log(n)) but hopefully there are not too many large objects.
  size_t objects = 0;
  size_t bytes = 0;
  Thread* self = Thread::Current();
  for (const mirror::Object* obj : large_live_objects->GetObjects()) {
    if (!large_mark_objects->Test(obj)) {
      bytes += Free(self, const_cast<mirror::Object*>(obj));
      ++objects;
    }
  }
  *freed_objects += objects;
  *freed_bytes += bytes;
}

}  // namespace space
}  // namespace gc
}  // namespace art