/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "large_object_space.h"

#include "base/logging.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "UniquePtr.h"
#include "image.h"
#include "os.h"
#include "thread-inl.h"
#include "utils.h"

namespace art {
namespace gc {
namespace space {

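// Swap the live and mark sets. Presumably invoked by the collector once marking has
// finished, so that the objects marked during this GC become the live set for the next
// cycle; the names are swapped as well so diagnostics keep pointing at the right set.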
void LargeObjectSpace::SwapBitmaps() {
  live_objects_.swap(mark_objects_);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_objects_->GetName();
  live_objects_->SetName(mark_objects_->GetName());
  mark_objects_->SetName(temp_name);
}

LargeObjectSpace::LargeObjectSpace(const std::string& name)
    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
      num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
      total_objects_allocated_(0) {
}

void LargeObjectSpace::CopyLiveToMarked() {
  mark_objects_->CopyFrom(*live_objects_.get());
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name),
      lock_("large object map space lock", kAllocSpaceLock) {}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  return new LargeObjectMapSpace(name);
}

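// Each large object is backed by its own anonymous mem map. The reported allocation size
// is therefore the size of the map, which is num_bytes rounded up to page granularity,
// not num_bytes itself.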
mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
                                           size_t* bytes_allocated) {
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", NULL, num_bytes,
                                         PROT_READ | PROT_WRITE, &error_msg);
  if (UNLIKELY(mem_map == NULL)) {
    LOG(WARNING) << "Large object allocation failed: " << error_msg;
    return NULL;
  }
  MutexLock mu(self, lock_);
  mirror::Object* obj = reinterpret_cast<mirror::Object*>(mem_map->Begin());
  large_objects_.push_back(obj);
  mem_maps_.Put(obj, mem_map);
  size_t allocation_size = mem_map->Size();
  DCHECK(bytes_allocated != NULL);
  *bytes_allocated = allocation_size;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}

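// Freeing a large object deletes its backing MemMap, which unmaps the pages and returns
// the memory to the OS.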
size_t LargeObjectMapSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  MemMaps::iterator found = mem_maps_.find(ptr);
  CHECK(found != mem_maps_.end()) << "Attempted to free large object which was not live";
  DCHECK_GE(num_bytes_allocated_, found->second->Size());
  size_t allocation_size = found->second->Size();
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  delete found->second;
  mem_maps_.erase(found);
  return allocation_size;
}

size_t LargeObjectMapSpace::AllocationSize(const mirror::Object* obj) {
  MutexLock mu(Thread::Current(), lock_);
  MemMaps::iterator found = mem_maps_.find(const_cast<mirror::Object*>(obj));
  CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
  return found->second->Size();
}

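// Bulk free, used for batches of large objects (e.g. by the sweep phase of the GC);
// returns the total number of bytes freed.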
size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
  size_t total = 0;
  for (size_t i = 0; i < num_ptrs; ++i) {
    if (kDebugSpaces) {
      CHECK(Contains(ptrs[i]));
    }
    total += Free(self, ptrs[i]);
  }
  return total;
}

void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (MemMaps::iterator it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
    MemMap* mem_map = it->second;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    callback(NULL, NULL, 0, arg);
  }
}

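// Contains() can be reached on paths that already hold lock_ exclusively; re-acquiring a
// non-recursive mutex would deadlock, hence the IsExclusiveHeld() check below.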
bool LargeObjectMapSpace::Contains(const mirror::Object* obj) const {
  Thread* self = Thread::Current();
  if (lock_.IsExclusiveHeld(self)) {
    // We hold lock_ so do the check.
    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
  } else {
    MutexLock mu(self, lock_);
    return mem_maps_.find(const_cast<mirror::Object*>(obj)) != mem_maps_.end();
  }
}

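// Unlike LargeObjectMapSpace, FreeListSpace carves allocations out of one contiguous mem
// map. A rough sketch of the layout, as implied by the code below:
//
//   begin_                                        end_ - free_end_              end_
//   | header | object | <free gap> | header | object | ... | trailing free region |
//
// Every chunk starts with an AllocationHeader. A header's prev_free_ records the size of
// the free gap immediately before it, and free_blocks_ indexes exactly those headers that
// follow a non-empty gap.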
FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
  CHECK_EQ(size % kAlignment, 0U);
  std::string error_msg;
  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
                                         PROT_READ | PROT_WRITE, &error_msg);
  CHECK(mem_map != NULL) << "Failed to allocate large object space mem map: " << error_msg;
  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}

FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
    : LargeObjectSpace(name),
      begin_(begin),
      end_(end),
      mem_map_(mem_map),
      lock_("free list space lock", kAllocSpaceLock) {
  free_end_ = end - begin;
}

FreeListSpace::~FreeListSpace() {}

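// Walk every allocated chunk from Begin() up to the start of the trailing free region,
// skipping free gaps via GetNextNonFree(), and hand each object to the callback.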
void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationHeader* cur_header = reinterpret_cast<AllocationHeader*>(Begin());
  while (reinterpret_cast<uintptr_t>(cur_header) < free_end_start) {
    cur_header = cur_header->GetNextNonFree();
    size_t alloc_size = cur_header->AllocationSize();
    byte* byte_start = reinterpret_cast<byte*>(cur_header->GetObjectAddress());
    byte* byte_end = byte_start + alloc_size - sizeof(AllocationHeader);
    callback(byte_start, byte_end, alloc_size, arg);
    callback(NULL, NULL, 0, arg);
    cur_header = reinterpret_cast<AllocationHeader*>(byte_end);
  }
}

void FreeListSpace::RemoveFreePrev(AllocationHeader* header) {
  CHECK(!header->IsFree());
  CHECK_GT(header->GetPrevFree(), size_t(0));
  FreeBlocks::iterator found = free_blocks_.lower_bound(header);
  CHECK(found != free_blocks_.end());
  CHECK_EQ(*found, header);
  free_blocks_.erase(found);
}

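// The AllocationHeader sits immediately before the object payload, so recovering it is a
// fixed offset subtraction from the object address.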
FreeListSpace::AllocationHeader* FreeListSpace::GetAllocationHeader(const mirror::Object* obj) {
  DCHECK(Contains(obj));
  return reinterpret_cast<AllocationHeader*>(reinterpret_cast<uintptr_t>(obj) -
      sizeof(AllocationHeader));
}

FreeListSpace::AllocationHeader* FreeListSpace::AllocationHeader::GetNextNonFree() {
  // We know that there has to be at least one object after us or else we would have
  // coalesced with the free end region. May be worth investigating a better way to do this
  // as it may be expensive for large allocations.
  for (uintptr_t pos = reinterpret_cast<uintptr_t>(this);; pos += kAlignment) {
    AllocationHeader* cur = reinterpret_cast<AllocationHeader*>(pos);
    if (!cur->IsFree()) return cur;
  }
}

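// Free coalesces in both directions: the freed chunk absorbs any free gap before it and
// merges with the following free block, or with the trailing free region when it is the
// last chunk. The pages are then madvised away so the kernel can reclaim them while the
// address range stays reserved.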
size_t FreeListSpace::Free(Thread* self, mirror::Object* obj) {
  MutexLock mu(self, lock_);
  DCHECK(Contains(obj));
  AllocationHeader* header = GetAllocationHeader(obj);
  CHECK(IsAligned<kAlignment>(header));
  size_t allocation_size = header->AllocationSize();
  DCHECK_GT(allocation_size, size_t(0));
  DCHECK(IsAligned<kAlignment>(allocation_size));
  // Look at the next chunk.
  AllocationHeader* next_header = header->GetNextAllocationHeader();
  // Calculate the start of the end free block.
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  size_t header_prev_free = header->GetPrevFree();
  size_t new_free_size = allocation_size;
  if (header_prev_free) {
    new_free_size += header_prev_free;
    RemoveFreePrev(header);
  }
  if (reinterpret_cast<uintptr_t>(next_header) >= free_end_start) {
    // Easy case: the next chunk is the end free region.
    CHECK_EQ(reinterpret_cast<uintptr_t>(next_header), free_end_start);
    free_end_ += new_free_size;
  } else {
    AllocationHeader* new_free_header;
    DCHECK(IsAligned<kAlignment>(next_header));
    if (next_header->IsFree()) {
      // Find the next chunk by reading each page until we hit one with a non-zero
      // allocation size.
      AllocationHeader* next_next_header = next_header->GetNextNonFree();
      DCHECK(IsAligned<kAlignment>(next_next_header));
      DCHECK(IsAligned<kAlignment>(next_next_header->AllocationSize()));
      RemoveFreePrev(next_next_header);
      new_free_header = next_next_header;
      new_free_size += next_next_header->GetPrevFree();
    } else {
      new_free_header = next_header;
    }
    new_free_header->prev_free_ = new_free_size;
    free_blocks_.insert(new_free_header);
  }
  --num_objects_allocated_;
  DCHECK_LE(allocation_size, num_bytes_allocated_);
  num_bytes_allocated_ -= allocation_size;
  madvise(header, allocation_size, MADV_DONTNEED);
  if (kIsDebugBuild) {
    // Can't disallow reads since we use them to find next chunks during coalescing.
    mprotect(header, allocation_size, PROT_READ);
  }
  return allocation_size;
}

bool FreeListSpace::Contains(const mirror::Object* obj) const {
  return mem_map_->HasAddress(obj);
}

size_t FreeListSpace::AllocationSize(const mirror::Object* obj) {
  AllocationHeader* header = GetAllocationHeader(obj);
  DCHECK(Contains(obj));
  DCHECK(!header->IsFree());
  return header->AllocationSize();
}

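// Best-fit allocation. free_blocks_ orders headers by the size of the gap that precedes
// them, so lower_bound() on a dummy header whose prev_free_ equals the rounded-up request
// (header included) finds the smallest gap that fits. If no gap is large enough, fall
// back to carving the object out of the trailing free region.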
mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) {
  MutexLock mu(self, lock_);
  size_t allocation_size = RoundUp(num_bytes + sizeof(AllocationHeader), kAlignment);
  AllocationHeader temp;
  temp.SetPrevFree(allocation_size);
  temp.SetAllocationSize(0);
  AllocationHeader* new_header;
  // Find the smallest chunk at least num_bytes in size.
  FreeBlocks::iterator found = free_blocks_.lower_bound(&temp);
  if (found != free_blocks_.end()) {
    AllocationHeader* header = *found;
    free_blocks_.erase(found);

    // Fit our object in the previous free header space.
    new_header = header->GetPrevFreeAllocationHeader();

    // Remove the newly allocated block from the header and update the prev_free_.
    header->prev_free_ -= allocation_size;
    if (header->prev_free_ > 0) {
      // If there is remaining space, insert back into the free set.
      free_blocks_.insert(header);
    }
  } else {
    // Try to steal some memory from the free space at the end of the space.
    if (LIKELY(free_end_ >= allocation_size)) {
      // Fit our object at the start of the end free block.
      new_header = reinterpret_cast<AllocationHeader*>(end_ - free_end_);
      free_end_ -= allocation_size;
    } else {
      return NULL;
    }
  }

  DCHECK(bytes_allocated != NULL);
  *bytes_allocated = allocation_size;

  // Need to do these inside of the lock.
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;

  // We always put our object at the start of the free block; there cannot be another free
  // block before it.
  if (kIsDebugBuild) {
    mprotect(new_header, allocation_size, PROT_READ | PROT_WRITE);
  }
  new_header->SetPrevFree(0);
  new_header->SetAllocationSize(allocation_size);
  return new_header->GetObjectAddress();
}

void FreeListSpace::Dump(std::ostream& os) const {
  MutexLock mu(Thread::Current(), const_cast<Mutex&>(lock_));
  os << GetName() << " -"
     << " begin: " << reinterpret_cast<void*>(Begin())
     << " end: " << reinterpret_cast<void*>(End()) << "\n";
  uintptr_t free_end_start = reinterpret_cast<uintptr_t>(end_) - free_end_;
  AllocationHeader* cur_header = reinterpret_cast<AllocationHeader*>(Begin());
  while (reinterpret_cast<uintptr_t>(cur_header) < free_end_start) {
    byte* free_start = reinterpret_cast<byte*>(cur_header);
    cur_header = cur_header->GetNextNonFree();
    byte* free_end = reinterpret_cast<byte*>(cur_header);
    if (free_start != free_end) {
      os << "Free block at address: " << reinterpret_cast<const void*>(free_start)
         << " of length " << free_end - free_start << " bytes\n";
    }
    size_t alloc_size = cur_header->AllocationSize();
    byte* byte_start = reinterpret_cast<byte*>(cur_header->GetObjectAddress());
    byte* byte_end = byte_start + alloc_size - sizeof(AllocationHeader);
    os << "Large object at address: " << reinterpret_cast<const void*>(byte_start)
       << " of length " << byte_end - byte_start << " bytes\n";
    cur_header = reinterpret_cast<AllocationHeader*>(byte_end);
  }
  if (free_end_) {
    os << "Free block at address: " << reinterpret_cast<const void*>(free_end_start)
       << " of length " << free_end_ << " bytes\n";
  }
}

}  // namespace space
}  // namespace gc
}  // namespace art