/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "large_object_space.h"

#include <sys/mman.h>  // For madvise(), MADV_DONTNEED, PROT_READ and PROT_WRITE.

#include "UniquePtr.h"
#include "dlmalloc.h"
#include "file.h"
#include "image.h"
#include "logging.h"
#include "os.h"
#include "space_bitmap.h"
#include "stl_util.h"
#include "utils.h"

namespace art {

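// Exchange the live and mark object sets; their names are swapped too so each
// name keeps matching its role.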
void LargeObjectSpace::SwapBitmaps() {
  SpaceSetMap* temp_live_objects = live_objects_.release();
  live_objects_.reset(mark_objects_.release());
  mark_objects_.reset(temp_live_objects);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_objects_->GetName();
  live_objects_->SetName(mark_objects_->GetName());
  mark_objects_->SetName(temp_name);
}

LargeObjectSpace::LargeObjectSpace(const std::string& name)
    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
      num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
      total_objects_allocated_(0) {
  live_objects_.reset(new SpaceSetMap("large live objects"));
  mark_objects_.reset(new SpaceSetMap("large marked objects"));
}

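// Seed the mark set with the current contents of the live set.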
void LargeObjectSpace::CopyLiveToMarked() {
  mark_objects_->CopyFrom(*live_objects_.get());
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name),
      lock_("large object space lock", kAllocSpaceLock) {
}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  return new LargeObjectMapSpace(name);
}

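// Back each large object with its own anonymous MemMap and record the mapping
// so Free() and AllocationSize() can find it later. Returns NULL on failure.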
Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes) {
  MemMap* mem_map = MemMap::MapAnonymous("allocation", NULL, num_bytes, PROT_READ | PROT_WRITE);
  if (mem_map == NULL) {
    return NULL;
  }
  MutexLock mu(self, lock_);
  Object* obj = reinterpret_cast<Object*>(mem_map->Begin());
  large_objects_.push_back(obj);
  mem_maps_.Put(obj, mem_map);
  size_t allocation_size = mem_map->Size();
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}

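// Release the MemMap backing |ptr|; aborts if |ptr| was not allocated from this space.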
size_t LargeObjectMapSpace::Free(Thread* self, Object* ptr) {
  MutexLock mu(self, lock_);
  MemMaps::iterator found = mem_maps_.find(ptr);
  CHECK(found != mem_maps_.end()) << "Attempted to free large object which was not live";
  DCHECK_GE(num_bytes_allocated_, found->second->Size());
  size_t allocation_size = found->second->Size();
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  delete found->second;
  mem_maps_.erase(found);
  return allocation_size;
}

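// Return the size of the mapping backing |obj|.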
size_t LargeObjectMapSpace::AllocationSize(const Object* obj) {
  MutexLock mu(Thread::Current(), lock_);
  MemMaps::iterator found = mem_maps_.find(const_cast<Object*>(obj));
  CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
  return found->second->Size();
}

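// Free a batch of objects, returning the total number of bytes released.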
size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, Object** ptrs) {
  size_t total = 0;
  for (size_t i = 0; i < num_ptrs; ++i) {
    if (kDebugSpaces) {
      CHECK(Contains(ptrs[i]));
    }
    total += Free(self, ptrs[i]);
  }
  return total;
}

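// Invoke the callback once per allocated MemMap, each followed by a (NULL, NULL, 0) call.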
void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (MemMaps::iterator it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
    MemMap* mem_map = it->second;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    callback(NULL, NULL, 0, arg);
  }
}

bool LargeObjectMapSpace::Contains(const Object* obj) const {
  MutexLock mu(Thread::Current(), lock_);
  return mem_maps_.find(const_cast<Object*>(obj)) != mem_maps_.end();
}

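// Reserve one contiguous anonymous mapping for the entire space; Alloc() then
// carves chunks out of it.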
FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
  CHECK(size % kAlignment == 0);
  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
                                         PROT_READ | PROT_WRITE);
  CHECK(mem_map != NULL) << "Failed to allocate large object space mem map";
  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}

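// Chunk headers live in a side vector with one entry per kAlignment-sized slot
// of the space (plus a trailing dummy); ChunkFromAddr() and AddrFromChunk()
// convert between headers and payload addresses.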
FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
    : LargeObjectSpace(name),
      begin_(begin),
      end_(end),
      mem_map_(mem_map),
      lock_("free list space lock", kAllocSpaceLock) {
  chunks_.resize(Size() / kAlignment + 1);
  // Add a dummy chunk so we don't need to handle chunks having no next chunk.
  chunks_.back().SetSize(kAlignment, false);
  // Start out with one large free chunk.
  AddFreeChunk(begin_, end_ - begin_, NULL);
}

FreeListSpace::~FreeListSpace() {
}

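// Mark the slot at |address| as a free chunk of |size| bytes, link it to its
// neighbours, and make it available to Alloc().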
void FreeListSpace::AddFreeChunk(void* address, size_t size, Chunk* previous) {
  Chunk* chunk = ChunkFromAddr(address);
  chunk->SetSize(size, true);
  chunk->SetPrevious(previous);
  Chunk* next_chunk = GetNextChunk(chunk);
  next_chunk->SetPrevious(chunk);
  free_chunks_.insert(chunk);
}

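// Translate an address within the space into the header of the chunk covering it.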
FreeListSpace::Chunk* FreeListSpace::ChunkFromAddr(void* address) {
  size_t offset = reinterpret_cast<byte*>(address) - Begin();
  DCHECK(IsAligned<kAlignment>(offset));
  DCHECK_LT(offset, Size());
  return &chunks_[offset / kAlignment];
}

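// Translate a chunk header back to the base address of the memory it describes.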
void* FreeListSpace::AddrFromChunk(Chunk* chunk) {
  return reinterpret_cast<void*>(Begin() + (chunk - &chunks_.front()) * kAlignment);
}

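// free_chunks_ orders chunks by size, so several headers can compare equal;
// scan the equal range for this exact header before erasing it.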
void FreeListSpace::RemoveFreeChunk(Chunk* chunk) {
  // TODO: C++0x
  // TODO: Improve performance, this might be slow.
  std::pair<FreeChunks::iterator, FreeChunks::iterator> range = free_chunks_.equal_range(chunk);
  for (FreeChunks::iterator it = range.first; it != range.second; ++it) {
    if (*it == chunk) {
      free_chunks_.erase(it);
      return;
    }
  }
}

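// Report every allocated chunk to the callback, each followed by a
// (NULL, NULL, 0) call; free chunks are skipped.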
void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (Chunk* chunk = &chunks_.front(); chunk < &chunks_.back(); ) {
    if (!chunk->IsFree()) {
      size_t size = chunk->GetSize();
      void* begin = AddrFromChunk(chunk);
      void* end = reinterpret_cast<void*>(reinterpret_cast<byte*>(begin) + size);
      callback(begin, end, size, arg);
      callback(NULL, NULL, 0, arg);
    }
    chunk = GetNextChunk(chunk);
  }
}

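// Return the chunk backing |obj| to the free list, releasing its pages with
// madvise and coalescing it with any adjacent free chunks.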
size_t FreeListSpace::Free(Thread* self, Object* obj) {
  MutexLock mu(self, lock_);
  CHECK(Contains(obj));
  // Check adjacent chunks to see if we need to combine.
  Chunk* chunk = ChunkFromAddr(obj);
  CHECK(!chunk->IsFree());

  size_t allocation_size = chunk->GetSize();
  madvise(obj, allocation_size, MADV_DONTNEED);
  num_objects_allocated_--;
  num_bytes_allocated_ -= allocation_size;
  Chunk* prev = chunk->GetPrevious();
  Chunk* next = GetNextChunk(chunk);

  // Combine any adjacent free chunks.
  size_t extra_size = chunk->GetSize();
  if (next->IsFree()) {
    extra_size += next->GetSize();
    RemoveFreeChunk(next);
  }
  if (prev != NULL && prev->IsFree()) {
    RemoveFreeChunk(prev);
    AddFreeChunk(AddrFromChunk(prev), prev->GetSize() + extra_size, prev->GetPrevious());
  } else {
    AddFreeChunk(AddrFromChunk(chunk), extra_size, prev);
  }
  return allocation_size;
}

bool FreeListSpace::Contains(const Object* obj) const {
  return mem_map_->HasAddress(obj);
}

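// Headers are contiguous in chunks_, so the next chunk's header lies
// GetSize() / kAlignment entries further on.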
FreeListSpace::Chunk* FreeListSpace::GetNextChunk(Chunk* chunk) {
  return chunk + chunk->GetSize() / kAlignment;
}

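// Return the number of bytes reserved for |obj|, i.e. the size of its chunk.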
size_t FreeListSpace::AllocationSize(const Object* obj) {
  Chunk* chunk = ChunkFromAddr(const_cast<Object*>(obj));
  CHECK(!chunk->IsFree());
  return chunk->GetSize();
}

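// Best-fit allocation: take the smallest free chunk that can hold the
// (alignment-rounded) request and split off any remainder as a new free chunk.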
Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) {
  MutexLock mu(self, lock_);
  num_bytes = RoundUp(num_bytes, kAlignment);
  Chunk temp;
  temp.SetSize(num_bytes);
  // Find the smallest chunk at least num_bytes in size.
  FreeChunks::iterator found = free_chunks_.lower_bound(&temp);
  if (found == free_chunks_.end()) {
    // Out of memory, or too much fragmentation.
    return NULL;
  }
  Chunk* chunk = *found;
  free_chunks_.erase(found);
  CHECK(chunk->IsFree());
  void* addr = AddrFromChunk(chunk);
  size_t chunk_size = chunk->GetSize();
  chunk->SetSize(num_bytes);
  if (chunk_size > num_bytes) {
    // Split the chunk into two chunks.
    Chunk* new_chunk = GetNextChunk(chunk);
    AddFreeChunk(AddrFromChunk(new_chunk), chunk_size - num_bytes, chunk);
  }

  num_objects_allocated_++;
  total_objects_allocated_++;
  num_bytes_allocated_ += num_bytes;
  total_bytes_allocated_ += num_bytes;
  return reinterpret_cast<Object*>(addr);
}

}  // namespace art