/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "large_object_space.h"

#include <sys/mman.h>  // For madvise, MADV_DONTNEED, PROT_READ, PROT_WRITE.

#include "UniquePtr.h"
#include "dlmalloc.h"
#include "file.h"
#include "image.h"
#include "logging.h"
#include "os.h"
#include "space_bitmap.h"
#include "stl_util.h"
#include "utils.h"

namespace art {

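// Exchange the live and mark object sets (and their names) so that the set built up during
// marking becomes the new live set.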
void LargeObjectSpace::SwapBitmaps() {
  live_objects_.swap(mark_objects_);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_objects_->GetName();
  live_objects_->SetName(mark_objects_->GetName());
  mark_objects_->SetName(temp_name);
}

LargeObjectSpace::LargeObjectSpace(const std::string& name)
    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
      num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
      total_objects_allocated_(0) {
  live_objects_.reset(new SpaceSetMap("large live objects"));
  mark_objects_.reset(new SpaceSetMap("large marked objects"));
}

void LargeObjectSpace::CopyLiveToMarked() {
  mark_objects_->CopyFrom(*live_objects_.get());
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name),
      lock_("large object space lock", kAllocSpaceLock) {
}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  return new LargeObjectMapSpace(name);
}

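// Each large object gets its own dedicated anonymous mem map; the recorded allocation size is
// the size of that map rather than the requested byte count.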
Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes) {
  MemMap* mem_map = MemMap::MapAnonymous("allocation", NULL, num_bytes, PROT_READ | PROT_WRITE);
  if (mem_map == NULL) {
    return NULL;
  }
  MutexLock mu(self, lock_);
  Object* obj = reinterpret_cast<Object*>(mem_map->Begin());
  large_objects_.push_back(obj);
  mem_maps_.Put(obj, mem_map);
  size_t allocation_size = mem_map->Size();
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}

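// Releases the object's backing mem map. The num_* counters are decremented here, while the
// total_* counters track lifetime allocation and are intentionally left untouched.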
size_t LargeObjectMapSpace::Free(Thread* self, Object* ptr) {
  MutexLock mu(self, lock_);
  MemMaps::iterator found = mem_maps_.find(ptr);
  CHECK(found != mem_maps_.end()) << "Attempted to free large object which was not live";
  DCHECK_GE(num_bytes_allocated_, found->second->Size());
  size_t allocation_size = found->second->Size();
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  delete found->second;
  mem_maps_.erase(found);
  return allocation_size;
}

size_t LargeObjectMapSpace::AllocationSize(const Object* obj) {
  MutexLock mu(Thread::Current(), lock_);
  MemMaps::iterator found = mem_maps_.find(const_cast<Object*>(obj));
  CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
  return found->second->Size();
}

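// Frees a batch of objects, returning the total number of bytes reclaimed.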
size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, Object** ptrs) {
  size_t total = 0;
  for (size_t i = 0; i < num_ptrs; ++i) {
    if (kDebugSpaces) {
      CHECK(Contains(ptrs[i]));
    }
    total += Free(self, ptrs[i]);
  }
  return total;
}

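// Visits each mem map as one allocated range, then signals the end of that region with a
// callback(NULL, NULL, 0, arg), matching what consumers of DlMallocSpace::WalkCallback expect.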
void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (MemMaps::iterator it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
    MemMap* mem_map = it->second;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    callback(NULL, NULL, 0, arg);
  }
}

bool LargeObjectMapSpace::Contains(const Object* obj) const {
  MutexLock mu(Thread::Current(), lock_);
  return mem_maps_.find(const_cast<Object*>(obj)) != mem_maps_.end();
}

FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
  CHECK(size % kAlignment == 0);
  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
                                         PROT_READ | PROT_WRITE);
  CHECK(mem_map != NULL) << "Failed to allocate large object space mem map";
  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}

FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
    : LargeObjectSpace(name),
      begin_(begin),
      end_(end),
      mem_map_(mem_map),
      lock_("free list space lock", kAllocSpaceLock) {
  chunks_.resize(Size() / kAlignment + 1);
  // Add a dummy chunk so we don't need to handle chunks having no next chunk.
  chunks_.back().SetSize(kAlignment, false);
  // Start out with one large free chunk.
  AddFreeChunk(begin_, end_ - begin_, NULL);
}

FreeListSpace::~FreeListSpace() {}

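// Marks the bookkeeping entry for |address| as a free chunk of |size| bytes, links it to its
// neighbors, and adds it to the size-ordered free set.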
void FreeListSpace::AddFreeChunk(void* address, size_t size, Chunk* previous) {
  Chunk* chunk = ChunkFromAddr(address);
  chunk->SetSize(size, true);
  chunk->SetPrevious(previous);
  Chunk* next_chunk = GetNextChunk(chunk);
  next_chunk->SetPrevious(chunk);
  free_chunks_.insert(chunk);
}

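// chunks_ keeps one bookkeeping entry per kAlignment-sized slot, so addresses and chunk entries
// convert back and forth via the slot index.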
FreeListSpace::Chunk* FreeListSpace::ChunkFromAddr(void* address) {
  size_t offset = reinterpret_cast<byte*>(address) - Begin();
  DCHECK(IsAligned<kAlignment>(offset));
  DCHECK_LT(offset, Size());
  return &chunks_[offset / kAlignment];
}

void* FreeListSpace::AddrFromChunk(Chunk* chunk) {
  return reinterpret_cast<void*>(Begin() + (chunk - &chunks_.front()) * kAlignment);
}

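// free_chunks_ orders chunks by size, so equal_range narrows the search to same-sized chunks
// and a linear scan locates the exact entry to erase.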
void FreeListSpace::RemoveFreeChunk(Chunk* chunk) {
  // TODO: C++0x
  // TODO: Improve performance, this might be slow.
  std::pair<FreeChunks::iterator, FreeChunks::iterator> range = free_chunks_.equal_range(chunk);
  for (FreeChunks::iterator it = range.first; it != range.second; ++it) {
    if (*it == chunk) {
      free_chunks_.erase(it);
      return;
    }
  }
}

void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (Chunk* chunk = &chunks_.front(); chunk < &chunks_.back(); ) {
    if (!chunk->IsFree()) {
      size_t size = chunk->GetSize();
      void* begin = AddrFromChunk(chunk);
      void* end = reinterpret_cast<void*>(reinterpret_cast<byte*>(begin) + size);
      callback(begin, end, size, arg);
      callback(NULL, NULL, 0, arg);
    }
    chunk = GetNextChunk(chunk);
  }
}

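// Returns a chunk to the free list, coalescing it with adjacent free chunks and handing the
// pages back to the kernel with madvise(MADV_DONTNEED).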
size_t FreeListSpace::Free(Thread* self, Object* obj) {
  MutexLock mu(self, lock_);
  CHECK(Contains(obj));
  // Check adjacent chunks to see if we need to combine.
  Chunk* chunk = ChunkFromAddr(obj);
  CHECK(!chunk->IsFree());

  size_t allocation_size = chunk->GetSize();
  madvise(obj, allocation_size, MADV_DONTNEED);
  num_objects_allocated_--;
  num_bytes_allocated_ -= allocation_size;
  Chunk* prev = chunk->GetPrevious();
  Chunk* next = GetNextChunk(chunk);

  // Combine any adjacent free chunks.
  size_t extra_size = chunk->GetSize();
  if (next->IsFree()) {
    extra_size += next->GetSize();
    RemoveFreeChunk(next);
  }
  if (prev != NULL && prev->IsFree()) {
    RemoveFreeChunk(prev);
    AddFreeChunk(AddrFromChunk(prev), prev->GetSize() + extra_size, prev->GetPrevious());
  } else {
    AddFreeChunk(AddrFromChunk(chunk), extra_size, prev);
  }
  return allocation_size;
}

bool FreeListSpace::Contains(const Object* obj) const {
  return mem_map_->HasAddress(obj);
}

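// A chunk's size is stored in its first bookkeeping entry, so the next chunk starts
// size / kAlignment entries later.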
FreeListSpace::Chunk* FreeListSpace::GetNextChunk(Chunk* chunk) {
  return chunk + chunk->GetSize() / kAlignment;
}

size_t FreeListSpace::AllocationSize(const Object* obj) {
  Chunk* chunk = ChunkFromAddr(const_cast<Object*>(obj));
  CHECK(!chunk->IsFree());
  return chunk->GetSize();
}

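// Best-fit allocation: take the smallest free chunk that can hold num_bytes and split off any
// remainder as a new free chunk.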
Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) {
  MutexLock mu(self, lock_);
  num_bytes = RoundUp(num_bytes, kAlignment);
  Chunk temp;
  temp.SetSize(num_bytes);
  // Find the smallest chunk at least num_bytes in size.
  FreeChunks::iterator found = free_chunks_.lower_bound(&temp);
  if (found == free_chunks_.end()) {
    // Out of memory, or too much fragmentation.
    return NULL;
  }
  Chunk* chunk = *found;
  free_chunks_.erase(found);
  CHECK(chunk->IsFree());
  void* addr = AddrFromChunk(chunk);
  size_t chunk_size = chunk->GetSize();
  chunk->SetSize(num_bytes);
  if (chunk_size > num_bytes) {
    // Split the chunk into two chunks.
    Chunk* new_chunk = GetNextChunk(chunk);
    AddFreeChunk(AddrFromChunk(new_chunk), chunk_size - num_bytes, chunk);
  }

  num_objects_allocated_++;
  total_objects_allocated_++;
  num_bytes_allocated_ += num_bytes;
  total_bytes_allocated_ += num_bytes;
  return reinterpret_cast<Object*>(addr);
}

void FreeListSpace::Dump(std::ostream& os) const {
  os << GetName() << " -"
     << " begin: " << reinterpret_cast<void*>(Begin())
     << " end: " << reinterpret_cast<void*>(End());
}

}  // namespace art