/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "large_object_space.h"

#include <sys/mman.h>  // For madvise() and the PROT_* constants used below.

#include "base/logging.h"
#include "UniquePtr.h"
#include "dlmalloc.h"
#include "image.h"
#include "os.h"
#include "space_bitmap.h"
#include "stl_util.h"
#include "utils.h"

namespace art {

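// Exchanges the live and mark object sets so that the survivors recorded
// during marking become the new live set.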
void LargeObjectSpace::SwapBitmaps() {
  live_objects_.swap(mark_objects_);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_objects_->GetName();
  live_objects_->SetName(mark_objects_->GetName());
  mark_objects_->SetName(temp_name);
}

LargeObjectSpace::LargeObjectSpace(const std::string& name)
    : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
      num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
      total_objects_allocated_(0) {
  live_objects_.reset(new SpaceSetMap("large live objects"));
  mark_objects_.reset(new SpaceSetMap("large marked objects"));
}

void LargeObjectSpace::CopyLiveToMarked() {
  mark_objects_->CopyFrom(*live_objects_.get());
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name),
      lock_("large object space lock", kAllocSpaceLock) {
}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  return new LargeObjectMapSpace(name);
}

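// Each large object is backed by its own anonymous MemMap, so the accounted
// allocation size is the size of the mapping (which may be rounded up to page
// granularity) rather than the requested byte count.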
Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes) {
  MemMap* mem_map = MemMap::MapAnonymous("allocation", NULL, num_bytes, PROT_READ | PROT_WRITE);
  if (mem_map == NULL) {
    return NULL;
  }
  MutexLock mu(self, lock_);
  Object* obj = reinterpret_cast<Object*>(mem_map->Begin());
  large_objects_.push_back(obj);
  mem_maps_.Put(obj, mem_map);
  size_t allocation_size = mem_map->Size();
  num_bytes_allocated_ += allocation_size;
  total_bytes_allocated_ += allocation_size;
  ++num_objects_allocated_;
  ++total_objects_allocated_;
  return obj;
}

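// Frees a large object by deleting its backing MemMap, which unmaps the
// memory. It is a fatal error to free an object this space does not own.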
size_t LargeObjectMapSpace::Free(Thread* self, Object* ptr) {
  MutexLock mu(self, lock_);
  MemMaps::iterator found = mem_maps_.find(ptr);
  CHECK(found != mem_maps_.end()) << "Attempted to free large object which was not live";
  size_t allocation_size = found->second->Size();
  DCHECK_GE(num_bytes_allocated_, allocation_size);
  num_bytes_allocated_ -= allocation_size;
  --num_objects_allocated_;
  delete found->second;
  mem_maps_.erase(found);
  return allocation_size;
}

size_t LargeObjectMapSpace::AllocationSize(const Object* obj) {
  MutexLock mu(Thread::Current(), lock_);
  MemMaps::iterator found = mem_maps_.find(const_cast<Object*>(obj));
  CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
  return found->second->Size();
}

size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, Object** ptrs) {
  size_t total = 0;
  for (size_t i = 0; i < num_ptrs; ++i) {
    if (kDebugSpaces) {
      CHECK(Contains(ptrs[i]));
    }
    total += Free(self, ptrs[i]);
  }
  return total;
}

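// Visits every mapped object. Each region is followed by a callback
// invocation with null arguments as an end-of-region sentinel, keeping the
// contract of DlMallocSpace::WalkCallback.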
void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (MemMaps::iterator it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
    MemMap* mem_map = it->second;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    callback(NULL, NULL, 0, arg);
  }
}

bool LargeObjectMapSpace::Contains(const Object* obj) const {
  MutexLock mu(Thread::Current(), lock_);
  return mem_maps_.find(const_cast<Object*>(obj)) != mem_maps_.end();
}

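// FreeListSpace manages a single contiguous mapping divided into
// kAlignment-sized slots. The chunks_ vector keeps one Chunk header per slot,
// and free chunks are additionally tracked in free_chunks_, ordered by size,
// so that Alloc can run a best-fit search with lower_bound.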
FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
  CHECK(size % kAlignment == 0);
  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
                                         PROT_READ | PROT_WRITE);
  CHECK(mem_map != NULL) << "Failed to allocate large object space mem map";
  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}

FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
    : LargeObjectSpace(name),
      begin_(begin),
      end_(end),
      mem_map_(mem_map),
      lock_("free list space lock", kAllocSpaceLock) {
  chunks_.resize(Size() / kAlignment + 1);
  // Add a dummy chunk so we don't need to handle chunks having no next chunk.
  chunks_.back().SetSize(kAlignment, false);
  // Start out with one large free chunk.
  AddFreeChunk(begin_, end_ - begin_, NULL);
}

FreeListSpace::~FreeListSpace() {
}

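// Marks the chunk at the given address as free, links it to its neighbours,
// and registers it in the free chunk set.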
void FreeListSpace::AddFreeChunk(void* address, size_t size, Chunk* previous) {
  Chunk* chunk = ChunkFromAddr(address);
  chunk->SetSize(size, true);
  chunk->SetPrevious(previous);
  Chunk* next_chunk = GetNextChunk(chunk);
  next_chunk->SetPrevious(chunk);
  free_chunks_.insert(chunk);
}

FreeListSpace::Chunk* FreeListSpace::ChunkFromAddr(void* address) {
  size_t offset = reinterpret_cast<byte*>(address) - Begin();
  DCHECK(IsAligned<kAlignment>(offset));
  DCHECK_LT(offset, Size());
  return &chunks_[offset / kAlignment];
}

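// Inverse of ChunkFromAddr: recovers the payload address from a chunk
// header's index in chunks_.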
void* FreeListSpace::AddrFromChunk(Chunk* chunk) {
  return reinterpret_cast<void*>(Begin() + (chunk - &chunks_.front()) * kAlignment);
}

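// Removes a specific chunk from the size-ordered free set. Several free
// chunks can share a size, so the equal range is scanned for pointer
// identity.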
void FreeListSpace::RemoveFreeChunk(Chunk* chunk) {
  // TODO: C++0x
  // TODO: Improve performance, this might be slow.
  std::pair<FreeChunks::iterator, FreeChunks::iterator> range = free_chunks_.equal_range(chunk);
  for (FreeChunks::iterator it = range.first; it != range.second; ++it) {
    if (*it == chunk) {
      free_chunks_.erase(it);
      return;
    }
  }
}

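// Reports each allocated chunk to the callback; free chunks are skipped. As
// in LargeObjectMapSpace::Walk, each region is followed by a null sentinel
// call.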
void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (Chunk* chunk = &chunks_.front(); chunk < &chunks_.back(); ) {
    if (!chunk->IsFree()) {
      size_t size = chunk->GetSize();
      void* begin = AddrFromChunk(chunk);
      void* end = reinterpret_cast<void*>(reinterpret_cast<byte*>(begin) + size);
      callback(begin, end, size, arg);
      callback(NULL, NULL, 0, arg);
    }
    chunk = GetNextChunk(chunk);
  }
}

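// Returns the chunk to the free list, coalescing it with adjacent free chunks
// to limit fragmentation, and advises the kernel that the backing pages are
// no longer needed.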
size_t FreeListSpace::Free(Thread* self, Object* obj) {
  MutexLock mu(self, lock_);
  CHECK(Contains(obj));
  Chunk* chunk = ChunkFromAddr(obj);
  CHECK(!chunk->IsFree());
  size_t allocation_size = chunk->GetSize();
  madvise(obj, allocation_size, MADV_DONTNEED);
  num_objects_allocated_--;
  num_bytes_allocated_ -= allocation_size;
  Chunk* prev = chunk->GetPrevious();
  Chunk* next = GetNextChunk(chunk);

  // Combine any adjacent free chunks.
  size_t extra_size = chunk->GetSize();
  if (next->IsFree()) {
    extra_size += next->GetSize();
    RemoveFreeChunk(next);
  }
  if (prev != NULL && prev->IsFree()) {
    RemoveFreeChunk(prev);
    AddFreeChunk(AddrFromChunk(prev), prev->GetSize() + extra_size, prev->GetPrevious());
  } else {
    AddFreeChunk(AddrFromChunk(chunk), extra_size, prev);
  }
  return allocation_size;
}

bool FreeListSpace::Contains(const Object* obj) const {
  return mem_map_->HasAddress(obj);
}

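// Chunk headers are stored contiguously in chunks_, one per kAlignment slot,
// so the next header lies this chunk's size (in slots) further along.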
FreeListSpace::Chunk* FreeListSpace::GetNextChunk(Chunk* chunk) {
  return chunk + chunk->GetSize() / kAlignment;
}

size_t FreeListSpace::AllocationSize(const Object* obj) {
  Chunk* chunk = ChunkFromAddr(const_cast<Object*>(obj));
  CHECK(!chunk->IsFree());
  return chunk->GetSize();
}

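// Best-fit allocation: lower_bound on the size-ordered free set finds the
// smallest free chunk that can hold the rounded-up request; any remainder is
// split off as a new free chunk.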
Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) {
  MutexLock mu(self, lock_);
  num_bytes = RoundUp(num_bytes, kAlignment);
  Chunk temp;
  temp.SetSize(num_bytes);
  // Find the smallest chunk at least num_bytes in size.
  FreeChunks::iterator found = free_chunks_.lower_bound(&temp);
  if (found == free_chunks_.end()) {
    // Out of memory, or too much fragmentation.
    return NULL;
  }
  Chunk* chunk = *found;
  free_chunks_.erase(found);
  CHECK(chunk->IsFree());
  void* addr = AddrFromChunk(chunk);
  size_t chunk_size = chunk->GetSize();
  chunk->SetSize(num_bytes);
  if (chunk_size > num_bytes) {
    // Split the chunk into two chunks.
    Chunk* new_chunk = GetNextChunk(chunk);
    AddFreeChunk(AddrFromChunk(new_chunk), chunk_size - num_bytes, chunk);
  }

  num_objects_allocated_++;
  total_objects_allocated_++;
  num_bytes_allocated_ += num_bytes;
  total_bytes_allocated_ += num_bytes;
  return reinterpret_cast<Object*>(addr);
}

void FreeListSpace::Dump(std::ostream& os) const {
  os << GetName() << " -"
     << " begin: " << reinterpret_cast<void*>(Begin())
     << " end: " << reinterpret_cast<void*>(End());
}

}  // namespace art