/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bump_pointer_space.h"
#include "bump_pointer_space-inl.h"
#include "mirror/object-inl.h"
#include "mirror/class-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace space {

BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capacity,
                                           byte* requested_begin) {
  capacity = RoundUp(capacity, kPageSize);
  std::string error_msg;
  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
                                                 PROT_READ | PROT_WRITE, &error_msg));
  if (mem_map.get() == nullptr) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
               << PrettySize(capacity) << " with message " << error_msg;
    return nullptr;
  }
  return new BumpPointerSpace(name, mem_map.release());
}

BumpPointerSpace::BumpPointerSpace(const std::string& name, byte* begin, byte* limit)
    : ContinuousMemMapAllocSpace(name, nullptr, begin, begin, limit,
                                 kGcRetentionPolicyAlwaysCollect),
      growth_end_(limit),
      objects_allocated_(0), bytes_allocated_(0),
      block_lock_("Block lock"),
      num_blocks_(0) {
  CHECK_GE(Capacity(), sizeof(BlockHeader));
  // Reserve room for the main block's header; objects are allocated after it.
  end_ += sizeof(BlockHeader);
}

BumpPointerSpace::BumpPointerSpace(const std::string& name, MemMap* mem_map)
    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->Begin(), mem_map->End(),
                                 kGcRetentionPolicyAlwaysCollect),
      growth_end_(mem_map->End()),
      objects_allocated_(0), bytes_allocated_(0),
      block_lock_("Block lock"),
      num_blocks_(0) {
  CHECK_GE(Capacity(), sizeof(BlockHeader));
  end_ += sizeof(BlockHeader);
}

mirror::Object* BumpPointerSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated) {
  num_bytes = RoundUp(num_bytes, kAlignment);
  mirror::Object* ret = AllocNonvirtual(num_bytes);
  if (LIKELY(ret != nullptr)) {
    *bytes_allocated = num_bytes;
  }
  return ret;
}

size_t BumpPointerSpace::AllocationSize(const mirror::Object* obj) {
  return AllocationSizeNonvirtual(obj);
}

void BumpPointerSpace::Clear() {
  // Release the pages back to the operating system.
  CHECK_NE(madvise(Begin(), Limit() - Begin(), MADV_DONTNEED), -1) << "madvise failed";
  // Reset the end of the space back to just past the main block header; the end moves forward
  // as objects are allocated.
  SetEnd(Begin() + sizeof(BlockHeader));
  objects_allocated_ = 0;
  bytes_allocated_ = 0;
  growth_end_ = Limit();
  {
    MutexLock mu(Thread::Current(), block_lock_);
    num_blocks_ = 0;
  }
}

void BumpPointerSpace::Dump(std::ostream& os) const {
  os << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(End()) << " - "
     << reinterpret_cast<void*>(Limit());
}

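// Returns the object that follows |obj| in memory. Objects within a block are packed
// contiguously, so the next object begins at the end of |obj|, rounded up to kAlignment.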
mirror::Object* BumpPointerSpace::GetNextObject(mirror::Object* obj) {
  const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
  return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}

void BumpPointerSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), block_lock_);
  RevokeThreadLocalBuffersLocked(thread);
}

void BumpPointerSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  // TODO: Avoid copying the thread list?
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
}

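// Refreshes the header of the main block (the region at Begin() used for non thread-local
// allocations) so that it covers everything allocated so far. Only valid while no other blocks
// have been carved out, hence the DCHECK below.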
void BumpPointerSpace::UpdateMainBlock() {
  BlockHeader* header = reinterpret_cast<BlockHeader*>(Begin());
  header->size_ = Size() - sizeof(BlockHeader);
  DCHECK_EQ(num_blocks_, 0U);
}

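// Carves a new block out of the space: a BlockHeader recording the block size is written first,
// and the returned pointer points just past it. Called with block_lock_ held.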
// Returns the start of the storage.
byte* BumpPointerSpace::AllocBlock(size_t bytes) {
  bytes = RoundUp(bytes, kAlignment);
  if (!num_blocks_) {
    UpdateMainBlock();
  }
  byte* storage = reinterpret_cast<byte*>(
      AllocNonvirtualWithoutAccounting(bytes + sizeof(BlockHeader)));
  if (LIKELY(storage != nullptr)) {
    BlockHeader* header = reinterpret_cast<BlockHeader*>(storage);
    header->size_ = bytes;  // Write out the block header.
    storage += sizeof(BlockHeader);
    ++num_blocks_;
  }
  return storage;
}

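// Visits every object in the space. Blocks are laid out back to back, each preceded by its
// BlockHeader; within a block, objects are visited until the end of the block or an object with
// a null class pointer (the unused tail of the block) is reached.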
void BumpPointerSpace::Walk(ObjectVisitorCallback callback, void* arg) {
  byte* pos = Begin();

  {
    MutexLock mu(Thread::Current(), block_lock_);
    // If we have 0 blocks then we need to update the main header since we have bump pointer style
    // allocation into an unbounded region (actually bounded by Capacity()).
    if (num_blocks_ == 0) {
      UpdateMainBlock();
    }
  }

  while (pos < End()) {
    BlockHeader* header = reinterpret_cast<BlockHeader*>(pos);
    size_t block_size = header->size_;
    pos += sizeof(BlockHeader);  // Skip the header so that we know where the objects begin.
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
    const mirror::Object* end = reinterpret_cast<const mirror::Object*>(pos + block_size);
    CHECK_LE(reinterpret_cast<const byte*>(end), End());
    // We don't know how many objects are allocated in the current block. When we hit a null class
    // pointer, assume it's the end. TODO: Have a thread update the header when it flushes the block?
    while (obj < end && obj->GetClass() != nullptr) {
      callback(obj, arg);
      obj = GetNextObject(obj);
    }
    pos += block_size;
  }
}

bool BumpPointerSpace::IsEmpty() const {
  return Size() == sizeof(BlockHeader);
}

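// Returns the number of bytes allocated in the space: the shared bytes_allocated_ counter plus
// the bytes currently used in every thread's live thread-local buffer.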
uint64_t BumpPointerSpace::GetBytesAllocated() {
  // Start out with the pre-determined amount (bytes in blocks which are not being allocated into).
  uint64_t total = static_cast<uint64_t>(bytes_allocated_.Load());
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  MutexLock mu3(Thread::Current(), block_lock_);
  // If we don't have any blocks, we don't have any thread-local buffers. This check is required
  // since there can be multiple bump pointer spaces that exist at the same time.
  if (num_blocks_ > 0) {
    for (Thread* thread : thread_list) {
      total += thread->thread_local_pos_ - thread->thread_local_start_;
    }
  }
  return total;
}

uint64_t BumpPointerSpace::GetObjectsAllocated() {
  // Start out with the pre-determined amount (objects in blocks which are not being allocated into).
  uint64_t total = static_cast<uint64_t>(objects_allocated_.Load());
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  MutexLock mu3(Thread::Current(), block_lock_);
  // If we don't have any blocks, we don't have any thread-local buffers. This check is required
  // since there can be multiple bump pointer spaces that exist at the same time.
  if (num_blocks_ > 0) {
    for (Thread* thread : thread_list) {
      total += thread->thread_local_objects_;
    }
  }
  return total;
}

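// Folds the thread's TLAB usage into the space-wide allocation counters and detaches the TLAB
// from the thread. Called with block_lock_ held.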
void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  objects_allocated_.FetchAndAdd(thread->thread_local_objects_);
  bytes_allocated_.FetchAndAdd(thread->thread_local_pos_ - thread->thread_local_start_);
  thread->SetTlab(nullptr, nullptr);
}

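// Installs a fresh thread-local allocation buffer of |bytes| bytes for |self|, revoking any
// buffer the thread already had. Returns false if the space cannot satisfy the request.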
bool BumpPointerSpace::AllocNewTlab(Thread* self, size_t bytes) {
  MutexLock mu(Thread::Current(), block_lock_);
  RevokeThreadLocalBuffersLocked(self);
  byte* start = AllocBlock(bytes);
  if (start == nullptr) {
    return false;
  }
  self->SetTlab(start, start + bytes);
  return true;
}

}  // namespace space
}  // namespace gc
}  // namespace art