/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "compiler_internals.h"
#include "dex_file-inl.h"
#include "arena_allocator.h"
#include "base/logging.h"
#include "base/mutex.h"
#include "thread-inl.h"
#include <memcheck/memcheck.h>

namespace art {

// Memmap is a bit slower than malloc according to my measurements.
static constexpr bool kUseMemMap = false;
static constexpr bool kUseMemSet = true && kUseMemMap;
static constexpr size_t kValgrindRedZoneBytes = 8;

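// Human-readable names for each allocation kind, used by ArenaAllocator::DumpMemStats().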
static const char* alloc_names[ArenaAllocator::kNumAllocKinds] = {
  "Misc       ",
  "BasicBlock ",
  "LIR        ",
  "MIR        ",
  "DataFlow   ",
  "GrowList   ",
  "GrowBitMap ",
  "Dalvik2SSA ",
  "DebugInfo  ",
  "Successor  ",
  "RegAlloc   ",
  "Data       ",
  "Preds      ",
};

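// An Arena is a single contiguous block of zero-initialized memory, backed either by an
// anonymous MemMap or by a calloc'd buffer depending on kUseMemMap.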
Arena::Arena(size_t size)
    : bytes_allocated_(0),
      map_(nullptr),
      next_(nullptr) {
  if (kUseMemMap) {
    std::string error_msg;
    map_ = MemMap::MapAnonymous("dalvik-arena", NULL, size, PROT_READ | PROT_WRITE, &error_msg);
    CHECK(map_ != nullptr) << error_msg;
    memory_ = map_->Begin();
    size_ = map_->Size();
  } else {
    memory_ = reinterpret_cast<uint8_t*>(calloc(1, size));
    size_ = size;
  }
}

Arena::~Arena() {
  if (kUseMemMap) {
    delete map_;
  } else {
    free(reinterpret_cast<void*>(memory_));
  }
}

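// Re-zero (or madvise away) only the bytes that were actually handed out, so a recycled
// arena once again returns zero-initialized memory.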
void Arena::Reset() {
  if (bytes_allocated_) {
    if (kUseMemSet || !kUseMemMap) {
      memset(Begin(), 0, bytes_allocated_);
    } else {
      madvise(Begin(), bytes_allocated_, MADV_DONTNEED);
    }
    bytes_allocated_ = 0;
  }
}

ArenaPool::ArenaPool()
    : lock_("Arena pool lock"),
      free_arenas_(nullptr) {
}

ArenaPool::~ArenaPool() {
  while (free_arenas_ != nullptr) {
    auto* arena = free_arenas_;
    free_arenas_ = free_arenas_->next_;
    delete arena;
  }
}

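// Hand out the arena at the head of the free list if it is large enough; otherwise allocate
// a fresh one. Reset() clears any bytes a previous user wrote before the arena is returned.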
Arena* ArenaPool::AllocArena(size_t size) {
  Thread* self = Thread::Current();
  Arena* ret = nullptr;
  {
    MutexLock lock(self, lock_);
    if (free_arenas_ != nullptr && LIKELY(free_arenas_->Size() >= size)) {
      ret = free_arenas_;
      free_arenas_ = free_arenas_->next_;
    }
  }
  if (ret == nullptr) {
    ret = new Arena(size);
  }
  ret->Reset();
  return ret;
}

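// Return an arena to the pool's free list. Under Valgrind, mark its used bytes as undefined
// so that reads of recycled memory are reported.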
void ArenaPool::FreeArena(Arena* arena) {
  Thread* self = Thread::Current();
  if (UNLIKELY(RUNNING_ON_VALGRIND)) {
    VALGRIND_MAKE_MEM_UNDEFINED(arena->memory_, arena->bytes_allocated_);
  }
  {
    MutexLock lock(self, lock_);
    arena->next_ = free_arenas_;
    free_arenas_ = arena;
  }
}

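// Total number of bytes this allocator has handed out, summed over all allocation kinds.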
size_t ArenaAllocator::BytesAllocated() const {
  size_t total = 0;
  for (int i = 0; i < kNumAllocKinds; i++) {
    total += alloc_stats_[i];
  }
  return total;
}

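// ArenaAllocator carves its allocations out of Arenas obtained from an ArenaPool; nothing is
// freed individually, and the arenas are only handed back to the pool when the allocator
// itself is destroyed. A minimal usage sketch, assuming the inline Alloc() entry point
// declared in arena_allocator.h (not shown in this file):
//
//   ArenaPool pool;
//   ArenaAllocator allocator(&pool);
//   void* mem = allocator.Alloc(16, ArenaAllocator::kAllocMisc);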
ArenaAllocator::ArenaAllocator(ArenaPool* pool)
  : pool_(pool),
    begin_(nullptr),
    end_(nullptr),
    ptr_(nullptr),
    arena_head_(nullptr),
    num_allocations_(0),
    running_on_valgrind_(RUNNING_ON_VALGRIND) {
  memset(&alloc_stats_[0], 0, sizeof(alloc_stats_));
}

void ArenaAllocator::UpdateBytesAllocated() {
  if (arena_head_ != nullptr) {
    // Update how many bytes we have allocated into the arena so that the arena pool knows how
    // much memory to zero out.
    arena_head_->bytes_allocated_ = ptr_ - begin_;
  }
}

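// Valgrind-aware allocation path: pad each request with a red zone, verify the returned
// memory is zeroed, and mark the padding NOACCESS so buffer overruns are reported.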
void* ArenaAllocator::AllocValgrind(size_t bytes, ArenaAllocKind kind) {
  size_t rounded_bytes = (bytes + 3 + kValgrindRedZoneBytes) & ~3;
  if (UNLIKELY(ptr_ + rounded_bytes > end_)) {
    // Obtain a new block.
    ObtainNewArenaForAllocation(rounded_bytes);
    if (UNLIKELY(ptr_ == nullptr)) {
      return nullptr;
    }
  }
  if (kCountAllocations) {
    alloc_stats_[kind] += rounded_bytes;
    ++num_allocations_;
  }
  uint8_t* ret = ptr_;
  ptr_ += rounded_bytes;
  // Check that the memory is already zeroed out.
  for (uint8_t* ptr = ret; ptr < ptr_; ++ptr) {
    CHECK_EQ(*ptr, 0U);
  }
  VALGRIND_MAKE_MEM_NOACCESS(ret + bytes, rounded_bytes - bytes);
  return ret;
}

ArenaAllocator::~ArenaAllocator() {
  // Reclaim all the arenas by giving them back to the arena pool.
  UpdateBytesAllocated();
  while (arena_head_ != nullptr) {
    Arena* arena = arena_head_;
    arena_head_ = arena_head_->next_;
    pool_->FreeArena(arena);
  }
}

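// Record usage of the current arena, then fetch one from the pool that is large enough for
// the pending allocation and make it the new allocation head.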
void ArenaAllocator::ObtainNewArenaForAllocation(size_t allocation_size) {
  UpdateBytesAllocated();
  Arena* new_arena = pool_->AllocArena(std::max(Arena::kDefaultSize, allocation_size));
  new_arena->next_ = arena_head_;
  arena_head_ = new_arena;
  // Update our internal data structures.
  ptr_ = begin_ = new_arena->Begin();
  end_ = new_arena->End();
}

// Dump memory usage stats.
void ArenaAllocator::DumpMemStats(std::ostream& os) const {
  size_t malloc_bytes = 0;
  // Start out with how many lost bytes we have in the arena we are currently allocating into.
  size_t lost_bytes(end_ - ptr_);
  size_t num_arenas = 0;
  for (Arena* arena = arena_head_; arena != nullptr; arena = arena->next_) {
    malloc_bytes += arena->Size();
    if (arena != arena_head_) {
      lost_bytes += arena->RemainingSpace();
    }
    ++num_arenas;
  }
  const size_t bytes_allocated = BytesAllocated();
  os << " MEM: used: " << bytes_allocated << ", allocated: " << malloc_bytes
     << ", lost: " << lost_bytes << "\n";
  if (num_allocations_ != 0) {
    os << "Number of arenas allocated: " << num_arenas << ", Number of allocations: "
       << num_allocations_ << ", avg size: " << bytes_allocated / num_allocations_ << "\n";
  }
  os << "===== Allocation by kind\n";
  for (int i = 0; i < kNumAllocKinds; i++) {
    os << alloc_names[i] << std::setw(10) << alloc_stats_[i] << "\n";
  }
}

}  // namespace art