/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "space.h"

#include "UniquePtr.h"
#include "dlmalloc.h"
#include "file.h"
#include "image.h"
#include "logging.h"
#include "os.h"
#include "space_bitmap.h"
#include "stl_util.h"
#include "utils.h"

namespace art {

#ifndef NDEBUG
static const bool kDebugSpaces = true;
#else
static const bool kDebugSpaces = false;
#endif
// Magic padding value that we use to check for buffer overruns.
static const word kPaddingValue = 0xBAC0BAC0;

// TODO: Remove define macro
#define CHECK_MEMORY_CALL(call, args, what) \
  do { \
    int rc = call args; \
    if (UNLIKELY(rc != 0)) { \
      errno = rc; \
      PLOG(FATAL) << # call << " failed for " << what; \
    } \
  } while (false)
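
// For example, the call in AllocSpace::Create below,
//   CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name);
// expands to a do/while block that runs mprotect(end, capacity - initial_size, PROT_NONE),
// copies a non-zero return code into errno, and aborts via PLOG(FATAL) with a message
// naming the failing call and the space.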

Space::Space(const std::string& name, GcRetentionPolicy gc_retention_policy)
    : name_(name),
      gc_retention_policy_(gc_retention_policy) {

}

ContinuousSpace::ContinuousSpace(const std::string& name, byte* begin, byte* end,
                                 GcRetentionPolicy gc_retention_policy)
    : Space(name, gc_retention_policy),
      begin_(begin),
      end_(end) {

}

MemMapSpace::MemMapSpace(const std::string& name, MemMap* mem_map, size_t initial_size,
                         GcRetentionPolicy gc_retention_policy)
    : ContinuousSpace(name, mem_map->Begin(), mem_map->Begin() + initial_size, gc_retention_policy),
      mem_map_(mem_map)
{

}

size_t AllocSpace::bitmap_index_ = 0;

AllocSpace::AllocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
                       byte* end, size_t growth_limit)
    : MemMapSpace(name, mem_map, end - begin, GCRP_ALWAYS_COLLECT),
      num_bytes_allocated_(0), num_objects_allocated_(0),
      lock_("allocation space lock", kAllocSpaceLock), mspace_(mspace),
      growth_limit_(growth_limit) {
  CHECK(mspace != NULL);

  size_t bitmap_index = bitmap_index_++;

  static const uintptr_t kGcCardSize = static_cast<uintptr_t>(GC_CARD_SIZE);
  CHECK(reinterpret_cast<uintptr_t>(mem_map->Begin()) % kGcCardSize == 0);
  CHECK(reinterpret_cast<uintptr_t>(mem_map->End()) % kGcCardSize == 0);

  live_bitmap_.reset(SpaceBitmap::Create(
      StringPrintf("allocspace-%s-live-bitmap-%d", name.c_str(), static_cast<int>(bitmap_index)),
      Begin(), Capacity()));
  DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace live bitmap #" << bitmap_index;

  mark_bitmap_.reset(SpaceBitmap::Create(
      StringPrintf("allocspace-%s-mark-bitmap-%d", name.c_str(), static_cast<int>(bitmap_index)),
      Begin(), Capacity()));
  DCHECK(mark_bitmap_.get() != NULL) << "could not create allocspace mark bitmap #" << bitmap_index;
}

AllocSpace* AllocSpace::Create(const std::string& name, size_t initial_size, size_t growth_limit,
                               size_t capacity, byte* requested_begin) {
  // Memory we promise to dlmalloc before it asks for morecore.
  // Note: making this value large means that large allocations are unlikely to succeed as dlmalloc
  // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
  // size of the large allocation) will be greater than the footprint limit.
  size_t starting_size = kPageSize;
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    VLOG(startup) << "Space::CreateAllocSpace entering " << name
                  << " initial_size=" << PrettySize(initial_size)
                  << " growth_limit=" << PrettySize(growth_limit)
                  << " capacity=" << PrettySize(capacity)
                  << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
  }

  // Sanity check arguments
  if (starting_size > initial_size) {
    initial_size = starting_size;
  }
  if (initial_size > growth_limit) {
    LOG(ERROR) << "Failed to create alloc space (" << name << ") where the initial size ("
               << PrettySize(initial_size) << ") is larger than its growth limit ("
               << PrettySize(growth_limit) << ")";
    return NULL;
  }
  if (growth_limit > capacity) {
    LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit ("
               << PrettySize(growth_limit) << ") is larger than the capacity ("
               << PrettySize(capacity) << ")";
    return NULL;
  }

  // Page align growth limit and capacity which will be used to manage mmapped storage
  growth_limit = RoundUp(growth_limit, kPageSize);
  capacity = RoundUp(capacity, kPageSize);

  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin,
                                                 capacity, PROT_READ | PROT_WRITE));
  if (mem_map.get() == NULL) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
               << PrettySize(capacity);
    return NULL;
  }

  void* mspace = AllocSpace::CreateMallocSpace(mem_map->Begin(), starting_size, initial_size);
  if (mspace == NULL) {
    LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
    return NULL;
  }

  // Protect memory beyond the initial size.
  byte* end = mem_map->Begin() + starting_size;
  if (capacity - initial_size > 0) {
    CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name);
  }

  // Everything is set so record in immutable structure and leave
  MemMap* mem_map_ptr = mem_map.release();
  AllocSpace* space = new AllocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end,
                                     growth_limit);
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Space::CreateAllocSpace exiting (" << PrettyDuration(NanoTime() - start_time)
              << " ) " << *space;
  }
  return space;
}

void* AllocSpace::CreateMallocSpace(void* begin, size_t morecore_start, size_t initial_size) {
  // clear errno to allow PLOG on error
  errno = 0;
  // create mspace using our backing storage starting at begin and with a footprint of
  // morecore_start. Don't use an internal dlmalloc lock (as we already hold heap lock). When
  // morecore_start bytes of memory are exhausted, morecore will be called.
  void* msp = create_mspace_with_base(begin, morecore_start, false /*locked*/);
  if (msp != NULL) {
    // Do not allow morecore requests to succeed beyond the initial size of the heap
    mspace_set_footprint_limit(msp, initial_size);
  } else {
    PLOG(ERROR) << "create_mspace_with_base failed";
  }
  return msp;
}

void AllocSpace::SwapBitmaps() {
  SpaceBitmap* temp_live_bitmap = live_bitmap_.release();
  live_bitmap_.reset(mark_bitmap_.release());
  mark_bitmap_.reset(temp_live_bitmap);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_bitmap_->GetName();
  live_bitmap_->SetName(mark_bitmap_->GetName());
  mark_bitmap_->SetName(temp_name);
}

Object* AllocSpace::AllocWithoutGrowthLocked(size_t num_bytes) {
  if (kDebugSpaces) {
    num_bytes += sizeof(word);
  }

  Object* result = reinterpret_cast<Object*>(mspace_calloc(mspace_, 1, num_bytes));
  if (kDebugSpaces && result != NULL) {
    CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
                            << ") not in bounds of allocation space " << *this;
    // Write a magic pattern into the word at the end of the allocation so Free() can check for
    // buffer overruns.
    *reinterpret_cast<word*>(reinterpret_cast<byte*>(result) + AllocationSize(result)
        - sizeof(word) - kChunkOverhead) = kPaddingValue;
  }
  num_bytes_allocated_ += AllocationSize(result);
  ++num_objects_allocated_;
  return result;
}

Object* AllocSpace::AllocWithoutGrowth(Thread* self, size_t num_bytes) {
  MutexLock mu(self, lock_);
  return AllocWithoutGrowthLocked(num_bytes);
}

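// Temporarily raises the footprint limit to the space's full capacity so dlmalloc can request
// more memory for this one allocation, then clamps the limit back to the footprint actually in
// use so ordinary allocations cannot keep growing the space.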
Object* AllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes) {
  MutexLock mu(self, lock_);
  // Grow as much as possible within the mspace.
  size_t max_allowed = Capacity();
  mspace_set_footprint_limit(mspace_, max_allowed);
  // Try the allocation.
  void* ptr = AllocWithoutGrowthLocked(num_bytes);
  // Shrink back down as small as possible.
  size_t footprint = mspace_footprint(mspace_);
  mspace_set_footprint_limit(mspace_, footprint);
  // Return the new allocation or NULL.
  Object* result = reinterpret_cast<Object*>(ptr);
  CHECK(!kDebugSpaces || result == NULL || Contains(result));
  return result;
}

void AllocSpace::SetGrowthLimit(size_t growth_limit) {
  growth_limit = RoundUp(growth_limit, kPageSize);
  growth_limit_ = growth_limit;
  if (Size() > growth_limit_) {
    end_ = begin_ + growth_limit;
  }
}

AllocSpace* AllocSpace::CreateZygoteSpace() {
  end_ = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(end_), kPageSize));
  DCHECK(IsAligned<GC_CARD_SIZE>(begin_));
  DCHECK(IsAligned<GC_CARD_SIZE>(end_));
  DCHECK(IsAligned<kPageSize>(begin_));
  DCHECK(IsAligned<kPageSize>(end_));
  size_t size = RoundUp(Size(), kPageSize);
  // Trim the heap so that we minimize the size of the Zygote space.
  Trim();
  // Trim our mem-map to free unused pages.
  GetMemMap()->UnMapAtEnd(end_);
  // TODO: Not hardcode these in?
  const size_t starting_size = kPageSize;
  const size_t initial_size = 2 * MB;
  // Remaining size is for the new alloc space.
  const size_t growth_limit = growth_limit_ - size;
  const size_t capacity = Capacity() - size;
  VLOG(heap) << "Begin " << reinterpret_cast<const void*>(begin_);
  VLOG(heap) << "End " << reinterpret_cast<const void*>(end_);
  VLOG(heap) << "Size " << size;
  VLOG(heap) << "GrowthLimit " << growth_limit_;
  VLOG(heap) << "Capacity " << Capacity();
  SetGrowthLimit(RoundUp(size, kPageSize));
  SetFootprintLimit(RoundUp(size, kPageSize));
  // FIXME: Do we need reference counted pointers here?
  // Make the two spaces share the same mark bitmaps since the bitmaps span both of the spaces.
  VLOG(heap) << "Creating new AllocSpace: ";
  VLOG(heap) << "Size " << GetMemMap()->Size();
  VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
  VLOG(heap) << "Capacity " << PrettySize(capacity);
  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(GetName().c_str(), End(), capacity, PROT_READ | PROT_WRITE));
  void* mspace = CreateMallocSpace(end_, starting_size, initial_size);
  // Protect memory beyond the initial size.
  byte* end = mem_map->Begin() + starting_size;
  if (capacity - initial_size > 0) {
    CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name_.c_str());
  }
  AllocSpace* alloc_space = new AllocSpace(name_, mem_map.release(), mspace, end_, end, growth_limit);
  live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
  CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
  mark_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
  CHECK_EQ(mark_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
  name_ += "-zygote-transformed";
  VLOG(heap) << "zygote space creation done";
  return alloc_space;
}
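
// After CreateZygoteSpace() returns, this space keeps only the pages already in use (its growth
// and footprint limits are clamped to the current size and its name gains a "-zygote-transformed"
// suffix), while the returned AllocSpace takes over the untouched remainder of the original
// reservation for post-fork allocations.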

void AllocSpace::Free(Thread* self, Object* ptr) {
  MutexLock mu(self, lock_);
  if (kDebugSpaces) {
    CHECK(ptr != NULL);
    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
    CHECK_EQ(
        *reinterpret_cast<word*>(reinterpret_cast<byte*>(ptr) + AllocationSize(ptr) -
            sizeof(word) - kChunkOverhead), kPaddingValue);
  }
  num_bytes_allocated_ -= AllocationSize(ptr);
  --num_objects_allocated_;
  mspace_free(mspace_, ptr);
}

void AllocSpace::FreeList(Thread* self, size_t num_ptrs, Object** ptrs) {
  MutexLock mu(self, lock_);
  if (kDebugSpaces) {
    CHECK(ptrs != NULL);
    size_t num_broken_ptrs = 0;
    for (size_t i = 0; i < num_ptrs; i++) {
      if (!Contains(ptrs[i])) {
        num_broken_ptrs++;
        LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
      } else {
        size_t size = mspace_usable_size(ptrs[i]);
        memset(ptrs[i], 0xEF, size);
      }
    }
    CHECK_EQ(num_broken_ptrs, 0u);
  }
  for (size_t i = 0; i < num_ptrs; i++) {
    num_bytes_allocated_ -= AllocationSize(ptrs[i]);
  }
  num_objects_allocated_ -= num_ptrs;
  mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs);
}

// Callback from dlmalloc when it needs to increase the footprint
extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) {
  Heap* heap = Runtime::Current()->GetHeap();
  DCHECK_EQ(heap->GetAllocSpace()->GetMspace(), mspace);
  return heap->GetAllocSpace()->MoreCore(increment);
}
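
// MoreCore() below follows an sbrk-like contract: for a positive increment it makes the next
// increment bytes readable and writable and returns the old end_ (the base of the newly usable
// region); for a negative increment it releases and protects the pages above the new end before
// returning the old end_.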

void* AllocSpace::MoreCore(intptr_t increment) {
  lock_.AssertHeld(Thread::Current());
  byte* original_end = end_;
  if (increment != 0) {
    VLOG(heap) << "AllocSpace::MoreCore " << PrettySize(increment);
    byte* new_end = original_end + increment;
    if (increment > 0) {
      if (kDebugSpaces) {
        // Should never be asked to increase the allocation beyond the capacity of the space.
        // Enforced by mspace_set_footprint_limit.
        CHECK_LE(new_end, Begin() + Capacity());
      }
      CHECK_MEMORY_CALL(mprotect, (original_end, increment, PROT_READ | PROT_WRITE), GetName());
    } else {
      if (kDebugSpaces) {
        // Should never be asked for negative footprint (ie before begin).
        CHECK_GT(original_end + increment, Begin());
      }
      // Advise we don't need the pages and protect them.
      // TODO: removing permissions to the pages may cause TLB shoot-downs, which can be expensive
      // (the same isn't true for granting permissions to a page, as the protected page shouldn't
      // be in a TLB). We should investigate the performance impact of simply ignoring the memory
      // protection change here and in Space::CreateAllocSpace; it's likely just a useful debug
      // feature.
      size_t size = -increment;
      CHECK_MEMORY_CALL(madvise, (new_end, size, MADV_DONTNEED), GetName());
      CHECK_MEMORY_CALL(mprotect, (new_end, size, PROT_NONE), GetName());
    }
    // Update end_
    end_ = new_end;
  }
  return original_end;
}

size_t AllocSpace::AllocationSize(const Object* obj) {
  return mspace_usable_size(const_cast<void*>(reinterpret_cast<const void*>(obj))) +
      kChunkOverhead;
}

void MspaceMadviseCallback(void* start, void* end, size_t used_bytes, void* /* arg */) {
  // Is this chunk in use?
  if (used_bytes != 0) {
    return;
  }
  // Do we have any whole pages to give back?
  start = reinterpret_cast<void*>(RoundUp(reinterpret_cast<uintptr_t>(start), kPageSize));
  end = reinterpret_cast<void*>(RoundDown(reinterpret_cast<uintptr_t>(end), kPageSize));
  if (end > start) {
    size_t length = reinterpret_cast<byte*>(end) - reinterpret_cast<byte*>(start);
    CHECK_MEMORY_CALL(madvise, (start, length, MADV_DONTNEED), "trim");
  }
}

void AllocSpace::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  // Trim to release memory at the end of the space.
  mspace_trim(mspace_, 0);
  // Visit space looking for page-sized holes to advise the kernel we don't need.
  mspace_inspect_all(mspace_, MspaceMadviseCallback, NULL);
}

void AllocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                      void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  mspace_inspect_all(mspace_, callback, arg);
  callback(NULL, NULL, 0, arg);  // Indicate end of a space.
}

size_t AllocSpace::GetFootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint_limit(mspace_);
}

void AllocSpace::SetFootprintLimit(size_t new_size) {
  MutexLock mu(Thread::Current(), lock_);
  VLOG(heap) << "AllocSpace::SetFootprintLimit " << PrettySize(new_size);
  // Compare against the actual footprint, rather than the Size(), because the heap may not have
  // grown all the way to the allowed size yet.
  size_t current_space_size = mspace_footprint(mspace_);
  if (new_size < current_space_size) {
    // Don't let the space grow any more.
    new_size = current_space_size;
  }
  mspace_set_footprint_limit(mspace_, new_size);
}

size_t ImageSpace::bitmap_index_ = 0;

ImageSpace::ImageSpace(const std::string& name, MemMap* mem_map)
    : MemMapSpace(name, mem_map, mem_map->Size(), GCRP_NEVER_COLLECT) {
  const size_t bitmap_index = bitmap_index_++;
  live_bitmap_.reset(SpaceBitmap::Create(
      StringPrintf("imagespace-%s-live-bitmap-%d", name.c_str(), static_cast<int>(bitmap_index)),
      Begin(), Capacity()));
  DCHECK(live_bitmap_.get() != NULL) << "could not create imagespace live bitmap #" << bitmap_index;
}

ImageSpace* ImageSpace::Create(const std::string& image_file_name) {
  CHECK(!image_file_name.empty());

  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    LOG(INFO) << "Space::CreateImageSpace entering" << " image_file_name=" << image_file_name;
  }

  UniquePtr<File> file(OS::OpenFile(image_file_name.c_str(), false));
  if (file.get() == NULL) {
    LOG(ERROR) << "Failed to open " << image_file_name;
    return NULL;
  }
  ImageHeader image_header;
  bool success = file->ReadFully(&image_header, sizeof(image_header));
  if (!success || !image_header.IsValid()) {
    LOG(ERROR) << "Invalid image header " << image_file_name;
    return NULL;
  }
  UniquePtr<MemMap> map(MemMap::MapFileAtAddress(image_header.GetImageBegin(),
                                                 file->Length(),
                                                 // TODO: selectively PROT_EXEC stubs
                                                 PROT_READ | PROT_WRITE | PROT_EXEC,
                                                 MAP_PRIVATE | MAP_FIXED,
                                                 file->Fd(),
                                                 0));
  if (map.get() == NULL) {
    LOG(ERROR) << "Failed to map " << image_file_name;
    return NULL;
  }
  CHECK_EQ(image_header.GetImageBegin(), map->Begin());
  DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader)));

  Runtime* runtime = Runtime::Current();
  Object* jni_stub_array = image_header.GetImageRoot(ImageHeader::kJniStubArray);
  runtime->SetJniDlsymLookupStub(down_cast<ByteArray*>(jni_stub_array));

  Object* ame_stub_array = image_header.GetImageRoot(ImageHeader::kAbstractMethodErrorStubArray);
  runtime->SetAbstractMethodErrorStubArray(down_cast<ByteArray*>(ame_stub_array));

  Object* resolution_stub_array =
      image_header.GetImageRoot(ImageHeader::kStaticResolutionStubArray);
  runtime->SetResolutionStubArray(
      down_cast<ByteArray*>(resolution_stub_array), Runtime::kStaticMethod);
  resolution_stub_array = image_header.GetImageRoot(ImageHeader::kUnknownMethodResolutionStubArray);
  runtime->SetResolutionStubArray(
      down_cast<ByteArray*>(resolution_stub_array), Runtime::kUnknownMethod);

  Object* resolution_method = image_header.GetImageRoot(ImageHeader::kResolutionMethod);
  runtime->SetResolutionMethod(down_cast<AbstractMethod*>(resolution_method));

  Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
  runtime->SetCalleeSaveMethod(down_cast<AbstractMethod*>(callee_save_method), Runtime::kSaveAll);
  callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod);
  runtime->SetCalleeSaveMethod(down_cast<AbstractMethod*>(callee_save_method), Runtime::kRefsOnly);
  callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod);
  runtime->SetCalleeSaveMethod(down_cast<AbstractMethod*>(callee_save_method), Runtime::kRefsAndArgs);

  ImageSpace* space = new ImageSpace(image_file_name, map.release());
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Space::CreateImageSpace exiting (" << PrettyDuration(NanoTime() - start_time)
              << ") " << *space;
  }
  return space;
}

void ImageSpace::RecordImageAllocations(SpaceBitmap* live_bitmap) const {
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "ImageSpace::RecordImageAllocations entering";
    start_time = NanoTime();
  }
  DCHECK(!Runtime::Current()->IsStarted());
  CHECK(live_bitmap != NULL);
  byte* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
  byte* end = End();
  while (current < end) {
    DCHECK_ALIGNED(current, kObjectAlignment);
    const Object* obj = reinterpret_cast<const Object*>(current);
    live_bitmap->Set(obj);
    current += RoundUp(obj->SizeOf(), kObjectAlignment);
  }
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "ImageSpace::RecordImageAllocations exiting ("
              << PrettyDuration(NanoTime() - start_time) << ")";
  }
}

std::ostream& operator<<(std::ostream& os, const Space& space) {
  space.Dump(os);
  return os;
}

void AllocSpace::Dump(std::ostream& os) const {
  os << GetType()
     << "begin=" << reinterpret_cast<void*>(Begin())
     << ",end=" << reinterpret_cast<void*>(End())
     << ",size=" << PrettySize(Size()) << ",capacity=" << PrettySize(Capacity())
     << ",name=\"" << GetName() << "\"]";
}

void ImageSpace::Dump(std::ostream& os) const {
  os << GetType()
     << "begin=" << reinterpret_cast<void*>(Begin())
     << ",end=" << reinterpret_cast<void*>(End())
     << ",size=" << PrettySize(Size())
     << ",name=\"" << GetName() << "\"]";
}

void LargeObjectSpace::SwapBitmaps() {
  SpaceSetMap* temp_live_objects = live_objects_.release();
  live_objects_.reset(mark_objects_.release());
  mark_objects_.reset(temp_live_objects);
  // Swap names to get more descriptive diagnostics.
  std::string temp_name = live_objects_->GetName();
  live_objects_->SetName(mark_objects_->GetName());
  mark_objects_->SetName(temp_name);
}

DiscontinuousSpace::DiscontinuousSpace(const std::string& name,
                                       GcRetentionPolicy gc_retention_policy)
    : Space(name, gc_retention_policy) {

}

LargeObjectSpace::LargeObjectSpace(const std::string& name)
    : DiscontinuousSpace(name, GCRP_ALWAYS_COLLECT),
      num_bytes_allocated_(0),
      num_objects_allocated_(0) {
  live_objects_.reset(new SpaceSetMap("large live objects"));
  mark_objects_.reset(new SpaceSetMap("large marked objects"));
}

void LargeObjectSpace::CopyLiveToMarked() {
  mark_objects_->CopyFrom(*live_objects_.get());
}

LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
    : LargeObjectSpace(name),
      lock_("large object space lock", kAllocSpaceLock)
{

}

LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
  return new LargeObjectMapSpace(name);
}

Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes) {
  MemMap* mem_map = MemMap::MapAnonymous("allocation", NULL, num_bytes, PROT_READ | PROT_WRITE);
  if (mem_map == NULL) {
    return NULL;
  }
  MutexLock mu(self, lock_);
  Object* obj = reinterpret_cast<Object*>(mem_map->Begin());
  large_objects_.push_back(obj);
  mem_maps_.Put(obj, mem_map);
  num_bytes_allocated_ += mem_map->Size();
  ++num_objects_allocated_;
  return obj;
}

void LargeObjectMapSpace::Free(Thread* self, Object* ptr) {
  MutexLock mu(self, lock_);
  MemMaps::iterator found = mem_maps_.find(ptr);
  CHECK(found != mem_maps_.end()) << "Attempted to free large object which was not live";
  DCHECK_GE(num_bytes_allocated_, found->second->Size());
  num_bytes_allocated_ -= found->second->Size();
  --num_objects_allocated_;
  delete found->second;
  mem_maps_.erase(found);
}

size_t LargeObjectMapSpace::AllocationSize(const Object* obj) {
  MutexLock mu(Thread::Current(), lock_);
  MemMaps::iterator found = mem_maps_.find(const_cast<Object*>(obj));
  CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
  return found->second->Size();
}

void LargeObjectMapSpace::Walk(AllocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (MemMaps::iterator it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
    MemMap* mem_map = it->second;
    callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
    callback(NULL, NULL, 0, arg);
  }
}

bool LargeObjectMapSpace::Contains(const Object* obj) const {
  MutexLock mu(Thread::Current(), lock_);
  return mem_maps_.find(const_cast<Object*>(obj)) != mem_maps_.end();
}

FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
  CHECK(size % kAlignment == 0);
  MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
                                         PROT_READ | PROT_WRITE);
  CHECK(mem_map != NULL) << "Failed to allocate large object space mem map";
  return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
}

FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
    : LargeObjectSpace(name),
      begin_(begin),
      end_(end),
      mem_map_(mem_map),
      lock_("free list space lock", kAllocSpaceLock) {
  chunks_.resize(Size() / kAlignment + 1);
  // Add a dummy chunk so we don't need to handle chunks having no next chunk.
  chunks_.back().SetSize(kAlignment, false);
  // Start out with one large free chunk.
  AddFreeChunk(begin_, end_ - begin_, NULL);
}
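
// Chunk bookkeeping: chunks_[i] describes the kAlignment-sized block at Begin() + i * kAlignment,
// so ChunkFromAddr() and AddrFromChunk() below convert between addresses and chunk entries with
// simple index arithmetic, and GetNextChunk() can step to the following chunk because every chunk
// size is a multiple of kAlignment.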

FreeListSpace::~FreeListSpace() {

}

void FreeListSpace::AddFreeChunk(void* address, size_t size, Chunk* previous) {
  Chunk* chunk = ChunkFromAddr(address);
  chunk->SetSize(size, true);
  chunk->SetPrevious(previous);
  Chunk* next_chunk = GetNextChunk(chunk);
  next_chunk->SetPrevious(chunk);
  free_chunks_.insert(chunk);
}

FreeListSpace::Chunk* FreeListSpace::ChunkFromAddr(void* address) {
  size_t offset = reinterpret_cast<byte*>(address) - Begin();
  DCHECK(IsAligned<kAlignment>(offset));
  DCHECK_LT(offset, Size());
  return &chunks_[offset / kAlignment];
}

void* FreeListSpace::AddrFromChunk(Chunk* chunk) {
  return reinterpret_cast<void*>(Begin() + (chunk - &chunks_.front()) * kAlignment);
}

void FreeListSpace::RemoveFreeChunk(Chunk* chunk) {
  // TODO: C++0x
  // TODO: Improve performance, this might be slow.
  std::pair<FreeChunks::iterator, FreeChunks::iterator> range = free_chunks_.equal_range(chunk);
  for (FreeChunks::iterator it = range.first; it != range.second; ++it) {
    if (*it == chunk) {
      free_chunks_.erase(it);
      return;
    }
  }
}

void FreeListSpace::Walk(AllocSpace::WalkCallback callback, void* arg) {
  MutexLock mu(Thread::Current(), lock_);
  for (Chunk* chunk = &chunks_.front(); chunk < &chunks_.back(); ) {
    if (!chunk->IsFree()) {
      size_t size = chunk->GetSize();
      void* begin = AddrFromChunk(chunk);
      void* end = reinterpret_cast<void*>(reinterpret_cast<byte*>(begin) + size);
      callback(begin, end, size, arg);
      callback(NULL, NULL, 0, arg);
    }
    chunk = GetNextChunk(chunk);
  }
}

void FreeListSpace::Free(Thread* self, Object* obj) {
  MutexLock mu(self, lock_);
  CHECK(Contains(obj));
  // Check adjacent chunks to see if we need to combine.
  Chunk* chunk = ChunkFromAddr(obj);
  CHECK(!chunk->IsFree());

  size_t allocation_size = chunk->GetSize();
  madvise(obj, allocation_size, MADV_DONTNEED);
  num_objects_allocated_--;
  num_bytes_allocated_ -= allocation_size;
  Chunk* prev = chunk->GetPrevious();
  Chunk* next = GetNextChunk(chunk);

  // Combine any adjacent free chunks
  size_t extra_size = chunk->GetSize();
  if (next->IsFree()) {
    extra_size += next->GetSize();
    RemoveFreeChunk(next);
  }
  if (prev != NULL && prev->IsFree()) {
    RemoveFreeChunk(prev);
    AddFreeChunk(AddrFromChunk(prev), prev->GetSize() + extra_size, prev->GetPrevious());
  } else {
    AddFreeChunk(AddrFromChunk(chunk), extra_size, prev);
  }
}

bool FreeListSpace::Contains(const Object* obj) const {
  return mem_map_->HasAddress(obj);
}

FreeListSpace::Chunk* FreeListSpace::GetNextChunk(Chunk* chunk) {
  return chunk + chunk->GetSize() / kAlignment;
}

size_t FreeListSpace::AllocationSize(const Object* obj) {
  Chunk* chunk = ChunkFromAddr(const_cast<Object*>(obj));
  CHECK(!chunk->IsFree());
  return chunk->GetSize();
}

Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) {
  MutexLock mu(self, lock_);
  num_bytes = RoundUp(num_bytes, kAlignment);
  Chunk temp;
  temp.SetSize(num_bytes);
  // Find the smallest chunk at least num_bytes in size.
  FreeChunks::iterator found = free_chunks_.lower_bound(&temp);
  if (found == free_chunks_.end()) {
    // Out of memory, or too much fragmentation.
    return NULL;
  }
  Chunk* chunk = *found;
  free_chunks_.erase(found);
  CHECK(chunk->IsFree());
  void* addr = AddrFromChunk(chunk);
  size_t chunk_size = chunk->GetSize();
  chunk->SetSize(num_bytes);
  if (chunk_size > num_bytes) {
    // Split the chunk into two chunks.
    Chunk* new_chunk = GetNextChunk(chunk);
    AddFreeChunk(AddrFromChunk(new_chunk), chunk_size - num_bytes, chunk);
  }

  num_objects_allocated_++;
  num_bytes_allocated_ += num_bytes;
  return reinterpret_cast<Object*>(addr);
}

void FreeListSpace::FreeList(Thread* self, size_t num_ptrs, Object** ptrs) {
  for (size_t i = 0; i < num_ptrs; ++i) {
    Free(self, ptrs[i]);
  }
}

}  // namespace art