// Copyright 2011 Google Inc. All Rights Reserved.

#include "heap.h"

#include <limits>
#include <vector>

#include "UniquePtr.h"
#include "image.h"
#include "mark_sweep.h"
#include "object.h"
#include "space.h"
#include "stl_util.h"
#include "thread_list.h"
#include "timing_logger.h"

namespace art {

bool Heap::is_verbose_heap_ = false;

bool Heap::is_verbose_gc_ = false;

std::vector<Space*> Heap::spaces_;

Space* Heap::alloc_space_ = NULL;

size_t Heap::maximum_size_ = 0;

size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

bool Heap::is_gc_running_ = false;

HeapBitmap* Heap::mark_bitmap_ = NULL;

HeapBitmap* Heap::live_bitmap_ = NULL;

Class* Heap::java_lang_ref_FinalizerReference_ = NULL;
Class* Heap::java_lang_ref_ReferenceQueue_ = NULL;

MemberOffset Heap::reference_referent_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queue_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queueNext_offset_ = MemberOffset(0);
MemberOffset Heap::reference_pendingNext_offset_ = MemberOffset(0);
MemberOffset Heap::finalizer_reference_zombie_offset_ = MemberOffset(0);

float Heap::target_utilization_ = 0.5;

Mutex* Heap::lock_ = NULL;

bool Heap::verify_objects_ = false;
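
// RAII helper that holds the global heap lock (Heap::lock_) for the
// enclosing scope. Typical use, as in Heap::CollectGarbage() below:
//   {
//     ScopedHeapLock lock;   // takes lock_ via Heap::Lock()
//     CollectGarbageInternal();
//   }                        // lock_ released when 'lock' goes out of scope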
class ScopedHeapLock {
 public:
  ScopedHeapLock() {
    Heap::Lock();
  }

  ~ScopedHeapLock() {
    Heap::Unlock();
  }
};

void Heap::Init(bool is_verbose_heap, bool is_verbose_gc,
                size_t initial_size, size_t maximum_size,
                const std::vector<std::string>& image_file_names) {
  is_verbose_heap_ = is_verbose_heap;
  is_verbose_gc_ = is_verbose_gc;

  const Runtime* runtime = Runtime::Current();
  if (is_verbose_heap_ || runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::Init entering";
  }

  // Compute the bounds of all spaces for allocating the live and mark bitmaps.
  // There will be at least one space (the alloc space), so start with base at
  // the maximum address and limit at the minimum.
  byte* base = reinterpret_cast<byte*>(std::numeric_limits<uintptr_t>::max());
  byte* limit = reinterpret_cast<byte*>(std::numeric_limits<uintptr_t>::min());

  byte* requested_base = NULL;
  std::vector<Space*> image_spaces;
  for (size_t i = 0; i < image_file_names.size(); i++) {
    Space* space = Space::CreateFromImage(image_file_names[i]);
    if (space == NULL) {
      LOG(FATAL) << "Failed to create space from " << image_file_names[i];
    }
    image_spaces.push_back(space);
    spaces_.push_back(space);
    byte* oat_limit_addr = space->GetImageHeader().GetOatLimitAddr();
    if (oat_limit_addr > requested_base) {
      requested_base = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_limit_addr),
                                                       kPageSize));
    }
    base = std::min(base, space->GetBase());
    limit = std::max(limit, space->GetLimit());
  }

  alloc_space_ = Space::Create("alloc space", initial_size, maximum_size, requested_base);
  if (alloc_space_ == NULL) {
    LOG(FATAL) << "Failed to create alloc space";
  }
  base = std::min(base, alloc_space_->GetBase());
  limit = std::max(limit, alloc_space_->GetLimit());
  DCHECK_LT(base, limit);
  size_t num_bytes = limit - base;

  // Allocate the initial live bitmap.
  UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
  if (live_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create live bitmap";
  }

  // Allocate the initial mark bitmap.
  UniquePtr<HeapBitmap> mark_bitmap(HeapBitmap::Create(base, num_bytes));
  if (mark_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create mark bitmap";
  }

  spaces_.push_back(alloc_space_);
  maximum_size_ = maximum_size;
  live_bitmap_ = live_bitmap.release();
  mark_bitmap_ = mark_bitmap.release();

  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // TODO: allocate the card table

  // Make image objects live (after live_bitmap_ is set).
  for (size_t i = 0; i < image_spaces.size(); i++) {
    RecordImageAllocations(image_spaces[i]);
  }

  Heap::EnableObjectValidation();

  // It's still too early to take a lock because there are no threads yet,
  // but we can create the heap lock now. We don't create it earlier to
  // make it clear that you can't use locks during heap initialization.
  lock_ = new Mutex("Heap lock");

  if (is_verbose_heap_ || runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::Init exiting";
  }
}

void Heap::Destroy() {
  ScopedHeapLock lock;
  STLDeleteElements(&spaces_);
  if (mark_bitmap_ != NULL) {
    delete mark_bitmap_;
    mark_bitmap_ = NULL;
  }
  if (live_bitmap_ != NULL) {
    delete live_bitmap_;
    live_bitmap_ = NULL;
  }
}

Object* Heap::AllocObject(Class* klass, size_t byte_count) {
  {
    ScopedHeapLock lock;
    DCHECK(klass == NULL || klass->GetDescriptor() == NULL ||
           (klass->IsClassClass() && byte_count >= sizeof(Class)) ||
           (klass->IsVariableSize() || klass->GetObjectSize() == byte_count));
    DCHECK_GE(byte_count, sizeof(Object));
    Object* obj = AllocateLocked(byte_count);
    if (obj != NULL) {
      obj->SetClass(klass);
      return obj;
    }
  }

  Thread::Current()->ThrowOutOfMemoryError(klass, byte_count);
  return NULL;
}

bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (!IsAligned<kObjectAlignment>(obj)) {
    return false;
  }
  // TODO
  return true;
}

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  if (!verify_objects_) {
    return;
  }
  ScopedHeapLock lock;
  Heap::VerifyObjectLocked(obj);
}
#endif

void Heap::VerifyObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  if (obj != NULL) {
    if (!IsAligned<kObjectAlignment>(obj)) {
      LOG(FATAL) << "Object isn't aligned: " << obj;
    } else if (!live_bitmap_->Test(obj)) {
      // TODO: we don't hold a lock here as it is assumed the live bitmap
      // isn't changing while the mutator is running.
      LOG(FATAL) << "Object is dead: " << obj;
    }
    // Ignore early-dawn-of-the-universe verifications.
    if (num_objects_allocated_ > 10) {
      const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
          Object::ClassOffset().Int32Value();
      const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
      if (c == NULL) {
        LOG(FATAL) << "Null class in object: " << obj;
      } else if (!IsAligned<kObjectAlignment>(c)) {
        LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
      } else if (!live_bitmap_->Test(c)) {
        LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
      }
      // Check obj.getClass().getClass() == obj.getClass().getClass().getClass().
      // Note: we don't use the accessors here as they have internal sanity checks
      // that we don't want to run.
      raw_addr = reinterpret_cast<const byte*>(c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
      raw_addr = reinterpret_cast<const byte*>(c_c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
      CHECK_EQ(c_c, c_c_c);
    }
  }
}

void Heap::VerificationCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  Heap::VerifyObjectLocked(obj);
}

void Heap::VerifyHeap() {
  ScopedHeapLock lock;
  live_bitmap_->Walk(Heap::VerificationCallback, NULL);
}

void Heap::RecordAllocationLocked(Space* space, const Object* obj) {
#ifndef NDEBUG
  if (Runtime::Current()->IsStarted()) {
    lock_->AssertHeld();
  }
#endif
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->allocated_objects;
    ++thread_stats->allocated_objects;
    global_stats->allocated_bytes += size;
    thread_stats->allocated_bytes += size;
  }

  live_bitmap_->Set(obj);
}

void Heap::RecordFreeLocked(size_t freed_objects, size_t freed_bytes) {
  lock_->AssertHeld();

  if (freed_objects < num_objects_allocated_) {
    num_objects_allocated_ -= freed_objects;
  } else {
    num_objects_allocated_ = 0;
  }
  if (freed_bytes < num_bytes_allocated_) {
    num_bytes_allocated_ -= freed_bytes;
  } else {
    num_bytes_allocated_ = 0;
  }

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->freed_objects;
    ++thread_stats->freed_objects;
    global_stats->freed_bytes += freed_bytes;
    thread_stats->freed_bytes += freed_bytes;
  }
}

void Heap::RecordImageAllocations(Space* space) {
  const Runtime* runtime = Runtime::Current();
  if (is_verbose_heap_ || runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::RecordImageAllocations entering";
  }
  DCHECK(!Runtime::Current()->IsStarted());
  CHECK(space != NULL);
  CHECK(live_bitmap_ != NULL);
  byte* current = space->GetBase() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
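  // Objects in an image space are laid out contiguously after the
  // ImageHeader, each rounded up to kObjectAlignment, so a simple linear
  // walk visits every object exactly once.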
  while (current < space->GetLimit()) {
    DCHECK_ALIGNED(current, kObjectAlignment);
    const Object* obj = reinterpret_cast<const Object*>(current);
    live_bitmap_->Set(obj);
    current += RoundUp(obj->SizeOf(), kObjectAlignment);
  }
  if (is_verbose_heap_ || runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::RecordImageAllocations exiting";
  }
}

Object* Heap::AllocateLocked(size_t size) {
  lock_->AssertHeld();
  DCHECK(alloc_space_ != NULL);
  Space* space = alloc_space_;
  Object* obj = AllocateLocked(space, size);
  if (obj != NULL) {
    RecordAllocationLocked(space, obj);
  }
  return obj;
}
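
// Allocation fallback ladder, summarizing the steps below: (1) try an
// allocation without growing the heap; (2) if a concurrent GC is running,
// wait for it to complete and retry; (3) run a foreground GC and retry;
// (4) grow the heap and retry; (5) run a final GC that also collects
// SoftReferences, retry with growth, and only then give up and return NULL.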
Object* Heap::AllocateLocked(Space* space, size_t size) {
  lock_->AssertHeld();

  // Fail impossible allocations. TODO: collect soft references.
  if (size > maximum_size_) {
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it
  // completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap. Release the heap
    // lock, wait for the GC to complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure. Our thread was starved or there may be too many
  // live objects. Try a foreground GC. This will have no effect if
  // the concurrent GC is already running.
  if (Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }
  CollectGarbageInternal();
  ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    size_t new_footprint = space->GetMaxAllowedFootprint();
    // OLD-TODO: may want to grow a little bit more so that the amount of
    // free space is equal to the old free space + the utilization slop
    // for the new allocation.
    if (is_verbose_gc_) {
      LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
                << " for " << size << "-byte allocation";
    }
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is
  // really full, really fragmented, or the requested size is really
  // big. Do another GC, collecting SoftReferences this time. The VM
  // spec requires that all SoftReferences have been collected and
  // cleared before throwing an OOME.

  // OLD-TODO: wait for the finalizers from the previous GC to finish.
  if (is_verbose_gc_) {
    LOG(INFO) << "Forcing collection of SoftReferences for "
              << size << "-byte allocation";
  }
  CollectGarbageInternal();
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  LOG(ERROR) << "Out of memory on a " << size << " byte allocation";

  // TODO: tell the HeapSource to dump its state.
  // TODO: dump stack traces for all threads.

  return NULL;
}

int64_t Heap::GetMaxMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetTotalMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetFreeMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}
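
// Visitor used by CountInstances(): walks every live object and counts
// those whose class matches 'class_', either exactly or, when
// count_assignable_ is set, by assignability.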
class InstanceCounter {
 public:
  InstanceCounter(Class* c, bool count_assignable)
      : class_(c), count_assignable_(count_assignable), count_(0) {
  }

  size_t GetCount() {
    return count_;
  }

  static void Callback(Object* o, void* arg) {
    reinterpret_cast<InstanceCounter*>(arg)->VisitInstance(o);
  }

 private:
  void VisitInstance(Object* o) {
    Class* instance_class = o->GetClass();
    if (count_assignable_) {
      // Count instances of class_ and of any class assignable to it.
      if (instance_class != NULL && class_->IsAssignableFrom(instance_class)) {
        ++count_;
      }
    } else {
      // Count exact instances of class_ only.
      if (instance_class == class_) {
        ++count_;
      }
    }
  }

  Class* class_;
  bool count_assignable_;
  size_t count_;
};

int64_t Heap::CountInstances(Class* c, bool count_assignable) {
  ScopedHeapLock lock;
  InstanceCounter counter(c, count_assignable);
  live_bitmap_->Walk(InstanceCounter::Callback, &counter);
  return counter.GetCount();
}

void Heap::CollectGarbage() {
  ScopedHeapLock lock;
  CollectGarbageInternal();
}

void Heap::CollectGarbageInternal() {
  lock_->AssertHeld();

  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  thread_list->SuspendAll();

  size_t initial_size = num_bytes_allocated_;
  TimingLogger timings("CollectGarbageInternal");
  uint64_t t0 = NanoTime();
  Object* cleared_references = NULL;
  {
    MarkSweep mark_sweep;
    timings.AddSplit("ctor");

    mark_sweep.Init();
    timings.AddSplit("Init");

    mark_sweep.MarkRoots();
    timings.AddSplit("MarkRoots");

    // Push marked roots onto the mark stack.

    // TODO: if concurrent
    //   unlock heap
    //   thread_list->ResumeAll();

    mark_sweep.RecursiveMark();
    timings.AddSplit("RecursiveMark");

    // TODO: if concurrent
    //   lock heap
    //   thread_list->SuspendAll();
    //   re-mark root set
    //   scan dirty objects

    mark_sweep.ProcessReferences(false);
    timings.AddSplit("ProcessReferences");

    // TODO: if concurrent
    //   swap bitmaps

    mark_sweep.Sweep();
    timings.AddSplit("Sweep");

    cleared_references = mark_sweep.GetClearedReferences();
  }

  GrowForUtilization();
  timings.AddSplit("GrowForUtilization");
  uint64_t t1 = NanoTime();
  thread_list->ResumeAll();

  EnqueueClearedReferences(&cleared_references);

  // TODO: somehow make the specific GC implementation (here MarkSweep) responsible for logging.
  size_t bytes_freed = initial_size - num_bytes_allocated_;
  bool is_small = (bytes_freed > 0 && bytes_freed < 1024);
  size_t kib_freed = (bytes_freed > 0 ? std::max(bytes_freed / 1024, static_cast<size_t>(1)) : 0);

  size_t footprint = alloc_space_->Size();
  size_t percent_free = 100 - static_cast<size_t>(100.0f * float(num_bytes_allocated_) / footprint);

  uint32_t duration_ms = (t1 - t0) / 1000 / 1000;
  if (is_verbose_gc_) {
    LOG(INFO) << "GC freed " << (is_small ? "<" : "") << kib_freed << "KiB, "
              << percent_free << "% free "
              << (num_bytes_allocated_ / 1024) << "KiB/" << (footprint / 1024) << "KiB, "
              << "paused " << duration_ms << "ms";
  }
  if (is_verbose_heap_) {
    timings.Dump();
  }
}

void Heap::WaitForConcurrentGcToComplete() {
  lock_->AssertHeld();
}

/* Terminology:
 * 1. Footprint: Capacity we allocate from the system.
 * 2. Active space: a.k.a. alloc_space_.
 * 3. Soft footprint: external allocation + spaces footprint + active space footprint.
 * 4. Overhead: soft footprint excluding the active space.
 *
 * Layout: (The spaces below might not be contiguous, but are lumped together to depict size.)
 * |----------------------spaces footprint------------------------|----active space footprint----|
 *                                                    |--active space allocated--|
 * |--------------------soft footprint (includes active)-----------------------------------------|
 * |----------------soft footprint excluding active----------------|
 * |------------soft limit-------...|
 * |------------------------------------ideal footprint---------------------------------------...|
 */

// Sets the maximum number of bytes that the heap is allowed to
// allocate from the system. Clamps to the appropriate maximum
// value.
// Old spaces will count against the ideal size.
//
void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
  if (max_allowed_footprint > Heap::maximum_size_) {
    if (is_verbose_gc_) {
      LOG(INFO) << "Clamp target GC heap from " << max_allowed_footprint
                << " to " << Heap::maximum_size_;
    }
    max_allowed_footprint = Heap::maximum_size_;
  }

  alloc_space_->SetMaxAllowedFootprint(max_allowed_footprint);
}

// kHeapIdealFree is the ideal maximum free size when we grow the heap for
// utilization.
static const size_t kHeapIdealFree = 2 * MB;
// kHeapMinFree guarantees that you always have at least 512 KB free when
// you grow for utilization, regardless of the target utilization ratio.
static const size_t kHeapMinFree = kHeapIdealFree / 4;

// Given the current contents of the active space, increase the allowed
// heap footprint to match the target utilization ratio. This should
// only be called immediately after a full garbage collection.
//
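// Worked example (illustrative numbers, not from a real run): with the
// default target utilization of 0.5 and 3*MB currently allocated, the raw
// target is 6*MB; since that exceeds num_bytes_allocated_ + kHeapIdealFree
// (5*MB), the ideal footprint is clamped to 5*MB.
//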
void Heap::GrowForUtilization() {
  lock_->AssertHeld();

  // We know what our utilization is at this moment.
  // This doesn't actually resize any memory. It just lets the heap grow more
  // when necessary.
  size_t target_size(num_bytes_allocated_ / Heap::GetTargetHeapUtilization());

  if (target_size > num_bytes_allocated_ + kHeapIdealFree) {
    target_size = num_bytes_allocated_ + kHeapIdealFree;
  } else if (target_size < num_bytes_allocated_ + kHeapMinFree) {
    target_size = num_bytes_allocated_ + kHeapMinFree;
  }

  SetIdealFootprint(target_size);
}

pid_t Heap::GetLockOwner() {
  return lock_->GetOwner();
}

void Heap::Lock() {
  // Grab the lock, but put ourselves into Thread::kVmWait if it looks
  // like we're going to have to wait on the mutex. This prevents
  // deadlock if another thread is calling CollectGarbageInternal,
  // since they will have the heap lock and be waiting for mutators to
  // suspend.
  if (!lock_->TryLock()) {
    ScopedThreadStateChange tsc(Thread::Current(), Thread::kVmWait);
    lock_->Lock();
  }
}

void Heap::Unlock() {
  lock_->Unlock();
}

void Heap::SetWellKnownClasses(Class* java_lang_ref_FinalizerReference,
                               Class* java_lang_ref_ReferenceQueue) {
  java_lang_ref_FinalizerReference_ = java_lang_ref_FinalizerReference;
  java_lang_ref_ReferenceQueue_ = java_lang_ref_ReferenceQueue;
  CHECK(java_lang_ref_FinalizerReference_ != NULL);
  CHECK(java_lang_ref_ReferenceQueue_ != NULL);
}

void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
                               MemberOffset reference_queue_offset,
                               MemberOffset reference_queueNext_offset,
                               MemberOffset reference_pendingNext_offset,
                               MemberOffset finalizer_reference_zombie_offset) {
  reference_referent_offset_ = reference_referent_offset;
  reference_queue_offset_ = reference_queue_offset;
  reference_queueNext_offset_ = reference_queueNext_offset;
  reference_pendingNext_offset_ = reference_pendingNext_offset;
  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
}

Object* Heap::GetReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  return reference->GetFieldObject<Object*>(reference_referent_offset_, true);
}

void Heap::ClearReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  reference->SetFieldObject(reference_referent_offset_, NULL, true);
}

// Returns true if the reference object has not yet been enqueued.
bool Heap::IsEnqueuable(const Object* ref) {
  DCHECK(ref != NULL);
  const Object* queue = ref->GetFieldObject<Object*>(reference_queue_offset_, false);
  const Object* queue_next = ref->GetFieldObject<Object*>(reference_queueNext_offset_, false);
  return (queue != NULL) && (queue_next == NULL);
}

void Heap::EnqueueReference(Object* ref, Object** cleared_reference_list) {
  DCHECK(ref != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queue_offset_, false) != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queueNext_offset_, false) == NULL);
  EnqueuePendingReference(ref, cleared_reference_list);
}
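
// Pending references form a circular, singly-linked list threaded through
// the pendingNext field. '*list' keeps pointing at the first reference
// enqueued; its pendingNext is treated as the head, new references are
// spliced in as the new head, and DequeuePendingReference() below removes
// from the head, so both operations are O(1).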
void Heap::EnqueuePendingReference(Object* ref, Object** list) {
  DCHECK(ref != NULL);
  DCHECK(list != NULL);

  if (*list == NULL) {
    ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
    *list = ref;
  } else {
    Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    ref->SetFieldObject(reference_pendingNext_offset_, head, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
  }
}

Object* Heap::DequeuePendingReference(Object** list) {
  DCHECK(list != NULL);
  DCHECK(*list != NULL);
  Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
  Object* ref;
  if (*list == head) {
    ref = *list;
    *list = NULL;
  } else {
    Object* next = head->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
    ref = head;
  }
  ref->SetFieldObject(reference_pendingNext_offset_, NULL, false);
  return ref;
}
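
// Hands a finalizable object to the Java-side FinalizerReference.add()
// method so a FinalizerReference is created for it; the resolved Method*
// is cached in a function-local static on first use.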
void Heap::AddFinalizerReference(Object* object) {
  static Method* FinalizerReference_add =
      java_lang_ref_FinalizerReference_->FindDirectMethod("add", "(Ljava/lang/Object;)V");
  DCHECK(FinalizerReference_add != NULL);
  Object* args[] = { object };
  FinalizerReference_add->Invoke(Thread::Current(), NULL, reinterpret_cast<byte*>(&args), NULL);
}

void Heap::EnqueueClearedReferences(Object** cleared) {
  DCHECK(cleared != NULL);
  if (*cleared != NULL) {
    static Method* ReferenceQueue_add =
        java_lang_ref_ReferenceQueue_->FindDirectMethod("add", "(Ljava/lang/ref/Reference;)V");
    DCHECK(ReferenceQueue_add != NULL);

    Thread* self = Thread::Current();
    ScopedThreadStateChange tsc(self, Thread::kRunnable);
    Object* args[] = { *cleared };
    ReferenceQueue_add->Invoke(self, NULL, reinterpret_cast<byte*>(&args), NULL);
    *cleared = NULL;
  }
}

}  // namespace art