// Copyright 2011 Google Inc. All Rights Reserved.

#include "heap.h"

#include <limits>
#include <vector>

#include "debugger.h"
#include "image.h"
#include "mark_sweep.h"
#include "object.h"
#include "space.h"
#include "stl_util.h"
#include "thread_list.h"
#include "timing_logger.h"
#include "UniquePtr.h"

namespace art {

bool Heap::is_verbose_heap_ = false;

bool Heap::is_verbose_gc_ = false;

std::vector<Space*> Heap::spaces_;

Space* Heap::alloc_space_ = NULL;

size_t Heap::maximum_size_ = 0;

size_t Heap::growth_size_ = 0;

size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

bool Heap::is_gc_running_ = false;

HeapBitmap* Heap::mark_bitmap_ = NULL;

HeapBitmap* Heap::live_bitmap_ = NULL;

Class* Heap::java_lang_ref_FinalizerReference_ = NULL;
Class* Heap::java_lang_ref_ReferenceQueue_ = NULL;

MemberOffset Heap::reference_referent_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queue_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queueNext_offset_ = MemberOffset(0);
MemberOffset Heap::reference_pendingNext_offset_ = MemberOffset(0);
MemberOffset Heap::finalizer_reference_zombie_offset_ = MemberOffset(0);

float Heap::target_utilization_ = 0.5;

Mutex* Heap::lock_ = NULL;

bool Heap::verify_objects_ = false;

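// RAII helper that acquires the heap lock on construction and releases it on
// destruction, so every path out of the guarded scope unlocks correctly.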
class ScopedHeapLock {
 public:
  ScopedHeapLock() {
    Heap::Lock();
  }

  ~ScopedHeapLock() {
    Heap::Unlock();
  }
};

void Heap::Init(bool is_verbose_heap, bool is_verbose_gc,
                size_t initial_size, size_t maximum_size, size_t growth_size,
                const std::vector<std::string>& image_file_names) {
  is_verbose_heap_ = is_verbose_heap;
  is_verbose_gc_ = is_verbose_gc;

  const Runtime* runtime = Runtime::Current();
  if (is_verbose_heap_ || runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::Init entering";
  }

  // Compute the bounds of all spaces, for allocating the live and mark bitmaps.
  // There will be at least one space (the alloc space), so start with base at
  // the maximum address and limit at the minimum.
  byte* base = reinterpret_cast<byte*>(std::numeric_limits<uintptr_t>::max());
  byte* limit = reinterpret_cast<byte*>(std::numeric_limits<uintptr_t>::min());

  byte* requested_base = NULL;
  std::vector<Space*> image_spaces;
  for (size_t i = 0; i < image_file_names.size(); i++) {
    Space* space = Space::CreateFromImage(image_file_names[i]);
    if (space == NULL) {
      LOG(FATAL) << "Failed to create space from " << image_file_names[i];
    }
    image_spaces.push_back(space);
    spaces_.push_back(space);
    byte* oat_limit_addr = space->GetImageHeader().GetOatLimitAddr();
    if (oat_limit_addr > requested_base) {
      requested_base = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_limit_addr),
                                                       kPageSize));
    }
    base = std::min(base, space->GetBase());
    limit = std::max(limit, space->GetLimit());
  }

  alloc_space_ = Space::Create("alloc space", initial_size, maximum_size, growth_size, requested_base);
  if (alloc_space_ == NULL) {
    LOG(FATAL) << "Failed to create alloc space";
  }
  base = std::min(base, alloc_space_->GetBase());
  limit = std::max(limit, alloc_space_->GetLimit());
  DCHECK_LT(base, limit);
  size_t num_bytes = limit - base;

  // Allocate the initial live bitmap.
  UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
  if (live_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create live bitmap";
  }

  // Allocate the initial mark bitmap.
  UniquePtr<HeapBitmap> mark_bitmap(HeapBitmap::Create(base, num_bytes));
  if (mark_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create mark bitmap";
  }

  spaces_.push_back(alloc_space_);
  maximum_size_ = maximum_size;
  growth_size_ = growth_size;
  live_bitmap_ = live_bitmap.release();
  mark_bitmap_ = mark_bitmap.release();

  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // TODO: allocate the card table

  // Make image objects live (after live_bitmap_ is set).
  for (size_t i = 0; i < image_spaces.size(); i++) {
    RecordImageAllocations(image_spaces[i]);
  }

  Heap::EnableObjectValidation();

  // It's still too early to take a lock because there are no threads yet,
  // but we can create the heap lock now. We don't create it earlier to
  // make it clear that you can't use locks during heap initialization.
  lock_ = new Mutex("Heap lock");

  if (is_verbose_heap_ || runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::Init exiting";
  }
}

void Heap::Destroy() {
  ScopedHeapLock lock;
  STLDeleteElements(&spaces_);
  if (mark_bitmap_ != NULL) {
    delete mark_bitmap_;
    mark_bitmap_ = NULL;
  }
  if (live_bitmap_ != NULL) {
    delete live_bitmap_;
    live_bitmap_ = NULL;
  }
}

Object* Heap::AllocObject(Class* klass, size_t byte_count) {
  {
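    // klass (or its descriptor) may legitimately still be unset this early in
    // startup, which is why the DCHECK below is deliberately permissive.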
    ScopedHeapLock lock;
    DCHECK(klass == NULL || klass->GetDescriptor() == NULL ||
           (klass->IsClassClass() && byte_count >= sizeof(Class)) ||
           (klass->IsVariableSize() || klass->GetObjectSize() == byte_count));
    DCHECK_GE(byte_count, sizeof(Object));
    Object* obj = AllocateLocked(byte_count);
    if (obj != NULL) {
      obj->SetClass(klass);
      return obj;
    }
  }

  Thread::Current()->ThrowOutOfMemoryError(klass, byte_count);
  return NULL;
}

bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (!IsAligned<kObjectAlignment>(obj)) {
    return false;
  }
  // TODO
  return true;
}

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  if (!verify_objects_) {
    return;
  }
  ScopedHeapLock lock;
  Heap::VerifyObjectLocked(obj);
}
#endif

void Heap::VerifyObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  if (obj != NULL) {
    if (!IsAligned<kObjectAlignment>(obj)) {
      LOG(FATAL) << "Object isn't aligned: " << obj;
    } else if (!live_bitmap_->Test(obj)) {
      // TODO: we don't hold a lock here as it is assumed the live bit map
      // isn't changing if the mutator is running.
      LOG(FATAL) << "Object is dead: " << obj;
    }
    // Ignore early dawn of the universe verifications.
    if (num_objects_allocated_ > 10) {
      const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
          Object::ClassOffset().Int32Value();
      const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
      if (c == NULL) {
        LOG(FATAL) << "Null class in object: " << obj;
      } else if (!IsAligned<kObjectAlignment>(c)) {
        LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
      } else if (!live_bitmap_->Test(c)) {
        LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
      }
      // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
      // Note: we don't use the accessors here as they have internal sanity checks
      // that we don't want to run.
      raw_addr = reinterpret_cast<const byte*>(c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
      raw_addr = reinterpret_cast<const byte*>(c_c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
      CHECK_EQ(c_c, c_c_c);
    }
  }
}

void Heap::VerificationCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  Heap::VerifyObjectLocked(obj);
}

void Heap::VerifyHeap() {
  ScopedHeapLock lock;
  live_bitmap_->Walk(Heap::VerificationCallback, NULL);
}

void Heap::RecordAllocationLocked(Space* space, const Object* obj) {
#ifndef NDEBUG
  if (Runtime::Current()->IsStarted()) {
    lock_->AssertHeld();
  }
#endif
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->allocated_objects;
    ++thread_stats->allocated_objects;
    global_stats->allocated_bytes += size;
    thread_stats->allocated_bytes += size;
  }

  live_bitmap_->Set(obj);
}

void Heap::RecordFreeLocked(size_t freed_objects, size_t freed_bytes) {
  lock_->AssertHeld();

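  // Subtract with saturation at zero so the counters never underflow.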
  if (freed_objects < num_objects_allocated_) {
    num_objects_allocated_ -= freed_objects;
  } else {
    num_objects_allocated_ = 0;
  }
  if (freed_bytes < num_bytes_allocated_) {
    num_bytes_allocated_ -= freed_bytes;
  } else {
    num_bytes_allocated_ = 0;
  }

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->freed_objects;
    ++thread_stats->freed_objects;
    global_stats->freed_bytes += freed_bytes;
    thread_stats->freed_bytes += freed_bytes;
  }
}

void Heap::RecordImageAllocations(Space* space) {
  const Runtime* runtime = Runtime::Current();
  if (is_verbose_heap_ || runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::RecordImageAllocations entering";
  }
  DCHECK(!Runtime::Current()->IsStarted());
  CHECK(space != NULL);
  CHECK(live_bitmap_ != NULL);
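  // Image objects are packed contiguously after the ImageHeader, each aligned
  // to kObjectAlignment, so the space can be walked object by object.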
  byte* current = space->GetBase() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
  while (current < space->GetLimit()) {
    DCHECK_ALIGNED(current, kObjectAlignment);
    const Object* obj = reinterpret_cast<const Object*>(current);
    live_bitmap_->Set(obj);
    current += RoundUp(obj->SizeOf(), kObjectAlignment);
  }
  if (is_verbose_heap_ || runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::RecordImageAllocations exiting";
  }
}

Object* Heap::AllocateLocked(size_t size) {
  lock_->AssertHeld();
  DCHECK(alloc_space_ != NULL);
  Space* space = alloc_space_;
  Object* obj = AllocateLocked(space, size);
  if (obj != NULL) {
    RecordAllocationLocked(space, obj);
  }
  return obj;
}

Object* Heap::AllocateLocked(Space* space, size_t size) {
  lock_->AssertHeld();

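  // Allocation escalates through several attempts: (1) allocate without
  // growing; (2) if a GC is in progress, wait for it and retry; (3) run a
  // foreground GC and retry; (4) retry with heap growth; (5) GC once more
  // (intended to also clear SoftReferences) and make a final growing
  // attempt before giving up.
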
  // Since allocation can cause a GC which will need to SuspendAll,
  // make sure all allocators are in the kRunnable state.
  DCHECK_EQ(Thread::Current()->GetState(), Thread::kRunnable);

  // Fail impossible allocations. TODO: collect soft references.
  if (size > growth_size_) {
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it
  // completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap. Release the heap
    // lock, wait for the GC to complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure. Our thread was starved or there may be too many
  // live objects. Try a foreground GC. This will have no effect if
  // the concurrent GC is already running.
  if (Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }
  CollectGarbageInternal();
  ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    // size_t new_footprint = dvmHeapSourceGetIdealFootprint();
    size_t new_footprint = space->GetMaxAllowedFootprint();
    // OLD-TODO: may want to grow a little bit more so that the amount of
    //           free space is equal to the old free space + the
    //           utilization slop for the new allocation.
    if (is_verbose_gc_) {
      LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
                << " for " << size << "-byte allocation";
    }
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is
  // really full, really fragmented, or the requested size is really
  // big. Do another GC, collecting SoftReferences this time. The VM
  // spec requires that all SoftReferences have been collected and
  // cleared before throwing an OOME.

  // OLD-TODO: wait for the finalizers from the previous GC to finish
  if (is_verbose_gc_) {
    LOG(INFO) << "Forcing collection of SoftReferences for "
              << size << "-byte allocation";
  }
  CollectGarbageInternal();
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  LOG(ERROR) << "Out of memory on a " << size << " byte allocation";

  // TODO: tell the HeapSource to dump its state
  // TODO: dump stack traces for all threads

  return NULL;
}

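// Heap size queries (in the java.lang.Runtime sense): max is the growth
// limit, total is the alloc space's current footprint, and free is the
// footprint minus the bytes currently allocated.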
int64_t Heap::GetMaxMemory() {
  return growth_size_;
}

int64_t Heap::GetTotalMemory() {
  return alloc_space_->Size();
}

int64_t Heap::GetFreeMemory() {
  return alloc_space_->Size() - num_bytes_allocated_;
}

class InstanceCounter {
 public:
  InstanceCounter(Class* c, bool count_assignable)
      : class_(c), count_assignable_(count_assignable), count_(0) {
  }

  size_t GetCount() {
    return count_;
  }

  static void Callback(Object* o, void* arg) {
    reinterpret_cast<InstanceCounter*>(arg)->VisitInstance(o);
  }

 private:
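  // When count_assignable_ is set, count every instance assignable to class_;
  // otherwise count only exact instances of class_.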
  void VisitInstance(Object* o) {
    Class* instance_class = o->GetClass();
    if (count_assignable_) {
      if (instance_class != NULL && class_->IsAssignableFrom(instance_class)) {
        ++count_;
      }
    } else {
      if (instance_class == class_) {
        ++count_;
      }
    }
  }

  Class* class_;
  bool count_assignable_;
  size_t count_;
};

int64_t Heap::CountInstances(Class* c, bool count_assignable) {
  ScopedHeapLock lock;
  InstanceCounter counter(c, count_assignable);
  live_bitmap_->Walk(InstanceCounter::Callback, &counter);
  return counter.GetCount();
}

void Heap::CollectGarbage() {
  ScopedHeapLock lock;
  CollectGarbageInternal();
}

void Heap::CollectGarbageInternal() {
  lock_->AssertHeld();

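  // Stop-the-world mark-sweep: suspend all threads, mark roots, recursively
  // mark everything reachable, process references, sweep unmarked objects,
  // adjust the footprint toward the target utilization, resume the threads,
  // and finally enqueue any cleared references.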
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  thread_list->SuspendAll();

  size_t initial_size = num_bytes_allocated_;
  TimingLogger timings("CollectGarbageInternal");
  uint64_t t0 = NanoTime();
  Object* cleared_references = NULL;
  {
    MarkSweep mark_sweep;
    timings.AddSplit("ctor");

    mark_sweep.Init();
    timings.AddSplit("Init");

    mark_sweep.MarkRoots();
    timings.AddSplit("MarkRoots");

    // Push marked roots onto the mark stack

    // TODO: if concurrent
    //   unlock heap
    //   thread_list->ResumeAll();

    mark_sweep.RecursiveMark();
    timings.AddSplit("RecursiveMark");

    // TODO: if concurrent
    //   lock heap
    //   thread_list->SuspendAll();
    //   re-mark root set
    //   scan dirty objects

    mark_sweep.ProcessReferences(false);
    timings.AddSplit("ProcessReferences");

    // TODO: if concurrent
    //   swap bitmaps

    mark_sweep.Sweep();
    timings.AddSplit("Sweep");

    cleared_references = mark_sweep.GetClearedReferences();
  }

  GrowForUtilization();
  timings.AddSplit("GrowForUtilization");
  uint64_t t1 = NanoTime();
  thread_list->ResumeAll();

  EnqueueClearedReferences(&cleared_references);

  // TODO: somehow make the specific GC implementation (here MarkSweep) responsible for logging.
  size_t bytes_freed = initial_size - num_bytes_allocated_;
  bool is_small = (bytes_freed > 0 && bytes_freed < 1024);
  size_t kib_freed = (bytes_freed > 0 ? std::max(bytes_freed / 1024, 1U) : 0);

  size_t total = GetTotalMemory();
  size_t percent_free = 100 - static_cast<size_t>(100.0f * float(num_bytes_allocated_) / total);

  uint32_t duration_ms = (t1 - t0) / 1000 / 1000;
  if (is_verbose_gc_) {
    LOG(INFO) << "GC freed " << (is_small ? "<" : "") << kib_freed << "KiB, "
              << percent_free << "% free "
              << (num_bytes_allocated_ / 1024) << "KiB/" << (total / 1024) << "KiB, "
              << "paused " << duration_ms << "ms";
  }
  Dbg::GcDidFinish();
  if (is_verbose_heap_) {
    timings.Dump();
  }
}

void Heap::WaitForConcurrentGcToComplete() {
  lock_->AssertHeld();
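  // There is no concurrent collector yet, so there is currently nothing to
  // wait on; the assertion documents the calling convention.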
}

/* Terminology:
 * 1. Footprint: Capacity we allocate from system.
 * 2. Active space: a.k.a. alloc_space_.
 * 3. Soft footprint: external allocation + spaces footprint + active space footprint
 * 4. Overhead: soft footprint excluding active.
 *
 * Layout: (The spaces below might not be contiguous, but are lumped together to depict size.)
 * |----------------------spaces footprint------------------------|----active space footprint----|
 *                                                                |--active space allocated--|
 * |--------------------soft footprint (include active)--------------------------------------|
 * |----------------soft footprint excluding active---------------|
 *                                 |------------soft limit-------...|
 * |------------------------------------ideal footprint-----------------------------------------...|
 *
 */

// Sets the maximum number of bytes that the heap is allowed to
// allocate from the system. Clamps to the appropriate maximum
// value.
// Old spaces will count against the ideal size.
//
void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
  if (max_allowed_footprint > Heap::growth_size_) {
    if (is_verbose_gc_) {
      LOG(INFO) << "Clamp target GC heap from " << max_allowed_footprint
                << " to " << Heap::growth_size_;
    }
    max_allowed_footprint = Heap::growth_size_;
  }

  alloc_space_->SetMaxAllowedFootprint(max_allowed_footprint);
}

// kHeapIdealFree is the ideal maximum free size, when we grow the heap for
// utilization.
static const size_t kHeapIdealFree = 2 * MB;
// kHeapMinFree guarantees that you always have at least 512 KB free, when
// you grow for utilization, regardless of target utilization ratio.
static const size_t kHeapMinFree = kHeapIdealFree / 4;

// Given the current contents of the active space, increase the allowed
// heap footprint to match the target utilization ratio. This should
// only be called immediately after a full garbage collection.
//
void Heap::GrowForUtilization() {
  lock_->AssertHeld();

  // We know what our utilization is at this moment.
  // This doesn't actually resize any memory. It just lets the heap grow more
  // when necessary.
  size_t target_size(num_bytes_allocated_ / Heap::GetTargetHeapUtilization());

  if (target_size > num_bytes_allocated_ + kHeapIdealFree) {
    target_size = num_bytes_allocated_ + kHeapIdealFree;
  } else if (target_size < num_bytes_allocated_ + kHeapMinFree) {
    target_size = num_bytes_allocated_ + kHeapMinFree;
  }

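  // Worked example (a sketch, assuming GetTargetHeapUtilization() returns
  // the default target_utilization_ of 0.5): with 8 MB allocated the raw
  // target is 8 MB / 0.5 = 16 MB, which exceeds 8 MB + kHeapIdealFree (2 MB),
  // so the target is clamped down to 10 MB before becoming the new footprint.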
  SetIdealFootprint(target_size);
}

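// Lifts the growth limit to the full maximum heap size, so the alloc space
// may grow beyond its initial cap.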
void Heap::ClearGrowthLimit() {
  ScopedHeapLock lock;
  WaitForConcurrentGcToComplete();
  CHECK_GE(maximum_size_, growth_size_);
  growth_size_ = maximum_size_;
  alloc_space_->ClearGrowthLimit();
}

pid_t Heap::GetLockOwner() {
  return lock_->GetOwner();
}

void Heap::Lock() {
  // Grab the lock, but put ourselves into Thread::kVmWait if it looks
  // like we're going to have to wait on the mutex. This prevents
  // deadlock if another thread is calling CollectGarbageInternal,
  // since they will have the heap lock and be waiting for mutators to
  // suspend.
  if (!lock_->TryLock()) {
    ScopedThreadStateChange tsc(Thread::Current(), Thread::kVmWait);
    lock_->Lock();
  }
}

void Heap::Unlock() {
  lock_->Unlock();
}

void Heap::SetWellKnownClasses(Class* java_lang_ref_FinalizerReference,
                               Class* java_lang_ref_ReferenceQueue) {
  java_lang_ref_FinalizerReference_ = java_lang_ref_FinalizerReference;
  java_lang_ref_ReferenceQueue_ = java_lang_ref_ReferenceQueue;
  CHECK(java_lang_ref_FinalizerReference_ != NULL);
  CHECK(java_lang_ref_ReferenceQueue_ != NULL);
}

void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
                               MemberOffset reference_queue_offset,
                               MemberOffset reference_queueNext_offset,
                               MemberOffset reference_pendingNext_offset,
                               MemberOffset finalizer_reference_zombie_offset) {
  reference_referent_offset_ = reference_referent_offset;
  reference_queue_offset_ = reference_queue_offset;
  reference_queueNext_offset_ = reference_queueNext_offset;
  reference_pendingNext_offset_ = reference_pendingNext_offset;
  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
}

Object* Heap::GetReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  return reference->GetFieldObject<Object*>(reference_referent_offset_, true);
}

void Heap::ClearReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  reference->SetFieldObject(reference_referent_offset_, NULL, true);
}

// Returns true if the reference object has not yet been enqueued.
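// A non-NULL queue field means the reference was created with an associated
// ReferenceQueue; a NULL queueNext means it has not yet been added to that
// queue.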
bool Heap::IsEnqueuable(const Object* ref) {
  DCHECK(ref != NULL);
  const Object* queue = ref->GetFieldObject<Object*>(reference_queue_offset_, false);
  const Object* queue_next = ref->GetFieldObject<Object*>(reference_queueNext_offset_, false);
  return (queue != NULL) && (queue_next == NULL);
}

void Heap::EnqueueReference(Object* ref, Object** cleared_reference_list) {
  DCHECK(ref != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queue_offset_, false) != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queueNext_offset_, false) == NULL);
  EnqueuePendingReference(ref, cleared_reference_list);
}

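// The pending list is a circular singly-linked list threaded through the
// pendingNext field: *list points at one element whose pendingNext is the
// head. EnqueuePendingReference links new references in at the head, and
// DequeuePendingReference removes from the head; a single-element list is
// an element whose pendingNext points at itself.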
void Heap::EnqueuePendingReference(Object* ref, Object** list) {
  DCHECK(ref != NULL);
  DCHECK(list != NULL);

  if (*list == NULL) {
    ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
    *list = ref;
  } else {
    Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    ref->SetFieldObject(reference_pendingNext_offset_, head, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
  }
}

Object* Heap::DequeuePendingReference(Object** list) {
  DCHECK(list != NULL);
  DCHECK(*list != NULL);
  Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
  Object* ref;
  if (*list == head) {
    ref = *list;
    *list = NULL;
  } else {
    Object* next = head->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
    ref = head;
  }
  ref->SetFieldObject(reference_pendingNext_offset_, NULL, false);
  return ref;
}

void Heap::AddFinalizerReference(Object* object) {
  static Method* FinalizerReference_add =
      java_lang_ref_FinalizerReference_->FindDirectMethod("add", "(Ljava/lang/Object;)V");
  DCHECK(FinalizerReference_add != NULL);
  Object* args[] = { object };
  FinalizerReference_add->Invoke(Thread::Current(), NULL, reinterpret_cast<byte*>(&args), NULL);
}

void Heap::EnqueueClearedReferences(Object** cleared) {
  DCHECK(cleared != NULL);
  if (*cleared != NULL) {
    static Method* ReferenceQueue_add =
        java_lang_ref_ReferenceQueue_->FindDirectMethod("add", "(Ljava/lang/ref/Reference;)V");
    DCHECK(ReferenceQueue_add != NULL);

    Thread* self = Thread::Current();
    ScopedThreadStateChange tsc(self, Thread::kRunnable);
    Object* args[] = { *cleared };
    ReferenceQueue_add->Invoke(self, NULL, reinterpret_cast<byte*>(&args), NULL);
    *cleared = NULL;
  }
}

}  // namespace art