// Copyright 2011 Google Inc. All Rights Reserved.

#include "heap.h"

#include <limits>
#include <vector>

#include "UniquePtr.h"
#include "image.h"
#include "mark_sweep.h"
#include "object.h"
#include "space.h"
#include "stl_util.h"
#include "thread_list.h"

namespace art {

std::vector<Space*> Heap::spaces_;

Space* Heap::alloc_space_ = NULL;

size_t Heap::maximum_size_ = 0;

size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

bool Heap::is_gc_running_ = false;

HeapBitmap* Heap::mark_bitmap_ = NULL;

HeapBitmap* Heap::live_bitmap_ = NULL;

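// Offsets of the java.lang.ref.Reference and FinalizerReference fields used by
// reference processing. They start at 0 and are presumably filled in during
// startup once those classes have been loaded (assumption: they are set
// elsewhere; this file only declares the storage).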
MemberOffset Heap::reference_referent_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queue_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queueNext_offset_ = MemberOffset(0);
MemberOffset Heap::reference_pendingNext_offset_ = MemberOffset(0);
MemberOffset Heap::finalizer_reference_zombie_offset_ = MemberOffset(0);

float Heap::target_utilization_ = 0.5;

Mutex* Heap::lock_ = NULL;

bool Heap::verify_objects_ = false;

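// RAII guard for the global heap lock: acquires Heap::Lock() on construction
// and releases it in the destructor, so every early return below unlocks.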
class ScopedHeapLock {
 public:
  ScopedHeapLock() {
    Heap::Lock();
  }

  ~ScopedHeapLock() {
    Heap::Unlock();
  }
};

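// One-time heap setup: map any boot image spaces, create the alloc space just
// past them, size the live and mark bitmaps to cover the combined address
// range of all spaces, mark the image objects live, and finally create the
// heap lock (which may not be used until threads exist).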
void Heap::Init(size_t initial_size, size_t maximum_size,
                const std::vector<std::string>& image_file_names) {
  const Runtime* runtime = Runtime::Current();
  if (runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::Init entering";
  }

  // bounds of all spaces for allocating live and mark bitmaps
  // there will be at least one space (the alloc space),
  // so set base to max and limit to min to start
67 byte* base = reinterpret_cast<byte*>(std::numeric_limits<uintptr_t>::max());
68 byte* limit = reinterpret_cast<byte*>(std::numeric_limits<uintptr_t>::min());
Brian Carlstrom4a289ed2011-08-16 17:17:49 -070069
Brian Carlstrom58ae9412011-10-04 00:56:06 -070070 byte* requested_base = NULL;
Brian Carlstrom69b15fb2011-09-03 12:25:21 -070071 std::vector<Space*> image_spaces;
72 for (size_t i = 0; i < image_file_names.size(); i++) {
Brian Carlstrom0a5b14d2011-09-27 13:29:15 -070073 Space* space = Space::CreateFromImage(image_file_names[i]);
Brian Carlstrom69b15fb2011-09-03 12:25:21 -070074 if (space == NULL) {
Elliott Hughesbe759c62011-09-08 19:38:21 -070075 LOG(FATAL) << "Failed to create space from " << image_file_names[i];
Brian Carlstrom69b15fb2011-09-03 12:25:21 -070076 }
77 image_spaces.push_back(space);
78 spaces_.push_back(space);
Brian Carlstrome24fa612011-09-29 00:53:55 -070079 byte* oat_limit_addr = space->GetImageHeader().GetOatLimitAddr();
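    // Request that the alloc space start just past the highest oat data mapped
    // so far, rounded up to a page boundary.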
    if (oat_limit_addr > requested_base) {
      requested_base = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_limit_addr),
                                                       kPageSize));
    }
    base = std::min(base, space->GetBase());
    limit = std::max(limit, space->GetLimit());
  }

  Space* space = Space::Create(initial_size, maximum_size, requested_base);
  if (space == NULL) {
    LOG(FATAL) << "Failed to create alloc space";
  }
  base = std::min(base, space->GetBase());
  limit = std::max(limit, space->GetLimit());
  DCHECK_LT(base, limit);
  size_t num_bytes = limit - base;

  // Allocate the initial live bitmap.
  UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
  if (live_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create live bitmap";
  }

  // Allocate the initial mark bitmap.
  UniquePtr<HeapBitmap> mark_bitmap(HeapBitmap::Create(base, num_bytes));
  if (mark_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create mark bitmap";
  }

  alloc_space_ = space;
  spaces_.push_back(space);
  maximum_size_ = maximum_size;
  live_bitmap_ = live_bitmap.release();
  mark_bitmap_ = mark_bitmap.release();

  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // TODO: allocate the card table

  // Make image objects live (after live_bitmap_ is set)
  for (size_t i = 0; i < image_spaces.size(); i++) {
    RecordImageAllocations(image_spaces[i]);
  }

  Heap::EnableObjectValidation();

  // It's still too early to take a lock because there are no threads yet,
  // but we can create the heap lock now. We don't create it earlier to
  // make it clear that you can't use locks during heap initialization.
  lock_ = new Mutex("Heap lock");

  if (runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::Init exiting";
  }
}

void Heap::Destroy() {
  ScopedHeapLock lock;
  STLDeleteElements(&spaces_);
  if (mark_bitmap_ != NULL) {
    delete mark_bitmap_;
    mark_bitmap_ = NULL;
  }
  if (live_bitmap_ != NULL) {
    delete live_bitmap_;
  }
  live_bitmap_ = NULL;
}

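// Allocates num_bytes from the alloc space and installs klass as the object's
// class. The DCHECK tolerates the early-startup cases where klass (or its
// descriptor) is still NULL, Class objects that are at least sizeof(Class),
// and variable-size classes (for example arrays).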
Object* Heap::AllocObject(Class* klass, size_t num_bytes) {
  ScopedHeapLock lock;
  DCHECK(klass == NULL
         || klass->GetDescriptor() == NULL
         || (klass->IsClassClass() && num_bytes >= sizeof(Class))
         || (klass->IsVariableSize() || klass->GetObjectSize() == num_bytes));
  DCHECK(num_bytes >= sizeof(Object));
  Object* obj = AllocateLocked(num_bytes);
  if (obj != NULL) {
    obj->SetClass(klass);
  }
  return obj;
}

bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (!IsAligned(obj, kObjectAlignment)) {
    return false;
  }
  // TODO
  return true;
}

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  if (!verify_objects_) {
    return;
  }
  ScopedHeapLock lock;
  Heap::VerifyObjectLocked(obj);
}
#endif

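// Sanity-checks an object with the heap lock held: it must be aligned and live
// in the live bitmap, and (once the first few objects exist) its class pointer
// must itself be an aligned, live object whose class-of-class is stable
// (obj->klass->klass == obj->klass->klass->klass).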
void Heap::VerifyObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  if (obj != NULL) {
    if (!IsAligned(obj, kObjectAlignment)) {
      LOG(FATAL) << "Object isn't aligned: " << obj;
    } else if (!live_bitmap_->Test(obj)) {
      // TODO: we don't hold a lock here as it is assumed the live bit map
      // isn't changing if the mutator is running.
      LOG(FATAL) << "Object is dead: " << obj;
    }
    // Ignore early dawn of the universe verifications
    if (num_objects_allocated_ > 10) {
      const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
          Object::ClassOffset().Int32Value();
      const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
      if (c == NULL) {
        LOG(FATAL) << "Null class" << " in object: " << obj;
      } else if (!IsAligned(c, kObjectAlignment)) {
        LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
      } else if (!live_bitmap_->Test(c)) {
        LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
      }
      // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
      // NB we don't use the accessors here as they have internal sanity checks
      // that we don't want to run
      raw_addr = reinterpret_cast<const byte*>(c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
      raw_addr = reinterpret_cast<const byte*>(c_c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
      CHECK_EQ(c_c, c_c_c);
    }
  }
}

void Heap::VerificationCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  Heap::VerifyObjectLocked(obj);
}

void Heap::VerifyHeap() {
  ScopedHeapLock lock;
  live_bitmap_->Walk(Heap::VerificationCallback, NULL);
}

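// Bookkeeping for a successful allocation: bumps the byte and object counters
// (plus the per-runtime and per-thread stats when enabled) and marks the
// object in the live bitmap. RecordFreeLocked below undoes the same state.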
void Heap::RecordAllocationLocked(Space* space, const Object* obj) {
#ifndef NDEBUG
  if (Runtime::Current()->IsStarted()) {
    lock_->AssertHeld();
  }
#endif
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->allocated_objects;
    ++thread_stats->allocated_objects;
    global_stats->allocated_bytes += size;
    thread_stats->allocated_bytes += size;
  }

  live_bitmap_->Set(obj);
}

void Heap::RecordFreeLocked(Space* space, const Object* obj) {
  lock_->AssertHeld();
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  if (size < num_bytes_allocated_) {
    num_bytes_allocated_ -= size;
  } else {
    num_bytes_allocated_ = 0;
  }
  live_bitmap_->Clear(obj);
  if (num_objects_allocated_ > 0) {
    num_objects_allocated_ -= 1;
  }

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->freed_objects;
    ++thread_stats->freed_objects;
    global_stats->freed_bytes += size;
    thread_stats->freed_bytes += size;
  }
}

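// Objects in an image space are laid out contiguously after the ImageHeader,
// each rounded up to kObjectAlignment, so a linear walk from the base to the
// limit visits every object; each one is set in the live bitmap so image
// objects are treated as live from the start.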
void Heap::RecordImageAllocations(Space* space) {
  const Runtime* runtime = Runtime::Current();
  if (runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::RecordImageAllocations entering";
  }
  DCHECK(!Runtime::Current()->IsStarted());
  CHECK(space != NULL);
  CHECK(live_bitmap_ != NULL);
  byte* current = space->GetBase() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
  while (current < space->GetLimit()) {
    DCHECK(IsAligned(current, kObjectAlignment));
    const Object* obj = reinterpret_cast<const Object*>(current);
    live_bitmap_->Set(obj);
    current += RoundUp(obj->SizeOf(), kObjectAlignment);
  }
  if (runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::RecordImageAllocations exiting";
  }
}

Object* Heap::AllocateLocked(size_t size) {
  lock_->AssertHeld();
  DCHECK(alloc_space_ != NULL);
  Space* space = alloc_space_;
  Object* obj = AllocateLocked(space, size);
  if (obj != NULL) {
    RecordAllocationLocked(space, obj);
  }
  return obj;
}

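// Allocation slow path. Escalates through progressively more expensive steps:
// a plain allocation, waiting out a concurrent GC and retrying, a foreground
// GC and retry, growing the heap, and finally a second GC intended to clear
// SoftReferences before giving up and returning NULL (presumably surfaced to
// managed code as an OutOfMemoryError by the caller).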
Object* Heap::AllocateLocked(Space* space, size_t size) {
  lock_->AssertHeld();

  // Fail impossible allocations. TODO: collect soft references.
  if (size > maximum_size_) {
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it
  // completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap. Release the heap
    // lock, wait for the GC to complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure. Our thread was starved or there may be too many
  // live objects. Try a foreground GC. This will have no effect if
  // the concurrent GC is already running.
  if (Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }
  LOG(INFO) << "GC_FOR_ALLOC: TODO: test";
  CollectGarbageInternal();
  ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }
  UNIMPLEMENTED(FATAL) << "No AllocWithGrowth, use larger -Xms -Xmx";

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    //size_t new_footprint = dvmHeapSourceGetIdealFootprint();
    size_t new_footprint = space->MaxAllowedFootprint();
    // TODO: may want to grow a little bit more so that the amount of
    // free space is equal to the old free space + the
    // utilization slop for the new allocation.
    LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
              << " for " << size << "-byte allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is
  // really full, really fragmented, or the requested size is really
  // big. Do another GC, collecting SoftReferences this time. The VM
  // spec requires that all SoftReferences have been collected and
  // cleared before throwing an OOME.

  // TODO: wait for the finalizers from the previous GC to finish
  LOG(INFO) << "Forcing collection of SoftReferences for "
            << size << "-byte allocation";
  CollectGarbageInternal();
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  LOG(ERROR) << "Out of memory on a " << size << " byte allocation";

  // TODO: tell the HeapSource to dump its state
  // TODO: dump stack traces for all threads

  return NULL;
}

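// Counterparts of the Java-level Runtime.maxMemory()/totalMemory()/freeMemory()
// queries, presumably. All three are still stubs returning 0; one would expect
// them to eventually report maximum_size_, the current footprint, and the
// footprint minus num_bytes_allocated_ respectively (assumption, not
// implemented here).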
int64_t Heap::GetMaxMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetTotalMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetFreeMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

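// Visitor handed to HeapBitmap::Walk by CountInstances: counts objects whose
// class either matches c exactly or is assignable to it, depending on
// count_assignable.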
class InstanceCounter {
 public:
  InstanceCounter(Class* c, bool count_assignable)
      : class_(c), count_assignable_(count_assignable), count_(0) {
  }

  size_t GetCount() {
    return count_;
  }

  static void Callback(Object* o, void* arg) {
    reinterpret_cast<InstanceCounter*>(arg)->VisitInstance(o);
  }

 private:
  void VisitInstance(Object* o) {
    Class* instance_class = o->GetClass();
    if (count_assignable_) {
      if (instance_class != NULL && class_->IsAssignableFrom(instance_class)) {
        ++count_;
      }
    } else {
      if (instance_class == class_) {
        ++count_;
      }
    }
  }

  Class* class_;
  bool count_assignable_;
  size_t count_;
};

int64_t Heap::CountInstances(Class* c, bool count_assignable) {
  ScopedHeapLock lock;
  InstanceCounter counter(c, count_assignable);
  live_bitmap_->Walk(InstanceCounter::Callback, &counter);
  return counter.GetCount();
}

void Heap::CollectGarbage() {
  ScopedHeapLock lock;
  CollectGarbageInternal();
}

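// Stop-the-world mark-sweep: suspend all threads, mark the roots, recursively
// mark reachable objects, process references, sweep unmarked objects, grow the
// footprint toward the target utilization, then resume the world. The
// commented-out TODOs sketch where a concurrent collector would release and
// re-acquire the world.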
void Heap::CollectGarbageInternal() {
  lock_->AssertHeld();

  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  thread_list->SuspendAll();
  {
    MarkSweep mark_sweep;

    mark_sweep.Init();

    mark_sweep.MarkRoots();

    // Push marked roots onto the mark stack

    // TODO: if concurrent
    // unlock heap
    // thread_list->ResumeAll();

    mark_sweep.RecursiveMark();

    // TODO: if concurrent
    // lock heap
    // thread_list->SuspendAll();
    // re-mark root set
    // scan dirty objects

    mark_sweep.ProcessReferences(false);

    // TODO: swap bitmaps

    mark_sweep.Sweep();
  }

  GrowForUtilization();
  thread_list->ResumeAll();
}

void Heap::WaitForConcurrentGcToComplete() {
  lock_->AssertHeld();
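  // Placeholder: the collector above is not yet concurrent (is_gc_running_ is
  // never set to true in this file), so there is nothing to wait for here.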
}

// Given the current contents of the active heap, increase the allowed
// heap footprint to match the target utilization ratio. This should
// only be called immediately after a full garbage collection.
void Heap::GrowForUtilization() {
  lock_->AssertHeld();
  UNIMPLEMENTED(ERROR);
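  // A plausible sketch (assumption only; not what this stub does): size the
  // footprint so that live data occupies target_utilization_ of it, e.g.
  //   size_t target = static_cast<size_t>(num_bytes_allocated_ / target_utilization_);
  //   alloc_space_->SetMaxAllowedFootprint(std::min(target, maximum_size_));
  // where SetMaxAllowedFootprint is a hypothetical Space setter.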
}

void Heap::Lock() {
  // TODO: grab the lock, but put ourselves into Thread::kVmWait if it looks like
  // we're going to have to wait on the mutex.
  lock_->Lock();
}

void Heap::Unlock() {
  lock_->Unlock();
}

}  // namespace art