// Copyright 2011 Google Inc. All Rights Reserved.

#include "heap.h"

#include <vector>

#include "UniquePtr.h"
#include "image.h"
#include "mark_sweep.h"
#include "object.h"
#include "space.h"
#include "stl_util.h"
#include "thread_list.h"

namespace art {

std::vector<Space*> Heap::spaces_;

Space* Heap::boot_space_ = NULL;

Space* Heap::alloc_space_ = NULL;

size_t Heap::maximum_size_ = 0;

size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

bool Heap::is_gc_running_ = false;

HeapBitmap* Heap::mark_bitmap_ = NULL;

HeapBitmap* Heap::live_bitmap_ = NULL;

MemberOffset Heap::reference_referent_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queue_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queueNext_offset_ = MemberOffset(0);
MemberOffset Heap::reference_pendingNext_offset_ = MemberOffset(0);
MemberOffset Heap::finalizer_reference_zombie_offset_ = MemberOffset(0);

float Heap::target_utilization_ = 0.5;

Mutex* Heap::lock_ = NULL;

bool Heap::verify_objects_ = false;

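// RAII guard for the global heap lock: acquires it on construction and
// releases it on destruction. Declaring "ScopedHeapLock lock;" at the top
// of a Heap member function keeps the heap state consistent for the whole
// call, including early returns.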
class ScopedHeapLock {
 public:
  ScopedHeapLock() {
    Heap::Lock();
  }

  ~ScopedHeapLock() {
    Heap::Unlock();
  }
};

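// One-time heap setup: map the boot image and any extra image spaces if
// requested, create the allocation space, build live and mark bitmaps that
// cover every space, record the image objects as live, and finally create
// the heap lock. Failures are fatal because the runtime cannot continue
// without a heap.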
void Heap::Init(size_t initial_size, size_t maximum_size,
                const char* boot_image_file_name,
                std::vector<const char*>& image_file_names) {
  const Runtime* runtime = Runtime::Current();
  if (runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::Init entering";
  }

  Space* boot_space;
  byte* requested_base;
  if (boot_image_file_name == NULL) {
    boot_space = NULL;
    requested_base = NULL;
  } else {
    boot_space = Space::CreateFromImage(boot_image_file_name);
    if (boot_space == NULL) {
      LOG(FATAL) << "Failed to create space from " << boot_image_file_name;
    }
    spaces_.push_back(boot_space);
    byte* oat_limit_addr = boot_space->GetImageHeader().GetOatLimitAddr();
    requested_base = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_limit_addr),
                                                     kPageSize));
  }

  std::vector<Space*> image_spaces;
  for (size_t i = 0; i < image_file_names.size(); i++) {
    Space* space = Space::CreateFromImage(image_file_names[i]);
    if (space == NULL) {
      LOG(FATAL) << "Failed to create space from " << image_file_names[i];
    }
    image_spaces.push_back(space);
    spaces_.push_back(space);
    byte* oat_limit_addr = space->GetImageHeader().GetOatLimitAddr();
    requested_base = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_limit_addr),
                                                     kPageSize));
  }

  Space* space = Space::Create(initial_size, maximum_size, requested_base);
  if (space == NULL) {
    LOG(FATAL) << "Failed to create alloc space";
  }

  if (boot_space == NULL) {
    boot_space = space;
  }
  byte* base = std::min(boot_space->GetBase(), space->GetBase());
  byte* limit = std::max(boot_space->GetLimit(), space->GetLimit());
  DCHECK_LT(base, limit);
  size_t num_bytes = limit - base;

  // Allocate the initial live bitmap.
  UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
  if (live_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create live bitmap";
  }

  // Allocate the initial mark bitmap.
  UniquePtr<HeapBitmap> mark_bitmap(HeapBitmap::Create(base, num_bytes));
  if (mark_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create mark bitmap";
  }

  alloc_space_ = space;
  spaces_.push_back(space);
  maximum_size_ = maximum_size;
  live_bitmap_ = live_bitmap.release();
  mark_bitmap_ = mark_bitmap.release();

  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // TODO: allocate the card table

  // Make objects in boot_space live (after live_bitmap_ is set)
  if (boot_image_file_name != NULL) {
    boot_space_ = boot_space;
    RecordImageAllocations(boot_space);
  }
  for (size_t i = 0; i < image_spaces.size(); i++) {
    RecordImageAllocations(image_spaces[i]);
  }

  Heap::EnableObjectValidation();

  // It's still too early to take a lock because there are no threads yet,
  // but we can create the heap lock now. We don't create it earlier to
  // make it clear that you can't use locks during heap initialization.
  lock_ = new Mutex("Heap lock");

  if (runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::Init exiting";
  }
}

void Heap::Destroy() {
  ScopedHeapLock lock;
  STLDeleteElements(&spaces_);
  if (mark_bitmap_ != NULL) {
    delete mark_bitmap_;
    mark_bitmap_ = NULL;
  }
  if (live_bitmap_ != NULL) {
    delete live_bitmap_;
  }
  live_bitmap_ = NULL;
}

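// Main allocation entry point: allocates num_bytes from the allocation
// space under the heap lock and stamps the new object with its class.
// klass may still be NULL very early in startup (see the DCHECK below);
// returns NULL if even the GC/grow retries in AllocateLocked fail.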
Object* Heap::AllocObject(Class* klass, size_t num_bytes) {
  ScopedHeapLock lock;
  DCHECK(klass == NULL
         || klass->GetDescriptor() == NULL
         || (klass->IsClassClass() && num_bytes >= sizeof(Class))
         || (klass->IsVariableSize() || klass->GetObjectSize() == num_bytes));
  DCHECK(num_bytes >= sizeof(Object));
  Object* obj = AllocateLocked(num_bytes);
  if (obj != NULL) {
    obj->SetClass(klass);
  }
  return obj;
}

bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (!IsAligned(obj, kObjectAlignment)) {
    return false;
  }
  // TODO
  return true;
}

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  if (!verify_objects_) {
    return;
  }
  ScopedHeapLock lock;
  Heap::VerifyObjectLocked(obj);
}
#endif

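// Checks that an object looks sane: it must be aligned and marked in the
// live bitmap, and (once past the first few allocations of the runtime's
// life) its class pointer must itself be aligned, live, and satisfy
// obj.getClass().getClass() == obj.getClass().getClass().getClass().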
void Heap::VerifyObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  if (obj != NULL) {
    if (!IsAligned(obj, kObjectAlignment)) {
      LOG(FATAL) << "Object isn't aligned: " << obj;
    } else if (!live_bitmap_->Test(obj)) {
      // TODO: we don't hold a lock here as it is assumed the live bit map
      // isn't changing if the mutator is running.
      LOG(FATAL) << "Object is dead: " << obj;
    }
    // Ignore early dawn of the universe verifications
    if (num_objects_allocated_ > 10) {
      const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
          Object::ClassOffset().Int32Value();
      const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
      if (c == NULL) {
        LOG(FATAL) << "Null class" << " in object: " << obj;
      } else if (!IsAligned(c, kObjectAlignment)) {
        LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
      } else if (!live_bitmap_->Test(c)) {
        LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
      }
      // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
      // NB we don't use the accessors here as they have internal sanity checks
      // that we don't want to run
      raw_addr = reinterpret_cast<const byte*>(c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
      raw_addr = reinterpret_cast<const byte*>(c_c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
      CHECK_EQ(c_c, c_c_c);
    }
  }
}

void Heap::VerificationCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  Heap::VerifyObjectLocked(obj);
}

void Heap::VerifyHeap() {
  ScopedHeapLock lock;
  live_bitmap_->Walk(Heap::VerificationCallback, NULL);
}

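// Bookkeeping for a successful allocation: bumps the global byte and object
// counters, updates the optional runtime and per-thread statistics, and
// sets the object's bit in the live bitmap.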
void Heap::RecordAllocationLocked(Space* space, const Object* obj) {
#ifndef NDEBUG
  if (Runtime::Current()->IsStarted()) {
    lock_->AssertHeld();
  }
#endif
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->allocated_objects;
    ++thread_stats->allocated_objects;
    global_stats->allocated_bytes += size;
    thread_stats->allocated_bytes += size;
  }

  live_bitmap_->Set(obj);
}

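// Mirror of RecordAllocationLocked for a freed object: decrements the
// counters (clamping at zero), clears the object's live bit, and updates
// the optional statistics.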
void Heap::RecordFreeLocked(Space* space, const Object* obj) {
  lock_->AssertHeld();
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  if (size < num_bytes_allocated_) {
    num_bytes_allocated_ -= size;
  } else {
    num_bytes_allocated_ = 0;
  }
  live_bitmap_->Clear(obj);
  if (num_objects_allocated_ > 0) {
    num_objects_allocated_ -= 1;
  }

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->freed_objects;
    ++thread_stats->freed_objects;
    global_stats->freed_bytes += size;
    thread_stats->freed_bytes += size;
  }
}

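// Walks an image space from just past its ImageHeader to its limit and
// marks every object in it live. Image objects are laid out contiguously,
// so each object's size (rounded up to kObjectAlignment) gives the offset
// of the next one.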
void Heap::RecordImageAllocations(Space* space) {
  const Runtime* runtime = Runtime::Current();
  if (runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::RecordImageAllocations entering";
  }
  DCHECK(!Runtime::Current()->IsStarted());
  CHECK(space != NULL);
  CHECK(live_bitmap_ != NULL);
  byte* current = space->GetBase() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
  while (current < space->GetLimit()) {
    DCHECK(IsAligned(current, kObjectAlignment));
    const Object* obj = reinterpret_cast<const Object*>(current);
    live_bitmap_->Set(obj);
    current += RoundUp(obj->SizeOf(), kObjectAlignment);
  }
  if (runtime->IsVerboseStartup()) {
    LOG(INFO) << "Heap::RecordImageAllocations exiting";
  }
}

Object* Heap::AllocateLocked(size_t size) {
  lock_->AssertHeld();
  DCHECK(alloc_space_ != NULL);
  Space* space = alloc_space_;
  Object* obj = AllocateLocked(space, size);
  if (obj != NULL) {
    RecordAllocationLocked(space, obj);
  }
  return obj;
}

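// Allocation slow path. The strategy, in order: refuse allocations larger
// than the maximum heap size, try a plain allocation, wait for any
// concurrent GC and retry, run a foreground GC and retry, retry with heap
// growth, then run one more GC (intended to also clear SoftReferences)
// before giving up and returning NULL so the caller can report OOM. Note
// the UNIMPLEMENTED(FATAL) below: the growth path isn't fully wired up yet.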
Object* Heap::AllocateLocked(Space* space, size_t size) {
  lock_->AssertHeld();

  // Fail impossible allocations. TODO: collect soft references.
  if (size > maximum_size_) {
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it
  // completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap. Release the heap
    // lock, wait for the GC to complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure. Our thread was starved or there may be too many
  // live objects. Try a foreground GC. This will have no effect if
  // the concurrent GC is already running.
  if (Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }
  LOG(INFO) << "GC_FOR_ALLOC: TODO: test";
  CollectGarbageInternal();
  ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }
  UNIMPLEMENTED(FATAL) << "No AllocWithGrowth, use larger -Xms -Xmx";

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    //size_t new_footprint = dvmHeapSourceGetIdealFootprint();
    size_t new_footprint = space->MaxAllowedFootprint();
    // TODO: may want to grow a little bit more so that the amount of
    // free space is equal to the old free space + the
    // utilization slop for the new allocation.
    LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
              << " for " << size << "-byte allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is
  // really full, really fragmented, or the requested size is really
  // big. Do another GC, collecting SoftReferences this time. The VM
  // spec requires that all SoftReferences have been collected and
  // cleared before throwing an OOME.

  // TODO: wait for the finalizers from the previous GC to finish
  LOG(INFO) << "Forcing collection of SoftReferences for "
            << size << "-byte allocation";
  CollectGarbageInternal();
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  LOG(ERROR) << "Out of memory on a " << size << " byte allocation";

  // TODO: tell the HeapSource to dump its state
  // TODO: dump stack traces for all threads

  return NULL;
}

int64_t Heap::GetMaxMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetTotalMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetFreeMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

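// Visitor used by CountInstances: walks every live object and tallies
// those whose class matches the query class (see VisitInstance for how
// count_assignable changes the comparison).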
class InstanceCounter {
 public:
  InstanceCounter(Class* c, bool count_assignable)
      : class_(c), count_assignable_(count_assignable), count_(0) {
  }

  size_t GetCount() {
    return count_;
  }

  static void Callback(Object* o, void* arg) {
    reinterpret_cast<InstanceCounter*>(arg)->VisitInstance(o);
  }

 private:
  void VisitInstance(Object* o) {
    Class* instance_class = o->GetClass();
    if (count_assignable_) {
      if (instance_class == class_) {
        ++count_;
      }
    } else {
      if (instance_class != NULL && class_->IsAssignableFrom(instance_class)) {
        ++count_;
      }
    }
  }

  Class* class_;
  bool count_assignable_;
  size_t count_;
};

int64_t Heap::CountInstances(Class* c, bool count_assignable) {
  ScopedHeapLock lock;
  InstanceCounter counter(c, count_assignable);
  live_bitmap_->Walk(InstanceCounter::Callback, &counter);
  return counter.GetCount();
}

void Heap::CollectGarbage() {
  ScopedHeapLock lock;
  CollectGarbageInternal();
}

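// Stop-the-world collection: suspend all threads, mark the roots, do a
// recursive mark of everything reachable, process references, sweep the
// unmarked objects, grow the heap toward the target utilization, and
// resume the threads. The commented-out steps sketch the planned
// concurrent variant.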
void Heap::CollectGarbageInternal() {
  lock_->AssertHeld();

  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  thread_list->SuspendAll();
  {
    MarkSweep mark_sweep;

    mark_sweep.Init();

    mark_sweep.MarkRoots();

    // Push marked roots onto the mark stack

    // TODO: if concurrent
    //   unlock heap
    //   thread_list->ResumeAll();

    mark_sweep.RecursiveMark();

    // TODO: if concurrent
    //   lock heap
    //   thread_list->SuspendAll();
    //   re-mark root set
    //   scan dirty objects

    mark_sweep.ProcessReferences(false);

    // TODO: swap bitmaps

    mark_sweep.Sweep();
  }

  GrowForUtilization();
  thread_list->ResumeAll();
}

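// There is no concurrent collector yet, so for now there is nothing to
// wait for here beyond asserting that the caller holds the heap lock.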
void Heap::WaitForConcurrentGcToComplete() {
  lock_->AssertHeld();
}

// Given the current contents of the active heap, increase the allowed
// heap footprint to match the target utilization ratio. This should
// only be called immediately after a full garbage collection.
void Heap::GrowForUtilization() {
  lock_->AssertHeld();
  UNIMPLEMENTED(ERROR);
}

void Heap::Lock() {
  // TODO: grab the lock, but put ourselves into Thread::kVmWait if it looks like
  // we're going to have to wait on the mutex.
  lock_->Lock();
}

void Heap::Unlock() {
  lock_->Unlock();
}

}  // namespace art